BlockPlacementPolicyDefault

How to use BlockPlacementPolicyDefault in org.apache.hadoop.hdfs.server.blockmanagement

Best Java code snippets using org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

splitNodesWithRack(availableReplicas, delCandidates, rackMap, moreThanOne,
    exactlyOne);
if (firstOne && useDelHint(delNodeHintStorage, addedNodeStorage,
    moreThanOne, exactlyOne, excessTypes)) {
  cur = delNodeHintStorage;
} else { // regular excessive replica removal
  cur = chooseReplicaToDelete(moreThanOne, exactlyOne,
      excessTypes, rackMap);
}
adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);
excessReplicas.add(cur);
origin: org.apache.hadoop/hadoop-hdfs

int[] result = getMaxNodesPerRack(chosenStorage.size(), numOfReplicas);
numOfReplicas = result[0];
int maxNodesPerRack = result[1];
// ... add each already-chosen node to the excluded set
addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
// ... if the client asked to avoid the local node, retry with the writer
// added to a copy of the excluded set
excludedNodeCopy.add(writer);
localNode = chooseTarget(numOfReplicas, writer,
    excludedNodeCopy, blocksize, maxNodesPerRack, results,
    avoidStaleNodes, storagePolicy,
    EnumSet.noneOf(StorageType.class), results.isEmpty());
// ... otherwise choose targets with the original excluded set
localNode = chooseTarget(numOfReplicas, writer, excludedNodes,
    blocksize, maxNodesPerRack, results, avoidStaleNodes,
    storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty());
return getPipeline(
    (writer != null && writer instanceof DatanodeDescriptor) ? writer
        : localNode,
    results.toArray(new DatanodeStorageInfo[results.size()]));
origin: org.apache.hadoop/hadoop-hdfs

DatanodeDescriptor chosenNode = null;
if (clusterMap instanceof DFSNetworkTopology) {
  for (StorageType type : storageTypes.keySet()) {
    chosenNode = chooseDataNode(scope, excludedNodes, type);
    // ...
  }
} else {
  chosenNode = chooseDataNode(scope, excludedNodes);
}
if (isGoodDatanode(chosenNode, maxNodesPerRack, considerLoad,
    results, avoidStaleNodes)) {
  for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
      .entrySet().iterator(); iter.hasNext(); ) {
    Map.Entry<StorageType, Integer> entry = iter.next();
    DatanodeStorageInfo storage = chooseStorage4Block(
        chosenNode, blocksize, results, entry.getKey());
    if (storage != null) {
      addToExcludedNodes(chosenNode, excludedNodes);
      int num = entry.getValue();
      if (num == 1) {
        iter.remove(); // this storage type is fully satisfied
      }
      // ...
    }
  }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Choose <i>localMachine</i> as the target.
 * If <i>localMachine</i> is not available, choose a node on the same rack.
 * @return the chosen storage
 */
protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
  Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
  List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
  EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
  throws NotEnoughReplicasException {
 DatanodeStorageInfo localStorage = chooseLocalStorage(localMachine,
   excludedNodes, blocksize, maxNodesPerRack, results,
   avoidStaleNodes, storageTypes);
 if (localStorage != null) {
  return localStorage;
 }
 if (!fallbackToLocalRack) {
  return null;
 }
 // try a node on local rack
 return chooseLocalRack(localMachine, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
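
The fallbackToLocalRack flag lets each caller decide whether a miss on localMachine should cascade to its rack (the favored-nodes path passes false). A minimal subclass sketch that pins the knob; the class name is hypothetical, it assumes a home in the same org.apache.hadoop.hdfs.server.blockmanagement package, and the StorageType import path varies across Hadoop versions:

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.EnumMap;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.net.Node;

// Hypothetical illustration; not a class that ships with Hadoop.
public class NoLocalRackFallbackPolicy extends BlockPlacementPolicyDefault {
  @Override
  protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
      List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
      EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
      throws NotEnoughReplicasException {
    // Always pass false: either the local node yields a storage or the
    // caller receives null and must recover on its own.
    return super.chooseLocalStorage(localMachine, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes, false);
  }
}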

origin: org.apache.hadoop/hadoop-hdfs

private enum NodeNotChosenReason {
 NOT_IN_SERVICE("the node is not in service"),
 NODE_STALE("the node is stale"),
 NODE_TOO_BUSY("the node is too busy"),
 TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
 NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
 private final String text;
 NodeNotChosenReason(final String logText) {
  text = logText;
 }
 private String getText() {
  return text;
 }
}
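
Each constant pairs a machine-checkable reason with human-readable text, so a rejected candidate can be explained in the debug log. A simplified sketch of how the text might be surfaced from within the same class; the real logNodeIsNotChosen also aggregates per-node detail, so treat this shape as assumed:

// Simplified, assumed shape; not the exact Hadoop implementation.
private static void logNodeIsNotChosen(DatanodeDescriptor node,
    NodeNotChosenReason reason) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Datanode " + node + " is not chosen because "
        + reason.getText());
  }
}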
origin: org.apache.hadoop/hadoop-hdfs

// if no local machine, randomly choose one node
if (localMachine == null) {
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
// ... otherwise try the local datanode first
DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
if (excludedNodes.add(localMachine) // was not in the excluded list
    && isGoodDatanode(localDatanode, maxNodesPerRack, false,
        results, avoidStaleNodes)) {
  for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
      .entrySet().iterator(); iter.hasNext(); ) {
    Map.Entry<StorageType, Integer> entry = iter.next();
    DatanodeStorageInfo localStorage = chooseStorage4Block(
        localDatanode, blocksize, results, entry.getKey());
    if (localStorage != null) {
      addToExcludedNodes(localDatanode, excludedNodes);
      int num = entry.getValue();
      if (num == 1) {
        iter.remove();
      }
      return localStorage;
    }
  }
}
origin: ch.cern.hadoop/hadoop-hdfs

// ... required storage types for the remaining replicas
final EnumMap<StorageType, Integer> storageTypes =
    getRequiredStorageTypes(requiredStorageTypes);
if (LOG.isTraceEnabled()) {
  LOG.trace("storageTypes=" + storageTypes);
}
try {
  // first replica: the writer's local storage if possible
  writer = chooseLocalStorage(writer, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
          .getDatanodeDescriptor();
  // ... second replica: a remote rack relative to the first
  final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
  chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
      results, avoidStaleNodes, storageTypes);
  if (--numOfReplicas == 0) {
    return writer;
  }
  // third replica: same rack as the second unless the first two racks
  // collide
  final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
  if (clusterMap.isOnSameRack(dn0, dn1)) {
    chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } else if (newBlock) {
    chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } else {
    chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  }
  // remaining replicas: random nodes anywhere in the cluster
  chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
  // ...
}
origin: ch.cern.hadoop/hadoop-hdfs

try {
  if (favoredNodes == null || favoredNodes.size() == 0) {
    // Favored nodes not specified, fall back to regular block placement.
    return chooseTarget(src, numOfReplicas, writer,
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
        excludedNodes, blocksize, storagePolicy);
  }
  final List<StorageType> requiredStorageTypes = storagePolicy
      .chooseStorageTypes((short) numOfReplicas);
  final EnumMap<StorageType, Integer> storageTypes =
      getRequiredStorageTypes(requiredStorageTypes);
  int maxNodesAndReplicas[] = getMaxNodesPerRack(0, numOfReplicas);
  numOfReplicas = maxNodesAndReplicas[0];
  int maxNodesPerRack = maxNodesAndReplicas[1];
  // ... place one replica local to each favored node
  final DatanodeStorageInfo target = chooseLocalStorage(favoredNode,
      favoriteAndExcludedNodes, blocksize, maxNodesPerRack,
      results, avoidStaleNodes, storageTypes, false);
  // ... fill the remaining replicas with the regular strategy
  DatanodeStorageInfo[] remainingTargets =
      chooseTarget(src, numOfReplicas, writer, results,
          false, favoriteAndExcludedNodes, blocksize, storagePolicy);
  for (int i = 0; i < remainingTargets.length; i++) {
    results.add(remainingTargets[i]);
  }
  return getPipeline(writer,
      results.toArray(new DatanodeStorageInfo[results.size()]));
} catch (NotEnoughReplicasException nr) {
  // Fall back to regular block placement, disregarding the favored nodes.
  return chooseTarget(src, numOfReplicas, writer,
      new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
      excludedNodes, blocksize, storagePolicy);
}
origin: org.apache.hadoop/hadoop-hdfs

try {
  if (favoredNodes == null || favoredNodes.size() == 0) {
    // Favored nodes not specified, fall back to regular block placement.
    return chooseTarget(src, numOfReplicas, writer,
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
        excludedNodes, blocksize, storagePolicy, flags);
  }
  final List<StorageType> requiredStorageTypes = storagePolicy
      .chooseStorageTypes((short) numOfReplicas);
  final EnumMap<StorageType, Integer> storageTypes =
      getRequiredStorageTypes(requiredStorageTypes);
  int maxNodesAndReplicas[] = getMaxNodesPerRack(0, numOfReplicas);
  numOfReplicas = maxNodesAndReplicas[0];
  int maxNodesPerRack = maxNodesAndReplicas[1];
  chooseFavouredNodes(src, numOfReplicas, favoredNodes,
      favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results,
      avoidStaleNodes, storageTypes);
  // ... exclude every node already chosen
  addToExcludedNodes(storage.getDatanodeDescriptor(),
      favoriteAndExcludedNodes);
  // ... choose the remaining targets with the regular strategy
  DatanodeStorageInfo[] remainingTargets =
      chooseTarget(src, numOfReplicas, writer,
          new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
          favoriteAndExcludedNodes, blocksize, storagePolicy, flags);
  // ...
  return getPipeline(writer,
      results.toArray(new DatanodeStorageInfo[results.size()]));
} catch (NotEnoughReplicasException nr) {
  // Fall back to regular block placement, disregarding the favored nodes.
  return chooseTarget(src, numOfReplicas, writer,
      new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
      excludedNodes, blocksize, storagePolicy, flags);
}
origin: org.apache.hadoop/hadoop-hdfs

final int numOfResults = results.size();
if (numOfResults == 0) {
  // first replica: the writer's local storage if possible
  DatanodeStorageInfo storageInfo = chooseLocalStorage(writer,
      excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes,
      storageTypes, true);
  // ...
}
final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
if (numOfResults <= 1) {
  // second replica: a remote rack
  chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
      results, avoidStaleNodes, storageTypes);
  if (--numOfReplicas == 0) {
    return writer;
  }
}
if (numOfResults <= 2) {
  final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
  if (clusterMap.isOnSameRack(dn0, dn1)) {
    chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } else if (newBlock) {
    chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } else {
    chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  }
  // ...
}
// remaining replicas: random nodes anywhere in the cluster
chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
return writer;
origin: ch.cern.hadoop/hadoop-hdfs

// if no local machine, randomly choose one node
if (localMachine == null) {
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
// ... otherwise try each storage of the local datanode first
for (DatanodeStorageInfo localStorage : DFSUtil.shuffle(
    localDatanode.getStorageInfos())) {
  StorageType type = entry.getKey();
  if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
      maxNodesPerRack, false, results, avoidStaleNodes, type) >= 0) {
    int num = entry.getValue();
    // ...
  }
}
// fall back to a node on the local rack
return chooseLocalRack(localMachine, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
origin: ch.cern.hadoop/hadoop-hdfs

DatanodeStorageInfo firstChosen = null;
while (numOfReplicas > 0 && numOfAvailableNodes > 0) {
  DatanodeDescriptor chosenNode = chooseDataNode(scope);
  if (excludedNodes.add(chosenNode)) { // was not in the excluded list
    if (LOG.isDebugEnabled() && builder != null) {
      // ... record the node in the debug builder
    }
    // ... examine each storage of the chosen node
    for (int i = 0; i < storages.length; i++) {
      StorageType type = entry.getKey();
      final int newExcludedNodes = addIfIsGoodTarget(storages[i],
          excludedNodes, blocksize, maxNodesPerRack, considerLoad, results,
          avoidStaleNodes, type);
      // ...
    }
  }
}
origin: org.apache.hadoop/hadoop-hdfs

// no local machine, so choose a random node
if (localMachine == null) {
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
try {
  // choose one from the local rack
  return chooseRandom(localRack, excludedNodes,
      blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
  // ... if another replica has already been placed, retry with its rack
  if (nextNode != localMachine) {
    LOG.debug("Failed to choose from local rack (location = " + localRack
        + "), retry with the rack of the next replica (location = "
        + nextNode.getNetworkLocation() + ")", e);
    return chooseFromNextRack(nextNode, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
origin: org.apache.hadoop/hadoop-hdfs

private DatanodeStorageInfo chooseFromNextRack(Node next,
  Set<Node> excludedNodes,
  long blocksize,
  int maxNodesPerRack,
  List<DatanodeStorageInfo> results,
  boolean avoidStaleNodes,
  EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
 final String nextRack = next.getNetworkLocation();
 try {
  return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
    results, avoidStaleNodes, storageTypes);
 } catch(NotEnoughReplicasException e) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Failed to choose from the next rack (location = " + nextRack
     + "), retry choosing randomly", e);
  }
  //otherwise randomly choose one from the network
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * If the given storage is a good target, add it to the result list and
 * update the set of excluded nodes.
 * @return -1 if the given storage is not a good target;
 *         otherwise, return the number of nodes added to excludedNodes set.
 */
int addIfIsGoodTarget(DatanodeStorageInfo storage,
  Set<Node> excludedNodes,
  long blockSize,
  int maxNodesPerRack,
  boolean considerLoad,
  List<DatanodeStorageInfo> results,                           
  boolean avoidStaleNodes,
  StorageType storageType) {
 if (isGoodTarget(storage, blockSize, maxNodesPerRack, considerLoad,
   results, avoidStaleNodes, storageType)) {
  results.add(storage);
  // add node and related nodes to excludedNode
  return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
 } else { 
  return -1;
 }
}
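
The -1 contract matters to callers such as chooseRandom: a non-negative return value is the number of nodes newly excluded, which shrinks the pool of remaining candidates. A sketch of the caller side, with the locals assumed from the surrounding loop:

// Caller-side sketch; storages[i], type and the counters are assumed.
final int newExcludedNodes = addIfIsGoodTarget(storages[i],
    excludedNodes, blocksize, maxNodesPerRack, considerLoad, results,
    avoidStaleNodes, type);
if (newExcludedNodes >= 0) {
  numOfReplicas--;                          // one more target secured
  numOfAvailableNodes -= newExcludedNodes;  // fewer candidates remain
}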
origin: org.apache.hadoop/hadoop-hdfs

@Override
boolean useDelHint(DatanodeStorageInfo delHint,
  DatanodeStorageInfo added, List<DatanodeStorageInfo> moreThanOne,
  Collection<DatanodeStorageInfo> exactlyOne,
  List<StorageType> excessTypes) {
 if (!super.useDelHint(delHint, added, moreThanOne, exactlyOne,
   excessTypes)) {
  // If BlockPlacementPolicyDefault doesn't allow useDelHint, there is no
  // point checking with upgrade domain policy.
  return false;
 }
 return isMovableBasedOnUpgradeDomain(combine(moreThanOne, exactlyOne),
   delHint, added);
}
origin: ch.cern.hadoop/hadoop-hdfs

 @Override
 public DatanodeStorageInfo[] chooseTarget(String srcPath,
                  int numOfReplicas,
                  Node writer,
                  List<DatanodeStorageInfo> chosenNodes,
                  boolean returnChosenNodes,
                  Set<Node> excludedNodes,
                  long blocksize,
                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
    numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
    blocksize, storagePolicy);
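  // Sleep after computing the targets; this override comes from a test
  // and apparently delays placement to expose timing behavior.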
  try {
   Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

DatanodeStorageInfo chosen = ((BlockPlacementPolicyDefault) replicator)
    .chooseReplicaToDelete(first, second, excessTypes, rackMap);
assertEquals(chosen, storages[1]);
origin: org.apache.hadoop/hadoop-hdfs

protected void chooseFavouredNodes(String src, int numOfReplicas,
  List<DatanodeDescriptor> favoredNodes,
  Set<Node> favoriteAndExcludedNodes, long blocksize, int maxNodesPerRack,
  List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
  EnumMap<StorageType, Integer> storageTypes)
  throws NotEnoughReplicasException {
 for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas;
   i++) {
  DatanodeDescriptor favoredNode = favoredNodes.get(i);
   // Choose a single node which is local to favoredNode.
   // 'results' is updated within chooseLocalStorage.
  final DatanodeStorageInfo target =
    chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize,
     maxNodesPerRack, results, avoidStaleNodes, storageTypes, false);
  if (target == null) {
   LOG.warn("Could not find a target for file " + src
     + " with favored node " + favoredNode);
   continue;
  }
  favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
 }
}
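
chooseFavouredNodes backs the favored-node overload of chooseTarget shown earlier on this page. A hedged call-site sketch; policy, writer, excluded, favored, blockSize and storagePolicy are assumed NameNode-side objects, and the flags parameter exists only in the newer overload:

// Assumed context: policy, writer, excluded, favored, blockSize,
// storagePolicy. Tries favored nodes first, then fills the rest.
DatanodeStorageInfo[] targets = policy.chooseTarget(
    "/user/alice/file", 3, writer, excluded, blockSize,
    favored, storagePolicy, EnumSet.noneOf(AddBlockFlag.class));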
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault

Javadoc

This class is responsible for choosing the desired number of targets for placing block replicas. The replica placement strategy: if the writer is on a datanode, the 1st replica is placed on the local machine, otherwise on a random datanode; the 2nd replica is placed on a datanode on a different rack; the 3rd replica is placed on a different node of the same rack as the 2nd replica.
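
A hedged end-to-end sketch of how NameNode-side code obtains and invokes this policy. Here conf, stats, clusterMap, host2datanodeMap, writer and blockSize stand for objects the NameNode normally wires in, and newer Hadoop releases route creation through a BlockPlacementPolicies factory instead of the static getInstance shown:

// Assumed NameNode context: conf, stats, clusterMap, host2datanodeMap,
// writer and blockSize are not defined in this sketch.
BlockPlacementPolicy policy = BlockPlacementPolicy.getInstance(
    conf, stats, clusterMap, host2datanodeMap);
// Ask for three targets for a new block; nothing pre-chosen or excluded.
DatanodeStorageInfo[] targets = policy.chooseTarget(
    "/user/alice/file", 3, writer,
    new ArrayList<DatanodeStorageInfo>(), false,
    new HashSet<Node>(), blockSize,
    BlockStoragePolicySuite.createDefaultSuite().getDefaultPolicy());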

Most used methods

  • chooseReplicaToDelete
    Decide whether deleting the specified replica of the block still makes the block conform to the configured block placement policy (see the sketch after this list).
  • chooseTarget
  • useDelHint
    Check if we can use delHint.
  • addToExcludedNodes
    Add localMachine and related nodes to excludedNodes for the next replica choice. A subclass can add more nodes under the same failure domain.
  • adjustSetsWithChosenReplica
  • chooseDataNode
    Choose a datanode from the given scope with the specified storage type.
  • chooseFromNextRack
  • chooseLocalRack
    Choose one node from the rack that localMachine is on. If no such node is available, choose one node from the rack of a second replica; failing that, choose a random node from the cluster.
  • chooseLocalStorage
    Choose localMachine as the target. If localMachine is not available, choose a node on the same rack.
  • chooseRandom
    Randomly choose numOfReplicas targets from the given scope.
  • chooseRemoteRack
    Choose numOfReplicas nodes from racks that localMachine is NOT on. If not enough nodes are available, choose the remaining ones from the local rack.
  • getMaxNodesPerRack
    Calculate the maximum number of replicas to allocate per rack. It also limits the total number of replicas to the total number of nodes in the cluster.
  • getPipeline
  • getRequiredStorageTypes
  • initialize
  • logNodeIsNotChosen
  • pickupReplicaSet
  • splitNodesWithRack
  • addIfIsGoodTarget
  • isGoodTarget
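
The deletion-side methods above cooperate as in the first snippet on this page. A hedged sketch of the flow, callable only from a subclass or a test in the same package; availableReplicas, delCandidates and excessTypes are assumed inputs:

// Assumed inputs: availableReplicas, delCandidates, excessTypes.
Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
List<DatanodeStorageInfo> moreThanOne = new ArrayList<>();
List<DatanodeStorageInfo> exactlyOne = new ArrayList<>();
// Partition candidates by whether their rack holds more than one replica.
splitNodesWithRack(availableReplicas, delCandidates, rackMap, moreThanOne,
    exactlyOne);
// Prefer deleting from a rack that still keeps another replica.
DatanodeStorageInfo victim = chooseReplicaToDelete(moreThanOne, exactlyOne,
    excessTypes, rackMap);
// Keep the bookkeeping sets consistent for the next deletion round.
adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, victim);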
