BlockPlacementPolicyDefault.chooseRandom

How to use the chooseRandom method in org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault

Best Java code snippets using org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseRandom (Showing top 18 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

private DatanodeStorageInfo chooseFromNextRack(Node next,
  Set<Node> excludedNodes,
  long blocksize,
  int maxNodesPerRack,
  List<DatanodeStorageInfo> results,
  boolean avoidStaleNodes,
  EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
 final String nextRack = next.getNetworkLocation();
 try {
  return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
    results, avoidStaleNodes, storageTypes);
 } catch(NotEnoughReplicasException e) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Failed to choose from the next rack (location = " + nextRack
     + "), retry choosing randomly", e);
  }
  //otherwise randomly choose one from the network
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 }
}
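
The control flow above is a two-step fallback: try the preferred rack first, and only when a NotEnoughReplicasException escapes widen the scope to NodeBase.ROOT (the whole topology). A minimal, dependency-free sketch of the same pattern, with hypothetical node and rack names standing in for the HDFS types:

import java.util.*;

// Hypothetical sketch of the "narrow scope first, then whole cluster" fallback
// used by chooseFromNextRack. Node/rack names and chooseFrom are illustrative.
public class ScopeFallbackDemo {
 static final String ROOT = ""; // analogous to NodeBase.ROOT: matches every node

 // node name -> rack ("scope")
 static final Map<String, String> NODES = Map.of(
   "dn1", "/rackA", "dn2", "/rackA", "dn3", "/rackB");

 static String chooseFrom(String scope, Set<String> excluded, Random rng) {
  List<String> candidates = new ArrayList<>();
  for (Map.Entry<String, String> e : NODES.entrySet()) {
   if (e.getValue().startsWith(scope) && !excluded.contains(e.getKey())) {
    candidates.add(e.getKey());
   }
  }
  if (candidates.isEmpty()) {
   throw new NoSuchElementException("no candidates in scope " + scope);
  }
  return candidates.get(rng.nextInt(candidates.size()));
 }

 public static void main(String[] args) {
  Random rng = new Random(42);
  Set<String> excluded = new HashSet<>(Set.of("dn1", "dn2"));
  String chosen;
  try {
   // first try the preferred rack, like chooseRandom(nextRack, ...)
   chosen = chooseFrom("/rackA", excluded, rng);
  } catch (NoSuchElementException e) {
   // otherwise choose from anywhere, like chooseRandom(NodeBase.ROOT, ...)
   chosen = chooseFrom(ROOT, excluded, rng);
  }
  System.out.println("chose " + chosen); // dn3: rackA was exhausted
 }
}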
origin: org.apache.hadoop/hadoop-hdfs

chooseRandom(numOfReplicas, "~" + localMachine.getNetworkLocation(),
  excludedNodes, blocksize, maxReplicasPerRack, results,
  avoidStaleNodes, storageTypes);
   + localMachine.getNetworkLocation() + "), fallback to local rack", e);
chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas),
       localMachine.getNetworkLocation(), excludedNodes, blocksize, 
       maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
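
Note the arithmetic in the fallback call: the first attempt may have placed some replicas before failing, so the retry asks only for what is still owed. A tiny worked example with hypothetical numbers:

public class RemainingReplicasDemo {
 public static void main(String[] args) {
  // Hypothetical numbers: 3 replicas requested, 1 already placed before the
  // call, and the failed attempt managed to place 1 more before giving up.
  int numOfReplicas = 3;
  int oldNumOfReplicas = 1;          // results.size() before chooseRandom
  int resultsSizeAfterFailure = 2;   // results.size() when the exception surfaced

  int remaining = numOfReplicas - (resultsSizeAfterFailure - oldNumOfReplicas);
  System.out.println("fallback still needs " + remaining + " replicas"); // 2
 }
}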
origin: org.apache.hadoop/hadoop-hdfs

// no local machine, so choose a random node from the whole cluster
if (localMachine == null) {
 return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
try {
 // choose one node from the local rack
 return chooseRandom(localRack, excludedNodes,
   blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
 // the local rack is exhausted; retry over the whole cluster
 return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
origin: org.apache.hadoop/hadoop-hdfs

return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
 * @return the first chosen node, if there is any.
 */
protected DatanodeStorageInfo chooseRandom(int numOfReplicas,
             String scope,
             Set<Node> excludedNodes,
             long blocksize,
             int maxNodesPerRack,
             List<DatanodeStorageInfo> results,
             boolean avoidStaleNodes,
             EnumMap<StorageType, Integer> storageTypes)
             throws NotEnoughReplicasException {
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
  builder = debugLoggingBuilder.get();
  builder.setLength(0);
  builder.append("[");
 }
 CHOOSE_RANDOM_REASONS.get().clear();
 boolean badTarget = false;
 DatanodeStorageInfo firstChosen = null;
 while (numOfReplicas > 0) {
  // the storage type that current node has
  StorageType includeType = null;
  DatanodeDescriptor chosenNode = null;
  if (clusterMap instanceof DFSNetworkTopology) {
   for (StorageType type : storageTypes.keySet()) {
    chosenNode = chooseDataNode(scope, excludedNodes, type);
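
In this DFSNetworkTopology branch, the requested storage types arrive in an EnumMap<StorageType, Integer> mapping each type to how many replicas still need it, and the loop tries the types one by one. The standalone sketch below imitates that quota-map shape with a hypothetical enum and picker; it is not the HDFS code:

import java.util.*;

// Stand-in sketch for walking an EnumMap of storage-type quotas, as the
// chooseRandom loop above does. StorageKind and pickNodeFor are hypothetical.
public class StorageTypeQuotaDemo {
 enum StorageKind { DISK, SSD, ARCHIVE }

 static String pickNodeFor(StorageKind kind) {
  // pretend only DISK nodes are available in this toy cluster
  return kind == StorageKind.DISK ? "dn-" + kind : null;
 }

 public static void main(String[] args) {
  EnumMap<StorageKind, Integer> quotas = new EnumMap<>(StorageKind.class);
  quotas.put(StorageKind.SSD, 1);   // prefer one SSD replica...
  quotas.put(StorageKind.DISK, 2);  // ...and two DISK replicas

  for (Iterator<Map.Entry<StorageKind, Integer>> it =
      quotas.entrySet().iterator(); it.hasNext(); ) {
   Map.Entry<StorageKind, Integer> e = it.next();
   String node = pickNodeFor(e.getKey());
   if (node == null) {
    continue; // no node for this type; a real policy may fall back later
   }
   // decrement the quota, removing the entry once it reaches zero
   int left = e.getValue() - 1;
   if (left == 0) { it.remove(); } else { e.setValue(left); }
   System.out.println("placed replica on " + node);
  }
  System.out.println("unsatisfied quotas: " + quotas); // {DISK=1, SSD=1}
 }
}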
origin: org.apache.hadoop/hadoop-hdfs

chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
return writer;
origin: ch.cern.hadoop/hadoop-hdfs

private DatanodeStorageInfo chooseFromNextRack(Node next,
  Set<Node> excludedNodes,
  long blocksize,
  int maxNodesPerRack,
  List<DatanodeStorageInfo> results,
  boolean avoidStaleNodes,
  EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
 final String nextRack = next.getNetworkLocation();
 try {
  return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
    results, avoidStaleNodes, storageTypes);
 } catch(NotEnoughReplicasException e) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Failed to choose from the next rack (location = " + nextRack
     + "), retry choosing ramdomly", e);
  }
  //otherwise randomly choose one from the network
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

chooseRandom(numOfReplicas, "~" + localMachine.getNetworkLocation(),
  excludedNodes, blocksize, maxReplicasPerRack, results,
  avoidStaleNodes, storageTypes);
   + localMachine.getNetworkLocation() + "), fallback to local rack", e);
chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas),
       localMachine.getNetworkLocation(), excludedNodes, blocksize, 
       maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
origin: io.prestosql.hadoop/hadoop-apache

private DatanodeStorageInfo chooseFromNextRack(Node next,
  Set<Node> excludedNodes,
  long blocksize,
  int maxNodesPerRack,
  List<DatanodeStorageInfo> results,
  boolean avoidStaleNodes,
  EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
 final String nextRack = next.getNetworkLocation();
 try {
  return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
    results, avoidStaleNodes, storageTypes);
 } catch(NotEnoughReplicasException e) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Failed to choose from the next rack (location = " + nextRack
     + "), retry choosing ramdomly", e);
  }
  //otherwise randomly choose one from the network
  return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

// no local machine, so choose a random node from the whole cluster
if (localMachine == null) {
 return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
try {
 // choose one node from the local rack
 return chooseRandom(localRack, excludedNodes,
   blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
 // the local rack is exhausted; retry over the whole cluster
 return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
origin: ch.cern.hadoop/hadoop-hdfs

return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
origin: io.prestosql.hadoop/hadoop-apache

return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
origin: io.prestosql.hadoop/hadoop-apache

chooseRandom(numOfReplicas, "~" + localMachine.getNetworkLocation(),
  excludedNodes, blocksize, maxReplicasPerRack, results,
  avoidStaleNodes, storageTypes);
   + localMachine.getNetworkLocation() + "), fallback to local rack", e);
chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas),
       localMachine.getNetworkLocation(), excludedNodes, blocksize, 
       maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
 * @return the first chosen node, if there is any.
 */
protected DatanodeStorageInfo chooseRandom(int numOfReplicas,
             String scope,
             Set<Node> excludedNodes,
             long blocksize,
             int maxNodesPerRack,
             List<DatanodeStorageInfo> results,
             boolean avoidStaleNodes,
             EnumMap<StorageType, Integer> storageTypes)
             throws NotEnoughReplicasException {
  
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
   scope, excludedNodes);
 int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
  builder = debugLoggingBuilder.get();
  builder.setLength(0);
  builder.append("[");
 }
 boolean badTarget = false;
 DatanodeStorageInfo firstChosen = null;
 while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
  DatanodeDescriptor chosenNode = chooseDataNode(scope);
  if (excludedNodes.add(chosenNode)) { //was not in the excluded list
   if (LOG.isDebugEnabled() && builder != null) {
    builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
 * @return the first chosen node, if there is any.
 */
protected DatanodeStorageInfo chooseRandom(int numOfReplicas,
             String scope,
             Set<Node> excludedNodes,
             long blocksize,
             int maxNodesPerRack,
             List<DatanodeStorageInfo> results,
             boolean avoidStaleNodes,
             EnumMap<StorageType, Integer> storageTypes)
             throws NotEnoughReplicasException {
  
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
   scope, excludedNodes);
 int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
  builder = debugLoggingBuilder.get();
  builder.setLength(0);
  builder.append("[");
 }
 boolean badTarget = false;
 DatanodeStorageInfo firstChosen = null;
 while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
  DatanodeDescriptor chosenNode = chooseDataNode(scope);
  if (excludedNodes.add(chosenNode)) { //was not in the excluded list
   if (LOG.isDebugEnabled() && builder != null) {
    builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
origin: io.prestosql.hadoop/hadoop-apache

// no local machine, so choose a random node from the whole cluster
if (localMachine == null) {
 return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
try {
 // choose one node from the local rack
 return chooseRandom(localRack, excludedNodes,
   blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
 // the local rack is exhausted; retry over the whole cluster
 return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
origin: ch.cern.hadoop/hadoop-hdfs

try {
 chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
origin: io.prestosql.hadoop/hadoop-apache

try {
 chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseRandom

Javadoc

Randomly choose numOfReplicas targets from the given scope.
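
Since chooseRandom is protected, it is normally reached from within BlockPlacementPolicyDefault itself or a subclass. The sketch below is a hypothetical subclass wiring a helper to the overload shown above; it assumes a Hadoop 2.x hadoop-hdfs on the classpath and is declared in the same package so the package-private NotEnoughReplicasException is visible:

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.EnumMap;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

// Hypothetical subclass: resolve targets with a cluster-wide random pick by
// delegating to the protected chooseRandom overload documented above.
public class ClusterWideRandomPolicy extends BlockPlacementPolicyDefault {
 DatanodeStorageInfo chooseAnywhere(int numOfReplicas, Set<Node> excludedNodes,
   long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results,
   boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
   throws NotEnoughReplicasException {
  // NodeBase.ROOT widens the scope to the entire network topology
  return chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
    maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 }
}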

Popular methods of BlockPlacementPolicyDefault

  • chooseReplicaToDelete
    Decide whether deleting the specified replica of the block still makes the block conform to the conf
  • chooseTarget
  • useDelHint
    Check if we can use delHint.
  • addToExcludedNodes
    Add localMachine and related nodes to excludedNodes for next replica choosing. In sub class, we can
  • adjustSetsWithChosenReplica
  • chooseDataNode
    Choose a datanode from the given scope with specified storage type.
  • chooseFromNextRack
  • chooseLocalRack
    Choose one node from the rack that localMachine is on. if no such node is available, choose one node
  • chooseLocalStorage
    Choose localMachine as the target. if localMachine is not available, choose a node on the same rack
  • chooseRemoteRack
    Choose numOfReplicas nodes from the racks that localMachine is NOT on. if not enough nodes are avail
  • getMaxNodesPerRack
    Calculate the maximum number of replicas to allocate per rack. It also limits the total number of re
  • getPipeline
    Return a pipeline of nodes. The pipeline is formed finding a shortest path that starts from the writ
  • getRequiredStorageTypes
  • initialize
  • logNodeIsNotChosen
  • pickupReplicaSet
  • splitNodesWithRack
  • addIfIsGoodTarget
  • isGoodTarget
