Tabnine Logo
Host2NodesMap
Code IndexAdd Tabnine to your IDE (free)

How to use
Host2NodesMap
in
org.apache.hadoop.hdfs.server.blockmanagement

Best Java code snippets using org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Add a datanode: register it in datanodeMap, the host index
 * (host2DatanodeMap) and the network topology.
 */
void addDatanode(final DatanodeDescriptor node) {
 // To keep host2DatanodeMap consistent with datanodeMap,
 // remove from host2DatanodeMap the datanodeDescriptor removed
 // from datanodeMap before adding node to host2DatanodeMap.
 // datanodeMap.put returns the previous mapping for this UUID (or null).
 synchronized(this) {
  host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
 }
 networktopology.add(node); // may throw InvalidTopologyException
 host2DatanodeMap.add(node);
 checkIfClusterIsNowMultiRack(node);
 resolveUpgradeDomain(node);
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + ".addDatanode: "
    + "node " + node + " is added to datanodeMap.");
 }
}
origin: org.apache.hadoop/hadoop-hdfs

hostmapLock.writeLock().lock();
try {
 if (node==null || contains(node)) {
  return false;
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @param host the datanode's IP address
 * @return the datanode descriptor registered for the host, or null if
 *         no datanode is mapped to it.
 */
public DatanodeDescriptor getDatanodeByHost(final String host) {
 return host2DatanodeMap.getDatanodeByHost(host);
}
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testRemove() throws Exception {
 // Removing a node that was never added must be a no-op returning false.
 DatanodeDescriptor nodeNotInMap =
  DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
 assertFalse(map.remove(nodeNotInMap));
 
 // Remove dataNodes[0] ("1.1.1.1"): its host no longer resolves while
 // the remaining hosts are untouched.
 assertTrue(map.remove(dataNodes[0]));
 // Fixed: the original queried "1.1.1.1." (trailing dot), a host that was
 // never in the map, so the assertion passed without testing the removal.
 assertNull(map.getDatanodeByHost("1.1.1.1"));
 assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
 // Two datanodes share host 3.3.3.3; either one may be returned.
 DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
 assertTrue(node==dataNodes[2] || node==dataNodes[3]);
 assertNull(map.getDatanodeByHost("4.4.4.4"));
 
 // Remove one of the two 3.3.3.3 datanodes: lookup returns the other.
 assertTrue(map.remove(dataNodes[2]));
 assertNull(map.getDatanodeByHost("1.1.1.1"));
 assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
 assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
 
 // Remove the last 3.3.3.3 datanode: that host no longer resolves.
 assertTrue(map.remove(dataNodes[3]));
 assertNull(map.getDatanodeByHost("1.1.1.1"));
 assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
 assertNull(map.getDatanodeByHost("3.3.3.3"));
 
 // null is rejected; removing an already-removed node returns false.
 assertFalse(map.remove(null));
 assertTrue(map.remove(dataNodes[1]));
 assertFalse(map.remove(dataNodes[1]));
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Find a datanode by its transfer address.
 * @param host the datanode's IP address
 * @param xferPort the datanode's transfer port
 * @return the matching datanode descriptor, or null if none is found.
 */
public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) {
 return host2DatanodeMap.getDatanodeByXferAddr(host, xferPort);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Physically remove node from datanodeMap (and from host2DatanodeMap,
 * keeping the two maps consistent).
 */
private void wipeDatanode(final DatanodeID node) {
 final String key = node.getDatanodeUuid();
 // datanodeMap.remove returns the descriptor registered under this UUID
 // (or null); the same descriptor is then dropped from the host index.
 synchronized (this) {
  host2DatanodeMap.remove(datanodeMap.remove(key));
 }
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
    + node + "): storage " + key 
    + " is removed from datanodeMap.");
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

@Before
public void setup() {
 // Four datanodes: two distinct hosts on rack /d1/r1, plus two datanodes
 // sharing host 3.3.3.3 (different ports) on rack /d1/r2.
 dataNodes = new DatanodeDescriptor[] {
   DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
   DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
   DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
   DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
 };
 for (int i = 0; i < dataNodes.length; i++) {
  map.add(dataNodes[i]);
 }
 // Adding null is rejected by the map; it must not throw here.
 map.add(null);
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Add every datanode from the chosen node's dependent-host list to
 * excludedNodes.
 * @param chosenNode node whose dependent hosts should be excluded
 * @param excludedNodes set receiving the resolved dependent datanodes
 * @return number of new excluded nodes
 */
private int addDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode,
  Set<Node> excludedNodes) {
 // Without a host-to-datanode index, dependent hosts cannot be resolved.
 if (this.host2datanodeMap == null) {
  return 0;
 }
 int newlyExcluded = 0;
 for (String dependentHost : chosenNode.getDependentHostNames()) {
  final DatanodeDescriptor dependentNode =
    this.host2datanodeMap.getDataNodeByHostName(dependentHost);
  if (dependentNode == null) {
   LOG.warn("Not able to find datanode " + dependentHost
     + " which has dependency with datanode "
     + chosenNode.getHostName());
   continue;
  }
  // Set.add returns true only when the node was not already excluded.
  if (excludedNodes.add(dependentNode)) {
   newlyExcluded++;
  }
 }
 return newlyExcluded;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Look up a datanode by its transfer address.
 * @param host IP address of the datanode
 * @param xferPort transfer port of the datanode
 * @return the datanode descriptor, or null when no match exists.
 */
public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) {
 return host2DatanodeMap.getDatanodeByXferAddr(host, xferPort);
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Physically remove node from datanodeMap and the host index, and drop
 * any pending block-invalidation tasks queued for it.
 */
private void wipeDatanode(final DatanodeID node) {
 final String key = node.getDatanodeUuid();
 // Lock datanodeMap so both maps are updated together; remove() hands
 // back the registered descriptor (or null) to drop from the host index.
 synchronized (datanodeMap) {
  host2DatanodeMap.remove(datanodeMap.remove(key));
 }
 // Also remove all block invalidation tasks under this node
 blockManager.removeFromInvalidates(new DatanodeInfo(node));
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
    + node + "): storage " + key 
    + " is removed from datanodeMap.");
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
 cluster.add(dataNodesForDependencies[i]);
 host2DatanodeMap.add(dataNodesForDependencies[i]);
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Add all nodes from a dependent nodes list to excludedNodes.
 * @param chosenNode node whose dependent hosts should be excluded
 * @param excludedNodes set receiving the resolved dependent datanodes
 * @return number of new excluded nodes
 */
private int addDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode,
  Set<Node> excludedNodes) {
 // No host-to-datanode index configured: nothing can be resolved.
 if (this.host2datanodeMap == null) {
  return 0;
 }
 int countOfExcludedNodes = 0;
 for(String hostname : chosenNode.getDependentHostNames()) {
  DatanodeDescriptor node =
    this.host2datanodeMap.getDataNodeByHostName(hostname);
  if(node!=null) {
   // Set.add is true only if the node was not already excluded.
   if (excludedNodes.add(node)) {
    countOfExcludedNodes++;
   }
  } else {
   LOG.warn("Not able to find datanode " + hostname
     + " which has dependency with datanode "
     + chosenNode.getHostName());
  }
 }
 
 return countOfExcludedNodes;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Add a datanode: register it in datanodeMap, the host index
 * (host2DatanodeMap) and the network topology.
 */
void addDatanode(final DatanodeDescriptor node) {
 // To keep host2DatanodeMap consistent with datanodeMap,
 // remove from host2DatanodeMap the datanodeDescriptor removed
 // from datanodeMap before adding node to host2DatanodeMap.
 // datanodeMap.put returns the previous mapping for this UUID (or null).
 synchronized(datanodeMap) {
  host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
 }
 networktopology.add(node); // may throw InvalidTopologyException
 host2DatanodeMap.add(node);
 checkIfClusterIsNowMultiRack(node);
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + ".addDatanode: "
    + "node " + node + " is added to datanodeMap.");
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Get a data node by its hostname. This should be used only when a single
 * datanode service runs on the hostname; with multiple datanodes per host
 * use getDataNodeByXferAddr or getDataNodeByHostNameAndPort instead.
 * @param hostname hostname to resolve; may be null
 * @return DatanodeDescriptor if found; otherwise null.
 */
DatanodeDescriptor getDataNodeByHostName(String hostname) {
 if (hostname == null) {
  return null;
 }
 
 hostmapLock.readLock().lock();
 try {
  // Resolve the hostname to its registered IP, then reuse the IP lookup.
  final String ipAddr = mapHost.get(hostname);
  return ipAddr == null ? null : getDatanodeByHost(ipAddr);
 } finally {
  hostmapLock.readLock().unlock();
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

hostmapLock.writeLock().lock();
try {
 if (node==null || contains(node)) {
  return false;
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Find a datanode by its transfer address (host + transfer port).
 * @param host the datanode's IP address
 * @param xferPort the datanode's transfer port
 * @return the matching datanode descriptor, or null if none is found.
 */
public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) {
 return host2DatanodeMap.getDatanodeByXferAddr(host, xferPort);
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Physically remove node from datanodeMap and host2DatanodeMap, and
 * discard any block-invalidation tasks still queued against it.
 */
private void wipeDatanode(final DatanodeID node) {
 final String key = node.getDatanodeUuid();
 // Update both maps under the datanodeMap lock so they stay consistent;
 // remove() returns the descriptor to drop from the host index (or null).
 synchronized (datanodeMap) {
  host2DatanodeMap.remove(datanodeMap.remove(key));
 }
 // Also remove all block invalidation tasks under this node
 blockManager.removeFromInvalidates(new DatanodeInfo(node));
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
    + node + "): storage " + key 
    + " is removed from datanodeMap.");
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Resolve the chosen node's dependent host names and add the matching
 * datanodes to excludedNodes.
 * @param chosenNode node whose dependent hosts should be excluded
 * @param excludedNodes set receiving the resolved dependent datanodes
 * @return number of new excluded nodes
 */
private int addDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode,
  Set<Node> excludedNodes) {
 // Without a host-to-datanode index there is nothing to resolve.
 if (this.host2datanodeMap == null) {
  return 0;
 }
 int added = 0;
 for (String host : chosenNode.getDependentHostNames()) {
  final DatanodeDescriptor dep =
    this.host2datanodeMap.getDataNodeByHostName(host);
  if (dep != null) {
   // Count only nodes that were not already in the exclusion set.
   if (excludedNodes.add(dep)) {
    added++;
   }
  } else {
   LOG.warn("Not able to find datanode " + host
     + " which has dependency with datanode "
     + chosenNode.getHostName());
  }
 }
 return added;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Add a datanode to datanodeMap, host2DatanodeMap and the network
 * topology.
 */
void addDatanode(final DatanodeDescriptor node) {
 // To keep host2DatanodeMap consistent with datanodeMap,
 // remove from host2DatanodeMap the datanodeDescriptor removed
 // from datanodeMap before adding node to host2DatanodeMap.
 // (put returns the descriptor previously mapped to this UUID, or null.)
 synchronized(datanodeMap) {
  host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
 }
 networktopology.add(node); // may throw InvalidTopologyException
 host2DatanodeMap.add(node);
 checkIfClusterIsNowMultiRack(node);
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + ".addDatanode: "
    + "node " + node + " is added to datanodeMap.");
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * @param host the datanode's IP address
 * @return the datanode descriptor for the host, or null if the host is
 *         not in the map.
 */
public DatanodeDescriptor getDatanodeByHost(final String host) {
 return host2DatanodeMap.getDatanodeByHost(host);
}
org.apache.hadoop.hdfs.server.blockmanagementHost2NodesMap

Javadoc

A map from host names to datanode descriptors.

Most used methods

  • add
    Add a node to the map. Returns true if the node was added; false otherwise.
  • contains
    Check if node is already in the map.
  • getDatanodeByHost
    Get a data node by its IP address.
  • remove
    Remove a node from the map. Returns true if the node was removed; false otherwise.
  • getDataNodeByHostName
    Get a data node by its hostname. This should be used if only one datanode service is running on a hostname.
  • getDatanodeByXferAddr
    Find data node by its transfer address

Popular in Java

  • Making http requests using okhttp
  • notifyDataSetChanged (ArrayAdapter)
  • setScale (BigDecimal)
  • orElseThrow (Optional)
    Return the contained value, if present, otherwise throw an exception to be created by the provided s
  • FlowLayout (java.awt)
    A flow layout arranges components in a left-to-right flow, much like lines of text in a paragraph. F
  • FileOutputStream (java.io)
    An output stream that writes bytes to a file. If the output file exists, it can be replaced or appen
  • PrintStream (java.io)
    Fake signature of an existing Java class.
  • Time (java.sql)
    Java representation of an SQL TIME value. Provides utilities to format and parse the time's represen
  • SortedMap (java.util)
    A map that has its keys ordered. The sorting is according to either the natural ordering of its keys
  • TreeSet (java.util)
    TreeSet is an implementation of SortedSet. All optional operations (adding and removing) are support
  • Top Sublime Text plugins
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now