/**
 * Checks if the Admin state bit is DECOMMISSIONED. If so, then
 * we should shut the node down.
 *
 * @return true if the node should be shut down
 */
private boolean shouldNodeShutdown(DatanodeDescriptor node) {
  return node.isDecommissioned();
}
/**
 * Decide if a replica is valid.
 * @param node datanode on which the block is located
 * @param block a block
 * @return true if the replica is valid
 */
private boolean isGoodReplica(DatanodeDescriptor node, Block block) {
  Collection<Block> excessBlocks = excessReplicateMap.get(node.getStorageID());
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
  return (nodesCorrupt == null || !nodesCorrupt.contains(node)) // not corrupt
      // not over-scheduled for replication
      && (node.getNumberOfBlocksToBeReplicated() < maxReplicationStreams)
      // not already scheduled for removal
      && (excessBlocks == null || !excessBlocks.contains(block))
      && !node.isDecommissioned(); // not decommissioned
}
/**
 * Counts the number of live nodes in the given list
 */
private int countLiveNodes(Block b, Iterator<DatanodeDescriptor> nodeIter) {
  int live = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = null;
  if (corruptReplicas.size() != 0) {
    nodesCorrupt = corruptReplicas.getNodes(b);
  }
  while (nodeIter.hasNext()) {
    DatanodeDescriptor node = nodeIter.next();
    if (((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
        || node.isDecommissionInProgress() || node.isDecommissioned()) {
      // do nothing
    } else {
      live++;
    }
  }
  return live;
}
/**
 * Counts the replicas of the given block among the given nodes, broken down
 * into live, decommissioned (or decommissioning), corrupt and excess counts.
 */
private NumberReplicas countNodes(Block b, Iterator<DatanodeDescriptor> nodeIter) {
  int count = 0;
  int live = 0;
  int corrupt = 0;
  int excess = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  while (nodeIter.hasNext()) {
    DatanodeDescriptor node = nodeIter.next();
    if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
      corrupt++;
    } else if (node.isDecommissionInProgress() || node.isDecommissioned()) {
      count++;
    } else {
      Collection<Block> blocksExcess = excessReplicateMap.get(node.getStorageID());
      if (blocksExcess != null && blocksExcess.contains(b)) {
        excess++;
      } else {
        live++;
      }
    }
  }
  return new NumberReplicas(live, count, corrupt, excess);
}
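/*
 * A minimal sketch, for illustration only, of the NumberReplicas value object
 * returned by countNodes() above; the real class in FSNamesystem may use
 * different accessor names. It simply carries the four counts in the
 * constructor-argument order used above: live, decommissioned, corrupt, excess.
 */
class NumberReplicasSketch {
  private final int live;
  private final int decommissioned;
  private final int corrupt;
  private final int excess;

  NumberReplicasSketch(int live, int decommissioned, int corrupt, int excess) {
    this.live = live;
    this.decommissioned = decommissioned;
    this.corrupt = corrupt;
    this.excess = excess;
  }

  int liveReplicas()           { return live; }
  int decommissionedReplicas() { return decommissioned; }
  int corruptReplicas()        { return corrupt; }
  int excessReplicas()         { return excess; }
}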
/**
 * Find how many of the containing nodes are "extra", if any.
 * If there are any extras, call chooseExcessReplicates() to
 * mark them in the excessReplicateMap.
 */
private void processOverReplicatedBlock(Block block, short replication,
    DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
  if (addedNode == delNodeHint) {
    delNodeHint = null;
  }
  Collection<DatanodeDescriptor> nonExcess = new ArrayList<DatanodeDescriptor>();
  Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block);
  for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) {
    DatanodeDescriptor cur = it.next();
    Collection<Block> excessBlocks = excessReplicateMap.get(cur.getStorageID());
    if (excessBlocks == null || !excessBlocks.contains(block)) {
      if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
        // exclude corrupt replicas
        if (corruptNodes == null || !corruptNodes.contains(cur)) {
          nonExcess.add(cur);
        }
      }
    }
  }
  chooseExcessReplicates(nonExcess, block, replication, addedNode, delNodeHint);
}
@Deprecated
public String randomDataNode() {
  int size = datanodeMap.size();
  int index = 0;
  if (size != 0) {
    index = r.nextInt(size);
    for (int i = 0; i < size; i++) {
      DatanodeDescriptor d = getDatanodeByIndex(index);
      if (d != null && !d.isDecommissioned() && !isDatanodeDead(d)
          && !d.isDecommissionInProgress()) {
        return d.getHost() + ":" + d.getInfoPort();
      }
      index = (index + 1) % size;
    }
  }
  return null;
}
DatanodeDescriptor node = it.next();
if ((!inHostsList(node, null))
    && (!inExcludedHostsList(node, null))
    && node.isDecommissioned()) {
/**
 * Start decommissioning the specified datanode.
 */
private void startDecommission(DatanodeDescriptor node) throws IOException {
  if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
    LOG.info("Start Decommissioning node " + node.getName());
    node.startDecommission();
    //
    // all the blocks that reside on this node have to be
    // replicated.
    //
    Iterator<Block> decommissionBlocks = node.getBlockIterator();
    while (decommissionBlocks.hasNext()) {
      Block block = decommissionBlocks.next();
      updateNeededReplications(block, -1, 0);
    }
  }
}
/**
 * Returns a JSON representation of a map keyed by host name, where each value
 * is a map of dead-node attribute names to their values.
 */
@Override // NameNodeMXBean
public String getDeadNodes() {
  final Map<String, Map<String, Object>> info =
    new HashMap<String, Map<String, Object>>();
  final ArrayList<DatanodeDescriptor> liveNodeList = new ArrayList<DatanodeDescriptor>();
  final ArrayList<DatanodeDescriptor> deadNodeList = new ArrayList<DatanodeDescriptor>();
  // we need to call DFSNodesStatus to filter out the dead data nodes
  DFSNodesStatus(liveNodeList, deadNodeList);
  removeDecommissionedNodeFromList(deadNodeList);
  for (DatanodeDescriptor node : deadNodeList) {
    final Map<String, Object> innerinfo = new HashMap<String, Object>();
    innerinfo.put("lastContact", getLastContact(node));
    innerinfo.put("decommissioned", node.isDecommissioned());
    innerinfo.put("excluded", this.inExcludedHostsList(node, null));
    info.put(node.getHostName() + ":" + node.getPort(), innerinfo);
  }
  return JSON.toString(info);
}
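/*
 * Shape of the JSON produced by getDeadNodes() above, with an illustrative host
 * name and attribute values only; one entry per dead node, keyed by "hostname:port":
 *
 *   {"dn1.example.com:50010":
 *      {"lastContact":312,"decommissioned":false,"excluded":true}}
 */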
for (DatanodeDescriptor node : chosenNodes) {
  excludedNodes.put(node, node);
  if ((!node.isDecommissionInProgress()) && (!node.isDecommissioned())) {
    results.add(node);
/**
 * Change, if appropriate, the admin state of a datanode to
 * decommission completed. Return true if decommission is complete.
 */
boolean checkDecommissionStateInternal(DatanodeDescriptor node) {
  //
  // Check to see if all blocks on this decommissioning
  // node have reached their target replication factor.
  //
  if (node.isDecommissionInProgress()) {
    if (!isReplicationInProgress(node)) {
      node.setDecommissioned();
      LOG.info("Decommission complete for node " + node.getName());
    }
  }
  return node.isDecommissioned();
}
if (inExcludedHostsList(node, null)) {
  if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
/**
 * Update a block's priority queue in the neededReplications queues.
 *
 * @param block block
 * @param delta the change in the number of replicas
 * @param numCurrentReplicas current number of replicas
 * @param numCurrentDecommissionedReplicas current number of decommissioned replicas
 * @param node the node where the replica resides
 * @param fileReplication expected number of replicas
 */
private void updateNeededReplicationQueue(Block block, int delta,
    int numCurrentReplicas, int numCurrentDecommissionedReplicas,
    DatanodeDescriptor node, short fileReplication) {
  int numOldReplicas = numCurrentReplicas;
  int numOldDecommissionedReplicas = numCurrentDecommissionedReplicas;
  if (node.isDecommissioned() || node.isDecommissionInProgress()) {
    numOldDecommissionedReplicas -= delta;
  } else {
    numOldReplicas -= delta;
  }
  if (fileReplication > numOldReplicas) {
    neededReplications.remove(block, numOldReplicas,
        numOldDecommissionedReplicas, fileReplication);
  }
  if (fileReplication > numCurrentReplicas) {
    neededReplications.add(block, numCurrentReplicas,
        numCurrentDecommissionedReplicas, fileReplication);
  }
}
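/*
 * Worked example, assuming delta is the signed change already reflected in the
 * current counts: a live replica of a block with fileReplication = 3 is lost,
 * so delta = -1 and numCurrentReplicas = 2. Then numOldReplicas = 2 - (-1) = 3,
 * so the remove() call is skipped (the block was not under-replicated before),
 * and since 3 > 2 the block is added to neededReplications at the priority
 * corresponding to two live replicas.
 */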
/**
 * Stop decommissioning the specified datanode.
 */
private void stopDecommission(DatanodeDescriptor node) throws IOException {
  if ((node.isDecommissionInProgress()
       && ((Monitor) dnthread.getRunnable()).stopDecommission(node))
      || node.isDecommissioned()) {
    LOG.info("Stop Decommissioning node " + node.getName());
    synchronized (heartbeats) {
      // recompute cluster capacity stats around the admin-state change
      updateStats(node, false);
      node.stopDecommission();
      updateStats(node, true);
    }
  }
}
Log logr = FSNamesystem.LOG;
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
  logr.debug("Node " + NodeBase.getPath(node)
      + " is not chosen because the node is (being) decommissioned");
private void updateStats(DatanodeDescriptor node, boolean isAdded) {
  //
  // The statistics are protected by the heartbeat lock.
  // For decommissioning/decommissioned nodes, only used capacity
  // is counted.
  //
  assert (Thread.holdsLock(heartbeats));
  if (isAdded) {
    capacityUsed += node.getDfsUsed();
    capacityNamespaceUsed += node.getNamespaceUsed();
    totalLoad += node.getXceiverCount();
    if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
      capacityTotal += node.getCapacity();
      capacityRemaining += node.getRemaining();
    } else {
      capacityTotal += node.getDfsUsed();
    }
  } else {
    capacityUsed -= node.getDfsUsed();
    capacityNamespaceUsed -= node.getNamespaceUsed();
    totalLoad -= node.getXceiverCount();
    if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
      capacityTotal -= node.getCapacity();
      capacityRemaining -= node.getRemaining();
    } else {
      capacityTotal -= node.getDfsUsed();
    }
  }
}
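/*
 * Example of the effect above, with illustrative numbers: for a decommissioning
 * node reporting 10 TB raw capacity and 4 TB DFS used, only the 4 TB of used
 * space is added to capacityTotal and nothing to capacityRemaining, so cluster
 * totals count just the space that the node's existing replicas still occupy.
 */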
if (fileINode != null && fileINode.isUnderConstruction()
    && fileINode.isLastBlock(storedBlock)
    && !node.isDecommissionInProgress() && !node.isDecommissioned()) {
  decrementSafeBlockCount(block);
  return;
/**
 * Start decommissioning the specified datanode.
 */
private void startDecommission(DatanodeDescriptor node) throws IOException {
  if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
    LOG.info("Start Decommissioning node " + node.getName()
        + " with " + node.numBlocks() + " blocks.");
    synchronized (heartbeats) {
      updateStats(node, false);
      node.startDecommission();
      updateStats(node, true);
    }
    if (((Monitor) dnthread.getRunnable()).startDecommision(node)) {
      node.setStartTime(now());
    }
  } else if (node.isDecommissionInProgress()) {
    if (((Monitor) dnthread.getRunnable()).startDecommision(node)) {
      node.setStartTime(now());
    }
  }
}