/** @return the number of blocks with out-of-service-only replicas */
public synchronized int getOutOfServiceOnlyReplicas() {
  // The counter is only meaningful while this node is decommissioning
  // or entering maintenance; otherwise report zero.
  if (isDecommissionInProgress() || isEnteringMaintenance()) {
    return outOfServiceOnlyReplicas;
  }
  return 0;
}
/** @return the number of under-replicated blocks in open files */
/** @return the number of under-replicated blocks in open files */
public synchronized int getUnderReplicatedInOpenFiles() {
  // The counter is only meaningful while this node is decommissioning
  // or entering maintenance; otherwise report zero.
  if (isDecommissionInProgress() || isEnteringMaintenance()) {
    return underReplicatedBlocksInOpenFiles;
  }
  return 0;
}
/** @return the collection of under-replicated blocks in open files */
/** Set start time */
public synchronized void setStartTime(long time) {
  // Ignore updates unless the node is actively leaving service.
  if (isDecommissionInProgress() || isEnteringMaintenance()) {
    startTime = time;
  }
}
/** @return start time */
/**
 * Record the current leaving-service block counts for this node.
 * Silently ignored unless the node is decommissioning or entering
 * maintenance, so stale stats are never stored for in-service nodes.
 */
synchronized void set(int lowRedundancyBlocksInOpenFiles,
    LightWeightHashSet<Long> underRepInOpenFiles, int underRepBlocks,
    int outOfServiceOnlyRep) {
  final boolean leavingService =
      isDecommissionInProgress() || isEnteringMaintenance();
  if (!leavingService) {
    return;
  }
  underReplicatedBlocksInOpenFiles = lowRedundancyBlocksInOpenFiles;
  underReplicatedOpenFiles = underRepInOpenFiles;
  underReplicatedBlocks = underRepBlocks;
  outOfServiceOnlyReplicas = outOfServiceOnlyRep;
}
/** @return the number of under-replicated blocks */
public synchronized int getUnderReplicatedBlocks() {
  // The counter is only meaningful while this node is decommissioning
  // or entering maintenance; otherwise report zero.
  if (isDecommissionInProgress() || isEnteringMaintenance()) {
    return underReplicatedBlocks;
  }
  return 0;
}
/** @return the number of blocks with out-of-service-only replicas */
/** @return start time */
public synchronized long getStartTime() {
  // A start time only exists while the node is leaving service.
  if (isDecommissionInProgress() || isEnteringMaintenance()) {
    return startTime;
  }
  return 0;
}
} // End of class LeavingServiceStatus
/**
 * @return true if placing a replica on {@code target} would put it on a
 * rack not already covered by a non-decommissioning source replica.
 */
private boolean isInNewRack(DatanodeDescriptor[] srcs,
    DatanodeDescriptor target) {
  LOG.debug("check if target {} increases racks, srcs={}", target,
      Arrays.asList(srcs));
  final String targetRack = target.getNetworkLocation();
  for (DatanodeDescriptor src : srcs) {
    // A decommissioning source will eventually leave the cluster, so it
    // does not count as already covering the target's rack.
    if (src.isDecommissionInProgress()) {
      continue;
    }
    if (targetRack.equals(src.getNetworkLocation())) {
      LOG.debug("the target {} is in the same rack with src {}", target, src);
      return false;
    }
  }
  return true;
}
/** @return the collection of under-replicated blocks in open files */
public synchronized LightWeightHashSet<Long> getOpenFiles() {
  if (isDecommissionInProgress() || isEnteringMaintenance()) {
    return underReplicatedOpenFiles;
  }
  // Not leaving service: hand back an empty set rather than stale data.
  return new LightWeightLinkedSet<>();
}
/** Set start time */
/**
 * Stop decommissioning the specified datanode.
 * No-op unless the node is currently DECOMMISSION_INPROGRESS or
 * DECOMMISSIONED.
 * @param node the datanode whose decommissioning should be cancelled
 */
@VisibleForTesting
public void stopDecommission(DatanodeDescriptor node) {
  if (node.isDecommissionInProgress() || node.isDecommissioned()) {
    // Update DN stats maintained by HeartbeatManager
    hbManager.stopDecommission(node);
    // extra redundancy blocks will be detected and processed when
    // the dead node comes back and send in its full block report.
    // NOTE(review): only a live node is scanned here; presumably
    // hbManager.stopDecommission has already returned it to service —
    // confirm ordering against HeartbeatManager.
    if (node.isAlive()) {
      blockManager.processExtraRedundancyBlocksOnInService(node);
    }
    // Remove from tracking in DatanodeAdminManager
    pendingNodes.remove(node);
    outOfServiceNodeBlocks.remove(node);
  } else {
    // Node was never decommissioning; nothing to undo.
    LOG.trace("stopDecommission: Node {} in {}, nothing to do.", node, node.getAdminState());
  }
}
/**
 * @return indices of source nodes that are leaving service: either
 * decommissioning, or entering maintenance while still alive.
 */
private List<Integer> findLeavingServiceSources() {
  final List<Integer> srcIndices = new ArrayList<>();
  final DatanodeDescriptor[] sources = getSrcNodes();
  for (int i = 0; i < sources.length; i++) {
    final DatanodeDescriptor dn = sources[i];
    final boolean leaving = dn.isDecommissionInProgress()
        || (dn.isEnteringMaintenance() && dn.isAlive());
    if (leaving) {
      srcIndices.add(i);
    }
  }
  return srcIndices;
}
}
/**
 * Start decommissioning the specified datanode.
 * No-op if the node is already decommissioning or decommissioned.
 * @param node the datanode to begin decommissioning
 */
@VisibleForTesting
public void startDecommission(DatanodeDescriptor node) {
  if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
    // Update DN stats maintained by HeartbeatManager
    hbManager.startDecommission(node);
    // hbManager.startDecommission will set dead node to decommissioned.
    // Re-check the state: only nodes still DECOMMISSION_INPROGRESS (i.e.
    // alive) need the start time recorded and tracking in pendingNodes.
    if (node.isDecommissionInProgress()) {
      for (DatanodeStorageInfo storage : node.getStorageInfos()) {
        LOG.info("Starting decommission of {} {} with {} blocks", node, storage, storage.numBlocks());
      }
      node.getLeavingServiceStatus().setStartTime(monotonicNow());
      pendingNodes.add(node);
    }
  } else {
    // Already decommissioning/decommissioned; nothing to start.
    LOG.trace("startDecommission: Node {} in {}, nothing to do.", node, node.getAdminState());
  }
}
if (dnDesc.isDecommissioned()) { sb.append("DECOMMISSIONED)"); } else if (dnDesc.isDecommissionInProgress()) { sb.append("DECOMMISSIONING)"); } else if (this.showMaintenanceState &&
/**
 * Check whether {@code storedBlock}'s current replica locations satisfy
 * the configured block placement policy.
 *
 * Only replicas that will remain in the cluster (not decommissioning or
 * decommissioned) and are not corrupt are counted. Any replica on a
 * PROVIDED storage in NORMAL state short-circuits the check as satisfied.
 *
 * @param storedBlock the block to evaluate
 * @return true if the placement policy is satisfied for this block
 */
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
  List<DatanodeDescriptor> liveNodes = new ArrayList<>();
  Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
      .getNodes(storedBlock);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
    if (storage.getStorageType() == StorageType.PROVIDED
        && storage.getState() == State.NORMAL) {
      // assume the policy is satisfied for blocks on PROVIDED storage
      // as long as the storage is in normal state.
      return true;
    }
    final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage);
    // Nodes under maintenance should be counted as valid replicas from
    // rack policy point of view.
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
        && ((corruptNodes == null) || !corruptNodes.contains(cur))) {
      liveNodes.add(cur);
    }
  }
  DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]);
  BlockType blockType = storedBlock.getBlockType();
  // Striped and contiguous blocks use different placement policies and a
  // different notion of required replica count.
  BlockPlacementPolicy placementPolicy = placementPolicies
      .getPolicy(blockType);
  int numReplicas = blockType == STRIPED ? ((BlockInfoStriped) storedBlock)
      .getRealTotalBlockNum() : storedBlock.getReplication();
  return placementPolicy.verifyBlockPlacement(locs, numReplicas)
      .isPlacementPolicySatisfied();
}
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: " + srcNode + ", Is current datanode decommissioning: " + srcNode.isDecommissionInProgress() + ", Is current datanode entering maintenance: " + srcNode.isEnteringMaintenance());
} else if (dn.isDecommissioned() ){ out.print(DECOMMISSIONED_STATUS); } else if (dn.isDecommissionInProgress()) { out.print(DECOMMISSIONING_STATUS); } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
for (DatanodeDescriptor dn : datanodeMap.values()) { final boolean isDead = isDatanodeDead(dn); final boolean isDecommissioning = dn.isDecommissionInProgress(); final boolean isEnteringMaintenance = dn.isEnteringMaintenance(); final boolean isInMaintenance = dn.isInMaintenance();
state = "(corrupt)"; } else if (node.isDecommissioned() || node.isDecommissionInProgress()) { state = "(decommissioned)"; } else if (node.isMaintenance() || node.isInMaintenance()){
/**
 * Remove {@code node}'s resources from the aggregate datanode statistics.
 * Must stay exactly symmetric with {@code add(DatanodeDescriptor)}: the
 * same state-dependent fields updated there are subtracted here.
 */
synchronized void subtract(final DatanodeDescriptor node) {
  // Xceiver count is tracked for every node regardless of admin state.
  xceiverCount -= node.getXceiverCount();
  if (node.isInService()) {
    // In-service nodes contribute to all capacity/usage aggregates.
    capacityUsed -= node.getDfsUsed();
    capacityUsedNonDfs -= node.getNonDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (node.isDecommissionInProgress() ||
      node.isEnteringMaintenance()) {
    // Nodes leaving service only contribute cache stats.
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  // Per-storage and per-storage-type stats skip FAILED storages; each
  // storage type present on the node is subtracted exactly once.
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.subtractStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.subtractNode(storageType, node);
  }
}
/**
 * Fold {@code node}'s resources into the aggregate datanode statistics.
 * Must stay exactly symmetric with {@code subtract(DatanodeDescriptor)}:
 * the same state-dependent fields subtracted there are added here.
 */
synchronized void add(final DatanodeDescriptor node) {
  // Xceiver count is tracked for every node regardless of admin state.
  xceiverCount += node.getXceiverCount();
  if (node.isInService()) {
    // In-service nodes contribute to all capacity/usage aggregates.
    capacityUsed += node.getDfsUsed();
    capacityUsedNonDfs += node.getNonDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (node.isDecommissionInProgress() ||
      node.isEnteringMaintenance()) {
    // Nodes leaving service only contribute cache stats.
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  // Per-storage and per-storage-type stats skip FAILED storages; each
  // storage type present on the node is added exactly once.
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.addStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.addNode(storageType, node);
  }
}
counters.add(s, 1); return s; } else if (node.isDecommissionInProgress()) { s = StoredReplicaState.DECOMMISSIONING; } else if (node.isDecommissioned()) {