/**
 * Check whether every storage attached to this datanode has delivered at
 * least one block report.
 *
 * @return true iff the node has one or more storages and each of them has a
 *         non-zero block report count; false when no storages are present.
 */
public boolean checkBlockReportReceived() {
  final DatanodeStorageInfo[] storages = this.getStorageInfos();
  if (storages.length == 0) {
    // A node with no storages cannot have reported anything yet.
    return false;
  }
  boolean allReported = true;
  for (DatanodeStorageInfo storage : storages) {
    if (storage.getBlockReportCount() == 0) {
      allReported = false;
      break;
    }
  }
  return allReported;
}
/**
 * Check whether this node carries at least one storage of the given type.
 *
 * @param type the storage type to look for
 * @return true if any attached storage has exactly that type
 */
public boolean hasStorageType(StorageType type) {
  final DatanodeStorageInfo[] storages = getStorageInfos();
  for (int i = 0; i < storages.length; i++) {
    if (storages[i].getStorageType() == type) {
      return true;
    }
  }
  return false;
}
}
/**
 * Flag every storage of every registered datanode as stale (e.g. after an
 * HA failover) so their contents are re-validated by later block reports.
 */
public void markAllDatanodesStale() {
  LOG.info("Marking all datanodes as stale");
  synchronized (this) {
    // datanodeMap access is guarded by this monitor.
    for (DatanodeDescriptor node : datanodeMap.values()) {
      for (DatanodeStorageInfo info : node.getStorageInfos()) {
        info.markStaleAfterFailover();
      }
    }
  }
}
/**
 * Collect the distinct storage types present on this node.
 *
 * @return an {@link EnumSet} of every storage type reported by the node's
 *         storages; empty when the node has no storages.
 */
public EnumSet<StorageType> getStorageTypes() {
  final EnumSet<StorageType> types = EnumSet.noneOf(StorageType.class);
  final DatanodeStorageInfo[] storages = getStorageInfos();
  for (int i = 0; i < storages.length; i++) {
    types.add(storages[i].getStorageType());
  }
  return types;
}
/**
 * Build one {@link StorageReport} per storage attached to this node.
 *
 * @return an array of fresh storage reports, index-aligned with the
 *         node's current storages.
 */
public StorageReport[] getStorageReports() {
  final DatanodeStorageInfo[] storages = getStorageInfos();
  final StorageReport[] reports = new StorageReport[storages.length];
  int idx = 0;
  for (DatanodeStorageInfo storage : storages) {
    reports[idx++] = storage.toStorageReport();
  }
  return reports;
}
/**
 * Total the block counts across every storage on this node.
 *
 * @return the sum of {@code numBlocks()} over all storages; 0 when the
 *         node has no storages.
 */
public int numBlocks() {
  int total = 0;
  for (DatanodeStorageInfo storage : getStorageInfos()) {
    total += storage.numBlocks();
  }
  return total;
}
/**
 * Build an iterator over this node's blocks that begins at the given
 * block index across the node's storages.
 *
 * @param startBlock index of the first block to return
 * @return a block iterator positioned at {@code startBlock}
 */
Iterator<BlockInfo> getBlockIterator(final int startBlock) {
  final DatanodeStorageInfo[] storages = getStorageInfos();
  return new BlockIterator(startBlock, storages);
}
/** * @param nodeReg DatanodeID to update registration for. */ @Override public void updateRegInfo(DatanodeID nodeReg) { super.updateRegInfo(nodeReg); // must re-process IBR after re-registration for(DatanodeStorageInfo storage : getStorageInfos()) { if (storage.getStorageType() != StorageType.PROVIDED) { storage.setBlockReportCount(0); } } heartbeatedSinceRegistration = false; forceRegistration = false; }
/** Remove the blocks associated to the given datanode. */ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { providedStorageMap.removeDatanode(node); for (DatanodeStorageInfo storage : node.getStorageInfos()) { final Iterator<BlockInfo> it = storage.getBlockIterator(); //add the BlockInfos to a new collection as the //returned iterator is not modifiable. Collection<BlockInfo> toRemove = new ArrayList<>(); while (it.hasNext()) { toRemove.add(it.next()); } for (BlockInfo b : toRemove) { removeStoredBlock(b, node); } } // Remove all pending DN messages referencing this DN. pendingDNMessages.removeAllMessagesForDatanode(node); node.resetBlocks(); invalidateBlocks.remove(node); }
/** * Start decommissioning the specified datanode. * @param node */ @VisibleForTesting public void startDecommission(DatanodeDescriptor node) { if (!node.isDecommissionInProgress() && !node.isDecommissioned()) { // Update DN stats maintained by HeartbeatManager hbManager.startDecommission(node); // hbManager.startDecommission will set dead node to decommissioned. if (node.isDecommissionInProgress()) { for (DatanodeStorageInfo storage : node.getStorageInfos()) { LOG.info("Starting decommission of {} {} with {} blocks", node, storage, storage.numBlocks()); } node.getLeavingServiceStatus().setStartTime(monotonicNow()); pendingNodes.add(node); } } else { LOG.trace("startDecommission: Node {} in {}, nothing to do.", node, node.getAdminState()); } }
long remaining = 0; DatanodeStorageInfo storage = null; for (DatanodeStorageInfo s : getStorageInfos()) { if (s.getState() == State.NORMAL && s.getStorageType() == t) { if (storage == null) {
/** * Start maintenance of the specified datanode. * @param node */ @VisibleForTesting public void startMaintenance(DatanodeDescriptor node, long maintenanceExpireTimeInMS) { // Even if the node is already in maintenance, we still need to adjust // the expiration time. node.setMaintenanceExpireTimeInMS(maintenanceExpireTimeInMS); if (!node.isMaintenance()) { // Update DN stats maintained by HeartbeatManager hbManager.startMaintenance(node); // hbManager.startMaintenance will set dead node to IN_MAINTENANCE. if (node.isEnteringMaintenance()) { for (DatanodeStorageInfo storage : node.getStorageInfos()) { LOG.info("Starting maintenance of {} {} with {} blocks", node, storage, storage.numBlocks()); } node.getLeavingServiceStatus().setStartTime(monotonicNow()); } // Track the node regardless whether it is ENTERING_MAINTENANCE or // IN_MAINTENANCE to support maintenance expiration. pendingNodes.add(node); } else { LOG.trace("startMaintenance: Node {} in {}, nothing to do.", node, node.getAdminState()); } }
for (DatanodeDescriptor node : datanodeManager.getDatanodeListForReport(DatanodeReportType.ALL)) { for (DatanodeStorageInfo storage : node.getStorageInfos()) { try { namesystem.readLock();
/**
 * Fold the given datanode's statistics into the aggregate counters,
 * per-storage stats map, and per-storage-type node counts.
 *
 * @param node the datanode being added to the stats
 */
synchronized void add(final DatanodeDescriptor node) {
  xceiverCount += node.getXceiverCount();
  if (node.isInService()) {
    // Live, in-service nodes contribute to every capacity counter.
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityUsed += node.getDfsUsed();
    capacityUsedNonDfs += node.getNonDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (node.isDecommissionInProgress()
      || node.isEnteringMaintenance()) {
    // Nodes leaving service still contribute cache stats only.
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  // Track each non-failed storage, collecting the distinct types so the
  // node is counted once per storage type it carries.
  final Set<StorageType> seenTypes = new HashSet<>();
  for (DatanodeStorageInfo info : node.getStorageInfos()) {
    if (info.getState() != DatanodeStorage.State.FAILED) {
      statsMap.addStorage(info, node);
      seenTypes.add(info.getStorageType());
    }
  }
  for (StorageType type : seenTypes) {
    statsMap.addNode(type, node);
  }
}
/**
 * Remove the given datanode's statistics from the aggregate counters,
 * per-storage stats map, and per-storage-type node counts — the exact
 * inverse of {@code add}.
 *
 * @param node the datanode being removed from the stats
 */
synchronized void subtract(final DatanodeDescriptor node) {
  xceiverCount -= node.getXceiverCount();
  if (node.isInService()) {
    // Live, in-service nodes had contributed to every capacity counter.
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityUsed -= node.getDfsUsed();
    capacityUsedNonDfs -= node.getNonDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (node.isDecommissionInProgress()
      || node.isEnteringMaintenance()) {
    // Nodes leaving service had contributed cache stats only.
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  // Untrack each non-failed storage, collecting the distinct types so
  // the node is un-counted once per storage type it carried.
  final Set<StorageType> seenTypes = new HashSet<>();
  for (DatanodeStorageInfo info : node.getStorageInfos()) {
    if (info.getState() != DatanodeStorage.State.FAILED) {
      statsMap.subtractStorage(info, node);
      seenTypes.add(info.getStorageType());
    }
  }
  for (StorageType type : seenTypes) {
    statsMap.subtractNode(type, node);
  }
}
/**
 * Build an iterator over this node's blocks that begins at the given
 * block index across the node's storages.
 *
 * @param startBlock index of the first block to return
 * @return a block iterator positioned at {@code startBlock}
 */
Iterator<BlockInfoContiguous> getBlockIterator(final int startBlock) {
  final DatanodeStorageInfo[] storages = getStorageInfos();
  return new BlockIterator(startBlock, storages);
}
/**
 * Create an iterator positioned at {@code startBlock} that walks the
 * blocks stored across this node's storages.
 *
 * @param startBlock index of the first block to yield
 * @return the positioned block iterator
 */
Iterator<BlockInfoContiguous> getBlockIterator(final int startBlock) {
  return new BlockIterator(startBlock, this.getStorageInfos());
}
numOfStaleNodes++; DatanodeStorageInfo[] storageInfos = d.getStorageInfos(); for(DatanodeStorageInfo storageInfo : storageInfos) { if (storageInfo.areBlockContentsStale()) {
/**
 * Test helper: push synthetic utilization numbers onto the datanode's
 * first storage and then send a heartbeat carrying them.
 *
 * @param dn datanode under test
 * @param capacity total capacity to report
 * @param dfsUsed DFS-used bytes to report
 * @param remaining remaining bytes to report
 * @param blockPoolUsed block-pool-used bytes to report
 * @param dnCacheCapacity cache capacity for the heartbeat
 * @param dnCacheUsed cache used for the heartbeat
 * @param xceiverCount xceiver count for the heartbeat
 * @param volFailures volume failure count for the heartbeat
 */
private static void updateHeartbeatWithUsage(DatanodeDescriptor dn,
    long capacity, long dfsUsed, long remaining, long blockPoolUsed,
    long dnCacheCapacity, long dnCacheUsed, int xceiverCount,
    int volFailures) {
  // Only the first storage receives the synthetic utilization values.
  final DatanodeStorageInfo primary = dn.getStorageInfos()[0];
  primary.setUtilizationForTesting(capacity, dfsUsed, remaining,
      blockPoolUsed);
  dn.updateHeartbeat(
      BlockManagerTestUtil.getStorageReportsForDatanode(dn),
      dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
}
/**
 * Test helper: push synthetic utilization numbers onto the SECOND storage
 * of test datanode #5 and send a heartbeat (with zeroed cache/xceiver
 * stats) carrying them.
 *
 * @param capacity total capacity to report
 * @param dfsUsed DFS-used bytes to report
 * @param remaining remaining bytes to report
 * @param blockPoolUsed block-pool-used bytes to report
 */
private static void updateHeartbeatForExtraStorage(long capacity,
    long dfsUsed, long remaining, long blockPoolUsed) {
  final DatanodeDescriptor dn = dataNodes[5];
  // The "extra" storage is index 1 on this particular test node.
  final DatanodeStorageInfo extra = dn.getStorageInfos()[1];
  extra.setUtilizationForTesting(capacity, dfsUsed, remaining,
      blockPoolUsed);
  dn.updateHeartbeat(
      BlockManagerTestUtil.getStorageReportsForDatanode(dn),
      0L, 0L, 0, 0, null);
}