/**
 * Map an array of storages to the array of their storage types.
 *
 * @param storages the storages to inspect; may be {@code null}
 * @return a parallel array of storage types, or {@code null} when the
 *         input is {@code null}
 */
public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
  if (storages == null) {
    return null;
  }
  final int count = storages.length;
  final StorageType[] types = new StorageType[count];
  for (int i = 0; i < count; i++) {
    types[i] = storages[i].getStorageType();
  }
  return types;
}
/**
 * Fold the given storage's statistics into the per-type aggregate,
 * creating the {@link StorageTypeStats} entry for its storage type on
 * first use.
 *
 * @param info the storage whose stats are added
 * @param node the datanode owning the storage (its service state decides
 *             how capacity is counted inside StorageTypeStats)
 */
private void addStorage(final DatanodeStorageInfo info,
    final DatanodeDescriptor node) {
  // computeIfAbsent replaces the manual get / null-check / put sequence;
  // StorageTypeStats(StorageType) matches the mapping-function shape.
  final StorageTypeStats storageTypeStats =
      storageTypeStatsMap.computeIfAbsent(info.getStorageType(),
          StorageTypeStats::new);
  storageTypeStats.addStorage(info, node);
}
/**
 * Collect the distinct storage types present across this node's storages.
 *
 * @return an {@link EnumSet} of every storage type reported by
 *         {@code getStorageInfos()}
 */
public EnumSet<StorageType> getStorageTypes() {
  final EnumSet<StorageType> types = EnumSet.noneOf(StorageType.class);
  for (DatanodeStorageInfo storage : getStorageInfos()) {
    types.add(storage.getStorageType());
  }
  return types;
}
/**
 * Remove the given storage's statistics from the per-type aggregate.
 * A missing entry for the storage type is silently ignored.
 *
 * @param info the storage whose stats are subtracted
 * @param node the datanode owning the storage
 */
private void subtractStorage(final DatanodeStorageInfo info,
    final DatanodeDescriptor node) {
  final StorageTypeStats stats =
      storageTypeStatsMap.get(info.getStorageType());
  if (stats == null) {
    // Nothing was ever aggregated for this type; nothing to subtract.
    return;
  }
  stats.subtractStorage(info, node);
}
/**
 * Check whether any storage attached to this node has the given type.
 *
 * @param type the storage type to look for
 * @return {@code true} iff at least one storage reports {@code type}
 */
public boolean hasStorageType(StorageType type) {
  for (DatanodeStorageInfo storage : getStorageInfos()) {
    if (storage.getStorageType() == type) {
      return true;
    }
  }
  return false;
}
}
/** Check if we can use delHint. */ @VisibleForTesting boolean useDelHint(DatanodeStorageInfo delHint, DatanodeStorageInfo added, List<DatanodeStorageInfo> moreThanOne, Collection<DatanodeStorageInfo> exactlyOne, List<StorageType> excessTypes) { if (delHint == null) { return false; // no delHint } else if (!excessTypes.contains(delHint.getStorageType())) { return false; // delHint storage type is not an excess type } else { // check if removing delHint reduces the number of racks return notReduceNumOfGroups(moreThanOne, delHint, added); } }
/**
 * Decrement the number of blocks scheduled for each given storage. This will
 * be called during abandon block or delete of UC block.
 *
 * @param storages storages whose owning datanodes get their per-type
 *                 scheduled-block counters decremented
 */
public static void decrementBlocksScheduled(DatanodeStorageInfo... storages) {
  for (int i = 0; i < storages.length; i++) {
    final DatanodeStorageInfo storage = storages[i];
    storage.getDatanodeDescriptor()
        .decrementBlocksScheduled(storage.getStorageType());
  }
}
/**
 * Increment the number of blocks scheduled for each given storage.
 *
 * @param storages storages whose owning datanodes get their per-type
 *                 scheduled-block counters incremented
 */
public static void incrementBlocksScheduled(DatanodeStorageInfo... storages) {
  for (int i = 0; i < storages.length; i++) {
    final DatanodeStorageInfo storage = storages[i];
    storage.getDatanodeDescriptor()
        .incrementBlocksScheduled(storage.getStorageType());
  }
}
boolean hasStaleStorages() { synchronized (storageMap) { for (DatanodeStorageInfo storage : storageMap.values()) { if (StorageType.PROVIDED.equals(storage.getStorageType())) { // to verify provided storage participated in this hb, requires // check to pass DNDesc. // e.g., storageInfo.verifyBlockReportId(this, curBlockReportId) continue; } if (storage.areBlockContentsStale()) { return true; } } return false; } }
/**
 * Get the associated {@link DatanodeDescriptor} for the storage.
 * If the storage is of type PROVIDED, one of the nodes that reported
 * PROVIDED storage are returned. If not, this is equivalent to
 * {@code storage.getDatanodeDescriptor()}.
 * @param storage
 * @return the associated {@link DatanodeDescriptor}.
 */
private DatanodeDescriptor getDatanodeDescriptorFromStorage(
    DatanodeStorageInfo storage) {
  return storage.getStorageType() == StorageType.PROVIDED
      ? providedStorageMap.chooseProvidedDatanode()
      : storage.getDatanodeDescriptor();
}
/** * @param nodeReg DatanodeID to update registration for. */ @Override public void updateRegInfo(DatanodeID nodeReg) { super.updateRegInfo(nodeReg); // must re-process IBR after re-registration for(DatanodeStorageInfo storage : getStorageInfos()) { if (storage.getStorageType() != StorageType.PROVIDED) { storage.setBlockReportCount(0); } } heartbeatedSinceRegistration = false; forceRegistration = false; }
void injectStorage(DatanodeStorageInfo s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (null == storage) { LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(), getXferAddr()); DFSTopologyNodeImpl parent = null; if (getParent() instanceof DFSTopologyNodeImpl) { parent = (DFSTopologyNodeImpl) getParent(); } StorageType type = s.getStorageType(); if (!hasStorageType(type) && parent != null) { // we are about to add a type this node currently does not have, // inform the parent that a new type is added to this datanode parent.childAddStorage(getName(), type); } storageMap.put(s.getStorageID(), s); } else { assert storage == s : "found " + storage + " expected " + s; } } }
/**
 * Check whether the current replica locations of {@code storedBlock}
 * satisfy the configured block placement policy.
 * Replicas on decommissioning/decommissioned or corrupt-reported nodes are
 * excluded from the live set before the policy is consulted.
 *
 * @param storedBlock the block whose placement is verified
 * @return true if placement is satisfied (or the block has a NORMAL
 *         PROVIDED replica, which is assumed satisfied)
 */
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
  List<DatanodeDescriptor> liveNodes = new ArrayList<>();
  Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
      .getNodes(storedBlock);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
    if (storage.getStorageType() == StorageType.PROVIDED
        && storage.getState() == State.NORMAL) {
      // assume the policy is satisfied for blocks on PROVIDED storage
      // as long as the storage is in normal state.
      return true;
    }
    final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage);
    // Nodes under maintenance should be counted as valid replicas from
    // rack policy point of view.
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
        && ((corruptNodes == null) || !corruptNodes.contains(cur))) {
      liveNodes.add(cur);
    }
  }
  DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]);
  BlockType blockType = storedBlock.getBlockType();
  // The policy implementation is chosen per block type (striped vs
  // contiguous).
  BlockPlacementPolicy placementPolicy = placementPolicies
      .getPolicy(blockType);
  // For striped blocks the expected replica count is the real total number
  // of internal blocks; otherwise it is the replication factor.
  int numReplicas = blockType == STRIPED ? ((BlockInfoStriped) storedBlock)
      .getRealTotalBlockNum() : storedBlock.getReplication();
  return placementPolicy.verifyBlockPlacement(locs, numReplicas)
      .isPlacementPolicySatisfied();
}
/** * Find specified DatanodeStorageInfo. * @return DatanodeStorageInfo or null if not found. */ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { int len = getCapacity(); DatanodeStorageInfo providedStorageInfo = null; for(int idx = 0; idx < len; idx++) { DatanodeStorageInfo cur = getStorageInfo(idx); if(cur != null) { if (cur.getStorageType() == StorageType.PROVIDED) { // if block resides on provided storage, only match the storage ids if (dn.getStorageInfo(cur.getStorageID()) != null) { // do not return here as we have to check the other // DatanodeStorageInfos for this block which could be local providedStorageInfo = cur; } } else if (cur.getDatanodeDescriptor() == dn) { return cur; } } } return providedStorageInfo; }
/**
 * Add the given storage's usage numbers to this type's aggregate counters.
 * Total capacity and remaining space are only counted for nodes in
 * service; otherwise only the used space contributes to the total.
 */
void addStorage(final DatanodeStorageInfo info,
    final DatanodeDescriptor node) {
  assert storageType == info.getStorageType();
  final long dfsUsed = info.getDfsUsed();
  capacityUsed += dfsUsed;
  capacityNonDfsUsed += info.getNonDfsUsed();
  blockPoolUsed += info.getBlockPoolUsed();
  if (!node.isInService()) {
    // Node not in service: only its used space counts toward the total.
    capacityTotal += dfsUsed;
  } else {
    capacityTotal += info.getCapacity();
    capacityRemaining += info.getRemaining();
  }
}
/**
 * Remove the given storage's usage numbers from this type's aggregate
 * counters; the exact inverse of {@code addStorage}.
 */
void subtractStorage(final DatanodeStorageInfo info,
    final DatanodeDescriptor node) {
  assert storageType == info.getStorageType();
  final long dfsUsed = info.getDfsUsed();
  capacityUsed -= dfsUsed;
  capacityNonDfsUsed -= info.getNonDfsUsed();
  blockPoolUsed -= info.getBlockPoolUsed();
  if (!node.isInService()) {
    // Node not in service: only its used space was counted in the total.
    capacityTotal -= dfsUsed;
  } else {
    capacityTotal -= info.getCapacity();
    capacityRemaining -= info.getRemaining();
  }
}
// Roll back one scheduled block on the node, keyed by this storage's type.
// NOTE(review): fragment of a larger method not visible in this chunk —
// confirm surrounding context before changing.
node.decrementBlocksScheduled(storageInfo.getStorageType());
/**
 * when committing a striped block whose size is less than a stripe, we need
 * to decrease the scheduled block size of the DataNodes that do not store
 * any internal block.
 *
 * @param storedBlock the striped block being committed; must own this
 *                    under-construction feature (asserted below)
 */
void updateStorageScheduledSize(BlockInfoStriped storedBlock) {
  assert storedBlock.getUnderConstructionFeature() == this;
  if (replicas.length == 0) {
    return;
  }
  final int dataBlockNum = storedBlock.getDataBlockNum();
  final int realDataBlockNum = storedBlock.getRealDataBlockNum();
  if (realDataBlockNum < dataBlockNum) {
    // Internal blocks with index in [realDataBlockNum, dataBlockNum) were
    // scheduled but will never receive data; undo their scheduled count.
    for (ReplicaUnderConstruction replica : replicas) {
      int index = BlockIdManager.getBlockIndex(replica);
      if (index >= realDataBlockNum && index < dataBlockNum) {
        final DatanodeStorageInfo storage =
            replica.getExpectedStorageLocation();
        storage.getDatanodeDescriptor()
            .decrementBlocksScheduled(storage.getStorageType());
      }
    }
  }
}
/**
 * Remove a datanode's contribution from the cluster-wide aggregate stats;
 * the inverse of {@code add}.
 * Capacity/usage counters are only adjusted for in-service nodes; cache
 * counters are also adjusted for nodes decommissioning or entering
 * maintenance. Per-storage-type stats are updated for every non-FAILED
 * storage on the node.
 */
synchronized void subtract(final DatanodeDescriptor node) {
  xceiverCount -= node.getXceiverCount();
  if (node.isInService()) {
    capacityUsed -= node.getDfsUsed();
    capacityUsedNonDfs -= node.getNonDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (node.isDecommissionInProgress() ||
      node.isEnteringMaintenance()) {
    // Nodes leaving service still contributed cache stats.
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  // Subtract per-storage stats, remembering which types this node had so
  // the per-type node count can be decremented exactly once per type.
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.subtractStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.subtractNode(storageType, node);
  }
}
/**
 * Fold a datanode's contribution into the cluster-wide aggregate stats.
 * Capacity/usage counters are only adjusted for in-service nodes; cache
 * counters are also adjusted for nodes decommissioning or entering
 * maintenance. Per-storage-type stats are updated for every non-FAILED
 * storage on the node.
 */
synchronized void add(final DatanodeDescriptor node) {
  xceiverCount += node.getXceiverCount();
  if (node.isInService()) {
    capacityUsed += node.getDfsUsed();
    capacityUsedNonDfs += node.getNonDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (node.isDecommissionInProgress() ||
      node.isEnteringMaintenance()) {
    // Nodes leaving service still contribute cache stats.
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  // Add per-storage stats, remembering which types this node has so the
  // per-type node count can be incremented exactly once per type.
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.addStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.addNode(storageType, node);
  }
}