/**
 * Transitions every storage in {@code failedStorageInfos} to the FAILED
 * state, logging each transition. Storages already marked FAILED are
 * left untouched so the failure is only logged once.
 *
 * @param failedStorageInfos storages reported as failed
 */
private void updateFailedStorage(
    Set<DatanodeStorageInfo> failedStorageInfos) {
  for (DatanodeStorageInfo info : failedStorageInfos) {
    if (info.getState() == DatanodeStorage.State.FAILED) {
      continue; // already recorded as failed; avoid duplicate log lines
    }
    LOG.info("{} failed.", info);
    info.setState(DatanodeStorage.State.FAILED);
  }
}
/**
 * @return true iff this storage has failed while still holding block
 *         replicas, i.e. those replicas must be recovered elsewhere.
 */
boolean areBlocksOnFailedStorage() {
  if (getState() != State.FAILED) {
    return false;
  }
  return !blocks.isEmpty();
}
/**
 * Adds block to list of blocks which will be invalidated on all its
 * datanodes.
 *
 * No-op while replication queues are not being populated; otherwise
 * every replica on a NORMAL storage is queued for invalidation.
 */
private void addToInvalidates(BlockInfo storedBlock) {
  if (!isPopulatingReplQueues()) {
    return;
  }
  // Only collect the node names when the message will actually be logged.
  final StringBuilder datanodes =
      blockLog.isDebugEnabled() ? new StringBuilder() : null;
  for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
    if (storage.getState() != State.NORMAL) {
      continue;
    }
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    final Block b = getBlockOnStorage(storedBlock, storage);
    if (b == null) {
      continue;
    }
    invalidateBlocks.add(b, node, false);
    if (datanodes != null) {
      datanodes.append(node).append(" ");
    }
  }
  if (datanodes != null && datanodes.length() != 0) {
    blockLog.debug("BLOCK* addToInvalidates: {} {}", storedBlock, datanodes);
  }
}
// NOTE(review): fragment — the enclosing method and the tail of this loop
// are not visible here. The visible part selects the first NORMAL storage
// whose type matches t; what happens when several match is cut off.
DatanodeStorageInfo storage = null; for (DatanodeStorageInfo s : getStorageInfos()) { if (s.getState() == State.NORMAL && s.getStorageType() == t) { if (storage == null) { storage = s;
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) { List<DatanodeDescriptor> liveNodes = new ArrayList<>(); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas .getNodes(storedBlock); for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) { if (storage.getStorageType() == StorageType.PROVIDED && storage.getState() == State.NORMAL) { // assume the policy is satisfied for blocks on PROVIDED storage // as long as the storage is in normal state. return true; } final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage); // Nodes under maintenance should be counted as valid replicas from // rack policy point of view. if (!cur.isDecommissionInProgress() && !cur.isDecommissioned() && ((corruptNodes == null) || !corruptNodes.contains(cur))) { liveNodes.add(cur); } } DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]); BlockType blockType = storedBlock.getBlockType(); BlockPlacementPolicy placementPolicy = placementPolicies .getPolicy(blockType); int numReplicas = blockType == STRIPED ? ((BlockInfoStriped) storedBlock) .getRealTotalBlockNum() : storedBlock.getReplication(); return placementPolicy.verifyBlockPlacement(locs, numReplicas) .isPlacementPolicySatisfied(); }
// NOTE(review): fragment — both the start of the statement feeding
// .getNodes(block) and the loop body after the state guard are cut off.
// The visible part iterates the block's storages, skipping any that are
// not in NORMAL state.
.getNodes(block); for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) { if (storage.getState() != State.NORMAL) { continue;
// NOTE(review): fragment — the surrounding method is not visible.
// For PROVIDED storages, a fresh DatanodeStorageInfo bound to `node` is
// created from the reported id/type/state; presumably so the shared
// provided storage is represented per-datanode — TODO confirm in context.
if (storage.getStorageType() == StorageType.PROVIDED) { storage = new DatanodeStorageInfo(node, storage.getStorageID(), storage.getStorageType(), storage.getState());
// NOTE(review): fragment — the println argument list continues past the
// visible text. Looks up the node's view of this storage (null when the
// node no longer reports it, hence the null-safe state) and prints a
// block report line.
String storageId = storage.getStorageID(); DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId); State state = (storageInfo == null) ? null : storageInfo.getState(); out.println("Block=" + block.toString() + "\tSize=" + block.getNumBytes()
/**
 * @param dn datanode descriptor
 * @param s data node storage
 * @return the {@link DatanodeStorageInfo} for the specified datanode.
 * If {@code s} corresponds to a provided storage, the storage info
 * representing provided storage is returned.
 * @throws IOException
 */
DatanodeStorageInfo getStorage(DatanodeDescriptor dn, DatanodeStorage s)
    throws IOException {
  final boolean isReservedId =
      providedEnabled && storageId.equals(s.getStorageID());
  if (!isReservedId) {
    return dn.getStorageInfo(s.getStorageID());
  }
  if (!StorageType.PROVIDED.equals(s.getStorageType())) {
    LOG.warn("Reserved storage {} reported as non-provided from {}", s, dn);
    return dn.getStorageInfo(s.getStorageID());
  }
  // A datanode reporting the provided storage as NORMAL revives it.
  if (providedStorageInfo.getState() == State.FAILED
      && s.getState() == State.NORMAL) {
    providedStorageInfo.setState(State.NORMAL);
    LOG.info("Provided storage transitioning to state " + State.NORMAL);
  }
  if (dn.getStorageInfo(s.getStorageID()) == null) {
    dn.injectStorage(providedStorageInfo);
  }
  processProvidedStorageReport();
  return providedDescriptor.getProvidedStorage(dn, s);
}
// NOTE(review): this span appears truncated/garbled by extraction — the
// condition after the corrupt-node check is fused with an unrelated
// READ_ONLY_SHARED comparison and the braces do not balance. Do not edit
// from this view; recover the original from version control.
// Visible intent: classify a storage's replica into a StoredReplicaState
// counter (READONLY for READ_ONLY_SHARED storages) given corrupt nodes
// and startup-safe-mode status.
Collection<DatanodeDescriptor> nodesCorrupt, boolean inStartupSafeMode) { final StoredReplicaState s; if (storage.getState() == State.NORMAL) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); if (nodesCorrupt != null && nodesCorrupt.contains(node)) { storage.getState() == State.READ_ONLY_SHARED) { s = StoredReplicaState.READONLY; counters.add(s, 1);
// NOTE(review): fragment of an anonymous predicate class — the
// `new ...() {` opener is outside this view. Matches storages whose
// state equals the captured `state`.
@Override public boolean apply(DatanodeStorageInfo storage) { return storage.getState() == state; } });
// NOTE(review): fragment of an anonymous predicate class — the
// `new ...() {` opener is outside this view. Matches storages whose
// state equals the captured `state`.
@Override public boolean apply(DatanodeStorageInfo storage) { return storage.getState() == state; } });
/**
 * @return true iff this storage is FAILED and still has blocks recorded
 *         against it (replicas needing recovery elsewhere).
 */
boolean areBlocksOnFailedStorage() {
  if (getState() != State.FAILED) {
    return false;
  }
  return numBlocks != 0;
}
/**
 * @return true iff this storage is FAILED and still has blocks recorded
 *         against it (replicas needing recovery elsewhere).
 */
boolean areBlocksOnFailedStorage() {
  if (getState() != State.FAILED) {
    return false;
  }
  return numBlocks != 0;
}
/**
 * Removes {@code node}'s usage figures from the aggregate statistics.
 * In-service nodes contribute capacity, usage, and cache figures;
 * nodes draining (decommissioning or entering maintenance) contribute
 * cache figures only. FAILED storages are excluded from per-storage
 * stats, mirroring how they were added.
 */
synchronized void subtract(final DatanodeDescriptor node) {
  xceiverCount -= node.getXceiverCount();
  if (node.isInService()) {
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityUsed -= node.getDfsUsed();
    capacityUsedNonDfs -= node.getNonDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (node.isDecommissionInProgress()
      || node.isEnteringMaintenance()) {
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  final Set<StorageType> seenTypes = new HashSet<>();
  for (DatanodeStorageInfo si : node.getStorageInfos()) {
    if (si.getState() == DatanodeStorage.State.FAILED) {
      continue; // failed storages are not part of the totals
    }
    statsMap.subtractStorage(si, node);
    seenTypes.add(si.getStorageType());
  }
  for (StorageType t : seenTypes) {
    statsMap.subtractNode(t, node);
  }
}
/**
 * Folds {@code node}'s usage figures into the aggregate statistics.
 * In-service nodes contribute capacity, usage, and cache figures;
 * nodes draining (decommissioning or entering maintenance) contribute
 * cache figures only. FAILED storages are excluded from per-storage
 * stats.
 */
synchronized void add(final DatanodeDescriptor node) {
  xceiverCount += node.getXceiverCount();
  if (node.isInService()) {
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityUsed += node.getDfsUsed();
    capacityUsedNonDfs += node.getNonDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (node.isDecommissionInProgress()
      || node.isEnteringMaintenance()) {
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  final Set<StorageType> seenTypes = new HashSet<>();
  for (DatanodeStorageInfo si : node.getStorageInfos()) {
    if (si.getState() == DatanodeStorage.State.FAILED) {
      continue; // failed storages are not counted
    }
    statsMap.addStorage(si, node);
    seenTypes.add(si.getStorageType());
  }
  for (StorageType t : seenTypes) {
    statsMap.addNode(t, node);
  }
}
// NOTE(review): fragment — both the preceding branch and this branch's
// body are outside this view. Taken when the recorded storage's state or
// type disagrees with the reported DatanodeStorage `s`.
} else if (storage.getState() != s.getState() || storage.getStorageType() != s.getStorageType()) {
/**
 * Marks each storage in {@code failedStorageInfos} as FAILED if it is not
 * already, logging the transition.
 *
 * @param failedStorageInfos storages reported as failed
 */
private void updateFailedStorage(
    Set<DatanodeStorageInfo> failedStorageInfos) {
  for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      // Use parameterized logging instead of string concatenation
      // (consistent with the {}-style used elsewhere in this code).
      LOG.info("{} failed.", storageInfo);
      storageInfo.setState(DatanodeStorage.State.FAILED);
    }
  }
}
/**
 * Marks each storage in {@code failedStorageInfos} as FAILED if it is not
 * already, logging the transition.
 *
 * @param failedStorageInfos storages reported as failed
 */
private void updateFailedStorage(
    Set<DatanodeStorageInfo> failedStorageInfos) {
  for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      // Use parameterized logging instead of string concatenation
      // (consistent with the {}-style used elsewhere in this code).
      LOG.info("{} failed.", storageInfo);
      storageInfo.setState(DatanodeStorage.State.FAILED);
    }
  }
}
// NOTE(review): fragment — the loop body after obtaining the descriptor
// is outside this view. Iterates the block's storages, skipping FAILED
// ones; `noCorrupt` flags that the block has no corrupt replicas.
final boolean noCorrupt = (numCorruptReplicas == 0); for(DatanodeStorageInfo storage : blocksMap.getStorages(blk)) { if (storage.getState() != State.FAILED) { final DatanodeDescriptor d = storage.getDatanodeDescriptor();