/** Collects every storage currently holding {@code block} into a new array. */
public DatanodeStorageInfo[] getStorages(BlockInfo block) {
  final DatanodeStorageInfo[] result = new DatanodeStorageInfo[block.numNodes()];
  int next = 0;
  for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
    result[next] = storage;
    next++;
  }
  return result;
}
/**
 * Returns the number of datanodes containing block {@code b}, or 0 when the
 * block is not present in the map. Cheaper than walking the iterator.
 */
int numNodes(Block b) {
  final BlockInfo info = blocks.get(b);
  if (info == null) {
    return 0;
  }
  return info.numNodes();
}
/**
 * Check if the blocks are COMPLETE;
 * it may allow the last block to be COMMITTED.
 *
 * @param src file path, used only for the log message on failure
 * @param allowCommittedBlock whether trailing COMMITTED blocks are tolerated
 * @param blocks the blocks of the file, in order
 * @return true iff every block passes the completeness check
 */
private boolean checkBlocksComplete(String src, boolean allowCommittedBlock,
    BlockInfo... blocks) {
  final int n = allowCommittedBlock ? numCommittedAllowed : 0;
  for (int i = 0; i < blocks.length; i++) {
    final short min = blockManager.getMinStorageNum(blocks[i]);
    final String err = INodeFile.checkBlockComplete(blocks, i, n, min);
    if (err == null) {
      continue;
    }
    // Not complete: log why and bail out on the first offending block.
    final int numNodes = blocks[i].numNodes();
    LOG.info("BLOCK* " + err + "(numNodes= " + numNodes
        + (numNodes < min ? " < " : " >= ")
        + " minimum = " + min + ") in file " + src);
    return false;
  }
  return true;
}
/**
 * If IBR is not sent from expected locations yet, add the datanodes to
 * pendingReconstruction in order to keep RedundancyMonitor from scheduling
 * the block.
 */
public void addExpectedReplicasToPending(BlockInfo blk) {
  // Only contiguous (non-striped) blocks are handled here.
  if (blk.isStriped()) {
    return;
  }
  final DatanodeStorageInfo[] expected =
      blk.getUnderConstructionFeature().getExpectedStorageLocations();
  // Nothing to do once reports have arrived from all expected locations.
  if (expected.length - blk.numNodes() <= 0) {
    return;
  }
  final ArrayList<DatanodeDescriptor> missing = new ArrayList<>();
  for (DatanodeStorageInfo storage : expected) {
    final DatanodeDescriptor dn = storage.getDatanodeDescriptor();
    // A datanode with no storage entry for this block hasn't reported yet.
    if (blk.findStorageInfo(dn) == null) {
      missing.add(dn);
    }
  }
  pendingReconstruction.increment(blk,
      missing.toArray(new DatanodeDescriptor[missing.size()]));
}
int numNodes = curBlock.numNodes(); if (!force && !hasMinStorage(curBlock, numNodes)) { throw new IOException("Cannot complete block: "
for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) { DatanodeDescriptor dn = blockInfo.getDatanode(idx); out.print("Block replica on datanode/rack: " + dn.getHostName() +
/** * Removes the blocks from blocksmap and updates the safemode blocks total. * @param blocks An instance of {@link BlocksMapUpdateInfo} which contains a * list of blocks that need to be removed from blocksMap */ public void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) { assert namesystem.hasWriteLock(); // In the case that we are a Standby tailing edits from the // active while in safe-mode, we need to track the total number // of blocks and safe blocks in the system. boolean trackBlockCounts = bmSafeMode.isSafeModeTrackingBlocks(); int numRemovedComplete = 0, numRemovedSafe = 0; for (BlockInfo b : blocks.getToDeleteList()) { if (trackBlockCounts) { if (b.isComplete()) { numRemovedComplete++; if (hasMinStorage(b, b.numNodes())) { numRemovedSafe++; } } } removeBlock(b); } if (trackBlockCounts) { LOG.debug("Adjusting safe-mode totals for deletion." + "decreasing safeBlocks by {}, totalBlocks by {}", numRemovedSafe, numRemovedComplete); bmSafeMode.adjustBlockTotals(-numRemovedSafe, -numRemovedComplete); } }
/** * Remove the block from the block map; * remove it from all data-node lists it belongs to; * and remove all data-node locations associated with the block. */ void removeBlock(BlockInfo block) { BlockInfo blockInfo = blocks.remove(block); if (blockInfo == null) { return; } decrementBlockStat(block); assert blockInfo.getBlockCollectionId() == INodeId.INVALID_INODE_ID; final int size = blockInfo.isStriped() ? blockInfo.getCapacity() : blockInfo.numNodes(); for(int idx = size - 1; idx >= 0; idx--) { DatanodeDescriptor dn = blockInfo.getDatanode(idx); if (dn != null) { removeBlock(dn, blockInfo); // remove from the list and wipe the location } } }