/**
 * Convenience overload of {@code addBlock(reported, stored)} that passes
 * {@code b} as both the reported and the stored block.
 */
AddBlockResult addBlock(BlockInfo b) {
  return addBlock(b, b);
}
/**
 * Scan the storage list from the tail toward the head looking for the
 * given storage.
 *
 * @param storage the storage to locate
 * @return the index of the storage, or -1 if it is not present
 */
private int findStorageInfoFromEnd(DatanodeStorageInfo storage) {
  int i = getCapacity();
  while (--i >= 0) {
    if (storage.equals(getStorageInfo(i))) {
      return i;
    }
  }
  return -1;
}
/** Remove the blocks associated to the given DatanodeStorageInfo. */
void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
  assert namesystem.hasWriteLock();
  final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
  // Snapshot the block list first: removing a stored block mutates the
  // storage's block list, so we must not remove while iterating it.
  final Collection<BlockInfo> pending = new ArrayList<>();
  for (Iterator<BlockInfo> it = storageInfo.getBlockIterator(); it.hasNext();) {
    pending.add(it.next());
  }
  for (BlockInfo block : pending) {
    removeStoredBlock(block, node);
    final Block b = getBlockOnStorage(block, storageInfo);
    if (b != null) {
      invalidateBlocks.remove(node, b);
    }
  }
  checkSafeMode();
  LOG.info("Removed blocks associated with storage {} from DataNode {}",
      storageInfo, node);
}
/**
 * Fold the usage counters of {@code info} into this aggregate.
 * Full capacity and remaining space are only credited when the owning
 * node is in service; otherwise only the space already used by DFS
 * counts toward the total.
 */
void addStorage(final DatanodeStorageInfo info, final DatanodeDescriptor node) {
  assert storageType == info.getStorageType();
  final long dfsUsed = info.getDfsUsed();
  capacityUsed += dfsUsed;
  capacityNonDfsUsed += info.getNonDfsUsed();
  blockPoolUsed += info.getBlockPoolUsed();
  if (!node.isInService()) {
    // Node not in service: only its DFS-used space contributes to total.
    capacityTotal += dfsUsed;
  } else {
    capacityTotal += info.getCapacity();
    capacityRemaining += info.getRemaining();
  }
}
/** Increment the number of blocks scheduled for each given storage */
public static void incrementBlocksScheduled(DatanodeStorageInfo... storages) {
  for (int i = 0; i < storages.length; i++) {
    final DatanodeStorageInfo storage = storages[i];
    storage.getDatanodeDescriptor()
        .incrementBlocksScheduled(storage.getStorageType());
  }
}
storage, corruptReplicas.getNodes(block), false); if (state == StoredReplicaState.LIVE) { if (storage.getStorageType() == StorageType.PROVIDED) { storage = new DatanodeStorageInfo(node, storage.getStorageID(), storage.getStorageType(), storage.getState());
/**
 * Build a synthetic {@link StorageReport} for every storage attached to
 * the given datanode.
 *
 * @param dnd the datanode whose storages are reported
 * @return one report per storage; each report is marked non-failed and
 *         carries a last-block-report id of 0
 */
public static StorageReport[] getStorageReportsForDatanode(
    DatanodeDescriptor dnd) {
  // Hoist the storage array and presize the list to avoid regrowth.
  final DatanodeStorageInfo[] infos = dnd.getStorageInfos();
  final ArrayList<StorageReport> reports = new ArrayList<>(infos.length);
  for (DatanodeStorageInfo storage : infos) {
    DatanodeStorage dns = new DatanodeStorage(
        storage.getStorageID(), storage.getState(), storage.getStorageType());
    reports.add(new StorageReport(dns, false,
        storage.getCapacity(), storage.getDfsUsed(),
        storage.getRemaining(), storage.getBlockPoolUsed(), 0L));
  }
  return reports.toArray(StorageReport.EMPTY_ARRAY);
}
/** * Find specified DatanodeStorageInfo. * @return DatanodeStorageInfo or null if not found. */ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { int len = getCapacity(); DatanodeStorageInfo providedStorageInfo = null; for(int idx = 0; idx < len; idx++) { DatanodeStorageInfo cur = getStorageInfo(idx); if(cur != null) { if (cur.getStorageType() == StorageType.PROVIDED) { // if block resides on provided storage, only match the storage ids if (dn.getStorageInfo(cur.getStorageID()) != null) { // do not return here as we have to check the other // DatanodeStorageInfos for this block which could be local providedStorageInfo = cur; } } else if (cur.getDatanodeDescriptor() == dn) { return cur; } } } return providedStorageInfo; }
DatanodeStorageInfo storage = null; for (DatanodeStorageInfo s : getStorageInfos()) { if (s.getState() == State.NORMAL && s.getStorageType() == t) { if (storage == null) { storage = s; long r = s.getRemaining(); if (r >= requiredSize) { remaining += r;
sb.append("blk_" + index + ":"); DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor(); if (showRacks) { sb.append(NodeBase.getPath(dnDesc)); } else { sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(), storage.getStorageType())); } else if (dnDesc.isStale(this.staleInterval)) { sb.append("STALE_NODE)"); } else if (storage.areBlockContentsStale()) { sb.append("STALE_BLOCK_CONTENT)"); } else {
&& storageInfo.getBlockReportCount() > 0) { blockLog.info("BLOCK* processReport 0x{}: " + "discarded non-initial block report from {}" if (storageInfo.getBlockReportCount() == 0) { + "storage report for {} from datanode {}", strBlockReportId, storageInfo.getStorageID(), nodeID.getDatanodeUuid()); processFirstBlockReport(storageInfo, newReport); if (!StorageType.PROVIDED.equals(storageInfo.getStorageType())) { invalidatedBlocks = processReport(storageInfo, newReport, context); storageInfo.receivedBlockReport(); } finally { endTime = Time.monotonicNow();
blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP)); blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3)); dd.addBlock(blockInfoList.get(i)); assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks()); Iterator<BlockInfoContiguous> it = dd.getBlockIterator(); int len = 0; while (it.hasNext()) { headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd); headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex); blockInfoList.get(i), dd.getBlockListHeadForTesting()); BlockInfoContiguous temp = dd.getBlockListHeadForTesting(); curIndex = 0; headIndex = 0; dd.moveBlockToHead(temp, curIndex, headIndex); assertEquals( "Moving head to the head of the list shopuld not change the list", temp, dd.getBlockListHeadForTesting()); temp = dd.getBlockListHeadForTesting(); assertNotNull("Head should not be null", temp); int c = MAX_BLOCKS - 1; headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd); Random rand = new Random();
.getNodes(block); for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) { if (storage.getState() != State.NORMAL) { continue; final DatanodeDescriptor cur = storage.getDatanodeDescriptor(); if (storage.areBlockContentsStale()) { LOG.trace("BLOCK* processExtraRedundancyBlock: Postponing {}" + " since storage {} does not yet have up-to-date information.",
if (!excessTypes.contains(storage.getStorageType())) { continue; final DatanodeDescriptor node = storage.getDatanodeDescriptor(); long free = storage.getRemaining(); long lastHeartbeat = node.getLastUpdateMonotonic(); if (lastHeartbeat < oldestHeartbeat) { return null; excessTypes.remove(storage.getStorageType()); return storage;
if (storageInfo.numBlocks() == 0) { DatanodeStorageInfo info = storageMap.remove(storageInfo.getStorageID()); if (!hasStorageType(info.getStorageType())) { info.getStorageType()); storageInfo, storageInfo.numBlocks());
AddBlockResult result = storageInfo.addBlock(delimiter); assert result == AddBlockResult.ADDED : "Delimiting block cannot be present in the node"; headIndex = storageInfo.moveBlockToHead(storedBlock, curIndex, headIndex); while(it.hasNext()) toRemove.add(it.next()); storageInfo.removeBlock(delimiter);
/** @return the node of the second replica */ private static DatanodeDescriptor secondNode(Node localMachine, List<DatanodeStorageInfo> results) { // find the second replica for(DatanodeStorageInfo nextStorage : results) { DatanodeDescriptor nextNode = nextStorage.getDatanodeDescriptor(); if (nextNode != localMachine) { return nextNode; } } return null; }
/**
 * Build an iterator over the blocks of the given storages, positioned at
 * {@code startBlock} counted across all storages in order.
 *
 * @param startBlock global index of the first block to yield; must be >= 0
 * @param storages the storages whose blocks are concatenated
 * @throws IllegalArgumentException if {@code startBlock} is negative
 */
private BlockIterator(final int startBlock,
    final DatanodeStorageInfo... storages) {
  if (startBlock < 0) {
    throw new IllegalArgumentException(
        "Illegal value startBlock = " + startBlock);
  }
  final List<Iterator<BlockInfo>> iters = new ArrayList<>();
  int toSkip = startBlock;
  int seenBlocks = 0;
  for (DatanodeStorageInfo storage : storages) {
    final int numBlocks = storage.numBlocks();
    seenBlocks += numBlocks;
    if (seenBlocks <= startBlock) {
      // This storage lies entirely before startBlock; skip it wholesale
      // instead of iterating through it.
      toSkip -= numBlocks;
    } else {
      iters.add(storage.getBlockIterator());
    }
  }
  this.iterators = Collections.unmodifiableList(iters);
  // Advance within the first retained storage up to startBlock.
  while (toSkip > 0 && hasNext()) {
    next();
    toSkip--;
  }
}
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) { List<DatanodeDescriptor> liveNodes = new ArrayList<>(); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas .getNodes(storedBlock); for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) { if (storage.getStorageType() == StorageType.PROVIDED && storage.getState() == State.NORMAL) { // assume the policy is satisfied for blocks on PROVIDED storage // as long as the storage is in normal state. return true; } final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage); // Nodes under maintenance should be counted as valid replicas from // rack policy point of view. if (!cur.isDecommissionInProgress() && !cur.isDecommissioned() && ((corruptNodes == null) || !corruptNodes.contains(cur))) { liveNodes.add(cur); } } DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]); BlockType blockType = storedBlock.getBlockType(); BlockPlacementPolicy placementPolicy = placementPolicies .getPolicy(blockType); int numReplicas = blockType == STRIPED ? ((BlockInfoStriped) storedBlock) .getRealTotalBlockNum() : storedBlock.getReplication(); return placementPolicy.verifyBlockPlacement(locs, numReplicas) .isPlacementPolicySatisfied(); }