public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) {
  // First check whether the block belongs to a different storage
  // on the same DN.
  AddBlockResult result = AddBlockResult.ADDED;
  DatanodeStorageInfo otherStorage =
      b.findStorageInfo(getDatanodeDescriptor());

  if (otherStorage != null) {
    if (otherStorage != this) {
      // The block belongs to a different storage. Remove it first.
      otherStorage.removeBlock(b);
      result = AddBlockResult.REPLACED;
    } else {
      // The block is already associated with this storage.
      return AddBlockResult.ALREADY_EXIST;
    }
  }

  b.addStorage(this, reportedBlock);
  blocks.add(b);
  return result;
}
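// A hedged, self-contained mini-model (not HDFS code; all names below are
// invented for illustration) of the three-way outcome above: a block newly
// reported for this datanode is ADDED, a block that moved between storages
// on the same datanode is REPLACED, and a duplicate report is ALREADY_EXIST.
import java.util.HashMap;
import java.util.Map;

class StorageTransitionDemo {
  enum AddBlockResult { ADDED, REPLACED, ALREADY_EXIST }

  // blockId -> id of the storage currently holding it on this datanode
  static final Map<Long, String> blockToStorage = new HashMap<>();

  static AddBlockResult addBlock(long blockId, String storageId) {
    String other = blockToStorage.get(blockId);
    if (other != null) {
      if (!other.equals(storageId)) {
        blockToStorage.put(blockId, storageId); // moved to another storage
        return AddBlockResult.REPLACED;
      }
      return AddBlockResult.ALREADY_EXIST;      // duplicate report; no-op
    }
    blockToStorage.put(blockId, storageId);
    return AddBlockResult.ADDED;
  }

  public static void main(String[] args) {
    System.out.println(addBlock(1L, "s1")); // ADDED
    System.out.println(addBlock(1L, "s2")); // REPLACED
    System.out.println(addBlock(1L, "s2")); // ALREADY_EXIST
  }
}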
private static void logAllocatedBlock(String src, BlockInfo b) {
  if (!NameNode.stateChangeLog.isInfoEnabled()) {
    return;
  }
  StringBuilder sb = new StringBuilder(150);
  sb.append("BLOCK* allocate ");
  b.appendStringTo(sb);
  sb.append(", ");
  BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
  if (uc != null) {
    uc.appendUCPartsConcise(sb);
  }
  sb.append(" for ").append(src); // avoid String concatenation inside append
  NameNode.stateChangeLog.info(sb.toString());
}
/**
 * Commit a block of a file.
 *
 * @param block the stored block to be committed
 * @param commitBlock contains the client-reported block length and
 *          generation stamp
 * @return true if the block is changed to the committed state.
 * @throws IOException if the client-reported generation stamp does not
 *           match the stored block's generation stamp.
 */
private boolean commitBlock(final BlockInfo block, final Block commitBlock)
    throws IOException {
  if (block.getBlockUCState() == BlockUCState.COMMITTED) {
    return false;
  }
  assert block.getNumBytes() <= commitBlock.getNumBytes() :
      "commitBlock length is less than the stored one "
      + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
  if (block.getGenerationStamp() != commitBlock.getGenerationStamp()) {
    throw new IOException("Commit block with mismatching GS. NN has "
        + block + ", client submits " + commitBlock);
  }
  List<ReplicaUnderConstruction> staleReplicas =
      block.commitBlock(commitBlock);
  removeStaleReplicas(staleReplicas, block);
  return true;
}
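// A hedged sketch (invented names, not HDFS code) of the two guards above:
// committing an already-COMMITTED block is a no-op, and a generation-stamp
// mismatch between the stored block and the client's report is rejected.
import java.io.IOException;

class CommitGuardDemo {
  static boolean commit(boolean alreadyCommitted, long storedGs,
      long reportedGs) throws IOException {
    if (alreadyCommitted) {
      return false;               // repeated commit is a no-op
    }
    if (storedGs != reportedGs) {
      throw new IOException("Commit block with mismatching GS. NN has "
          + storedGs + ", client submits " + reportedGs);
    }
    return true;                  // the block moves to COMMITTED
  }

  public static void main(String[] args) throws IOException {
    System.out.println(commit(false, 1001L, 1001L)); // true
    System.out.println(commit(true, 1001L, 1001L));  // false
  }
}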
/**
 * Do some check when adding a block to blocksmap.
 * For HDFS-7994 to check whether the block is a NonEcBlockUsingStripedID.
 */
public BlockInfo addBlockCollectionWithCheck(
    BlockInfo block, BlockCollection bc) {
  if (!hasNonEcBlockUsingStripedID && !block.isStriped()
      && BlockIdManager.isStripedBlockID(block.getBlockId())) {
    hasNonEcBlockUsingStripedID = true;
  }
  return addBlockCollection(block, bc);
}
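// A hedged mini-model of the HDFS-7994 condition above: a *contiguous*
// (non-EC) block whose ID falls in the striped-ID range trips a one-way
// latch. This sketch assumes striped block IDs occupy the negative ID range
// (the sign bit marks them); treat that detail as an assumption of the
// example, and all names below as invented.
class StripedIdCheckDemo {
  static boolean hasNonEcBlockUsingStripedID = false;

  static boolean isStripedBlockID(long id) {
    return id < 0; // assumption: striped IDs use the negative range
  }

  static void addWithCheck(long blockId, boolean isStriped) {
    if (!hasNonEcBlockUsingStripedID && !isStriped
        && isStripedBlockID(blockId)) {
      hasNonEcBlockUsingStripedID = true; // legacy non-EC block detected
    }
  }

  public static void main(String[] args) {
    addWithCheck(-100L, false); // contiguous block with a striped-range id
    System.out.println(hasNonEcBlockUsingStripedID); // true
  }
}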
/**
 * Remove the block from the block map;
 * remove it from all data-node lists it belongs to;
 * and remove all data-node locations associated with the block.
 */
void removeBlock(BlockInfo block) {
  BlockInfo blockInfo = blocks.remove(block);
  if (blockInfo == null) {
    return;
  }
  decrementBlockStat(block);

  assert blockInfo.getBlockCollectionId() == INodeId.INVALID_INODE_ID;
  final int size = blockInfo.isStriped() ?
      blockInfo.getCapacity() : blockInfo.numNodes();
  for (int idx = size - 1; idx >= 0; idx--) {
    DatanodeDescriptor dn = blockInfo.getDatanode(idx);
    if (dn != null) {
      removeBlock(dn, blockInfo); // remove from the list and wipe the location
    }
  }
}
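// A hedged, generic illustration of why the loop above walks indices from
// the end: removing entries while iterating downward leaves the not-yet-
// visited lower indices stable. Invented names; not HDFS code.
import java.util.ArrayList;
import java.util.List;

class ReverseRemoveDemo {
  public static void main(String[] args) {
    List<String> locations = new ArrayList<>(List.of("dn1", "dn2", "dn3"));
    for (int idx = locations.size() - 1; idx >= 0; idx--) {
      locations.remove(idx); // safe: indices below idx are untouched
    }
    System.out.println(locations.isEmpty()); // true
  }
}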
final long oldGenerationStamp = storedBlock.getGenerationStamp();
final long oldNumBytes = storedBlock.getNumBytes();
if (storedBlock.isDeleted()) {
  throw new IOException("The blockCollection of " + storedBlock
      + " is null, likely because the file owning this block, " + src
      + ", was deleted and the block removal is delayed");
}
// ...
if ((!iFile.isUnderConstruction() || storedBlock.isComplete())
    && iFile.getLastBlock().isComplete()) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Unexpected block (=" + oldBlock
        // ...
  }
}
// ...
final long recoveryId = truncatedBlock.getUnderConstructionFeature()
    .getBlockRecoveryId();
copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
if (recoveryId != newgenerationstamp) {
  throw new IOException("The recovery id " + newgenerationstamp
      // ...
}
// ...
storedBlock.setGenerationStamp(newgenerationstamp);
storedBlock.setNumBytes(newlength);
// ...
} else {
  Block bi = new Block(storedBlock);
  if (storedBlock.isStriped()) {
    bi.setBlockId(bi.getBlockId() + i);
    // ...
for (nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
  curBlock = blocks[nrCompleteBlocks];
  if (!curBlock.isComplete()) {
    break;
  }
  assert blockManager.hasMinStorage(curBlock) :
      // ...
}
// ...
    nrCompleteBlocks == nrBlocks - 2 && curBlock != null
    && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
  final String message = "DIR* NameSystem.internalReleaseLease: "
      + "attempt to release a create lock on "
      // ...
}
// ...
BlockUCState lastBlockState = lastBlock.getBlockUCState();
BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
// ...
case UNDER_RECOVERY:
  BlockUnderConstructionFeature uc =
      lastBlock.getUnderConstructionFeature();
  // ...
      recoveryBlock.getBlockId() != lastBlock.getBlockId();
  assert !copyOnTruncate ||
      recoveryBlock.getBlockId() < lastBlock.getBlockId()
      && recoveryBlock.getGenerationStamp() < lastBlock.getGenerationStamp()
      && recoveryBlock.getNumBytes() > lastBlock.getNumBytes()
      : "wrong recoveryBlock";
  // ...
      lastBlock.getBlockType());
  if (uc.getNumExpectedLocations() == 0 && lastBlock.getNumBytes() == 0) {
    // ...
newBlock = (shouldCopyOnTruncate) ?
    fsn.createNewBlock(BlockType.CONTIGUOUS)
    : new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
        fsn.nextGenerationStamp(
            fsn.getBlockManager().isLegacyBlock(oldBlock)));
// ...
truncatedBlockUC.convertToBlockUnderConstruction(
    BlockUCState.UNDER_CONSTRUCTION, blockManager.getStorages(oldBlock));
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.getUnderConstructionFeature().setTruncateBlock(oldBlock);
file.setLastBlock(truncatedBlockUC);
blockManager.addBlockCollection(truncatedBlockUC, file);
// ...
    "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new"
        + " size {} new block {} old block {}",
    truncatedBlockUC.getNumBytes(), newBlock, oldBlock);
} else {
  assert !oldBlock.isComplete() : "oldBlock should be under construction";
  BlockUnderConstructionFeature uc = oldBlock.getUnderConstructionFeature();
  uc.setTruncateBlock(new BlockInfoContiguous(oldBlock,
      oldBlock.getReplication()));
  uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
  uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp());
  truncatedBlockUC = oldBlock;
  // ...
      uc, uc.getTruncateBlock().getNumBytes());
}
// ...
truncatedBlockUC.getUnderConstructionFeature().initializeBlockRecovery(
    // ...
if (oldLastBlock.getBlockId() != pBlock.getBlockId()
    || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
  throw new IOException(
      "Mismatched block IDs or generation stamps for the old last block of file "
      // ...
}
oldLastBlock.setNumBytes(pBlock.getNumBytes());
if (!oldLastBlock.isComplete()) {
  fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
}
// ...
    file.getPreferredBlockReplication());
newBlockInfo.convertToBlockUnderConstruction(
    BlockUCState.UNDER_CONSTRUCTION, null);
fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBlockInfo, file);
  return "";
// ...
final boolean isComplete = storedBlock.isComplete();
Iterator<DatanodeStorageInfo> storagesItr;
StringBuilder sb = new StringBuilder(" [");
final boolean isStriped = storedBlock.isStriped();
Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
if (isComplete) {
  if (isStriped) {
    long blockId = storedBlock.getBlockId();
    Iterable<StorageAndBlockIndex> sis =
        ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
    // ...
  }
  storagesItr = storedBlock.getStorageInfos();
} else {
  storagesItr = storedBlock.getUnderConstructionFeature()
      .getExpectedStorageLocationsIterator();
if (!blockInfo.isComplete()) {
  LOG.debug("Block {}: can't cache this block, because it is not yet"
      + " complete.", cachedBlock.getBlockId());
  // ...
}
// ...
int numReplicas = blockInfo.getCapacity();
Collection<DatanodeDescriptor> corrupt =
    blockManager.getCorruptReplicas(blockInfo);
int outOfCapacity = 0;
for (int i = 0; i < numReplicas; i++) {
  DatanodeDescriptor datanode = blockInfo.getDatanode(i);
  if (datanode == null) {
    continue;
  }
  // ...
  BlockInfo info =
      blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
  if (info != null) {
    pendingBytes -= info.getNumBytes(); // subtract pending cached bytes
  }
  // ...
  info = blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
  if (info != null) {
    pendingBytes += info.getNumBytes(); // add back pending uncached bytes
  }
  // ...
  if (pendingCapacity < blockInfo.getNumBytes()) {
    LOG.trace("Block {}: DataNode {} is not a valid possibility "
        + "because the block has size {}, but the DataNode only has {} "
        + "bytes of cache remaining ({} pending bytes, {} already cached.)",
        blockInfo.getBlockId(), datanode.getDatanodeUuid(),
        blockInfo.getNumBytes(), pendingCapacity, pendingBytes,
        datanode.getCacheRemaining());
    outOfCapacity++;
    // ...
case COMPLETE:
case COMMITTED:
  if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
    final long reportedGS = reported.getGenerationStamp();
    return new BlockToMarkCorrupt(new Block(reported), storedBlock,
        reportedGS, "block is " + ucState + " and reported genstamp "
        + reportedGS + " does not match genstamp in block map "
        + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
  }
  // ...
  if (storedBlock.isStriped()) {
    assert BlockIdManager.isStripedBlockID(reported.getBlockId());
    assert storedBlock.getBlockId() ==
        BlockIdManager.convertToStripedID(reported.getBlockId());
    BlockInfoStriped stripedBlock = (BlockInfoStriped) storedBlock;
    // ...
        stripedBlock.getDataBlockNum(), reportedBlkIdx);
  } else {
    wrongSize = storedBlock.getNumBytes() != reported.getNumBytes();
  }
  // ...
        "block is " + ucState + " and reported length "
        + reported.getNumBytes() + " does not match "
        + "length in block map " + storedBlock.getNumBytes(),
        Reason.SIZE_MISMATCH);
  } else {
    // ...
  }
// ...
  if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
    final long reportedGS = reported.getGenerationStamp();
    return new BlockToMarkCorrupt(new Block(reported), storedBlock,
        reportedGS,
        // ...
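// A hedged mini-model (invented names) of the two corruption checks visible
// above for a COMPLETE/COMMITTED block: a generation-stamp mismatch or a
// length mismatch each mark the reported replica corrupt with the matching
// reason.
class CorruptCheckDemo {
  enum Reason { NONE, GENSTAMP_MISMATCH, SIZE_MISMATCH }

  static Reason check(long storedGs, long storedLen,
      long reportedGs, long reportedLen) {
    if (storedGs != reportedGs) {
      return Reason.GENSTAMP_MISMATCH;
    }
    if (storedLen != reportedLen) {
      return Reason.SIZE_MISMATCH;
    }
    return Reason.NONE;
  }

  public static void main(String[] args) {
    System.out.println(check(1001L, 64L, 1000L, 64L)); // GENSTAMP_MISMATCH
    System.out.println(check(1001L, 64L, 1001L, 32L)); // SIZE_MISMATCH
    System.out.println(check(1001L, 64L, 1001L, 64L)); // NONE
  }
}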
/**
 * Commit the block's length and generation stamp as reported by the client.
 * Set the block state to {@link BlockUCState#COMMITTED}.
 * @param block - contains client reported block length and generation
 * @return the list of replicas that are now stale, i.e. carry an outdated
 *         generation stamp.
 * @throws IOException if block ids are inconsistent.
 */
List<ReplicaUnderConstruction> commitBlock(Block block) throws IOException {
  if (getBlockId() != block.getBlockId()) {
    throw new IOException("Trying to commit inconsistent block: id = "
        + block.getBlockId() + ", expected id = " + getBlockId());
  }
  Preconditions.checkState(!isComplete());
  uc.commit();
  this.setNumBytes(block.getNumBytes());
  // Sort out invalid replicas.
  return setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
}
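// A hedged mini-model (invented types; not HDFS code) of the "sort out
// invalid replicas" step: once the block's generation stamp is set to the
// client-reported value, any replica still carrying a different stamp is
// returned as stale.
import java.util.ArrayList;
import java.util.List;

class StaleReplicaDemo {
  record Replica(String datanode, long genStamp) {}

  static List<Replica> findStale(List<Replica> replicas, long committedGs) {
    List<Replica> stale = new ArrayList<>();
    for (Replica r : replicas) {
      if (r.genStamp() != committedGs) {
        stale.add(r); // lagging replica, e.g. left over from a failed pipeline
      }
    }
    return stale;
  }

  public static void main(String[] args) {
    System.out.println(findStale(List.of(
        new Replica("dn1", 1002L), new Replica("dn2", 1001L)), 1002L));
    // -> [Replica[datanode=dn2, genStamp=1001]]
  }
}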
/**
 * If IBR is not sent from expected locations yet, add the datanodes to
 * pendingReconstruction in order to keep RedundancyMonitor from scheduling
 * the block.
 */
public void addExpectedReplicasToPending(BlockInfo blk) {
  if (!blk.isStriped()) {
    DatanodeStorageInfo[] expectedStorages =
        blk.getUnderConstructionFeature().getExpectedStorageLocations();
    if (expectedStorages.length - blk.numNodes() > 0) {
      ArrayList<DatanodeDescriptor> pendingNodes = new ArrayList<>();
      for (DatanodeStorageInfo storage : expectedStorages) {
        DatanodeDescriptor dnd = storage.getDatanodeDescriptor();
        if (blk.findStorageInfo(dnd) == null) {
          pendingNodes.add(dnd);
        }
      }
      pendingReconstruction.increment(blk,
          pendingNodes.toArray(new DatanodeDescriptor[pendingNodes.size()]));
    }
  }
}
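// A hedged sketch of the set difference computed above: the datanodes that
// are expected to hold a replica but have not yet appeared in the block's
// storage list are the ones queued as pending. Invented names; not HDFS code.
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

class PendingNodesDemo {
  static List<String> pendingNodes(List<String> expected,
      Set<String> reported) {
    List<String> pending = new ArrayList<>();
    for (String dn : expected) {
      if (!reported.contains(dn)) {
        pending.add(dn); // IBR not received yet from this datanode
      }
    }
    return pending;
  }

  public static void main(String[] args) {
    System.out.println(pendingNodes(
        List.of("dn1", "dn2", "dn3"), Set.of("dn1"))); // [dn2, dn3]
  }
}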
/**
 * Add a to-be-deleted block into the
 * {@link BlocksMapUpdateInfo#toDeleteList}
 * @param toDelete the to-be-deleted block
 */
public void addDeleteBlock(BlockInfo toDelete) {
  assert toDelete != null : "toDelete is null";
  toDelete.delete();
  toDeleteList.add(toDelete);
  // If the file is being truncated
  // the copy-on-truncate block should also be collected for deletion
  BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
  if (uc == null) {
    return;
  }
  BlockInfo truncateBlock = uc.getTruncateBlock();
  if (truncateBlock == null || truncateBlock.equals(toDelete)) {
    return;
  }
  addDeleteBlock(truncateBlock);
}
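// A hedged trace (invented ids) of the collection above for a file under
// truncation: deleting the original last block also collects its
// copy-on-truncate companion, while the truncateBlock.equals(toDelete) guard
// keeps a block that is its own truncate target from being collected twice.
//
//   addDeleteBlock(lastBlock)              // id=10, truncateBlock id=11
//     -> lastBlock marked deleted, appended to toDeleteList
//     -> addDeleteBlock(copyBlock)         // id=11, one-level recursion
//          -> copyBlock marked deleted, appended to toDeleteList
//          -> no further truncate block, recursion stops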
long size = 0;
for (BlockInfo b : blocks) {
  size += b.getNumBytes();
}
// ...
// Walk blocks from the tail while the accumulated size still exceeds the
// truncation target (loop header reconstructed from its update clause).
for (int i = blocks.length - 1; i >= 0 && size > newLength;
     size -= blocks[i].getNumBytes(), --i) {
  BlockInfo bi = blocks[i];
  long truncatedBytes;
  if (size - newLength < bi.getNumBytes()) {
    truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
  } else {
    truncatedBytes = bi.getNumBytes();
  }
  // ...
  if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
    truncatedBytes -= bi.getNumBytes();
  }
  delta.addStorageSpace(-truncatedBytes * bi.getReplication());
  if (bsps != null) {
    List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        // ...
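// A hedged worked example of the delta above (numbers invented; assumes the
// loop reconstruction is faithful): preferred block size 128 MB, blocks of
// 128, 128 and 44 MB (size = 300 MB), truncated to newLength = 200 MB.
//   i=2: size-newLength = 100 MB >= 44 MB  -> truncatedBytes = 44 MB
//        (the block is removed outright)
//   i=1: size-newLength = 56 MB < 128 MB   -> truncatedBytes =
//        128 MB - 128 MB = 0, i.e. a full preferred block is charged back
//        because the partially truncated block may be rewritten in full
//        during copy-on-truncate recovery
//   i=0: size (128 MB) <= newLength        -> loop ends
// Net storage-space delta: -44 MB * replication.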
BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
if (uc == null) {
  throw new IOException("Recovery block " + b
      // ...
}
// ...
    uc.getTruncateBlock().getBlockId() != b.getBlockId();
ExtendedBlock primaryBlock = (copyOnTruncateRecovery) ?
    new ExtendedBlock(blockPoolId, uc.getTruncateBlock()) :
    // ...
rBlock = new RecoveringBlock(primaryBlock, recoveryInfos,
    uc.getBlockRecoveryId());
if (b.isStriped()) {
  rBlock = new RecoveringStripedBlock(rBlock, uc.getBlockIndices(),
      ((BlockInfoStriped) b).getErasureCodingPolicy());
BlockUCState ucState = storedBlock.getBlockUCState();
BlockToMarkCorrupt c = checkReplicaCorrupt(
    iblk, reportedState, storedBlock, ucState,
    // ...
storedBlock.getUnderConstructionFeature()
    .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
  int numOfReplicas = storedBlock.getUnderConstructionFeature()
      .getNumExpectedLocations();
  bmSafeMode.incrementSafeBlockCount(numOfReplicas, storedBlock);
}
if (lastBlock != null && lastBlock.getBlockUCState().equals(
    HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
  dontRemoveBlock = lastBlock.getUnderConstructionFeature()
      .getTruncateBlock();
}
// ...
if (dontRemoveBlock == null
    || !removedBlocks[i].equals(dontRemoveBlock)) {
  reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
}
if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
  // ...
      blockInfo.getBlockUCState());
  continue;
}
Block block = new Block(blockInfo.getBlockId());
CachedBlock ncblock = new CachedBlock(block.getBlockId(),
    directive.getReplication(), mark);
// ...
List<DatanodeDescriptor> cachedOn =
    ocblock.getDatanodes(Type.CACHED);
long cachedByBlock = Math.min(cachedOn.size(),
    directive.getReplication()) * blockInfo.getNumBytes();
cachedTotal += cachedByBlock;
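// A hedged worked example of the accounting above (numbers invented): a
// 128 MB block cached on 3 datanodes under a directive with replication 2
// counts min(3, 2) * 128 MB = 256 MB toward cachedTotal.
class CachedBytesDemo {
  public static void main(String[] args) {
    long numBytes = 128L << 20;        // 128 MB block
    int cachedOnSize = 3;              // datanodes currently caching it
    int replication = 2;               // directive.getReplication()
    long cachedByBlock = Math.min(cachedOnSize, replication) * numBytes;
    System.out.println(cachedByBlock); // 268435456 (256 MB)
  }
}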