/**
 * Copy block references into the snapshot up to the current
 * {@link #fileSize}. Should be done only once.
 */
public void setBlocks(BlockInfo[] blocks) {
  if (this.blocks != null) {
    return; // one-shot: the snapshot's block list is already recorded
  }
  // Count how many leading blocks are needed to cover fileSize bytes.
  long bytesCovered = 0;
  int count = 0;
  while (count < blocks.length && bytesCovered < fileSize) {
    bytesCovered += blocks[count].getNumBytes();
    count++;
  }
  this.blocks = Arrays.copyOf(blocks, count);
}
/**
 * Find the block containing the byte at file position {@code endPos}
 * (falling through to the last scanned block when {@code endPos} is at or
 * past the end of the listed blocks) and build a LocatedBlock for it.
 */
private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
    final BlockInfo[] blocks, final long endPos, final AccessMode mode)
    throws IOException {
  // A zero-length first block marks an empty listing — scan nothing.
  // NOTE(review): presumably only the last (under-construction) block can
  // have length 0, so checking blocks[0] suffices — confirm against callers.
  final int scanLimit = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
  long blockStart = 0;
  int idx = 0;
  while (idx < scanLimit) {
    final long len = blocks[idx].getNumBytes();
    if (blockStart + len >= endPos) {
      break; // endPos falls inside (or at the end of) this block
    }
    blockStart += len;
    idx++;
  }
  return createLocatedBlock(locatedBlocks, blocks[idx], blockStart, mode);
}
/**
 * Append LocatedBlocks covering the byte range [offset, offset + length)
 * of the file to {@code locatedBlocks}, stopping early when the builder
 * reports it has reached its maximum block count.
 */
private void createLocatedBlockList(
    LocatedBlockBuilder locatedBlocks,
    final BlockInfo[] blocks,
    final long offset, final long length, final AccessMode mode)
    throws IOException {
  int curBlk;
  long curPos = 0, blkSize = 0;
  // A zero-length first block marks an empty listing.
  // NOTE(review): presumably only the last (under-construction) block can
  // be 0-length, so checking blocks[0] suffices — confirm against callers.
  int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
  // Skip whole blocks that end at or before the requested offset;
  // afterwards curPos is the file offset where block curBlk starts.
  for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
    blkSize = blocks[curBlk].getNumBytes();
    assert blkSize > 0 : "Block of size 0";
    if (curPos + blkSize > offset) {
      break;
    }
    curPos += blkSize;
  }

  if (nrBlocks > 0 && curBlk == nrBlocks)   // offset >= end of file
    return;

  long endOff = offset + length;
  // Emit at least one block (do-while), then keep going until the range is
  // covered, the blocks run out, or the builder hits its block limit.
  do {
    locatedBlocks.addBlock(
        createLocatedBlock(locatedBlocks, blocks[curBlk], curPos, mode));
    curPos += blocks[curBlk].getNumBytes();
    curBlk++;
  } while (curPos < endOff
      && curBlk < blocks.length
      && !locatedBlocks.isBlockMax());
  return;
}
/**
 * Captures everything needed to schedule reconstruction work for one
 * under-replicated block; targets are chosen later (initialized to null).
 */
public BlockReconstructionWork(BlockInfo block,
    BlockCollection bc,
    DatanodeDescriptor[] srcNodes,
    List<DatanodeDescriptor> containingNodes,
    List<DatanodeStorageInfo> liveReplicaStorages,
    int additionalReplRequired,
    int priority) {
  // Snapshot the block's identity and current size.
  this.block = block;
  this.blockSize = block.getNumBytes();
  // File-level attributes taken from the owning collection.
  this.srcPath = bc.getName();
  this.storagePolicyID = bc.getStoragePolicyID();
  // Replica placement inputs supplied by the caller.
  this.srcNodes = srcNodes;
  this.containingNodes = containingNodes;
  this.liveReplicaStorages = liveReplicaStorages;
  this.additionalReplRequired = additionalReplRequired;
  this.priority = priority;
  // Target storages are not known yet; chosen in a later scheduling step.
  this.targets = null;
}
long size = 0; for(; n < oldBlocks.length && max > size; n++) { size += oldBlocks[n].getNumBytes();
size += b.getNumBytes(); size -= blocks[i].getNumBytes(), --i) { BlockInfo bi = blocks[i]; long truncatedBytes; if (size - newLength < bi.getNumBytes()) { truncatedBytes = bi.getNumBytes() - getPreferredBlockSize(); } else { truncatedBytes = bi.getNumBytes(); truncatedBytes -= bi.getNumBytes();
long size = lastBlk.getNumBytes(); if (!lastBlk.isComplete()) { if (!includesLastUcBlock) { size += blocks[i].getNumBytes();
blockStriped.getDataBlockNum(), blockStriped.getCellSize())); return block.getNumBytes() / blockStriped.getDataBlockNum(); }else{ results.add(blkWithLocs); return block.getNumBytes();
/**
 * Commit a block of a file.
 *
 * @param block the stored block to transition to the committed state
 * @param commitBlock client-reported block carrying the final length and
 *                    generation stamp
 * @return true if the block is changed to committed state; false if it was
 *         already committed (idempotent no-op)
 * @throws IOException if the client-reported generation stamp does not
 *         match the stored block's generation stamp
 */
private boolean commitBlock(final BlockInfo block, final Block commitBlock)
    throws IOException {
  // Already committed: nothing to do.
  if (block.getBlockUCState() == BlockUCState.COMMITTED)
    return false;
  // The stored length may only grow to the client-reported final length.
  assert block.getNumBytes() <= commitBlock.getNumBytes() :
    "commitBlock length is less than the stored one "
    + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
  if(block.getGenerationStamp() != commitBlock.getGenerationStamp()) {
    throw new IOException("Commit block with mismatching GS. NN has "
        + block + ", client submits " + commitBlock);
  }
  // Committing may invalidate replicas reported with an older GS/length;
  // those stale replicas are removed from the blocks map.
  List<ReplicaUnderConstruction> staleReplicas =
      block.commitBlock(commitBlock);
  removeStaleReplicas(staleReplicas, block);
  return true;
}
curBlock = iter.next(); if(!curBlock.isComplete()) continue; if (curBlock.getNumBytes() < minBlockSize) { continue; curBlock = iter.next(); if(!curBlock.isComplete()) continue; if (curBlock.getNumBytes() < minBlockSize) { continue;
/** * When deleting a file in the current fs directory, and the file is contained * in a snapshot, we should delete the last block if it's under construction * and its size is 0. */ void cleanZeroSizeBlock(final INodeFile f, final BlocksMapUpdateInfo collectedBlocks) { final BlockInfo[] blocks = f.getBlocks(); if (blocks != null && blocks.length > 0 && !blocks[blocks.length - 1].isComplete()) { BlockInfo lastUC = blocks[blocks.length - 1]; if (lastUC.getNumBytes() == 0) { // this is a 0-sized block. do not need check its UC state here collectedBlocks.addDeleteBlock(lastUC); f.removeLastBlock(lastUC); } } } }
"genstamp does not match " + oldGenerationStamp + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else if (block.getNumBytes() != oldNumBytes) { b = new BlockToMarkCorrupt(oldBlock, block, "length does not match " + oldNumBytes + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH); } else { return;
long blockSize = b.isComplete() ? b.getNumBytes() : getPreferredBlockSize(); counts.addStorageSpace(blockSize * replication);
throw new IOException(msg); if (newBlock.getNumBytes() < lastBlock.getNumBytes()) { final String msg = "Update " + oldBlock + " (size=" + oldBlock.getNumBytes() + ") to a smaller size block " + newBlock
final BlockInfo last = blocks[blocks.length - 1]; final long lastPos = last.isComplete()? fileSizeExcludeBlocksUnderConstruction - last.getNumBytes() : fileSizeExcludeBlocksUnderConstruction;
newBlock = (shouldCopyOnTruncate) ? fsn.createNewBlock(BlockType.CONTIGUOUS) : new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(), fsn.nextGenerationStamp(fsn.getBlockManager().isLegacyBlock( oldBlock))); truncatedBlockUC.convertToBlockUnderConstruction( BlockUCState.UNDER_CONSTRUCTION, blockManager.getStorages(oldBlock)); truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.getUnderConstructionFeature().setTruncateBlock(oldBlock); file.setLastBlock(truncatedBlockUC); "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" + " size {} new block {} old block {}", truncatedBlockUC.getNumBytes(), newBlock, oldBlock); } else { uc.setTruncateBlock(new BlockInfoContiguous(oldBlock, oldBlock.getReplication())); uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp()); truncatedBlockUC = oldBlock; uc, uc.getTruncateBlock().getNumBytes());
/**
 * Compute quota change for converting a complete block to a UC block.
 * The delta is the gap between the file's preferred block size and the
 * last block's current size, scaled by replication for storage space and
 * applied per storage type where type quota is supported.
 */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
    INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfo lastBlock = file.getLastBlock();
  if (lastBlock == null) {
    return delta; // no blocks: converting nothing costs nothing
  }
  final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
  final short repl = lastBlock.getReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = fsn.getFSDirectory()
      .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
  for (StorageType t : policy.chooseStorageTypes(repl)) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  return delta;
}
}
replicationFactor = (short) 1; } else { diff = fileINode.getPreferredBlockSize() - completeBlk.getNumBytes(); replicationFactor = fileINode.getFileReplication();
BlockInfo lastBlock = bc.getLastBlock(); if (lastBlock == null || bc.getPreferredBlockSize() == lastBlock.getNumBytes() - bytesToRemove) { return null; final long pos = fileLength - lastBlock.getNumBytes(); return createLocatedBlock(null, lastBlock, pos, BlockTokenIdentifier.AccessMode.WRITE);
for (BlockInfo b: blocks) { short replication = b.getReplication(); long blockSize = b.isComplete() ? b.getNumBytes() : file .getPreferredBlockSize();