/**
 * Verify the redundancy state of every block in the given collection.
 * A block without enough effective replicas (counting pending
 * reconstructions) is queued for reconstruction; otherwise, if it carries
 * more redundancy than expected, it is handed to extra-redundancy
 * processing.
 *
 * @param bc the block collection (file) whose blocks are checked
 */
public void checkRedundancy(BlockCollection bc) {
  for (BlockInfo block : bc.getBlocks()) {
    final short expected = getExpectedRedundancyNum(block);
    final NumberReplicas counts = countNodes(block);
    final int pendingNum = pendingReconstruction.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, counts, pendingNum)) {
      // Sufficiently replicated; trim any extra redundancy if present.
      if (shouldProcessExtraRedundancy(counts, expected)) {
        processExtraRedundancyBlock(block, expected, null, null);
      }
    } else {
      // Short on effective replicas: schedule reconstruction work.
      neededReconstruction.add(block, counts.liveReplicas() + pendingNum,
          counts.readOnlyReplicas(), counts.outOfServiceReplicas(),
          expected);
    }
  }
}
BlockInfo lastBlock = bc.getLastBlock(); if (lastBlock == null || bc.getPreferredBlockSize() == lastBlock.getNumBytes() - bytesToRemove) { return null; bc.convertLastBlockToUC(lastBlock, targets); final long fileLength = bc.computeContentSummary( getStoragePolicySuite()).getLength(); final long pos = fileLength - lastBlock.getNumBytes();
/**
 * Copy constructor.
 * This is used to convert a BlockInfoUnderConstruction into a
 * {@code BlockInfoContiguous} — presumably when the block is completed;
 * TODO(review) confirm against callers.
 *
 * @param from BlockInfo to copy from.
 */
protected BlockInfoContiguous(BlockInfoContiguous from) {
  // Delegate to the primary constructor, taking the replication factor
  // from the source block's owning collection.
  this(from, from.bc.getBlockReplication());
  // Preserve the back-reference to the owning block collection.
  this.bc = from.bc;
}
/**
 * Captures the inputs needed to schedule reconstruction work for one
 * block. The source path, block size and storage policy id are snapshotted
 * from the block/collection at construction time.
 *
 * @param block the block to reconstruct
 * @param bc the collection (file) owning the block; supplies its name
 *     and storage policy id
 * @param srcNodes source datanodes, as selected by the caller
 * @param containingNodes datanodes holding this block (per caller)
 * @param liveReplicaStorages storages with live replicas (per caller)
 * @param additionalReplRequired number of additional replicas required
 * @param priority scheduling priority for this work item
 */
public BlockReconstructionWork(BlockInfo block,
    BlockCollection bc,
    DatanodeDescriptor[] srcNodes,
    List<DatanodeDescriptor> containingNodes,
    List<DatanodeStorageInfo> liveReplicaStorages,
    int additionalReplRequired,
    int priority) {
  this.block = block;
  this.srcPath = bc.getName();
  this.blockSize = block.getNumBytes();
  this.storagePolicyID = bc.getStoragePolicyID();
  this.srcNodes = srcNodes;
  this.containingNodes = containingNodes;
  this.liveReplicaStorages = liveReplicaStorages;
  this.additionalReplRequired = additionalReplRequired;
  this.priority = priority;
  // Targets are not known yet; they are assigned later.
  this.targets = null;
}
numLive); if (isDecommission && numExpected > numLive) { if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
BlockCollection bc, NumberReplicas numberReplicas) { final int numExpected = bc.getBlockReplication(); final int numLive = numberReplicas.liveReplicas(); if (numLive >= numExpected numLive); if (numExpected > numLive) { if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
BlockInfoContiguous oldBlock = bc.getLastBlock(); if(oldBlock == null || bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove) return null; assert oldBlock == getStoredBlock(oldBlock) : bc.setLastBlock(oldBlock, targets); blocksMap.replaceBlock(ucBlock); final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength(); final long pos = fileLength - ucBlock.getNumBytes(); return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
/**
 * Log a one-line summary of a block's replication state: expected vs.
 * live/corrupt/decommissioned/excess replica counts, whether the owning
 * file is open, and the datanodes currently holding the block.
 */
private static void logBlockReplicationInfo(Block block, BlockCollection bc,
    DatanodeDescriptor srcNode, NumberReplicas num,
    Iterable<DatanodeStorageInfo> storages) {
  // Render the datanodes holding this block, space-separated.
  StringBuilder nodes = new StringBuilder();
  for (DatanodeStorageInfo storage : storages) {
    nodes.append(storage.getDatanodeDescriptor()).append(" ");
  }
  LOG.info("Block: " + block + ", Expected Replicas: "
      + bc.getBlockReplication() + ", live replicas: " + num.liveReplicas()
      + ", corrupt replicas: " + num.corruptReplicas()
      + ", decommissioned replicas: " + num.decommissioned()
      + ", decommissioning replicas: " + num.decommissioning()
      + ", excess replicas: " + num.excessReplicas()
      + ", Is Open File: " + bc.isUnderConstruction()
      + ", Datanodes having this block: " + nodes
      + ", Current Datanode: " + srcNode
      + ", Is current datanode decommissioning: "
      + srcNode.isDecommissionInProgress());
}
/**
 * Check the replication of every block in the collection. Under-replicated
 * blocks are inserted into the replication queue; blocks with more live
 * replicas than the expected factor are processed as over-replicated.
 *
 * @param bc the block collection (file) whose blocks are checked
 */
public void checkReplication(BlockCollection bc) {
  final short expected = bc.getBlockReplication();
  for (Block block : bc.getBlocks()) {
    final NumberReplicas num = countNodes(block);
    final int live = num.liveReplicas();
    if (isNeededReplication(block, expected, live)) {
      // Under-replicated: queue for replication.
      neededReplications.add(block, live,
          num.decommissionedAndDecommissioning(), expected);
    } else if (live > expected) {
      // Over-replicated: schedule excess-replica processing.
      processOverReplicatedBlock(block, expected, null, null);
    }
  }
}
@Override public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) { assert hasReadLock(); final BlockCollection bc = blockUC.getBlockCollection(); if (bc == null || !(bc instanceof INodeFile) || !bc.isUnderConstruction()) { return false; } String fullName = bc.getName(); try { if (fullName != null && fullName.startsWith(Path.SEPARATOR) && dir.getINode(fullName) == bc) { // If file exists in normal path then no need to look in snapshot return false; } } catch (UnresolvedLinkException e) { LOG.error("Error while resolving the link : " + fullName, e); return false; } /* * 1. if bc is under construction and also with snapshot, and * bc is not in the current fsdirectory tree, bc must represent a snapshot * file. * 2. if fullName is not an absolute path, bc cannot be existent in the * current fsdirectory tree. * 3. if bc is not the current node associated with fullName, bc must be a * snapshot inode. */ return true; }
if(commitBlock == null) return false; // not committing, this is a block allocation retry BlockInfoContiguous lastBlock = bc.getLastBlock(); if(lastBlock == null) return false; // no blocks in file yet if(numReplicas.liveReplicas() + numReplicas.decommissioning() >= minReplication) completeBlock(bc, bc.numBlocks()-1, iip, false); return b;
bc.getStoragePolicyID()); final List<StorageType> excessTypes = storagePolicy.chooseExcess( (short) numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));
String fileName = (bc == null) ? "[orphaned]" : bc.getName(); out.print(fileName + ": ");
if (bc.isUnderConstruction()) { INode ucFile = namesystem.getFSDirectory().getInode(bc.getId()); if (!(ucFile instanceof INodeFile) || !ucFile.asFile().isUnderConstruction()) {
+ num.liveEnteringMaintenanceReplicas() + ", excess replicas: " + num.excessReplicas() + ", Is Open File: " + bc.isUnderConstruction() + ", Datanodes having this block: " + nodeList + ", Current Datanode: " + srcNode + ", Is current datanode decommissioning: "
if(commitBlock == null) return false; // not committing, this is a block allocation retry BlockInfo lastBlock = bc.getLastBlock(); if(lastBlock == null) return false; // no blocks in file yet
BlockCollection bc, NumberReplicas numberReplicas) { final int numExpected = bc.getBlockReplication(); final int numLive = numberReplicas.liveReplicas(); if (numLive >= numExpected numLive); if (numExpected > numLive) { if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
BlockInfoContiguous oldBlock = bc.getLastBlock(); if(oldBlock == null || bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove) return null; assert oldBlock == getStoredBlock(oldBlock) : bc.setLastBlock(oldBlock, targets); blocksMap.replaceBlock(ucBlock); final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength(); final long pos = fileLength - ucBlock.getNumBytes(); return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
} else { BlockCollection bc = getBlockCollection(blockInfo); if (bc.getStoragePolicyID() == lpPolicy.getId()) { filesToDelete.add(bc); LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas."); BlocksMapUpdateInfo toRemoveBlocks = FSDirDeleteOp.deleteInternal(