@Override // org.apache.hadoop.hdfs.protocol.Block
public void setNumBytes(long numBytes) {
  // Keep the wrapped original replica's length in sync with this one.
  super.setNumBytes(numBytes);
  original.setNumBytes(numBytes);
}
public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block,
    ReplicaInfo replicaInfo, int smallBufferSize,
    Configuration conf) throws IOException {
  // Copy the block's meta and data files into this volume's tmp directory.
  File[] blockFiles = FsDatasetImpl.copyBlockFiles(block.getBlockId(),
      block.getGenerationStamp(), replicaInfo,
      getTmpDir(block.getBlockPoolId()),
      replicaInfo.isOnTransientStorage(), smallBufferSize, conf);

  // Build a TEMPORARY replica backed by the copied files and record the
  // actual on-disk length of the copied block data file.
  ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
      .setBlockId(replicaInfo.getBlockId())
      .setGenerationStamp(replicaInfo.getGenerationStamp())
      .setFsVolume(this)
      .setDirectoryToUse(blockFiles[0].getParentFile())
      .setBytesToReserve(0)
      .build();
  newReplicaInfo.setNumBytes(blockFiles[1].length());
  return newReplicaInfo;
}
    // Truncate the on-disk block file to the recovered length and update
    // the in-memory replica size to match.
    rur.truncateBlock(newlength);
    rur.setNumBytes(newlength);
  } else {
        + memBlockInfo.getNumBytes() + " to "
        + memBlockInfo.getBlockDataLength());
    memBlockInfo.setNumBytes(memBlockInfo.getBlockDataLength());
LOG.warn("Updating size of block " + blockId + " from " + memBlockInfo.getNumBytes() + " to " + memFile.length()); memBlockInfo.setNumBytes(memFile.length());
LOG.warn("Updating size of block " + blockId + " from " + memBlockInfo.getNumBytes() + " to " + memFile.length()); memBlockInfo.setNumBytes(memFile.length());
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    // Record the length of the copied block data file on the new replica.
    newReplicaInfo.setNumBytes(blockFiles[1].length());