map.put("replication", INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS); } else { map.put("replication", f.getReplication());
totalSpace += fileSize * f.getReplication();
// Recursively accumulates a directory summary into data:
// data[0] = directory count, data[1] = file and symlink count,
// data[2] = total file length in bytes, data[3] = replication-weighted space.
private void fillDirSummary(long id, long[] data) throws IOException {
    data[0]++;
    long[] children = dirmap.get(id);
    if (children == null) {
        return;
    }
    for (long cid : children) {
        INode node = fromINodeId(cid);
        switch (node.getType()) {
        case DIRECTORY:
            fillDirSummary(cid, data);
            break;
        case FILE:
            FsImageProto.INodeSection.INodeFile f = node.getFile();
            long curLength = getFileSize(f);
            data[1]++;
            data[2] += curLength;
            data[3] += curLength * f.getReplication();
            break;
        case SYMLINK:
            data[1]++;
            break;
        default:
            break;
        }
    }
}
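// Usage sketch for fillDirSummary (rootId is an assumed inode id, not taken
// from the source): the four slots are filled as documented above.
long[] summary = new long[4];
fillDirSummary(rootId, summary);
// summary[0] = directories, summary[1] = files + symlinks,
// summary[2] = total length in bytes, summary[3] = length * replication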
public Builder mergeFrom(
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
  if (other == org.apache.hadoop.hdfs.server.namenode
      .FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
  if (other.hasReplication()) {
    setReplication(other.getReplication());
fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize(), (byte) fileInPb.getStoragePolicyID(),
INodeFile file = inode.getFile();
p = getPermission(file.getPermission());
append(buffer, file.getReplication());
append(buffer, formatDate(file.getModificationTime()));
append(buffer, formatDate(file.getAccessTime()));
INodeSection.INodeFile f = n.getFile();
List<BlockProto> bp = f.getBlocksList();
short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
private void dumpINodeFile(INodeSection.INodeFile f) {
    o("replication", f.getReplication()).o("mtime", f.getModificationTime())
        .o("atime", f.getAccessTime())
        // Note: the tag name below is misspelled in the source itself.
        .o("perferredBlockSize", f.getPreferredBlockSize())
        .o("permission", dumpPermission(f.getPermission()));
    if (f.getBlocksCount() > 0) {
        out.print("<blocks>");
        for (BlockProto b : f.getBlocksList()) {
            out.print("<block>");
            o("id", b.getBlockId()).o("genstamp", b.getGenStamp())
                .o("numBytes", b.getNumBytes());
            out.print("</block>\n");
        }
        out.print("</blocks>\n");
    }
    if (f.hasFileUC()) {
        INodeSection.FileUnderConstructionFeature u = f.getFileUC();
        out.print("<file-under-construction>");
        o("clientName", u.getClientName()).o("clientMachine", u.getClientMachine());
        out.print("</file-under-construction>\n");
    }
}
@Override
public void onFile(FsImageProto.INodeSection.INode inode, String path) {
    FsImageProto.INodeSection.INodeFile f = inode.getFile();
    PermissionStatus p = loader.getPermissionStatus(f.getPermission());
    final long fileSize = FSImageLoader.getFileSize(f);
    final long fileBlocks = f.getBlocksCount();
    overallStats.sumBlocks.add(fileBlocks);
    overallStats.fileSize.observe(fileSize);
    overallStats.replication.observe(f.getReplication());

    // Group stats
    final String groupName = p.getGroupName();
    final GroupStats groupStat = report.groupStats.computeIfAbsent(
        groupName, report.createGroupStats);
    groupStat.sumBlocks.add(fileBlocks);
    groupStat.fileSize.observe(fileSize);

    // User stats
    final String userName = p.getUserName();
    UserStats userStat = report.userStats.computeIfAbsent(
        userName, report.createUserStat);
    userStat.sumBlocks.add(fileBlocks);
    userStat.fileSize.observe(fileSize);
    userStat.replication.observe(f.getReplication());
}
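// Minimal sketch of an accumulator compatible with the calls above
// (sumBlocks.add(...), fileSize.observe(...)). The real GroupStats and
// UserStats classes in the source may differ; this only illustrates the
// accumulation pattern.
import java.util.concurrent.atomic.LongAdder;

final class SimpleStats {
    final LongAdder sumBlocks = new LongAdder(); // block tally, thread-safe
    private long count;
    private long sum;
    private long max;

    // Records one observed value (e.g. a file size or a replication factor).
    synchronized void observe(long value) {
        count++;
        sum += value;
        max = Math.max(max, value);
    }
}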
boolean isStriped = (fileInPb.getBlockType() == BlockTypeProto.STRIPED);
Short replication = !isStriped ? (short) fileInPb.getReplication() : null;
Byte ecPolicyID = isStriped ? (byte) fileInPb.getErasureCodingPolicyID() : null;
p = getPermission(file.getPermission());
hasAcl = file.hasAcl() && file.getAcl().getEntriesCount() > 0;
append(buffer, file.getReplication());
append(buffer, formatDate(file.getModificationTime()));
append(buffer, formatDate(file.getAccessTime()));
boolean isStriped = f.hasErasureCodingPolicyID();
// A file is either striped (erasure-coded) or replicated, never both.
assert !isStriped || !f.hasReplication();
Short replication = !isStriped ? (short) f.getReplication() : null;
Byte ecPolicyID = isStriped ? (byte) f.getErasureCodingPolicyID() : null;
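// The assert above encodes a protobuf invariant: an INodeFile carries either
// a replication factor or an erasure-coding policy ID, never both. A hedged
// helper expressing the same branch (the method itself is illustrative, not
// part of the source):
static Short replicationOrNull(INodeSection.INodeFile f) {
    return f.hasErasureCodingPolicyID() ? null : (short) f.getReplication();
}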
    o(SECTION_REPLICATION, INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS);
} else {
    o(SECTION_REPLICATION, f.getReplication());
data[2] = getFileSize(f);
nsQuota = -1;
data[3] = data[2] * f.getReplication();
spaceQuota = -1;
return fillSummaryMap(spaceQuota, nsQuota, data);
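// Worked example of the space accounting above (values are illustrative):
// a file stored with replication 3 consumes 3x its logical length.
long fileSize = 128L * 1024 * 1024;     // 134217728 bytes of logical data
short replication = 3;
long consumed = fileSize * replication; // 402653184 bytes (384 MiB) of raw space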