// NOTE(review): this span is a truncated/garbled excerpt of getFileStatus — the
// brace opened by hasErasureCodingPolicyID() is never closed, the DIRECTORY case
// label is missing, and the local `p` is re-declared in the same scope. Code
// tokens are preserved exactly as found; this does not compile as-is.
private Map<String, Object> getFileStatus (FsImageProto.INodeSection.INode inode, boolean printSuffix){
  // Result map mirrors the WebHDFS FileStatus JSON fields.
  Map<String, Object> map = Maps.newHashMap();
  switch (inode.getType()) {
  case FILE: {
    FsImageProto.INodeSection.INodeFile f = inode.getFile();
    // Decode the packed permission word against the image's string table.
    PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
        f.getPermission(), stringTable);
    map.put("owner", p.getUserName());
    // pathSuffix is populated only when the caller asks for it.
    map.put("pathSuffix", printSuffix ? inode.getName().toStringUtf8() : "");
    map.put("permission", toString(p.getPermission()));
    if (f.hasErasureCodingPolicyID()) {
      map.put("type", inode.getType());
      map.put("fileId", inode.getId());
      // Files have no children.
      map.put("childrenNum", 0);
      return map;
      // --- directory handling below appears spliced in mid-fragment ---
      FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
      PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
          d.getPermission(), stringTable);
      map.put("owner", p.getUserName());
      map.put("pathSuffix", printSuffix ? inode.getName().toStringUtf8() : "");
      map.put("permission", toString(p.getPermission()));
      // Directories have no replication factor; report 0.
      map.put("replication", 0);
      map.put("type", inode.getType());
// NOTE(review): fragment — the throw below belongs to a missing-<type> check
// whose condition lies outside this excerpt, and the trailing `if (id == null) {`
// is left unclosed. Tokens preserved exactly as found.
throw new IOException("INode XML found with no <type> tag.");
// Start building the protobuf INode from the parsed XML node.
INodeSection.INode.Builder inodeBld = INodeSection.INode.newBuilder();
// The id child is consumed (removed) from the XML node as it is read.
Long id = node.removeChildLong(SECTION_ID);
if (id == null) {
// Fragment: load the INode's child XML elements, convert them into a protobuf
// INode builder, then append the message length-delimited to the output stream.
loadNodeChildren(inode, "INode fields");
INodeSection.INode.Builder inodeBld = processINodeXml(inode);
inodeBld.build().writeDelimitedTo(out);
// NOTE(review): truncated excerpt of getFileStatus — the directory statements
// after `return map;` are unreachable here because the enclosing braces and the
// DIRECTORY case label were lost in extraction, and the local `p` is
// re-declared. Tokens preserved exactly as found; does not compile as-is.
private Map<String, Object> getFileStatus (FsImageProto.INodeSection.INode inode, boolean printSuffix){
  // Result map mirrors the WebHDFS FileStatus JSON fields.
  Map<String, Object> map = Maps.newHashMap();
  switch (inode.getType()) {
  case FILE: {
    FsImageProto.INodeSection.INodeFile f = inode.getFile();
    // Decode the packed permission word against the image's string table.
    PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
        f.getPermission(), stringTable);
    map.put("owner", p.getUserName());
    // pathSuffix is populated only when the caller asks for it.
    map.put("pathSuffix", printSuffix ? inode.getName().toStringUtf8() : "");
    map.put("permission", toString(p.getPermission()));
    map.put("replication", f.getReplication());
    map.put("type", inode.getType());
    map.put("fileId", inode.getId());
    // Files have no children.
    map.put("childrenNum", 0);
    return map;
    // --- directory handling below appears spliced in mid-fragment ---
    FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
    PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
        d.getPermission(), stringTable);
    map.put("owner", p.getUserName());
    map.put("pathSuffix", printSuffix ? inode.getName().toStringUtf8() : "");
    map.put("permission", toString(p.getPermission()));
    // Directories have no replication factor; report 0.
    map.put("replication", 0);
    map.put("type", inode.getType());
    map.put("fileId", inode.getId());
// NOTE(review): duplicate of the truncated getFileStatus excerpt above in this
// collage — same extraction damage (unreachable directory code after
// `return map;`, re-declared `p`). Tokens preserved exactly as found.
private Map<String, Object> getFileStatus (FsImageProto.INodeSection.INode inode, boolean printSuffix){
  // Result map mirrors the WebHDFS FileStatus JSON fields.
  Map<String, Object> map = Maps.newHashMap();
  switch (inode.getType()) {
  case FILE: {
    FsImageProto.INodeSection.INodeFile f = inode.getFile();
    PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
        f.getPermission(), stringTable);
    map.put("owner", p.getUserName());
    map.put("pathSuffix", printSuffix ? inode.getName().toStringUtf8() : "");
    map.put("permission", toString(p.getPermission()));
    map.put("replication", f.getReplication());
    map.put("type", inode.getType());
    map.put("fileId", inode.getId());
    map.put("childrenNum", 0);
    return map;
    // --- directory handling below appears spliced in mid-fragment ---
    FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
    PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
        d.getPermission(), stringTable);
    map.put("owner", p.getUserName());
    map.put("pathSuffix", printSuffix ? inode.getName().toStringUtf8() : "");
    map.put("permission", toString(p.getPermission()));
    map.put("replication", 0);
    map.put("type", inode.getType());
    map.put("fileId", inode.getId());
// NOTE(review): truncated protobuf-generated builder method — the body
// continues past this excerpt (the bitfield copy and field assignments are
// missing). Tokens preserved byte-for-byte.
public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode buildPartial() {
  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode(this);
  // Snapshot the builder's has-bits before translating them to the message.
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
// NOTE(review): truncated — the SYMLINK case and the remainder of the method
// run past this excerpt; the method never closes here. Tokens preserved
// exactly as found.
@Override
public String getEntry(String parent, INode inode) {
  StringBuffer buffer = new StringBuffer();
  String inodeName = inode.getName().toStringUtf8();
  // An empty parent or name maps to the filesystem root.
  Path path = new Path(parent.isEmpty() ? "/" : parent,
      inodeName.isEmpty() ? "/" : inodeName);
  PermissionStatus p = null;
  switch (inode.getType()) {
  case FILE:
    INodeFile file = inode.getFile();
    p = getPermission(file.getPermission());
    append(buffer, file.getReplication());
    break;
  case DIRECTORY:
    INodeDirectory dir = inode.getDirectory();
    p = getPermission(dir.getPermission());
    append(buffer, 0); // Replication
    break;
  case SYMLINK:
    INodeSymlink s = inode.getSymlink();
    p = getPermission(s.getPermission());
    append(buffer, 0); // Replication
// NOTE(review): duplicate of the truncated getEntry excerpt above in this
// collage — cut mid-switch, method never closes. Tokens preserved exactly.
@Override
public String getEntry(String parent, INode inode) {
  StringBuffer buffer = new StringBuffer();
  String inodeName = inode.getName().toStringUtf8();
  // An empty parent or name maps to the filesystem root.
  Path path = new Path(parent.isEmpty() ? "/" : parent,
      inodeName.isEmpty() ? "/" : inodeName);
  PermissionStatus p = null;
  switch (inode.getType()) {
  case FILE:
    INodeFile file = inode.getFile();
    p = getPermission(file.getPermission());
    append(buffer, file.getReplication());
    break;
  case DIRECTORY:
    INodeDirectory dir = inode.getDirectory();
    p = getPermission(dir.getPermission());
    append(buffer, 0); // Replication
    break;
  case SYMLINK:
    INodeSymlink s = inode.getSymlink();
    p = getPermission(s.getPermission());
    append(buffer, 0); // Replication
// NOTE(review): duplicate of the truncated generated buildPartial() above —
// body continues past this excerpt. Tokens preserved byte-for-byte.
public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode buildPartial() {
  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode(this);
  // Snapshot the builder's has-bits before translating them to the message.
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
// NOTE(review): fragment — the builder chain is cut before the remaining
// setters and .build(). Tokens preserved exactly as found.
INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot, parent.getSaverContext());
// Wrap the directory payload in an INode envelope keyed by the root's id.
INodeSection.INode r = INodeSection.INode.newBuilder()
    .setId(sroot.getId())
    .setType(INodeSection.INode.Type.DIRECTORY)
// NOTE(review): fragment — advance the running byte offset by this block's
// length, then start building a FILE INode; the chain is cut before completion.
off += block.getNumBytes();
INode.Builder ib = INode.newBuilder()
    .setType(INode.Type.FILE)
    .setId(id)
// NOTE(review): duplicate of the directory-builder fragment above — chain cut
// before the remaining setters and .build(). Tokens preserved exactly.
INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot, parent.getSaverContext());
INodeSection.INode r = INodeSection.INode.newBuilder()
    .setId(sroot.getId())
    .setType(INodeSection.INode.Type.DIRECTORY)
/**
 * Reports whether this message has all required fields set, caching the
 * answer in {@code memoizedIsInitialized} (-1 = unknown, 0 = no, 1 = yes).
 */
public final boolean isInitialized() {
  byte cached = memoizedIsInitialized;
  if (cached != -1) {
    return cached == 1;
  }
  // Required fields: both type and id must be present.
  if (!hasType() || !hasId()) {
    memoizedIsInitialized = 0;
    return false;
  }
  // Optional sub-messages must themselves be initialized when present.
  if (hasFile() && !getFile().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (hasDirectory() && !getDirectory().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
/**
 * Checks that all required fields (type, id) are present and that any set
 * sub-messages (file, directory) are themselves initialized. The result is
 * memoized: -1 means not yet computed, 0 false, 1 true.
 */
public final boolean isInitialized() {
  byte memo = memoizedIsInitialized;
  if (memo != -1) {
    return memo == 1;
  }
  boolean ok = hasType()
      && hasId()
      && (!hasFile() || getFile().isInitialized())
      && (!hasDirectory() || getDirectory().isInitialized());
  memoizedIsInitialized = (byte) (ok ? 1 : 0);
  return ok;
}
/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
 *
 * <pre>
 * Snapshot root
 * </pre>
 *
 * Merges {@code value} into the current root: when a non-default root is
 * already set, the two messages are merged field-wise; otherwise the root is
 * replaced outright. The has-bit for root is set either way.
 */
public Builder mergeRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) {
  if (rootBuilder_ == null) {
    // 0x00000002 is the has-bit for the `root` field in bitField0_.
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        root_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) {
      root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder(root_).mergeFrom(value).buildPartial();
    } else {
      root_ = value;
    }
    onChanged();
  } else {
    // A live nested builder exists; delegate the merge to it.
    rootBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
/**
 * Resolves {@code path} to an inode and decodes its owner/group/mode from
 * the fsimage string table.
 *
 * @param path absolute path to look up
 * @return the decoded permission status, or null for an unrecognized inode type
 * @throws IOException if the path lookup or inode fetch fails
 */
private PermissionStatus getPermissionStatus(String path) throws IOException {
  long inodeId = lookup(path);
  FsImageProto.INodeSection.INode node = fromINodeId(inodeId);
  switch (node.getType()) {
  case FILE:
    return FSImageFormatPBINode.Loader.loadPermission(
        node.getFile().getPermission(), stringTable);
  case DIRECTORY:
    return FSImageFormatPBINode.Loader.loadPermission(
        node.getDirectory().getPermission(), stringTable);
  case SYMLINK:
    return FSImageFormatPBINode.Loader.loadPermission(
        node.getSymlink().getPermission(), stringTable);
  default:
    return null;
  }
}
/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
 *
 * <pre>
 * Snapshot root
 * </pre>
 *
 * Duplicate of the generated mergeRoot above in this collage: merges
 * {@code value} into an existing non-default root, or installs it directly,
 * then sets the root has-bit.
 */
public Builder mergeRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) {
  if (rootBuilder_ == null) {
    // 0x00000002 is the has-bit for the `root` field in bitField0_.
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        root_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) {
      root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder(root_).mergeFrom(value).buildPartial();
    } else {
      root_ = value;
    }
    onChanged();
  } else {
    rootBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
/**
 * Looks up the inode for {@code path} and returns its decoded permission
 * status, or null when the inode type is not FILE/DIRECTORY/SYMLINK.
 *
 * @throws IOException if the lookup or inode fetch fails
 */
private PermissionStatus getPermissionStatus(String path) throws IOException {
  FsImageProto.INodeSection.INode target = fromINodeId(lookup(path));
  // Pick the permission word from whichever payload the inode carries.
  switch (target.getType()) {
  case FILE:
    return FSImageFormatPBINode.Loader.loadPermission(
        target.getFile().getPermission(), stringTable);
  case DIRECTORY:
    return FSImageFormatPBINode.Loader.loadPermission(
        target.getDirectory().getPermission(), stringTable);
  case SYMLINK:
    return FSImageFormatPBINode.Loader.loadPermission(
        target.getSymlink().getPermission(), stringTable);
  default:
    return null;
  }
}
@Override public void onFile(FsImageProto.INodeSection.INode inode, String path) { FsImageProto.INodeSection.INodeFile f = inode.getFile(); PermissionStatus p = loader.getPermissionStatus(f.getPermission()); final long fileSize = FSImageLoader.getFileSize(f); final long fileBlocks = f.getBlocksCount(); overallStats.sumBlocks.add(fileBlocks); overallStats.fileSize.observe(fileSize); overallStats.replication.observe(f.getReplication()); // Group stats final String groupName = p.getGroupName(); final GroupStats groupStat = report.groupStats.computeIfAbsent(groupName, report.createGroupStats); groupStat.sumBlocks.add(fileBlocks); groupStat.fileSize.observe(fileSize); // User stats final String userName = p.getUserName(); UserStats userStat = report.userStats.computeIfAbsent(userName, report.createUserStat); userStat.sumBlocks.add(fileBlocks); userStat.fileSize.observe(fileSize); userStat.replication.observe(f.getReplication()); }
/**
 * Serializes one INodeFile as a length-delimited FILE-typed INode record on
 * {@code out}. Block entries and any under-construction (lease) feature are
 * copied into the protobuf message first.
 *
 * @throws IOException if writing to the stream fails
 */
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder fileBld =
      buildINodeFile(n, parent.getSaverContext());

  // Copy the block list, when the file has one.
  if (n.getBlocks() != null) {
    for (Block blk : n.getBlocks()) {
      fileBld.addBlocks(PBHelper.convert(blk));
    }
  }

  // Files still being written carry their lease-holder information.
  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    fileBld.setFileUC(INodeSection.FileUnderConstructionFeature.newBuilder()
        .setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine())
        .build());
  }

  buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE)
      .setFile(fileBld)
      .build()
      .writeDelimitedTo(out);
}