case FILE:
  INodeFile file = inode.getFile();
  p = getPermission(file.getPermission());
  hasAcl = file.hasAcl() && file.getAcl().getEntriesCount() > 0;
  append(buffer, file.getReplication());
  append(buffer, formatDate(file.getModificationTime()));
  append(buffer, formatDate(file.getAccessTime()));
  append(buffer, file.getPreferredBlockSize());
  append(buffer, file.getBlocksCount());
  append(buffer, FSImageLoader.getFileSize(file));
  append(buffer, 0);  // NS_QUOTA

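// A minimal sketch of what FSImageLoader.getFileSize(file) above computes,
// assuming it simply sums the recorded block lengths (the logical file
// length stored in the image):
static long getFileSize(FsImageProto.INodeSection.INodeFile f) {
  long size = 0;
  for (BlockProto b : f.getBlocksList()) {
    size += b.getNumBytes();
  }
  return size;
}
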
/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
 */
public Builder mergeSnapshotCopy(
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
  if (snapshotCopyBuilder_ == null) {
    if (((bitField0_ & 0x00000008) == 0x00000008) &&
        snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
      snapshotCopy_ =
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile
              .newBuilder(snapshotCopy_).mergeFrom(value).buildPartial();
    } else {
      snapshotCopy_ = value;
    }
    onChanged();
  } else {
    snapshotCopyBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000008;
  return this;
}

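// Hedged usage sketch for mergeSnapshotCopy. The enclosing builder is assumed
// to be FsImageProto.SnapshotDiffSection.FileDiff.Builder, where fsimage.proto
// declares the snapshotCopy field. If snapshotCopy is already set, the set
// fields of `update` overwrite it field by field; otherwise `update` is
// adopted wholesale.
FsImageProto.SnapshotDiffSection.FileDiff.Builder diff =
    FsImageProto.SnapshotDiffSection.FileDiff.newBuilder();
FsImageProto.INodeSection.INodeFile update =
    FsImageProto.INodeSection.INodeFile.newBuilder()
        .setReplication(2)  // hypothetical value
        .build();
diff.mergeSnapshotCopy(update);
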
public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile buildPartial() {
  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result =
      new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;

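// The generated method continues by copying each presence bit and field into
// `result`; a hedged sketch of that protobuf-2.x codegen pattern (masks and
// field names assumed):
//   if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
//     to_bitField0_ |= 0x00000001;
//   }
//   result.replication_ = replication_;
//   ...
//   result.bitField0_ = to_bitField0_;
//   onBuilt();
//   return result;
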
FsImageProto.INodeSection.INodeFile f = inode.getFile();
PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
    f.getPermission(), stringTable);
map.put("accessTime", f.getAccessTime());
map.put("blockSize", f.getPreferredBlockSize());
map.put("group", p.getGroupName());
map.put("length", getFileSize(f));
map.put("modificationTime", f.getModificationTime());
map.put("owner", p.getUserName());
map.put("pathSuffix",
    printSuffix ? inode.getName().toStringUtf8() : "");
map.put("permission", toString(p.getPermission()));
map.put("replication", f.getReplication());
map.put("type", inode.getType());
map.put("fileId", inode.getId());

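// Hedged sketch of the local toString(p.getPermission()) helper used above,
// assuming it renders the permission as the octal string WebHDFS reports;
// FsPermission.toShort() is a real HDFS API.
private static String toString(FsPermission permission) {
  return String.format("%o", permission.toShort());
}
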
public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
  if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
  if (other.hasReplication()) {
    setReplication(other.getReplication());
  }
  if (other.hasModificationTime()) {
    setModificationTime(other.getModificationTime());
  }
  if (other.hasAccessTime()) {
    setAccessTime(other.getAccessTime());
  }
  if (other.hasPreferredBlockSize()) {
    setPreferredBlockSize(other.getPreferredBlockSize());
  }
  if (other.hasPermission()) {
    setPermission(other.getPermission());
  }
  if (other.hasFileUC()) {
    mergeFileUC(other.getFileUC());
  }
  if (other.hasAcl()) {
    mergeAcl(other.getAcl());
  }
  if (other.hasXAttrs()) {
    mergeXAttrs(other.getXAttrs());
  }
  if (other.hasStoragePolicyID()) {
    setStoragePolicyID(other.getStoragePolicyID());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}

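// Minimal usage sketch of the merge semantics above (values hypothetical):
// fields explicitly set on `overlay` win, while unset optional fields keep
// the receiver's value.
FsImageProto.INodeSection.INodeFile base =
    FsImageProto.INodeSection.INodeFile.newBuilder()
        .setReplication(3)
        .setPreferredBlockSize(128L << 20)
        .build();
FsImageProto.INodeSection.INodeFile overlay =
    FsImageProto.INodeSection.INodeFile.newBuilder()
        .setReplication(2)
        .build();
FsImageProto.INodeSection.INodeFile merged =
    base.toBuilder().mergeFrom(overlay).build();
// merged.getReplication() == 2; merged.getPreferredBlockSize() is unchanged.
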
INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy();
PermissionStatus permission = loadPermission(
    fileInPb.getPermission(), state.getStringTable());
AclFeature acl = null;
if (fileInPb.hasAcl()) {
  int[] entries = AclEntryStatusFormat
      .toInt(FSImageFormatPBINode.Loader.loadAclEntries(
          fileInPb.getAcl(), state.getStringTable()));
  acl = new AclFeature(entries);
}
XAttrFeature xAttrs = null;
if (fileInPb.hasXAttrs()) {
  xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
      fileInPb.getXAttrs(), state.getStringTable()));
}
copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
    .toByteArray(), permission, acl, fileInPb.getModificationTime(),
    fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
    fileInPb.getPreferredBlockSize(),
    (byte) fileInPb.getStoragePolicyID(), xAttrs);

ugi.addUser(s.getOwner());
ugi.addGroup(s.getGroup());
INodeFile.Builder b = INodeFile.newBuilder()
    .setReplication(blk.getReplication(s))
    .setModificationTime(s.getModificationTime())

private INodeFile loadINodeFile(INodeSection.INode n) {
  assert n.getType() == INodeSection.INode.Type.FILE;
  INodeSection.INodeFile f = n.getFile();
  List<BlockProto> bp = f.getBlocksList();
  short replication = (short) f.getReplication();
  LoaderContext state = parent.getLoaderContext();
  // (construction of the blocks array from bp is elided in this excerpt)
  final PermissionStatus permissions = loadPermission(f.getPermission(),
      parent.getLoaderContext().getStringTable());
  final INodeFile file = new INodeFile(n.getId(),
      n.getName().toByteArray(), permissions, f.getModificationTime(),
      f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
      (byte) f.getStoragePolicyID());
  if (f.hasAcl()) {
    int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
        f.getAcl(), state.getStringTable()));
    file.addAclFeature(new AclFeature(entries));
  }
  if (f.hasXAttrs()) {
    file.addXAttrFeature(new XAttrFeature(
        loadXAttrs(f.getXAttrs(), state.getStringTable())));
  }
  if (f.hasFileUC()) {
    ucFiles.add(file);
    INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
    file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
    if (blocks.length > 0) {

++totalFiles;
INodeSection.INodeFile f = p.getFile();
totalBlocks += f.getBlocksCount();
long fileSize = 0;
for (BlockProto b : f.getBlocksList()) {
  fileSize += b.getNumBytes();
}
totalSpace += fileSize * f.getReplication();

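// Worked example of the accounting above (values assumed): a file with two
// blocks of 128 MB and 64 MB at replication 3 contributes
//   fileSize   = 128 MB + 64 MB = 192 MB   (logical bytes)
//   totalSpace += 192 MB * 3    = 576 MB   (raw bytes across replicas)
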
public Builder clear() {
  super.clear();
  type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
  bitField0_ = (bitField0_ & ~0x00000001);
  id_ = 0L;
  bitField0_ = (bitField0_ & ~0x00000002);
  name_ = com.google.protobuf.ByteString.EMPTY;
  bitField0_ = (bitField0_ & ~0x00000004);
  if (fileBuilder_ == null) {
    file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  } else {
    fileBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000008);
  if (directoryBuilder_ == null) {
    directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
  } else {
    directoryBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000010);
  if (symlinkBuilder_ == null) {
    symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
  } else {
    symlinkBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000020);
  return this;
}

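// Hedged usage sketch: clear() resets every field and its presence bit, so a
// single Builder can be reused across records instead of allocating one per
// INode. The loop values here are hypothetical.
FsImageProto.INodeSection.INode.Builder b =
    FsImageProto.INodeSection.INode.newBuilder();
for (long id : new long[] { 16385L, 16386L }) {
  b.clear();
  b.setType(FsImageProto.INodeSection.INode.Type.FILE)
      .setId(id);
  // ... set the file payload, then b.build()
}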