FsImageProto$INodeSection$INodeFile.newBuilder

How to use the newBuilder method in org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile.newBuilder (selected from the top results out of 315)
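For orientation before the snippets, here is a minimal, hedged sketch of the basic pattern: obtain a Builder from newBuilder(), set whichever optional fields you need, and call build(). All field values below are invented for illustration.

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

// Start an empty builder, populate a few of the optional fields, and
// assemble the immutable message. Values are illustrative only.
INodeFile file = INodeFile.newBuilder()
    .setReplication(3)                          // optional uint32 replication = 1
    .setModificationTime(1700000000000L)        // optional uint64 modificationTime = 2
    .setAccessTime(1700000000000L)              // optional uint64 accessTime = 3
    .setPreferredBlockSize(128L * 1024 * 1024)  // optional uint64 preferredBlockSize = 4
    .build();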

origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
 */
public Builder mergeFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
 if (fileBuilder_ == null) {
  if (((bitField0_ & 0x00000008) == 0x00000008) &&
    file_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
   file_ =
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(file_).mergeFrom(value).buildPartial();
  } else {
   file_ = value;
  }
  onChanged();
 } else {
  fileBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000008;
 return this;
}
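The generated mergeFile above follows the standard protobuf merge idiom: if the file submessage is already set, the incoming value is merged into a builder seeded from the existing message instead of simply replacing it. A hedged, standalone sketch of that idiom (both partial messages are invented); the same shape appears in mergeSnapshotCopy in the next snippet.

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

INodeFile existing = INodeFile.newBuilder()
    .setReplication(3)
    .buildPartial();                  // only replication is set

INodeFile update = INodeFile.newBuilder()
    .setModificationTime(1700000000000L)
    .buildPartial();                  // only modificationTime is set

// newBuilder(existing) copies the old message into a fresh builder;
// mergeFrom(update) then overlays the fields that are set on the update.
INodeFile merged = INodeFile.newBuilder(existing)
    .mergeFrom(update)
    .buildPartial();
// merged carries both replication == 3 and the new modificationTime.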
origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
 */
public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
 if (snapshotCopyBuilder_ == null) {
  if (((bitField0_ & 0x00000008) == 0x00000008) &&
    snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
   snapshotCopy_ =
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(snapshotCopy_).mergeFrom(value).buildPartial();
  } else {
   snapshotCopy_ = value;
  }
  onChanged();
 } else {
  snapshotCopyBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000008;
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

private INodeSection.INodeFile.Builder createINodeFileBuilder(Node node)
  throws IOException {
 INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
 Integer ival = node.removeChildInt(SECTION_REPLICATION);
 if (ival != null) {
origin: org.apache.hadoop/hadoop-fs2img

ugi.addUser(s.getOwner());
ugi.addGroup(s.getGroup());
INodeFile.Builder b = INodeFile.newBuilder()
  .setReplication(blk.getReplication(s))
  .setModificationTime(s.getModificationTime())
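The hadoop-fs2img snippet above is cut off mid-chain, but its general shape, copying file metadata into the builder, can be sketched against a plain org.apache.hadoop.fs.FileStatus. This is a hypothetical helper for illustration, not the project's actual code:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

// Hypothetical: map the basic FileStatus attributes onto the INodeFile fields.
static INodeFile.Builder fromStatus(FileStatus s) {
  return INodeFile.newBuilder()
      .setReplication(s.getReplication())
      .setModificationTime(s.getModificationTime())
      .setAccessTime(s.getAccessTime())
      .setPreferredBlockSize(s.getBlockSize());
}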
origin: io.prestosql.hadoop/hadoop-apache

public static INodeSection.INodeFile.Builder buildINodeFile(
  INodeFileAttributes file, final SaverContext state) {
 INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
   .setAccessTime(file.getAccessTime())
   .setModificationTime(file.getModificationTime())
   .setPermission(buildPermissionStatus(file, state.getStringMap()))
   .setPreferredBlockSize(file.getPreferredBlockSize())
   .setReplication(file.getFileReplication())
   .setStoragePolicyID(file.getLocalStoragePolicyID());
 AclFeature f = file.getAclFeature();
 if (f != null) {
  b.setAcl(buildAclEntries(f, state.getStringMap()));
 }
 XAttrFeature xAttrFeature = file.getXAttrFeature();
 if (xAttrFeature != null) {
  b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
 }
 return b;
}
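A builder produced this way is typically attached to the enclosing INodeSection.INode record through its file field (the file = 4 field referenced in the mergeFile javadoc earlier). A hedged sketch, with the inode id and name invented for illustration:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

INodeSection.INodeFile.Builder fileBuilder = INodeSection.INodeFile.newBuilder()
    .setReplication(3)
    .setPreferredBlockSize(128L * 1024 * 1024);

// Wrap the file payload in an INode record of the fsimage's INodeSection.
INodeSection.INode inode = INodeSection.INode.newBuilder()
    .setType(INodeSection.INode.Type.FILE)
    .setId(16386L)                                  // illustrative inode id
    .setName(ByteString.copyFromUtf8("data.bin"))   // illustrative name
    .setFile(fileBuilder)
    .build();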
origin: ch.cern.hadoop/hadoop-hdfs

public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile prototype) {
origin: ch.cern.hadoop/hadoop-hdfs

public Builder toBuilder() { return newBuilder(this); }
origin: io.prestosql.hadoop/hadoop-apache

public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile prototype) {
 return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
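newBuilder(prototype) and toBuilder() are the usual way to derive a modified copy of an existing message; as the snippet shows, toBuilder() is simply newBuilder(this). A hedged sketch (starting message and new value invented):

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

INodeFile original = INodeFile.newBuilder()
    .setReplication(2)
    .setPreferredBlockSize(128L * 1024 * 1024)
    .build();

// Copy all fields of the original into a fresh builder, then override one.
INodeFile reReplicated = original.toBuilder()
    .setReplication(3)
    .build();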
origin: org.apache.hadoop/hadoop-hdfs

public static INodeSection.INodeFile.Builder buildINodeFile(
  INodeFileAttributes file, final SaverContext state) {
 INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
   .setAccessTime(file.getAccessTime())
   .setModificationTime(file.getModificationTime())
   .setPermission(buildPermissionStatus(file))
   .setPreferredBlockSize(file.getPreferredBlockSize())
   .setStoragePolicyID(file.getLocalStoragePolicyID())
   .setBlockType(PBHelperClient.convert(file.getBlockType()));
 if (file.isStriped()) {
  b.setErasureCodingPolicyID(file.getErasureCodingPolicyID());
 } else {
  b.setReplication(file.getFileReplication());
 }
 AclFeature f = file.getAclFeature();
 if (f != null) {
  b.setAcl(buildAclEntries(f));
 }
 XAttrFeature xAttrFeature = file.getXAttrFeature();
 if (xAttrFeature != null) {
  b.setXAttrs(buildXAttrs(xAttrFeature));
 }
 return b;
}
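On the read side the same either/or applies when interpreting a deserialized INodeFile: striped files carry an erasure coding policy id, contiguous files carry a replication factor. A hedged sketch, assuming the standard generated has/get accessors for these optional fields:

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

// Illustrative message; in practice this would come from a parsed fsimage.
INodeSection.INodeFile f = INodeSection.INodeFile.newBuilder()
    .setErasureCodingPolicyID(1)
    .build();

if (f.hasErasureCodingPolicyID()) {
  // Striped (erasure-coded) file: identified by its EC policy id.
  System.out.println("EC policy id = " + f.getErasureCodingPolicyID());
} else {
  // Contiguous file: identified by its replication factor.
  System.out.println("replication = " + f.getReplication());
}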

Popular methods of FsImageProto$INodeSection$INodeFile

  • getBlocksCount
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getPermission
    optional fixed64 permission = 5;
  • getReplication
    optional uint32 replication = 1;
  • <init>
  • getAccessTime
    optional uint64 accessTime = 3;
  • getAcl
    optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
  • getBlocks
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getBlocksList
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getDefaultInstance
  • getFileUC
    optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
  • getModificationTime
    optional uint64 modificationTime = 2;
  • getPreferredBlockSize
    optional uint64 preferredBlockSize = 4;
  • getSerializedSize
  • getStoragePolicyID
  • getUnknownFields
  • getXAttrs
  • hasAccessTime
  • hasAcl
  • hasFileUC
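Several of these accessors combine naturally, for example when summing a file's length from its block list. A hedged sketch; the BlockProto import path is assumed to be the class generated from hdfs.proto (HdfsProtos.BlockProto), and all values are illustrative:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

// Illustrative message; in practice this comes from a parsed fsimage.
INodeFile f = INodeFile.newBuilder()
    .setReplication(3)
    .addBlocks(BlockProto.newBuilder()
        .setBlockId(1073741825L).setGenStamp(1001L).setNumBytes(134217728L))
    .addBlocks(BlockProto.newBuilder()
        .setBlockId(1073741826L).setGenStamp(1002L).setNumBytes(4096L))
    .build();

// Sum the file length from the repeated blocks field (blocks = 6).
long fileSize = 0;
for (BlockProto blk : f.getBlocksList()) {
  fileSize += blk.getNumBytes();
}
System.out.println("bytes = " + fileSize + ", replication = " + f.getReplication());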
