FsImageProto$INodeSection$INodeFile.getDefaultInstance

How to use the getDefaultInstance method in org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile.getDefaultInstance (Showing top 20 results out of 315)
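Before the indexed snippets, a minimal usage sketch may help. getDefaultInstance() returns the shared, immutable message whose fields all hold their protobuf defaults, and the generated builders compare against it to decide whether a sub-message was ever set. The class and accessor names come from the snippets on this page; the concrete values are illustrative only, assuming the usual zero/empty proto defaults.

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

public class DefaultInstanceExample {
 public static void main(String[] args) {
  // The default instance: a singleton with every field at its proto default
  // (numeric zero, empty ByteString, empty repeated lists).
  INodeFile defaults = INodeFile.getDefaultInstance();

  // A populated message built through the generated builder (values are made up).
  INodeFile file = INodeFile.newBuilder()
    .setReplication(3)
    .setPreferredBlockSize(128L * 1024 * 1024)
    .build();

  System.out.println(defaults.getReplication()); // 0, the proto default
  System.out.println(file.getReplication());     // 3

  // The clearXxx/mergeXxx snippets below use the default instance as a
  // sentinel: reference equality means "this sub-message was never set".
  System.out.println(file == INodeFile.getDefaultInstance()); // false
 }
}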

origin: org.apache.hadoop/hadoop-hdfs

public Builder clear() {
 super.clear();
 snapshotId_ = 0;
 bitField0_ = (bitField0_ & ~0x00000001);
 fileSize_ = 0L;
 bitField0_ = (bitField0_ & ~0x00000002);
 name_ = com.google.protobuf.ByteString.EMPTY;
 bitField0_ = (bitField0_ & ~0x00000004);
 if (snapshotCopyBuilder_ == null) {
  snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 } else {
  snapshotCopyBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 if (blocksBuilder_ == null) {
  blocks_ = java.util.Collections.emptyList();
  bitField0_ = (bitField0_ & ~0x00000010);
 } else {
  blocksBuilder_.clear();
 }
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

private void initFields() {
 snapshotId_ = 0;
 fileSize_ = 0L;
 name_ = com.google.protobuf.ByteString.EMPTY;
 snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
origin: org.apache.hadoop/hadoop-hdfs

public Builder clear() {
 super.clear();
 type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
 bitField0_ = (bitField0_ & ~0x00000001);
 id_ = 0L;
 bitField0_ = (bitField0_ & ~0x00000002);
 name_ = com.google.protobuf.ByteString.EMPTY;
 bitField0_ = (bitField0_ & ~0x00000004);
 if (fileBuilder_ == null) {
  file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 } else {
  fileBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 if (directoryBuilder_ == null) {
  directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
 } else {
  directoryBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000010);
 if (symlinkBuilder_ == null) {
  symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
 } else {
  symlinkBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000020);
 return this;
}
origin: io.prestosql.hadoop/hadoop-apache

public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
 return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
 */
public Builder clearSnapshotCopy() {
 if (snapshotCopyBuilder_ == null) {
  snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  onChanged();
 } else {
  snapshotCopyBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 return this;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
 */
public Builder clearFile() {
 if (fileBuilder_ == null) {
  file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  onChanged();
 } else {
  fileBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 return this;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
 */
public Builder clearSnapshotCopy() {
 if (snapshotCopyBuilder_ == null) {
  snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  onChanged();
 } else {
  snapshotCopyBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 return this;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
 */
public Builder clearFile() {
 if (fileBuilder_ == null) {
  file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  onChanged();
 } else {
  fileBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 return this;
}
origin: ch.cern.hadoop/hadoop-hdfs

public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
 return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
}
origin: io.prestosql.hadoop/hadoop-apache

private void initFields() {
 snapshotId_ = 0;
 fileSize_ = 0L;
 name_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
 snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
origin: ch.cern.hadoop/hadoop-hdfs

private void initFields() {
 snapshotId_ = 0;
 fileSize_ = 0L;
 name_ = com.google.protobuf.ByteString.EMPTY;
 snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
origin: io.prestosql.hadoop/hadoop-apache

private void initFields() {
 type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
 id_ = 0L;
 name_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY;
 file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
 symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
origin: ch.cern.hadoop/hadoop-hdfs

private void initFields() {
 type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
 id_ = 0L;
 name_ = com.google.protobuf.ByteString.EMPTY;
 file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
 symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
origin: org.apache.hadoop/hadoop-hdfs

public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
 if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
 if (other.hasReplication()) {
  setReplication(other.getReplication());
 }
 // snippet truncated here; the generated method goes on to merge the remaining fields
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
 */
public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
 if (snapshotCopyBuilder_ == null) {
  if (((bitField0_ & 0x00000008) == 0x00000008) &&
    snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
   snapshotCopy_ =
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(snapshotCopy_).mergeFrom(value).buildPartial();
  } else {
   snapshotCopy_ = value;
  }
  onChanged();
 } else {
  snapshotCopyBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000008;
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
 */
public Builder mergeFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
 if (fileBuilder_ == null) {
  if (((bitField0_ & 0x00000008) == 0x00000008) &&
    file_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
   file_ =
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(file_).mergeFrom(value).buildPartial();
  } else {
   file_ = value;
  }
  onChanged();
 } else {
  fileBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000008;
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
 return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
 */
public Builder clearFile() {
 if (fileBuilder_ == null) {
  file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  onChanged();
 } else {
  fileBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
 */
public Builder clearSnapshotCopy() {
 if (snapshotCopyBuilder_ == null) {
  snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
  onChanged();
 } else {
  snapshotCopyBuilder_.clear();
 }
 bitField0_ = (bitField0_ & ~0x00000008);
 return this;
}
origin: org.apache.hadoop/hadoop-hdfs

private void initFields() {
 type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
 id_ = 0L;
 name_ = com.google.protobuf.ByteString.EMPTY;
 file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
 directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
 symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;

Popular methods of FsImageProto$INodeSection$INodeFile

  • getBlocksCount
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getPermission
    optional fixed64 permission = 5;
  • getReplication
    optional uint32 replication = 1;
  • newBuilder
  • <init>
  • getAccessTime
    optional uint64 accessTime = 3;
  • getAcl
    optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
  • getBlocks
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getBlocksList
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getFileUC
    optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
  • getModificationTime
    optional uint64 modificationTime = 2;
  • getPreferredBlockSize
    optional uint64 preferredBlockSize = 4;
  • getSerializedSize
  • getStoragePolicyID
  • getUnknownFields
  • getXAttrs
  • hasAccessTime
  • hasAcl
  • hasFileUC
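As a rough illustration of reading the fields listed above from a parsed INodeFile, the sketch below sums the sizes of its repeated blocks field. The BlockProto import path, the class name, and the helper name are assumptions for the sketch, not part of the index above.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;

public class INodeFileSummary {
 // Hypothetical helper: total bytes across the repeated blocks field (blocks = 6).
 static long totalBlockBytes(INodeFile file) {
  long total = 0;
  for (int i = 0; i < file.getBlocksCount(); i++) {
   BlockProto block = file.getBlocks(i);
   total += block.getNumBytes();
  }
  return total;
 }

 public static void main(String[] args) {
  // With the default instance the blocks list is empty, so the sum is 0.
  INodeFile file = INodeFile.getDefaultInstance();
  System.out.println(totalBlockBytes(file));
 }
}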
