Tabnine Logo
DataTransferProtos$BaseHeaderProto.newBuilder
Code IndexAdd Tabnine to your IDE (free)

How to use
newBuilder
method
in
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos$BaseHeaderProto

Best Java code snippets using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos$BaseHeaderProto.newBuilder (Showing top 20 results out of 315)

origin: apache/hbase

blockCopy.setNumBytes(locatedBlock.getBlockSize());
ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
  .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
    .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
  .setClientName(clientName).build();
origin: org.apache.hadoop/hadoop-hdfs-client

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
 *
 * Merges {@code value} into the header field. When a nested field builder
 * is active the merge is delegated to it; otherwise the stored message is
 * merged with {@code value}, or simply replaced when no non-default header
 * has been set yet.
 */
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (headerBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    headerBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default header is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  header_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(header_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
 *
 * Merges {@code value} into the header field. When a nested field builder
 * is active the merge is delegated to it; otherwise the stored message is
 * merged with {@code value}, or simply replaced when no non-default header
 * has been set yet.
 */
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (headerBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    headerBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default header is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  header_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(header_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: io.prestosql.hadoop/hadoop-apache

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
 *
 * Merges {@code value} into the header field. When a nested field builder
 * is active the merge is delegated to it; otherwise the stored message is
 * merged with {@code value}, or simply replaced when no non-default header
 * has been set yet.
 */
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (headerBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    headerBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default header is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  header_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(header_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
 *
 * Merges {@code value} into the header field. When a nested field builder
 * is active the merge is delegated to it; otherwise the stored message is
 * merged with {@code value}, or simply replaced when no non-default header
 * has been set yet.
 */
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (headerBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    headerBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default header is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  header_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(header_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
 *
 * Merges {@code value} into the baseHeader field. When a nested field
 * builder is active the merge is delegated to it; otherwise the stored
 * message is merged with {@code value}, or simply replaced when no
 * non-default baseHeader has been set yet.
 */
public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (baseHeaderBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    baseHeaderBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default baseHeader is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  baseHeader_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(baseHeader_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: org.apache.hadoop/hadoop-hdfs-client

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
 *
 * Merges {@code value} into the header field. When a nested field builder
 * is active the merge is delegated to it; otherwise the stored message is
 * merged with {@code value}, or simply replaced when no non-default header
 * has been set yet.
 */
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (headerBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    headerBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default header is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  header_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(header_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: org.apache.hadoop/hadoop-hdfs-client

/**
 * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
 *
 * Merges {@code value} into the baseHeader field. When a nested field
 * builder is active the merge is delegated to it; otherwise the stored
 * message is merged with {@code value}, or simply replaced when no
 * non-default baseHeader has been set yet.
 */
public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
  if (baseHeaderBuilder_ != null) {
    // A nested builder is in use: let it perform the merge.
    baseHeaderBuilder_.mergeFrom(value);
    bitField0_ |= 0x00000001;
    return this;
  }
  // Merge only when a non-default baseHeader is already present;
  // otherwise adopt the incoming message directly.
  boolean mergeable = ((bitField0_ & 0x00000001) == 0x00000001)
      && baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
  baseHeader_ = mergeable
      ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto
          .newBuilder(baseHeader_).mergeFrom(value).buildPartial()
      : value;
  onChanged();
  bitField0_ |= 0x00000001;
  return this;
}
/**
origin: org.apache.hadoop/hadoop-hdfs-client

/** Returns a builder initialized with the contents of this message. */
public Builder toBuilder() { return newBuilder(this); }
origin: ch.cern.hadoop/hadoop-hdfs

/** Returns a builder initialized with the contents of this message. */
public Builder toBuilder() { return newBuilder(this); }
origin: org.apache.hadoop/hadoop-hdfs-client

/** Returns a new, empty builder for this message type. */
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
origin: io.prestosql.hadoop/hadoop-apache

/** Returns a new, empty builder for this message type. */
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
origin: io.prestosql.hadoop/hadoop-apache

/** Returns a builder initialized with the contents of this message. */
public Builder toBuilder() { return newBuilder(this); }
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Builds a {@link BaseHeaderProto} for the given block, attaching the
 * block token and, when a trace is currently active, the current span's
 * trace and parent-span identifiers.
 */
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  // Populate the mandatory block and token fields.
  BaseHeaderProto.Builder header = BaseHeaderProto.newBuilder()
      .setBlock(PBHelper.convert(blk))
      .setToken(PBHelper.convert(blockToken));
  // Attach trace information only when tracing is enabled.
  if (Trace.isTracing()) {
    Span span = Trace.currentSpan();
    DataTransferTraceInfoProto.Builder traceInfo =
        DataTransferTraceInfoProto.newBuilder();
    traceInfo.setTraceId(span.getTraceId());
    traceInfo.setParentId(span.getSpanId());
    header.setTraceInfo(traceInfo);
  }
  return header.build();
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Creates a builder whose fields are initialized from {@code prototype}. */
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
  Builder builder = newBuilder();
  builder.mergeFrom(prototype);
  return builder;
}
/** Returns a builder initialized with the contents of this message. */
public Builder toBuilder() {
  return newBuilder(this);
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Returns a new, empty builder for this message type. */
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
origin: io.prestosql.hadoop/hadoop-apache

/** Creates a builder whose fields are initialized from {@code prototype}. */
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
  Builder builder = newBuilder();
  builder.mergeFrom(prototype);
  return builder;
}
/** Returns a builder initialized with the contents of this message. */
public Builder toBuilder() {
  return newBuilder(this);
}
origin: org.apache.hadoop/hadoop-hdfs-client

/**
 * Builds a {@link BaseHeaderProto} for the given block, attaching the
 * block token and, when the current span id is valid, the trace id
 * (high bits) and parent id (low bits) of that span.
 */
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  // Populate the mandatory block and token fields.
  BaseHeaderProto.Builder header = BaseHeaderProto.newBuilder()
      .setBlock(PBHelperClient.convert(blk))
      .setToken(PBHelperClient.convert(blockToken));
  // Attach trace information only when a valid span is active.
  SpanId spanId = Tracer.getCurrentSpanId();
  if (spanId.isValid()) {
    DataTransferTraceInfoProto.Builder traceInfo =
        DataTransferTraceInfoProto.newBuilder();
    traceInfo.setTraceId(spanId.getHigh());
    traceInfo.setParentId(spanId.getLow());
    header.setTraceInfo(traceInfo);
  }
  return header.build();
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Builds a {@link BaseHeaderProto} for the given block, attaching the
 * block token and, when a trace is currently active, the current span's
 * trace and parent-span identifiers.
 */
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  // Populate the mandatory block and token fields.
  BaseHeaderProto.Builder header = BaseHeaderProto.newBuilder()
      .setBlock(PBHelper.convert(blk))
      .setToken(PBHelper.convert(blockToken));
  // Attach trace information only when tracing is enabled.
  if (Trace.isTracing()) {
    Span span = Trace.currentSpan();
    DataTransferTraceInfoProto.Builder traceInfo =
        DataTransferTraceInfoProto.newBuilder();
    traceInfo.setTraceId(span.getTraceId());
    traceInfo.setParentId(span.getSpanId());
    header.setTraceInfo(traceInfo);
  }
  return header.build();
}
origin: org.apache.hadoop/hadoop-hdfs-client

/** Creates a builder whose fields are initialized from {@code prototype}. */
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
  Builder builder = newBuilder();
  builder.mergeFrom(prototype);
  return builder;
}
/** Returns a builder initialized with the contents of this message. */
public Builder toBuilder() {
  return newBuilder(this);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos$BaseHeaderProto.newBuilder

Popular methods of DataTransferProtos$BaseHeaderProto

  • getBlock
    required .hadoop.hdfs.ExtendedBlockProto block = 1;
  • getToken
    optional .hadoop.common.TokenProto token = 2;
  • getTraceInfo
    optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
  • <init>
  • equals
  • getDefaultInstance
  • getDescriptorForType
  • getSerializedSize
  • getUnknownFields
  • hasBlock
    required .hadoop.hdfs.ExtendedBlockProto block = 1;
  • hasToken
    optional .hadoop.common.TokenProto token = 2;
  • hasTraceInfo
    optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
  • hasToken,
  • hasTraceInfo,
  • hashCode,
  • initFields,
  • isInitialized,
  • makeExtensionsImmutable,
  • parseUnknownField,
  • toBuilder

Popular in Java

  • Start an intent from android
  • onRequestPermissionsResult (Fragment)
  • scheduleAtFixedRate (Timer)
  • getResourceAsStream (ClassLoader)
  • Color (java.awt)
    The Color class is used to encapsulate colors in the default sRGB color space or colors in arbitrary color spaces identified by a ColorSpace.
  • String (java.lang)
  • BigInteger (java.math)
    An immutable arbitrary-precision signed integer. This implementation is efficient for cryptographic operations.
  • SocketTimeoutException (java.net)
    This exception is thrown when a timeout expired on a socket read or accept operation.
  • ResultSet (java.sql)
    An interface for an object which represents a database table entry, returned as the result of the query.
  • GregorianCalendar (java.util)
    GregorianCalendar is a concrete subclass of Calendar and provides the standard calendar system used by most of the world.
  • Top 12 Jupyter Notebook extensions
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now