How to use the newBuilder method in org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos$SegmentStateProto

Best Java code snippets using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos$SegmentStateProto.newBuilder
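All of the snippets below revolve around the same protobuf builder pattern: SegmentStateProto is a proto2 message whose three fields (startTxId, endTxId, isInProgress) are all required, so a caller obtains a Builder via newBuilder(), sets every field, and calls build(). A minimal sketch for orientation; the class name and transaction ids are illustrative, not taken from the snippets:

import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;

public class SegmentStateProtoExample {
  public static void main(String[] args) {
    // All three fields are required in the .proto definition;
    // build() throws UninitializedMessageException if any is unset.
    SegmentStateProto state = SegmentStateProto.newBuilder()
        .setStartTxId(1L)        // first txid covered by the segment
        .setEndTxId(100L)        // last txid covered by the segment
        .setIsInProgress(false)  // the segment has been finalized
        .build();
    System.out.println(state);
  }
}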

origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
 if (segmentStateBuilder_ == null) {
  if (((bitField0_ & 0x00000001) == 0x00000001) &&
    segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
   segmentState_ =
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
  } else {
   segmentState_ = value;
  }
  onChanged();
 } else {
  segmentStateBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000001;
 return this;
}
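
The generated merge logic above is built on the prototype overload of newBuilder: seed a Builder from an existing message, fold a second message in with mergeFrom, and finish with buildPartial(), which skips the required-field check. A user-level sketch of the same pattern, assuming base and update are existing SegmentStateProto instances:

// Fields set in 'update' overwrite the corresponding fields of 'base';
// buildPartial() tolerates missing required fields, mirroring the generated code.
SegmentStateProto merged = SegmentStateProto.newBuilder(base)
    .mergeFrom(update)
    .buildPartial();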
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId)
  throws IOException {
 EditLogFile elf = fjm.getLogFile(segmentTxId);
 if (elf == null) {
  return null;
 }
 if (elf.isInProgress()) {
  elf.scanLog(Long.MAX_VALUE, false);
 }
 if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
  LOG.info("Edit log file " + elf + " appears to be empty. " +
    "Moving it aside..." + " ; journal id: " + journalId);
  elf.moveAsideEmptyFile();
  return null;
 }
 SegmentStateProto ret = SegmentStateProto.newBuilder()
   .setStartTxId(segmentTxId)
   .setEndTxId(elf.getLastTxId())
   .setIsInProgress(elf.isInProgress())
   .build();
 LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
   TextFormat.shortDebugString(ret) + " ; journal id: " + journalId);
 return ret;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
 if (segmentStateBuilder_ == null) {
  if (((bitField0_ & 0x00000001) == 0x00000001) &&
    segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
   segmentState_ =
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
  } else {
   segmentState_ = value;
  }
  onChanged();
 } else {
  segmentStateBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000001;
 return this;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId)
  throws IOException {
 EditLogFile elf = fjm.getLogFile(segmentTxId);
 if (elf == null) {
  return null;
 }
 if (elf.isInProgress()) {
  elf.scanLog();
 }
 if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
  LOG.info("Edit log file " + elf + " appears to be empty. " +
    "Moving it aside...");
  elf.moveAsideEmptyFile();
  return null;
 }
 SegmentStateProto ret = SegmentStateProto.newBuilder()
   .setStartTxId(segmentTxId)
   .setEndTxId(elf.getLastTxId())
   .setIsInProgress(elf.isInProgress())
   .build();
 LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
   TextFormat.shortDebugString(ret));
 return ret;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover 
 * </pre>
 */
public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
 if (stateToAcceptBuilder_ == null) {
  if (((bitField0_ & 0x00000002) == 0x00000002) &&
    stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
   stateToAccept_ =
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
  } else {
   stateToAccept_ = value;
  }
  onChanged();
 } else {
  stateToAcceptBuilder_.mergeFrom(value);
 }
 bitField0_ |= 0x00000002;
 return this;
}
origin: ch.cern.hadoop/hadoop-hdfs

public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
 return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
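
toBuilder() is the idiomatic way to derive a modified copy of an immutable message: it returns a Builder preloaded with the current field values. A short sketch, assuming state is an existing SegmentStateProto and the new end txid is illustrative:

// Messages are immutable; copy into a Builder, adjust, and rebuild.
SegmentStateProto finalized = state.toBuilder()
    .setIsInProgress(false)  // mark the segment as finalized
    .setEndTxId(250L)        // illustrative: where the segment actually ended
    .build();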

Popular methods of QJournalProtocolProtos$SegmentStateProto

  • getEndTxId
    required uint64 endTxId = 2;
  • getIsInProgress
    required bool isInProgress = 3;
  • getStartTxId
    required uint64 startTxId = 1;
  • <init>
  • equals
  • getDefaultInstance
  • getDescriptorForType
  • getSerializedSize
  • getUnknownFields
  • hasEndTxId
    required uint64 endTxId = 2;
  • hasIsInProgress
    required bool isInProgress = 3;
  • hasStartTxId
    required uint64 startTxId = 1;
  • hashBoolean
  • hashCode
  • hashLong
  • initFields
  • isInitialized
  • makeExtensionsImmutable
  • parseUnknownField
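
Together with newBuilder, these accessors cover the full round trip: the has* methods report whether a field was populated (which matters after buildPartial(), where required fields may be absent), isInitialized() checks all required fields at once, and the getters read the values back. A brief sketch, assuming state was built as in the first example:

// Guard reads with isInitialized()/has* when the message may be partial.
if (state.isInitialized()) {
  long txCount = state.getEndTxId() - state.getStartTxId() + 1;
  System.out.println("segment spans " + txCount + " transactions, inProgress="
      + state.getIsInProgress());
}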
