Tabnine Logo
QJournalProtocolProtos$SegmentStateProto
Code Index — Add Tabnine to your IDE (free)

How to use
QJournalProtocolProtos$SegmentStateProto
in
org.apache.hadoop.hdfs.qjournal.protocol

Best Java code snippets using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos$SegmentStateProto (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

assert segmentTxId == logToSync.getStartTxId();
   resp.getLastCommittedTxId() > logToSync.getEndTxId()) {
  throw new AssertionError("Decided to synchronize log to " + logToSync +
    " but logger " + logger + " had seen txid " +
  loggers.finalizeLogSegment(logToSync.getStartTxId(), logToSync.getEndTxId()); 
loggers.waitForWriteQuorum(finalize, finalizeSegmentTimeoutMs,
  String.format("finalizeLogSegment(%s-%s)",
    logToSync.getStartTxId(),
    logToSync.getEndTxId()));
origin: org.apache.hadoop/hadoop-hdfs

long segmentTxId = segment.getStartTxId();
Preconditions.checkArgument(segment.getEndTxId() > 0 &&
  segment.getEndTxId() >= segmentTxId,
  "bad recovery state for segment %s: %s ; journal id: %s",
  segmentTxId, TextFormat.shortDebugString(segment), journalId);
  currentSegment.getEndTxId() != segment.getEndTxId()) {
 if (currentSegment == null) {
  LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
  updateHighestWrittenTxId(Math.max(segment.getEndTxId(),
    highestWrittenTxId));
 } else {
  alwaysAssert(currentSegment.getIsInProgress(),
    "Should never be asked to synchronize a different log on top of " +
    "an already-finalized segment ; journal id: " + journalId);
   updateHighestWrittenTxId(segment.getEndTxId());
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Look up the on-disk state of a single edit-log segment.
 *
 * An in-progress segment is scanned first so its last txid is known;
 * a segment whose last txid is still invalid is treated as empty,
 * moved aside, and reported as absent.
 *
 * @param segmentTxId the start txid identifying the segment
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 * @throws IOException if scanning the log file fails
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId)
  throws IOException {
  EditLogFile logFile = fjm.getLogFile(segmentTxId);
  if (logFile == null) {
    // No file on disk for this txid: segment does not exist.
    return null;
  }
  if (logFile.isInProgress()) {
    // Scan to determine the real last txid of an unfinalized segment.
    logFile.scanLog(Long.MAX_VALUE, false);
  }
  boolean segmentIsEmpty =
    logFile.getLastTxId() == HdfsServerConstants.INVALID_TXID;
  if (segmentIsEmpty) {
    LOG.info("Edit log file " + logFile + " appears to be empty. " +
      "Moving it aside..." + " ; journal id: " + journalId);
    logFile.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto state = SegmentStateProto.newBuilder()
    .setStartTxId(segmentTxId)
    .setEndTxId(logFile.getLastTxId())
    .setIsInProgress(logFile.isInProgress())
    .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + logFile + " -> " +
    TextFormat.shortDebugString(state) + " ; journal id: " + journalId);
  return state;
}
origin: ch.cern.hadoop/hadoop-hdfs

assert segmentTxId == logToSync.getStartTxId();
   resp.getLastCommittedTxId() > logToSync.getEndTxId()) {
  throw new AssertionError("Decided to synchronize log to " + logToSync +
    " but logger " + logger + " had seen txid " +
  loggers.finalizeLogSegment(logToSync.getStartTxId(), logToSync.getEndTxId()); 
loggers.waitForWriteQuorum(finalize, finalizeSegmentTimeoutMs,
  String.format("finalizeLogSegment(%s-%s)",
    logToSync.getStartTxId(),
    logToSync.getEndTxId()));
origin: ch.cern.hadoop/hadoop-hdfs

long segmentTxId = segment.getStartTxId();
Preconditions.checkArgument(segment.getEndTxId() > 0 &&
  segment.getEndTxId() >= segmentTxId,
  "bad recovery state for segment %s: %s",
  segmentTxId, TextFormat.shortDebugString(segment));
  currentSegment.getEndTxId() != segment.getEndTxId()) {
 if (currentSegment == null) {
  LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
  highestWrittenTxId = Math.max(segment.getEndTxId(),
    highestWrittenTxId);
 } else {
  alwaysAssert(currentSegment.getIsInProgress(),
    "Should never be asked to synchronize a different log on top of an " +
    "already-finalized segment");
   highestWrittenTxId = segment.getEndTxId();
origin: io.prestosql.hadoop/hadoop-apache

long segmentTxId = segment.getStartTxId();
Preconditions.checkArgument(segment.getEndTxId() > 0 &&
  segment.getEndTxId() >= segmentTxId,
  "bad recovery state for segment %s: %s",
  segmentTxId, TextFormat.shortDebugString(segment));
  currentSegment.getEndTxId() != segment.getEndTxId()) {
 if (currentSegment == null) {
  LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
  highestWrittenTxId = Math.max(segment.getEndTxId(),
    highestWrittenTxId);
 } else {
  alwaysAssert(currentSegment.getIsInProgress(),
    "Should never be asked to synchronize a different log on top of an " +
    "already-finalized segment");
   highestWrittenTxId = segment.getEndTxId();
origin: io.prestosql.hadoop/hadoop-apache

assert segmentTxId == logToSync.getStartTxId();
   resp.getLastCommittedTxId() > logToSync.getEndTxId()) {
  throw new AssertionError("Decided to synchronize log to " + logToSync +
    " but logger " + logger + " had seen txid " +
  loggers.finalizeLogSegment(logToSync.getStartTxId(), logToSync.getEndTxId()); 
loggers.waitForWriteQuorum(finalize, finalizeSegmentTimeoutMs,
  String.format("finalizeLogSegment(%s-%s)",
    logToSync.getStartTxId(),
    logToSync.getEndTxId()));
origin: ch.cern.hadoop/hadoop-hdfs

prep = ch.prepareRecovery(1L).get();
assertEquals(1L, prep.getAcceptedInEpoch());
assertEquals(1L, prep.getSegmentState().getEndTxId());
origin: ch.cern.hadoop/hadoop-hdfs

Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_3 =
  makeEntry(PrepareRecoveryResponseProto.newBuilder()
   .setSegmentState(SegmentStateProto.newBuilder()
     .setStartTxId(1L)
     .setEndTxId(3L)
Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4 =
  makeEntry(PrepareRecoveryResponseProto.newBuilder()
   .setSegmentState(SegmentStateProto.newBuilder()
     .setStartTxId(1L)
     .setEndTxId(4L)
Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4_ACCEPTED =
  makeEntry(PrepareRecoveryResponseProto.newBuilder()
   .setSegmentState(SegmentStateProto.newBuilder()
     .setStartTxId(1L)
     .setEndTxId(4L)
   .setSegmentState(SegmentStateProto.newBuilder()
     .setStartTxId(1L)
     .setEndTxId(3L)
origin: io.prestosql.hadoop/hadoop-apache

SegmentStateProto r2Seg = r2.getSegmentState();
Preconditions.checkArgument(r1Seg.getStartTxId() == r2Seg.getStartTxId(),
  "Should only be called with responses for corresponding segments: " +
  "%s and %s do not have the same start txid.", r1, r2);
if (r1Seg.getIsInProgress() != r2Seg.getIsInProgress()) {
 return Booleans.compare(!r1Seg.getIsInProgress(), !r2Seg.getIsInProgress());
if (!r1Seg.getIsInProgress()) {
 if (r1Seg.getEndTxId() != r2Seg.getEndTxId()) {
  throw new AssertionError("finalized segs with different lengths: " + 
    r1 + ", " + r2);
  .compare(r1.getSegmentState().getEndTxId(), r2.getSegmentState().getEndTxId())
  .result();
origin: io.prestosql.hadoop/hadoop-apache

boolean hasFinalizedSegment = segInfo != null && !segInfo.getIsInProgress();
 assert acceptedState.getEndTxId() == segInfo.getEndTxId() :
    "prev accepted: " + TextFormat.shortDebugString(previouslyAccepted)+ "\n" +
    "on disk:       " + TextFormat.shortDebugString(segInfo);
origin: ch.cern.hadoop/hadoop-hdfs

if (hasSegmentState()) {
 result = result && getSegmentState()
   .equals(other.getSegmentState());
origin: ch.cern.hadoop/hadoop-hdfs

 final SegmentStateProto segment, final URL url) throws IOException {
final File tmpFile = storage.getSyncLogTemporaryFile(
  segment.getStartTxId(), reqInfo.getEpoch());
final List<File> localPaths = ImmutableList.of(tmpFile);
origin: io.prestosql.hadoop/hadoop-apache

 final SegmentStateProto segment, final URL url) throws IOException {
final File tmpFile = storage.getSyncLogTemporaryFile(
  segment.getStartTxId(), reqInfo.getEpoch());
final List<File> localPaths = ImmutableList.of(tmpFile);
origin: ch.cern.hadoop/hadoop-hdfs

SegmentStateProto r2Seg = r2.getSegmentState();
Preconditions.checkArgument(r1Seg.getStartTxId() == r2Seg.getStartTxId(),
  "Should only be called with responses for corresponding segments: " +
  "%s and %s do not have the same start txid.", r1, r2);
if (r1Seg.getIsInProgress() != r2Seg.getIsInProgress()) {
 return Booleans.compare(!r1Seg.getIsInProgress(), !r2Seg.getIsInProgress());
if (!r1Seg.getIsInProgress()) {
 if (r1Seg.getEndTxId() != r2Seg.getEndTxId()) {
  throw new AssertionError("finalized segs with different lengths: " + 
    r1 + ", " + r2);
  .compare(r1.getSegmentState().getEndTxId(), r2.getSegmentState().getEndTxId())
  .result();
origin: ch.cern.hadoop/hadoop-hdfs

long segmentId = paxosData.getSegmentState().getStartTxId();
long epoch = paxosData.getAcceptedInEpoch();
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Value equality for {@code AcceptRecoveryRequestProto}: two messages are
 * equal when each optional field has the same presence and, where present,
 * the same value, and their unknown field sets match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
 if (obj == this) {
  return true;
 }
 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
  // Defer to the superclass for non-message comparands.
  return super.equals(obj);
 }
 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto that = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;
 // Each field must agree on presence, and on value when present.
 if (hasReqInfo() != that.hasReqInfo()) {
  return false;
 }
 if (hasReqInfo() && !getReqInfo().equals(that.getReqInfo())) {
  return false;
 }
 if (hasStateToAccept() != that.hasStateToAccept()) {
  return false;
 }
 if (hasStateToAccept() && !getStateToAccept().equals(that.getStateToAccept())) {
  return false;
 }
 if (hasFromURL() != that.hasFromURL()) {
  return false;
 }
 if (hasFromURL() && !getFromURL().equals(that.getFromURL())) {
  return false;
 }
 return getUnknownFields().equals(that.getUnknownFields());
}
origin: io.prestosql.hadoop/hadoop-apache

if (hasSegmentState()) {
 result = result && getSegmentState()
   .equals(other.getSegmentState());
origin: ch.cern.hadoop/hadoop-hdfs

boolean hasFinalizedSegment = segInfo != null && !segInfo.getIsInProgress();
 assert acceptedState.getEndTxId() == segInfo.getEndTxId() :
    "prev accepted: " + TextFormat.shortDebugString(previouslyAccepted)+ "\n" +
    "on disk:       " + TextFormat.shortDebugString(segInfo);
origin: io.prestosql.hadoop/hadoop-apache

long segmentId = paxosData.getSegmentState().getStartTxId();
long epoch = paxosData.getAcceptedInEpoch();
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos$SegmentStateProto

Javadoc

Protobuf type hadoop.hdfs.qjournal.SegmentStateProto

Most used methods

  • getEndTxId
    required uint64 endTxId = 2;
  • getIsInProgress
    required bool isInProgress = 3;
  • getStartTxId
    required uint64 startTxId = 1;
  • newBuilder
  • <init>
  • equals
  • getDefaultInstance
  • getDescriptorForType
  • getSerializedSize
  • getUnknownFields
  • hasEndTxId
    required uint64 endTxId = 2;
  • hasIsInProgress
    required bool isInProgress = 3;
  • hasEndTxId,
  • hasIsInProgress,
  • hasStartTxId,
  • hashBoolean,
  • hashCode,
  • hashLong,
  • initFields,
  • isInitialized,
  • makeExtensionsImmutable,
  • parseUnknownField

Popular in Java

  • Reactive rest calls using spring rest template
  • onRequestPermissionsResult (Fragment)
  • addToBackStack (FragmentTransaction)
  • getSharedPreferences (Context)
  • FlowLayout (java.awt)
    A flow layout arranges components in a left-to-right flow, much like lines of text in a paragraph. F
  • Graphics2D (java.awt)
    This Graphics2D class extends the Graphics class to provide more sophisticated control overgraphics
  • Connection (java.sql)
    A connection represents a link from a Java application to a database. All SQL statements and results
  • Format (java.text)
    The base class for all formats. This is an abstract base class which specifies the protocol for clas
  • Semaphore (java.util.concurrent)
    A counting semaphore. Conceptually, a semaphore maintains a set of permits. Each #acquire blocks if
  • JOptionPane (javax.swing)
  • Top Sublime Text plugins
Tabnine Logo
  • Products

Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now