private Range<Long> txnRange(SegmentStateProto seg) {
  Preconditions.checkArgument(seg.hasEndTxId(),
      "invalid segment: %s ; journal id: %s", seg, journalId);
  return Range.between(seg.getStartTxId(), seg.getEndTxId());
}
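// Illustrative sketch, not from the Hadoop source: how the Range<Long>
// returned by txnRange(...) can be used to compare an on-disk segment against
// a proposed recovery segment. Assumes org.apache.commons.lang3.Range; the
// txid values below are made up.
import org.apache.commons.lang3.Range;

public class TxnRangeDemo {
  public static void main(String[] args) {
    Range<Long> current = Range.between(1L, 100L);  // hypothetical on-disk segment
    Range<Long> proposed = Range.between(1L, 90L);  // hypothetical recovery segment

    // The two segments cover overlapping txid ranges.
    System.out.println(current.isOverlappedBy(proposed)); // true

    // Sanity check: a committed txid inside the current segment must not be
    // discarded by accepting a shorter replacement segment.
    long committedTxId = 95L;
    if (current.contains(committedTxId) && !proposed.contains(committedTxId)) {
      System.out.println("would discard committed txid " + committedTxId);
    }
  }
}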
if (resp.hasLastCommittedTxId() &&
    resp.getLastCommittedTxId() > logToSync.getEndTxId()) {
  throw new AssertionError("Decided to synchronize log to " + logToSync +
      " but logger " + logger + " had seen txid " +
      resp.getLastCommittedTxId() + " committed");
}
// ...
QuorumCall<AsyncLogger, Void> finalize =
    loggers.finalizeLogSegment(logToSync.getStartTxId(), logToSync.getEndTxId());
loggers.waitForWriteQuorum(finalize, finalizeSegmentTimeoutMs,
    String.format("finalizeLogSegment(%s-%s)",
        logToSync.getStartTxId(), logToSync.getEndTxId()));
if (r1Seg.getEndTxId() != r2Seg.getEndTxId()) {
  throw new AssertionError("finalized segs with different lengths: " +
      r1 + ", " + r2);
}
// ...
return ComparisonChain.start()
    .compare(r1.getSegmentState().getEndTxId(),
        r2.getSegmentState().getEndTxId())
    .result();
assert acceptedState.getEndTxId() == segInfo.getEndTxId() :
    "prev accepted: " + TextFormat.shortDebugString(previouslyAccepted) + "\n" +
    "on disk: " + TextFormat.shortDebugString(segInfo);
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other =
      (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;

  boolean result = true;
  result = result && (hasStartTxId() == other.hasStartTxId());
  if (hasStartTxId()) {
    result = result && (getStartTxId() == other.getStartTxId());
  }
  result = result && (hasEndTxId() == other.hasEndTxId());
  if (hasEndTxId()) {
    result = result && (getEndTxId() == other.getEndTxId());
  }
  result = result && (hasIsInProgress() == other.hasIsInProgress());
  if (hasIsInProgress()) {
    result = result && (getIsInProgress() == other.getIsInProgress());
  }
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasStartTxId()) {
    hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getStartTxId());
  }
  if (hasEndTxId()) {
    hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getEndTxId());
  }
  if (hasIsInProgress()) {
    hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
    hash = (53 * hash) + hashBoolean(getIsInProgress());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
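// Illustrative sketch, not from the Hadoop source: the generated equals() and
// hashCode() above make value-equal SegmentStateProto messages interchangeable
// in hash-based collections. Uses only the generated builder API shown in
// these fragments; the txid values are made up.
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;

public class SegmentStateEqualityDemo {
  public static void main(String[] args) {
    SegmentStateProto a = SegmentStateProto.newBuilder()
        .setStartTxId(1).setEndTxId(100).setIsInProgress(false).build();
    SegmentStateProto b = SegmentStateProto.newBuilder()
        .setStartTxId(1).setEndTxId(100).setIsInProgress(false).build();

    Set<SegmentStateProto> set = new HashSet<>();
    set.add(a);
    set.add(b);
    // Field-by-field equality implies a.equals(b) and matching hash codes,
    // so the second add() is a no-op and the set holds one element.
    System.out.println(set.size()); // 1
  }
}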
/**
 * Test whether JNs can correctly handle an editlog that cannot be decoded.
 */
@Test
public void testScanEditLog() throws Exception {
  // use a future layout version
  journal.startLogSegment(makeRI(1), 1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);

  // in the segment we write garbage editlog, which can be scanned but
  // cannot be decoded
  final int numTxns = 5;
  byte[] ops = QJMTestUtil.createGabageTxns(1, numTxns);
  journal.journal(makeRI(2), 1, 1, numTxns, ops);

  // verify the in-progress editlog segment
  SegmentStateProto segmentState = journal.getSegmentInfo(1);
  assertTrue(segmentState.getIsInProgress());
  Assert.assertEquals(numTxns, segmentState.getEndTxId());
  Assert.assertEquals(1, segmentState.getStartTxId());

  // finalize the segment and verify it again
  journal.finalizeLogSegment(makeRI(3), 1, numTxns);
  segmentState = journal.getSegmentInfo(1);
  assertFalse(segmentState.getIsInProgress());
  Assert.assertEquals(numTxns, segmentState.getEndTxId());
  Assert.assertEquals(1, segmentState.getStartTxId());
}
public Builder mergeFrom(
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
  if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto
      .getDefaultInstance()) {
    return this;
  }
  if (other.hasStartTxId()) {
    setStartTxId(other.getStartTxId());
  }
  if (other.hasEndTxId()) {
    setEndTxId(other.getEndTxId());
  }
  if (other.hasIsInProgress()) {
    setIsInProgress(other.getIsInProgress());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
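// Illustrative sketch, not from the Hadoop source: mergeFrom() copies only the
// fields that are set on the other message, so set fields on the argument
// overwrite the builder's current values. The messages below are made up.
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;

public class MergeFromDemo {
  public static void main(String[] args) {
    SegmentStateProto finalized = SegmentStateProto.newBuilder()
        .setStartTxId(1).setEndTxId(50).setIsInProgress(false).build();

    SegmentStateProto merged = SegmentStateProto.newBuilder()
        .setStartTxId(1).setEndTxId(25).setIsInProgress(true)
        .mergeFrom(finalized)  // fields set on 'finalized' overwrite these
        .build();

    System.out.println(merged.getEndTxId());      // 50
    System.out.println(merged.getIsInProgress()); // false
  }
}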
private LongRange txnRange(SegmentStateProto seg) {
  Preconditions.checkArgument(seg.hasEndTxId(), "invalid segment: %s", seg);
  return new LongRange(seg.getStartTxId(), seg.getEndTxId());
}
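// Illustrative note, not from the Hadoop source: this older txnRange variant
// returns a commons-lang 2.x LongRange rather than a lang3 Range<Long>, and
// the containment/overlap calls differ in name. Values are made up.
import org.apache.commons.lang.math.LongRange;

public class LongRangeDemo {
  public static void main(String[] args) {
    LongRange current = new LongRange(1L, 100L);   // hypothetical on-disk segment
    LongRange proposed = new LongRange(1L, 90L);   // hypothetical recovery segment
    System.out.println(current.containsLong(95L));       // true
    System.out.println(current.overlapsRange(proposed)); // true
  }
}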
Preconditions.checkArgument(segment.getEndTxId() > 0 &&
    segment.getEndTxId() >= segmentTxId,
    "bad recovery state for segment %s: %s ; journal id: %s",
    segmentTxId, TextFormat.shortDebugString(segment), journalId);
// ...
SegmentStateProto currentSegment = getSegmentInfo(segmentTxId);
if (currentSegment == null ||
    currentSegment.getEndTxId() != segment.getEndTxId()) {
  if (currentSegment == null) {
    LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
        ": no current segment in place");
    // Update the highest txid for lag metrics.
    updateHighestWrittenTxId(Math.max(segment.getEndTxId(),
        highestWrittenTxId));
  } else {
    // ...
    updateHighestWrittenTxId(segment.getEndTxId());
  }
  // ...
}