@Override
public void recoverUnfinalizedSegments() throws IOException {
  Preconditions.checkState(!isActiveWriter, "already active writer");

  LOG.info("Starting recovery process for unclosed journal segments...");
  Map<AsyncLogger, NewEpochResponseProto> resps = createNewUniqueEpoch();
  LOG.info("Successfully started new epoch " + loggers.getEpoch());

  if (LOG.isDebugEnabled()) {
    LOG.debug("newEpoch(" + loggers.getEpoch() + ") responses:\n" +
        QuorumCall.mapToString(resps));
  }

  long mostRecentSegmentTxId = Long.MIN_VALUE;
  for (NewEpochResponseProto r : resps.values()) {
    if (r.hasLastSegmentTxId()) {
      mostRecentSegmentTxId = Math.max(mostRecentSegmentTxId,
          r.getLastSegmentTxId());
    }
  }

  // On a completely fresh system, none of the journals have any
  // segments, so there's nothing to recover.
  if (mostRecentSegmentTxId != Long.MIN_VALUE) {
    recoverUnclosedSegment(mostRecentSegmentTxId);
  }

  isActiveWriter = true;
}
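// Minimal sketch (not from the source): the builder/accessor pattern the
// recovery loop above relies on. setLastSegmentTxId() is assumed to be the
// standard generated-protobuf setter matching the hasLastSegmentTxId()/
// getLastSegmentTxId() accessors seen elsewhere in this section; the txid 25
// is an arbitrary illustrative value.
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;

class NewEpochResponseSketch {
  static void demo() {
    // A journal that has an unclosed (or any) segment reports its txid.
    NewEpochResponseProto withSegment = NewEpochResponseProto.newBuilder()
        .setLastSegmentTxId(25L)
        .build();
    // A journal that has never started a segment simply omits the optional field.
    NewEpochResponseProto fresh = NewEpochResponseProto.newBuilder().build();

    assert withSegment.hasLastSegmentTxId() && withSegment.getLastSegmentTxId() == 25L;
    // An all-fresh quorum leaves mostRecentSegmentTxId at Long.MIN_VALUE above,
    // so no recovery is attempted.
    assert !fresh.hasLastSegmentTxId();
  }
}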
NewEpochResponseProto.newBuilder();
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
    case 9:
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
    case 10:
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        // (message field cases omitted in this excerpt)
      }
    }
  } finally {
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
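// Minimal round-trip sketch (not from the source): re-parsing serialized bytes
// is what drives the generated parsing constructor excerpted above.
// parseFrom(byte[]) and toByteArray() are standard generated-protobuf entry
// points; the txid value is illustrative only.
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;

class NewEpochResponseRoundTrip {
  static void demo() throws Exception {
    NewEpochResponseProto original = NewEpochResponseProto.newBuilder()
        .setLastSegmentTxId(1L)
        .build();
    byte[] wire = original.toByteArray();

    NewEpochResponseProto reparsed = NewEpochResponseProto.parseFrom(wire);
    assert reparsed.getLastSegmentTxId() == original.getLastSegmentTxId();
  }
}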
NewEpochResponseProto newEpoch = journal.newEpoch(FAKE_NSINFO, 1);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(1, journal.getLastPromisedEpoch());

journal.newEpoch(FAKE_NSINFO, 3);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(3, journal.getLastPromisedEpoch());

try {
@Test(timeout=100000)
public void testReturnsSegmentInfoAtEpochTransition() throws Exception {
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1, 2, QJMTestUtil.createTxnData(1, 2)).get();

  // Switch to a new epoch without closing earlier segment
  NewEpochResponseProto response = ch.newEpoch(2).get();
  ch.setEpoch(2);
  assertEquals(1, response.getLastSegmentTxId());

  ch.finalizeLogSegment(1, 2).get();

  // Switch to a new epoch after just closing the earlier segment.
  response = ch.newEpoch(3).get();
  ch.setEpoch(3);
  assertEquals(1, response.getLastSegmentTxId());

  // Start a segment but don't write anything, check newEpoch segment info
  ch.startLogSegment(3, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  response = ch.newEpoch(4).get();
  ch.setEpoch(4);
  // Because the new segment is empty, it is equivalent to not having
  // started writing it. Hence, we should return the prior segment txid.
  assertEquals(1, response.getLastSegmentTxId());
}
@Before
public void setup() throws Exception {
  spyLoggers = ImmutableList.of(
      mockLogger(),
      mockLogger(),
      mockLogger());

  qjm = new QuorumJournalManager(conf, new URI("qjournal://host/jid"), FAKE_NSINFO) {
    @Override
    protected List<AsyncLogger> createLoggers(AsyncLogger.Factory factory) {
      return spyLoggers;
    }
  };

  for (AsyncLogger logger : spyLoggers) {
    futureReturns(GetJournalStateResponseProto.newBuilder()
        .setLastPromisedEpoch(0)
        .setHttpPort(-1)
        .build())
        .when(logger).getJournalState();

    futureReturns(
        NewEpochResponseProto.newBuilder().build()
    ).when(logger).newEpoch(Mockito.anyLong());

    futureReturns(null).when(logger).format(Mockito.<NamespaceInfo>any());
  }

  qjm.recoverUnfinalizedSegments();
}
/**
 * Test that, if the writer crashes at the very beginning of a segment,
 * before any transactions are written, the next newEpoch() call
 * returns the prior segment txid as its most recent segment.
 */
@Test(timeout = 10000)
public void testNewEpochAtBeginningOfSegment() throws Exception {
  journal.newEpoch(FAKE_NSINFO, 1);
  journal.startLogSegment(makeRI(1), 1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(2), 1, 1, 2,
      QJMTestUtil.createTxnData(1, 2));
  journal.finalizeLogSegment(makeRI(3), 1, 2);
  journal.startLogSegment(makeRI(4), 3,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

  NewEpochResponseProto resp = journal.newEpoch(FAKE_NSINFO, 2);
  assertEquals(1, resp.getLastSegmentTxId());
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other =
      (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;

  boolean result = true;
  result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
  if (hasLastSegmentTxId()) {
    result = result && (getLastSegmentTxId() == other.getLastSegmentTxId());
  }
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}
public Builder toBuilder() { return newBuilder(this); }