private void testTruncate(List<LogEntryProto> entries, long fromIndex) throws Exception { try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // truncate the log raftLog.truncate(fromIndex).join(); checkEntries(raftLog, entries, 0, (int) fromIndex); } try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // check if the raft log is correct if (fromIndex > 0) { Assert.assertEquals(entries.get((int) (fromIndex - 1)), getLastEntry(raftLog)); } else { Assert.assertNull(raftLog.getLastEntryTermIndex()); } checkEntries(raftLog, entries, 0, (int) fromIndex); } }
@Override public List<CompletableFuture<Long>> append(LogEntryProto... entries) { checkLogState(); if (entries == null || entries.length == 0) { return Collections.emptyList(); try(AutoCloseableLock writeLock = writeLock()) { Iterator<TermIndex> iter = cache.iterator(entries[0].getIndex()); int index = 0; final LogEntryProto entry = get(storedEntry.getIndex()); server.failClientRequest(entry); } catch (RaftLogIOException e) { if (truncateIndex != -1) { futures = new ArrayList<>(entries.length - index + 1); futures.add(truncate(truncateIndex)); } else { futures = new ArrayList<>(entries.length - index); futures.add(appendEntry(entries[i]));
/**
 * Append the given entries under the write lock, truncating a conflicting
 * suffix of the existing log first when the cache reports a divergence.
 *
 * @param entries entries to append; null or empty means nothing to do
 * @return one future per issued operation (an optional truncate plus the appends)
 */
@Override
public List<CompletableFuture<Long>> appendImpl(LogEntryProto... entries) {
  checkLogState();
  if (entries == null || entries.length == 0) {
    return Collections.emptyList();
  }
  try (AutoCloseableLock writeLock = writeLock()) {
    // The cache decides where the existing log diverges from the incoming
    // entries, failing the client requests of entries that will be dropped.
    final TruncateIndices indices = cache.computeTruncateIndices(this::failClientRequest, entries);
    final long truncateIndex = indices.getTruncateIndex();
    final int arrayIndex = indices.getArrayIndex();
    LOG.debug("truncateIndex={}, arrayIndex={}", truncateIndex, arrayIndex);

    final boolean needTruncate = truncateIndex != -1;
    final List<CompletableFuture<Long>> futures =
        new ArrayList<>(entries.length - arrayIndex + (needTruncate? 1: 0));
    if (needTruncate) {
      futures.add(truncate(truncateIndex));
    }
    // append everything from the first entry not already in the log
    for (int i = arrayIndex; i < entries.length; i++) {
      futures.add(appendEntry(entries[i]));
    }
    return futures;
  }
}
/** Read back the entry at the log's last recorded (term, index). */
private LogEntryProto getLastEntry(SegmentedRaftLog raftLog) throws IOException {
  final TermIndex last = raftLog.getLastEntryTermIndex();
  return raftLog.get(last.getIndex());
}
// NOTE(review): this chunk is a truncated excerpt of a larger test method (its
// declaration and several enclosing braces are outside this view — note the
// orphaned "new SegmentedRaftLog(...)) {" headers and the dangling
// "getLastEntry(raftLog));" argument fragments). Left byte-identical; it cannot
// be safely rewritten without the missing surrounding code.
// What is visible: the mocked server's failClientRequest is made real, retry-cache
// entries are created for the prepared entries, newEntries are appended and joined,
// and after reopening the log the test checks entries [0,650), newEntries [100,200),
// the latest flushed index, and that the cache holds 5 segments.
doCallRealMethod().when(server).failClientRequest(any(LogEntryProto.class)); try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, server, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); entries.forEach(entry -> RetryCacheTestUtil.createEntry(retryCache, entry)); new SegmentedRaftLog(peerId, server, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); LOG.info("newEntries[0] = {}", newEntries.get(0)); final int last = newEntries.size() - 1; LOG.info("newEntries[{}] = {}", last, newEntries.get(last)); raftLog.append(newEntries.toArray(new LogEntryProto[0])).forEach(CompletableFuture::join); getLastEntry(raftLog)); Assert.assertEquals(newEntries.get(newEntries.size() - 1).getIndex(), raftLog.getLatestFlushedIndex()); new SegmentedRaftLog(peerId, server, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); checkEntries(raftLog, entries, 0, 650); checkEntries(raftLog, newEntries, 100, 100); getLastEntry(raftLog)); Assert.assertEquals(newEntries.get(newEntries.size() - 1).getIndex(), raftLog.getLatestFlushedIndex()); RaftLogCache cache = raftLog.getRaftLogCache(); Assert.assertEquals(5, cache.getNumOfSegments());
@Test public void testLoadLogSegments() throws Exception { // first generate log files List<SegmentRange> ranges = prepareRanges(5, 100, 0); LogEntryProto[] entries = prepareLog(ranges); // create RaftLog object and load log file try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // check if log entries are loaded correctly for (LogEntryProto e : entries) { LogEntryProto entry = raftLog.get(e.getIndex()); Assert.assertEquals(e, entry); } TermIndex[] termIndices = raftLog.getEntries(0, 500); LogEntryProto[] entriesFromLog = Arrays.stream(termIndices) .map(ti -> { try { return raftLog.get(ti.getIndex()); } catch (IOException e) { throw new RuntimeException(e); } }) .toArray(LogEntryProto[]::new); Assert.assertArrayEquals(entries, entriesFromLog); Assert.assertEquals(entries[entries.length - 1], getLastEntry(raftLog)); } }
// NOTE(review): truncated excerpt of one or more test methods — it opens with three
// orphaned "new SegmentedRaftLog(...)) {" headers and both try blocks are unclosed
// (the expected catch of an IllegalStateException, presumably assigned to "ex", is
// outside this view). Left byte-identical; cannot be rewritten safely.
// What is visible: the test appends an entry whose term is LOWER than the last term
// (term - 1, index + 1), then an entry that SKIPS an index (same term, index + 2) —
// both are invalid appends that the log is apparently expected to reject.
new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); TermIndex lastTermIndex = raftLog.getLastEntryTermIndex(); IllegalStateException ex = null; try { raftLog.appendEntry(LogEntryProto.newBuilder(entries.get(0)) .setTerm(lastTermIndex.getTerm() - 1) .setIndex(lastTermIndex.getIndex() + 1).build()); try { raftLog.appendEntry(LogEntryProto.newBuilder(entries.get(0)) .setTerm(lastTermIndex.getTerm()) .setIndex(lastTermIndex.getIndex() + 2).build());
/** * Keep appending entries, make sure the rolling is correct. */ @Test public void testAppendAndRoll() throws Exception { RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf("16KB")); RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf("128KB")); List<SegmentRange> ranges = prepareRanges(0, 1, 1024, 0); final byte[] content = new byte[1024]; List<LogEntryProto> entries = prepareLogEntries(ranges, () -> new String(content)); try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // append entries to the raftlog entries.stream().map(raftLog::appendEntry).forEach(CompletableFuture::join); } try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // check if the raft log is correct checkEntries(raftLog, entries, 0, entries.size()); Assert.assertEquals(9, raftLog.getRaftLogCache().getNumOfSegments()); } }
// NOTE(review): truncated excerpt of a test method — the locals "sm", "entries",
// "flush" and "next" are declared outside this view and the try block is unclosed.
// Left byte-identical; cannot be rewritten safely.
// What is visible: entries are appended one at a time while assertIndices checks
// that the flushed index ("flush") lags the appended index ("next"); after three
// more appends, assertIndicesMultipleAttempts expects flush to catch up by 3.
try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, sm, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); raftLog.appendEntry(entries.get(next++)); assertIndices(raftLog, flush, next); raftLog.appendEntry(entries.get(next++)); assertIndices(raftLog, flush, next); raftLog.appendEntry(entries.get(next++)); assertIndicesMultipleAttempts(raftLog, flush += 3, next); raftLog.appendEntry(entries.get(next++));
/** * Append entry one by one and check if log state is correct. */ @Test public void testAppendEntry() throws Exception { List<SegmentRange> ranges = prepareRanges(5, 200, 0); List<LogEntryProto> entries = prepareLogEntries(ranges, null); try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // append entries to the raftlog entries.stream().map(raftLog::appendEntry).forEach(CompletableFuture::join); } try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // check if the raft log is correct checkEntries(raftLog, entries, 0, entries.size()); } }
/**
 * Fetch the entry at {@code index}, attaching its state machine data when the
 * entry requires it.
 *
 * @param index log index to read
 * @return the entry, paired with state machine data when applicable
 * @throws RaftLogIOException if reading the state machine data fails
 */
@Override
public EntryWithData getEntryWithData(long index) throws RaftLogIOException {
  final LogEntryProto entry = get(index);
  if (!ServerProtoUtils.shouldReadStateMachineData(entry)) {
    // plain entry: no state machine data to attach
    return new EntryWithData(entry, null);
  }
  try {
    // server is optional; without one there is no state machine to read from
    return new EntryWithData(entry,
        server.map(s -> s.getStateMachine().readStateMachineData(entry)).orElse(null));
  } catch (Throwable t) {
    final String message = getSelfId() + ": Failed readStateMachineData for " +
        ServerProtoUtils.toLogEntryString(entry);
    LOG.error(message, t);
    // surface the root cause rather than a CompletionException wrapper
    throw new RaftLogIOException(message, JavaUtils.unwrapCompletionException(t));
  }
}
/**
 * Create and open the raft log.
 *
 * Note that we do not apply log entries to the state machine here since we do
 * not know whether they have been committed.
 */
private RaftLog initLog(RaftPeerId id, RaftProperties prop, long lastIndexInSnapshot,
    Consumer<LogEntryProto> logConsumer) throws IOException {
  // in-memory log is bounded by the configured appender buffer capacity
  final RaftLog log = RaftServerConfigKeys.Log.useMemory(prop)
      ? new MemoryRaftLog(id, RaftServerConfigKeys.Log.Appender.bufferCapacity(prop).getSizeInt())
      : new SegmentedRaftLog(id, server, this.storage, lastIndexInSnapshot, prop);
  log.open(lastIndexInSnapshot, logConsumer);
  return log;
}
// NOTE(review): truncated excerpt of a larger test method, a near-duplicate of the
// fragment earlier in this file (orphaned "new SegmentedRaftLog(...)) {" headers and
// dangling "getLastEntry(raftLog));" argument fragments show the enclosing code is
// outside this view). Left byte-identical; cannot be rewritten safely.
// What is visible: the mocked server's failClientRequest is made real, retry-cache
// entries are created, newEntries are appended and joined, and after reopening the
// test checks entries [0,650), newEntries [100,200), the latest flushed index, and
// that the cache holds 5 segments.
doCallRealMethod().when(server).failClientRequest(any(LogEntryProto.class)); try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, server, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); entries.stream().forEach(entry -> RetryCacheTestUtil.createEntry(retryCache, entry)); new SegmentedRaftLog(peerId, server, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); raftLog.append(newEntries.toArray(new LogEntryProto[newEntries.size()])).forEach(CompletableFuture::join); getLastEntry(raftLog)); Assert.assertEquals(newEntries.get(newEntries.size() - 1).getIndex(), raftLog.getLatestFlushedIndex()); new SegmentedRaftLog(peerId, server, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); checkEntries(raftLog, entries, 0, 650); checkEntries(raftLog, newEntries, 100, 100); getLastEntry(raftLog)); Assert.assertEquals(newEntries.get(newEntries.size() - 1).getIndex(), raftLog.getLatestFlushedIndex()); RaftLogCache cache = raftLog.getRaftLogCache(); Assert.assertEquals(5, cache.getNumOfSegments());
@Test public void testLoadLogSegments() throws Exception { // first generate log files List<SegmentRange> ranges = prepareRanges(0, 5, 100, 0); LogEntryProto[] entries = prepareLog(ranges); // create RaftLog object and load log file try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // check if log entries are loaded correctly for (LogEntryProto e : entries) { LogEntryProto entry = raftLog.get(e.getIndex()); Assert.assertEquals(e, entry); } TermIndex[] termIndices = raftLog.getEntries(0, 500); LogEntryProto[] entriesFromLog = Arrays.stream(termIndices) .map(ti -> { try { return raftLog.get(ti.getIndex()); } catch (IOException e) { throw new RuntimeException(e); } }) .toArray(LogEntryProto[]::new); Assert.assertArrayEquals(entries, entriesFromLog); Assert.assertEquals(entries[entries.length - 1], getLastEntry(raftLog)); } }
/** * Keep appending entries, make sure the rolling is correct. */ @Test public void testAppendAndRoll() throws Exception { RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf("16KB")); RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf("128KB")); List<SegmentRange> ranges = prepareRanges(1, 1024, 0); final byte[] content = new byte[1024]; List<LogEntryProto> entries = prepareLogEntries(ranges, () -> new String(content)); try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // append entries to the raftlog entries.stream().map(raftLog::appendEntry).forEach(CompletableFuture::join); } try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // check if the raft log is correct checkEntries(raftLog, entries, 0, entries.size()); Assert.assertEquals(9, raftLog.getRaftLogCache().getNumOfSegments()); } }
@Test public void testSegmentedRaftLogStateMachineDataTimeoutIOException() throws Exception { RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); final TimeDuration syncTimeout = TimeDuration.valueOf(100, TimeUnit.MILLISECONDS); RaftServerConfigKeys.Log.StateMachineData.setSyncTimeout(properties, syncTimeout); final int numRetries = 2; RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, numRetries); ExitUtils.disableSystemExit(); final LogEntryProto entry = prepareLogEntry(0, 0, null, true); final StateMachine sm = new BaseStateMachine() { @Override public CompletableFuture<?> writeStateMachineData(LogEntryProto entry) { return new CompletableFuture<>(); // the future never completes } }; try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, sm, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); raftLog.appendEntry(entry); // RaftLogWorker should catch TimeoutIOException JavaUtils.attempt(() -> { final ExitUtils.ExitException exitException = ExitUtils.getFirstExitException(); Objects.requireNonNull(exitException, "exitException == null"); Assert.assertEquals(TimeoutIOException.class, exitException.getCause().getClass()); }, 3*numRetries, syncTimeout, "RaftLogWorker should catch TimeoutIOException and exit", LOG); ExitUtils.clear(); } }
@Test public void testTruncate() throws Exception { // prepare the log for truncation List<SegmentRange> ranges = prepareRanges(0, 5, 200, 0); List<LogEntryProto> entries = prepareLogEntries(ranges, null); try (SegmentedRaftLog raftLog = new SegmentedRaftLog(peerId, null, storage, -1, properties)) { raftLog.open(RaftServerConstants.INVALID_LOG_INDEX, null); // append entries to the raftlog entries.stream().map(raftLog::appendEntry).forEach(CompletableFuture::join); } for (long fromIndex = 900; fromIndex >= 0; fromIndex -= 150) { testTruncate(entries, fromIndex); } }
/** Read back the entry stored at the log's last (term, index) pair. */
private LogEntryProto getLastEntry(SegmentedRaftLog raftLog) throws IOException {
  final TermIndex lastTermIndex = raftLog.getLastEntryTermIndex();
  return raftLog.get(lastTermIndex.getIndex());
}
/**
 * Create and open the raft log.
 *
 * Note that we do not apply log entries to the state machine here since we do
 * not know whether they have been committed.
 */
private RaftLog initLog(RaftPeerId id, RaftProperties prop, long lastIndexInSnapshot,
    Consumer<LogEntryProto> logConsumer) throws IOException {
  // in-memory log is bounded by the configured appender buffer byte limit
  final RaftLog log = RaftServerConfigKeys.Log.useMemory(prop)
      ? new MemoryRaftLog(id, lastIndexInSnapshot,
          RaftServerConfigKeys.Log.Appender.bufferByteLimit(prop).getSizeInt())
      : new SegmentedRaftLog(id, server, this.storage, lastIndexInSnapshot, prop);
  log.open(lastIndexInSnapshot, logConsumer);
  return log;
}