/**
 * Appends the given entries, truncating any conflicting suffix of the existing
 * log first. Returns one future per I/O task submitted (the optional truncation
 * plus each appended entry), each completing with the corresponding log index.
 */
@Override
public List<CompletableFuture<Long>> appendImpl(LogEntryProto... entries) {
  checkLogState();
  if (entries == null || entries.length == 0) {
    return Collections.emptyList();
  }
  try (AutoCloseableLock writeLock = writeLock()) {
    // Decide where the incoming entries diverge from what is already stored:
    // truncationPoint is the log index to cut back to (-1 if no conflict),
    // firstToAppend is the first array position that actually needs appending.
    final TruncateIndices indices = cache.computeTruncateIndices(this::failClientRequest, entries);
    final long truncationPoint = indices.getTruncateIndex();
    final int firstToAppend = indices.getArrayIndex();
    LOG.debug("truncateIndex={}, arrayIndex={}", truncationPoint, firstToAppend);

    final boolean needTruncation = truncationPoint != -1;
    // Presize: one slot per entry to append, plus one for the truncation task.
    final List<CompletableFuture<Long>> futures =
        new ArrayList<>(entries.length - firstToAppend + (needTruncation ? 1 : 0));
    if (needTruncation) {
      futures.add(truncate(truncationPoint));
    }
    for (int i = firstToAppend; i < entries.length; i++) {
      futures.add(appendEntry(entries[i]));
    }
    return futures;
  }
}
/**
 * Truncates the log back to (and excluding) {@code index}.
 * <p>
 * This method, along with {@link #appendEntry} and
 * {@link #append(LogEntryProto...)}, needs the protection of RaftServer's lock.
 *
 * @return a future completing with {@code index}; if segments actually had to
 *         be truncated, the future completes once the worker finishes the I/O.
 */
@Override
CompletableFuture<Long> truncate(long index) {
  checkLogState();
  try (AutoCloseableLock writeLock = writeLock()) {
    final RaftLogCache.TruncationSegments segmentsToTruncate = cache.truncate(index);
    if (segmentsToTruncate != null) {
      // Hand the on-disk truncation to the async log worker.
      return fileLogWorker.truncate(segmentsToTruncate).getFuture();
    }
  }
  // Nothing to truncate — the log already ends before the given index.
  return CompletableFuture.completedFuture(index);
}
/**
 * Truncates the log back to (and excluding) {@code index}.
 * <p>
 * This method, along with {@link #appendEntry} and
 * {@link #append(LogEntryProto...)}, needs the protection of RaftServer's lock.
 *
 * @return a future completing with {@code index}; if segments actually had to
 *         be truncated, the future completes once the worker finishes the I/O.
 */
@Override
CompletableFuture<Long> truncateImpl(long index) {
  checkLogState();
  try (AutoCloseableLock writeLock = writeLock()) {
    final RaftLogCache.TruncationSegments segmentsToTruncate = cache.truncate(index);
    if (segmentsToTruncate != null) {
      // Hand the on-disk truncation to the async log worker, tagged with the
      // target index so the resulting future resolves to it.
      return fileLogWorker.truncate(segmentsToTruncate, index).getFuture();
    }
  }
  // Nothing to truncate — the log already ends before the given index.
  return CompletableFuture.completedFuture(index);
}
/**
 * Closes the log: releases the in-memory cache under the write lock, then
 * shuts down the log worker and the underlying storage.
 * <p>
 * Fix: the original closed the three resources sequentially, so an exception
 * from {@code super.close()} or {@code cache.clear()} left {@code fileLogWorker}
 * and {@code storage} open (leaked file handles / worker thread). The nested
 * finally blocks guarantee every close is attempted; the first exception
 * propagates to the caller.
 */
@Override
public void close() throws IOException {
  try (AutoCloseableLock writeLock = writeLock()) {
    super.close();
    cache.clear();
  } finally {
    try {
      fileLogWorker.close();
    } finally {
      storage.close();
    }
  }
}
/**
 * Closes the log: releases the in-memory cache under the write lock, then
 * closes the log worker and the underlying storage.
 * <p>
 * NOTE(review): the closes are sequential — if {@code super.close()} or
 * {@code cache.clear()} throws, {@code fileLogWorker} and {@code storage}
 * are never closed. Consider try/finally so every close is attempted.
 */
@Override public void close() throws IOException { try(AutoCloseableLock writeLock = writeLock()) { super.close(); cache.clear(); } fileLogWorker.close(); storage.close(); }
private void loadLogSegments(long lastIndexInSnapshot, Consumer<LogEntryProto> logConsumer) throws IOException { try(AutoCloseableLock writeLock = writeLock()) { List<LogPathAndIndex> paths = storage.getStorageDir().getLogSegmentFiles(); int i = 0; for (LogPathAndIndex pi : paths) { boolean isOpen = pi.endIndex == RaftServerConstants.INVALID_LOG_INDEX; // During the initial loading, we can only confirm the committed // index based on the snapshot. This means if a log segment is not kept // in cache after the initial loading, later we have to load its content // again for updating the state machine. // TODO we should let raft peer persist its committed index periodically // so that during the initial loading we can apply part of the log // entries to the state machine boolean keepEntryInCache = (paths.size() - i++) <= cache.getMaxCachedSegments(); cache.loadSegment(pi, isOpen, keepEntryInCache, logConsumer); } // if the largest index is smaller than the last index in snapshot, we do // not load the log to avoid holes between log segments. This may happen // when the local I/O worker is too slow to persist log (slower than // committing the log and taking snapshot) if (!cache.isEmpty() && cache.getEndIndex() < lastIndexInSnapshot) { LOG.warn("End log index {} is smaller than last index in snapshot {}", cache.getEndIndex(), lastIndexInSnapshot); cache.clear(); // TODO purge all segment files } } }
/**
 * Loads all on-disk log segments into the cache at startup, feeding each
 * entry to {@code logConsumer}, then discards everything if the loaded log
 * ends before the snapshot (to avoid a hole between snapshot and log).
 */
private void loadLogSegments(long lastIndexInSnapshot, Consumer<LogEntryProto> logConsumer) throws IOException { try(AutoCloseableLock writeLock = writeLock()) { List<LogPathAndIndex> paths = storage.getStorageDir().getLogSegmentFiles(); int i = 0; for (LogPathAndIndex pi : paths) { // During the initial loading, we can only confirm the committed // index based on the snapshot. This means if a log segment is not kept // in cache after the initial loading, later we have to load its content // again for updating the state machine. // TODO we should let raft peer persist its committed index periodically // so that during the initial loading we can apply part of the log // entries to the state machine // keepEntryInCache is true only for the last getMaxCachedSegments() // segments: (paths.size() - i++) is the count of segments from this one // to the end of the list. boolean keepEntryInCache = (paths.size() - i++) <= cache.getMaxCachedSegments(); cache.loadSegment(pi, keepEntryInCache, logConsumer); } // if the largest index is smaller than the last index in snapshot, we do // not load the log to avoid holes between log segments. This may happen // when the local I/O worker is too slow to persist log (slower than // committing the log and taking snapshot) if (!cache.isEmpty() && cache.getEndIndex() < lastIndexInSnapshot) { LOG.warn("End log index {} is smaller than last index in snapshot {}", cache.getEndIndex(), lastIndexInSnapshot); cache.clear(); // TODO purge all segment files } } }
try(AutoCloseableLock writeLock = writeLock()) { Iterator<TermIndex> iter = cache.iterator(entries[0].getIndex()); int index = 0;
ServerProtoUtils.toLogEntryString(entry)); try(AutoCloseableLock writeLock = writeLock()) { final LogSegment currentOpenSegment = cache.getOpenSegment(); if (currentOpenSegment == null) {
ServerProtoUtils.toLogEntryString(entry)); try(AutoCloseableLock writeLock = writeLock()) { validateLogEntry(entry); final LogSegment currentOpenSegment = cache.getOpenSegment();