/**
 * Collects the {@link TermIndex} entries in the requested range, clamped to
 * what this cache currently holds.
 *
 * @param startIndex inclusive
 * @param endIndex exclusive
 */
TermIndex[] getTermIndices(final long startIndex, final long endIndex) {
  // Requests beginning before the cached window are a caller error.
  if (startIndex < 0 || startIndex < getStartIndex()) {
    throw new IndexOutOfBoundsException("startIndex = " + startIndex
        + ", log cache starts from index " + getStartIndex());
  }
  // An inverted range is also a caller error.
  if (startIndex > endIndex) {
    throw new IndexOutOfBoundsException(
        "startIndex(" + startIndex + ") > endIndex(" + endIndex + ")");
  }
  // Clamp the exclusive upper bound to one past the last cached index.
  final long boundedEnd = Math.min(getEndIndex() + 1, endIndex);
  return startIndex < boundedEnd
      ? closedSegments.getTermIndex(startIndex, boundedEnd, openSegment)
      : TermIndex.EMPTY_TERMINDEX_ARRAY;
}
+ ") > endIndex(" + endIndex + ")"); final long realEnd = Math.min(getEndIndex() + 1, endIndex); if (startIndex >= realEnd) { return TermIndex.EMPTY_TERMINDEX_ARRAY;
private void loadLogSegments(long lastIndexInSnapshot, Consumer<LogEntryProto> logConsumer) throws IOException { try(AutoCloseableLock writeLock = writeLock()) { List<LogPathAndIndex> paths = storage.getStorageDir().getLogSegmentFiles(); int i = 0; for (LogPathAndIndex pi : paths) { // During the initial loading, we can only confirm the committed // index based on the snapshot. This means if a log segment is not kept // in cache after the initial loading, later we have to load its content // again for updating the state machine. // TODO we should let raft peer persist its committed index periodically // so that during the initial loading we can apply part of the log // entries to the state machine boolean keepEntryInCache = (paths.size() - i++) <= cache.getMaxCachedSegments(); cache.loadSegment(pi, keepEntryInCache, logConsumer); } // if the largest index is smaller than the last index in snapshot, we do // not load the log to avoid holes between log segments. This may happen // when the local I/O worker is too slow to persist log (slower than // committing the log and taking snapshot) if (!cache.isEmpty() && cache.getEndIndex() < lastIndexInSnapshot) { LOG.warn("End log index {} is smaller than last index in snapshot {}", cache.getEndIndex(), lastIndexInSnapshot); cache.clear(); // TODO purge all segment files } } }
private void loadLogSegments(long lastIndexInSnapshot, Consumer<LogEntryProto> logConsumer) throws IOException { try(AutoCloseableLock writeLock = writeLock()) { List<LogPathAndIndex> paths = storage.getStorageDir().getLogSegmentFiles(); int i = 0; for (LogPathAndIndex pi : paths) { boolean isOpen = pi.endIndex == RaftServerConstants.INVALID_LOG_INDEX; // During the initial loading, we can only confirm the committed // index based on the snapshot. This means if a log segment is not kept // in cache after the initial loading, later we have to load its content // again for updating the state machine. // TODO we should let raft peer persist its committed index periodically // so that during the initial loading we can apply part of the log // entries to the state machine boolean keepEntryInCache = (paths.size() - i++) <= cache.getMaxCachedSegments(); cache.loadSegment(pi, isOpen, keepEntryInCache, logConsumer); } // if the largest index is smaller than the last index in snapshot, we do // not load the log to avoid holes between log segments. This may happen // when the local I/O worker is too slow to persist log (slower than // committing the log and taking snapshot) if (!cache.isEmpty() && cache.getEndIndex() < lastIndexInSnapshot) { LOG.warn("End log index {} is smaller than last index in snapshot {}", cache.getEndIndex(), lastIndexInSnapshot); cache.clear(); // TODO purge all segment files } } }
@Override
protected void openImpl(long lastIndexInSnapshot, Consumer<LogEntryProto> consumer)
    throws IOException {
  // Rebuild the in-memory cache from the segment files on disk.
  loadLogSegments(lastIndexInSnapshot, consumer);
  // If there is an open (still-being-written) segment, hand its file to the
  // log worker so appends continue in place.
  final LogSegment openSegment = cache.getOpenSegment();
  final File openSegmentFile = openSegment == null ? null
      : storage.getStorageDir().getOpenLogFile(openSegment.getStartIndex());
  fileLogWorker.start(Math.max(cache.getEndIndex(), lastIndexInSnapshot), openSegmentFile);
}
@Override
public void open(long lastIndexInSnapshot, Consumer<LogEntryProto> consumer)
    throws IOException {
  // Rebuild the in-memory cache from the segment files on disk.
  loadLogSegments(lastIndexInSnapshot, consumer);
  // If there is an open (still-being-written) segment, hand its file to the
  // log worker so appends continue in place.
  final LogSegment openSegment = cache.getOpenSegment();
  final File openSegmentFile = openSegment == null ? null
      : storage.getStorageDir().getOpenLogFile(openSegment.getStartIndex());
  fileLogWorker.start(Math.max(cache.getEndIndex(), lastIndexInSnapshot), openSegmentFile);
  // Let the superclass finish its own open bookkeeping last.
  super.open(lastIndexInSnapshot, consumer);
}
/**
 * Walks the cache iterator from {@code startIndex} and verifies each element
 * matches its log record, indices are consecutive, and — when the start lies
 * inside the cached range — iteration reaches the cache's end index.
 */
private void testIterator(long startIndex) throws IOException {
  TermIndex previous = null;
  for (Iterator<TermIndex> it = cache.iterator(startIndex); it.hasNext(); ) {
    final TermIndex current = it.next();
    Assert.assertEquals(cache.getLogRecord(current.getIndex()).getTermIndex(), current);
    if (previous != null) {
      // Indices must advance by exactly one.
      Assert.assertEquals(previous.getIndex() + 1, current.getIndex());
    }
    previous = current;
  }
  if (startIndex <= cache.getEndIndex()) {
    Assert.assertNotNull(previous);
    Assert.assertEquals(cache.getEndIndex(), previous.getIndex());
  }
}
/**
 * Iterates the cache from {@code startIndex}, asserting that every returned
 * TermIndex agrees with the stored log record, that indices are gap-free, and
 * that the walk ends exactly at the cache's end index when one is reachable.
 */
private void testIterator(long startIndex) throws IOException {
  final Iterator<TermIndex> iter = cache.iterator(startIndex);
  TermIndex last = null;
  while (iter.hasNext()) {
    final TermIndex ti = iter.next();
    // Each iterated element must equal the record the cache holds for it.
    Assert.assertEquals(cache.getLogRecord(ti.getIndex()).getTermIndex(), ti);
    if (last != null) {
      // No holes: consecutive elements differ by one index.
      Assert.assertEquals(last.getIndex() + 1, ti.getIndex());
    }
    last = ti;
  }
  if (startIndex <= cache.getEndIndex()) {
    Assert.assertNotNull(last);
    Assert.assertEquals(cache.getEndIndex(), last.getIndex());
  }
}
/**
 * Verifies the cache covers exactly [start, end], that every index resolves to
 * an already-loaded entry, and that entry retrieval works for a spread of
 * offsets and request sizes.
 */
private void checkCache(long start, long end, int segmentSize) throws IOException {
  Assert.assertEquals(start, cache.getStartIndex());
  Assert.assertEquals(end, cache.getEndIndex());
  for (long index = start; index <= end; index++) {
    final LogEntryProto entry =
        cache.getSegment(index).getEntryWithoutLoading(index).getEntry();
    Assert.assertEquals(index, entry.getIndex());
  }
  // Probe the boundaries and the midpoint of the range.
  final long[] offsets = {start, start + 1, start + (end - start) / 2, end - 1, end};
  for (long offset : offsets) {
    // Exercise full-range, tiny, mid-sized, and segment-boundary request sizes.
    final int[] sizes = {(int) (end - offset + 1), 1, 20, segmentSize, segmentSize - 1};
    for (int size : sizes) {
      checkCacheEntries(offset, size, end);
    }
  }
}
/**
 * Asserts the cache spans exactly [start, end], that each index already has a
 * loaded entry with a matching index, and that entry lookups succeed for
 * several representative offsets and request sizes.
 */
private void checkCache(long start, long end, int segmentSize) throws IOException {
  Assert.assertEquals(start, cache.getStartIndex());
  Assert.assertEquals(end, cache.getEndIndex());
  for (long idx = start; idx <= end; idx++) {
    final LogEntryProto stored =
        cache.getSegment(idx).getEntryWithoutLoading(idx).getEntry();
    Assert.assertEquals(idx, stored.getIndex());
  }
  // Probe both edges and the midpoint of the cached range.
  final long[] probes = new long[]{start, start + 1, start + (end - start) / 2, end - 1, end};
  for (long probe : probes) {
    checkCacheEntries(probe, (int) (end - probe + 1), end);
    checkCacheEntries(probe, 1, end);
    checkCacheEntries(probe, 20, end);
    checkCacheEntries(probe, segmentSize, end);
    checkCacheEntries(probe, segmentSize - 1, end);
  }
}
cache.addSegment(s); long end = cache.getEndIndex(); Assert.assertEquals(599, end); int numOfSegments = 6;
cache.addSegment(s); long end = cache.getEndIndex(); Assert.assertEquals(599, end); int numOfSegments = 6;