/** @return true iff the state machine has caught up to the placeholder entry's index. */
boolean isReady() {
  final long lastApplied = server.getState().getLastAppliedIndex();
  return lastApplied >= placeHolderIndex;
}
/**
 * Install {@code newConf} as the current configuration: recompute the
 * follower voter lists, then record the conf (with its log index) in the
 * server state.
 */
private void updateConfiguration(long logIndex, RaftConfiguration newConf) {
  // Recompute voter lists first so senders observe a consistent view.
  voterLists = divideFollowers(newConf);
  server.getState().setRaftConf(logIndex, newConf);
}
/**
 * @return the current leader's id as a string, or {@code null} if no leader
 *         is currently known (e.g. during an election).
 */
@Override
public String getLeaderId() {
  // Bug fix: getLeaderId() may return null while there is no known leader,
  // so calling toString() unconditionally could throw an NPE.
  final Object leaderId = getState().getLeaderId();
  return leaderId == null ? null : leaderId.toString();
}
/** Lazily resolve the raft log for this group from the server proxy, once. */
private void checkInitialization() throws IOException {
  if (this.log != null) {
    return; // already initialized
  }
  final ServerState serverState = proxy.getImpl(groupId).getState();
  this.log = serverState.getLog();
}
/** @return this server's own peer id rendered as a string. */
@Override
public String getId() {
  final String selfId = getState().getSelfId().toString();
  return selfId;
}
private void checkAndEvictCache() { if (server.isPresent() && cache.shouldEvict()) { // TODO if the cache is hitting the maximum size and we cannot evict any // segment's cache, should block the new entry appending or new segment // allocation. final RaftServerImpl s = server.get(); cache.evictCache(s.getFollowerNextIndices(), fileLogWorker.getFlushedIndex(), s.getState().getLastAppliedIndex()); } }
/**
 * Build the exception reported when more than one leader is observed at the
 * same term — an invariant violation in raft.
 */
IllegalStateException newIllegalStateExceptionForMultipleLeaders(RaftGroupId groupId, List<RaftServerImpl> leaders) {
  final String groupSuffix = groupId == null ? "" : " for " + groupId;
  final long term = leaders.get(0).getState().getCurrentTerm();
  final String message = "Found multiple leaders" + groupSuffix
      + " at the same term (=" + term
      + "), leaders.size() = " + leaders.size()
      + " > 1, leaders = " + leaders
      + ": " + printServers(groupId);
  return new IllegalStateException(message);
}
private void prepare() { synchronized (server) { if (running) { final RaftConfiguration conf = server.getRaftConf(); if (conf.isTransitional() && server.getState().isConfCommitted()) { // the configuration is in transitional state, and has been committed // so it is time to generate and replicate (new) conf. replicateNewConf(); } } } }
private void prepare() { synchronized (server) { if (running) { final RaftConfiguration conf = server.getRaftConf(); if (conf.isTransitional() && server.getState().isConfCommitted()) { // the configuration is in transitional state, and has been committed // so it is time to generate and replicate (new) conf. replicateNewConf(); } } } }
/** @return the paths of all currently-open log segment files of the given server. */
static List<Path> getOpenLogFiles(RaftServerImpl server) throws Exception {
  return server.getState().getStorage().getStorageDir()
      .getLogSegmentFiles()
      .stream()
      .filter(segment -> segment.isOpen())
      .map(segment -> segment.getPath())
      .collect(Collectors.toList());
}
/**
 * Assert that a strict majority of the cluster's live servers have the
 * expected message in their raft logs.
 */
static void assertLogEntries(MiniRaftCluster cluster, SimpleMessage expectedMessage) {
  final int numServers = cluster.getServers().size();
  final long numMatching = cluster.getServerAliveStream()
      .map(server -> server.getState().getLog())
      .filter(log -> logEntriesContains(log, expectedMessage))
      .count();
  // Majority requires count > size/2, i.e. 2*count > size.
  if (numMatching * 2 <= numServers) {
    throw new AssertionError("Not in majority: size=" + numServers + " but count=" + numMatching);
  }
}
private void checkAndEvictCache() { if (server != null && cache.shouldEvict()) { // TODO if the cache is hitting the maximum size and we cannot evict any // segment's cache, should block the new entry appending or new segment // allocation. cache.evictCache(server.getFollowerNextIndices(), fileLogWorker.getFlushedIndex(), server.getState().getLastAppliedIndex()); } }
/**
 * Assert that the given server's raft log contains the expected messages at
 * the expected term; logs and rethrows on mismatch.
 */
static void assertLogEntries(RaftServerImpl server, long expectedTerm, SimpleMessage... expectedMessages) {
  // Idiom fix: use SLF4J parameterized logging instead of string
  // concatenation, and pass the Throwable as the last argument so the
  // framework records the full stack trace.
  LOG.info("checking raft log for {}", server.getId());
  final RaftLog log = server.getState().getLog();
  try {
    RaftTestUtil.assertLogEntries(log, expectedTerm, expectedMessages);
  } catch (AssertionError e) {
    LOG.error("{}: Unexpected raft log", server.getId(), e);
    throw e;
  }
}
/**
 * Assert that the given server's raft log contains the expected messages at
 * the expected term; logs and rethrows on mismatch.
 */
static void assertLogEntries(RaftServerImpl server, long expectedTerm, SimpleMessage... expectedMessages) {
  // Idiom fix: use SLF4J parameterized logging instead of string
  // concatenation, and pass the Throwable as the last argument so the
  // framework records the full stack trace.
  LOG.info("checking raft log for {}", server.getId());
  final RaftLog log = server.getState().getLog();
  try {
    RaftTestUtil.assertLogEntries(log, expectedTerm, expectedMessages);
  } catch (AssertionError e) {
    LOG.error("{}: Unexpected raft log", server.getId(), e);
    throw e;
  }
}
private void applyOldNewConf() { final ServerState state = server.getState(); final RaftConfiguration current = server.getRaftConf(); final RaftConfiguration oldNewConf= stagingState.generateOldNewConf(current, state.getLog().getNextIndex()); // apply the (old, new) configuration to log, and use it as the current conf long index = state.getLog().append(state.getCurrentTerm(), oldNewConf); updateConfiguration(index, oldNewConf); this.stagingState = null; notifySenders(); }
// Transition this server's role from CANDIDATE to LEADER.
// Must only be called after winning an election: asserts the candidate role,
// stops the election thread, switches the role, and updates server state.
synchronized void changeToLeader() {
  Preconditions.assertTrue(isCandidate());
  role.shutdownLeaderElection();
  setRole(RaftPeerRole.LEADER, "changeToLeader");
  state.becomeLeader();
  // start sending AppendEntries RPC to followers
  final LogEntryProto e = role.startLeaderState(this, getProxy().getProperties());
  // NOTE(review): e appears to be the entry returned when starting leader
  // state; presumably it carries the conf the new leader should use — confirm.
  getState().setRaftConf(e);
}
private void applyOldNewConf() { final ServerState state = server.getState(); final RaftConfiguration current = server.getRaftConf(); final RaftConfiguration oldNewConf= stagingState.generateOldNewConf(current, state.getLog().getNextIndex()); // apply the (old, new) configuration to log, and use it as the current conf long index = state.getLog().append(state.getCurrentTerm(), oldNewConf); updateConfiguration(index, oldNewConf); this.stagingState = null; notifySenders(); }
/**
 * Create an appender that replicates this server's log to one follower.
 * Buffer sizes, batching and snapshot chunking come from the server's
 * RaftProperties.
 */
public LogAppender(RaftServerImpl server, LeaderState leaderState, FollowerInfo f) {
  this.follower = f;
  this.server = server;
  this.leaderState = leaderState;
  this.raftLog = server.getState().getLog();

  // Pull appender tuning from the server's configuration.
  final RaftProperties props = server.getProxy().getProperties();
  this.maxBufferSize = RaftServerConfigKeys.Log.Appender.bufferCapacity(props).getSizeInt();
  this.batchSending = RaftServerConfigKeys.Log.Appender.batchEnabled(props);
  this.snapshotChunkMaxSize = RaftServerConfigKeys.Log.Appender.snapshotChunkSizeMax(props).getSizeInt();
  this.halfMinTimeoutMs = server.getMinTimeoutMs() / 2;

  this.buffer = new LogEntryBuffer();
  this.lifeCycle = new LifeCycle(this);
}
/** @return the leader's snapshot file for index {@code i} at its current term. */
static File getSnapshotFile(MiniRaftCluster cluster, int i) {
  final RaftServerImpl leader = cluster.getLeader();
  final SimpleStateMachine4Testing stateMachine = SimpleStateMachine4Testing.get(leader);
  final long currentTerm = leader.getState().getCurrentTerm();
  return stateMachine.getStateMachineStorage().getSnapshotFile(currentTerm, i);
}
static void assertTruncatedLog(RaftPeerId id, File openLogFile, long lastIndex, MiniRaftCluster cluster) throws Exception { // truncate log FileUtils.truncateFile(openLogFile, openLogFile.length() - 1); final RaftServerImpl server = cluster.restartServer(id, false); // the last index should be one less than before Assert.assertEquals(lastIndex - 1, server.getState().getLog().getLastEntryTermIndex().getIndex()); server.getProxy().close(); }