/**
 * Builds the transitional (old, new) configuration used during a membership change.
 *
 * @param current the configuration in effect before the change; stored as the old conf
 * @param logIndex the log index at which the transitional configuration entry is placed
 * @return a configuration holding both the staged new peer set and {@code current}
 */
RaftConfiguration generateOldNewConf(RaftConfiguration current, long logIndex) {
  // NOTE(review): newConf is an enclosing-scope field holding the staged target
  // peer set — confirm it is set before this is called.
  final RaftConfiguration.Builder builder = RaftConfiguration.newBuilder();
  builder.setConf(newConf);
  builder.setOldConf(current);
  builder.setLogEntryIndex(logIndex);
  return builder.build();
}
/**
 * Creates the joint (old, new) configuration for a configuration change:
 * the staged new peers together with {@code current} as the old conf.
 *
 * @param current the currently active configuration, recorded as the old conf
 * @param logIndex index of the log entry that will carry this configuration
 * @return the combined transitional configuration
 */
RaftConfiguration generateOldNewConf(RaftConfiguration current, long logIndex) {
  // newConf comes from the enclosing scope (the staged peer set) —
  // presumably populated by the change-membership request; verify against caller.
  final long index = logIndex;
  return RaftConfiguration.newBuilder()
      .setOldConf(current)
      .setConf(newConf)
      .setLogEntryIndex(index)
      .build();
}
/**
 * Converts a {@link RaftConfigurationProto} into a {@link RaftConfiguration}
 * positioned at the given log index.
 *
 * @param index the log index to associate with the resulting configuration
 * @param proto the serialized configuration (peers, and old peers when in transition)
 * @return the deserialized configuration; includes an old conf only when the
 *         proto carries a non-empty old-peers list
 */
public static RaftConfiguration toRaftConfiguration(
    long index, RaftConfigurationProto proto) {
  final RaftConfiguration.Builder builder = RaftConfiguration.newBuilder()
      .setLogEntryIndex(index)
      .setConf(ProtoUtils.toRaftPeerArray(proto.getPeersList()));
  // An old-peers list is only present while a membership change is in flight.
  final boolean inTransition = proto.getOldPeersCount() > 0;
  if (inTransition) {
    builder.setOldConf(ProtoUtils.toRaftPeerArray(proto.getOldPeersList()));
  }
  return builder.build();
}
/**
 * Extracts the {@link RaftConfiguration} carried by a configuration log entry.
 *
 * @param entry a log entry that must contain a configuration entry
 *              (asserted via {@code Preconditions})
 * @return the configuration at the entry's index; the old conf is set only
 *         when the proto's old-peers list is non-empty
 */
static RaftConfiguration toRaftConfiguration(LogEntryProto entry) {
  Preconditions.assertTrue(entry.hasConfigurationEntry());
  final RaftConfigurationProto proto = entry.getConfigurationEntry();
  final RaftConfiguration.Builder builder = RaftConfiguration.newBuilder()
      .setLogEntryIndex(entry.getIndex())
      .setConf(ProtoUtils.toRaftPeerArray(proto.getPeersList()));
  // Old peers are only serialized for a transitional (old, new) configuration.
  if (proto.getOldPeersCount() > 0) {
    builder.setOldConf(ProtoUtils.toRaftPeerArray(proto.getOldPeersList()));
  }
  return builder.build();
}
/**
 * when the (old, new) log entry has been committed, should replicate (new):
 * 1) append (new) to log
 * 2) update conf to (new)
 * 3) update RpcSenders list
 * 4) start replicating the log entry
 */
private void replicateNewConf() {
  // Current conf is the committed transitional (old, new) configuration.
  final RaftConfiguration conf = server.getRaftConf();
  // Build the final (new)-only configuration at the next log index.
  // NOTE(review): presumably the builder takes only the new peer set from the
  // (old, new) conf since no old conf is set — confirm against the builder.
  final RaftConfiguration newConf = RaftConfiguration.newBuilder()
      .setConf(conf)
      .setLogEntryIndex(raftLog.getNextIndex())
      .build();
  // stop the LogAppender if the corresponding follower is no longer in the conf
  // (must happen before appending so removed followers stop receiving entries)
  updateSenders(newConf);
  long index = raftLog.append(server.getState().getCurrentTerm(), newConf);
  // Switch the server over to the (new) configuration at the appended index,
  // then wake the appenders so replication of the entry starts immediately.
  updateConfiguration(index, newConf);
  notifySenders();
}
/**
 * when the (old, new) log entry has been committed, should replicate (new):
 * 1) append (new) to log
 * 2) update conf to (new)
 * 3) update RpcSenders list
 * 4) start replicating the log entry
 */
private void replicateNewConf() {
  // The committed transitional (old, new) configuration.
  final RaftConfiguration conf = server.getRaftConf();
  // Derive the final (new) configuration, placed at the next log index.
  // NOTE(review): no old conf is set here, so this presumably keeps only the
  // new peer set of the (old, new) conf — confirm builder semantics.
  final RaftConfiguration newConf = RaftConfiguration.newBuilder()
      .setConf(conf)
      .setLogEntryIndex(raftLog.getNextIndex())
      .build();
  // stop the LogAppender if the corresponding follower is no longer in the conf
  // (done before the append so removed peers never see the new entry)
  updateSenders(newConf);
  long index = raftLog.append(server.getState().getCurrentTerm(), newConf);
  // Adopt (new) at the appended index, then nudge appenders to replicate it.
  updateConfiguration(index, newConf);
  notifySenders();
}
/**
 * Initializes the persistent and in-memory state of a Raft server: the
 * initial configuration from the group, on-disk storage, snapshot manager,
 * state machine, the Raft log, and the persisted term/vote metadata.
 *
 * @param id this server's peer id
 * @param group the Raft group (peers and group id) this server belongs to
 * @param prop configuration properties (storage dirs, election timeout, ...)
 * @param server the owning server implementation
 * @param stateMachine the application state machine to initialize
 * @throws IOException if storage or state-machine initialization fails
 */
ServerState(RaftPeerId id, RaftGroup group, RaftProperties prop,
    RaftServerImpl server, StateMachine stateMachine) throws IOException {
  this.selfId = id;
  this.server = server;
  // Bootstrap configuration from the group's peer list (no log index yet).
  RaftConfiguration initialConf = RaftConfiguration.newBuilder()
      .setConf(group.getPeers()).build();
  configurationManager = new ConfigurationManager(initialConf);
  LOG.info("{}: {}", id, configurationManager);
  // use full uuid string to create a subdirectory
  final File dir = chooseStorageDir(RaftServerConfigKeys.storageDirs(prop),
      group.getGroupId().getUuid().toString());
  storage = new RaftStorage(dir, RaftServerConstants.StartupOption.REGULAR);
  snapshotManager = new SnapshotManager(storage, id);
  // Returns the last applied index recovered from the state machine/snapshot.
  long lastApplied = initStatemachine(stateMachine, group.getGroupId());

  // On start the leader is null, start the clock now
  leaderId = null;
  this.lastNoLeaderTime = Timestamp.currentTime();
  this.leaderElectionTimeoutMs =
      RaftServerConfigKeys.leaderElectionTimeout(prop).toIntExact(TimeUnit.MILLISECONDS);

  // we cannot apply log entries to the state machine in this step, since we
  // do not know whether the local log entries have been committed.
  log = initLog(id, prop, lastApplied, this::setRaftConf);

  // Recover persisted term and vote before participating in elections.
  RaftLog.Metadata metadata = log.loadMetadata();
  currentTerm = metadata.getTerm();
  votedFor = metadata.getVotedFor();

  stateMachineUpdater = new StateMachineUpdater(stateMachine, server, log,
      lastApplied, prop);
}
/**
 * Initializes server state: the initial configuration from the group's peers,
 * on-disk storage under a per-group subdirectory, the snapshot manager, the
 * state machine, the Raft log (re-applying configuration entries during
 * recovery), and the persisted term/vote metadata.
 *
 * @param id this server's peer id
 * @param group the Raft group this server belongs to
 * @param prop configuration properties
 * @param server the owning server implementation
 * @param stateMachine the application state machine to initialize
 * @throws IOException if storage or state-machine initialization fails
 */
ServerState(RaftPeerId id, RaftGroup group, RaftProperties prop,
    RaftServerImpl server, StateMachine stateMachine) throws IOException {
  this.selfId = id;
  this.server = server;
  // Bootstrap configuration from the group's peer list (no log index yet).
  RaftConfiguration initialConf = RaftConfiguration.newBuilder()
      .setConf(group.getPeers()).build();
  configurationManager = new ConfigurationManager(initialConf);
  // Each group gets its own storage subdirectory named after the group id.
  final File dir = RaftServerConfigKeys.storageDir(prop);
  storage = new RaftStorage(new File(dir, group.getGroupId().toString()),
      RaftServerConstants.StartupOption.REGULAR);
  snapshotManager = new SnapshotManager(storage, id);
  // Returns the last applied index recovered from the state machine/snapshot.
  long lastApplied = initStatemachine(stateMachine, prop);

  leaderId = null;
  // we cannot apply log entries to the state machine in this step, since we
  // do not know whether the local log entries have been committed.
  log = initLog(id, prop, lastApplied, entry -> {
    // During log recovery, configuration entries must still be replayed into
    // the configuration manager so the server rejoins with the latest conf.
    if (entry.getLogEntryBodyCase() == CONFIGURATIONENTRY) {
      configurationManager.addConfiguration(entry.getIndex(),
          ServerProtoUtils.toRaftConfiguration(entry.getIndex(),
              entry.getConfigurationEntry()));
    }
  });
  // Recover persisted term and vote before participating in elections.
  RaftLog.Metadata metadata = log.loadMetadata();
  currentTerm = metadata.getTerm();
  votedFor = metadata.getVotedFor();

  stateMachineUpdater = new StateMachineUpdater(stateMachine, server, log,
      lastApplied, prop);
}
private static void waitAndCheckNewConf(MiniRaftCluster cluster, RaftPeer[] peers, Collection<RaftPeerId> deadPeers) { LOG.info("waitAndCheckNewConf: peers={}, deadPeers={}, {}", Arrays.asList(peers), deadPeers, cluster.printServers()); Assert.assertNotNull(cluster.getLeader()); int numIncluded = 0; int deadIncluded = 0; final RaftConfiguration current = RaftConfiguration.newBuilder() .setConf(peers).setLogEntryIndex(0).build(); for (RaftServerImpl server : cluster.iterateServerImpls()) { LOG.info("checking {}", server); if (deadPeers != null && deadPeers.contains(server.getId())) { if (current.containsInConf(server.getId())) { deadIncluded++; } continue; } if (current.containsInConf(server.getId())) { numIncluded++; Assert.assertTrue(server.getRaftConf().isStable()); Assert.assertTrue(server.getRaftConf().hasNoChange(peers)); } else if (server.isAlive()) { // The server is successfully removed from the conf // It may not be shutdown since it may not be able to talk to the new leader (who is not in its conf). Assert.assertTrue(server.getRaftConf().isStable()); Assert.assertFalse(server.getRaftConf().containsInConf(server.getId())); } } Assert.assertEquals(peers.length, numIncluded + deadIncluded); }
/**
 * Checks that the cluster's configuration has settled to {@code peers}: live
 * in-conf servers must hold a stable, matching conf; out-of-conf servers must
 * have shut down; dead in-conf peers are tallied into the final count.
 *
 * @param cluster the mini cluster under test
 * @param peers the expected final peer set
 * @param deadPeers string ids of peers known to be down, or null if none
 * @throws Exception if cluster inspection fails
 */
private static void waitAndCheckNewConf(MiniRaftCluster cluster,
    RaftPeer[] peers, Collection<String> deadPeers) throws Exception {
  LOG.info(cluster.printServers());
  Assert.assertNotNull(cluster.getLeader());

  final RaftConfiguration current = RaftConfiguration.newBuilder()
      .setConf(peers).setLogEntryIndex(0).build();
  int numIncluded = 0;
  int deadIncluded = 0;
  for (RaftServerImpl server : cluster.iterateServerImpls()) {
    final boolean inConf = current.containsInConf(server.getId());
    if (deadPeers != null && deadPeers.contains(server.getId().toString())) {
      // Dead servers are skipped; only count those expected in the conf.
      if (inConf) {
        deadIncluded++;
      }
      continue;
    }
    if (!inConf) {
      // Removed servers must have stopped running.
      Assert.assertFalse(server.getId() + " is still running: " + server,
          server.isAlive());
      continue;
    }
    numIncluded++;
    Assert.assertTrue(server.getRaftConf().isStable());
    Assert.assertTrue(server.getRaftConf().hasNoChange(peers));
  }
  Assert.assertEquals(peers.length, numIncluded + deadIncluded);
}