LeaderElection(RaftServerImpl server) {
  // Keep a handle to the server so the election can read its state later.
  this.server = server;
  conf = server.getRaftConf();
  // The peers we must request votes from: every conf member except ourselves.
  others = conf.getOtherPeers(server.getId());
  this.running = true;
}
LeaderElection(RaftServerImpl server) {
  this.server = server;
  // Snapshot the conf at election start and derive the vote targets from it.
  this.conf = server.getRaftConf();
  this.others = this.conf.getOtherPeers(server.getId());
  // The election loop runs until it is explicitly stopped.
  this.running = true;
}
RaftGroup getGroup() {
  // Rebuild the group descriptor from this server's group id and the
  // current conf's peer list.
  final Collection<RaftPeer> peers = getRaftConf().getPeers();
  return RaftGroup.valueOf(groupId, peers);
}
Collection<CommitInfoProto> getCommitInfos() { final List<CommitInfoProto> infos = new ArrayList<>(); // add the commit info of this server infos.add(commitInfoCache.update(getPeer(), state.getLog().getLastCommittedIndex())); // add the commit infos of other servers if (isLeader()) { Optional.ofNullable(leaderState).ifPresent( leader -> leader.updateFollowerCommitInfos(commitInfoCache, infos)); } else { getRaftConf().getPeers().stream() .filter(p -> !p.getId().equals(state.getSelfId())) .map(RaftPeer::getId) .map(commitInfoCache::get) .filter(i -> i != null) .forEach(infos::add); } return infos; }
/**
 * Check whether the remote candidate is excluded from the current conf and
 * should be told to shut down. All of the following must hold:
 * 1. this server is the leader;
 * 2. the current conf is stable and has been committed;
 * 3. the candidate id is not in the conf and is not being bootstrapped;
 * 4. the candidate's last entry index is older than the conf's log index.
 */
private boolean shouldSendShutdown(RaftPeerId candidateId, TermIndex candidateLastEntry) {
  if (!isLeader()) {
    return false;
  }
  if (!getRaftConf().isStable() || !getState().isConfCommitted()) {
    return false;
  }
  if (getRaftConf().containsInConf(candidateId)) {
    return false;
  }
  if (candidateLastEntry.getIndex() >= getRaftConf().getLogEntryIndex()) {
    return false;
  }
  // Not a shutdown target if the peer is still being bootstrapped; absent
  // leader state (leadership just lost) also means no shutdown.
  return role.getLeaderState().map(ls -> !ls.isBootStrappingPeer(candidateId)).orElse(false);
}
/**
 * Check whether the remote candidate is excluded from the current conf and
 * should be told to shut down. All of the following must hold:
 * 1. this server is the leader;
 * 2. the current conf is stable and has been committed;
 * 3. the candidate id is not in the conf and is not being bootstrapped;
 * 4. the candidate's last entry index is older than the conf's log index.
 */
private boolean shouldSendShutdown(RaftPeerId candidateId, TermIndex candidateLastEntry) {
  // Snapshot the leader state once: leadership can be lost concurrently,
  // setting leaderState to null between the isLeader() check and the
  // dereference — the previous unguarded leaderState.isBootStrappingPeer(..)
  // call could throw NullPointerException in that window.
  final LeaderState leader = leaderState;
  return isLeader()
      && getRaftConf().isStable()
      && getState().isConfCommitted()
      && !getRaftConf().containsInConf(candidateId)
      && candidateLastEntry.getIndex() < getRaftConf().getLogEntryIndex()
      && leader != null
      && !leader.isBootStrappingPeer(candidateId);
}
Collection<CommitInfoProto> getCommitInfos() { final List<CommitInfoProto> infos = new ArrayList<>(); // add the commit info of this server infos.add(commitInfoCache.update(getPeer(), state.getLog().getLastCommittedIndex())); // add the commit infos of other servers if (isLeader()) { role.getLeaderState().ifPresent( leader -> leader.updateFollowerCommitInfos(commitInfoCache, infos)); } else { getRaftConf().getPeers().stream() .filter(p -> !p.getId().equals(state.getSelfId())) .map(RaftPeer::getId) .map(commitInfoCache::get) .filter(i -> i != null) .forEach(infos::add); } return infos; }
private void prepare() { synchronized (server) { if (running) { final RaftConfiguration conf = server.getRaftConf(); if (conf.isTransitional() && server.getState().isConfCommitted()) { // the configuration is in transitional state, and has been committed // so it is time to generate and replicate (new) conf. replicateNewConf(); } } } }
private void prepare() { synchronized (server) { if (running) { final RaftConfiguration conf = server.getRaftConf(); if (conf.isTransitional() && server.getState().isConfCommitted()) { // the configuration is in transitional state, and has been committed // so it is time to generate and replicate (new) conf. replicateNewConf(); } } } }
ServerInformationReply getServerInformation(ServerInformationRequest request) {
  // Assemble the current group membership and commit progress into a reply.
  final Collection<RaftPeer> peers = getRaftConf().getPeers();
  final RaftGroup group = new RaftGroup(groupId, peers);
  return new ServerInformationReply(request, getCommitInfos(), group);
}
NotLeaderException generateNotLeaderException() { if (lifeCycle.getCurrentState() != RUNNING) { return new NotLeaderException(getId(), null, null); } RaftPeerId leaderId = state.getLeaderId(); if (leaderId == null || leaderId.equals(state.getSelfId())) { // No idea about who is the current leader. Or the peer is the current // leader, but it is about to step down RaftPeer suggestedLeader = getRaftConf().getRandomPeer(state.getSelfId()); leaderId = suggestedLeader == null ? null : suggestedLeader.getId(); } RaftConfiguration conf = getRaftConf(); Collection<RaftPeer> peers = conf.getPeers(); return new NotLeaderException(getId(), conf.getPeer(leaderId), peers.toArray(new RaftPeer[peers.size()])); }
NotLeaderException generateNotLeaderException() { if (lifeCycle.getCurrentState() != RUNNING) { return new NotLeaderException(getId(), null, null); } RaftPeerId leaderId = state.getLeaderId(); if (leaderId == null || leaderId.equals(state.getSelfId())) { // No idea about who is the current leader. Or the peer is the current // leader, but it is about to step down RaftPeer suggestedLeader = getRaftConf().getRandomPeer(state.getSelfId()); leaderId = suggestedLeader == null ? null : suggestedLeader.getId(); } RaftConfiguration conf = getRaftConf(); Collection<RaftPeer> peers = conf.getPeers(); return new NotLeaderException(getId(), conf.getPeer(leaderId), peers.toArray(new RaftPeer[peers.size()])); }
void start() {
  lifeCycle.transition(STARTING);
  state.start();
  final RaftConfiguration conf = getRaftConf();
  final boolean memberOfConf = conf != null && conf.contains(getId());
  if (memberOfConf) {
    // This server is part of the current conf: begin as a follower.
    LOG.debug("{} starts as a follower, conf={}", getId(), conf);
    startAsFollower();
  } else {
    // Not in the conf (or no conf yet): begin in the initializing state.
    LOG.debug("{} starts with initializing state, conf={}", getId(), conf);
    startInitializing();
  }
  registerMBean(getId(), getGroupId(), jmxAdapter, jmxAdapter);
}
/**
 * Start this server if it is still in the NEW lifecycle state.
 *
 * @return true if this call started the server; false if the server was not
 *         in the NEW state (e.g. already started).
 */
boolean start() {
  // Atomically move NEW -> STARTING; a failed transition means the server
  // has already been started (or moved past NEW) by someone else.
  if (!lifeCycle.compareAndTransition(NEW, STARTING)) {
    return false;
  }
  LOG.info("{}: start {}", getId(), groupId);
  RaftConfiguration conf = getRaftConf();
  if (conf != null && conf.contains(getId())) {
    // Member of the current conf: start in the follower role.
    LOG.debug("{} starts as a follower, conf={}", getId(), conf);
    startAsFollower();
  } else {
    // Not in the conf (or no conf yet): start in the initializing state.
    LOG.debug("{} starts with initializing state, conf={}", getId(), conf);
    startInitializing();
  }
  registerMBean(getId(), getGroupId(), jmxAdapter, jmxAdapter);
  // NOTE(review): state.start() is deliberately last here (unlike older
  // revisions that called it first) — TODO confirm the required ordering
  // relative to startAsFollower/registerMBean.
  state.start();
  return true;
}
LeaderState(RaftServerImpl server, RaftProperties properties) {
  this.server = server;
  stagingCatchupGap = RaftServerConfigKeys.stagingCatchupGap(properties);
  syncInterval = RaftServerConfigKeys.Rpc.sleepTime(properties);

  final ServerState serverState = server.getState();
  this.raftLog = serverState.getLog();
  this.currentTerm = serverState.getCurrentTerm();

  eventQ = new ArrayBlockingQueue<>(4096);
  processor = new EventProcessor();
  pendingRequests = new PendingRequests(server);

  final RaftConfiguration conf = server.getRaftConf();
  final Collection<RaftPeer> followers = conf.getOtherPeers(serverState.getSelfId());
  // A timestamp set one max-timeout in the past, handed to each new appender.
  final Timestamp backdated = new Timestamp().addTimeMs(-server.getMaxTimeoutMs());
  placeHolderIndex = raftLog.getNextIndex();
  senders = new SenderList(followers.stream()
      .map(peer -> server.newLogAppender(this, peer, backdated, placeHolderIndex, true))
      .toArray(LogAppender[]::new));
  voterLists = divideFollowers(conf);
}
private void applyOldNewConf() { final ServerState state = server.getState(); final RaftConfiguration current = server.getRaftConf(); final RaftConfiguration oldNewConf= stagingState.generateOldNewConf(current, state.getLog().getNextIndex()); // apply the (old, new) configuration to log, and use it as the current conf long index = state.getLog().append(state.getCurrentTerm(), oldNewConf); updateConfiguration(index, oldNewConf); this.stagingState = null; notifySenders(); }
private void applyOldNewConf() { final ServerState state = server.getState(); final RaftConfiguration current = server.getRaftConf(); final RaftConfiguration oldNewConf= stagingState.generateOldNewConf(current, state.getLog().getNextIndex()); // apply the (old, new) configuration to log, and use it as the current conf long index = state.getLog().append(state.getCurrentTerm(), oldNewConf); updateConfiguration(index, oldNewConf); this.stagingState = null; notifySenders(); }
LeaderState(RaftServerImpl server, RaftProperties properties) {
  this.server = server;
  stagingCatchupGap = RaftServerConfigKeys.stagingCatchupGap(properties);
  syncInterval = RaftServerConfigKeys.Rpc.sleepTime(properties);

  final ServerState serverState = server.getState();
  this.raftLog = serverState.getLog();
  this.currentTerm = serverState.getCurrentTerm();

  processor = new EventProcessor();
  this.pendingRequests = new PendingRequests(server.getId());
  this.watchRequests = new WatchRequests(server.getId(), properties);

  final RaftConfiguration conf = server.getRaftConf();
  final Collection<RaftPeer> peersExceptSelf = conf.getOtherPeers(serverState.getSelfId());
  // A timestamp set one max-timeout in the past, handed to each new appender.
  final Timestamp initialRpcTime = Timestamp.currentTime().addTimeMs(-server.getMaxTimeoutMs());
  placeHolderIndex = raftLog.getNextIndex();
  final LogAppender[] appenders = peersExceptSelf.stream()
      .map(peer -> server.newLogAppender(this, peer, initialRpcTime, placeHolderIndex, true))
      .toArray(LogAppender[]::new);
  senders = new SenderList(appenders);
  voterLists = divideFollowers(conf);
}
LogEntryProto start() { // In the beginning of the new term, replicate a conf entry in order // to finally commit entries in the previous term. // Also this message can help identify the last committed index and the conf. final LogEntryProto placeHolder = ServerProtoUtils.toLogEntryProto( server.getRaftConf(), server.getState().getCurrentTerm(), raftLog.getNextIndex()); CodeInjectionForTesting.execute(APPEND_PLACEHOLDER, server.getId().toString(), null); raftLog.append(placeHolder); processor.start(); senders.forEach(LogAppender::startAppender); return placeHolder; }
/** * when the (old, new) log entry has been committed, should replicate (new): * 1) append (new) to log * 2) update conf to (new) * 3) update RpcSenders list * 4) start replicating the log entry */ private void replicateNewConf() { final RaftConfiguration conf = server.getRaftConf(); final RaftConfiguration newConf = RaftConfiguration.newBuilder() .setConf(conf) .setLogEntryIndex(raftLog.getNextIndex()) .build(); // stop the LogAppender if the corresponding follower is no longer in the conf updateSenders(newConf); long index = raftLog.append(server.getState().getCurrentTerm(), newConf); updateConfiguration(index, newConf); notifySenders(); }