/**
 * After a successful append to this follower, submit the matching state-update
 * event: commit advancement for a voting member, staging progress otherwise.
 */
protected void submitEventOnSuccessAppend() {
  final LeaderState.StateUpdateEvent event;
  if (follower.isAttendingVote()) {
    event = LeaderState.UPDATE_COMMIT_EVENT;
  } else {
    event = LeaderState.STAGING_PROGRESS_EVENT;
  }
  leaderState.submitUpdateStateEvent(event);
}
/**
 * Build an AppendEntries request from the currently buffered entries,
 * then reset the buffer and the accumulated size counter.
 */
AppendEntriesRequestProto getAppendRequest(TermIndex previous, long callId) {
  // A follower that is not yet attending votes is still being bootstrapped.
  final boolean initializing = !follower.isAttendingVote();
  final AppendEntriesRequestProto request = leaderState.newAppendEntriesRequestProto(
      getFollowerId(), previous, buf, initializing, callId);
  buf.clear();
  totalSize = 0;
  return request;
}
/**
 * After a successful append to this follower, notify the leader state:
 * a staging (non-voting) follower triggers a staging check, a voting
 * follower triggers a commit-index update.
 */
protected void submitEventOnSuccessAppend() {
  if (!follower.isAttendingVote()) {
    leaderState.submitCheckStagingEvent();
  } else {
    leaderState.submitUpdateCommitEvent();
  }
}
/**
 * Evaluate bootstrap progress for every non-voting (staging) follower.
 * Must only be called while in the staging state.
 */
private Collection<BootStrapProgress> checkAllProgress(long committed) {
  Preconditions.assertTrue(inStagingState());
  return senders.stream()
      .map(sender -> sender.getFollower())
      .filter(f -> !f.isAttendingVote())
      .map(f -> checkProgress(f, committed))
      .collect(Collectors.toCollection(ArrayList::new));
}
/**
 * Compute a bootstrap-progress report for each staging follower,
 * i.e. each follower that is not yet attending votes.
 */
private Collection<BootStrapProgress> checkAllProgress(long committed) {
  Preconditions.assertTrue(inStagingState());
  return senders.stream()
      .map(s -> s.getFollower())
      .filter(info -> !info.isAttendingVote())
      .map(info -> checkProgress(info, committed))
      .collect(Collectors.toCollection(ArrayList::new));
}
/**
 * If a follower reports a term higher than ours, step down.
 * Only applies while the appender is running and the follower is a voting member.
 */
protected void checkResponseTerm(long responseTerm) {
  synchronized (server) {
    // Guard clauses preserve the original short-circuit evaluation order.
    if (!isAppenderRunning() || !follower.isAttendingVote()) {
      return;
    }
    if (responseTerm > leaderState.getCurrentTerm()) {
      leaderState.submitStepDownEvent(responseTerm);
    }
  }
}
}
void fail() { stopAndRemoveSenders(s -> !s.getFollower().isAttendingVote()); LeaderState.this.stagingState = null; // send back failure response to client's request pendingRequests.failSetConfiguration( new ReconfigurationTimeoutException("Fail to set configuration " + newConf + ". Timeout when bootstrapping new peers.")); } }
/**
 * Step down if a follower's response carries a higher term than the leader's.
 * Checked only while the appender is running and the follower attends votes.
 */
protected void checkResponseTerm(long responseTerm) {
  synchronized (server) {
    // Early exits keep the original short-circuit order of the checks.
    if (!isAppenderRunning() || !follower.isAttendingVote()) {
      return;
    }
    if (responseTerm > leaderState.getCurrentTerm()) {
      final LeaderState.StateUpdateEvent stepDown =
          new LeaderState.StateUpdateEvent(StateUpdateEventType.STEPDOWN, responseTerm);
      leaderState.submitUpdateStateEvent(stepDown);
    }
  }
}
}
void fail() { stopAndRemoveSenders(s -> !s.getFollower().isAttendingVote()); LeaderState.this.stagingState = null; // send back failure response to client's request pendingRequests.failSetConfiguration( new ReconfigurationTimeoutException("Fail to set configuration " + newConf + ". Timeout when bootstrapping new peers.")); } }
/**
 * Assemble the next AppendEntries request for this follower.
 * If the heartbeat deadline has already passed, send an empty (heartbeat)
 * request immediately. Otherwise buffer log entries from the follower's
 * next index up to the leader's last index and package them into a request.
 *
 * @return the request to send, or null if there are no entries to ship.
 * @throws RaftLogIOException if reading an entry from the log fails.
 */
protected AppendEntriesRequestProto createRequest(long callId) throws RaftLogIOException {
  final TermIndex previous = getPrevious();
  final long heartbeatRemainingMs = getHeartbeatRemainingTime();
  if (heartbeatRemainingMs <= 0L) {
    // Heartbeat is due: an empty entry list keeps the follower from timing out.
    return leaderState.newAppendEntriesRequestProto(
        getFollowerId(), previous, Collections.emptyList(), !follower.isAttendingVote(), callId);
  }

  Preconditions.assertTrue(buffer.isEmpty(),
      () -> "buffer has " + buffer.getNumElements() + " elements.");

  // Queue entries until we reach the leader's next index or the buffer refuses more.
  final long leaderNext = raftLog.getNextIndex();
  long index = follower.getNextIndex();
  while (index < leaderNext && buffer.offer(raftLog.getEntryWithData(index))) {
    index++;
  }

  if (buffer.isEmpty()) {
    return null;
  }

  // Drain the buffered entries within the remaining heartbeat budget.
  final List<LogEntryProto> protos = buffer.pollList(heartbeatRemainingMs, EntryWithData::getEntry,
      (entry, time, exception) -> LOG.warn(this + ": Failed get " + entry + " in " + time, exception));
  buffer.clear();
  return leaderState.newAppendEntriesRequestProto(
      getFollowerId(), previous, protos, !follower.isAttendingVote(), callId);
}
/**
 * So far we use a simple implementation for catchup checking:
 * 1. If the latest rpc time of the remote peer is before 3 * max_timeout,
 *    the peer made no progress for that long. We should fail the whole
 *    setConfiguration request.
 * 2. If the peer's matching index is just behind for a small gap, and the
 *    peer was updated recently (within max_timeout), declare the peer as
 *    caught-up.
 * 3. Otherwise the peer is making progressing. Keep waiting.
 */
private BootStrapProgress checkProgress(FollowerInfo follower, long committed) {
  Preconditions.assertTrue(!follower.isAttendingVote());
  // Snapshot the clock once so both thresholds are measured from the same
  // instant (two separate currentTime() calls would drift slightly apart).
  final Timestamp now = Timestamp.currentTime();
  final Timestamp progressTime = now.addTimeMs(-server.getMaxTimeoutMs());
  final Timestamp timeoutTime = now.addTimeMs(-3 * server.getMaxTimeoutMs());
  if (follower.getLastRpcResponseTime().compareTo(timeoutTime) < 0) {
    // No response for 3 * max_timeout: the bootstrap has stalled.
    LOG.debug("{} detects a follower {} timeout for bootstrapping," +
        " timeoutTime: {}", server.getId(), follower, timeoutTime);
    return BootStrapProgress.NOPROGRESS;
  } else if (follower.getMatchIndex() + stagingCatchupGap > committed
      && follower.getLastRpcResponseTime().compareTo(progressTime) > 0) {
    // Close enough to the committed index and responsive within max_timeout.
    return BootStrapProgress.CAUGHTUP;
  } else {
    return BootStrapProgress.PROGRESSING;
  }
}
/**
 * So far we use a simple implementation for catchup checking:
 * 1. If the latest rpc time of the remote peer is before 3 * max_timeout,
 *    the peer made no progress for that long. We should fail the whole
 *    setConfiguration request.
 * 2. If the peer's matching index is just behind for a small gap, and the
 *    peer was updated recently (within max_timeout), declare the peer as
 *    caught-up.
 * 3. Otherwise the peer is making progressing. Keep waiting.
 */
private BootStrapProgress checkProgress(FollowerInfo follower, long committed) {
  Preconditions.assertTrue(!follower.isAttendingVote());
  // Read the clock once so both thresholds derive from the same instant
  // (two separate new Timestamp() calls would be slightly different times).
  final Timestamp now = new Timestamp();
  final Timestamp progressTime = now.addTimeMs(-server.getMaxTimeoutMs());
  final Timestamp timeoutTime = now.addTimeMs(-3 * server.getMaxTimeoutMs());
  if (follower.getLastRpcResponseTime().compareTo(timeoutTime) < 0) {
    // No response for 3 * max_timeout: the bootstrap has stalled.
    LOG.debug("{} detects a follower {} timeout for bootstrapping," +
        " timeoutTime: {}", server.getId(), follower, timeoutTime);
    return BootStrapProgress.NOPROGRESS;
  } else if (follower.getMatchIndex() + stagingCatchupGap > committed
      && follower.getLastRpcResponseTime().compareTo(progressTime) > 0) {
    // Close enough to the committed index and responsive within max_timeout.
    return BootStrapProgress.CAUGHTUP;
  } else {
    return BootStrapProgress.PROGRESSING;
  }
}