/* ****************** outgoing ********************* */

void addOutQueue(Message message) {
    message.acquire();
    outQueue.add(message);
    outMessages.value++;
    outBytes.value += message.size;
}
/**
 * Decrements the reference count. When the reference count reaches 0,
 * the message is returned to the pool.
 */
public void release() {
    int n = refCount.decrementAndGet();
    if (n == 0) {
        tracingInfo = null;
        pool.checkin(this);
    }
    assert n >= 0 : String.format("Message is over-released: type=%d from=%d",
            getType(), getFromMemberId());
}
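// Usage sketch (not from the original source): checkout() is assumed to
// return a message with refCount == 1, so the basic lifecycle is a
// checkout/use/release cycle. addOutQueue() above acquires an extra
// reference on behalf of the writer thread, which releases it once the
// bytes are on the wire.
Message m = pool.checkout();
try {
    m.heartbeat(memberId, currentTerm, rid, commitIndex);
    peer.send(m, rid.index - 1);   // may call addOutQueue(), which acquires
} finally {
    m.release();                   // drop the checkout reference; the last release checks the message in
}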
/**
 * Returns true if this message can accommodate another command of cmdSize bytes.
 */
public boolean canBatch(int cmdSize) {
    assert getType() == TYPE_APPEND_ENTRY_REQ;
    return batching && size + cmdSize + 2 <= buffer.length && !isHeartbeat();
}
message.appendEntryRequest(cmember.memberId, cmember.currentTerm, latestRid,
        commitIndex, cmember.currentTerm, ccmd.buffer, 0, ccmd.size);
lock.lock();
try {
    // Batch any further queued commands that still fit in this message.
    // peek() before remove() ensures that a command which does not fit
    // stays queued for the next message (remove() on an empty queue
    // would also throw, so the null check must test the peeked command).
    ccmd = commandQueue.peek();
    while (ccmd != null && message.canBatch(ccmd.size)) {
        commandQueue.remove();
        message.appendEntryBatch(ccmd.buffer, 0, ccmd.size);
        ccmd.term = cmember.currentTerm;
        ccmd.index = latestRid.index + 1;
        ccmd = commandQueue.peek();
    }
} finally {
    lock.unlock();
}
message.release();
/**
 * Sends the message to the remote member. This version of send increases
 * nextIndex and backfillToIndex. Does not send if the connection to the
 * remote member is not operational or is being backfilled.
 */
public void send(Message message, int prevLogIndex) {
    assert message.getType() == Message.TYPE_APPEND_ENTRY_REQ;
    lock.lock();
    try {
        if (backfilling && backfillAhead < BACKFILL_AHEAD_LIMIT) {
            // Increase the backfill index to the new value
            backfillToIndex = prevLogIndex + 2;
        } else if (channel.isOperational()
                && (message.isHeartbeat() || nextIndex == prevLogIndex + 1)) {
            latency.head(prevLogIndex + 1);
            addOutQueue(message);
            // Increment nextIndex for each command
            nextIndex += message.numCommands();
        }
    } finally {
        lock.unlock();
    }
}
excess = message.read(in, excess, nextMessage);
if (excess < 0) {
    // End-of-file: the remote member closed the connection.
    // (The warn call's argument list was garbled in the original; the
    // arguments below are reconstructed to match the "[{}-{}] recv({})"
    // pattern used elsewhere in this code.)
    logger.warn("[{}-{}] recv({}): end-of-file", gondola.getHostId(), memberId, peerId);
    message.release();
    nextMessage.release();
    logger.info("peer receiver exiting: g1={} g2={}", generation, Peer.this.generation);
    return;
}
message.handle(handler);
inMessages.value++;
inBytes.value += message.size;
message.release();
public void run() {
    Message message = null;
    while (true) {
        busyWorkers.decrementAndGet();
        try {
            // Get a message
            message = workQueue.take();
        } catch (InterruptedException e) {
            break;
        }
        busyWorkers.incrementAndGet();
        try {
            // Save the message
            message.handle(handler);
            message.release();
        } catch (InterruptedException e) {
            busyWorkers.decrementAndGet();
            break;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            cmember.indexUpdated(true, false);
        }
    }
}
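// Startup sketch (assumed, not from the original source): each worker
// decrements busyWorkers before blocking on take() and increments it
// again afterwards, so busyWorkers must start at the number of workers
// for it to count "workers currently handling a message". Worker is a
// hypothetical Runnable wrapping the run() above; requires
// java.util.concurrent.
AtomicInteger busyWorkers = new AtomicInteger(numWorkers);
BlockingQueue<Message> workQueue = new LinkedBlockingQueue<>();
for (int i = 0; i < numWorkers; i++) {
    new Thread(new Worker(), "MessageWorker-" + i).start();
}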
/**
 * @param rid non-null object containing the leader's latest term and index.
 */
public void sendHeartbeatRequest(Rid rid) {
    Message message = pool.checkout();
    try {
        message.heartbeat(memberId, currentTerm, rid, commitIndex);
        peers.forEach(p -> p.send(message, rid.index - 1));
    } finally {
        message.release();
    }
    lastSentTs = clock.now();
}
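// Scheduling sketch (assumed): recording lastSentTs above suggests the
// leader re-sends a heartbeat on a timer whenever nothing has gone out
// recently. heartbeatPeriod and latestRid are hypothetical names.
if (clock.now() - lastSentTs >= heartbeatPeriod) {
    sendHeartbeatRequest(latestRid);
}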
/**
 * Called after the follower has advanced the savedIndex.
 */
public void sendAppendEntryReply() {
    Peer leader = peers.get(leaderId);
    if (leader != null) {
        Message message = pool.checkout();
        try {
            saveQueue.getLatest(savedRid);
            message.appendEntryReply(memberId, currentTerm, savedRid.index, true, false);
            leader.send(message);
            latency.tail(savedRid.index);
        } finally {
            message.release();
        }
        // Disabled for now because new nodes are not being connected while there is load
        //lastSentTs = clock.now();
    }
}
// Rejection: the previous log entry did not match; reply false with the
// index at which the leader should retry.
message.appendEntryReply(memberId, currentTerm, prevLogIndex + 1, false, false);
peer.send(message);
return false;

// ...

// Rejection: reply false with the next index that failed.
message.appendEntryReply(memberId, currentTerm, failedNextIndex, false, false);
peer.send(message);

// ...

leaderId = fromMemberId;
if (message.isHeartbeat()) {
    // Acknowledge the heartbeat with the latest saved index.
    message.appendEntryReply(memberId, currentTerm, savedRid.index, true, true);
    peer.send(message);
}
if (commitIndex > oldCommitIndex) {
    // ... commit-index advance handling elided
    PhantomRef(T message, ReferenceQueue<Message> queue) {
        super(message, queue);
        id = message.id;
        size = message.size;
        type = message.getType();
        refCount = message.refCount;
    }
}
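// Leak-detection sketch (assumed pool-side code, not from the original
// source): PhantomRef snapshots the message's id/size/type and shares its
// refCount, so once a message is garbage-collected the pool can poll the
// reference queue and report any message that died without being released
// back to zero. checkForLeaks is a hypothetical name.
ReferenceQueue<Message> refQueue = new ReferenceQueue<>();

void checkForLeaks() {
    PhantomRef<?> ref;
    while ((ref = (PhantomRef<?>) refQueue.poll()) != null) {
        if (ref.refCount.get() != 0) {
            logger.warn("message leaked: id={} type={} size={} refCount={}",
                    ref.id, ref.type, ref.size, ref.refCount.get());
        }
    }
}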
/**
 * Sends the message to the remote member. This version of send increases
 * nextIndex and backfillToIndex. Does not send if the connection to the
 * remote member is not operational or is being backfilled.
 */
public void send(Message message, int prevLogIndex) {
    assert message.getType() == Message.TYPE_APPEND_ENTRY_REQ;
    lock.lock();
    try {
        if (backfilling) {
            // Increase the backfill index to the new value
            backfillToIndex = prevLogIndex + 2;
        } else if (channel.isOperational()
                && (message.isHeartbeat() || nextIndex == prevLogIndex + 1)) {
            latency.head(prevLogIndex + 1);
            addOutQueue(message);
            // Increment nextIndex for each command
            nextIndex += message.numCommands();
        }
    } finally {
        lock.unlock();
    }
}
public void run() {
    while (true) {
        Message message = null;
        lock.lock();
        try {
            while ((message = workQueue.poll()) == null) {
                numWaiters++;
                queueEmpty.await();
                numWaiters--;
            }
        } catch (InterruptedException e) {
            break;
        } finally {
            lock.unlock();
        }
        try {
            // Save the message
            message.handle(handler);
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            cmember.indexUpdated(true, false);
        } finally {
            message.release();
        }
    }
}
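// Enqueue-side sketch (assumed, not from the original source): numWaiters
// is maintained by the poll/await loop above so the producer can skip the
// signal entirely when no worker is parked on queueEmpty, avoiding
// needless wakeups. addWork is a hypothetical name.
void addWork(Message message) {
    lock.lock();
    try {
        workQueue.add(message);
        if (numWaiters > 0) {
            queueEmpty.signal();
        }
    } finally {
        lock.unlock();
    }
}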
/**
 * @param rid non-null object containing the leader's latest term and index.
 */
public void sendHeartbeatRequest(Rid rid) {
    Message message = pool.checkout();
    try {
        message.heartbeat(memberId, currentTerm, rid, commitIndex);
        peers.forEach((id, p) -> p.send(message, rid.index - 1));

        // Send to the slaves as well, but first check whether each slave has been inactive
        long now = clock.now();
        for (Iterator<Peer> it = slaves.iterator(); it.hasNext(); ) {
            Peer slave = it.next();
            if (now - slave.getLastReceivedTs() > slaveInactivityTimeout) {
                // Slave is inactive, so stop and remove it rather than send to it
                logger.info("[{}-{}] Removing slave {} because of inactivity for {}ms",
                        gondola.getHostId(), memberId, slave.peerId, slaveInactivityTimeout);
                slave.stop();
                it.remove();
                continue;
            }
            slave.send(message, rid.index - 1);
        }
    } finally {
        message.release();
    }
    lastSentTs = clock.now();
}