/**
 * Returns, without removing, the buffer at the head of the outbound
 * queue — the next buffer the SelectorKernel should write.
 *
 * @return the head outbound buffer, or {@code null} if nothing is queued
 */
protected ByteBuffer peekPending() {
    final ByteBuffer head = outbound.peek();
    return head;
}
/**
 * Returns the head of the pending-query queue without dequeuing it,
 * or {@code null} when no query is pending.
 */
@Override
public SchedulerQueryContext peekFirst() {
    final SchedulerQueryContext head = pendingQueries.peek();
    return head;
}
/**
 * Returns the next write request without removing it from the queue,
 * or {@code null} when the queue is empty.
 */
public WriteRequest peek() {
    final WriteRequest next = queue.peek();
    return next;
}
/** Exposes the head of the write queue without consuming it; {@code null} if none is pending. */
public WriteRequest peek() {
    return queue.peek();
}
/**
 * Peeks at the oldest queued outbound message event.
 *
 * @return the head of the pending-write queue, or {@code null} if empty
 */
MessageEvent getPendingWrite() {
    MessageEvent head;
    head = pendingWriteQueue.peek();
    return head;
}
/**
 * Resolves the client bound to the server currently at the head of the
 * rotation queue (the server all writes are directed to).
 */
private PacemakerClient getWriteClient() {
    final String headServer = servers.peek();
    return getClientForServer(headServer);
}
/**
 * Drains, in FIFO order, every pending read at the head of the queue that
 * has already finished ({@code isDone}), invoking its completion callback,
 * and stops at the first unfinished request. Once the queue is fully
 * empty, counts down {@code done} to signal drain completion.
 */
synchronized void triggerCallbacks() {
    for (;;) {
        final PendingReadRequest head = pendingReads.peek();
        if (head == null || !head.isDone) {
            // Preserve ordering: never complete a later request before an
            // earlier one still in flight.
            break;
        }
        pendingReads.remove();
        head.complete(readEntryListener);
    }
    if (pendingReads.isEmpty()) {
        done.countDown();
    }
}
public void processRequest(Request request) throws RequestProcessorException { // request.addRQRec(">tobe"); next.processRequest(request); Proposal p = toBeApplied.peek(); if (p != null && p.request != null && p.request.zxid == request.zxid) { toBeApplied.remove(); } }
/**
 * Removes and returns the head of the proposed-packet queue when it
 * matches the given zxid.
 *
 * <p>If the queue is empty, or its head carries a later zxid, there is no
 * proposal to match this ack (logged at debug and ignored). A head with an
 * earlier zxid means commit acks arrived out of order with respect to
 * proposals, which is treated as a fatal protocol error.
 *
 * @param zxid zxid of the committed proposal being acknowledged
 * @return the matching packet, or {@code null} when no proposal matches
 * @throws RuntimeException if the head packet's zxid precedes {@code zxid}
 */
private synchronized QuorumPacket removeProposedPacket(long zxid) {
    QuorumPacket pkt = proposedPkts.peek();
    if (pkt == null || pkt.getZxid() > zxid) {
        LOG.debug("ignore missing proposal packet for {}", Long.toHexString(zxid));
        return null;
    }
    if (pkt.getZxid() != zxid) {
        // Fixed format: was "0x%d", which printed a DECIMAL value behind a hex
        // "0x" prefix — a misleading message for anyone correlating zxids in
        // logs (zxids are conventionally logged in hex, cf. Long.toHexString above).
        final String m = String.format(
                "Unexpected proposal packet on commit ack, expected zxid 0x%x got zxid 0x%x",
                zxid, pkt.getZxid());
        LOG.error(m);
        throw new RuntimeException(m);
    }
    proposedPkts.remove();
    return pkt;
}
/**
 * Rotates the server queue and tears down the client for the server that
 * was at the head, so the next call to getWriteClient() builds a fresh
 * client for the new head server.
 */
private void rotateClients() {
    // Capture the current write client and its server BEFORE rotating.
    PacemakerClient c = getWriteClient();
    String server = servers.peek();
    // Servers should be rotated **BEFORE** the old client is removed from clientForServer
    // or a race with getWriteClient() could cause it to be put back in the map.
    servers.add(servers.remove());
    clientForServer.remove(server);
    // Tear down the old client only after it is unreachable via the map.
    c.shutdown();
    c.close();
}
private void checkRequestTimeout() { while (!requestTimeoutQueue.isEmpty()) { RequestTime request = requestTimeoutQueue.peek(); if (request == null || (System.currentTimeMillis() - request.creationTimeMs) < operationTimeoutMs) { // if there is no request that is timed out then exit the loop break; } request = requestTimeoutQueue.poll(); CompletableFuture<ProducerResponse> requestFuture = pendingRequests.remove(request.requestId); if (requestFuture != null && !requestFuture.isDone() && requestFuture.completeExceptionally(new TimeoutException( request.requestId + " lookup request timedout after ms " + operationTimeoutMs))) { log.warn("{} request {} timed out after {} ms", ctx.channel(), request.requestId, operationTimeoutMs); } else { // request is already completed successfully. } } }
while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break;
try { while (getRunning().get()) { ClusterMetaEntity metaEntity = clusterMetaQueue.peek(); if (null != metaEntity) {
private void scheduleTimeoutTask() { long timeoutSec = config.getAddEntryTimeoutSeconds(); // disable timeout task checker if timeout <= 0 if (timeoutSec > 0) { this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(() -> { OpAddEntry opAddEntry = pendingAddEntries.peek(); if (opAddEntry != null) { boolean isTimedOut = opAddEntry.lastInitTime != -1 && TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - opAddEntry.lastInitTime) >= timeoutSec && opAddEntry.completed == FALSE; if (isTimedOut) { log.error("Failed to add entry for ledger {} in time-out {} sec", (opAddEntry.ledger != null ? opAddEntry.ledger.getId() : -1), timeoutSec); opAddEntry.handleAddFailure(opAddEntry.ledger); } } }, config.getAddEntryTimeoutSeconds(), config.getAddEntryTimeoutSeconds(), TimeUnit.SECONDS); } }
void subscribeNext() { if (requested > 0) { Signal<Publisher<? extends T>> o = queue.poll(); if (o == null) return; if (o.isOnComplete()) { broadcastComplete(); } else { Publisher<? extends T> source = o.get(); currentSubscriber = new ConcatInnerSubscriber(); source.subscribe(currentSubscriber); } } else { // requested == 0, so we'll peek to see if we are completed, otherwise wait until another request Signal<?> o = queue.peek(); if (o != null && o.isOnComplete()) { broadcastComplete(); } } }
/**
 * Test command that uses a null request argument
 *
 * <p>Verifies that two collapsed commands queued with a {@code null}
 * argument are served by a single execution — presumably they share a
 * request-cache key (TODO confirm against SuccessfulCacheableCollapsedCommand) —
 * and that a later re-queue hits the cache without a new execution.
 */
@Test
public void testRequestCacheWithNullRequestArgument() throws Exception {
    ConcurrentLinkedQueue<HystrixCommand<List<String>>> commands = new ConcurrentLinkedQueue<HystrixCommand<List<String>>>();
    final TestCollapserTimer timer = new TestCollapserTimer();
    // Two commands with identical (null) arguments, caching enabled.
    SuccessfulCacheableCollapsedCommand command1 = new SuccessfulCacheableCollapsedCommand(timer, null, true, commands);
    SuccessfulCacheableCollapsedCommand command2 = new SuccessfulCacheableCollapsedCommand(timer, null, true, commands);
    Future<String> f1 = command1.queue();
    Future<String> f2 = command2.queue();
    // increment past batch time so it executes
    timer.incrementTime(15);
    assertEquals("NULL", f1.get(1000, TimeUnit.MILLISECONDS));
    assertEquals("NULL", f2.get(1000, TimeUnit.MILLISECONDS));
    // it should have executed 1 command
    assertEquals(1, commands.size());
    assertTrue(commands.peek().getExecutionEvents().contains(HystrixEventType.SUCCESS));
    assertTrue(commands.peek().getExecutionEvents().contains(HystrixEventType.COLLAPSED));
    // Re-queue after the first batch completed.
    Future<String> f3 = command1.queue();
    // increment past batch time so it executes
    timer.incrementTime(15);
    assertEquals("NULL", f3.get(1000, TimeUnit.MILLISECONDS));
    // it should still be 1 ... no new executions
    assertEquals(1, commands.size());
    // The request log should record exactly one executed command for the request.
    assertEquals(1, HystrixRequestLog.getCurrentRequest().getAllExecutedCommands().size());
    Iterator<HystrixInvokableInfo<?>> cmdIterator = HystrixRequestLog.getCurrentRequest().getAllExecutedCommands().iterator();
    assertEquals(1, cmdIterator.next().getNumberCollapsed());
}
@Override public void run() { PendingReadRequest nextRequest = pendingRequests.peek();
pauseTransition = null; eventsSet.remove(currentEvent); currentEvent = eventQueue.peek(); close(); });