/**
 * Retrieves and removes the head token of this queue.
 *
 * @return the head token of this queue, or {@code null} if this queue is empty.
 */
public Token poll() { return tokenQueue.poll(); }
break;
case DROP_OLDEST:
    // Make room by discarding the oldest element (the deque head), then
    // append the new element at the tail and flag that an overflow callback
    // is due.
    dq.poll();
    dq.offer(t);
    callOnOverflow = true;
// Each statement invokes create() separately, so every queue operation runs
// against its own freshly created instance which is then discarded.
// NOTE(review): presumably create() returns an empty queue, in which case
// remove() would throw NoSuchElementException — confirm against the test's
// expectations (it may be exercising exactly that).
create().offer("foo");
create().peek();
create().poll();
create().remove();
create().add("foo");
/**
 * Pops the next handler from the fast-path handler stack.
 *
 * @return a ready-to-go Handler instance if one is available, otherwise {@code null}.
 */
private FastPathHandler popReadyHandler() { return this.fastPathHandlerStack.poll(); }
/**
 * Removes and returns the next pooled connection.
 *
 * @return the head of the connection queue, or {@code null} if the pool is empty.
 */
private ImapConnection pollConnection() {
    // Guard the shared queue with its own monitor, matching the locking
    // discipline used elsewhere for `connections`.
    synchronized (connections) {
        final ImapConnection connection = connections.poll();
        return connection;
    }
}
private synchronized byte doRead() { // if we are here we passed all the validation, so there must be something to read ByteBuffer headBuffer = data.peek(); byte b = headBuffer.get(); if (!headBuffer.hasRemaining()) { // remove empty buffer data.poll(); } return b; }
/**
 * Polls the given deque on behalf of instrumented code.
 *
 * Only BTrace-provided deques and deques loaded by the bootstrap class
 * loader (i.e. JDK implementations) are accepted — presumably to avoid
 * invoking arbitrary user collection code from tracing context (TODO confirm).
 *
 * @param queue the deque to poll
 * @return the head of the deque, or {@code null} if it is empty
 * @throws IllegalArgumentException if the deque type is not allowed
 */
public static <V> V poll(Deque<V> queue) {
    if (queue instanceof BTraceDeque || queue.getClass().getClassLoader() == null) {
        return queue.poll();
    } else {
        // Include the offending type in the message so rejections are diagnosable;
        // the original threw a bare IllegalArgumentException with no context.
        throw new IllegalArgumentException("unsupported deque type: " + queue.getClass().getName());
    }
}
/**
 * Removes and returns the next queued DNS message.
 *
 * @return the head of the current message queue, or {@code null} if none is pending.
 */
public synchronized DnsMessage pollMessage() { return currentMessage.poll(); }
/**
 * Handle connect failures, the first waiter is always failed to avoid infinite reconnection.
 */
private void connectFailed(Holder holder, Throwable cause) {
    Waiter<C> waiter;
    synchronized (this) {
        connecting--;
        // Take the oldest waiter (queue head); may be null if nobody is waiting.
        waiter = waitersQueue.poll();
        weight -= initialWeight;
        holder.removed = true;
        checkProgress();
    }
    // The waiter's handler is invoked outside the synchronized block —
    // presumably to avoid holding this lock while running user callback code.
    if (waiter != null) {
        waiter.handler.handle(Future.failedFuture(cause));
    }
}
/**
 * Closes this buffer, releasing all {@link #offer(long)} values into the {@link Consumer}.
 *
 * This class is typically not used in a scenario suitable for try-with-resource
 * and so having it implement AutoCloseable would be more annoying.
 */
public synchronized void close() {
    flush();
    // Drain any remaining chunks in FIFO order, handing each chunk's values
    // to the consumer.
    while ( !chunks.isEmpty() ) {
        chunkConsumer.accept( chunks.poll().values );
    }
}
@Override
public E poll() {
    // Verify the wrapper's locking contract: the mutex must be held by the
    // current thread whenever the delegate is accessed.
    assertTrue(Thread.holdsLock(mutex));
    return delegate.poll();
}
/**
 * Returns the next node in the traversal and queues its children for later
 * visits.
 *
 * @return the next ExecutionNode
 * @throws java.util.NoSuchElementException if the traversal is exhausted
 */
@Override
public ExecutionNode next() {
    ExecutionNode result = stack.poll();
    if (result == null) {
        // Iterator contract: signal exhaustion explicitly instead of letting
        // the subsequent result.getChildren() call throw a NullPointerException.
        throw new java.util.NoSuchElementException();
    }
    stack.addAll(result.getChildren());
    return result;
}
@Override
public void visitTableSwitchInsn(int i, int i1, Label label, Label ...labels) {
    // A TABLESWITCH instruction consumes its int key from the operand stack;
    // mirror that by popping one entry from the simulated stack.
    simulatedStack.poll();
    if (copyEnabled) {
        super.visitTableSwitchInsn(i, i1, label, labels);
    }
}
@Override
public void visitLookupSwitchInsn(Label label, int[] ints, Label[] labels) {
    // A LOOKUPSWITCH instruction consumes its int key from the operand stack;
    // mirror that by popping one entry from the simulated stack.
    simulatedStack.poll();
    if (copyEnabled) {
        super.visitLookupSwitchInsn(label, ints, labels);
    }
}
/** {@inheritDoc} */
@Override
public E poll() {
    final E polled = deque.poll();
    if (polled == null) {
        return null;
    }
    // A non-null result means an element was actually removed, so keep the
    // external size counter in sync.
    adder.decrement();
    return polled;
}
@Override
public void run(SourceContext<Long> ctx) throws Exception {
    // Emit queued values one at a time until cancelled or exhausted. Each
    // emission happens under the checkpoint lock so that collecting a value
    // and removing it from valuesToEmit are atomic with respect to checkpoints.
    while (isRunning && !this.valuesToEmit.isEmpty()) {
        synchronized (ctx.getCheckpointLock()) {
            ctx.collect(this.valuesToEmit.poll());
        }
    }
}
private void addToCompletedReceives(KafkaChannel channel, Deque<NetworkReceive> stagedDeque) {
    // NOTE(review): assumes stagedDeque is non-empty — a null poll() result
    // would NPE at networkReceive.size() below; confirm callers guarantee this.
    NetworkReceive networkReceive = stagedDeque.poll();
    this.completedReceives.add(networkReceive);
    this.sensors.recordBytesReceived(channel.id(), networkReceive.size());
}
/**
 * Get a list of batches which have been sitting in the accumulator too long and need to be expired.
 */
public List<ProducerBatch> expiredBatches(long now) {
    List<ProducerBatch> expiredBatches = new ArrayList<>();
    for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) {
        // expire the batches in the order of sending
        Deque<ProducerBatch> deque = entry.getValue();
        synchronized (deque) {
            while (!deque.isEmpty()) {
                ProducerBatch batch = deque.getFirst();
                if (batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now)) {
                    // Remove the expired head and abort any appends still in
                    // progress before reporting it as expired.
                    deque.poll();
                    batch.abortRecordAppends();
                    expiredBatches.add(batch);
                } else {
                    // Head has not timed out; batches sit in send order, so no
                    // later batch in this deque has either — stop scanning.
                    maybeUpdateNextBatchExpiryTime(batch);
                    break;
                }
            }
        }
    }
    return expiredBatches;
}
/**
 * Blocks until at least one request is pending, then removes and returns it.
 *
 * @return the oldest pending FakeConnection request (never {@code null})
 */
FakeConnection assertRequest() {
    // isEmpty() is the idiomatic emptiness check; size() can be O(n) on some
    // concurrent queue implementations.
    waitUntil(() -> !pendingRequests.isEmpty());
    FakeConnection request = pendingRequests.poll();
    assertNotNull(request);
    return request;
}
/**
 * Loads the next batch of splits from the current file iterator, falling back
 * to loading the next partition when no iterator is queued.
 *
 * @return a future that completes when more splits can be queued, or
 *         COMPLETED_FUTURE when there is nothing (currently) left to load
 */
private ListenableFuture<?> loadSplits() throws IOException {
    Iterator<InternalHiveSplit> splits = fileIterators.poll();
    if (splits == null) {
        HivePartitionMetadata partition = partitions.poll();
        if (partition == null) {
            // No iterators and no partitions left — nothing to do.
            return COMPLETED_FUTURE;
        }
        // Loading a partition presumably enqueues new file iterators for
        // subsequent calls — confirm against loadPartition().
        return loadPartition(partition);
    }
    while (splits.hasNext() && !stopped) {
        ListenableFuture<?> future = hiveSplitSource.addToQueue(splits.next());
        if (!future.isDone()) {
            // The split queue is full: park the partially-consumed iterator at
            // the FRONT so the next call resumes exactly where this one stopped.
            fileIterators.addFirst(splits);
            return future;
        }
    }
    // No need to put the iterator back, since it's either empty or we've stopped
    return COMPLETED_FUTURE;
}