/** Drops every reference-holding queue that reports a successful release. */
private void clearQueues() {
    // release() serves double duty here: it performs the cleanup and its
    // boolean result decides whether the queue is evicted from the collection.
    queues.removeIf(q -> q.release());
}
/** Shuts down every queue and empties the collection in a single pass. */
void queuesShutdown() {
    queues.removeIf(q -> {
        q.shutdown();
        // Unconditionally remove: once shut down, a queue is discarded.
        return true;
    });
}
/**
 * Reacts to a cluster membership change. Releases the lock if its current
 * owner is no longer a member, prunes pending lock requests and condition
 * waiters whose owners left, then processes what remains of the queue.
 *
 * NOTE(review): the request queue is pruned under synchronized(queue) but
 * condition.queue is pruned without a visible lock — presumably it is
 * thread-safe or guarded elsewhere; verify.
 *
 * @param members the addresses in the new cluster view
 * @return the response produced by processing the remaining request queue
 */
protected Response handleView(List<Address> members) {
    if(owner != null && !members.contains(owner.getAddress())) {
        // Owner left the cluster: release the lock and log the old owner.
        Owner tmp=owner;
        setOwner(null);
        log.debug("%s: unlocked \"%s\" because owner %s left", local_addr, lock_name, tmp);
    }
    synchronized(queue) {
        // Drop queued lock requests whose requesters are gone.
        queue.removeIf(req -> !members.contains(req.owner.getAddress()));
    }
    // Drop condition waiters whose owners are gone.
    condition.queue.removeIf(own -> !members.contains(own.getAddress()));
    return processQueue();
}
/**
 * Handles "clear history" menu clicks: a private-chat clear removes private
 * message entries from the queue, any other clear removes public ones.
 */
@Subscribe
public void onMenuOptionClicked(MenuOptionClicked event)
{
    final String menuOption = event.getMenuOption();
    // Ignore anything that is not a clear-history action.
    if (!menuOption.contains(CLEAR_HISTORY))
    {
        return;
    }

    if (menuOption.startsWith(CLEAR_PRIVATE))
    {
        // Remove every private-message variant: received, sent, mod-received.
        messageQueue.removeIf(entry ->
        {
            final ChatMessageType type = entry.getType();
            return type == ChatMessageType.PRIVATE_MESSAGE_RECEIVED
                || type == ChatMessageType.PRIVATE_MESSAGE_SENT
                || type == ChatMessageType.PRIVATE_MESSAGE_RECEIVED_MOD;
        });
    }
    else
    {
        // Any other clear-history option targets public chat.
        messageQueue.removeIf(entry ->
        {
            final ChatMessageType type = entry.getType();
            return type == ChatMessageType.PUBLIC
                || type == ChatMessageType.PUBLIC_MOD;
        });
    }
}
// Fragment — the enclosing method is not visible in this view.
// Discards partial matches that carry a start event ID but are absent from
// the retained partialMatches set. NOTE(review): matches with a null start
// event ID are deliberately kept — confirm that is the intended semantics.
nfaState.getPartialMatches().removeIf(pm -> pm.getStartEventID() != null && !partialMatches.contains(pm));
// Fragment — the enclosing method is not visible in this view.
// Removes every queued lock entry belonging to the given session (matched by
// session id). NOTE(review): presumably invoked when a session closes or
// expires — verify against the caller.
queue.removeIf(lock -> lock.session.equals(session.sessionId()));
/** Delegates conditional bulk removal to the backing queue. */
@Override
public boolean removeIf(final Predicate filter)
{
    final boolean changed = queue.removeIf(filter);
    return changed;
}
/**
 * Removes every element of the backing list that satisfies the given predicate.
 *
 * @param filter predicate returning {@code true} for elements to remove
 * @return {@code true} if any element was removed
 * @see java.util.Collection#removeIf(java.util.function.Predicate)
 */
public boolean removeIf(Predicate<? super T> filter) { return list.removeIf(filter); }
// Straight pass-through: removal semantics are those of the underlying queue.
@Override
public boolean removeIf(final Predicate filter) {
    return this.queue.removeIf(filter);
}
// Delegates conditional bulk removal to the wrapped queue.
// NOTE(review): the raw Predicate presumably mirrors the overridden
// declaration — a generified signature would be preferable if the parent
// type allows it; verify.
@Override public boolean removeIf(final Predicate filter) { return queue.removeIf(filter); }
/** Forwards conditional removal to the wrapped source collection. */
@Override
public boolean removeIf(Predicate<? super T> filter)
{
    return this.source.removeIf(filter);
}
// Delegates to the backing queue's removeIf.
// NOTE(review): uses a raw Predicate — consider Predicate<? super E> once the
// element type of this class is known; verify against any declaring interface.
public boolean removeIf(Predicate filter) { return queue.removeIf(filter); }
// Evicts every queue whose release() succeeds: release() performs the cleanup
// and its boolean result doubles as the removal predicate.
private void clearQueues() { queues.removeIf(ReferenceHoldingQueue::release); }
// Shuts down every queue, then removes it — the predicate always returns
// true, so the collection is left empty after this call.
void queuesShutdown() { queues.removeIf(queue -> { queue.shutdown(); return true; }); }
/**
 * Removes from the queue every entry whose item shares the given item's
 * info hash. The queue lock is held for the whole scan.
 *
 * @param itemToRemove item whose info hash identifies the entries to drop
 */
public void remove(final T itemToRemove)
{
    this.lock.lock();
    try
    {
        // Match by info hash rather than object identity.
        this.queue.removeIf(queued -> queued.getItem().getInfoHash().equals(itemToRemove.getInfoHash()));
    }
    finally
    {
        this.lock.unlock();
    }
}
/**
 * Reacts to a cluster membership change. Releases the lock if its current
 * owner is no longer a member, prunes pending lock requests and condition
 * waiters whose owners left, then processes what remains of the queue.
 *
 * NOTE(review): the request queue is pruned under synchronized(queue) but
 * condition.queue is pruned without a visible lock — presumably it is
 * thread-safe or guarded elsewhere; verify.
 *
 * @param members the addresses in the new cluster view
 * @return the response produced by processing the remaining request queue
 */
protected Response handleView(List<Address> members) {
    if(owner != null && !members.contains(owner.getAddress())) {
        // Owner left the cluster: release the lock and log the old owner.
        Owner tmp=owner;
        setOwner(null);
        log.debug("%s: unlocked \"%s\" because owner %s left", local_addr, lock_name, tmp);
    }
    synchronized(queue) {
        // Drop queued lock requests whose requesters are gone.
        queue.removeIf(req -> !members.contains(req.owner.getAddress()));
    }
    // Drop condition waiters whose owners are gone.
    condition.queue.removeIf(own -> !members.contains(own.getAddress()));
    return processQueue();
}
/**
 * Repositions this tailer to the given log offset: seeks the underlying
 * Kafka consumer, forgets the last-seen offset for the partition, drops any
 * already-buffered records from that partition, and marks the consumer as
 * moved so subsequent processing knows the position changed.
 *
 * @param offset the partition/offset to seek to
 */
@Override public void seek(LogOffset offset) {
    log.debug("Seek to: " + offset.offset() + " from tailer: " + id);
    // Translate the logical partition name to the concrete Kafka topic-partition.
    TopicPartition topicPartition = new TopicPartition(ns.getTopicName(offset.partition().name()), offset.partition().partition());
    consumer.seek(topicPartition, offset.offset());
    lastOffsets.remove(topicPartition);
    int partition = topicPartition.partition();
    // Buffered records for this partition are now stale — discard them.
    records.removeIf(rec -> rec.partition() == partition);
    consumerMoved = true;
}
/**
 * Tests that acknowledged offsets are committed during a graceful close.
 * Sends 20 messages, acknowledges only those with offset below 5, then
 * restarts with a fresh mock consumer and verifies redelivery of the
 * unacknowledged records.
 */
@Test public void manualAckClose() throws Exception {
    receiverOptions = receiverOptions .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    // Acknowledge only offsets 0-4; the rest stay uncommitted.
    receiveAndVerify(20, r -> { if (r.receiverOffset().offset() < 5) r.receiverOffset().acknowledge(); return Mono.just(r); });
    // Keep only the acknowledged records as the expected "already consumed" set.
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    // NOTE(review): expects 10 redelivered records here — presumably per-partition
    // commit semantics account for the count; verify against the test fixture.
    receiveAndVerify(10);
}
/**
 * Tests that offsets not committed explicitly are NOT committed on close and
 * that uncommitted records are redelivered on the next receive. Batch size 0
 * and zero interval disable automatic commits, so only the explicit
 * commit() calls (offsets below 5) take effect.
 */
@Test public void manualCommitClose() throws Exception {
    receiverOptions = receiverOptions .commitBatchSize(0) .commitInterval(Duration.ZERO) .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    // Explicitly commit only offsets 0-4; everything else remains uncommitted.
    receiveAndVerify(20, r -> { if (r.receiverOffset().offset() < 5) return r.receiverOffset().commit().then(Mono.just(r)); return Mono.just(r); });
    // Keep only the committed records as the expected "already consumed" set.
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    // NOTE(review): expects 10 redelivered records — presumably per-partition
    // commit semantics account for the count; verify against the test fixture.
    receiveAndVerify(10);
}
/**
 * Tests that retriable commit exceptions are retried with
 * {@link KafkaReceiver#receiveAutoAck()}: the mock consumer fails the first
 * 3 commits with a retriable error, and maxCommitAttempts(5) gives the
 * receiver enough retries to succeed anyway.
 */
@Test public void autoAckCommitTransientError() {
    // Inject 3 consecutive transient commit failures.
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 3);
    receiverOptions = receiverOptions .subscription(Collections.singleton(topic)) .maxCommitAttempts(5) .commitBatchSize(2);
    sendMessages(topic, 0, 20);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions) .receiveAutoAck() .concatMap(r -> r);
    verifyMessages(inboundFlux.take(11), 11);
    receivedMessages.removeIf(r -> r.offset() >= 5);
    // Last record should not be committed
    verifyCommits(groupId, topic, 10);
}