/**
 * Resumes consumption for the given partitions by forwarding the call to the
 * wrapped consumer.
 *
 * @param partitions the topic-partitions to resume fetching from
 */
@Override
public void resume(Collection<TopicPartition> partitions) {
    delegate.resume(partitions);
}
/**
 * Polls the broker for new records while temporarily pausing every assigned
 * partition that is not currently pollable, so the fetch only returns records
 * from pollable partitions. Under AT_MOST_ONCE, offsets are committed
 * synchronously right after the poll, before the records are emitted.
 */
private ConsumerRecords<K, V> pollKafkaBroker(PollablePartitionsInfo pollablePartitionsInfo) {
    // Reposition to the earliest retriable offsets — presumably so failed tuples
    // are re-fetched on this poll (doSeekRetriableTopicPartitions is not visible here).
    doSeekRetriableTopicPartitions(pollablePartitionsInfo.pollableEarliestRetriableOffsets);
    // Pause set = all assigned partitions minus the pollable ones.
    Set<TopicPartition> pausedPartitions = new HashSet<>(consumer.assignment());
    pausedPartitions.removeIf(pollablePartitionsInfo.pollablePartitions::contains);
    try {
        consumer.pause(pausedPartitions);
        final ConsumerRecords<K, V> consumerRecords = consumer.poll(kafkaSpoutConfig.getPollTimeoutMs());
        // Retriable offsets that were compacted away can never be re-fetched; ack them.
        ackRetriableOffsetsIfCompactedAway(pollablePartitionsInfo.pollableEarliestRetriableOffsets, consumerRecords);
        final int numPolledRecords = consumerRecords.count();
        LOG.debug("Polled [{}] records from Kafka", numPolledRecords);
        if (kafkaSpoutConfig.getProcessingGuarantee() == KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE) {
            //Commit polled records immediately to ensure delivery is at-most-once.
            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = createFetchedOffsetsMetadata(consumer.assignment());
            consumer.commitSync(offsetsToCommit);
            LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
        }
        return consumerRecords;
    } finally {
        // Always resume, even on failure, so later polls are not starved by a stale pause.
        consumer.resume(pausedPartitions);
    }
}
/**
 * Executes a poll with every assigned partition paused, purely to send the
 * consumer heartbeat and keep the broker connection alive without actually
 * fetching messages. Partitions are resumed (and the lock released) even if
 * the poll throws.
 */
void retainConnection() {
    pollingLock.lock();
    TopicPartition[] assignments = null;
    try {
        final Set<TopicPartition> assignmentSet = kafkaConsumer.assignment();
        if (assignmentSet.isEmpty()) {
            return;
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Pausing " + assignmentSet);
        }
        assignments = assignmentSet.toArray(new TopicPartition[assignmentSet.size()]);
        kafkaConsumer.pause(assignments);
        kafkaConsumer.poll(0);
        if (logger.isDebugEnabled()) {
            // Log the Set, not the array: concatenating an array yields its identity
            // hash (e.g. "[Lorg...;@1a2b3c"), not its contents.
            logger.debug("Resuming " + assignmentSet);
        }
    } finally {
        try {
            if (assignments != null) {
                kafkaConsumer.resume(assignments);
            }
        } finally {
            pollingLock.unlock();
        }
    }
}
// Re-enable fetching for the partitions paused earlier in this poll cycle.
consumer.resume(pausedTopicPartitions);
LOG.trace("Resumed topic-partitions {}", pausedTopicPartitions);
LOG.debug("Starting consuming from metrics reporter topic partitions {}.", _metricConsumer.assignment());
// Un-pause anything previously paused so every assigned partition is fetchable again.
_metricConsumer.resume(_metricConsumer.paused());
// Running totals for the consumption loop that follows (outside this view).
int totalMetricsAdded = 0;
long maxTimeStamp = -1L;
// Re-enable fetching for the partitions paused earlier in this poll cycle.
consumer.resume(pausedTopicPartitions);
LOG.trace("Resumed topic-partitions {}", pausedTopicPartitions);
/**
 * Delegates the resume call to the underlying Kafka consumer.
 *
 * @param partitions the topic-partitions to resume fetching from
 */
@Override
public void resume(Collection<TopicPartition> partitions) {
    _kafkaConsumer.resume(partitions);
}
/**
 * Delegates the resume call to the underlying Kafka consumer.
 *
 * @param partitions the topic-partitions to resume fetching from
 */
@Override
public void resume(Collection<TopicPartition> partitions) {
    kafkaConsumer.resume(partitions);
}
/**
 * Delegates the resume call to the underlying consumer.
 *
 * @param partitions the topic-partitions to resume fetching from
 */
@Override
public void resume(Collection<TopicPartition> partitions) {
    consumer.resume(partitions);
}
/**
 * Delegates the resume call to the underlying consumer.
 *
 * @param partitions the topic-partitions to resume fetching from
 */
@Override
public void resume(Collection<TopicPartition> partitions) {
    consumer.resume(partitions);
}
/**
 * Resumes consumption for the given partitions by forwarding the call to the
 * wrapped consumer.
 *
 * @param partitions the topic-partitions to resume fetching from
 */
@Override
public void resume(Collection<TopicPartition> partitions) {
    delegate.resume(partitions);
}
// Release the test latch once the mocked consumer's resume(...) is invoked with
// the polled partitions, then let the stubbed call return normally.
// NOTE(review): fragment — the opening of this willAnswer(...) stub is outside this view.
resumeLatch.countDown();
return null;
}).given(consumer).resume(records.keySet());
TopicPartitionInitialOffset[] topicPartition = new TopicPartitionInitialOffset[] {
        new TopicPartitionInitialOffset("foo", 0) };
/**
 * Resumes fetching for the given partitions on the consumer's own thread by
 * submitting a task, completing the future (and the caller's handler) when done.
 *
 * @param topicPartitions   the partitions to resume
 * @param completionHandler invoked once the resume task has run
 * @return this stream, for fluent chaining
 */
@Override
public KafkaReadStream<K, V> resume(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler) {
    this.submitTask((consumer, future) -> {
        consumer.resume(topicPartitions);
        if (future == null) {
            return;
        }
        future.complete();
    }, completionHandler);
    return this;
}
/** * Check if the partition should be un-paused * * @param currentTime * the current time since epoch */ public void maybeUnpause(long currentTime) { if (!paused) { LOGGER.debug("Partition [{}] not paused. Nothing to do", topicPartition); return; } if (currentTime >= pausedTillTime) { if(LOGGER.isInfoEnabled()){ LOGGER.info("Unpausing partition [{}] as the current time [{}] is >= paused time [{}]", new Object[] { topicPartition, new Date(currentTime), new Date(pausedTillTime) }); } // This method does not throw a KafkaException consumer.resume(Collections.singleton(topicPartition)); PAUSED_PARTITIONS.dec(); paused = false; // Reset successful results to 100% successful resetResults(); } else{ if (LOGGER.isDebugEnabled()) LOGGER.debug("Not unpausing partition [{}] as the current time [{}] is < paused time [{}]", topicPartition, currentTime, pausedTillTime); } }
// Resume only the partitions selected for this poll; everything else stays paused.
kafkaConsumer.resume(topicPartitionsToPoll);
records = kafkaConsumer.poll(timeoutMs);
// Resume only the partitions selected for this poll; everything else stays paused.
kafkaConsumer.resume(topicPartitionsToPoll);
records = kafkaConsumer.poll(timeoutMs);
// Resume only the partitions selected for this poll; everything else stays paused.
kafkaConsumer.resume(topicPartitionsToPoll);
records = kafkaConsumer.poll(timeoutMs);
/**
 * Executes a poll with every assigned partition paused, purely to send the
 * consumer heartbeat and keep the broker connection alive without actually
 * fetching messages. Partitions are resumed (and the lock released) even if
 * the poll throws.
 */
void retainConnection() {
    pollingLock.lock();
    TopicPartition[] assignments = null;
    try {
        final Set<TopicPartition> assignmentSet = kafkaConsumer.assignment();
        if (assignmentSet.isEmpty()) {
            return;
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Pausing " + assignmentSet);
        }
        assignments = assignmentSet.toArray(new TopicPartition[assignmentSet.size()]);
        kafkaConsumer.pause(assignments);
        kafkaConsumer.poll(0);
        if (logger.isDebugEnabled()) {
            // Log the Set, not the array: concatenating an array yields its identity
            // hash (e.g. "[Lorg...;@1a2b3c"), not its contents.
            logger.debug("Resuming " + assignmentSet);
        }
    } finally {
        try {
            if (assignments != null) {
                kafkaConsumer.resume(assignments);
            }
        } finally {
            pollingLock.unlock();
        }
    }
}
/**
 * Initializes any newly-created active and standby tasks, feeds the restored
 * changelog partitions back to the active tasks, and — once every active task
 * is running — resumes the consumer's full assignment and assigns standby
 * partitions.
 *
 * @return {@code true} once all active tasks are running, {@code false} otherwise
 * @throws IllegalStateException If store gets registered after initialized is already finished
 * @throws StreamsException if the store's change log does not contain the partition
 */
boolean updateNewAndRestoringTasks() {
    active.initializeNewTasks();
    standby.initializeNewTasks();

    active.updateRestored(changelogReader.restore(active));

    if (!active.allTasksRunning()) {
        return false;
    }
    final Set<TopicPartition> assignment = consumer.assignment();
    log.trace("Resuming partitions {}", assignment);
    consumer.resume(assignment);
    assignStandbyPartitions();
    return true;
}
/**
 * Smoke-tests that simple consumer delegate methods behave like the underlying
 * Kafka consumer: assignment, subscription, metadata, pause/resume and seeking.
 */
@Test
public void consumerMethods() throws Exception {
    testConsumerMethod(consumer -> assertEquals(this.assignedPartitions, consumer.assignment()));
    testConsumerMethod(consumer -> assertEquals(Collections.singleton(topic), consumer.subscription()));
    testConsumerMethod(consumer -> assertEquals(2, consumer.partitionsFor(topics.get(2)).size()));
    testConsumerMethod(consumer -> assertEquals(topics.size(), consumer.listTopics().size()));
    testConsumerMethod(consumer -> assertEquals(0, consumer.metrics().size()));
    // pause/resume round-trip: paused() must report exactly what was paused.
    testConsumerMethod(consumer -> {
        Collection<TopicPartition> pausedSet = Collections.singleton(new TopicPartition(topic, 1));
        consumer.pause(pausedSet);
        assertEquals(pausedSet, consumer.paused());
        consumer.resume(pausedSet);
    });
    // Seeking: beginning -> position 0, end -> position > 0, then restore the original.
    testConsumerMethod(consumer -> {
        TopicPartition tp = new TopicPartition(topic, 1);
        Collection<TopicPartition> single = Collections.singleton(tp);
        long originalPosition = consumer.position(tp);
        consumer.seekToBeginning(single);
        assertEquals(0, consumer.position(tp));
        consumer.seekToEnd(single);
        assertTrue("Did not seek to end", consumer.position(tp) > 0);
        consumer.seek(tp, originalPosition);
    });
}