Consumer(Topic topic, String consumerGroupId, Properties props, PartitionProcessorFactory processorFactory) { this.topic = topic; this.consumerGroupId = consumerGroupId; // Mandatory settings, not changeable props.put("group.id", consumerGroupId); props.put("key.deserializer", StringDeserializer.class.getName()); props.put("value.deserializer", ByteArrayDeserializer.class.getName()); kafka = new KafkaConsumer<>(props); partitions = new AssignedPartitions(processorFactory); long now = System.currentTimeMillis(); // start it consumerLoopExecutor.execute(new ConsumerLoop()); }
/**
 * Stops the poll loop, drains the partition handlers, commits outstanding
 * offsets and closes the underlying Kafka consumer.
 *
 * <p>Blocks up to 2 seconds for the consumer loop plus the handler timeout.
 */
public void shutdown() {
    logger.debug("Shutdown requested for consumer in group {} for topic {}", consumerGroupId, topic.toString());

    isStopped.set(true);
    consumerLoopExecutor.shutdown();
    try {
        consumerLoopExecutor.awaitTermination(2, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of swallowing it, and log via the
        // logger rather than printStackTrace(); we still proceed with cleanup.
        Thread.currentThread().interrupt();
        logger.warn("Interrupted while awaiting consumer loop termination", e);
    }

    // Stop all partition processors, wait for in-flight handlers, then persist
    // the resulting offsets before releasing the Kafka client.
    Set<TopicPartition> allPartitions = partitions.allPartitions();
    partitions.stopProcessing(allPartitions);
    partitions.waitForHandlersToComplete(allPartitions, HANDLER_TIMEOUT_MILLIS);
    kafka.commitSync(partitions.offsetsToBeCommitted());
    kafka.close();

    logger.info("Consumer in group {} for topic {} was shut down.", consumerGroupId, topic.toString());
}
/**
 * Rebalance callback: the broker handed us these partitions, so create the
 * corresponding processors.
 */
@Override
public void onPartitionsAssigned(Collection<TopicPartition> assignedPartitions) {
    logger.debug("ConsumerRebalanceListener.onPartitionsAssigned on {}", assignedPartitions);
    partitions.assignNewPartitions(assignedPartitions);
}
}
/**
 * Rebalance callback: these partitions are being taken away. Drain their
 * handlers and commit progress before dropping the processors.
 */
@Override
public void onPartitionsRevoked(Collection<TopicPartition> revokedPartitions) {
    logger.debug("ConsumerRebalanceListener.onPartitionsRevoked on {}", revokedPartitions);

    // Stop accepting work, let in-flight handlers finish (bounded wait),
    // persist their offsets, and only then forget the partitions.
    partitions.stopProcessing(revokedPartitions);
    partitions.waitForHandlersToComplete(revokedPartitions, HANDLER_TIMEOUT_MILLIS);
    kafka.commitSync(partitions.offsetsToBeCommitted());
    partitions.removePartitions(revokedPartitions);
}
// Verifies that assigning new partitions registers every one of them.
// (Method name fixed: "Partitons" -> "Partitions"; JUnit discovers tests via
// the annotation, so the rename is safe.)
@Test
public void assignNewPartitionsCreatesProcessors() {
    PartitionProcessorFactory ppf = processorFactory();
    AssignedPartitions ap = new AssignedPartitions(ppf);

    // Arbitrary, non-contiguous partition ids to show no ordering assumption.
    Collection<TopicPartition> newPartitions = new ArrayList<>();
    newPartitions.add(new TopicPartition(TOPIC, 3));
    newPartitions.add(new TopicPartition(TOPIC, 1));
    newPartitions.add(new TopicPartition(TOPIC, 99));

    ap.assignNewPartitions(newPartitions);

    assertFalse(ap.allPartitions().isEmpty());
    Set<TopicPartition> partitions = ap.allPartitions();
    assertEquals(3, partitions.size());
    assertTrue(partitions.containsAll(newPartitions));
}
// Verifies that enqueueing a record for an unseen partition lazily creates
// exactly one processor for that partition.
@Test
public void enqueuingARecordForANewPartitionShouldCreateAProcessor() {
    int partitionId = 9;
    ConsumerRecords<String, byte[]> records = givenATestRecord(TOPIC, partitionId, 42);

    AssignedPartitions ap = new AssignedPartitions(processorFactory());
    assertTrue(ap.allPartitions().isEmpty());

    ap.enqueue(records);

    Set<TopicPartition> partitions = ap.allPartitions();
    assertEquals(1, partitions.size());

    TopicPartition onlyPartition = partitions.iterator().next();
    assertEquals(TOPIC, onlyPartition.topic());
    assertEquals(partitionId, onlyPartition.partition());
}
// Hand the freshly polled records to the per-partition processors, then sync
// consumer state: commit whatever offsets the processors have finished, and
// pause/resume partitions as the processors request — presumably flow control
// based on each processor's queue depth; TODO(review) confirm against
// AssignedPartitions.
partitions.enqueue(records); kafka.commitSync(partitions.offsetsToBeCommitted()); kafka.pause(partitions.partitionsToBePaused()); kafka.resume(partitions.partitionsToBeResumed());
/**
 * Registers every given partition by delegating to {@code assignNewPartition}.
 */
void assignNewPartitions(Collection<TopicPartition> assignedPartitions) {
    for (TopicPartition partition : assignedPartitions) {
        assignNewPartition(partition);
    }
}
private void checkIfRefreshCommitRequired() { // Here's the issue: // The retention of __consumer_offsets is less than most topics itself, so we need to re-commit regularly to keep the // last committed offset per consumer group. This is especially an issue in cases were we have bursty / little traffic. Map<TopicPartition, OffsetAndMetadata> commitOffsets = new HashMap<>(); long now = System.currentTimeMillis(); if (nextCommitRefreshRequiredTimestamp < now) { nextCommitRefreshRequiredTimestamp = now + COMMIT_REFRESH_INTERVAL_MILLIS; for (PartitionProcessor processor : partitions.allProcessors()) { TopicPartition assignedPartition = processor.getAssignedPartition(); long lastCommittedOffset = processor.getLastCommittedOffset(); // We haven't committed from this partiton yet if (lastCommittedOffset < 0) { OffsetAndMetadata offset = kafka.committed(assignedPartition); if (offset == null) { // there was no commit on this partition at all continue; } lastCommittedOffset = offset.offset(); processor.forceSetLastCommittedOffset(lastCommittedOffset); } commitOffsets.put(assignedPartition, new OffsetAndMetadata(lastCommittedOffset)); } kafka.commitSync(commitOffsets); logger.info("Refreshing last committed offset {}", commitOffsets); } }
/**
 * Routes each record to its partition's processor, lazily creating a processor
 * the first time a partition is seen.
 */
void enqueue(ConsumerRecords<String, byte[]> records) {
    records.forEach(consumerRecord -> {
        TopicPartition key = new TopicPartition(consumerRecord.topic(), consumerRecord.partition());
        PartitionProcessor processor = processors.get(key);
        if (processor == null) {
            // First record for this partition: register it on the fly.
            processor = assignNewPartition(key);
        }
        processor.enqueue(consumerRecord);
    });
}