/**
 * Get the last committed offset for the given partition (whether the commit was made by this
 * process or another). The returned offset is the position the consumer would resume from after
 * a failure.
 * <p>
 * This performs a remote lookup of the latest committed offset and blocks until the offset is
 * retrieved, an unrecoverable error occurs (which is rethrown), or the {@code default.api.timeout.ms}
 * timeout elapses (raising a {@link org.apache.kafka.common.errors.TimeoutException}).
 *
 * @param partition The partition to check
 * @return The last committed offset and metadata or null if there was no prior commit
 * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
 *             function is called
 * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
 *             this function is called
 * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
 * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
 *             configured groupId. See the exception for more details
 * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
 * @throws org.apache.kafka.common.errors.TimeoutException if the committed offset cannot be found before
 *             the timeout specified by {@code default.api.timeout.ms} expires.
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition) {
    // Delegate to the timeout-accepting overload using the configured default API timeout.
    final Duration defaultTimeout = Duration.ofMillis(defaultApiTimeoutMs);
    return committed(partition, defaultTimeout);
}
/**
 * Builds a human-readable summary of the committed offsets for every currently
 * assigned partition, in the form {@code "<name> committed: [tp,offset] [tp,offset] ..."}.
 * Intended for logging/diagnostics.
 *
 * @return the formatted summary string; partitions without a prior commit are skipped
 */
private String getCommittedOffsetsString() {
    StringBuilder sb = new StringBuilder();
    sb.append(getName()).append(" committed: ");
    for (TopicPartition tp : consumer.assignment()) {
        // committed() returns null when the group has no prior commit for this
        // partition; check explicitly instead of catching the resulting NPE
        // (the original caught NullPointerException as control flow).
        OffsetAndMetadata committed = consumer.committed(tp);
        if (committed != null) {
            sb.append("[").append(tp).append(",")
                    .append(committed.offset())
                    .append("] ");
        } else {
            logger.debug("No committed offset for {}", tp);
        }
    }
    return sb.toString();
}
/**
 * Fetches the last committed offset (with metadata) for every partition of the
 * given topic, as seen by the supplied consumer's group.
 *
 * @param client   consumer used to look up partition info and committed offsets
 * @param topicStr topic whose partitions are inspected
 * @return map of partition to committed offset; partitions with no prior commit are omitted
 */
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
        KafkaConsumer<String, byte[]> client, String topicStr) {
    final Map<TopicPartition, OffsetAndMetadata> result = new HashMap<>();
    for (PartitionInfo info : client.partitionsFor(topicStr)) {
        final TopicPartition tp = new TopicPartition(topicStr, info.partition());
        final OffsetAndMetadata committed = client.committed(tp);
        if (committed != null) {
            result.put(tp, committed);
        }
    }
    return result;
}
/**
 * Fetches the last committed offset (with metadata) for every partition of the
 * configured topic, as seen by the supplied consumer's group.
 * <p>
 * NOTE(review): {@code topicStr} is not declared in this method — presumably an
 * enclosing-class field; confirm against the surrounding class.
 *
 * @param client consumer used to look up partition info and committed offsets
 * @return map of partition to committed offset; partitions with no prior commit are omitted
 */
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
        KafkaConsumer<String, byte[]> client) {
    final Map<TopicPartition, OffsetAndMetadata> result = new HashMap<>();
    for (PartitionInfo info : client.partitionsFor(topicStr)) {
        final TopicPartition tp = new TopicPartition(topicStr, info.partition());
        final OffsetAndMetadata committed = client.committed(tp);
        if (committed != null) {
            result.put(tp, committed);
        }
    }
    return result;
}
/**
 * Returns the group's last committed offset for the given topic-partition,
 * or {@code null} if nothing has been committed yet.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return committed offset, or {@code null} when there is no prior commit
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    final TopicPartition tp = new TopicPartition(topicName, partition);
    final OffsetAndMetadata meta = offsetClient.committed(tp);
    if (meta == null) {
        return null; // no prior commit for this partition
    }
    return meta.offset();
}
/**
 * Looks up the last committed offset of the given topic-partition for this
 * client's group.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return committed offset, or {@code null} when no commit has been made
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    // committed() yields null when the group never committed on this partition.
    final OffsetAndMetadata meta =
            offsetClient.committed(new TopicPartition(topicName, partition));
    return meta == null ? null : meta.offset();
}
/**
 * Fetches the last committed offset for {@code topicName}/{@code partition}.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return the committed offset, or {@code null} if the group has no prior commit
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    final OffsetAndMetadata lastCommit =
            offsetClient.committed(new TopicPartition(topicName, partition));
    if (lastCommit != null) {
        return lastCommit.offset();
    }
    // No commit recorded for this partition.
    return null;
}
/**
 * Returns the last offset committed by this client's group on the given
 * topic-partition, or {@code null} when there is none.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return committed offset, or {@code null} without a prior commit
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    final TopicPartition target = new TopicPartition(topicName, partition);
    final OffsetAndMetadata commit = offsetClient.committed(target);
    if (commit == null) {
        return null; // group has never committed here
    }
    return commit.offset();
}
/**
 * committed() on a consumer whose broker connection has a pending (failing)
 * authentication must surface an AuthenticationException to the caller.
 */
// Fixed typo in the method name: "Faiure" -> "Failure". JUnit discovers tests
// via the @Test annotation, so the rename is safe for the runner.
@Test(expected = AuthenticationException.class)
public void testCommittedAuthenticationFailure() {
    final KafkaConsumer<String, String> consumer = consumerWithPendingAuthentication();
    consumer.committed(tp0);
}
/**
 * Verifies that committed() fetches offsets from the coordinator for manually
 * assigned partitions, both for a single partition and after the assignment is
 * widened to two partitions. Each committed() call is paired with a prepared
 * OffsetFetch response on the mock client, so response order matters here.
 */
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;

    // Mock consumer wiring: mock time, metadata with one topic of 2 partitions,
    // and a MockClient that replays the prepared responses below in order.
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.assign(singletonList(tp0));

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    // Coordinator node id mirrors the broker id (Integer.MAX_VALUE - id convention).
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    consumer.assign(Arrays.asList(tp0, tp1));

    // fetch offset for two topics
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    // Second lookup hits tp1 only; reuse the map with tp0 removed.
    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(tp1).offset());

    consumer.close(Duration.ofMillis(0));
}
@Test public void testOperationsByAssigningConsumerWithDefaultGroupId() { KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null); consumer.assign(singleton(tp0)); try { consumer.committed(tp0); fail("Expected an InvalidGroupIdException"); } catch (InvalidGroupIdException e) { // OK, expected } try { consumer.commitAsync(); fail("Expected an InvalidGroupIdException"); } catch (InvalidGroupIdException e) { // OK, expected } try { consumer.commitSync(); fail("Expected an InvalidGroupIdException"); } catch (InvalidGroupIdException e) { // OK, expected } }
assertEquals(0, consumer.committed(tp0).offset());
offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator); assertEquals(0, consumer.committed(tp0).offset());
newConsumer((String) null).committed(tp0); fail("Expected an InvalidGroupIdException"); } catch (InvalidGroupIdException e) {
/**
 * Periodically re-commits the last committed offset of every assigned partition
 * so the group's offsets are not expired out of __consumer_offsets. Runs at most
 * once per COMMIT_REFRESH_INTERVAL_MILLIS; for partitions this process has not
 * committed from yet, the last committed offset is fetched from the broker first.
 */
private void checkIfRefreshCommitRequired() {
    // Here's the issue:
    // The retention of __consumer_offsets is less than most topics itself, so we need to re-commit regularly to keep the
    // last committed offset per consumer group. This is especially an issue in cases where we have bursty / little traffic.
    Map<TopicPartition, OffsetAndMetadata> commitOffsets = new HashMap<>();
    long now = System.currentTimeMillis();
    if (nextCommitRefreshRequiredTimestamp < now) {
        // Schedule the next refresh before doing the work so a slow pass doesn't shrink the interval.
        nextCommitRefreshRequiredTimestamp = now + COMMIT_REFRESH_INTERVAL_MILLIS;

        for (PartitionProcessor processor : partitions.allProcessors()) {
            TopicPartition assignedPartition = processor.getAssignedPartition();
            long lastCommittedOffset = processor.getLastCommittedOffset();

            // We haven't committed from this partition yet (negative sentinel),
            // so fall back to the broker's view of the group's committed offset.
            if (lastCommittedOffset < 0) {
                OffsetAndMetadata offset = kafka.committed(assignedPartition);
                if (offset == null) {
                    // there was no commit on this partition at all
                    continue;
                }
                lastCommittedOffset = offset.offset();
                // Cache the fetched value on the processor so later passes skip the remote lookup.
                processor.forceSetLastCommittedOffset(lastCommittedOffset);
            }

            commitOffsets.put(assignedPartition, new OffsetAndMetadata(lastCommittedOffset));
        }

        // Re-commit the same offsets to reset their retention clock.
        kafka.commitSync(commitOffsets);
        logger.info("Refreshing last committed offset {}", commitOffsets);
    }
}
/**
 * Queries the group's last committed offset for the given topic-partition.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return committed offset, or {@code null} if the group never committed here
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    final OffsetAndMetadata record =
            offsetClient.committed(new TopicPartition(topicName, partition));
    // Null record means no commit exists for this partition.
    return record != null ? record.offset() : null;
}
/**
 * Resolves the last committed offset for {@code topicName}/{@code partition}.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return committed offset, or {@code null} when absent
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    final TopicPartition tp = new TopicPartition(topicName, partition);
    final OffsetAndMetadata oam = offsetClient.committed(tp);
    if (oam != null) {
        return oam.offset();
    }
    return null; // nothing committed yet
}
/**
 * Reads back the last committed offset on the given topic-partition for this
 * client's consumer group.
 *
 * @param topicName topic to query
 * @param partition partition number within the topic
 * @return committed offset, or {@code null} when no prior commit exists
 */
@Override
public Long getCommittedOffset(String topicName, int partition) {
    final TopicPartition queried = new TopicPartition(topicName, partition);
    final OffsetAndMetadata state = offsetClient.committed(queried);
    if (state == null) {
        // The group has no commit recorded for this partition.
        return null;
    }
    return state.offset();
}