/**
 * Overrides the fetch offsets that the consumer will use on the next
 * {@link #poll(Duration) poll(timeout)}. If this API is invoked for the same
 * partition more than once, the latest offset will be used on the next poll().
 * Note that you may lose data if this API is arbitrarily used in the middle of
 * consumption, to reset the fetch offsets.
 *
 * @throws IllegalArgumentException if the provided offset is negative
 * @throws IllegalStateException if the provided TopicPartition is not assigned to this consumer
 */
@Override
public void seek(TopicPartition partition, long offset) {
    // Wrap the bare offset in an OffsetAndMetadata carrying no metadata and
    // delegate to the richer overload, which performs the actual seek.
    final OffsetAndMetadata target = new OffsetAndMetadata(offset, null);
    seek(partition, target);
}
/**
 * Returns the topic-to-partition metadata known to this mock consumer,
 * keyed by topic name. Rejects calls made after close via ensureNotClosed().
 *
 * NOTE(review): this hands back the internal {@code partitions} map directly,
 * with no defensive copy — a caller mutating the result mutates the mock's
 * state. Presumably acceptable for a test double; confirm before widening use.
 */
@Override
public synchronized Map<String, List<PartitionInfo>> listTopics() {
    ensureNotClosed();
    return partitions;
}
public void invoke() {
    // Fire the completion callback if one was registered; a null callback
    // makes this a no-op (fire-and-forget commit).
    if (callback == null)
        return;
    callback.onComplete(offsets, exception);
}
}
/**
 * subscribe(Pattern) must reject a null pattern with IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testSubscriptionOnNullPattern() {
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = newConsumer(groupId)) {
        // Typed null local pins the Pattern overload, same as the explicit cast.
        final Pattern nullPattern = null;
        kafkaConsumer.subscribe(nullPattern);
    }
}
/**
 * poll() without a prior subscribe/assign must fail fast with
 * IllegalStateException.
 */
@Test(expected = IllegalStateException.class)
public void testPollWithNoSubscription() {
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = newConsumer((String) null)) {
        kafkaConsumer.poll(Duration.ZERO);
    }
}
/**
 * Repeated close() calls must all succeed — closing an already-closed
 * consumer is a no-op, not an error.
 */
@Test
public void closeShouldBeIdempotent() {
    final KafkaConsumer<byte[], byte[]> kafkaConsumer = newConsumer((String) null);
    // Same three close() invocations as before, expressed as a loop.
    for (int i = 0; i < 3; i++) {
        kafkaConsumer.close();
    }
}
/**
 * assign(null) must be rejected with IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testAssignOnNullTopicPartition() {
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = newConsumer((String) null)) {
        kafkaConsumer.assign(null);
    }
}
/**
 * Moves the fetch position for {@code partition} to the offset carried by
 * {@code offsetAndMetadata}. Rejects calls made after close via
 * ensureNotClosed(). Any metadata string on the argument is not used here —
 * only the offset is applied.
 */
@Override
public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) {
    ensureNotClosed();
    final long offset = offsetAndMetadata.offset();
    subscriptions.seek(partition, offset);
}
/**
 * partitionsFor() must surface a pending AuthenticationException from the
 * underlying client.
 */
@Test(expected = AuthenticationException.class)
public void testPartitionsForAuthenticationFailure() {
    final KafkaConsumer<String, String> kafkaConsumer = consumerWithPendingAuthentication();
    kafkaConsumer.partitionsFor("some other topic");
}
@Override public void onPartitionsAssigned(Collection<TopicPartition> partitions) { // set initial position so we don't need a lookup for (TopicPartition partition : partitions) consumer.seek(partition, 0); } };
/**
 * Commit offsets returned on the last {@link #poll(Duration)} for all the
 * subscribed list of topics and partition.
 * Same as {@link #commitAsync(OffsetCommitCallback) commitAsync(null)} — a
 * fire-and-forget asynchronous commit with no completion callback.
 */
@Override
public void commitAsync() {
    commitAsync(null);
}
/**
 * committed() must surface a pending AuthenticationException from the
 * underlying client.
 *
 * Fix: method name typo corrected ("Faiure" -> "Failure"). JUnit discovers
 * test methods by annotation, so the rename has no callers to break, and it
 * keeps the name consistent with testPartitionsForAuthenticationFailure.
 */
@Test(expected = AuthenticationException.class)
public void testCommittedAuthenticationFailure() {
    final KafkaConsumer<String, String> consumer = consumerWithPendingAuthentication();
    consumer.committed(tp0);
}
// Test helper: builds a consumer with auto-commit disabled; otherwise identical
// to the full factory overload — RangeAssignor, EARLIEST offset reset, and the
// class-level groupId.
private KafkaConsumer<String, String> newConsumerNoAutoCommit(Time time, KafkaClient client, Metadata metadata) {
    return newConsumer(time, client, metadata, new RangeAssignor(), OffsetResetStrategy.EARLIEST, false, groupId);
}
/**
 * Closes this consumer. The {@code timeout} argument is ignored — this
 * implementation delegates straight to the untimed {@link #close()}, which
 * completes immediately, so the bounded-wait contract of the real consumer
 * does not apply here.
 */
@Override
public void close(Duration timeout) {
    close();
}
}
/**
 * Static factory: builds a RetriableCommitFailedException whose message
 * appends {@code additionalMessage} (the underlying error's description) to
 * the standard retry guidance.
 */
public static RetriableCommitFailedException withUnderlyingMessage(String additionalMessage) {
    final String message = "Offset commit failed with a retriable exception. "
            + "You should retry committing the latest consumed offsets. "
            + "The underlying error was: " + additionalMessage;
    return new RetriableCommitFailedException(message);
}
// True if this pair is equal to any element of the given set. Driven by
// this.equals(pair) — the same comparison direction as the original loop —
// rather than Set.contains, which would also consult hashCode.
private boolean in(Set<ConsumerPair> pairs) {
    return pairs.stream().anyMatch(this::equals);
}
}
/**
 * Looks up offsets by timestamp for the given partitions. The {@code timeout}
 * argument is ignored — this implementation simply delegates to the untimed
 * {@link #offsetsForTimes(Map)} overload.
 */
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Duration timeout) {
    return offsetsForTimes(timestampsToSearch);
}
// Test helper: builds a consumer with an EARLIEST offset-reset policy and the
// class-level groupId, forwarding everything else to the full factory overload.
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, boolean autoCommitEnabled) {
    return newConsumer(time, client, metadata, assignor, OffsetResetStrategy.EARLIEST, autoCommitEnabled, groupId);
}
/**
 * subscribe(Collection) must reject a null topic list with
 * IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testSubscriptionOnNullTopicCollection() {
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = newConsumer(groupId)) {
        // Typed null local pins the List overload, same as the explicit cast.
        final List<String> nullTopics = null;
        kafkaConsumer.subscribe(nullTopics);
    }
}
// Deprecated close overload: the timeout/unit arguments are ignored — the mock
// closes immediately by flipping the closed flag. NOTE(review): ensureNotClosed()
// runs before the flag is set, so a second call presumably fails rather than
// no-ops — confirm this matches the intended mock semantics.
@SuppressWarnings("deprecation")
@Override
public synchronized void close(long timeout, TimeUnit unit) {
    ensureNotClosed();
    this.closed = true;
}