@Override
public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> topicPartitions) throws Exception {
    consumer.assign(topicPartitions);
}
@Test
public void testAssignOnEmptyTopicPartition() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId)) {
        consumer.assign(Collections.<TopicPartition>emptyList());
        assertTrue(consumer.subscription().isEmpty());
        assertTrue(consumer.assignment().isEmpty());
    }
}
public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> topicPartitions) throws Exception {
    consumer.assign(topicPartitions);
}
@Test
public void testSubscription() {
    KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId);

    consumer.subscribe(singletonList(topic));
    assertEquals(singleton(topic), consumer.subscription());
    assertTrue(consumer.assignment().isEmpty());

    consumer.subscribe(Collections.<String>emptyList());
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());

    consumer.assign(singletonList(tp0));
    assertTrue(consumer.subscription().isEmpty());
    assertEquals(singleton(tp0), consumer.assignment());

    consumer.unsubscribe();
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());

    consumer.close();
}
@Deprecated
static void assignPartitions(final KafkaConsumer consumer, final String topic, final Set<Integer> partitions) {
    consumer.assign(
        partitions.stream()
            .map(n -> new TopicPartition(topic, n))
            .collect(Collectors.toList())
    );
}
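// Illustrative usage of the deprecated helper above; "events" and the partition ids are
// placeholders, and "consumer" is assumed to be an already-configured KafkaConsumer.
assignPartitions(consumer, "events", new HashSet<>(Arrays.asList(0, 1, 2)));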
@Test
public void testPause() {
    KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId);

    consumer.assign(singletonList(tp0));
    assertEquals(singleton(tp0), consumer.assignment());
    assertTrue(consumer.paused().isEmpty());

    consumer.pause(singleton(tp0));
    assertEquals(singleton(tp0), consumer.paused());

    consumer.resume(singleton(tp0));
    assertTrue(consumer.paused().isEmpty());

    consumer.unsubscribe();
    assertTrue(consumer.paused().isEmpty());

    consumer.close();
}
@Override
public void assign(Set<StreamPartition<Integer>> streamPartitions) {
    consumer.assign(streamPartitions
        .stream()
        .map(x -> new TopicPartition(x.getStream(), x.getPartitionId()))
        .collect(Collectors.toSet()));
    seekToEarliest(streamPartitions);
}
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    // Seek to the beginning of the partition; the resulting position is the log-start offset.
    consumer.seekToBeginning(Arrays.asList(topicPartition));
    return consumer.position(topicPartition);
}
public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    // Seek to the end of the partition; the resulting position is the log-end offset.
    consumer.seekToEnd(Arrays.asList(topicPartition));
    return consumer.position(topicPartition);
}
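// Illustrative helper built only from getEarliestOffset/getLatestOffset above (the name
// getAvailableRecordCount is hypothetical). It reports how many records are currently fetchable
// in a partition; note that both helpers re-assign the consumer to the single partition they probe.
public static long getAvailableRecordCount(KafkaConsumer consumer, String topic, int partitionId) {
    long earliest = getEarliestOffset(consumer, topic, partitionId);
    long latest = getLatestOffset(consumer, topic, partitionId);
    // Log-end offset minus log-start offset bounds the number of records still available to read.
    return latest - earliest;
}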
@Test(expected = IllegalArgumentException.class)
public void testAssignOnNullTopicInPartition() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(singleton(new TopicPartition(null, 0)));
    }
}
@Test(expected = IllegalStateException.class)
public void testPollWithEmptyUserAssignment() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId)) {
        consumer.assign(Collections.<TopicPartition>emptySet());
        consumer.poll(Duration.ZERO);
    }
}
@Test(expected = IllegalArgumentException.class)
public void testAssignOnEmptyTopicInPartition() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(singleton(new TopicPartition(" ", 0)));
    }
}
@Test(expected = IllegalArgumentException.class)
public void testSeekNegative() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(singleton(new TopicPartition("nonExistTopic", 0)));
        consumer.seek(new TopicPartition("nonExistTopic", 0), -1);
    }
}
@Test(expected = IllegalArgumentException.class)
public void testAssignOnNullTopicPartition() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(null);
    }
}
@Override
public <K, V> Collection<ConsumerRecord<K, V>> getAllRecordsFromTopic(Properties properties, String topic, int partition, long timeout) {
    List<ConsumerRecord<K, V>> result = new ArrayList<>();

    try (KafkaConsumer<K, V> consumer = new KafkaConsumer<>(properties)) {
        consumer.assign(Arrays.asList(new TopicPartition(topic, partition)));

        // Keep polling until a poll returns no records, then stop.
        while (true) {
            boolean processedAtLeastOneRecord = false;

            Iterator<ConsumerRecord<K, V>> iterator = consumer.poll(timeout).iterator();
            while (iterator.hasNext()) {
                ConsumerRecord<K, V> record = iterator.next();
                result.add(record);
                processedAtLeastOneRecord = true;
            }

            if (!processedAtLeastOneRecord) {
                break;
            }
        }

        consumer.commitSync();
    }

    return UnmodifiableList.decorate(result);
}
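// Illustrative caller-side setup for getAllRecordsFromTopic above. The broker address, topic,
// partition, timeout and group id are placeholders, and "reader" stands for an instance of the
// implementing class; a group.id is supplied because the method ends with commitSync().
Properties props = new Properties();
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "records-reader");
props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
Collection<ConsumerRecord<byte[], byte[]>> records = reader.getAllRecordsFromTopic(props, "my-topic", 0, 5_000L);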
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));

    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
    consumer.close(Duration.ofMillis(0));
}
@Test(expected = NoOffsetForPartitionException.class)
public void testMissingOffsetNoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
}
@Test
public void testResetToCommittedOffset() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);

    consumer.poll(Duration.ZERO);
    assertEquals(539L, consumer.position(tp0));
}
@Test
public void testResetUsingAutoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            OffsetResetStrategy.LATEST, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));

    consumer.poll(Duration.ZERO);
    assertEquals(50L, consumer.position(tp0));
}
@Test
public void testOperationsByAssigningConsumerWithDefaultGroupId() {
    KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null);
    consumer.assign(singleton(tp0));

    try {
        consumer.committed(tp0);
        fail("Expected an InvalidGroupIdException");
    } catch (InvalidGroupIdException e) {
        // OK, expected
    }

    try {
        consumer.commitAsync();
        fail("Expected an InvalidGroupIdException");
    } catch (InvalidGroupIdException e) {
        // OK, expected
    }

    try {
        consumer.commitSync();
        fail("Expected an InvalidGroupIdException");
    } catch (InvalidGroupIdException e) {
        // OK, expected
    }
}
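// Illustrative counterpart to the test above (the test name is hypothetical): without a group.id,
// manual assignment, seeking and polling still work; only group-based calls such as committed()
// and commitSync()/commitAsync() throw InvalidGroupIdException. Reuses newConsumer and tp0 from
// the tests above.
@Test
public void testPollWorksWithoutGroupIdWhenPartitionsAreAssigned() {
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(singleton(tp0));
        consumer.seekToBeginning(singleton(tp0));
        assertEquals(singleton(tp0), consumer.assignment());
        // A zero-duration poll returns immediately; no coordinator lookup is required.
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ZERO);
        assertTrue(records.isEmpty());
    }
}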