/**
 * Common fixture: stubs the topology id on the mocked TopologyContext, assigns the
 * test partition to the MockConsumer, pins its beginning/end offsets around the
 * preloaded record range, and feeds the generated records into the consumer.
 */
@BeforeEach
public void setUp() {
    when(topologyContextMock.getStormId()).thenReturn(topologyId);
    consumer.assign(Collections.singleton(partition));
    consumer.updateBeginningOffsets(Collections.singletonMap(partition, firstOffsetInKafka));
    // End offset is exclusive: first offset plus the number of preloaded records.
    consumer.updateEndOffsets(Collections.singletonMap(partition, firstOffsetInKafka + recordsInKafka));
    SpoutWithMockedConsumerSetupHelper
        .createRecords(partition, firstOffsetInKafka, recordsInKafka)
        .forEach(consumer::addRecord);
}
/**
 * Seeking to the end of the log and polling must yield an empty
 * {@code ConsumerRecords}, even though a record exists before the end offset.
 */
@Test
public void testConsumerRecordsIsEmptyWhenReturningNoRecords() {
    final TopicPartition tp = new TopicPartition("test", 0);
    consumer.assign(Collections.singleton(tp));
    consumer.addRecord(new ConsumerRecord<String, String>("test", 0, 0, null, null));
    consumer.updateEndOffsets(Collections.singletonMap(tp, 1L));
    // Position past the only record so the poll below has nothing to return.
    consumer.seekToEnd(Collections.singleton(tp));
    final ConsumerRecords<String, String> polled = consumer.poll(Duration.ofMillis(1));
    assertThat(polled.isEmpty(), is(true));
    assertThat(polled.count(), is(0));
}
// NOTE(review): fragment of a larger method — the enclosing declaration is not visible in this chunk.
// Minimal config mapping the Kafka broker key to a placeholder value.
Config testConfig = ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.KAFKA_BROKERS, "test"));
// OffsetResetStrategy.NONE: the MockConsumer will not auto-reset on missing offsets; seeks must be explicit.
MockConsumer<String, String> consumer = new MockConsumer<String, String>(OffsetResetStrategy.NONE);
// Assign partition 0 of "test_topic" so subsequent poll() calls can serve records for it.
consumer.assign(Arrays.asList(new TopicPartition("test_topic", 0)));
/**
 * Assigns the given partitions, records the assignment for later inspection, and
 * initializes each partition's beginning offset to 0 and end offset to the number
 * of records preloaded for it.
 *
 * @param assigned the partitions to assign; must not be null
 */
@Override
public synchronized void assign(final Collection<TopicPartition> assigned) {
    super.assign(assigned);
    assignedPartitions.set(ImmutableList.copyOf(assigned));
    for (TopicPartition tp : assigned) {
        updateBeginningOffsets(ImmutableMap.of(tp, 0L));
        // Fix: guard against partitions with no preloaded records — the original
        // records.get(tp).size() threw a NullPointerException for such partitions.
        final long endOffset = records.containsKey(tp) ? records.get(tp).size() : 0L;
        updateEndOffsets(ImmutableMap.of(tp, endOffset));
    }
}
// Override offsetsForTimes() in order to look up the offsets by timestamp.