@Override
public synchronized long position(TopicPartition partition, final Duration timeout) {
    return position(partition);
}

public synchronized void scheduleNopPollTask() {
    schedulePollTask(() -> { });
}
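MockConsumer runs scheduled poll tasks one per poll() call, in FIFO order, so a no-op task makes exactly one poll() return without delivering anything. A minimal sketch of that sequencing, assuming a test-local mockConsumer and a topic name that are illustrative, not from the source:

// First poll(): runs the no-op task and (assuming no records are already buffered) returns empty.
mockConsumer.scheduleNopPollTask();
// Second poll(): runs this task; the record it adds is returned by that same poll.
mockConsumer.schedulePollTask(() ->
    mockConsumer.addRecord(new ConsumerRecord<>("demo_topic", 0, 0L, "k", "v")));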
private void resetOffsetPosition(TopicPartition tp) {
    OffsetResetStrategy strategy = subscriptions.resetStrategy(tp);
    Long offset;
    if (strategy == OffsetResetStrategy.EARLIEST) {
        offset = beginningOffsets.get(tp);
        if (offset == null)
            throw new IllegalStateException("MockConsumer didn't have beginning offset specified, but tried to seek to beginning");
    } else if (strategy == OffsetResetStrategy.LATEST) {
        offset = getEndOffset(endOffsets.get(tp));
        if (offset == null)
            throw new IllegalStateException("MockConsumer didn't have end offset specified, but tried to seek to end");
    } else {
        throw new NoOffsetForPartitionException(tp);
    }
    seek(tp, offset);
}
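For context, a short sketch of how this reset path gets exercised: with OffsetResetStrategy.EARLIEST, the first position() lookup for an assigned partition with no committed offset falls through to the beginning offset registered via updateBeginningOffsets(). The topic name and offset below are illustrative:

MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
TopicPartition tp = new TopicPartition("demo_topic", 0);
consumer.assign(Collections.singleton(tp));
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
// Resolves through the EARLIEST branch above and returns 5; without the
// updateBeginningOffsets() call it would throw IllegalStateException instead.
long position = consumer.position(tp);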
@Test
public void testConsume(TestContext ctx) throws Exception {
    MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
    Async doneLatch = ctx.async();
    consumer.handler(record -> {
        ctx.assertEquals("the_topic", record.topic());
        ctx.assertEquals(0, record.partition());
        ctx.assertEquals("abc", record.key());
        ctx.assertEquals("def", record.value());
        consumer.close(v -> doneLatch.complete());
    });
    consumer.subscribe(Collections.singleton("the_topic"), v -> {
        mock.schedulePollTask(() -> {
            mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
            mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, "abc", "def"));
            mock.seek(new TopicPartition("the_topic", 0), 0L);
        });
    });
}
@Test
public void testSimpleMock() {
    consumer.subscribe(Collections.singleton("test"));
    assertEquals(0, consumer.poll(Duration.ZERO).count());
    consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1)));
    // Mock consumers need to seek manually since they cannot automatically reset offsets
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test", 0), 0L);
    beginningOffsets.put(new TopicPartition("test", 1), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    consumer.seek(new TopicPartition("test", 0), 0);
    ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key1", "value1");
    ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key2", "value2");
    consumer.addRecord(rec1);
    consumer.addRecord(rec2);
    ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(1));
    Iterator<ConsumerRecord<String, String>> iter = recs.iterator();
    assertEquals(rec1, iter.next());
    assertEquals(rec2, iter.next());
    assertFalse(iter.hasNext());
    assertEquals(2L, consumer.position(new TopicPartition("test", 0)));
    consumer.commitSync();
    assertEquals(2L, consumer.committed(new TopicPartition("test", 0)).offset());
}
@BeforeEach
public void setUp() {
    when(topologyContextMock.getStormId()).thenReturn(topologyId);
    consumer.assign(Collections.singleton(partition));
    consumer.updateBeginningOffsets(Collections.singletonMap(partition, firstOffsetInKafka));
    consumer.updateEndOffsets(Collections.singletonMap(partition, firstOffsetInKafka + recordsInKafka));
    List<ConsumerRecord<String, String>> records =
        SpoutWithMockedConsumerSetupHelper.createRecords(partition, firstOffsetInKafka, recordsInKafka);
    records.forEach(record -> consumer.addRecord(record));
}
@Test
public void testConsume() throws Exception {
    Config testConfig = ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.KAFKA_BROKERS, "test"));
    MockConsumer<String, String> consumer = new MockConsumer<String, String>(OffsetResetStrategy.NONE);
    consumer.assign(Arrays.asList(new TopicPartition("test_topic", 0)));
    // Assumed definitions: beginningOffsets, record0, and record1 are referenced below but
    // truncated out of the original snippet; these mirror the shape of record2.
    Map<TopicPartition, Long> beginningOffsets =
        Collections.singletonMap(new TopicPartition("test_topic", 0), 0L);
    ConsumerRecord<String, String> record0 = new ConsumerRecord<>("test_topic", 0, 0L, "key", "value0");
    ConsumerRecord<String, String> record1 = new ConsumerRecord<>("test_topic", 0, 1L, "key", "value1");
    consumer.updateBeginningOffsets(beginningOffsets);
    ConsumerRecord<String, String> record2 = new ConsumerRecord<>("test_topic", 0, 2L, "key", "value2");
    consumer.addRecord(record0);
    consumer.addRecord(record1);
    consumer.addRecord(record2);
    // ... (remainder of the test is elided in the source)
}
@Test
public void testConsumerRecordsIsEmptyWhenReturningNoRecords() {
    TopicPartition partition = new TopicPartition("test", 0);
    consumer.assign(Collections.singleton(partition));
    consumer.addRecord(new ConsumerRecord<String, String>("test", 0, 0, null, null));
    consumer.updateEndOffsets(Collections.singletonMap(partition, 1L));
    // Seeking to the end offset (1) moves the position past the single buffered
    // record at offset 0, so the poll below returns an empty ConsumerRecords.
    consumer.seekToEnd(Collections.singleton(partition));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertThat(records.count(), is(0));
    assertThat(records.isEmpty(), is(true));
}
@Override
public void run() {
    // Add all the records with offset >= current partition position.
    int recordsAdded = 0;
    for (TopicPartition tp : assignedPartitions.get()) {
        long curPos = consumer.position(tp);
        for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
            if (r.offset() >= curPos) {
                consumer.addRecord(r);
                recordsAdded++;
            }
        }
    }
    if (recordsAdded == 0) {
        if (config.get("inject.error.at.eof") != null) {
            consumer.setException(new KafkaException("Injected error in consumer.poll()"));
        }
        // MockConsumer.poll(timeout) does not actually wait even when there aren't any records.
        // Add a small wait here in order to avoid busy looping in the reader.
        Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
        // TODO: BEAM-4086: testUnboundedSourceWithoutBoundedWrapper() occasionally hangs
        // without this wait. Need to look into it.
    }
    consumer.schedulePollTask(this);
}
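The runnable above re-enqueues itself after every poll, which is the standard way to keep a MockConsumer serving records indefinitely. A minimal sketch of the same pattern, with names and record contents that are illustrative, not from the source:

MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
AtomicInteger nextOffset = new AtomicInteger(0);
Runnable enqueueTask = new Runnable() {
    @Override
    public void run() {
        // Each poll() runs this task once: add one record, then re-schedule
        // so the mock keeps producing for the reader's lifetime.
        int offset = nextOffset.getAndIncrement();
        mock.addRecord(new ConsumerRecord<>("demo_topic", 0, offset, "k" + offset, "v" + offset));
        mock.schedulePollTask(this);
    }
};
mock.schedulePollTask(enqueueTask);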
private MockConsumer<String, String> createMockConsumer() {
    MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    Map<org.apache.kafka.common.TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new org.apache.kafka.common.TopicPartition(TOPIC, 0), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    return consumer;
}

private void sendNextBatch(MockConsumer<String, String> consumer) {
    for (int i = 0; i < SEND_BATCH && recordsMock.size() > 0; i++) {
        consumer.addRecord(recordsMock.pop());
    }
}
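A plausible driver for these two helpers, sketched under the assumption that recordsMock and SEND_BATCH come from the surrounding test class (the wiring itself is not shown in the source): deliver one batch per poll until the mocked records are drained.

MockConsumer<String, String> consumer = createMockConsumer();
consumer.schedulePollTask(new Runnable() {
    @Override
    public void run() {
        sendNextBatch(consumer);
        if (!recordsMock.isEmpty()) {
            consumer.schedulePollTask(this); // keep feeding, one batch per poll
        }
    }
});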
@Override
public synchronized void assign(final Collection<TopicPartition> assigned) {
    super.assign(assigned);
    assignedPartitions.set(ImmutableList.copyOf(assigned));
    for (TopicPartition tp : assigned) {
        updateBeginningOffsets(ImmutableMap.of(tp, 0L));
        updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
    }
}

// Override offsetsForTimes() in order to look up the offsets by timestamp.
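The trailing comment points at an offsetsForTimes() override that the snippet does not include. A sketch of what such an override could look like, assuming the same records map used by assign() above and mirroring KafkaConsumer's contract (first record whose timestamp is at least the search timestamp; null when none qualifies):

@Override
public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(
        Map<TopicPartition, Long> timestampsToSearch) {
    Map<TopicPartition, OffsetAndTimestamp> result = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
        OffsetAndTimestamp match = null;
        for (ConsumerRecord<byte[], byte[]> r : records.get(entry.getKey())) {
            if (r.timestamp() >= entry.getValue()) {
                match = new OffsetAndTimestamp(r.offset(), r.timestamp());
                break;
            }
        }
        result.put(entry.getKey(), match); // null when no record is late enough
    }
    return result;
}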
consumer.updatePartitions(topic, partitionMap.get(topic));
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
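The partitionMap consulted above is assumed to hold the PartitionInfo metadata for each topic; a minimal sketch of how one entry could be built (leader and replica nodes left null, which MockConsumer accepts for test metadata):

List<PartitionInfo> partitions =
    Collections.singletonList(new PartitionInfo(topic, 0, null, null, null));
partitionMap.put(topic, partitions);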
@Override
public void close(Duration timeout) {
    close();
}

@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, final Duration timeout) {
    commitSync(offsets);
}

@Override
public OffsetAndMetadata committed(TopicPartition partition, final Duration timeout) {
    return committed(partition);
}
@SuppressWarnings("deprecation") @Test public void testSimpleMockDeprecated() { consumer.subscribe(Collections.singleton("test")); assertEquals(0, consumer.poll(1000).count()); consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1))); // Mock consumers need to seek manually since they cannot automatically reset offsets HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>(); beginningOffsets.put(new TopicPartition("test", 0), 0L); beginningOffsets.put(new TopicPartition("test", 1), 0L); consumer.updateBeginningOffsets(beginningOffsets); consumer.seek(new TopicPartition("test", 0), 0); ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key1", "value1"); ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key2", "value2"); consumer.addRecord(rec1); consumer.addRecord(rec2); ConsumerRecords<String, String> recs = consumer.poll(1); Iterator<ConsumerRecord<String, String>> iter = recs.iterator(); assertEquals(rec1, iter.next()); assertEquals(rec2, iter.next()); assertFalse(iter.hasNext()); assertEquals(2L, consumer.position(new TopicPartition("test", 0))); consumer.commitSync(); assertEquals(2L, consumer.committed(new TopicPartition("test", 0)).offset()); }