@SuppressWarnings("unchecked") private static ConsumerRecords<CommandId, Command> someConsumerRecords( final ConsumerRecord... consumerRecords) { return new ConsumerRecords( ImmutableMap.of(TOPIC_PARTITION, ImmutableList.copyOf(consumerRecords))); }
return new ConsumerRecords<>(results);
// Builds ConsumerRecords from alternating (CommandId, Command) argument pairs;
// Command values may be null.
private static ConsumerRecords<CommandId, Command> buildRecords(final Object... args) {
    assertThat(args.length % 2, equalTo(0));

    final List<ConsumerRecord<CommandId, Command>> records = new ArrayList<>();
    for (int i = 0; i < args.length; i += 2) {
        assertThat(args[i], instanceOf(CommandId.class));
        assertThat(args[i + 1], anyOf(is(nullValue()), instanceOf(Command.class)));
        records.add(
            new ConsumerRecord<>(COMMAND_TOPIC, 0, 0, (CommandId) args[i], (Command) args[i + 1]));
    }

    return new ConsumerRecords<>(Collections.singletonMap(COMMAND_TOPIC_PARTITION, records));
}
// Builds a ConsumerRecords instance containing a single record on partition 1 of the given topic.
private ConsumerRecords buildRecords(final String kafkaTopicName) {
    return new ConsumerRecords<>(
        ImmutableMap.of(
            new TopicPartition(kafkaTopicName, 1),
            Arrays.asList(
                new ConsumerRecord<>(
                    kafkaTopicName, 1, 1, 1L, TimestampType.CREATE_TIME,
                    1L, 10, 10, "key", "1234567890")
            )
        )
    );
}
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
    onConsumeCount++;
    if (throwExceptionOnConsume) {
        throw new KafkaException("Injected exception in FilterConsumerInterceptor.onConsume.");
    }

    // filters out topic/partitions with partition == filterPartition
    Map<TopicPartition, List<ConsumerRecord<K, V>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        if (tp.partition() != filterPartition) {
            recordMap.put(tp, records.records(tp));
        }
    }
    return new ConsumerRecords<K, V>(recordMap);
}
@Test
public void testNextTupleEmitsAtMostOneTuple() {
    //The spout should emit at most one message per call to nextTuple
    //This is necessary for Storm to be able to throttle the spout according to maxSpoutPending
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
        spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> records = new HashMap<>();
    records.put(partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 10));

    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(records));

    spout.nextTuple();

    verify(collectorMock, times(1)).emit(anyString(), anyList(), any(KafkaSpoutMessageId.class));
}
@Test
public void shouldFilterNullValues() {
    replay(schemaRegistryClient);

    final ConsumerRecord<String, Bytes> record = new ConsumerRecord<>(
        "some-topic", 1, 1, "key", null);

    final RecordFormatter formatter = new RecordFormatter(schemaRegistryClient, "some-topic");

    final ConsumerRecords<String, Bytes> records = new ConsumerRecords<>(
        ImmutableMap.of(new TopicPartition("some-topic", 1), ImmutableList.of(record)));

    assertThat(formatter.format(records), empty());
}
private void doTestModeCannotReplayTuples(KafkaSpoutConfig<String, String> spoutConfig) {
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
        spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(
        partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

    spout.nextTuple();

    ArgumentCaptor<KafkaSpoutMessageId> msgIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList(), msgIdCaptor.capture());
    assertThat("Should have captured a message id", msgIdCaptor.getValue(), not(nullValue()));

    spout.fail(msgIdCaptor.getValue());

    reset(consumerMock);
    when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(
        partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 1, 1))));

    spout.nextTuple();

    //The consumer should not be seeking to retry the failed tuple, it should just be continuing from the current position
    verify(consumerMock, never()).seek(eq(partition), anyLong());
}
private void doTestModeDisregardsMaxUncommittedOffsets(KafkaSpoutConfig<String, String> spoutConfig) {
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
        spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(Collections.singletonMap(
            partition, SpoutWithMockedConsumerSetupHelper.createRecords(
                partition, 0, spoutConfig.getMaxUncommittedOffsets()))))
        .thenReturn(new ConsumerRecords<>(Collections.singletonMap(
            partition, SpoutWithMockedConsumerSetupHelper.createRecords(
                partition, spoutConfig.getMaxUncommittedOffsets() - 1, spoutConfig.getMaxUncommittedOffsets()))));

    for (int i = 0; i < spoutConfig.getMaxUncommittedOffsets() * 2; i++) {
        spout.nextTuple();
    }

    verify(consumerMock, times(2)).poll(anyLong());
    verify(collectorMock, times(spoutConfig.getMaxUncommittedOffsets() * 2))
        .emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList());
}
private void doFilterNullTupleTest(KafkaSpoutConfig.ProcessingGuarantee processingGuarantee) {
    //STORM-3059
    KafkaSpoutConfig<String, String> spoutConfig = createKafkaSpoutConfigBuilder(
            mock(TopicFilter.class), mock(ManualPartitioner.class), -1)
        .setProcessingGuarantee(processingGuarantee)
        .setTupleTrackingEnforced(true)
        .setRecordTranslator(new NullRecordTranslator<>())
        .build();

    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
        spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(
        partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

    spout.nextTuple();

    verify(collectorMock, never()).emit(any(), any(), any());
}
@Test
public void iterator() throws Exception {
    Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = new LinkedHashMap<>();

    String topic = "topic";
    records.put(new TopicPartition(topic, 0), new ArrayList<ConsumerRecord<Integer, String>>());

    ConsumerRecord<Integer, String> record1 = new ConsumerRecord<>(
        topic, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, "value1");
    ConsumerRecord<Integer, String> record2 = new ConsumerRecord<>(
        topic, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, "value2");
    records.put(new TopicPartition(topic, 1), Arrays.asList(record1, record2));

    records.put(new TopicPartition(topic, 2), new ArrayList<ConsumerRecord<Integer, String>>());

    ConsumerRecords<Integer, String> consumerRecords = new ConsumerRecords<>(records);
    Iterator<ConsumerRecord<Integer, String>> iter = consumerRecords.iterator();

    int c = 0;
    for (; iter.hasNext(); c++) {
        ConsumerRecord<Integer, String> record = iter.next();
        assertEquals(1, record.partition());
        assertEquals(topic, record.topic());
        assertEquals(c, record.offset());
    }
    assertEquals(2, c);
}
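For contrast with the flat iterator exercised in the test above, a minimal sketch (not part of the original test) of walking the same records partition by partition, using the partitions(), records(TopicPartition), and count() accessors that ConsumerRecords also exposes:

// Minimal sketch, assuming a ConsumerRecords<Integer, String> named consumerRecords
// built exactly as in the iterator() test above.
for (TopicPartition tp : consumerRecords.partitions()) {
    for (ConsumerRecord<Integer, String> record : consumerRecords.records(tp)) {
        System.out.printf("partition=%d offset=%d value=%s%n",
            record.partition(), record.offset(), record.value());
    }
}
// count() reports the total number of records across all partitions (2 here).
assertEquals(2, consumerRecords.count());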
private Result getFormatter(final byte[] data) {
    final ConsumerRecord<String, Bytes> record = new ConsumerRecord<>(
        "some-topic", 1, 1, "key", new Bytes(data));

    final RecordFormatter formatter = new RecordFormatter(schemaRegistryClient, "some-topic");

    final ConsumerRecords<String, Bytes> records = new ConsumerRecords<>(
        ImmutableMap.of(new TopicPartition("some-topic", 1), ImmutableList.of(record)));

    final List<String> formatted = formatter.format(records);
    assertThat("Only expect one line", formatted, hasSize(1));

    return new Result(formatter.getFormat(), formatted.get(0));
}
@Override @SuppressWarnings("deprecation") public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) { // This will ensure that we get the cluster metadata when onConsume is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { List<ConsumerRecord<String, String>> lst = new ArrayList<>(); for (ConsumerRecord<String, String> record: records.records(tp)) { lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(), record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value().toUpperCase(Locale.ROOT))); } recordMap.put(tp, lst); } return new ConsumerRecords<String, String>(recordMap); }
@Test
public void testAtMostOnceModeCommitsBeforeEmit() throws Exception {
    //At-most-once mode must commit tuples before they are emitted to the topology to ensure that a spout crash won't cause replays.
    KafkaSpoutConfig<String, String> spoutConfig = createKafkaSpoutConfigBuilder(
            mock(TopicFilter.class), mock(ManualPartitioner.class), -1)
        .setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE)
        .build();
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
        spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(
        partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

    spout.nextTuple();

    when(consumerMock.position(partition)).thenReturn(1L);

    //The spout should have emitted the tuple, and must have committed it before emit
    InOrder inOrder = inOrder(consumerMock, collectorMock);
    inOrder.verify(consumerMock).poll(anyLong());
    inOrder.verify(consumerMock).commitSync(commitCapture.capture());
    inOrder.verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList());

    CommitMetadataManager metadataManager = new CommitMetadataManager(
        contextMock, KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE);
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = commitCapture.getValue();
    assertThat(committedOffsets.get(partition).offset(), is(0L));
    assertThat(committedOffsets.get(partition).metadata(), is(metadataManager.getCommitMetadata()));
}
@Test
public void testAtMostOnceModeDoesNotCommitAckedTuples() throws Exception {
    //When tuple tracking is enabled, the spout must not commit acked tuples in at-most-once mode because they were committed before being emitted
    KafkaSpoutConfig<String, String> spoutConfig = createKafkaSpoutConfigBuilder(
            mock(TopicFilter.class), mock(ManualPartitioner.class), -1)
        .setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE)
        .setTupleTrackingEnforced(true)
        .build();
    try (SimulatedTime time = new SimulatedTime()) {
        KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
            spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(
            partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

        spout.nextTuple();
        clearInvocations(consumerMock);

        ArgumentCaptor<KafkaSpoutMessageId> msgIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
        verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList(), msgIdCaptor.capture());
        assertThat("Should have captured a message id", msgIdCaptor.getValue(), not(nullValue()));

        spout.ack(msgIdCaptor.getValue());

        Time.advanceTime(KafkaSpout.TIMER_DELAY_MS + spoutConfig.getOffsetsCommitPeriodMs());
        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.emptyMap()));

        spout.nextTuple();

        verify(consumerMock, never()).commitSync(argThat((Map<TopicPartition, OffsetAndMetadata> arg) -> {
            return !arg.containsKey(partition);
        }));
    }
}
return this.interceptors.onConsume(new ConsumerRecords<>(records));
@Test
public void testNoGuaranteeModeCommitsPolledTuples() throws Exception {
    //When using the no guarantee mode, the spout must commit tuples periodically, regardless of whether they've been acked
    KafkaSpoutConfig<String, String> spoutConfig = createKafkaSpoutConfigBuilder(
            mock(TopicFilter.class), mock(ManualPartitioner.class), -1)
        .setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.NO_GUARANTEE)
        .setTupleTrackingEnforced(true)
        .build();
    try (SimulatedTime time = new SimulatedTime()) {
        KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
            spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(
            partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

        spout.nextTuple();

        when(consumerMock.position(partition)).thenReturn(1L);

        ArgumentCaptor<KafkaSpoutMessageId> msgIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
        verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList(), msgIdCaptor.capture());
        assertThat("Should have captured a message id", msgIdCaptor.getValue(), not(nullValue()));

        Time.advanceTime(KafkaSpout.TIMER_DELAY_MS + spoutConfig.getOffsetsCommitPeriodMs());

        spout.nextTuple();

        verify(consumerMock).commitAsync(commitCapture.capture(), isNull());

        CommitMetadataManager metadataManager = new CommitMetadataManager(
            contextMock, KafkaSpoutConfig.ProcessingGuarantee.NO_GUARANTEE);
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = commitCapture.getValue();
        assertThat(committedOffsets.get(partition).offset(), is(1L));
        assertThat(committedOffsets.get(partition).metadata(), is(metadataManager.getCommitMetadata()));
    }
}
@Test
public void shouldAggregateTotalMessageConsumptionAcrossAllConsumers() {
    final ConsumerCollector collector1 = new ConsumerCollector();
    collector1.configure(ImmutableMap.of(ConsumerConfig.CLIENT_ID_CONFIG, "client1"));

    final ConsumerCollector collector2 = new ConsumerCollector();
    collector2.configure(ImmutableMap.of(ConsumerConfig.CLIENT_ID_CONFIG, "client2"));

    final Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = new HashMap<>();
    final List<ConsumerRecord<Object, Object>> recordList = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        recordList.add(new ConsumerRecord<>(TEST_TOPIC, 1, 1, 1L, TimestampType.CREATE_TIME,
            1L, 10, 10, "key", "1234567890"));
    }
    records.put(new TopicPartition(TEST_TOPIC, 1), recordList);
    final ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);

    collector1.onConsume(consumerRecords);
    collector2.onConsume(consumerRecords);

    assertEquals(20, MetricCollectors.totalMessageConsumption(), 0);
}
@Test
public void shouldDisplayRateThroughput() {
    final ConsumerCollector collector = new ConsumerCollector();
    // collector.configure(new Metrics(), "group", new SystemTime());

    for (int i = 0; i < 100; i++) {
        final Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = ImmutableMap.of(
            new TopicPartition(TEST_TOPIC, 1),
            Arrays.asList(new ConsumerRecord<>(TEST_TOPIC, 1, i, 1L, TimestampType.CREATE_TIME,
                1L, 10, 10, "key", "1234567890"))
        );
        final ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);
        collector.onConsume(consumerRecords);
    }

    final Collection<TopicSensors.Stat> stats = collector.stats(TEST_TOPIC, false);
    assertNotNull(stats);

    assertThat(stats.toString(), containsString("name=consumer-messages-per-sec,"));
    assertThat(stats.toString(), containsString("total-messages, value=100.0"));
}
@Test
public void shouldKeepWorkingWhenDuplicateTopicConsumerIsRemoved() {
    final ConsumerCollector collector1 = new ConsumerCollector();
    collector1.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "stream-thread-1"));

    final ConsumerCollector collector2 = new ConsumerCollector();
    collector2.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "stream-thread-2"));

    final Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = ImmutableMap.of(
        new TopicPartition(TEST_TOPIC, 1),
        Arrays.asList(new ConsumerRecord<>(TEST_TOPIC, 1, 1, 1L, TimestampType.CREATE_TIME,
            1L, 10, 10, "key", "1234567890"))
    );
    final ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);

    collector1.onConsume(consumerRecords);
    collector2.onConsume(consumerRecords);

    final String firstPassStats = MetricCollectors.getAndFormatStatsFor(TEST_TOPIC, false);
    assertTrue("Missed stats, got:" + firstPassStats, firstPassStats.contains("total-messages: 2"));

    collector2.close();

    collector1.onConsume(consumerRecords);

    final String statsForTopic2 = MetricCollectors.getAndFormatStatsFor(TEST_TOPIC, false);
    assertTrue("Missed stats, got:" + statsForTopic2, statsForTopic2.contains("total-messages: 2"));
}
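An alternative to hand-building ConsumerRecords maps in tests like the ones above is Kafka's own MockConsumer. A minimal sketch under the assumption that the test only needs a single partition of a placeholder topic named "test-topic":

// Minimal sketch using org.apache.kafka.clients.consumer.MockConsumer instead of
// constructing ConsumerRecords by hand; "test-topic" is a placeholder name.
MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
TopicPartition tp = new TopicPartition("test-topic", 0);
consumer.assign(Collections.singletonList(tp));
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
consumer.addRecord(new ConsumerRecord<>("test-topic", 0, 0L, "key", "value"));

// poll() hands back the queued records as a regular ConsumerRecords instance.
ConsumerRecords<String, String> polled = consumer.poll(Duration.ofMillis(100));
assertEquals(1, polled.count());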