/**
 * Convenience factory: builds a test consumer with auto-commit disabled,
 * using a {@link RangeAssignor}, EARLIEST offset reset, and the shared
 * test {@code groupId}.
 */
private KafkaConsumer<String, String> newConsumerNoAutoCommit(Time time, KafkaClient client, Metadata metadata) {
    final boolean autoCommitEnabled = false;
    return newConsumer(time, client, metadata, new RangeAssignor(),
            OffsetResetStrategy.EARLIEST, autoCommitEnabled, groupId);
}
/**
 * Builds a consumer whose first request to the (single) broker will fail
 * with a pending authentication error queued on the mock client.
 */
private KafkaConsumer<String, String> consumerWithPendingAuthentication() {
    Time mockTime = new MockTime();
    Metadata meta = createMetadata();
    MockClient mockClient = new MockClient(mockTime, meta);
    initMetadata(mockClient, singletonMap(topic, 1));

    Node broker = meta.fetch().nodes().get(0);
    // Queue the auth failure before the consumer ever talks to the broker.
    mockClient.createPendingAuthenticationError(broker, 0);

    return newConsumer(mockTime, mockClient, meta, new RangeAssignor(), false);
}
@Test public void fetchResponseWithUnexpectedPartitionIsIgnored() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RangeAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer)); prepareRebalance(client, node, assignor, singletonList(tp0), null); Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>(); fetches1.put(tp0, new FetchInfo(0, 1)); fetches1.put(t2p0, new FetchInfo(0, 10)); // not assigned and not fetched client.prepareResponseFrom(fetchResponse(fetches1), node); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); ConsumerRecords<String, String> records = consumer.poll(Duration.ZERO); assertEquals(0, records.count()); consumer.close(Duration.ofMillis(0)); }
@Test
public void testProtocolMetadataOrder() {
    RoundRobinAssignor roundRobin = new RoundRobinAssignor();
    RangeAssignor range = new RangeAssignor();

    // The JoinGroup protocol list must advertise assignors in the exact
    // order they were configured.
    try (Metrics metrics = new Metrics(time)) {
        ConsumerCoordinator coordinator = buildCoordinator(metrics,
                Arrays.<PartitionAssignor>asList(roundRobin, range),
                ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true);
        List<ProtocolMetadata> protocols = coordinator.metadata();
        assertEquals(2, protocols.size());
        assertEquals(roundRobin.name(), protocols.get(0).name());
        assertEquals(range.name(), protocols.get(1).name());
    }

    // Reversing the configured order reverses the advertised order too.
    try (Metrics metrics = new Metrics(time)) {
        ConsumerCoordinator coordinator = buildCoordinator(metrics,
                Arrays.<PartitionAssignor>asList(range, roundRobin),
                ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true);
        List<ProtocolMetadata> protocols = coordinator.metadata();
        assertEquals(2, protocols.size());
        assertEquals(range.name(), protocols.get(0).name());
        assertEquals(roundRobin.name(), protocols.get(1).name());
    }
}
// NOTE(review): the five identical lines below are fragments torn from
// enclosing test methods that are not fully visible in this chunk. Each
// grabs the single bootstrap broker from the mock metadata and constructs
// a RangeAssignor for that test's consumer setup — presumably standard
// test-fixture boilerplate; confirm against the full file.
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RangeAssignor();
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RangeAssignor();
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RangeAssignor();
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RangeAssignor();
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RangeAssignor();
// Master factory for a fully wired test KafkaConsumer.
// NOTE(review): this method is TRUNCATED in the visible chunk — the body
// continues past the last visible statement (no closing brace shown).
// Comments below cover only the visible configuration prologue.
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, OffsetResetStrategy resetStrategy, boolean autoCommitEnabled, String groupId) {
    // Fixed client identity and metric namespace for the test consumer.
    String clientId = "mock-consumer";
    String metricGroupPrefix = "consumer";
    // Timeouts / retry knobs mirroring typical consumer config defaults.
    long retryBackoffMs = 100;
    int requestTimeoutMs = 30000;
    int defaultApiTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    // Fetch sizing: accept any amount up to Integer.MAX_VALUE bytes,
    // wait at most 500 ms per fetch, 1 MiB per-partition fetch size.
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;
    // String (de)serialization on both key and value.
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    // Single caller-supplied assignor; no interceptors.
    List<PartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.emptyList());
    Metrics metrics = new Metrics();
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
/**
 * Computes range-style partition assignments for the given thread count
 * and per-stream partition counts by delegating to the generic
 * {@code assignments} helper with a {@link RangeAssignor}.
 */
public static List<List<LogPartition>> rangeAssignments(int threads, Map<String, Integer> streams) {
    return assignments(new RangeAssignor(), threads, streams);
}