/**
 * Forces a metadata fetch for the given topic by requesting its partition
 * list from the producer; the returned partitions are intentionally ignored.
 *
 * @param topic the topic whose metadata should be fetched
 */
public void tryFetchMetadataFor(String topic) {
    producer.partitionsFor(topic);
}
/**
 * Returns the partition metadata for the given topic, delegating the lookup
 * to the wrapped producer.
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    List<PartitionInfo> partitions = delegate.partitionsFor(topic);
    return partitions;
}
private static int[] getPartitionsByTopic(String topic, Producer<byte[], byte[]> producer) { // the fetched list is immutable, so we're creating a mutable copy in order to sort it List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic)); // sort the partitions by partition id to make sure the fetched partition list is the same across subtasks Collections.sort(partitionsList, new Comparator<PartitionInfo>() { @Override public int compare(PartitionInfo o1, PartitionInfo o2) { return Integer.compare(o1.partition(), o2.partition()); } }); int[] partitions = new int[partitionsList.size()]; for (int i = 0; i < partitions.length; i++) { partitions[i] = partitionsList.get(i).partition(); } return partitions; }
private static int[] getPartitionsByTopic(String topic, Producer<byte[], byte[]> producer) { // the fetched list is immutable, so we're creating a mutable copy in order to sort it List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic)); // sort the partitions by partition id to make sure the fetched partition list is the same across subtasks Collections.sort(partitionsList, new Comparator<PartitionInfo>() { @Override public int compare(PartitionInfo o1, PartitionInfo o2) { return Integer.compare(o1.partition(), o2.partition()); } }); int[] partitions = new int[partitionsList.size()]; for (int i = 0; i < partitions.length; i++) { partitions[i] = partitionsList.get(i).partition(); } return partitions; }
/**
 * Deterministically maps a transaction to one of the topic's partitions by
 * hashing the transaction id modulo the current partition count.
 *
 * <p>Note: {@code Math.abs} is applied to the remainder (whose magnitude is
 * always strictly less than the partition count), so the classic
 * {@code Math.abs(Integer.MIN_VALUE)} overflow cannot occur here.
 *
 * @param kafkaTopic the topic whose partition count is looked up
 * @param trxId      the transaction id used as the hash source
 * @return a partition index in {@code [0, partitionCount)}
 * @throws IllegalStateException if no partition metadata is available for the topic
 */
public int calcMessagePartition(String kafkaTopic, TransactionId trxId) {
    List<PartitionInfo> partitionMetaData = kafkaProducer.partitionsFor(kafkaTopic);
    int partitionSize = partitionMetaData.size();
    if (partitionSize == 0) {
        // Guard against ArithmeticException (division by zero) when the
        // metadata lookup yields no partitions for this topic.
        throw new IllegalStateException("No partitions found for topic " + kafkaTopic);
    }
    int partition = Math.abs(trxId.hashCode() % partitionSize);
    return partition;
}
/**
 * Resolves the target partition for the given key.
 *
 * <p>When a custom partitioner is configured it is invoked with the topic's
 * current partition count; otherwise partition 0 is used.
 *
 * @param key       the record key forwarded to the partitioner
 * @param topicName the topic whose partition count is looked up
 * @return the chosen partition, or 0 when no partitioner is configured
 */
private int getPartition(Object key, String topicName) {
    if (this.partitioner == null) {
        return 0;
    }
    int partSize = this.kafkaProducer.partitionsFor(topicName).size();
    return this.partitioner.partition(key, partSize);
}
}
/**
 * Looks up partition metadata for the given topic on the underlying delegate.
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    List<PartitionInfo> result = this.delegate.partitionsFor(topic);
    return result;
}
/**
 * Looks up partition metadata for the given topic on a freshly obtained
 * producer, releasing that producer again once the lookup has finished.
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    Producer<K, V> theProducer = getTheProducer();
    try {
        return theProducer.partitionsFor(topic);
    }
    finally {
        // Always hand the producer back, whether or not the lookup succeeded;
        // inTransaction() determines how it is released.
        closeProducer(theProducer, inTransaction());
    }
}
/**
 * Delegates the partition-metadata lookup for the given topic to the wrapped producer.
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    List<PartitionInfo> infos = producer.partitionsFor(topic);
    return infos;
}
/**
 * Returns partition metadata for the given topic by forwarding the call
 * to the underlying delegate producer.
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    List<PartitionInfo> partitions = this.delegate.partitionsFor(topic);
    return partitions;
}
/**
 * Fetches the partition metadata for the given topic from the wrapped producer.
 */
public List<PartitionInfo> partitionsFor(String topic) {
    List<PartitionInfo> metadata = producer.partitionsFor(topic);
    return metadata;
}
/**
 * Looks up partition metadata for the given topic on the underlying delegate.
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    List<PartitionInfo> result = this.delegate.partitionsFor(topic);
    return result;
}
/**
 * Resolves the target partition for the given key.
 *
 * <p>When a custom partitioner is configured it is invoked with the topic's
 * current partition count; otherwise partition 0 is used.
 *
 * @param key       the record key forwarded to the partitioner
 * @param topicName the topic whose partition count is looked up
 * @return the chosen partition, or 0 when no partitioner is configured
 */
private int getPartition(Object key, String topicName) {
    if (this.partitioner == null) {
        return 0;
    }
    int partSize = this.kafkaProducer.partitionsFor(topicName).size();
    return this.partitioner.partition(key, partSize);
}
}
/**
 * Resolves the target partition for the given key.
 *
 * <p>When a custom partitioner is configured it is invoked with the topic's
 * current partition count; otherwise partition 0 is used.
 *
 * @param key       the record key forwarded to the partitioner
 * @param topicName the topic whose partition count is looked up
 * @return the chosen partition, or 0 when no partitioner is configured
 */
private int getPartition(Object key, String topicName) {
    if (this.partitioner == null) {
        return 0;
    }
    int partSize = this.kafkaProducer.partitionsFor(topicName).size();
    return this.partitioner.partition(key, partSize);
}
}
/**
 * Determines the total number of partitions of the configured topic as
 * (highest partition id + 1), opening a short-lived producer for the lookup.
 *
 * @return the partition count derived from the maximum partition id
 * @throws InvalidPartitionsException if the topic reports no partitions
 */
private int getTotalPartitions() {
    try (Producer<byte[], byte[]> producer = this.getProducer()) {
        int maxPartitionId = -1;
        for (PartitionInfo info : producer.partitionsFor(this.topic)) {
            maxPartitionId = Math.max(maxPartitionId, info.partition());
        }
        if (maxPartitionId < 0) {
            // No metadata at all — same failure the stream-based max() would signal.
            throw new InvalidPartitionsException("partitions not found");
        }
        return maxPartitionId + 1;
    }
}
/**
 * Checks whether the primary Kafka topic backing the given cached topic is
 * reachable, i.e. whether partition metadata can be read for it.
 *
 * @param cachedTopic the topic to probe
 * @return {@code true} when at least one partition is reported; {@code false}
 *         when no partitions are found or the metadata lookup fails
 */
@Override
public boolean isTopicAvailable(CachedTopic cachedTopic) {
    String kafkaTopicName = cachedTopic.getKafkaTopics().getPrimary().name().asString();
    try {
        boolean hasPartitions = producers.get(cachedTopic.getTopic()).partitionsFor(kafkaTopicName).size() > 0;
        if (hasPartitions) {
            return true;
        }
    } catch (Exception e) {
        logger.warn("Could not read information about partitions for topic {}. {}", kafkaTopicName, e.getMessage());
        return false;
    }
    logger.warn("No information about partitions for topic {}", kafkaTopicName);
    return false;
}
/**
 * Creates a short-lived producer, fetches the partition metadata for the
 * configured destination, and tears the producer (and its factory) down again.
 *
 * <p>Fix: the original closed the producer and destroyed the factory only on
 * the success path, leaking both if {@code partitionsFor} threw. Cleanup now
 * runs in a {@code finally} block, preserving the close-then-destroy order.
 *
 * @return the partition metadata of the destination topic
 * @throws Exception if the metadata lookup or the cleanup fails
 */
@Override
public Collection<PartitionInfo> call() throws Exception {
    Producer<byte[], byte[]> producer = producerFB.createProducer();
    try {
        return producer.partitionsFor(destination.getName());
    } finally {
        producer.close();
        ((DisposableBean) producerFB).destroy();
    }
}
/**
 * Tests invocation of methods on KafkaProducer using
 * {@link KafkaSender#doOnProducer(java.util.function.Function)}.
 */
@Test
public void producerMethods() {
    testProducerMethod(producer -> assertEquals(0, producer.metrics().size()));
    testProducerMethod(producer -> assertEquals(2, producer.partitionsFor(topic).size()));
    testProducerMethod(producer -> producer.flush());
}
/**
 * Tests {@link KafkaProducer#partitionsFor(String)} error path: looking up an
 * unknown topic must surface an {@link InvalidTopicException} to the subscriber.
 */
@Test
public void partitionsForNonExistentTopic() {
    sender = new DefaultKafkaSender<>(producerFactory, SenderOptions.create());
    StepVerifier.create(sender.doOnProducer(p -> p.partitionsFor("nonexistent")))
                .expectError(InvalidTopicException.class)
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
}
/**
 * Tests {@link KafkaProducer#partitionsFor(String)} good path: the metadata
 * returned through the sender must match the embedded cluster's view.
 */
@Test
public void partitionsFor() {
    sender = new DefaultKafkaSender<>(producerFactory, SenderOptions.create());
    StepVerifier.create(sender.doOnProducer(p -> p.partitionsFor(topic)))
                .expectNext(cluster.cluster().partitionsForTopic(topic))
                .expectComplete()
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
}