try {
    _leader = null;
    List<PartitionMetadata> pMetaList =
            response.topicsMetadata().get(0).partitionsMetadata();
    for (PartitionMetadata pMeta : pMetaList) {
        if (pMeta.partitionId() == _partition) {
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        if (response.topicsMetadata().size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic
                + " found " + response.topicsMetadata().size());
        }
        TopicMetadata topicMetadata = response.topicsMetadata().get(0);
        return topicMetadata.partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
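The example above relies on a createConsumer helper that is not shown. A minimal sketch of what such a helper might look like, assuming the standard kafka.javaapi.consumer.SimpleConsumer constructor; the timeout and buffer values are illustrative, not taken from the original code:

// Hypothetical helper assumed by the example above; timeout and buffer
// sizes are illustrative defaults.
private SimpleConsumer createConsumer(String host, int port, String clientName) {
    final int soTimeoutMs = 100000;   // socket timeout in milliseconds
    final int bufferSize = 64 * 1024; // fetch buffer size in bytes
    return new SimpleConsumer(host, port, soTimeoutMs, bufferSize, clientName);
}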
for (PartitionMetadata part : item.partitionsMetadata()) {
    Node leader = brokerToNode(part.leader());
    KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
for (PartitionMetadata part : item.partitionsMetadata()) {
    if (part.partitionId() == partition) {
        returnMetaData = part;
    return topicMetadata.partitionsMetadata().size();
} else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
    List<KafkaPartition> partitions = Lists.newArrayList();
    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
            log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
            return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
            log.error("Ignoring topic with null partition leader " + topicMetadata.topic()
                + " metadata=" + partitionMetadata);
            return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port())
            .build());
    }
    return partitions;
}
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
    List<KafkaPartition> partitions = Lists.newArrayList();
    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
            LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
            return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
            LOG.error("Ignoring topic with null partition leader " + topicMetadata.topic()
                + " metadata=" + partitionMetadata);
            return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port())
            .build());
    }
    return partitions;
}
private void refreshTopicMetadata(KafkaPartition partition) {
    for (String broker : this.brokers) {
        List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
        if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
            TopicMetadata topicMetadata = topicMetadataList.get(0);
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition.getId()) {
                    partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
                        partitionMetadata.leader().port());
                    break;
                }
            }
            break;
        }
    }
}
private void refreshTopicMetadata(KafkaPartition partition) {
    for (String broker : KafkaWrapper.this.getBrokers()) {
        List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
        if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
            TopicMetadata topicMetadata = topicMetadataList.get(0);
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition.getId()) {
                    partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
                        partitionMetadata.leader().port());
                    break;
                }
            }
            break;
        }
    }
}
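Both refresh variants above depend on a fetchTopicMetadataFromBroker helper that is not shown. A minimal sketch, assuming each broker entry is a "host:port" string and a throwaway SimpleConsumer per call; the timeout, buffer size, and client name are illustrative:

// Hypothetical helper assumed by the refresh methods above; the broker
// string format and connection settings are assumptions.
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String topic) {
    String[] hostPort = broker.split(":");
    SimpleConsumer consumer = new SimpleConsumer(hostPort[0], Integer.parseInt(hostPort[1]),
        30000, 64 * 1024, "metadataLookup");
    try {
        TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
        TopicMetadataResponse response = consumer.send(request);
        return response.topicsMetadata();
    } catch (Exception e) {
        return null; // caller treats null as "try the next broker"
    } finally {
        consumer.close();
    }
}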
for (TopicMetadata item : metaData) {
    if (topic.equals(item.topic())) {
        for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == partitionId) {
                return part;
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}",
            topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        List<TopicMetadata> metaData = response.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
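A caller would typically use the returned leader address to open a consumer directly against the leader broker before fetching offsets or messages. A minimal sketch, assuming Guava's HostAndPort and the same hypothetical createConsumer helper as above:

// Illustrative caller; topicPartition, findLeader, and createConsumer are
// assumed from the surrounding class.
HostAndPort leader = findLeader(topicPartition);
if (leader == null) {
    throw new RuntimeException("No leader found for topic " + topicPartition.getTopic());
}
SimpleConsumer leaderConsumer =
    createConsumer(leader.getHostText(), leader.getPort(), "leaderConsumer");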
for (PartitionMetadata part : metadata.partitionsMetadata()) {
    log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
topicMetadata = topicMetadataList.get(0);
boolean partitionFound = false;
for (PartitionMetadata metadataPerPartition : topicMetadata.partitionsMetadata()) {
    if (metadataPerPartition.partitionId() == partitionMetadata.partitionId()) {
        partitionFound = true;
private Map<String, Long> getTopicOffsets(List<String> topics) {
    ArrayList<HostAndPort> nodes = new ArrayList<>(config.getNodes());
    Collections.shuffle(nodes);

    SimpleConsumer simpleConsumer = consumerManager.getConsumer(nodes.get(0));
    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(topics);
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

    ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
        for (PartitionMetadata part : metadata.partitionsMetadata()) {
            LOGGER.debug(format("Adding Partition %s/%s", metadata.topic(), part.partitionId()));
            Broker leader = part.leader();
            if (leader == null) {
                // Leader election going on...
                LOGGER.warn(format("No leader for partition %s/%s found!", metadata.topic(), part.partitionId()));
            } else {
                HostAndPort leaderHost = HostAndPort.fromParts(leader.host(), leader.port());
                SimpleConsumer leaderConsumer = consumerManager.getConsumer(leaderHost);

                long offset = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId())[0];
                builder.put(metadata.topic(), offset);
            }
        }
    }
    return builder.build();
}
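The method above delegates to a findAllOffsets helper that is not included here. A minimal sketch of such a helper, assuming the standard kafka.javaapi.OffsetRequest/OffsetResponse API and that the most recent offset should come first; the client name is illustrative:

// Hypothetical helper assumed by getTopicOffsets above.
private static long[] findAllOffsets(SimpleConsumer consumer, String topic, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    // Ask for up to Integer.MAX_VALUE offsets before "latest", i.e. all known segment start offsets.
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = ImmutableMap.of(
        topicAndPartition,
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), "offsetLookup");
    OffsetResponse response = consumer.getOffsetsBefore(request);
    return response.offsets(topic, partitionId);
}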
private void updateLeaderMap() {
    for (String broker : brokerList) {
        try {
            SimpleConsumer consumer = getSimpleConsumer(broker);
            TopicMetadataRequest req = new TopicMetadataRequest(auditTopics);
            kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata tmd : metaData) {
                for (PartitionMetadata pmd : tmd.partitionsMetadata()) {
                    TopicAndPartition topicAndPartition = new TopicAndPartition(tmd.topic(), pmd.partitionId());
                    partitionLeader.put(topicAndPartition, getHostPort(pmd.leader()));
                }
            }
        } catch (Exception e) {
            logger.warn("Failed to get metadata from broker=" + broker, e);
        }
    }
}
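Here partitionLeader maps each TopicAndPartition to the result of a getHostPort helper that is not shown. A minimal sketch, assuming a "host:port" string representation and that the leader can be null while a leader election is in progress:

// Hypothetical helper assumed by updateLeaderMap above.
private static String getHostPort(Broker leader) {
    return leader == null ? null : leader.host() + ":" + leader.port();
}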
private TopicMetadataResponse mockTopicMetaDataResponse() {
    PartitionMetadata pMeta = EasyMock.createMock(PartitionMetadata.class);
    mocks.add(pMeta);
    EasyMock.expect(pMeta.errorCode()).andReturn((short) 0).anyTimes();
    Broker broker = new Broker(0, "localhost", 2121);
    EasyMock.expect(pMeta.leader()).andReturn(broker).anyTimes();
    EasyMock.expect(pMeta.partitionId()).andReturn(PARTITION_1_ID).anyTimes();
    List<PartitionMetadata> partitionMetadatas = new ArrayList<PartitionMetadata>();
    partitionMetadatas.add(pMeta);

    TopicMetadata tMeta = EasyMock.createMock(TopicMetadata.class);
    mocks.add(tMeta);
    EasyMock.expect(tMeta.topic()).andReturn(TOPIC_1).anyTimes();
    EasyMock.expect(tMeta.errorCode()).andReturn((short) 0).anyTimes();
    EasyMock.expect(tMeta.partitionsMetadata()).andReturn(partitionMetadatas).anyTimes();
    List<TopicMetadata> topicMetadatas = new ArrayList<TopicMetadata>();
    topicMetadatas.add(tMeta);

    TopicMetadataResponse metadataResponse = EasyMock.createMock(TopicMetadataResponse.class);
    mocks.add(metadataResponse);
    EasyMock.expect(metadataResponse.topicsMetadata()).andReturn(topicMetadatas).anyTimes();
    return metadataResponse;
}
private void refreshTopicMetadata() {
    TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(kafkaRequest.getTopic()));
    TopicMetadataResponse response;
    try {
        response = simpleConsumer.send(request);
    } catch (Exception e) {
        log.error("Exception caught when refreshing metadata for topic " + request.topics().get(0) + ": "
            + e.getMessage());
        return;
    }
    TopicMetadata metadata = response.topicsMetadata().get(0);
    for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
        if (partitionMetadata.partitionId() == kafkaRequest.getPartition()) {
            simpleConsumer = new SimpleConsumer(partitionMetadata.leader().host(), partitionMetadata.leader().port(),
                CamusJob.getKafkaTimeoutValue(context), CamusJob.getKafkaBufferSize(context),
                CamusJob.getKafkaClientName(context));
            break;
        }
    }
}
/**
 * Test that the partitionMetadata is refreshed only when the error code is LeaderNotAvailable.
 * @throws Exception
 */
@Test
public void testRefreshPartitionMetadataWithThreeRetries() throws Exception {
    JobContext dummyContext = null;
    // A partitionMetadata with errorCode LeaderNotAvailable
    PartitionMetadata partitionMetadata = createMock(PartitionMetadata.class);
    expect(partitionMetadata.errorCode()).andReturn(ErrorMapping.LeaderNotAvailableCode())
        .times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
    expect(partitionMetadata.partitionId()).andReturn(0).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
    replay(partitionMetadata);

    TopicMetadata mockedTopicMetadata = createMock(TopicMetadata.class);
    expect(mockedTopicMetadata.topic()).andReturn("testTopic").times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    expect(mockedTopicMetadata.partitionsMetadata()).andReturn(Collections.singletonList(partitionMetadata))
        .times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    replay(mockedTopicMetadata);

    EtlInputFormat etlInputFormat = createMock(EtlInputFormat.class,
        EtlInputFormat.class.getMethod("getKafkaMetadata", new Class[] { JobContext.class, List.class }));
    EasyMock.expect(etlInputFormat.getKafkaMetadata(dummyContext, Collections.singletonList("testTopic")))
        .andReturn(Collections.singletonList(mockedTopicMetadata)).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    etlInputFormat.setLogger(Logger.getLogger(getClass()));
    replay(etlInputFormat);

    etlInputFormat.refreshPartitionMetadataOnLeaderNotAvailable(partitionMetadata, mockedTopicMetadata, dummyContext,
        EtlInputFormat.NUM_TRIES_PARTITION_METADATA);

    verify(mockedTopicMetadata);
    verify(etlInputFormat);
}
expect(mockedTopicMetadata.partitionsMetadata()).andReturn(
    Collections.singletonList(mockedReturnedPartitionMetadata));
replay(mockedTopicMetadata);
        + ErrorMapping.exceptionFor(topicMetadata.errorCode()));
} else {
    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {