private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId)
{
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);

    // The API implies that this will always return all of the offsets. So it seems a partition can not have
    // more than Integer.MAX_VALUE - 1 segments.
    //
    // This also assumes that the lowest value returned will be the first segment available. So if segments
    // have been dropped off, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo =
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(
            ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
            kafka.api.OffsetRequest.CurrentVersion(),
            consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);

    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'");
    }

    return offsetResponse.offsets(topicName, partitionId);
}
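A minimal usage sketch for the method above. The broker address, timeout, buffer size, client id, and topic name are all made-up values. The offsets come back in descending order: offsets[0] is the log-end offset and the last element is the earliest offset still available, so consecutive pairs describe per-segment ranges.

// Illustrative values only: host, port, timeouts, client id and topic are assumptions.
SimpleConsumer consumer = new SimpleConsumer("broker-1", 9092, 10000, 64 * 1024, "offset-probe");
try {
    long[] offsets = findAllOffsets(consumer, "my-topic", 0);
    // offsets are descending: offsets[0] = log-end offset, last element = earliest available offset
    for (int i = offsets.length - 1; i > 0; i--) {
        System.out.printf("offset range [%d, %d)%n", offsets[i], offsets[i - 1]);
    }
} finally {
    consumer.close();
}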
try {
    log.info("Finding new leader from Kafka brokers, try broker [%s]", broker.toString());
    consumer = new SimpleConsumer(broker.getHostText(), broker.getPort(), SO_TIMEOUT_MILLIS, BUFFER_SIZE, leaderLookupClientId);
    TopicMetadataResponse resp = consumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));

    List<TopicMetadata> metaData = resp.topicsMetadata();
    for (TopicMetadata item : metaData) {
        if (topic.equals(item.topic())) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == partitionId) {
                    return part;
                }
            }
        }
    }
}
finally {
    if (consumer != null) {
        consumer.close();
    }
}
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        if (response.topicsMetadata().size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic + " found " +
                response.topicsMetadata().size());
        }
        TopicMetadata topicMetadata = response.topicsMetadata().get(0);
        return topicMetadata.partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
TopicMetadataResponse response =
    _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(_topic)));
try {
    _leader = null;
    List<PartitionMetadata> pMetaList = response.topicsMetadata().get(0).partitionsMetadata();
    for (PartitionMetadata pMeta : pMetaList) {
        if (pMeta.partitionId() == _partition) {
            _leader = new KafkaBrokerWrapper(pMeta.leader());
            break;
        }
    }
} catch (Exception e) {
    // Leader lookup failed; leave _leader as null. (The original excerpt is truncated here,
    // so this handler is only a plausible completion.)
    _leader = null;
}
public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);

    if (simpleConsumer == null) {
        LOG.error("Consumer is null, cannot get offset for partition: " + partition);
        return -1;
    }

    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(),
        simpleConsumer.clientId());

    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    } else {
        return NO_OFFSET;
    }
}
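A hedged usage sketch for getOffset: besides a wall-clock timestamp in milliseconds, startOffsetTime accepts the two sentinel values defined on kafka.api.OffsetRequest. The topic name and partition below are made up.

// Sentinels defined by the old consumer API:
long earliestOffset = getOffset("my-topic", 0, kafka.api.OffsetRequest.EarliestTime()); // -2L
long latestOffset   = getOffset("my-topic", 0, kafka.api.OffsetRequest.LatestTime());   // -1L
// Or a wall-clock timestamp (resolution is per log segment, not per message):
long oneHourAgoMillis = System.currentTimeMillis() - 3600 * 1000L;
long offsetOneHourAgo = getOffset("my-topic", 0, oneHourAgoMillis);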
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
    List<KafkaPartition> partitions = Lists.newArrayList();

    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
            LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
            return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
            LOG.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata="
                + partitionMetadata);
            return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
    }
    return partitions;
}
private void refreshTopicMetadata(KafkaPartition partition) {
    for (String broker : this.brokers) {
        List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
        if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
            TopicMetadata topicMetadata = topicMetadataList.get(0);
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition.getId()) {
                    partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
                        partitionMetadata.leader().port());
                    break;
                }
            }
            break;
        }
    }
}
private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
    try {
        ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
        return messageBuffer.iterator();
    } catch (Exception e) {
        LOG.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
            + "The remainder of this partition will be skipped.", partition, e));
        return null;
    }
}
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, List<Pattern> blacklist, List<Pattern> whitelist) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker);
    if (topicMetadataList == null) {
        return null;
    }

    List<TopicMetadata> filteredTopicMetadataList = Lists.newArrayList();
    for (TopicMetadata topicMetadata : topicMetadataList) {
        if (DatasetFilterUtils.survived(topicMetadata.topic(), blacklist, whitelist)) {
            filteredTopicMetadataList.add(topicMetadata);
        }
    }
    return filteredTopicMetadataList;
}
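A small sketch of how this filtered lookup might be called; the broker string and the regular expressions are made-up examples.

// Hypothetical patterns and broker address.
List<Pattern> blacklist = ImmutableList.of(Pattern.compile(".*_test$"));
List<Pattern> whitelist = ImmutableList.of(Pattern.compile("^events_.*"));
List<TopicMetadata> matching = fetchTopicMetadataFromBroker("broker-1:9092", blacklist, whitelist);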
private FetchRequest createFetchRequest(KafkaPartition partition, long nextOffset) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getId());
    PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(nextOffset, this.bufferSize);
    Map<TopicAndPartition, PartitionFetchInfo> fetchInfo =
        Collections.singletonMap(topicAndPartition, partitionFetchInfo);
    return new FetchRequest(this.fetchCorrelationId, this.clientName, this.fetchTimeoutMillis, this.fetchMinBytes,
        fetchInfo);
}
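A sketch of how createFetchRequest and getIteratorFromFetchResponse above would typically be combined into a fetch step. The consumer, partition object, and starting offset are assumed to come from the surrounding class, and the error handling shown is illustrative only.

// Assumed: 'consumer' is a SimpleConsumer connected to the partition leader,
// 'partition' is the KafkaPartition being read, 'startOffset' is where to begin.
long nextOffset = startOffset;
FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);
FetchResponse fetchResponse = consumer.fetch(fetchRequest);
if (fetchResponse.hasError()) {
    // e.g. OffsetOutOfRange or NotLeaderForPartition; a real reader would refresh metadata and retry.
    throw new RuntimeException("fetch failed with error code "
        + fetchResponse.errorCode(partition.getTopicName(), partition.getId()));
}
Iterator<MessageAndOffset> messages = getIteratorFromFetchResponse(fetchResponse, partition);
while (messages != null && messages.hasNext()) {
    MessageAndOffset messageAndOffset = messages.next();
    nextOffset = messageAndOffset.nextOffset();
    ByteBuffer payload = messageAndOffset.message().payload(); // decode as needed
}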
for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
    if (item.errorCode() != ErrorMapping.NoError()) {
        LOG.warn("Error while getting metadata from broker {} for topics {}: {}. Skipping ...",
            seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(),
            ErrorMapping.exceptionFor(item.errorCode()).getMessage());
        continue;
    }
    if (!topics.contains(item.topic())) {
        LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");
        continue;
    }
    for (PartitionMetadata part : item.partitionsMetadata()) {
        Node leader = brokerToNode(part.leader());
        KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
        KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
        partitions.add(pInfo);
    }
}
try {
    if (consumer == null) {
        consumer = new SimpleConsumer(brokerHost.getHost(), brokerHost.getPort(), config.socketTimeoutMs,
            config.socketReceiveBufferBytes, config.clientId);
    }

    TopicMetadataRequest req = new TopicMetadataRequest(topics);
    kafka.javaapi.TopicMetadataResponse resp = null;
    try {
        resp = consumer.send(req);
    } catch (Exception e) {
        errors += 1;
        consumer.close();
        consumer = null;
    }

    if (resp != null) {
        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == partition) {
                    returnMetaData = part;
                    break loop;
                }
            }
        }
    }
} finally {
    if (consumer != null) {
        consumer.close();
        consumer = null;
    }
}
private long getOffset(boolean earliest) throws InterruptedException {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(
        topicAndPartition,
        new PartitionOffsetRequestInfo(
            earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1
        )
    );

    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
    OffsetResponse response;
    try {
        response = consumer.getOffsetsBefore(request);
    } catch (Exception e) {
        ensureNotInterrupted(e);
        log.error(e, "caught exception in getOffsetsBefore [%s] - [%s]", topic, partitionId);
        return -1;
    }

    if (response.hasError()) {
        log.error(
            "error fetching offset data from the broker [%s]. reason: [%s]",
            leaderBroker.host(),
            response.errorCode(topic, partitionId)
        );
        return -1;
    }

    long[] offsets = response.offsets(topic, partitionId);
    return earliest ? offsets[0] : offsets[offsets.length - 1];
}
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
    List<KafkaPartition> partitions = Lists.newArrayList();

    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
            log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
            return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
            log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata="
                + partitionMetadata);
            return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
    }
    return partitions;
}
private void refreshTopicMetadata(KafkaPartition partition) {
    for (String broker : KafkaWrapper.this.getBrokers()) {
        List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
        if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
            TopicMetadata topicMetadata = topicMetadataList.get(0);
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition.getId()) {
                    partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
                        partitionMetadata.leader().port());
                    break;
                }
            }
            break;
        }
    }
}
@Override
public List<KafkaTopic> getTopics() {
    List<TopicMetadata> topicMetadataList = getFilteredMetadataList();

    List<KafkaTopic> filteredTopics = Lists.newArrayList();
    for (TopicMetadata topicMetadata : topicMetadataList) {
        List<KafkaPartition> partitions = getPartitionsForTopic(topicMetadata);
        filteredTopics.add(new KafkaTopic(topicMetadata.topic(), partitions));
    }
    return filteredTopics;
}
private FetchRequest createFetchRequest(KafkaPartition partition, long nextOffset) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getId());
    PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(nextOffset, this.bufferSize);
    Map<TopicAndPartition, PartitionFetchInfo> fetchInfo =
        Collections.singletonMap(topicAndPartition, partitionFetchInfo);
    return new FetchRequest(this.fetchCorrelationId, this.clientName, this.fetchTimeoutMillis, this.fetchMinBytes,
        fetchInfo);
}
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(),
            topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);

        List<TopicMetadata> metaData = response.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
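A sketch of what a caller would usually do with the leader returned by findLeader: open a SimpleConsumer against that broker for the actual fetches. The topic, partition, timeout, buffer size, and client id below are illustrative, and TopicPartition is assumed to be the same (topic, partition) holder that findLeader takes.

// Hypothetical values; a real caller would take these from configuration.
TopicPartition topicPartition = new TopicPartition("my-topic", 0);
HostAndPort leader = findLeader(topicPartition);
if (leader == null) {
    throw new RuntimeException("no leader found for " + topicPartition);
}
SimpleConsumer partitionConsumer = new SimpleConsumer(
    leader.getHostText(), leader.getPort(), 10000, 64 * 1024, "partition-0-reader");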
@Override
public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
    List<TopicMetadata> topicMetadataList = getFilteredMetadataList(blacklist, whitelist);

    List<KafkaTopic> filteredTopics = Lists.newArrayList();
    for (TopicMetadata topicMetadata : topicMetadataList) {
        List<KafkaPartition> partitions = getPartitionsForTopic(topicMetadata);
        filteredTopics.add(new KafkaTopic(topicMetadata.topic(), partitions));
    }
    return filteredTopics;
}
SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));

TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
    for (PartitionMetadata part : metadata.partitionsMetadata()) {
        log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());

        Broker leader = part.leader();
        if (leader == null) {
            throw new PrestoException(GENERIC_INTERNAL_ERROR,
                format("Leader election in progress for Kafka topic '%s' partition %s", metadata.topic(), part.partitionId()));
        }

        HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
        SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);

        long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());

        // Offsets are returned newest-first; each consecutive pair becomes one split.
        // The split construction and its trailing leader-address argument are reconstructed from context.
        for (int i = offsets.length - 1; i > 0; i--) {
            splits.add(new KafkaSplit(
                metadata.topic(),
                kafkaTableHandle.getKeyDataFormat(),
                kafkaTableHandle.getMessageDataFormat(),
                kafkaTableHandle.getKeyDataSchemaLocation().map(KafkaSplitManager::readSchema),
                kafkaTableHandle.getMessageDataSchemaLocation().map(KafkaSplitManager::readSchema),
                part.partitionId(),
                offsets[i],
                offsets[i - 1],
                partitionLeader));
        }
    }
}