/**
 * Fetches topic metadata from the given broker, keeping only the topics that
 * survive the blacklist/whitelist filter.
 *
 * @param broker    broker to query for metadata
 * @param blacklist topic-name patterns to exclude
 * @param whitelist topic-name patterns to include
 * @return the filtered metadata, or {@code null} if the underlying fetch returned {@code null}
 */
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, List<Pattern> blacklist, List<Pattern> whitelist) {
  List<TopicMetadata> fetched = fetchTopicMetadataFromBroker(broker);
  if (fetched == null) {
    return null;
  }
  List<TopicMetadata> survivors = Lists.newArrayList();
  for (TopicMetadata metadata : fetched) {
    if (DatasetFilterUtils.survived(metadata.topic(), blacklist, whitelist)) {
      survivors.add(metadata);
    }
  }
  return survivors;
}
/**
 * Returns every topic from the filtered metadata list, paired with its
 * partition information.
 */
@Override
public List<KafkaTopic> getTopics() {
  List<KafkaTopic> result = Lists.newArrayList();
  for (TopicMetadata metadata : getFilteredMetadataList()) {
    result.add(new KafkaTopic(metadata.topic(), getPartitionsForTopic(metadata)));
  }
  return result;
}
/**
 * Returns the topics whose metadata survives the given blacklist/whitelist
 * filter, each paired with its partition information.
 *
 * @param blacklist topic-name patterns to exclude
 * @param whitelist topic-name patterns to include
 */
@Override
public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
  List<KafkaTopic> result = Lists.newArrayList();
  for (TopicMetadata metadata : getFilteredMetadataList(blacklist, whitelist)) {
    result.add(new KafkaTopic(metadata.topic(), getPartitionsForTopic(metadata)));
  }
  return result;
}
topics.add(item.topic());
if (!topics.contains(item.topic())) { LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ..."); KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId()); KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader); partitions.add(pInfo);
/**
 * Builds the list of KafkaPartitions for one topic's metadata.
 *
 * The topic is dropped entirely (empty list returned) as soon as any of its
 * partitions has null metadata or no leader, since such a topic cannot be
 * consumed reliably.
 *
 * @param topicMetadata metadata for a single topic as returned by the broker
 * @return one KafkaPartition per partition, or an empty list if the topic was rejected
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
  List<KafkaPartition> partitions = Lists.newArrayList();
  for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (null == partitionMetadata) {
      log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
      return Collections.emptyList();
    }
    if (null == partitionMetadata.leader()) {
      // Fixed log-message typo: "metatada" -> "metadata".
      log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
      return Collections.emptyList();
    }
    partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
        .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
        .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
  }
  return partitions;
}
/**
 * Builds the list of KafkaPartitions for one topic's metadata.
 *
 * The topic is dropped entirely (empty list returned) as soon as any of its
 * partitions has null metadata or no leader, since such a topic cannot be
 * consumed reliably.
 *
 * @param topicMetadata metadata for a single topic as returned by the broker
 * @return one KafkaPartition per partition, or an empty list if the topic was rejected
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
  List<KafkaPartition> partitions = Lists.newArrayList();
  for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (null == partitionMetadata) {
      LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
      return Collections.emptyList();
    }
    if (null == partitionMetadata.leader()) {
      // Fixed log-message typo: "metatada" -> "metadata".
      LOG.error(
          "Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
      return Collections.emptyList();
    }
    partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
        .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
        .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
  }
  return partitions;
}
log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId()); throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Leader election in progress for Kafka topic '%s' partition %s", metadata.topic(), part.partitionId())); long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId()); metadata.topic(), kafkaTableHandle.getKeyDataFormat(), kafkaTableHandle.getMessageDataFormat(),
if (topic.equals(item.topic())) { for (PartitionMetadata part : item.partitionsMetadata()) { if (part.partitionId() == partitionId) {
/**
 * Filters the given topic metadata down to the topics whose names match the
 * whitelist regex built from {@code whiteListTopics}.
 *
 * @param topicMetadataList metadata for all candidate topics
 * @param whiteListTopics   whitelist entries used to build the matching regex
 * @return the metadata entries whose topic name matches the whitelist
 */
public List<TopicMetadata> filterWhitelistTopics(List<TopicMetadata> topicMetadataList,
    HashSet<String> whiteListTopics) {
  ArrayList<TopicMetadata> filteredTopics = new ArrayList<TopicMetadata>();
  // Compile the whitelist regex once, instead of re-compiling it via
  // Pattern.matches() on every loop iteration.
  Pattern whitelistPattern = Pattern.compile(createTopicRegEx(whiteListTopics));
  for (TopicMetadata topicMetadata : topicMetadataList) {
    if (whitelistPattern.matcher(topicMetadata.topic()).matches()) {
      filteredTopics.add(topicMetadata);
    } else {
      log.info("Discarding topic : " + topicMetadata.topic());
    }
  }
  return filteredTopics;
}
/**
 * Queries a randomly chosen broker for metadata on the given topics and
 * returns a topic-name -> offset map built from the leaders' offset data.
 *
 * NOTE(review): the builder key is the topic name, but put() runs once per
 * partition; ImmutableMap.Builder.build() rejects duplicate keys, so this
 * looks like it would throw for any topic with more than one leader-assigned
 * partition — confirm against callers / expected topic shapes.
 */
private Map<String, Long> getTopicOffsets(List<String> topics) {
  // Shuffle the configured nodes and ask the first one, spreading metadata
  // load across brokers between calls.
  ArrayList<HostAndPort> nodes = new ArrayList<>(config.getNodes());
  Collections.shuffle(nodes);
  SimpleConsumer simpleConsumer = consumerManager.getConsumer(nodes.get(0));
  TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(topics);
  TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
  ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
  for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
    for (PartitionMetadata part : metadata.partitionsMetadata()) {
      LOGGER.debug(format("Adding Partition %s/%s", metadata.topic(), part.partitionId()));
      Broker leader = part.leader();
      if (leader == null) {
        // Leader election going on...
        // No leader yet: skip this partition rather than fail the whole call.
        LOGGER.warn(format("No leader for partition %s/%s found!", metadata.topic(), part.partitionId()));
      } else {
        // Ask the partition leader directly for its offsets; index [0] is
        // whatever findAllOffsets returns first — presumably the latest
        // offset; TODO confirm the ordering contract of findAllOffsets.
        HostAndPort leaderHost = HostAndPort.fromParts(leader.host(), leader.port());
        SimpleConsumer leaderConsumer = consumerManager.getConsumer(leaderHost);
        long offset = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId())[0];
        builder.put(metadata.topic(), offset);
      }
    }
  }
  return builder.build();
}
}
log.info("Retry to referesh the topicMetadata on LeaderNotAvailable..."); List<TopicMetadata> topicMetadataList = this.getKafkaMetadata(context, Collections.singletonList(topicMetadata.topic())); if (topicMetadataList == null || topicMetadataList.size() == 0) { log.warn("The topicMetadataList for topic " + topicMetadata.topic() + " is empty."); } else { topicMetadata = topicMetadataList.get(0);
/**
 * Builds an EasyMock-backed TopicMetadataResponse describing TOPIC_1 with a
 * single healthy partition (PARTITION_1_ID) whose leader is a broker at
 * localhost:2121. Every mock created here is registered in {@code mocks};
 * expectations are recorded but replay is left to the caller.
 */
private TopicMetadataResponse mockTopicMetaDataResponse() {
  // Partition-level mock: no error, fixed id, leader on localhost.
  PartitionMetadata partitionMeta = EasyMock.createMock(PartitionMetadata.class);
  mocks.add(partitionMeta);
  Broker leaderBroker = new Broker(0, "localhost", 2121);
  EasyMock.expect(partitionMeta.errorCode()).andReturn((short) 0).anyTimes();
  EasyMock.expect(partitionMeta.leader()).andReturn(leaderBroker).anyTimes();
  EasyMock.expect(partitionMeta.partitionId()).andReturn(PARTITION_1_ID).anyTimes();

  // Topic-level mock wrapping the single partition above.
  List<PartitionMetadata> partitionMetas = new ArrayList<PartitionMetadata>();
  partitionMetas.add(partitionMeta);
  TopicMetadata topicMeta = EasyMock.createMock(TopicMetadata.class);
  mocks.add(topicMeta);
  EasyMock.expect(topicMeta.topic()).andReturn(TOPIC_1).anyTimes();
  EasyMock.expect(topicMeta.errorCode()).andReturn((short) 0).anyTimes();
  EasyMock.expect(topicMeta.partitionsMetadata()).andReturn(partitionMetas).anyTimes();

  // Response mock exposing the single topic.
  List<TopicMetadata> topicMetas = new ArrayList<TopicMetadata>();
  topicMetas.add(topicMeta);
  TopicMetadataResponse response = EasyMock.createMock(TopicMetadataResponse.class);
  mocks.add(response);
  EasyMock.expect(response.topicsMetadata()).andReturn(topicMetas).anyTimes();
  return response;
}
/**
 * Refreshes the partition-leader cache: asks every broker in brokerList for
 * metadata on the audit topics and records each partition's leader host:port
 * in partitionLeader. A failure against one broker is logged and skipped so
 * the remaining brokers are still queried.
 */
private void updateLeaderMap() {
  for (String broker : brokerList) {
    try {
      SimpleConsumer consumer = getSimpleConsumer(broker);
      TopicMetadataRequest request = new TopicMetadataRequest(auditTopics);
      kafka.javaapi.TopicMetadataResponse response = consumer.send(request);
      for (TopicMetadata topicMeta : response.topicsMetadata()) {
        for (PartitionMetadata partitionMeta : topicMeta.partitionsMetadata()) {
          TopicAndPartition key = new TopicAndPartition(topicMeta.topic(), partitionMeta.partitionId());
          partitionLeader.put(key, getHostPort(partitionMeta.leader()));
        }
      }
    } catch (Exception e) {
      logger.warn("Got exception to get metadata from broker=" + broker, e);
    }
  }
}
/**
 * Test only refreshing the partitionMetadata when the error code is LeaderNotAvailable.
 *
 * Verifies that refreshPartitionMetadataOnLeaderNotAvailable re-fetches the
 * topic metadata exactly NUM_TRIES_PARTITION_METADATA times when the
 * partition keeps reporting LeaderNotAvailable.
 * @throws Exception
 */
@Test
public void testRefreshPartitioMetadataWithThreeRetries() throws Exception {
  JobContext dummyContext = null;
  //A partitionMetadata with errorCode LeaderNotAvailable
  // Expectation counts are exact: errorCode()/partitionId() are consulted
  // twice per retry, topic()/partitionsMetadata() once per retry.
  PartitionMetadata partitionMetadata = createMock(PartitionMetadata.class);
  expect(partitionMetadata.errorCode()).andReturn(ErrorMapping.LeaderNotAvailableCode()).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
  expect(partitionMetadata.partitionId()).andReturn(0).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
  replay(partitionMetadata);
  TopicMetadata mockedTopicMetadata = createMock(TopicMetadata.class);
  expect(mockedTopicMetadata.topic()).andReturn("testTopic").times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
  expect(mockedTopicMetadata.partitionsMetadata()).andReturn(Collections.singletonList(partitionMetadata)).times(
      EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
  replay(mockedTopicMetadata);
  // Partial mock: only getKafkaMetadata is stubbed, so the real retry logic
  // under test still executes.
  EtlInputFormat etlInputFormat =
      createMock(EtlInputFormat.class,
          EtlInputFormat.class.getMethod("getKafkaMetadata", new Class[] { JobContext.class, List.class }));
  EasyMock.expect(etlInputFormat.getKafkaMetadata(dummyContext, Collections.singletonList("testTopic"))).andReturn(
      Collections.singletonList(mockedTopicMetadata)).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
  etlInputFormat.setLogger(Logger.getLogger(getClass()));
  replay(etlInputFormat);
  etlInputFormat.refreshPartitionMetadataOnLeaderNotAvailable(partitionMetadata, mockedTopicMetadata, dummyContext,
      EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
  verify(mockedTopicMetadata);
  verify(etlInputFormat);
}
if (Pattern.matches(regex, topicMetadata.topic())) { log.info("Discarding topic (blacklisted): " + topicMetadata.topic()); } else if (!createMessageDecoder(context, topicMetadata.topic())) { log.info("Discarding topic (Decoder generation failed) : " + topicMetadata.topic()); } else if (topicMetadata.errorCode() != ErrorMapping.NoError()) { log.info("Skipping the creation of ETL request for Whole Topic : " + topicMetadata.topic() + " Exception : " + ErrorMapping.exceptionFor(topicMetadata.errorCode())); } else { log.info("Skipping the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode())); if (partitionMetadata.errorCode() != ErrorMapping.NoError()) { log.warn("Receiving non-fatal error code, Continuing the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode())); if (offsetRequestInfo.containsKey(leader)) { ArrayList<TopicAndPartition> topicAndPartitions = offsetRequestInfo.get(leader); topicAndPartitions.add(new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId())); offsetRequestInfo.put(leader, topicAndPartitions); } else { ArrayList<TopicAndPartition> topicAndPartitions = new ArrayList<TopicAndPartition>(); topicAndPartitions.add(new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId())); offsetRequestInfo.put(leader, topicAndPartitions);
expect(mockedTopicMetadata.topic()).andReturn("testTopic"); expect(mockedTopicMetadata.partitionsMetadata()).andReturn( Collections.singletonList(mockedReturnedPartitionMetadata));
/**
 * Filters the given topic metadata down to the topics whose names match the
 * whitelist regex built from {@code whiteListTopics}.
 *
 * @param topicMetadataList metadata for all candidate topics
 * @param whiteListTopics   whitelist entries used to build the matching regex
 * @return the metadata entries whose topic name matches the whitelist
 */
public List<TopicMetadata> filterWhitelistTopics(List<TopicMetadata> topicMetadataList,
    HashSet<String> whiteListTopics) {
  ArrayList<TopicMetadata> filteredTopics = new ArrayList<TopicMetadata>();
  // Compile the whitelist regex once, instead of re-compiling it via
  // Pattern.matches() on every loop iteration.
  Pattern whitelistPattern = Pattern.compile(createTopicRegEx(whiteListTopics));
  for (TopicMetadata topicMetadata : topicMetadataList) {
    if (whitelistPattern.matcher(topicMetadata.topic()).matches()) {
      filteredTopics.add(topicMetadata);
    } else {
      log.info("Discarding topic : " + topicMetadata.topic());
    }
  }
  return filteredTopics;
}
/**
 * Human-readable dump of this object: the class name plus the wrapped topic
 * name and partition id, one field per line.
 */
@Override
public String toString() {
  // System.lineSeparator() (Java 7+) replaces the equivalent
  // System.getProperty("line.separator") lookup.
  String newLine = System.lineSeparator();
  StringBuilder result = new StringBuilder();
  result.append(this.getClass().getName()).append(" Object {").append(newLine);
  result.append(" TopicName: ").append(m_TopicMetadata.topic()).append(newLine);
  result.append(" PartitionId: ").append(m_PartitionMetadata.partitionId()).append(newLine);
  result.append("}");
  return result.toString();
}
}