Refine search
// NOTE(review): truncated fragment — a loop over TopicMetadataRequest results that appears to
// log per-topic error codes, skip unrequested topics, and collect a KafkaTopicPartitionLeader
// per partition. The log/exception call is missing its leading method name and several closing
// braces are absent, so this line does not compile as-is; recover the full block before editing.
for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) { if (item.errorCode() != ErrorMapping.NoError()) { seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage()); if (!topics.contains(item.topic())) { LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ..."); for (PartitionMetadata part : item.partitionsMetadata()) { Node leader = brokerToNode(part.leader()); KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId()); KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader); partitions.add(pInfo);
/**
 * Refreshes the leader (id, host, port) of the given partition by querying each known broker
 * for the topic's metadata and applying the first non-empty response.
 *
 * Fix: {@code partitionMetadata.leader()} can be null while a leader election is in progress;
 * the original dereferenced it unconditionally and would throw a NullPointerException.
 *
 * @param partition the partition whose leader information is updated in place
 */
private void refreshTopicMetadata(KafkaPartition partition) {
  for (String broker : this.brokers) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
    if (topicMetadataList == null || topicMetadataList.isEmpty()) {
      continue; // this broker returned nothing; try the next one
    }
    TopicMetadata topicMetadata = topicMetadataList.get(0);
    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
      if (partitionMetadata.partitionId() == partition.getId()) {
        // leader() is null while a leader election is in progress — skip the update
        // rather than NPE; the stale leader stays until the next refresh.
        if (partitionMetadata.leader() != null) {
          partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
              partitionMetadata.leader().port());
        }
        break;
      }
    }
    // Only the first broker that answers with metadata is consulted (original behavior).
    break;
  }
}
public Map<Integer, Long> fetch(String topic, int partitionCount) { Map<Integer, PartitionMetadata> metadatas = fetchPartitionMetadata(brokerList, port, topic, partitionCount); Map<Integer, Long> ret = new HashMap<>(); for (int partition = 0; partition < partitionCount; partition++) { PartitionMetadata metadata = metadatas.get(partition); if (metadata == null || metadata.leader() == null) { ret.put(partition, -1L); //throw new RuntimeException("Can't find Leader for Topic and Partition. Exiting"); } String leadBroker = metadata.leader().host(); String clientName = "Client_" + topic + "_" + partition; SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName); long latestOffset = getLatestOffset(consumer, topic, partition, clientName); if (consumer != null) consumer.close(); ret.put(partition, latestOffset); } return ret; }
/**
 * Re-resolves the leader for the configured topic/partition and reconnects the
 * SimpleConsumer to it. On any metadata-request failure the current consumer is kept.
 *
 * Fixes: the caught exception is now logged with its stack trace (the original logged only
 * {@code getMessage()}, losing the cause); a null leader (election in progress) no longer
 * causes a NullPointerException; and the old consumer is closed before being replaced so its
 * socket is not leaked.
 */
private void refreshTopicMetadata() {
  TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(kafkaRequest.getTopic()));
  TopicMetadataResponse response;
  try {
    response = simpleConsumer.send(request);
  } catch (Exception e) {
    // Keep the full stack trace; the message alone hides the root cause.
    log.error("Exception caught when refreshing metadata for topic " + request.topics().get(0) + ": "
        + e.getMessage(), e);
    return;
  }
  TopicMetadata metadata = response.topicsMetadata().get(0);
  for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == kafkaRequest.getPartition()) {
      if (partitionMetadata.leader() == null) {
        // Leader election in progress: keep the existing consumer rather than NPE.
        log.warn("No leader currently available for topic " + kafkaRequest.getTopic()
            + " partition " + kafkaRequest.getPartition());
        return;
      }
      // Release the connection to the old (possibly stale) leader before reconnecting.
      simpleConsumer.close();
      simpleConsumer = new SimpleConsumer(partitionMetadata.leader().host(), partitionMetadata.leader().port(),
          CamusJob.getKafkaTimeoutValue(context), CamusJob.getKafkaBufferSize(context),
          CamusJob.getKafkaClientName(context));
      break;
    }
  }
}
private void initializeLastProcessingOffset() { // read last received kafka message TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic()); if (tm == null) { throw new RuntimeException("Failed to retrieve topic metadata"); } partitionNum = tm.partitionsMetadata().size(); lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum); for (PartitionMetadata pm : tm.partitionsMetadata()) { String leadBroker = pm.leader().host(); int port = pm.leader().port(); String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId(); SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName); long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName); FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build(); FetchResponse fetchResponse = consumer.fetch(req); for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) { Message m = messageAndOffset.message(); ByteBuffer payload = m.payload(); ByteBuffer key = m.key(); byte[] valueBytes = new byte[payload.limit()]; byte[] keyBytes = new byte[key.limit()]; payload.get(valueBytes); key.get(keyBytes); lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes)); } } }
// NOTE(review): truncated fragment — appears to build per-partition Kafka splits for Presto:
// it resolves each partition's leader (failing with GENERIC_INTERNAL_ERROR while an election
// is in progress) and enumerates offset ranges. The split-constructor call is cut off
// (undefined loop variable `i`, unbalanced braces); recover the full block before editing.
TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest); for (PartitionMetadata part : metadata.partitionsMetadata()) { log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId()); Broker leader = part.leader(); if (leader == null) { throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Leader election in progress for Kafka topic '%s' partition %s", metadata.topic(), part.partitionId())); HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port()); long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId()); kafkaTableHandle.getKeyDataSchemaLocation().map(KafkaSplitManager::readSchema), kafkaTableHandle.getMessageDataSchemaLocation().map(KafkaSplitManager::readSchema), part.partitionId(), offsets[i], offsets[i - 1],
// NOTE(review): truncated fragment — scans partition metadata for the partition matching
// kp.getPartitionId(), connects a SimpleConsumer to its leader, and iterates the fetched
// message set. `pmIterator`, `req`, `cons`, and `consumer` are defined outside this excerpt
// and the closing braces are missing; not compilable in isolation.
while (pm.partitionId() != kp.getPartitionId()) { if (!pmIterator.hasNext()) break; pm = pmIterator.next(); if (pm.partitionId() != kp.getPartitionId()) continue; Broker bk = pm.leader(); SimpleConsumer ksc = new SimpleConsumer(bk.host(), bk.port(), cons.getTimeout(), cons.getBufferSize(), cons.getClientId()); FetchResponse fetchResponse = ksc.fetch(req); Integer count = 0; for (MessageAndOffset msg : fetchResponse.messageSet(consumer.topic, kp.getPartitionId())) {
// NOTE(review): truncated fragment — Camus ETL-request assembly: skips partitions whose
// metadata reports LeaderNotAvailable (setting a job-failure flag), warns on other non-fatal
// error codes, then groups TopicAndPartition entries per LeaderInfo in offsetRequestInfo.
// The line starts mid-argument-list and the LeaderInfo assignment target is missing;
// recover the enclosing method before editing.
NUM_TRIES_PARTITION_METADATA); if (partitionMetadata.errorCode() == ErrorMapping.LeaderNotAvailableCode()) { log.info("Skipping the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode())); reportJobFailureDueToLeaderNotAvailable = true; } else { if (partitionMetadata.errorCode() != ErrorMapping.NoError()) { log.warn("Receiving non-fatal error code, Continuing the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode())); new LeaderInfo(new URI("tcp://" + partitionMetadata.leader().getConnectionString()), partitionMetadata.leader().id()); if (offsetRequestInfo.containsKey(leader)) { ArrayList<TopicAndPartition> topicAndPartitions = offsetRequestInfo.get(leader); topicAndPartitions.add(new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId())); offsetRequestInfo.put(leader, topicAndPartitions); } else { ArrayList<TopicAndPartition> topicAndPartitions = new ArrayList<TopicAndPartition>(); topicAndPartitions.add(new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId())); offsetRequestInfo.put(leader, topicAndPartitions);
// NOTE(review): truncated fragment — inside a labeled `brokersLoop`: connects a SimpleConsumer
// to one seed broker, requests topic metadata, moves to the next broker on per-topic errors or
// unrequested topics, and otherwise collects a KafkaTopicPartitionLeader per partition.
// The `brokersLoop` label, the error-log prefix, and the closing try/finally are outside this
// excerpt; not compilable in isolation.
SimpleConsumer consumer = null; try { consumer = new SimpleConsumer(brokerUrl.getHost(), brokerUrl.getPort(), soTimeout, bufferSize, clientId); kafka.javaapi.TopicMetadataResponse resp = consumer.send(req); if (item.errorCode() != ErrorMapping.NoError()) { "for " + topics.toString() + ". Error: " + ErrorMapping.exceptionFor(item.errorCode()).getMessage()); continue brokersLoop; if (!topics.contains(item.topic())) { LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ..."); continue brokersLoop; for (PartitionMetadata part : item.partitionsMetadata()) { Node leader = brokerToNode(part.leader()); KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId()); KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader); partitions.add(pInfo); consumer.close();
// NOTE(review): truncated fragment — builds per-leader OffsetRequests: fails fast
// (CrunchRuntimeException) when a partition has no leader, wraps the leader endpoint in a
// Broker key for `brokerRequests`, then issues getOffsetsBefore and closes the consumer.
// `brokerRequests`, `endPoint`, `time`, `brokerRequest`, and the try block opener are outside
// this excerpt; not compilable in isolation.
for (PartitionMetadata partition : metadata.partitionsMetadata()) { Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>(); BrokerEndPoint brokerEndPoint = partition.leader(); if(brokerEndPoint == null){ throw new CrunchRuntimeException("Unable to find leader for topic:"+metadata.topic() +" partition:"+partition.partitionId()); ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), SecurityProtocol.PLAINTEXT); Broker leader = new Broker(0, JavaConversions.asScalaBuffer(Arrays.asList(endPoint)), Option.<String>empty()); requestInfo = brokerRequests.get(leader); requestInfo.put(new TopicAndPartition(metadata.topic(), partition.partitionId()), new PartitionOffsetRequestInfo( time, 1)); OffsetRequest offsetRequest = new OffsetRequest(brokerRequest.getValue(), kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID); offsetResponse = simpleConsumer.getOffsetsBefore(offsetRequest); } finally { simpleConsumer.close();
/**
 * Returns a SimpleConsumer connected to the leader broker of the given partition,
 * creating and caching one on first use.
 *
 * @param partition the partition whose leader to connect to
 * @return the cached or newly created consumer, or null when the leader cannot be
 *         determined or the connection attempt fails
 */
private SimpleConsumer findLeaderConsumer(int partition) {
  // Fast path: reuse the already-established connection.
  if (consumer != null) {
    return consumer;
  }
  try {
    PartitionMetadata metadata = findLeader(partition);
    if (metadata == null) {
      // Leader unknown: clear the cached state so the next call retries from scratch.
      leaderBroker = null;
      consumer = null;
      return null;
    }
    leaderBroker = metadata.leader();
    consumer = new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), config.socketTimeoutMs,
        config.socketReceiveBufferBytes, config.clientId);
    return consumer;
  } catch (Exception e) {
    LOG.error(e.getMessage(), e);
    return null;
  }
}
// NOTE(review): truncated method — retries fetching topic metadata up to numTries while the
// partition reports LeaderNotAvailable, returning the refreshed PartitionMetadata once the
// error clears. The body is cut off mid-statement (dangling "+ partitionMetadata.partitionId());")
// and the retry sleep, final return, and closing braces are missing from this excerpt.
public PartitionMetadata refreshPartitionMetadataOnLeaderNotAvailable(PartitionMetadata partitionMetadata, TopicMetadata topicMetadata, JobContext context, int numTries) throws InterruptedException { int tryCounter = 0; while (tryCounter < numTries && partitionMetadata.errorCode() == ErrorMapping.LeaderNotAvailableCode()) { log.info("Retry to referesh the topicMetadata on LeaderNotAvailable..."); List<TopicMetadata> topicMetadataList = this.getKafkaMetadata(context, Collections.singletonList(topicMetadata.topic())); if (topicMetadataList == null || topicMetadataList.size() == 0) { log.warn("The topicMetadataList for topic " + topicMetadata.topic() + " is empty."); } else { topicMetadata = topicMetadataList.get(0); boolean partitionFound = false; for (PartitionMetadata metadataPerPartition : topicMetadata.partitionsMetadata()) { if (metadataPerPartition.partitionId() == partitionMetadata.partitionId()) { partitionFound = true; if (metadataPerPartition.errorCode() != ErrorMapping.LeaderNotAvailableCode()) { return metadataPerPartition; } else { //retry again. + partitionMetadata.partitionId());
// NOTE(review): truncated fragment — builds KafkaPartition entries not yet in `kps`, skips
// partitions whose leader is null (election in progress) or unchanged (`oldB`), and updates
// partition stats with the leader id/host:port. Starts with a bare `continue;` from an outer
// loop and most braces are missing; not compilable in isolation.
continue; for (PartitionMetadata pm : pmLEntry.getValue()) { KafkaPartition kp = new KafkaPartition(pmLEntry.getKey(), topic, pm.partitionId()); if (!kps.contains(kp)) { Broker b = pm.leader(); if (b == null) { logger.info("No Leader broker for Kafka Partition {}. Skipping it for time until new leader is elected", kp.getPartitionId()); if (b.equals(oldB)) { continue; stats.updatePartitionStats(kp, pm.leader().id(), pm.leader().host() + ":" + pm.leader().port());
// NOTE(review): truncated fragment — logs each topic's partitions with their leader host,
// replica hosts, and ISR hosts. As excerpted, `item` is referenced outside the first loop's
// scope and `replicas`/`isr`/`leader` accumulators are declared elsewhere; the enclosing
// braces were lost in extraction, so this does not compile in isolation.
for (kafka.javaapi.TopicMetadata item : data) m_logger.info("Topic: " + item.topic()); for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) for (kafka.cluster.Broker replica : part.replicas()) replicas += " " + replica.host(); for (kafka.cluster.Broker replica : part.isr()) isr += " " + replica.host(); if (part.leader() != null) if (part.leader().host() != null) leader = part.leader().host(); m_logger.info(" Partition: " + part.partitionId() + ": Leader: " + leader + " Replicas:[" + replicas + "] ISR:[" + isr + "]");
// NOTE(review): truncated fragment — discards blacklisted topics by regex, then for each
// not-yet-explored TopicPartition rebuilds a scala PartitionMetadata (leader as Option,
// replicas/ISR as scala buffers) into `pml` and marks the pair explored. The PartitionMetadata
// constructor call is missing its `new ...(` prefix and braces are unbalanced; recover the
// full block before editing.
if (Pattern.matches(regex, item.topic())) { m_logger.debug("Discarding topic (blacklisted): " + item.topic()); continue; for (PartitionMetadata part : item.partitionsMetadata()) if (!exploredTopicPartition.contains(new TopicPartition(item.topic(), part.partitionId()))) part.partitionId(), Option.apply(part.leader()), JavaConversions.asScalaBuffer(part.replicas()).toList(), JavaConversions.asScalaBuffer(part.isr()).toList(), part.errorCode()); pml.add(pm); exploredTopicPartition.add(new TopicPartition(item.topic(), part.partitionId()));
/**
 * Builds a mocked TopicMetadataResponse describing TOPIC_1 with one healthy partition
 * (PARTITION_1_ID) led by a local broker. All created mocks are registered in {@code mocks}
 * so the surrounding test can replay/verify them.
 *
 * @return a mock response whose topicsMetadata() yields the single stubbed topic
 */
private TopicMetadataResponse mockTopicMetaDataResponse() {
  // Partition level: no error, a fixed local leader, and the expected partition id.
  Broker leader = new Broker(0, "localhost", 2121);
  PartitionMetadata partitionMeta = EasyMock.createMock(PartitionMetadata.class);
  mocks.add(partitionMeta);
  EasyMock.expect(partitionMeta.errorCode()).andReturn((short) 0).anyTimes();
  EasyMock.expect(partitionMeta.leader()).andReturn(leader).anyTimes();
  EasyMock.expect(partitionMeta.partitionId()).andReturn(PARTITION_1_ID).anyTimes();

  // Topic level: TOPIC_1, no error, exposing the single partition stubbed above.
  TopicMetadata topicMeta = EasyMock.createMock(TopicMetadata.class);
  mocks.add(topicMeta);
  EasyMock.expect(topicMeta.topic()).andReturn(TOPIC_1).anyTimes();
  EasyMock.expect(topicMeta.errorCode()).andReturn((short) 0).anyTimes();
  List<PartitionMetadata> partitionMetas = new ArrayList<PartitionMetadata>();
  partitionMetas.add(partitionMeta);
  EasyMock.expect(topicMeta.partitionsMetadata()).andReturn(partitionMetas).anyTimes();

  // Response level: wraps the single topic's metadata.
  TopicMetadataResponse response = EasyMock.createMock(TopicMetadataResponse.class);
  mocks.add(response);
  List<TopicMetadata> topicMetas = new ArrayList<TopicMetadata>();
  topicMetas.add(topicMeta);
  EasyMock.expect(response.topicsMetadata()).andReturn(topicMetas).anyTimes();
  return response;
}
private String findNewLeader(String oldLeader, String topic, int partition, int port) { for (int i = 0; i < 3; i++) { PartitionMetadata metadata = findLeader(replicaBrokers, port, topic, partition); if (metadata == null || metadata.leader() == null || oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) { // first time through if the leader hasn't changed give ZooKeeper a second to recover // second time, assume the broker did recover before failover, or it was a non-Broker issue try { Thread.sleep(1000); } catch (InterruptedException e) { LOGGER.error("Unable to sleep", e); } } else { return metadata.leader().host(); } } throw new IllegalStateException("Unable to find new leader after Broker failure"); }
// NOTE(review): truncated fragment — collects white-listed canary topics, then per partition
// sends a canary message, records producer latency/availability metrics (Gson-encoded metric
// names, SlidingWindowReservoir histograms), and counts failures. The loop over `item` begins
// outside this excerpt and most braces are missing; not compilable in isolation.
for (String whiteListTopic : metaDataProperties.canaryTestTopics) if (topic.topic().equalsIgnoreCase(whiteListTopic)) { whiteListTopicMetadata.add(topic); numPartitionsProducer += topic.partitionsMetadata().size(); for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) { int partitionProducerFailCount = 0; LOGGER.debug("Writing to Topic: {}; Partition: {};", item.topic(), part.partitionId()); MetricNameEncoded producerPartitionLatency = metricNameFactory.createWithPartition("Producer.Latency", item.topic() + "##" + part.partitionId()); Histogram histogramProducerPartitionLatency = new Histogram(new SlidingWindowReservoir(1)); if (!metrics.getNames().contains(new Gson().toJson(producerPartitionLatency))) { try { producer.sendCanaryToTopicPartition(item.topic(), Integer.toString(part.partitionId())); endTime = System.currentTimeMillis(); } catch (Exception e) { LOGGER.error("Error Writing to Topic: {}; Partition: {}; Exception: {}", item.topic(), part.partitionId(), e); topicProducerFailCount++; partitionProducerFailCount++; MetricNameEncoded producerPartitionAvailability = metricNameFactory.createWithPartition("Producer.Availability", item.topic() + "##" + part.partitionId()); if (!metrics.getNames().contains(new Gson().toJson(producerPartitionAvailability))) { metrics.register(new Gson().toJson(producerPartitionAvailability), new AvailabilityGauge(1, 1 - partitionProducerFailCount));
/**
 * Converts Kafka PartitionMetadata into a TopicPartitionVO, adding the elected leader
 * (when one exists) and every replica with its in-sync flag.
 *
 * NOTE(review): replicas() normally includes the leader as well — confirm that
 * addReplica de-duplicates/overwrites by id, otherwise the leader is added twice.
 *
 * @param topic topic name, used to look up the ISR list
 * @param pmd   partition metadata from the broker
 * @return populated view object for this partition
 */
private TopicPartitionVO parsePartitionMetadata(String topic, PartitionMetadata pmd) {
  final TopicPartitionVO partitionVo = new TopicPartitionVO(pmd.partitionId());
  // leader() is null while a leader election is in progress; add it only when present.
  if (pmd.leader() != null) {
    partitionVo.addReplica(new TopicPartitionVO.PartitionReplica(pmd.leader().id(), true, true));
  }
  final List<Integer> inSyncIds = getIsr(topic, pmd);
  pmd.replicas()
      .stream()
      .map(r -> new TopicPartitionVO.PartitionReplica(r.id(), inSyncIds.contains(r.id()), false))
      .forEach(partitionVo::addReplica);
  return partitionVo;
}