@Override
public String toString() {
  return this.getTopicName() + ":" + this.getId();
}
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest,
    KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(
        String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
public KafkaPartition build() {
  return new KafkaPartition(this);
}
}
private void refreshTopicMetadata(KafkaPartition partition) {
  // Query each broker until one returns metadata for the topic, then update the partition's leader.
  for (String broker : this.brokers) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
    if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
      TopicMetadata topicMetadata = topicMetadataList.get(0);
      for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (partitionMetadata.partitionId() == partition.getId()) {
          partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
              partitionMetadata.leader().port());
          break;
        }
      }
      break;
    }
  }
}
/**
 * A topic can be configured to move to the latest offset in {@link #TOPICS_MOVE_TO_LATEST_OFFSET}.
 *
 * Needs to be synchronized as it is accessed by multiple threads.
 */
private synchronized boolean shouldMoveToLatestOffset(KafkaPartition partition, SourceState state) {
  if (!state.contains(TOPICS_MOVE_TO_LATEST_OFFSET)) {
    return false;
  }
  if (this.moveToLatestTopics.isEmpty()) {
    this.moveToLatestTopics.addAll(
        Splitter.on(',').trimResults().omitEmptyStrings().splitToList(state.getProp(TOPICS_MOVE_TO_LATEST_OFFSET)));
  }
  return this.moveToLatestTopics.contains(partition.getTopicName()) || this.moveToLatestTopics.contains(ALL_TOPICS);
}
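// Illustrative sketch, not part of the original source: shouldMoveToLatestOffset() treats the
// TOPICS_MOVE_TO_LATEST_OFFSET property as a comma-separated topic list, so a job state configured
// roughly as below (topic names are hypothetical) would move partitions of "TopicA" and "TopicB"
// to the latest offset, while other topics keep their previous offsets unless ALL_TOPICS is listed.
//
//   SourceState exampleState = new SourceState();
//   exampleState.setProp(TOPICS_MOVE_TO_LATEST_OFFSET, "TopicA, TopicB");
//   shouldMoveToLatestOffset(partitionOfTopicA, exampleState);  // true
//   shouldMoveToLatestOffset(partitionOfTopicC, exampleState);  // false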
private void switchMetricContextToCurrentPartition() {
  if (this.currentPartitionIdx >= this.partitions.size()) {
    return;
  }
  int currentPartitionId = this.getCurrentPartition().getId();
  switchMetricContext(Lists.<Tag<?>> newArrayList(new Tag<>("kafka_partition", currentPartitionId)));
}
private void refreshTopicMetadata(KafkaPartition partition) {
  for (String broker : KafkaWrapper.this.getBrokers()) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
    if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
      TopicMetadata topicMetadata = topicMetadataList.get(0);
      for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (partitionMetadata.partitionId() == partition.getId()) {
          partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
              partitionMetadata.leader().port());
          break;
        }
      }
      break;
    }
  }
}
private void createEmptyWorkUnitsForSkippedPartitions(Map<String, List<WorkUnit>> workUnits,
    Map<String, State> topicSpecificStateMap, SourceState state) {

  // In case the previous offsets have not been loaded yet.
  getAllPreviousOffsetState(state);

  // For each partition that has a previous offset, create an empty WorkUnit for it if
  // it is not in this.partitionsToBeProcessed.
  for (Map.Entry<KafkaPartition, Long> entry : this.previousOffsets.entrySet()) {
    KafkaPartition partition = entry.getKey();

    if (!this.partitionsToBeProcessed.contains(partition)) {
      String topicName = partition.getTopicName();
      if (!this.isDatasetStateEnabled.get() || this.topicsToProcess.contains(topicName)) {
        long previousOffset = entry.getValue();
        WorkUnit emptyWorkUnit = createEmptyWorkUnit(partition, previousOffset,
            this.previousOffsetFetchEpochTimes.get(partition),
            Optional.fromNullable(topicSpecificStateMap.get(partition.getTopicName())));

        if (workUnits.containsKey(topicName)) {
          workUnits.get(topicName).add(emptyWorkUnit);
        } else {
          workUnits.put(topicName, Lists.newArrayList(emptyWorkUnit));
        }
      }
    }
  }
}
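// Side note, not in the original source: assuming Java 8 is available, the containsKey/put branch
// above could be collapsed into a single call while preserving behavior, e.g.
//
//   workUnits.computeIfAbsent(topicName, k -> Lists.<WorkUnit> newArrayList()).add(emptyWorkUnit);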
private long getPreviousLowWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  getAllPreviousOffsetState(state);
  if (this.previousLowWatermarks.containsKey(partition)) {
    return this.previousLowWatermarks.get(partition);
  }
  throw new PreviousOffsetNotFoundException(String.format(
      "Previous low watermark for topic %s, partition %s not found.", partition.getTopicName(), partition.getId()));
}
if (KafkaUtils.containsPartitionAvgRecordMillis(workUnitState, partition)) {
  double prevAvgMillisForPartition = KafkaUtils.getPartitionAvgRecordMillis(workUnitState, partition);
  if (prevAvgMillis.containsKey(partition.getTopicName())) {
    prevAvgMillis.get(partition.getTopicName()).add(prevAvgMillisForPartition);
  } else {
    prevAvgMillis.put(partition.getTopicName(), Lists.newArrayList(prevAvgMillisForPartition));
  }
}
public KafkaTopic(String name, List<KafkaPartition> partitions, Optional<State> topicSpecificState) {
  this.name = name;
  // Defensively copy the partition list so later changes to the caller's objects do not affect this topic.
  this.partitions = Lists.newArrayList();
  for (KafkaPartition partition : partitions) {
    this.partitions.add(new KafkaPartition(partition));
  }
  this.topicSpecificState = topicSpecificState;
}
private long getPreviousExpectedHighWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  getAllPreviousOffsetState(state);
  if (this.previousExpectedHighWatermarks.containsKey(partition)) {
    return this.previousExpectedHighWatermarks.get(partition);
  }
  throw new PreviousOffsetNotFoundException(String.format(
      "Previous expected high watermark for topic %s, partition %s not found.", partition.getTopicName(),
      partition.getId()));
}
/**
 * Add a list of partitions of the same topic to a {@link WorkUnit}.
 */
private static void populateMultiPartitionWorkUnit(List<KafkaPartition> partitions, WorkUnit workUnit) {
  Preconditions.checkArgument(!partitions.isEmpty(), "There should be at least one partition");
  GobblinMetrics.addCustomTagToState(workUnit, new Tag<>("kafkaTopic", partitions.get(0).getTopicName()));
  for (int i = 0; i < partitions.size(); i++) {
    workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, i), partitions.get(i).getId());
    workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.LEADER_ID, i),
        partitions.get(i).getLeader().getId());
    workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.LEADER_HOSTANDPORT, i),
        partitions.get(i).getLeader().getHostAndPort());
  }
}
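// Illustrative note, not part of the original source: assuming KafkaUtils.getPartitionPropName(prefix, i)
// simply suffixes the index onto the prefix, the loop above leaves indexed properties of the form
// "<PARTITION_ID>.0", "<LEADER_ID>.0", "<LEADER_HOSTANDPORT>.0", "<PARTITION_ID>.1", ... in the work unit,
// so a consumer of the work unit can recover the per-partition leaders by iterating the index until a
// property is absent.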
/**
 * Set the average time in milliseconds to pull a record of a partition, which will be stored in property
 * "[topicname].[partitionid].avg.record.millis".
 */
public static void setPartitionAvgRecordMillis(State state, KafkaPartition partition, double millis) {
  state.setProp(
      getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + KafkaSource.AVG_RECORD_MILLIS,
      millis);
}
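// Illustrative companion sketch, not part of the original source: the value written above is read back
// through the KafkaUtils helpers referenced elsewhere in this section, guarded by a presence check, e.g.
//
//   if (KafkaUtils.containsPartitionAvgRecordMillis(workUnitState, partition)) {
//     double avgMillis = KafkaUtils.getPartitionAvgRecordMillis(workUnitState, partition);
//   }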