/**
 * Delete records up to (and including) the provided ssp offsets for
 * all system stream partitions specified in the map.
 * This only works with Kafka cluster 0.11 or later. Otherwise it's a no-op.
 * @param offsets specifies up to what offsets the messages should be deleted
 */
@Override
public void deleteMessages(Map<SystemStreamPartition, String> offsets) {
  // Feature-gated: when message deletion is disabled this call is a no-op.
  if (!deleteCommittedMessages) {
    return;
  }
  // Lazily create the admin client on first use.
  if (adminClientForDelete == null) {
    adminClientForDelete = kafka.admin.AdminClient.create(createAdminClientProperties());
  }
  KafkaSystemAdminUtilsScala.deleteMessages(adminClientForDelete, offsets);
  deleteMessageCalled = true;
}
/**
 * Deletes the Kafka topic backing the given stream and verifies the deletion
 * by re-fetching the topic's metadata.
 *
 * @param streamSpec spec of the stream whose backing topic should be cleared
 * @return true if the topic no longer has any partition metadata after deletion
 */
@Override
public boolean clearStream(StreamSpec streamSpec) {
  // Fixed log message: this method deletes a topic, it does not create one.
  LOG.info("Deleting Kafka topic: {} on system: {}", streamSpec.getPhysicalName(), streamSpec.getSystemName());

  KafkaSystemAdminUtilsScala.clearStream(streamSpec, getZkConnection());

  Map<String, List<PartitionInfo>> topicsMetadata = getTopicMetadata(ImmutableSet.of(streamSpec.getPhysicalName()));
  List<PartitionInfo> partitions = topicsMetadata.get(streamSpec.getPhysicalName());
  // Guard against NPE: a fully-deleted topic may have no metadata entry at all,
  // which also counts as successfully cleared.
  return partitions == null || partitions.isEmpty();
}
/**
 * Creates the Kafka topic backing the given stream.
 *
 * @param streamSpec spec of the stream whose backing topic should be created
 * @return result of the underlying topic-creation call
 */
@Override
public boolean createStream(StreamSpec streamSpec) {
  String topicName = streamSpec.getPhysicalName();
  String system = streamSpec.getSystemName();
  LOG.info("Creating Kafka topic: {} on system: {}", topicName, system);

  // Delegate the actual topic creation to the Scala admin utilities.
  return KafkaSystemAdminUtilsScala.createStream(toKafkaSpec(streamSpec), getZkConnection());
}
// Resolve the topic properties used for the coordinator stream from the Kafka config.
coordinatorStreamProperties = KafkaSystemAdminUtilsScala.getCoordinatorTopicProperties(kafkaConfig);
// NOTE(review): the Java view of the intermediate-stream properties produced below is
// not assigned to anything in this visible snippet — presumably it should be stored in
// a field; verify against the full enclosing constructor/initializer.
JavaConverters.mapAsJavaMapConverter(KafkaSystemAdminUtilsScala.getIntermediateStreamProperties(config))
    .asJava();
/**
 * Fetch SystemStreamMetadata for each topic with the consumer
 * @param topics set of topics to get metadata info for
 * @return map of topic to SystemStreamMetadata
 */
private Map<String, SystemStreamMetadata> fetchSystemStreamMetadata(Set<String> topics) {
  Map<SystemStreamPartition, String> oldestBySsp = new HashMap<>();
  Map<SystemStreamPartition, String> newestBySsp = new HashMap<>();
  Map<SystemStreamPartition, String> upcomingBySsp = new HashMap<>();

  LOG.info("Fetching SystemStreamMetadata for topics {} on system {}", topics, systemName);

  for (String topic : topics) {
    List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
    if (partitionInfos == null) {
      throw new SamzaException(
          String.format("Partition info not(yet?) available for system %s topic %s", systemName, topic));
    }

    // Map each partition of this topic to its TopicPartition handle.
    List<TopicPartition> topicPartitions = partitionInfos.stream()
        .map(pi -> new TopicPartition(pi.topic(), pi.partition()))
        .collect(Collectors.toList());

    // Accumulate the oldest/newest/upcoming offsets for this topic into the combined maps.
    OffsetsMaps topicOffsets = fetchTopicPartitionsMetadata(topicPartitions);
    oldestBySsp.putAll(topicOffsets.getOldestOffsets());
    newestBySsp.putAll(topicOffsets.getNewestOffsets());
    upcomingBySsp.putAll(topicOffsets.getUpcomingOffsets());
  }

  scala.collection.immutable.Map<String, SystemStreamMetadata> assembled =
      KafkaSystemAdminUtilsScala.assembleMetadata(ScalaJavaUtil.toScalaMap(oldestBySsp),
          ScalaJavaUtil.toScalaMap(newestBySsp), ScalaJavaUtil.toScalaMap(upcomingBySsp));

  LOG.debug("assembled SystemStreamMetadata is: {}", assembled);
  return JavaConverters.mapAsJavaMapConverter(assembled).asJava();
}
// Resolve the topic properties used for the coordinator stream from the Kafka config.
coordinatorStreamProperties = KafkaSystemAdminUtilsScala.getCoordinatorTopicProperties(kafkaConfig);
// NOTE(review): the Java view of the intermediate-stream properties produced below is
// not assigned to anything in this visible snippet — presumably it should be stored in
// a field; verify against the full enclosing constructor/initializer.
JavaConverters.mapAsJavaMapConverter(KafkaSystemAdminUtilsScala.getIntermediateStreamProperties(config))
    .asJava();
/**
 * Fetch SystemStreamMetadata for each topic with the consumer
 * @param topics set of topics to get metadata info for
 * @return map of topic to SystemStreamMetadata
 */
private Map<String, SystemStreamMetadata> fetchSystemStreamMetadata(Set<String> topics) {
  final Map<SystemStreamPartition, String> combinedOldest = new HashMap<>();
  final Map<SystemStreamPartition, String> combinedNewest = new HashMap<>();
  final Map<SystemStreamPartition, String> combinedUpcoming = new HashMap<>();

  LOG.info("Fetching SystemStreamMetadata for topics {} on system {}", topics, systemName);

  topics.forEach(topic -> {
    List<PartitionInfo> partitions = metadataConsumer.partitionsFor(topic);
    if (partitions == null) {
      // The consumer has no metadata for this topic (possibly not propagated yet).
      String message =
          String.format("Partition info not(yet?) available for system %s topic %s", systemName, topic);
      throw new SamzaException(message);
    }

    List<TopicPartition> handles = partitions.stream()
        .map(info -> new TopicPartition(info.topic(), info.partition()))
        .collect(Collectors.toList());

    // Fold this topic's offset maps into the cross-topic accumulators.
    OffsetsMaps perTopic = fetchTopicPartitionsMetadata(handles);
    combinedOldest.putAll(perTopic.getOldestOffsets());
    combinedNewest.putAll(perTopic.getNewestOffsets());
    combinedUpcoming.putAll(perTopic.getUpcomingOffsets());
  });

  scala.collection.immutable.Map<String, SystemStreamMetadata> metadata =
      KafkaSystemAdminUtilsScala.assembleMetadata(ScalaJavaUtil.toScalaMap(combinedOldest),
          ScalaJavaUtil.toScalaMap(combinedNewest), ScalaJavaUtil.toScalaMap(combinedUpcoming));

  LOG.debug("assembled SystemStreamMetadata is: {}", metadata);
  return JavaConverters.mapAsJavaMapConverter(metadata).asJava();
}
// Resolve the topic properties used for the coordinator stream from the Kafka config.
coordinatorStreamProperties = KafkaSystemAdminUtilsScala.getCoordinatorTopicProperties(kafkaConfig);
// NOTE(review): the Java view of the intermediate-stream properties produced below is
// not assigned to anything in this visible snippet — presumably it should be stored in
// a field; verify against the full enclosing constructor/initializer.
JavaConverters.mapAsJavaMapConverter(KafkaSystemAdminUtilsScala.getIntermediateStreamProperties(config))
    .asJava();
/**
 * Delete records up to (and including) the provided ssp offsets for
 * all system stream partitions specified in the map.
 * This only works with Kafka cluster 0.11 or later. Otherwise it's a no-op.
 * @param offsets specifies up to what offsets the messages should be deleted
 */
@Override
public void deleteMessages(Map<SystemStreamPartition, String> offsets) {
  // Feature-gated: nothing to do unless committed-message deletion is enabled.
  if (!deleteCommittedMessages) {
    return;
  }
  // Lazily create the admin client on first use.
  if (adminClient == null) {
    adminClient = AdminClient.create(createAdminClientProperties());
  }
  KafkaSystemAdminUtilsScala.deleteMessages(adminClient, offsets);
  deleteMessageCalled = true;
}
/**
 * Fetch SystemStreamMetadata for each topic with the consumer
 * @param topics set of topics to get metadata info for
 * @return map of topic to SystemStreamMetadata
 */
private Map<String, SystemStreamMetadata> fetchSystemStreamMetadata(Set<String> topics) {
  Map<SystemStreamPartition, String> oldestBySsp = new HashMap<>();
  Map<SystemStreamPartition, String> newestBySsp = new HashMap<>();
  Map<SystemStreamPartition, String> upcomingBySsp = new HashMap<>();

  LOG.info("Fetching SystemStreamMetadata for topics {} on system {}", topics, systemName);

  for (String topic : topics) {
    List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
    if (partitionInfos == null) {
      throw new SamzaException(
          String.format("Partition info not(yet?) available for system %s topic %s", systemName, topic));
    }

    // Map each partition of this topic to its TopicPartition handle.
    List<TopicPartition> topicPartitions = partitionInfos.stream()
        .map(pi -> new TopicPartition(pi.topic(), pi.partition()))
        .collect(Collectors.toList());

    // Accumulate the oldest/newest/upcoming offsets for this topic into the combined maps.
    OffsetsMaps topicOffsets = fetchTopicPartitionsMetadata(topicPartitions);
    oldestBySsp.putAll(topicOffsets.getOldestOffsets());
    newestBySsp.putAll(topicOffsets.getNewestOffsets());
    upcomingBySsp.putAll(topicOffsets.getUpcomingOffsets());
  }

  scala.collection.immutable.Map<String, SystemStreamMetadata> assembled =
      KafkaSystemAdminUtilsScala.assembleMetadata(ScalaJavaUtil.toScalaMap(oldestBySsp),
          ScalaJavaUtil.toScalaMap(newestBySsp), ScalaJavaUtil.toScalaMap(upcomingBySsp));

  LOG.debug("assembled SystemStreamMetadata is: {}", assembled);
  return JavaConverters.mapAsJavaMapConverter(assembled).asJava();
}
/**
 * Deletes the Kafka topic backing the given stream and verifies the deletion
 * by re-fetching the topic's metadata.
 *
 * @param streamSpec spec of the stream whose backing topic should be cleared
 * @return true if the topic no longer has any partition metadata after deletion
 */
@Override
public boolean clearStream(StreamSpec streamSpec) {
  // Fixed log message: this method deletes a topic, it does not create one.
  LOG.info("Deleting Kafka topic: {} on system: {}", streamSpec.getPhysicalName(), streamSpec.getSystemName());

  KafkaSystemAdminUtilsScala.clearStream(streamSpec, getZkConnection());

  Map<String, List<PartitionInfo>> topicsMetadata = getTopicMetadata(ImmutableSet.of(streamSpec.getPhysicalName()));
  List<PartitionInfo> partitions = topicsMetadata.get(streamSpec.getPhysicalName());
  // Guard against NPE: a fully-deleted topic may have no metadata entry at all,
  // which also counts as successfully cleared.
  return partitions == null || partitions.isEmpty();
}
/**
 * Creates the Kafka topic backing the given stream.
 *
 * @param streamSpec spec of the stream whose backing topic should be created
 * @return result of the underlying topic-creation call
 */
@Override
public boolean createStream(StreamSpec streamSpec) {
  LOG.info("Creating Kafka topic: {} on system: {}",
      streamSpec.getPhysicalName(), streamSpec.getSystemName());
  // Convert to the Kafka-specific spec and delegate creation to the Scala utilities.
  KafkaStreamSpec kafkaSpec = toKafkaSpec(streamSpec);
  return KafkaSystemAdminUtilsScala.createStream(kafkaSpec, getZkConnection());
}
/**
 * Delete records up to (and including) the provided ssp offsets for
 * all system stream partitions specified in the map.
 * This only works with Kafka cluster 0.11 or later. Otherwise it's a no-op.
 * @param offsets specifies up to what offsets the messages should be deleted
 */
@Override
public void deleteMessages(Map<SystemStreamPartition, String> offsets) {
  if (deleteCommittedMessages) {
    // Lazily initialize the admin client used for record deletion.
    if (adminClient == null) {
      adminClient = AdminClient.create(createAdminClientProperties());
    }
    KafkaSystemAdminUtilsScala.deleteMessages(adminClient, offsets);
    // Record that a deletion was requested during this admin's lifetime.
    deleteMessageCalled = true;
  }
}