KafkaSystemConsumer.toTopicPartition

How to use the toTopicPartition method in org.apache.samza.system.kafka.KafkaSystemConsumer

Best Java code snippets using org.apache.samza.system.kafka.KafkaSystemConsumer.toTopicPartition.
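Before the snippets, here is a minimal sketch of the conversion itself. It assumes only what the snippets below show: toTopicPartition is a static method on KafkaSystemConsumer that takes a SystemStreamPartition and returns a Kafka TopicPartition (whether it is accessible outside the org.apache.samza.system.kafka package is not shown on this page).

import org.apache.kafka.common.TopicPartition;
import org.apache.samza.Partition;
import org.apache.samza.system.SystemStreamPartition;
import org.apache.samza.system.kafka.KafkaSystemConsumer;

public class ToTopicPartitionSketch {
  public static void main(String[] args) {
    // A SystemStreamPartition names a (system, stream, partition) triple in Samza.
    SystemStreamPartition ssp =
        new SystemStreamPartition("kafka", "my-topic", new Partition(0));

    // toTopicPartition keeps only the Kafka-level coordinates: the stream name
    // becomes the topic and the Samza partition id becomes the Kafka partition.
    TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);

    System.out.println(tp); // my-topic-0
  }
}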

origin: apache/samza

/**
 * Add a new partition to the list of polled partitions.
 * Must only be called before {@link KafkaConsumerProxy#start} is called.
 */
public void addTopicPartition(SystemStreamPartition ssp, long nextOffset) {
 LOG.info(String.format("Adding new topicPartition %s with offset %s to queue for consumer %s", ssp, nextOffset,
   this));
 topicPartitionToSSP.put(KafkaSystemConsumer.toTopicPartition(ssp), ssp); //registered SSPs
 // this is already vetted offset so there is no need to validate it
 nextOffsets.put(ssp, nextOffset);
 kafkaConsumerMetrics.setNumTopicPartitions(metricName, nextOffsets.size());
}
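The TopicPartition-to-SSP map populated above is what lets records returned by KafkaConsumer.poll(), which are grouped by TopicPartition, be routed back to their Samza-level SSPs. A minimal sketch of that reverse lookup, assuming a plain KafkaConsumer and the same map shape; the poll loop here is a stand-in for what the proxy does internally, not the proxy's actual code.

import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.samza.system.SystemStreamPartition;

class PollRoutingSketch {
  static void pollOnce(KafkaConsumer<byte[], byte[]> kafkaConsumer,
      Map<TopicPartition, SystemStreamPartition> topicPartitionToSSP) {
    ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(Duration.ofMillis(100));
    for (TopicPartition tp : records.partitions()) {
      // map the Kafka-level partition back to the SSP it was registered under
      SystemStreamPartition ssp = topicPartitionToSSP.get(tp);
      for (ConsumerRecord<byte[], byte[]> record : records.records(tp)) {
        System.out.printf("ssp=%s offset=%d%n", ssp, record.offset());
      }
    }
  }
}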
origin: apache/samza

private void populateCurrentLags(Set<SystemStreamPartition> ssps) {
 Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics();
 // populate the MetricNames the first time this is called
 if (perPartitionMetrics.isEmpty()) {
  HashMap<String, String> tags = new HashMap<>();
  tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics
  for (SystemStreamPartition ssp : ssps) {
   TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);
   perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags));
  }
 }
 for (SystemStreamPartition ssp : ssps) {
  MetricName mn = perPartitionMetrics.get(ssp);
  Metric currentLagMetric = consumerMetrics.get(mn);
  // High watermark is fixed to be the offset of last available message,
  // so the lag is now at least 0, which is the same as Samza's definition.
  // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling.
  long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L;
  latestLags.put(ssp, currentLag);
  // calls the setIsAtHead for the BlockingEnvelopeMap
  sink.setIsAtHighWatermark(ssp, currentLag == 0);
 }
}
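The snippet above builds the Kafka consumer's per-partition records-lag MetricName from the TopicPartition that toTopicPartition returns. Below is a standalone sketch of the same lookup, assuming the metric name and group shown above ("<topic>-<partition>.records-lag" under "consumer-fetch-manager-metrics"); newer Kafka clients report partition lag under tagged metric names and prefer metricValue() over the deprecated value(), so adjust for your client version.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.apache.samza.system.SystemStreamPartition;
import org.apache.samza.system.kafka.KafkaSystemConsumer;

class LagLookupSketch {
  /** Returns the current records-lag for one SSP, or -1 if the metric is not reported yet. */
  static long currentLag(KafkaConsumer<?, ?> kafkaConsumer, SystemStreamPartition ssp, String clientId) {
    TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);

    Map<String, String> tags = new HashMap<>();
    tags.put("client-id", clientId); // the KafkaConsumer tags its metrics with its client id

    MetricName name = new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags);
    Metric lagMetric = kafkaConsumer.metrics().get(name);
    return (lagMetric != null) ? (long) lagMetric.value() : -1L;
  }
}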
origin: apache/samza

/**
 * Record the SSP and the offset. Do not submit it to the consumer yet.
 * @param systemStreamPartition ssp to register
 * @param offset offset to register with
 */
@Override
public void register(SystemStreamPartition systemStreamPartition, String offset) {
 if (started.get()) {
  String msg = String.format("%s: Trying to register partition after consumer has been started. ssp=%s", this,
    systemStreamPartition);
  throw new SamzaException(msg);
 }
 if (!systemStreamPartition.getSystem().equals(systemName)) {
  LOG.warn("{}: ignoring SSP {}, because this consumer's system doesn't match.", this, systemStreamPartition);
  return;
 }
 LOG.info("{}: Registering ssp = {} with offset {}", this, systemStreamPartition, offset);
 super.register(systemStreamPartition, offset);
 TopicPartition tp = toTopicPartition(systemStreamPartition);
 topicPartitionsToSSP.put(tp, systemStreamPartition);
 String existingOffset = topicPartitionsToOffset.get(tp);
 // register the older (of the two) offset in the consumer, to guarantee we do not miss any messages.
 if (existingOffset == null || compareOffsets(existingOffset, offset) > 0) {
  topicPartitionsToOffset.put(tp, offset);
 }
 metrics.registerTopicAndPartition(toTopicAndPartition(tp));
}
origin: apache/samza

@Test
public void testConsumerRegisterOlderOffsetOfTheSamzaSSP() {
 KafkaSystemConsumer consumer = createConsumer(FETCH_THRESHOLD_MSGS, FETCH_THRESHOLD_BYTES);
 SystemStreamPartition ssp0 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(0));
 SystemStreamPartition ssp1 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(1));
 SystemStreamPartition ssp2 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(2));
 consumer.register(ssp0, "0");
 consumer.register(ssp0, "5");
 consumer.register(ssp1, "2");
 consumer.register(ssp1, "3");
 consumer.register(ssp2, "0");
 assertEquals("0", consumer.topicPartitionsToOffset.get(KafkaSystemConsumer.toTopicPartition(ssp0)));
 assertEquals("2", consumer.topicPartitionsToOffset.get(KafkaSystemConsumer.toTopicPartition(ssp1)));
 assertEquals("0", consumer.topicPartitionsToOffset.get(KafkaSystemConsumer.toTopicPartition(ssp2)));
}

Popular methods of KafkaSystemConsumer

  • stop
  • compareOffsets
    Compare two String offsets. Note: there is a method in KafkaSystemAdmin that does that, but … (see the compareOffsets sketch after this list)
  • setFetchThresholds
  • startConsumer
    Set the offsets to start from. Register the TopicPartitions with the proxy. Start the proxy.
  • startSubscription
  • toTopicAndPartition
  • getMessagesSizeInQueue
  • getNumMessagesInQueue
  • register
    record the ssp and the offset. Do not submit it to the consumer yet.
  • start
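
compareOffsets is used in register(...) above to decide which of two registered offsets is older. A minimal sketch of that "older offset wins" logic, with a hypothetical numeric-string comparison standing in for KafkaSystemConsumer.compareOffsets (its real implementation is not shown on this page):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

class OlderOffsetWinsSketch {
  final Map<TopicPartition, String> topicPartitionsToOffset = new HashMap<>();

  // hypothetical stand-in for KafkaSystemConsumer.compareOffsets
  static int compareOffsets(String left, String right) {
    return Long.compare(Long.parseLong(left), Long.parseLong(right));
  }

  void register(TopicPartition tp, String offset) {
    String existing = topicPartitionsToOffset.get(tp);
    // keep the older (smaller) offset so no messages are skipped, mirroring register(...) above
    if (existing == null || compareOffsets(existing, offset) > 0) {
      topicPartitionsToOffset.put(tp, offset);
    }
  }

  public static void main(String[] args) {
    OlderOffsetWinsSketch sketch = new OlderOffsetWinsSketch();
    TopicPartition tp = new TopicPartition("my-topic", 0);
    sketch.register(tp, "5");
    sketch.register(tp, "0");
    System.out.println(sketch.topicPartitionsToOffset.get(tp)); // 0, the older offset wins
  }
}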
