KafkaSystemConsumer$KafkaConsumerMessageSink

How to use KafkaSystemConsumer$KafkaConsumerMessageSink in org.apache.samza.system.kafka

Best Java code snippets using org.apache.samza.system.kafka.KafkaSystemConsumer$KafkaConsumerMessageSink (showing unique results)
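
The sink is the hand-off point between KafkaConsumerProxy (which polls Kafka) and the consumer's BlockingEnvelopeMap (which buffers envelopes for the task). A minimal sketch of that flow, assuming a consumer wired up as in the constructor snippet below; the SSP and envelope values are illustrative, and messageSink is package-private, so code like this has to live in org.apache.samza.system.kafka (as the test fragments below do):

import org.apache.samza.Partition;
import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.SystemStreamPartition;
import org.apache.samza.system.kafka.KafkaSystemConsumer;

class MessageSinkFlowSketch {
 static void demo(KafkaSystemConsumer<byte[], byte[]> consumer) {
  SystemStreamPartition ssp = new SystemStreamPartition("kafka", "my-topic", new Partition(0));
  consumer.register(ssp, "0"); // consume this partition starting at offset "0"
  consumer.start();

  // KafkaConsumerProxy normally drives the calls below after each Kafka poll:
  IncomingMessageEnvelope env = new IncomingMessageEnvelope(ssp, "0", null, "payload".getBytes());
  consumer.messageSink.addMessage(ssp, env);                       // queue into the BlockingEnvelopeMap
  boolean fetchMore = consumer.messageSink.needsMoreMessages(ssp); // the proxy's back-pressure check
  consumer.messageSink.setIsAtHighWatermark(ssp, true);            // the proxy sets this when lag == 0
 }
}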

origin: org.apache.samza/samza-kafka_2.11 (also apache/samza, org.apache.samza/samza-kafka)

private void moveMessagesToTheirQueue(SystemStreamPartition ssp, List<IncomingMessageEnvelope> envelopes) {
 long nextOffset = nextOffsets.get(ssp);
 for (IncomingMessageEnvelope env : envelopes) {
  sink.addMessage(ssp, env);  // move message to the BlockingEnvelopeMap's queue
  LOG.trace("IncomingMessageEnvelope. got envelope with offset:{} for ssp={}", env.getOffset(), ssp);
  nextOffset = Long.valueOf(env.getOffset()) + 1;
 }
 nextOffsets.put(ssp, nextOffset);
}
origin: apache/samza (also org.apache.samza/samza-kafka, org.apache.samza/samza-kafka_2.11)

private void fetchMessages() {
 Set<SystemStreamPartition> sspsToFetch = new HashSet<>();
 // Only fetch partitions whose sink buffers have drained below the threshold.
 for (SystemStreamPartition ssp : nextOffsets.keySet()) {
  if (sink.needsMoreMessages(ssp)) {
   sspsToFetch.add(ssp);
  }
 }
 // ... (truncated in the index: the selected SSPs are then polled and the
 // resulting envelopes are handed to moveMessagesToTheirQueue)
}
origin: org.apache.samza/samza-kafka (also org.apache.samza/samza-kafka_2.11, apache/samza)

private void populateCurrentLags(Set<SystemStreamPartition> ssps) {
 Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics();
 // populate the MetricNames first time
 if (perPartitionMetrics.isEmpty()) {
  HashMap<String, String> tags = new HashMap<>();
  tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics
  for (SystemStreamPartition ssp : ssps) {
   TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);
   perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags));
  }
 }
 for (SystemStreamPartition ssp : ssps) {
  MetricName mn = perPartitionMetrics.get(ssp);
  Metric currentLagMetric = consumerMetrics.get(mn);
  // High watermark is fixed to be the offset of last available message,
  // so the lag is now at least 0, which is the same as Samza's definition.
  // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling.
  long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L;
  latestLags.put(ssp, currentLag);
  // calls the setIsAtHead for the BlockingEnvelopeMap
  sink.setIsAtHighWatermark(ssp, currentLag == 0);
 }
}
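
For reference, the same lookup works against a plain KafkaConsumer outside Samza. A minimal sketch, assuming the kafka-clients version these snippets compile against (one where Metric.value() still exists and the per-partition lag metric is named "<topic>-<partition>.records-lag"; newer clients move topic/partition into tags and use metricValue()):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;

class LagLookupSketch {
 // Returns the consumer's current records-lag for tp, or -1 if not yet reported.
 static long currentLag(Consumer<?, ?> consumer, TopicPartition tp, String clientId) {
  Map<String, String> tags = new HashMap<>();
  tags.put("client-id", clientId); // fetch-manager metrics are tagged with the client id
  MetricName name = new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags);
  Metric metric = consumer.metrics().get(name);
  return (metric != null) ? (long) metric.value() : -1L;
 }
}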
origin: apache/samza

// test fragment: ssp0/ssp1 and the envelopes (ime0, ime1, ime11) are built in the test's setup
consumer.register(ssp1, "0");
consumer.start();
consumer.messageSink.addMessage(ssp0, ime0);
Assert.assertEquals(true, consumer.messageSink.needsMoreMessages(ssp0));
consumer.messageSink.addMessage(ssp1, ime1);
Assert.assertEquals(true, consumer.messageSink.needsMoreMessages(ssp1));
consumer.messageSink.addMessage(ssp1, ime11);
Assert.assertEquals(false, consumer.messageSink.needsMoreMessages(ssp1));
origin: apache/samza

// test fragment: same flow as above, but the surrounding test is configured so that
// ssp0 already reports needsMoreMessages == false after one message (presumably a
// tighter fetch threshold)
consumer.register(ssp1, "0");
consumer.start();
consumer.messageSink.addMessage(ssp0, ime0);
Assert.assertEquals(false, consumer.messageSink.needsMoreMessages(ssp0));
consumer.messageSink.addMessage(ssp1, ime1);
Assert.assertEquals(true, consumer.messageSink.needsMoreMessages(ssp1));
consumer.messageSink.addMessage(ssp1, ime11);
Assert.assertEquals(false, consumer.messageSink.needsMoreMessages(ssp1));
origin: org.apache.samza/samza-kafka (also org.apache.samza/samza-kafka_2.11, apache/samza)

/**
 * Create a KafkaSystemConsumer for the provided {@code systemName}
 * @param kafkaConsumer kafka Consumer object to be used by this system consumer
 * @param systemName system name for which we create the consumer
 * @param config application config
 * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy
 * @param metrics metrics for this KafkaSystemConsumer
 * @param clock system clock
 */
public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId,
  KafkaSystemConsumerMetrics metrics, Clock clock) {
 super(metrics.registry(), clock, metrics.getClass().getName());
 this.kafkaConsumer = kafkaConsumer;
 this.clientId = clientId;
 this.systemName = systemName;
 this.config = config;
 this.metrics = metrics;
 fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName);
 // create a sink for passing the messages between the proxy and the consumer
 messageSink = new KafkaConsumerMessageSink();
 // Create the proxy to do the actual message reading.
 String metricName = String.format("%s-%s", systemName, clientId);
 proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics, metricName);
 LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy);
}
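
In a Samza job this constructor is normally invoked via KafkaSystemFactory.getConsumer, but the wiring can be sketched directly. The config contents and the single-argument KafkaSystemConsumerMetrics constructor below are assumptions, not taken from the index:

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.samza.config.Config;
import org.apache.samza.config.MapConfig;
import org.apache.samza.system.kafka.KafkaSystemConsumer;
import org.apache.samza.system.kafka.KafkaSystemConsumerMetrics;

class ConsumerWiringSketch {
 public static void main(String[] args) {
  String systemName = "kafka";
  String clientId = "samza-consumer-sketch";

  Properties props = new Properties();
  props.put("bootstrap.servers", "localhost:9092"); // illustrative broker address
  props.put("client.id", clientId);
  KafkaConsumer<byte[], byte[]> kafkaConsumer =
    new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());

  Map<String, String> cfg = new HashMap<>();
  cfg.put("systems." + systemName + ".samza.factory", "org.apache.samza.system.kafka.KafkaSystemFactory");
  Config config = new MapConfig(cfg);

  // assumed convenience constructor; the factory builds this with a real registry
  KafkaSystemConsumerMetrics metrics = new KafkaSystemConsumerMetrics(systemName);

  // Clock is a single-method interface, so a method reference suffices.
  KafkaSystemConsumer<byte[], byte[]> consumer = new KafkaSystemConsumer<>(
    kafkaConsumer, systemName, config, clientId, metrics, System::currentTimeMillis);
  // register SSPs, then start(): the constructor has already wired the
  // KafkaConsumerMessageSink into a KafkaConsumerProxy that does the polling.
 }
}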
origin: org.apache.samza/samza-kafka_2.11 (also apache/samza, org.apache.samza/samza-kafka)

private void initializeLags() {
 // This is expensive, so only do it once at the beginning. After the first poll, we can rely on metrics for lag.
 Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionToSSP.keySet());
 endOffsets.forEach((tp, offset) -> {
  SystemStreamPartition ssp = topicPartitionToSSP.get(tp);
  long startingOffset = nextOffsets.get(ssp);
  // End offsets are the offset of the newest message + 1
  // If the message we are about to consume is < end offset, we are starting with a lag.
  long initialLag = endOffsets.get(tp) - startingOffset;
  LOG.info("Initial lag for SSP {} is {} (end={}, startOffset={})", ssp, initialLag, endOffsets.get(tp), startingOffset);
  latestLags.put(ssp, initialLag);
  sink.setIsAtHighWatermark(ssp, initialLag == 0);
 });
 // initialize lag metrics
 refreshLagMetrics();
}
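
The same arithmetic in isolation, against a plain kafka-clients Consumer (the method name here is illustrative): endOffsets returns the last offset + 1, so end minus the next offset to consume is never negative, and 0 means caught up.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

class InitialLagSketch {
 // Lag of a partition given the next offset this consumer intends to read.
 static long initialLag(Consumer<?, ?> consumer, TopicPartition tp, long nextOffsetToConsume) {
  Map<TopicPartition, Long> end = consumer.endOffsets(Collections.singleton(tp));
  return end.get(tp) - nextOffsetToConsume; // 0 == already at the high watermark
 }
}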
org.apache.samza.system.kafka.KafkaSystemConsumer$KafkaConsumerMessageSink

Most used methods

  • addMessage
  • needsMoreMessages
  • <init>
  • setIsAtHighWatermark
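
All four are producer-side operations on the consumer's BlockingEnvelopeMap; the application drains the same queues through the standard SystemConsumer.poll contract. A minimal sketch (the SSP values are illustrative):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.samza.Partition;
import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.SystemConsumer;
import org.apache.samza.system.SystemStreamPartition;

class PollSketch {
 // Blocks up to 1s for envelopes that the sink queued for this partition.
 static void drain(SystemConsumer consumer) throws InterruptedException {
  SystemStreamPartition ssp = new SystemStreamPartition("kafka", "my-topic", new Partition(0));
  Map<SystemStreamPartition, List<IncomingMessageEnvelope>> polled =
    consumer.poll(Collections.singleton(ssp), 1000L);
  for (IncomingMessageEnvelope env : polled.getOrDefault(ssp, Collections.emptyList())) {
   System.out.println("offset=" + env.getOffset());
  }
 }
}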
