/**
 * Attempts to enqueue the given element without blocking.
 * When the underlying queue rejects the element (it is at capacity), a
 * per-queue failure counter is incremented and the rejection is logged.
 *
 * @param element item to enqueue
 * @return {@code true} when the element was accepted by the queue
 */
public boolean offer(T element) {
    boolean added = queue.offer(element);
    if (added) {
        return true;
    }
    // Queue is full: record the failure so operators can alert on sustained drops.
    metrics.counter("queue." + name + ".failures").inc();
    logger.error("[Queue: {}] Unable to add item: queue is full. Offered item: {}", name, element);
    return false;
}
/**
 * Attempts to enqueue the given element without blocking.
 * When the underlying queue rejects the element (it is at capacity), a
 * per-queue failure counter is incremented and the rejection is logged.
 *
 * @param element item to enqueue
 * @return {@code true} when the element was accepted by the queue
 */
public boolean offer(T element) {
    boolean added = queue.offer(element);
    if (added) {
        return true;
    }
    // Queue is full: record the failure so operators can alert on sustained drops.
    metrics.counter("queue." + name + ".failures").inc();
    logger.error("[Queue: {}] Unable to add item: queue is full. Offered item: {}", name, element);
    return false;
}
/**
 * Synchronously commits the given offsets to Kafka.
 * Interruption is logged and the thread's interrupt flag is restored;
 * any other failure is logged and counted, but never propagated.
 *
 * @param offsets offsets to commit for this subscription
 */
@Override
public void commit(Set<SubscriptionPartitionOffset> offsets) {
    try {
        consumer.commitSync(createOffset(offsets));
    } catch (InterruptException e) {
        // Kafka wraps thread interruption in its own unchecked InterruptException;
        // restore the interrupt flag so the owning loop can shut down cleanly.
        logger.info("Kafka consumer thread interrupted", e);
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        logger.error("Error while committing offset for subscription {}", subscription.getQualifiedName(), e);
        metrics.counter("offset-committer.failed").inc();
    }
}
/**
 * Discards a signal that cannot be delivered because no running consumer
 * process matches its target; the drop is counted per signal type and logged.
 *
 * @param signal the signal being discarded
 */
private void drop(Signal signal) {
    String droppedCounterName = "supervisor.signal.dropped." + signal.getType().name();
    metrics.counter(droppedCounterName).inc();
    logger.warn("Dropping signal {} as running target consumer process does not exist.", signal);
}
/**
 * Discards a signal that cannot be delivered because no running consumer
 * process matches its target; the drop is counted per signal type and logged.
 *
 * @param signal the signal being discarded
 */
private void drop(Signal signal) {
    String droppedCounterName = "supervisor.signal.dropped." + signal.getType().name();
    metrics.counter(droppedCounterName).inc();
    logger.warn("Dropping signal {} as running target consumer process does not exist.", signal);
}
/**
 * Synchronously commits the given offsets to Kafka.
 * Interruption is logged and the thread's interrupt flag is restored;
 * any other failure is logged and counted, but never propagated.
 *
 * @param offsets offsets to commit for this subscription
 */
@Override
public void commit(Set<SubscriptionPartitionOffset> offsets) {
    try {
        consumer.commitSync(createOffset(offsets));
    } catch (InterruptException e) {
        // Kafka wraps thread interruption in its own unchecked InterruptException;
        // restore the interrupt flag so the owning loop can shut down cleanly.
        logger.info("Kafka consumer thread interrupted", e);
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        logger.error("Error while committing offset for subscription {}", subscription.getQualifiedName(), e);
        metrics.counter("offset-committer.failed").inc();
    }
}
/**
 * Resolves the in-flight messages counter for the given subscription.
 *
 * @param subscription subscription whose counter is looked up
 * @return counter tracking messages currently in flight for the subscription
 */
private Counter getInflightCounter(Subscription subscription) {
    return counter(
            Counters.INFLIGHT,
            subscription.getTopicName(),
            subscription.getName());
}
/**
 * Updates per-subscription metrics for a message that finished processing:
 * bumps the given result counter, releases an in-flight slot, and records
 * how long the message spent in flight since it was read.
 *
 * @param counterToUpdate name of the result counter to increment
 * @param message the message that completed processing
 * @param subscription subscription the message belongs to
 */
protected void updateMetrics(String counterToUpdate, Message message, Subscription subscription) {
    hermesMetrics.counter(counterToUpdate, subscription.getTopicName(), subscription.getName()).inc();
    hermesMetrics.decrementInflightCounter(subscription);
    long timeInFlightMillis = System.currentTimeMillis() - message.getReadingTimestamp();
    hermesMetrics.inflightTimeHistogram(subscription).update(timeInFlightMillis);
}
}
/**
 * Updates per-subscription metrics for a message that finished processing:
 * bumps the given result counter, releases an in-flight slot, and records
 * how long the message spent in flight since it was read.
 *
 * @param counterToUpdate name of the result counter to increment
 * @param message the message that completed processing
 * @param subscription subscription the message belongs to
 */
protected void updateMetrics(String counterToUpdate, Message message, Subscription subscription) {
    hermesMetrics.counter(counterToUpdate, subscription.getTopicName(), subscription.getName()).inc();
    hermesMetrics.decrementInflightCounter(subscription);
    long timeInFlightMillis = System.currentTimeMillis() - message.getReadingTimestamp();
    hermesMetrics.inflightTimeHistogram(subscription).update(timeInFlightMillis);
}
}
/**
 * Records that a message was rejected by the subscription's filters:
 * bumps the filtered-messages rate meter and the cumulative filtered counter.
 *
 * @param subscription subscription whose filters rejected the message
 */
private void updateMetrics(Subscription subscription) {
    String subscriptionName = subscription.getName();
    metrics.meter(Meters.FILTERED_METER, subscription.getTopicName(), subscriptionName).mark();
    metrics.counter(Counters.FILTERED, subscription.getTopicName(), subscriptionName).inc();
}
}
/**
 * Records that a message was rejected by the subscription's filters:
 * bumps the filtered-messages rate meter and the cumulative filtered counter.
 *
 * @param subscription subscription whose filters rejected the message
 */
private void updateMetrics(Subscription subscription) {
    String subscriptionName = subscription.getName();
    metrics.meter(Meters.FILTERED_METER, subscription.getTopicName(), subscriptionName).mark();
    metrics.counter(Counters.FILTERED, subscription.getTopicName(), subscriptionName).inc();
}
}
/**
 * Single pass of the offset committer: computes, for every subscription
 * partition, an offset that is safe to commit and hands the result to the
 * message committer. The whole pass is timed as "offset-committer.duration"
 * and any failure is caught and logged so the committer keeps running.
 */
@Override public void run() {
    try (Timer.Context c = metrics.timer("offset-committer.duration").time()) {
        // committed offsets need to be drained first so that there is no possibility of new committed offsets
        // showing up after inflight queue is drained - this would lead to stall in committing offsets
        ReducingConsumer committedOffsetsReducer = processCommittedOffsets();
        // per-partition maximum of the already-committed offsets
        Map<SubscriptionPartition, Long> maxCommittedOffsets = committedOffsetsReducer.reduced;
        ReducingConsumer inflightOffsetReducer = processInflightOffsets(committedOffsetsReducer.all);
        // per-partition minimum of the offsets still being processed
        Map<SubscriptionPartition, Long> minInflightOffsets = inflightOffsetReducer.reduced;
        int scheduledToCommit = 0;
        OffsetsToCommit offsetsToCommit = new OffsetsToCommit();
        // The safe commit point per partition is the smaller of the lowest
        // still-inflight offset and the highest committed offset.
        for (SubscriptionPartition partition : Sets.union(minInflightOffsets.keySet(), maxCommittedOffsets.keySet())) {
            long offset = Math.min(
                minInflightOffsets.getOrDefault(partition, Long.MAX_VALUE),
                maxCommittedOffsets.getOrDefault(partition, Long.MAX_VALUE)
            );
            // Long.MAX_VALUE is the "no offset known" sentinel from getOrDefault above;
            // negative offsets are likewise not committable, so skip both.
            if (offset >= 0 && offset < Long.MAX_VALUE) {
                scheduledToCommit++;
                offsetsToCommit.add(new SubscriptionPartitionOffset(partition, offset));
            }
        }
        messageCommitter.commitOffsets(offsetsToCommit);
        metrics.counter("offset-committer.committed").inc(scheduledToCommit);
        cleanupUnusedSubscriptions();
    } catch (Exception exception) {
        logger.error("Failed to run offset committer: {}", exception.getMessage(), exception);
    }
}
/**
 * Registers the discarding of a single message: updates the discard counter
 * and global/topic/subscription discard meters, releases the message's
 * in-flight slot, and emits a tracking entry carrying the reason.
 *
 * @param messageMetadata metadata of the discarded message
 * @param subscription subscription the message was being delivered for
 * @param reason human-readable explanation of why the message was discarded
 */
public void markDiscarded(MessageMetadata messageMetadata, Subscription subscription, String reason) {
    TopicName topicName = fromQualifiedName(messageMetadata.getTopic());
    String subscriptionName = messageMetadata.getSubscription();
    metrics.counter(Counters.DISCARDED, topicName, subscriptionName).inc();
    metrics.meter(Meters.DISCARDED_METER).mark();
    metrics.meter(Meters.DISCARDED_TOPIC_METER, topicName).mark();
    metrics.meter(Meters.DISCARDED_SUBSCRIPTION_METER, topicName, subscriptionName).mark();
    metrics.decrementInflightCounter(subscription);
    trackers.get(subscription).logDiscarded(messageMetadata, reason);
}
}
/**
 * Registers the discarding of a single message: updates the discard counter
 * and global/topic/subscription discard meters, releases the message's
 * in-flight slot, and emits a tracking entry carrying the reason.
 *
 * @param messageMetadata metadata of the discarded message
 * @param subscription subscription the message was being delivered for
 * @param reason human-readable explanation of why the message was discarded
 */
public void markDiscarded(MessageMetadata messageMetadata, Subscription subscription, String reason) {
    TopicName topicName = fromQualifiedName(messageMetadata.getTopic());
    String subscriptionName = messageMetadata.getSubscription();
    metrics.counter(Counters.DISCARDED, topicName, subscriptionName).inc();
    metrics.meter(Meters.DISCARDED_METER).mark();
    metrics.meter(Meters.DISCARDED_TOPIC_METER, topicName).mark();
    metrics.meter(Meters.DISCARDED_SUBSCRIPTION_METER, topicName, subscriptionName).mark();
    metrics.decrementInflightCounter(subscription);
    trackers.get(subscription).logDiscarded(messageMetadata, reason);
}
}
/**
 * Registers the discarding of a whole message batch: bumps the discard
 * counter and meters by the batch size and logs each contained message as
 * discarded with the given reason.
 *
 * @param batch the batch whose messages were discarded
 * @param subscription subscription the batch was being delivered for
 * @param reason human-readable explanation of why the batch was discarded
 */
public void markDiscarded(MessageBatch batch, Subscription subscription, String reason) {
    int discardedCount = batch.size();
    metrics.counter(Counters.DISCARDED, subscription.getTopicName(), subscription.getName()).inc(discardedCount);
    metrics.meter(Meters.DISCARDED_METER).mark(discardedCount);
    metrics.meter(Meters.DISCARDED_TOPIC_METER, subscription.getTopicName()).mark(discardedCount);
    metrics.meter(Meters.DISCARDED_SUBSCRIPTION_METER, subscription.getTopicName(), subscription.getName()).mark(discardedCount);
    batch.getMessagesMetadata().forEach(metadata -> trackers.get(subscription).logDiscarded(metadata, reason));
}
/**
 * Registers the discarding of a whole message batch: bumps the discard
 * counter and meters by the batch size and logs each contained message as
 * discarded with the given reason.
 *
 * @param batch the batch whose messages were discarded
 * @param subscription subscription the batch was being delivered for
 * @param reason human-readable explanation of why the batch was discarded
 */
public void markDiscarded(MessageBatch batch, Subscription subscription, String reason) {
    int discardedCount = batch.size();
    metrics.counter(Counters.DISCARDED, subscription.getTopicName(), subscription.getName()).inc(discardedCount);
    metrics.meter(Meters.DISCARDED_METER).mark(discardedCount);
    metrics.meter(Meters.DISCARDED_TOPIC_METER, subscription.getTopicName()).mark(discardedCount);
    metrics.meter(Meters.DISCARDED_SUBSCRIPTION_METER, subscription.getTopicName(), subscription.getName()).mark(discardedCount);
    batch.getMessagesMetadata().forEach(metadata -> trackers.get(subscription).logDiscarded(metadata, reason));
}
/**
 * Dispatches an incoming supervisor signal to the matching handler.
 * Every processed signal is counted per type; unrecognized types are
 * logged and otherwise ignored.
 *
 * @param signal the signal to process
 */
private void processSignal(Signal signal) {
    logger.debug("Processing signal: {}", signal);
    metrics.counter("supervisor.signal." + signal.getType().name()).inc();

    switch (signal.getType()) {
        case START:
            start(signal);
            break;
        case STOP:
            stop(signal);
            break;
        case UPDATE_SUBSCRIPTION:
            updateSubscription(signal);
            break;
        case UPDATE_TOPIC:
        case RETRANSMIT:
        case COMMIT:
            // These are handled by the target consumer process itself, if it is running.
            forRunningConsumerProcess(signal, runningProcess -> runningProcess.getConsumerProcess().accept(signal));
            break;
        default:
            logger.warn("Unknown signal {}", signal);
            break;
    }
}
/**
 * Records successful delivery of a message batch: registers the HTTP answer,
 * bumps global/topic/subscription throughput meters and the delivered counter
 * by the batch size, marks one batch delivery, and logs a sent entry per
 * message with the responding hostname.
 *
 * @param batch the batch that was delivered
 * @param subscription subscription the batch was delivered for
 * @param result outcome of the send, providing status code and hostname
 */
private void markDelivered(MessageBatch batch, Subscription subscription, MessageSendingResult result) {
    int deliveredCount = batch.size();
    metrics.registerConsumerHttpAnswer(subscription, result.getStatusCode());
    metrics.meter(METER).mark(deliveredCount);
    metrics.meter(TOPIC_METER, subscription.getTopicName()).mark(deliveredCount);
    metrics.meter(SUBSCRIPTION_METER, subscription.getTopicName(), subscription.getName()).mark(deliveredCount);
    metrics.meter(SUBSCRIPTION_BATCH_METER, subscription.getTopicName(), subscription.getName()).mark();
    metrics.counter(Counters.DELIVERED, subscription.getTopicName(), subscription.getName()).inc(deliveredCount);
    batch.getMessagesMetadata().forEach(metadata -> trackers.get(subscription).logSent(metadata, result.getHostname()));
}
/**
 * Dispatches an incoming supervisor signal to the matching handler.
 * Every processed signal is counted per type; unrecognized types are
 * logged and otherwise ignored.
 *
 * @param signal the signal to process
 */
private void processSignal(Signal signal) {
    logger.debug("Processing signal: {}", signal);
    metrics.counter("supervisor.signal." + signal.getType().name()).inc();

    switch (signal.getType()) {
        case START:
            start(signal);
            break;
        case STOP:
            stop(signal);
            break;
        case UPDATE_SUBSCRIPTION:
            updateSubscription(signal);
            break;
        case UPDATE_TOPIC:
        case RETRANSMIT:
        case COMMIT:
            // These are handled by the target consumer process itself, if it is running.
            forRunningConsumerProcess(signal, runningProcess -> runningProcess.getConsumerProcess().accept(signal));
            break;
        default:
            logger.warn("Unknown signal {}", signal);
            break;
    }
}
/**
 * Records successful delivery of a message batch: registers the HTTP answer,
 * bumps global/topic/subscription throughput meters and the delivered counter
 * by the batch size, marks one batch delivery, and logs a sent entry per
 * message with the responding hostname.
 *
 * @param batch the batch that was delivered
 * @param subscription subscription the batch was delivered for
 * @param result outcome of the send, providing status code and hostname
 */
private void markDelivered(MessageBatch batch, Subscription subscription, MessageSendingResult result) {
    int deliveredCount = batch.size();
    metrics.registerConsumerHttpAnswer(subscription, result.getStatusCode());
    metrics.meter(METER).mark(deliveredCount);
    metrics.meter(TOPIC_METER, subscription.getTopicName()).mark(deliveredCount);
    metrics.meter(SUBSCRIPTION_METER, subscription.getTopicName(), subscription.getName()).mark(deliveredCount);
    metrics.meter(SUBSCRIPTION_BATCH_METER, subscription.getTopicName(), subscription.getName()).mark();
    metrics.counter(Counters.DELIVERED, subscription.getTopicName(), subscription.getName()).inc(deliveredCount);
    batch.getMessagesMetadata().forEach(metadata -> trackers.get(subscription).logSent(metadata, result.getHostname()));
}