/**
 * Caches a {@link Topic} together with every global and per-topic metric handle used on the
 * publishing hot path, so metric registry lookups happen once at construction time instead of
 * per request. Ack-level-specific latency timers are selected by the topic's ack mode.
 */
public CachedTopic(Topic topic, HermesMetrics hermesMetrics, KafkaTopics kafkaTopics, boolean blacklisted) {
    this.topic = topic;
    this.kafkaTopics = kafkaTopics;
    this.hermesMetrics = hermesMetrics;
    this.blacklisted = blacklisted;

    globalRequestMeter = hermesMetrics.meter(Meters.METER);
    topicRequestMeter = hermesMetrics.meter(Meters.TOPIC_METER, topic.getName());

    globalDelayedProcessingMeter = hermesMetrics.meter(Meters.DELAYED_PROCESSING);
    topicDelayedProcessingMeter = hermesMetrics.meter(Meters.TOPIC_DELAYED_PROCESSING, topic.getName());

    globalRequestReadLatencyTimer = hermesMetrics.timer(Timers.PARSING_REQUEST);
    topicRequestReadLatencyTimer = hermesMetrics.timer(Timers.TOPIC_PARSING_REQUEST, topic.getName());

    globalMessageCreationTimer = hermesMetrics.timer(Timers.MESSAGE_CREATION_LATENCY);
    topicMessageCreationTimer = hermesMetrics.timer(Timers.MESSAGE_CREATION_TOPIC_LATENCY, topic.getName());

    topicMessageContentSize = hermesMetrics.messageContentSizeHistogram(topic.getName());
    globalMessageContentSize = hermesMetrics.messageContentSizeHistogram();

    published = hermesMetrics.counter(Counters.PUBLISHED, topic.getName());

    globalThroughputMeter = hermesMetrics.meter(Meters.THROUGHPUT_BYTES);
    topicThroughputMeter = hermesMetrics.meter(Meters.TOPIC_THROUGHPUT_BYTES, topic.getName());

    // BUGFIX: the original assignments were inverted — the topic-scoped timers received the
    // unparameterized global metrics, and the global timers received the *_TOPIC_* metrics
    // parameterized with the topic name. Every other global/topic metric pair in this
    // constructor (e.g. Meters.METER vs Meters.TOPIC_METER) follows the convention restored here.
    if (Topic.Ack.ALL.equals(topic.getAck())) {
        globalProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_LATENCY);
        topicProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_TOPIC_LATENCY, topic.getName());
        globalBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_BROKER_LATENCY);
        topicBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_BROKER_TOPIC_LATENCY, topic.getName());
    } else {
        globalProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_LATENCY);
        topicProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_TOPIC_LATENCY, topic.getName());
        globalBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_BROKER_LATENCY);
        topicBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_BROKER_TOPIC_LATENCY, topic.getName());
    }
}
/**
 * Records failure metrics for a single failed delivery. An HTTP answer is recorded under the
 * status-code meter; otherwise the failure counts against the timeout meter or the generic
 * "other errors" meter.
 */
private void registerFailureMetrics(Subscription subscription, MessageSendingResult result) {
    if (result.hasHttpAnswer()) {
        hermesMetrics.registerConsumerHttpAnswer(subscription, result.getStatusCode());
        return;
    }
    if (result.isTimeout()) {
        hermesMetrics.consumerErrorsTimeoutMeter(subscription).mark();
        return;
    }
    hermesMetrics.consumerErrorsOtherMeter(subscription).mark();
}
}
/**
 * Builds a fully-qualified metric path by filling {@code pattern} with the configured metrics
 * prefix and the dot-escaped group and topic names.
 */
private String metricPath(String pattern, TopicName topicName) {
    String escapedGroup = escapeDots(topicName.getGroupName());
    String escapedTopic = escapeDots(topicName.getName());
    return String.format(pattern, metricsPaths.prefix(), escapedGroup, escapedTopic);
}
}
/**
 * Bumps the given per-subscription counter and closes inflight accounting for one message:
 * decrements the inflight counter and records how long the message spent inflight since it
 * was read.
 */
protected void updateMetrics(String counterToUpdate, Message message, Subscription subscription) {
    hermesMetrics.counter(counterToUpdate, subscription.getTopicName(), subscription.getName()).inc();
    hermesMetrics.decrementInflightCounter(subscription);
    long inflightMillis = System.currentTimeMillis() - message.getReadingTimestamp();
    hermesMetrics.inflightTimeHistogram(subscription).update(inflightMillis);
}
}
/**
 * Closes inflight accounting for a whole batch: decrements the inflight counter by the batch
 * size and records the batch lifetime in the inflight-time histogram.
 */
public void closeInflightMetrics(MessageBatch batch, Subscription subscription) {
    int batchSize = batch.size();
    metrics.decrementInflightCounter(subscription, batchSize);
    metrics.inflightTimeHistogram(subscription).update(batch.getLifetime());
}
/**
 * Registers a failed batch delivery: marks the per-subscription failure meter, records the
 * failure cause (HTTP answer, timeout, or other) and logs every message in the batch as failed
 * with the root cause and target hostname.
 */
private void markFailed(MessageBatch batch, Subscription subscription, MessageSendingResult result) {
    metrics.meter(Meters.FAILED_METER_SUBSCRIPTION, subscription.getTopicName(), subscription.getName()).mark();

    if (result.hasHttpAnswer()) {
        metrics.registerConsumerHttpAnswer(subscription, result.getStatusCode());
    } else if (result.isTimeout()) {
        metrics.consumerErrorsTimeoutMeter(subscription).mark();
    } else {
        metrics.consumerErrorsOtherMeter(subscription).mark();
    }

    batch.getMessagesMetadata().forEach(metadata ->
            trackers.get(subscription).logFailed(metadata, result.getRootCause(), result.getHostname()));
}
/**
 * Registers a discarded message: increments the discarded counter, marks the global, per-topic
 * and per-subscription discard meters, closes inflight accounting and logs the discard with
 * its reason.
 */
public void markDiscarded(MessageMetadata messageMetadata, Subscription subscription, String reason) {
    TopicName topicName = fromQualifiedName(messageMetadata.getTopic());
    String subscriptionName = messageMetadata.getSubscription();

    metrics.counter(Counters.DISCARDED, topicName, subscriptionName).inc();
    metrics.meter(Meters.DISCARDED_METER).mark();
    metrics.meter(Meters.DISCARDED_TOPIC_METER, topicName).mark();
    metrics.meter(Meters.DISCARDED_SUBSCRIPTION_METER, topicName, subscriptionName).mark();
    metrics.decrementInflightCounter(subscription);

    trackers.get(subscription).logDiscarded(messageMetadata, reason);
}
}
/**
 * Registers a successfully delivered batch: records the subscriber's HTTP answer, marks the
 * global/topic/subscription meters by the batch size plus one tick of the batch meter,
 * increments the delivered counter by the batch size and logs every message as sent.
 */
private void markDelivered(MessageBatch batch, Subscription subscription, MessageSendingResult result) {
    TopicName topicName = subscription.getTopicName();
    String subscriptionName = subscription.getName();
    int batchSize = batch.size();

    metrics.registerConsumerHttpAnswer(subscription, result.getStatusCode());
    metrics.meter(METER).mark(batchSize);
    metrics.meter(TOPIC_METER, topicName).mark(batchSize);
    metrics.meter(SUBSCRIPTION_METER, topicName, subscriptionName).mark(batchSize);
    metrics.meter(SUBSCRIPTION_BATCH_METER, topicName, subscriptionName).mark();
    metrics.counter(Counters.DELIVERED, topicName, subscriptionName).inc(batchSize);

    batch.getMessagesMetadata().forEach(metadata ->
            trackers.get(subscription).logSent(metadata, result.getHostname()));
}
/**
 * Stubs the metric collaborators used by these tests: the latency timer (and the context it
 * returns when started), the "other errors" meter and the per-subscription failure meter.
 */
private void setUpMetrics(Subscription subscription) {
    when(hermesMetrics.latencyTimer(subscription)).thenReturn(consumerLatencyTimer);
    when(consumerLatencyTimer.time()).thenReturn(consumerLatencyTimerContext);
    when(hermesMetrics.consumerErrorsOtherMeter(subscription)).thenReturn(errors);
    when(hermesMetrics.meter(Meters.FAILED_METER_SUBSCRIPTION, subscription.getTopicName(), subscription.getName()))
            .thenReturn(failedMeter);
}
/**
 * Attempts to enqueue {@code element}. When the underlying queue is full the element is
 * rejected: the per-queue failure counter is bumped and the rejection is logged as an error.
 *
 * @return {@code true} when the element was accepted, {@code false} when the queue was full
 */
public boolean offer(T element) {
    if (queue.offer(element)) {
        return true;
    }
    metrics.counter("queue." + name + ".failures").inc();
    logger.error("[Queue: {}] Unable to add item: queue is full. Offered item: {}", name, element);
    return false;
}
/**
 * Performs one offset-commit pass: drains committed offsets, then inflight offsets, and for
 * every partition commits the smallest offset that is safe (nothing earlier is still inflight).
 * The whole pass is timed and the number of scheduled commits is counted.
 */
@Override
public void run() {
    try (Timer.Context c = metrics.timer("offset-committer.duration").time()) {
        // committed offsets need to be drained first so that there is no possibility of new committed offsets
        // showing up after inflight queue is drained - this would lead to stall in committing offsets
        ReducingConsumer committedOffsetsReducer = processCommittedOffsets();
        Map<SubscriptionPartition, Long> maxCommittedOffsets = committedOffsetsReducer.reduced;

        // Inflight offsets are reduced against everything seen on the committed side.
        ReducingConsumer inflightOffsetReducer = processInflightOffsets(committedOffsetsReducer.all);
        Map<SubscriptionPartition, Long> minInflightOffsets = inflightOffsetReducer.reduced;

        int scheduledToCommit = 0;
        OffsetsToCommit offsetsToCommit = new OffsetsToCommit();
        // For each partition seen on either side, take the smaller of the minimal inflight
        // offset and the maximal committed offset; Long.MAX_VALUE stands in for "absent".
        for (SubscriptionPartition partition : Sets.union(minInflightOffsets.keySet(), maxCommittedOffsets.keySet())) {
            long offset = Math.min(
                    minInflightOffsets.getOrDefault(partition, Long.MAX_VALUE),
                    maxCommittedOffsets.getOrDefault(partition, Long.MAX_VALUE)
            );
            // Skip partitions with no usable offset (negative, or absent on both sides).
            if (offset >= 0 && offset < Long.MAX_VALUE) {
                scheduledToCommit++;
                offsetsToCommit.add(new SubscriptionPartitionOffset(partition, offset));
            }
        }
        messageCommitter.commitOffsets(offsetsToCommit);
        metrics.counter("offset-committer.committed").inc(scheduledToCommit);

        cleanupUnusedSubscriptions();
    } catch (Exception exception) {
        // Log and continue: this job is run periodically, so the next pass retries.
        logger.error("Failed to run offset committer: {}", exception.getMessage(), exception);
    }
}
/**
 * Creates the calculator with its collaborators and registers a gauge exposing the duration
 * (in millis) of the most recent max-rate calculation.
 */
MaxRateCalculator(SubscriptionAssignmentCache subscriptionAssignmentCache, SubscriptionsCache subscriptionsCache, MaxRateBalancer balancer, MaxRateRegistry maxRateRegistry, HermesMetrics metrics, Clock clock) {
    this.subscriptionAssignmentCache = subscriptionAssignmentCache;
    this.subscriptionsCache = subscriptionsCache;
    this.balancer = balancer;
    this.maxRateRegistry = maxRateRegistry;
    this.metrics = metrics;
    this.clock = clock;

    // The gauge reads the field lazily on each report, so it always reflects the latest run.
    metrics.registerGauge(Gauges.MAX_RATE_CALCULATION_DURATION, () -> lastUpdateDurationMillis);
}
/**
 * Marks the discard meters at every scope for a single discarded message: global, per-topic
 * and per-subscription.
 */
private void updateMeters(Subscription subscription) {
    TopicName topicName = subscription.getTopicName();
    hermesMetrics.meter(Meters.DISCARDED_METER).mark();
    hermesMetrics.meter(Meters.DISCARDED_TOPIC_METER, topicName).mark();
    hermesMetrics.meter(Meters.DISCARDED_SUBSCRIPTION_METER, topicName, subscription.getName()).mark();
}
/**
 * Updates delivery meters for one successfully sent message: the global, per-topic and
 * per-subscription meters, the subscription throughput meter (marked by the message size)
 * and the subscriber's HTTP answer.
 */
private void updateMeters(Message message, Subscription subscription, MessageSendingResult result) {
    TopicName topicName = subscription.getTopicName();
    String subscriptionName = subscription.getName();

    hermesMetrics.meter(Meters.METER).mark();
    hermesMetrics.meter(Meters.TOPIC_METER, topicName).mark();
    hermesMetrics.meter(Meters.SUBSCRIPTION_METER, topicName, subscriptionName).mark();
    hermesMetrics.meter(Meters.SUBSCRIPTION_THROUGHPUT_BYTES, topicName, subscriptionName).mark(message.getSize());
    hermesMetrics.registerConsumerHttpAnswer(subscription, result.getStatusCode());
}
}
/**
 * Asserts that the latency timer was looked up exactly once, started {@code timeCount} times
 * and stopped {@code closeCount} times.
 */
private void verifyLatencyTimersCountedTimes(int timeCount, int closeCount) {
    // verify(mock) is Mockito shorthand for verify(mock, times(1)).
    verify(hermesMetrics).latencyTimer(subscription);
    verify(consumerLatencyTimer, times(timeCount)).time();
    verify(consumerLatencyTimerContext, times(closeCount)).stop();
}
/**
 * Marks the per-HTTP-code and per-HTTP-code-family error meters for a subscriber's answer.
 * The metric path is built from the dot-escaped group, topic and subscription names plus the
 * status code and its family (e.g. 2xx/4xx/5xx).
 */
public void registerConsumerHttpAnswer(Subscription subscription, int statusCode) {
    TopicName topicName = subscription.getTopicName();
    PathContext pathContext = pathContext()
            .withGroup(escapeDots(topicName.getGroupName()))
            .withTopic(escapeDots(topicName.getName()))
            .withSubscription(escapeDots(subscription.getName()))
            .withHttpCode(statusCode)
            .withHttpCodeFamily(httpStatusFamily(statusCode))
            .build();

    metricRegistry.meter(pathCompiler.compile(Meters.ERRORS_HTTP_BY_FAMILY, pathContext)).mark();
    metricRegistry.meter(pathCompiler.compile(Meters.ERRORS_HTTP_BY_CODE, pathContext)).mark();
}
/**
 * Runs one workload-balancing pass: refreshes the consumers registry, and — only when this
 * node is the leader and the work tracker is ready — computes a new work distribution and
 * applies it. The balancing itself is timed per Kafka cluster.
 */
@Override
public void run() {
    try {
        consumersRegistry.refresh();
        // Only the current leader with a ready work tracker performs a rebalance.
        if (consumersRegistry.isLeader() && workTracker.isReady()) {
            try (Timer.Context ctx = metrics.consumersWorkloadRebalanceDurationTimer(kafkaCluster).time()) {
                logger.info("Initializing workload balance.");
                SubscriptionAssignmentView initialState = workTracker.getAssignments();
                WorkBalancingResult work = workBalancer.balance(
                        subscriptionsCache.listActiveSubscriptionNames(),
                        consumersRegistry.list(),
                        initialState);
                // Leadership is re-checked after balancing: it can be lost while the balance
                // computation runs, and applying changes without leadership could conflict
                // with assignments made by the new leader.
                if (consumersRegistry.isLeader()) {
                    WorkTracker.WorkDistributionChanges changes = workTracker.apply(initialState, work.getAssignmentsView());
                    logger.info("Finished workload balance {}, {}", work.toString(), changes.toString());
                    updateMetrics(work, changes);
                } else {
                    logger.info("Lost leadership before applying changes");
                }
            }
        } else {
            // Not leading (or tracker not ready) — reset balancing metrics, presumably so
            // they don't keep reporting values from a previous leadership term (verify).
            balancingMetrics.reset();
        }
    } catch (Exception e) {
        // Periodic job: log and let the next scheduled run retry.
        logger.error("Caught exception when running balancing job", e);
    }
}