public void shutdown() {
    logger.debug("Shutdown requested for consumer in group {} for topic {}", consumerGroupId, topic.toString());
    isStopped.set(true);
    consumerLoopExecutor.shutdown();

    try {
        consumerLoopExecutor.awaitTermination(2, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt flag and log instead of swallowing it with printStackTrace().
        Thread.currentThread().interrupt();
        logger.warn("Interrupted while waiting for the consumer loop to terminate", e);
    }

    // Stop all partition processors, let in-flight handlers finish, then commit and close.
    Set<TopicPartition> allPartitions = partitions.allPartitions();
    partitions.stopProcessing(allPartitions);
    partitions.waitForHandlersToComplete(allPartitions, HANDLER_TIMEOUT_MILLIS);
    kafka.commitSync(partitions.offsetsToBeCommitted());
    kafka.close();

    logger.info("Consumer in group {} for topic {} was shut down.", consumerGroupId, topic.toString());
}
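// Usage sketch (not from the source): wiring shutdown() into a JVM shutdown hook so
// in-flight handlers finish and offsets get committed before the process exits.
// The variable name `consumer` is hypothetical.
Runtime.getRuntime().addShutdownHook(new Thread(consumer::shutdown, "consumer-shutdown"));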
try {
    List<String> topics = new ArrayList<>();
    topics.add(topic.toString());
    kafka.subscribe(topics); // the surrounding log messages imply the subscription happens here
    logger.info("Consumer in group {} subscribed to topic {}", consumerGroupId, topic.toString());
} catch (Exception unexpected) {
    logger.error("Dead consumer in group {}: Cannot subscribe to topic {}", consumerGroupId, topic.toString(), unexpected);
    return;
}

while (!isStopped.get()) {
    try {
        // ... poll records and dispatch them to the registered handlers ...
    } catch (KafkaException kafkaException) {
        // Recoverable client exception: log it and keep polling.
        logger.warn("Received exception in ConsumerLoop of Consumer (group=" + consumerGroupId
                + ", topic=" + topic.toString() + "). Consumer continues.", kafkaException);
    } catch (Throwable unexpectedError) {
        // Anything else is fatal for this consumer.
        logger.error("Unexpected exception in ConsumerLoop of Consumer (group=" + consumerGroupId
                + ", topic=" + topic.toString() + "). Consumer now dead.", unexpectedError);
        return;
    }
}
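// A minimal sketch of the elided poll-and-dispatch step (assumptions: field names
// `kafka` and `partitions` as used elsewhere in this section; `partitions.enqueue`
// is a hypothetical dispatch method):
ConsumerRecords<String, byte[]> records = kafka.poll(Duration.ofMillis(300));
for (ConsumerRecord<String, byte[]> record : records) {
    partitions.enqueue(record); // route to the per-partition processor
}
kafka.commitSync(partitions.offsetsToBeCommitted());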
private void deliveryFailed(Message message, Exception failure, boolean tryDeliverMessage) {
    logger.debug(message.getMetadata().getLoggingMarker(),
            "Received tryDeliverMessage={} from {}.onFailedMessage({})",
            tryDeliverMessage, failedMessageProcessor.getClass().getTypeName(), failure.toString());

    if (metricsBuilderFactory != null) {
        GoCounter deliveryFailures = metricsBuilderFactory.newMetric("messaging_consumer_delivery_failures")
                .withTag("messageType", message.getMetadata().getType().toString())
                .withTag("topic", message.getMetadata().getTopic().toString())
                .buildCounter();
        // tryDeliverMessage=true means the FailedMessageProcessor asked for a redelivery
        // attempt; that outcome is counted as "success", giving up as "failure".
        if (tryDeliverMessage) {
            deliveryFailures.incSuccess();
        } else {
            deliveryFailures.incFailure();
        }
    }
}
public void send(Message message) {
    String destinationTopic = message.getMetadata().getTopic().toString();
    String partitioningKey = message.getMetadata().getPartitioningKey();
    Envelope envelope = Messages.toKafka(message);

    // The original fragment ends here. Presumably the envelope is handed to the Kafka
    // producer; the line below is a sketch of that step (the field name `kafka` is assumed).
    kafka.send(new ProducerRecord<>(destinationTopic, partitioningKey, envelope.toByteArray()));
}
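// Optional variation (sketch, not from the source): sending with a callback to surface
// broker-side failures instead of fire-and-forget.
kafka.send(new ProducerRecord<>(destinationTopic, partitioningKey, envelope.toByteArray()),
        (metadata, exception) -> {
            if (exception != null) {
                logger.error("Failed to send message {} to topic {}",
                        message.getMetadata().getMessageId(), destinationTopic, exception);
            }
        });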
private void deliveryStarted(Message message, MessageHandler handler, OrangeContext context) {
    logger.debug(message.getMetadata().getLoggingMarker(), "Calling {}.onMessage({})",
            handler.getClass().getTypeName(), message.getMetadata().getType());

    if (tracer != null) {
        span = tracer.buildSpan(message.getMetadata().getType().toString()).start();
        Tags.SPAN_KIND.set(span, "consumer");
        span.setTag("correlation_id", context.getCorrelationId());
        context.setTracingContext(span.context());
    }

    if (metricsBuilderFactory != null) {
        handlerTimer = metricsBuilderFactory.newMetric("messaging_consumer_message_handler")
                .withTag("messageType", message.getMetadata().getType().toString())
                .withTag("topic", message.getMetadata().getTopic().toString())
                .buildTimer();
        startTime = handlerTimer.start();
    }
}
private void deliveryEnded(Message message, boolean deliveryFailed) {
    logger.debug(message.getMetadata().getLoggingMarker(), "Message {} with offset {} in {}-{} marked as consumed.",
            message.getMetadata().getType(), message.getMetadata().getOffset(),
            message.getMetadata().getTopic().toString(), message.getMetadata().getPartitionId());

    if (span != null) {
        if (deliveryFailed) {
            Tags.ERROR.set(span, true);
        }
        span.finish();
    }

    if (handlerTimer != null) { // may be null in case of UnknownMessageHandlerException
        if (deliveryFailed) {
            handlerTimer.recordFailure(startTime);
        } else {
            handlerTimer.recordSuccess(startTime);
        }
    }
}
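// Sketch of how these hooks likely bracket a handler call (the dispatch code is not
// shown in this section; the onMessage and onFailedMessage signatures are guessed
// from the debug logs in deliveryStarted and deliveryFailed):
deliveryStarted(message, handler, context);
boolean failed = false;
try {
    handler.onMessage(message, context);
} catch (Exception failure) {
    failed = true;
    boolean tryDeliverMessage = failedMessageProcessor.onFailedMessage(message, failure);
    deliveryFailed(message, failure, tryDeliverMessage);
} finally {
    deliveryEnded(message, failed);
}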
public Marker getLoggingMarker() {
    // If we get more optional header fields, we should probably exclude them if they are empty.
    Marker messageMarker = append("messageId", messageId)
            .and(append("partitionId", partitionId))
            .and(append("partitioningKey", partitioningKey))
            .and(append("offset", offset))
            .and(append("correlationId", correlationId))
            .and(append("requestCorrelationId", requestCorrelationId));

    // Nota bene: without the toString the marker tries to convert the object into Json, which produces strange results
    if (topic != null) {
        messageMarker.add(append("topic", topic.toString()));
    }
    if (replyTo != null) {
        messageMarker.add(append("replyTo", replyTo.toString()));
    }
    if (type != null) {
        messageMarker.add(append("messageType", type.toString()));
    }

    return messageMarker;
}
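// Usage sketch: pass the marker as the first logger argument so the metadata lands as
// structured fields (e.g. with the logstash-logback encoder). `metadata` is hypothetical.
logger.info(metadata.getLoggingMarker(), "Consumed message of type {}", metadata.getType());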
static Envelope toKafka(Message message) {
    Envelope.Builder envelope = Envelope.newBuilder();
    Metadata meta = message.getMetadata();

    envelope.setMessageId(meta.getMessageId());

    // Correlation ids are set when building the message
    if (!Strings.isNullOrEmpty(meta.getCorrelationId())) {
        envelope.setCorrelationId(meta.getCorrelationId());
    }

    // Message exchange pattern headers
    if (meta.getReplyTo() != null) {
        envelope.setReplyTo(meta.getReplyTo().toString());
    }
    if (!Strings.isNullOrEmpty(meta.getRequestCorrelationId())) {
        envelope.setRequestCorrelationId(meta.getRequestCorrelationId());
    }

    // Payload (mandatory fields!)
    envelope.setMessageType(meta.getType().toString());
    envelope.setInnerMessage(message.getPayload().toByteString()); // Serialize the proto payload to bytes

    return envelope.build();
}
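// Round-trip sketch (assumption: Envelope is a generated protobuf type, so parseFrom
// applies; it throws the checked InvalidProtocolBufferException):
Envelope decoded = Envelope.parseFrom(Messages.toKafka(message).toByteArray());
// decoded.getMessageId() now equals message.getMetadata().getMessageId()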
private ConsumerRecord<String, byte[]> simulateKafkaInTheLoop(Message message, long offset) {
    Envelope envelope = Messages.toKafka(message);
    return new ConsumerRecord<>(message.getMetadata().getTopic().toString(), PARTITION, offset,
            message.getMetadata().getPartitioningKey(), envelope.toByteArray());
}
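// Test usage sketch: hand the simulated record to the code under test as if it came
// back from kafka.poll(...); the offset value here is arbitrary.
ConsumerRecord<String, byte[]> record = simulateKafkaInTheLoop(message, 42L);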