/**
 * Builds the inbox topic for a service, e.g. {@code inbox-com.sixt.service.foo} for the
 * default inbox or {@code inbox_trashcan-com.sixt.service.foo} for a named inbox.
 *
 * @param serviceName the owning service's name; must not be null or empty
 * @param inboxName   optional inbox qualifier; null or empty selects the default inbox
 * @return the inbox {@link Topic}
 * @throws IllegalArgumentException if serviceName is null or empty
 */
public static Topic serviceInbox(@NotNull String serviceName, String inboxName) {
    // Fail fast: validate arguments before building anything.
    if (Strings.isNullOrEmpty(serviceName)) {
        throw new IllegalArgumentException("service name must not be null or empty");
    }

    StringBuilder topic = new StringBuilder();
    topic.append("inbox");
    if (!Strings.isNullOrEmpty(inboxName)) {
        topic.append("_");
        topic.append(inboxName);
    }
    topic.append("-");
    topic.append(serviceName);
    return new Topic(topic.toString());
}
/**
 * Convenience factory for a service's default (unnamed) inbox topic.
 *
 * @param serviceName the owning service's name; must not be null or empty
 * @return the default inbox {@link Topic} for the service
 */
public static Topic defaultServiceInbox(@NotNull String serviceName) {
    final String defaultInboxName = ""; // empty inbox name selects the default inbox
    return serviceInbox(serviceName, defaultInboxName);
}
/**
 * Orderly shutdown of this consumer: stops the poll loop, waits briefly for it to
 * terminate, lets in-flight handlers finish (bounded by HANDLER_TIMEOUT_MILLIS),
 * commits the processed offsets, and closes the underlying Kafka client.
 */
public void shutdown() {
    logger.debug("Shutdown requested for consumer in group {} for topic {}", consumerGroupId, topic.toString());

    isStopped.set(true);
    consumerLoopExecutor.shutdown();
    try {
        consumerLoopExecutor.awaitTermination(2, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // FIX: was e.printStackTrace() — restore the interrupt flag and log via the
        // logger instead of swallowing the interruption; shutdown still proceeds.
        Thread.currentThread().interrupt();
        logger.warn("Interrupted while awaiting consumer loop termination for topic {}", topic.toString(), e);
    }

    // Drain: stop accepting work, wait (bounded) for running handlers, then commit what was processed.
    Set<TopicPartition> allPartitions = partitions.allPartitions();
    partitions.stopProcessing(allPartitions);
    partitions.waitForHandlersToComplete(allPartitions, HANDLER_TIMEOUT_MILLIS);
    kafka.commitSync(partitions.offsetsToBeCommitted());
    kafka.close();

    logger.info("Consumer in group {} for topic {} was shut down.", consumerGroupId, topic.toString());
}
/*
 * Full request/response round trip through a single PartitionProcessor: a request is
 * enqueued and handled; a reply built from the received request is then enqueued and
 * consumed by the same processor. Asserts that the reply's topic, requestCorrelationId
 * and correlationId all tie back to the original request.
 *
 * NOTE(review): "Cylce" in the method name is a typo for "Cycle" — left unchanged here
 * because this pass only adds documentation. The shortSleep() makes the reply-handling
 * check timing-dependent; presumably acceptable for this test — TODO confirm.
 */
@Test public void requestResponseCylce() throws InterruptedException {
    PartitionProcessor processor = givenAPartionProcessor();
    EmptyMessage payload = EmptyMessage.getDefaultInstance();
    OrangeContext context = new OrangeContext();
    ConsumerRecord<String, byte[]> aRecord;

    // Request addressed to the service's default inbox, with a named inbox as reply-to.
    Message sentRequest = Messages.requestFor(Topic.defaultServiceInbox("com.sixt.service.cruft"), Topic.serviceInbox("com.sixt.service.cruft", "trashcan"), "requestKey", payload, context);
    aRecord = simulateKafkaInTheLoop(sentRequest, 10);
    processor.enqueue(aRecord);

    // Wait until the handler has actually been invoked with the request.
    getTestHandler(processor).onMessageCalled.await();
    Message receivedRequest = getTestHandler(processor).lastMessage;
    OrangeContext receivedContext = getTestHandler(processor).lastContext;

    // Build the reply from what the handler received and feed it back through Kafka-in-the-loop.
    Message sentReply = Messages.replyTo(receivedRequest, payload, receivedContext);
    aRecord = simulateKafkaInTheLoop(sentReply, 20);
    processor.enqueue(aRecord);

    // Release the handler so it can process the enqueued reply.
    getTestHandler(processor).blockReturnFromOnMessage.countDown();
    shortSleep(); // for the reply handling

    Message receivedReply = getTestHandler(processor).lastMessage;
    assertEquals(2, getTestHandler(processor).handledMessages.size());
    // Reply must go to the request's reply-to topic and correlate back to the request.
    assertEquals(sentRequest.getMetadata().getReplyTo(), receivedReply.getMetadata().getTopic());
    assertEquals(sentRequest.getMetadata().getMessageId(), receivedReply.getMetadata().getRequestCorrelationId());
    assertEquals(context.getCorrelationId(), receivedReply.getMetadata().getCorrelationId());

    processor.waitForHandlersToTerminate(1);
}
/**
 * Creates a Consumer listening on this service's default inbox topic, using the default
 * Kafka configuration and the default partition-processor factory.
 *
 * @param failedMessageStrategy strategy applied to messages whose delivery fails
 * @return a ready-to-use Consumer for the default service inbox
 */
public Consumer defaultInboxConsumer(FailedMessageProcessor failedMessageStrategy) {
    Topic inbox = Topic.defaultServiceInbox(serviceProperties.getServiceName());
    return new Consumer(
            inbox,
            defaultConsumerGroupId(inbox),
            defaultKafkaConfig(),
            defaultPartitionProcessorFactory(failedMessageStrategy));
}
/**
 * Creates message metadata. Topic, messageId and type are mandatory; the remaining
 * header fields are optional (correlation/reply headers depend on the exchange pattern).
 *
 * @throws IllegalArgumentException if topic is null/empty, messageId is null/empty,
 *                                  or type is null
 */
Metadata(boolean wasReceived, Topic topic, String partitioningKey, int partitionId, long offset,
         String messageId, String correlationId, String requestCorrelationId, Topic replyTo, MessageType type) {
    // Validate all mandatory header fields up front (same exceptions as before).
    if (topic == null || topic.isEmpty()) {
        throw new IllegalArgumentException("topic is required");
    }
    if (Strings.isNullOrEmpty(messageId)) {
        throw new IllegalArgumentException("non-empty messageId is required");
    }
    if (type == null) {
        throw new IllegalArgumentException("type is required");
    }

    this.wasReceived = wasReceived;
    this.topic = topic;
    // A null partitioningKey is allowed: the producer then selects the partition (round-robin).
    this.partitioningKey = partitioningKey;
    this.partitionId = partitionId;
    this.offset = offset;
    this.messageId = messageId;
    this.correlationId = correlationId;
    this.requestCorrelationId = requestCorrelationId;
    this.replyTo = replyTo;
    this.type = type;
}
}
// NOTE(review): this chunk appears to be a collage of several places in the consumer
// loop — the catch block is not closed in this view, and `kafkaException` /
// `unexpectedError` are not declared anywhere visible here; presumably the last two
// log statements belong to other catch clauses in the full file. Verify against the
// complete source before changing anything.
try {
    List<String> topics = new ArrayList<>();
    topics.add(topic.toString());
    // NOTE(review): `topics` is built but never passed to a subscribe(...) call in this
    // view — confirm the subscription happens between these statements in the full file.
    logger.info("Consumer in group {} subscribed to topic {}", consumerGroupId, topic.toString());
} catch (Exception unexpected) {
    logger.error("Dead consumer in group {}: Cannot subscribe to topic {}", consumerGroupId, topic.toString(), unexpected);
    return;
    // NOTE(review): everything below is unreachable after the `return` as shown here —
    // these lines most likely originate from separate catch blocks elsewhere.
    logger.warn("Received exception in ConsumerLoop of Consumer (group=" + consumerGroupId + " ,topic=" + topic.toString() + "). Consumer continues.", kafkaException);
    logger.error("Unexpected exception in ConsumerLoop of Consumer (group=" + consumerGroupId + " ,topic=" + topic.toString() + "). Consumer now dead.", unexpectedError);
/**
 * Reconstructs a framework Message from a consumed Kafka record and its decoded envelope.
 * Record coordinates (topic/partition/offset/key) come from the ConsumerRecord; the
 * exchange headers (ids, reply-to) come from the Envelope.
 *
 * @param protoMessage the already-deserialized payload
 * @param envelope     the wire-format envelope carrying the message headers
 * @param record       the raw Kafka record the envelope was read from
 * @return a received Message (wasReceived == true) with fully populated metadata
 */
static Message<? extends com.google.protobuf.Message> fromKafka(com.google.protobuf.Message protoMessage, Envelope envelope, ConsumerRecord<String, byte[]> record) {
    final boolean wasReceived = true;

    Metadata meta = new Metadata(
            wasReceived,
            new Topic(record.topic()),
            record.key(),                // partitioning key
            record.partition(),
            record.offset(),
            envelope.getMessageId(),
            envelope.getCorrelationId(),
            envelope.getRequestCorrelationId(),
            new Topic(envelope.getReplyTo()),
            MessageType.of(protoMessage));

    return new Message<>(protoMessage, meta);
}
/**
 * Records the verdict of {@code FailedMessageProcessor.onFailedMessage(...)} after a
 * message delivery threw, and updates the delivery-failure counter when metrics are enabled.
 *
 * @param message           the message whose delivery failed
 * @param failure           the exception thrown during delivery
 * @param tryDeliverMessage the strategy's verdict: true = retry delivery, false = give up
 */
private void deliveryFailed(Message message, Exception failure, boolean tryDeliverMessage) {
    // FIX: pass the exception itself instead of failure.toString() — SLF4J stringifies
    // the argument lazily, so no work is done unless debug logging is enabled.
    logger.debug(message.getMetadata().getLoggingMarker(), "Received tryDeliverMessage={} from {}.onFailedMessage({})", tryDeliverMessage, failedMessageProcessor.getClass().getTypeName(), failure);

    if (metricsBuilderFactory != null) {
        GoCounter deliveryFailures = metricsBuilderFactory.newMetric("messaging_consumer_delivery_failures")
                .withTag("messageType", message.getMetadata().getType().toString())
                .withTag("topic", message.getMetadata().getTopic().toString())
                .buildCounter();

        // Counter semantics: "success" = strategy retries the message, "failure" = message dropped.
        if (tryDeliverMessage) {
            deliveryFailures.incSuccess();
        } else {
            deliveryFailures.incFailure();
        }
    }
}
// Sends a message to Kafka. NOTE(review): this chunk is truncated — the method body
// continues beyond the visible source, so only the visible prefix is documented here.
public void send(Message message) {
    // Destination topic and partitioning key are taken from the message's own metadata.
    String destinationTopic = message.getMetadata().getTopic().toString();
    String partitioningKey = message.getMetadata().getPartitioningKey();
    // Wrap payload and headers into the wire-format envelope for transmission.
    Envelope envelope = Messages.toKafka(message);
/*
 * Manual/exploratory test (hence @Ignore): documents what happens when a producer sends
 * to a topic that does not exist, with and without broker-side topic auto-creation.
 * The observed results are recorded in the comments below — keep them, they are the
 * point of this test.
 */
@Ignore @Test public void producerSendsToNonExistingTopic() {
    ServiceProperties serviceProperties = fillServiceProperties();

    Topic cruft = new Topic("cruft");
    Topic lard = new Topic("lard");

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    String key = RandomStringUtils.randomAscii(5);
    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

    Message request = Messages.requestFor(cruft, lard, key, payload, new OrangeContext());
    producer.send(request);

    // Results:
    // 1.) NO topic auto creation i.e. KAFKA_AUTO_CREATE_TOPICS_ENABLE = false
    // 2017-04-12 18:14:41,239 [Time-limited test] DEBUG c.s.s.f.kafka.messaging.Producer - Sending message com.sixt.service.framework.kafka.messaging.SayHelloToCmd with key O+oRQ to topic cruft
    // loads of: 2017-04-12 18:14:41,340 [kafka-producer-network-thread | producer-2] WARN o.apache.kafka.clients.NetworkClient - Error while fetching metadata with correlation id 0 : {cruft=UNKNOWN_TOPIC_OR_PARTITION}
    // and finally: org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.
    // 2.) WITH topic auto creation i.e. KAFKA_AUTO_CREATE_TOPICS_ENABLE = true
    // 2017-04-12 18:18:24,488 [Time-limited test] DEBUG c.s.s.f.kafka.messaging.Producer - Sending message com.sixt.service.framework.kafka.messaging.SayHelloToCmd with key uXdJ~ to topic cruft
    // one: 2017-04-12 18:18:24,638 [kafka-producer-network-thread | producer-2] WARN o.apache.kafka.clients.NetworkClient - Error while fetching metadata with correlation id 0 : {cruft=LEADER_NOT_AVAILABLE
    // and finally: success
}
/**
 * Hook invoked just before the handler's onMessage(...) runs: opens a tracing span and
 * starts the handler timer. Both are optional, depending on whether a tracer and a
 * metrics factory are configured.
 *
 * @param message the message about to be delivered
 * @param handler the handler that will receive the message
 * @param context per-message context; receives the tracing context when tracing is on
 */
private void deliveryStarted(Message message, MessageHandler handler, OrangeContext context) {
    Metadata meta = message.getMetadata();
    logger.debug(meta.getLoggingMarker(), "Calling {}.onMessage({})", handler.getClass().getTypeName(), meta.getType());

    if (tracer != null) {
        span = tracer.buildSpan(meta.getType().toString()).start();
        Tags.SPAN_KIND.set(span, "consumer");
        span.setTag("correlation_id", context.getCorrelationId());
        // Expose the span to downstream calls made by the handler.
        context.setTracingContext(span.context());
    }

    if (metricsBuilderFactory != null) {
        handlerTimer = metricsBuilderFactory
                .newMetric("messaging_consumer_message_handler")
                .withTag("messageType", meta.getType().toString())
                .withTag("topic", meta.getTopic().toString())
                .buildTimer();
        startTime = handlerTimer.start();
    }
}
private void deliveryEnded(Message message, boolean deliveryFailed) { logger.debug(message.getMetadata().getLoggingMarker(), "Message {} with offset {} in {}-{} marked as consumed.", message.getMetadata().getType(), message.getMetadata().getOffset(), message.getMetadata().getTopic().toString(), message.getMetadata().getPartitionId()); if (span != null) { if (deliveryFailed) { Tags.ERROR.set(span, true); } span.finish(); } if (handlerTimer != null) { // may be null in case of UnknownMessageHandlerException if (deliveryFailed) { handlerTimer.recordFailure(startTime); } else { handlerTimer.recordSuccess(startTime); } } }
ServiceProperties serviceProperties = fillServiceProperties();
// NOTE(review): the variable is named `cruft` but the topic string is "krufty" — looks
// like a leftover from a copied test; confirm which name is intended before changing
// either (the variable may be referenced further down, outside this view).
Topic cruft = new Topic("krufty");
public Marker getLoggingMarker() { // If we get more optional header fields, we should probably exclude them if they are empty. Marker messageMarker = append("messageId", messageId) .and(append("partitionId", partitionId)) .and(append("partitioningKey", partitioningKey)) .and(append("offset", offset)) .and(append("messageId", messageId)) .and(append("correlationId", correlationId)) .and(append("requestCorrelationId", requestCorrelationId)); // Nota bene: without the toString the marker tries to convert the object into Json, which produces strange results if (topic != null) { messageMarker.add(append("topic", topic.toString())); } if (replyTo != null) { messageMarker.add(append("replyTo", replyTo.toString())); } if (type != null) { messageMarker.add(append("messageType", type.toString())); } return messageMarker; }
static Envelope toKafka(Message message) { Envelope.Builder envelope = Envelope.newBuilder(); Metadata meta = message.getMetadata(); envelope.setMessageId(meta.getMessageId()); // Correlation ids are set when building the message if (!Strings.isNullOrEmpty(meta.getCorrelationId())) { envelope.setCorrelationId(meta.getCorrelationId()); } // Message exchange pattern headers if (meta.getReplyTo() != null) { envelope.setReplyTo(meta.getReplyTo().toString()); } if (!Strings.isNullOrEmpty(meta.getRequestCorrelationId())) { envelope.setRequestCorrelationId(meta.getRequestCorrelationId()); } // Payload (mandatory fields!) envelope.setMessageType(meta.getType().toString()); envelope.setInnerMessage(message.getPayload().toByteString()); // Serialize the proto payload to bytes return envelope.build(); } }
/**
 * Round-trips a message through envelope serialization to fake a ConsumerRecord exactly
 * as Kafka would deliver it (fixed PARTITION, caller-chosen offset).
 *
 * @param message the message to serialize into the fake record
 * @param offset  the offset to stamp on the record
 * @return a ConsumerRecord carrying the serialized envelope bytes
 */
private ConsumerRecord<String, byte[]> simulateKafkaInTheLoop(Message message, long offset) {
    Metadata meta = message.getMetadata();
    byte[] wireFormat = Messages.toKafka(message).toByteArray();
    return new ConsumerRecord<String, byte[]>(meta.getTopic().toString(), PARTITION, offset, meta.getPartitioningKey(), wireFormat);
}