/**
 * Sends without a completion callback by delegating to the callback-accepting overload with a
 * null callback. KafkaProducer funnels both overloads through one code path; we mirror that so
 * tracing is applied in exactly one place.
 */
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record) {
  return send(record, null);
}
/**
 * Wraps the given consumer so that a {@link Span.Kind#CONSUMER} span is extracted or created
 * for each record received. That span is injected back onto the record, so it becomes the
 * parent when a processor later calls {@link #nextSpan(ConsumerRecord)}.
 */
public <K, V> Consumer<K, V> consumer(Consumer<K, V> consumer) {
  Consumer<K, V> traced = new TracingConsumer<>(consumer, this);
  return traced;
}
/**
 * Wraps the given producer so a {@link Span.Kind#PRODUCER} span is started and propagated for
 * each message sent.
 */
public <K, V> Producer<K, V> producer(Producer<K, V> producer) {
  Producer<K, V> traced = new TracingProducer<>(producer, this);
  return traced;
}
/**
 * JMH per-trial setup: builds a Tracing component with a no-op reporter so the benchmark
 * measures instrumentation overhead only, then wires both producer variants under test.
 */
@Setup(Level.Trial)
public void init() {
  Tracing tracing = Tracing.newBuilder().spanReporter(Reporter.NOOP).build();
  producer = new FakeProducer();
  KafkaTracing defaultTracing = KafkaTracing.create(tracing);
  tracingProducer = defaultTracing.producer(producer);
  // Variant that writes trace context as a single "b3" header instead of multiple headers.
  KafkaTracing b3SingleTracing =
      KafkaTracing.newBuilder(tracing).writeB3SingleFormat(true).build();
  tracingB3SingleProducer = b3SingleTracing.producer(producer);
}
/**
 * Registers a {@link KafkaTracing} bean unless the application already defines one, applying
 * the remote service name configured under the Sleuth messaging Kafka properties.
 */
@Bean
@ConditionalOnMissingBean
KafkaTracing kafkaTracing(Tracing tracing, SleuthMessagingProperties properties) {
  String remoteServiceName = properties.getMessaging().getKafka().getRemoteServiceName();
  return KafkaTracing.newBuilder(tracing)
      .remoteServiceName(remoteServiceName)
      .build();
}
/**
 * Builds a byte-array producer from the given config and wraps it for tracing.
 *
 * <p>NOTE: this mutates the supplied config map by forcing byte-array key/value serializers.
 */
@Override
public Producer<byte[], byte[]> getProducer(Map<String, Object> config) {
  config.put("key.serializer", ByteArraySerializer.class);
  config.put("value.serializer", ByteArraySerializer.class);
  return kafkaTracing.producer(new KafkaProducer<>(config));
}
/**
 * Intercepts consumer-factory creation and returns a tracing wrapper around the created
 * consumer.
 *
 * @param pjp join point whose proceed() yields the original {@link Consumer}
 * @return a tracing-enabled consumer delegating to the original
 * @throws Throwable if the intercepted factory call fails
 */
@Around("anyConsumerFactory()")
public Object wrapConsumerFactory(ProceedingJoinPoint pjp) throws Throwable {
  // Wildcard instead of a raw type: capture conversion lets kafkaTracing.consumer infer K, V,
  // and we keep compile-time generics checking instead of suppressing it.
  Consumer<?, ?> consumer = (Consumer<?, ?>) pjp.proceed();
  return this.kafkaTracing.consumer(consumer);
}
static Callback create(@Nullable Callback delegate, Span span, CurrentTraceContext current) { if (span.isNoop()) return delegate; // save allocation overhead if (delegate == null) return new FinishSpan(span); return new DelegateAndFinishSpan(delegate, span, current); }
/**
 * Builds a {@link KafkaStreams} whose clients come from a tracing-enabled
 * {@link KafkaClientSupplier}, so every topology source and sink (including internal topics)
 * creates spans for the records it sends or consumes.
 *
 * <p>Use this instead of the {@link KafkaStreams} constructor:
 * <pre>{@code
 * // KafkaStreams with tracing-enabled KafkaClientSupplier
 * KafkaStreams kafkaStreams = kafkaStreamsTracing.kafkaStreams(topology, streamsConfig);
 * }</pre>
 *
 * @see TracingKafkaClientSupplier
 */
public KafkaStreams kafkaStreams(Topology topology, Properties streamsConfig) {
  KafkaClientSupplier clientSupplier =
      new TracingKafkaClientSupplier(KafkaTracing.create(tracing));
  return new KafkaStreams(topology, streamsConfig, clientSupplier);
}
/**
 * Creates a span for processing the given record. Note: the result has no name and is not
 * started.
 *
 * <p>The span is a child of identifiers extracted from the record headers; when nothing could
 * be extracted, a new trace is started and the record's metadata is added as tags.
 */
public Span nextSpan(ConsumerRecord<?, ?> record) {
  TraceContextOrSamplingFlags extracted = extractAndClearHeaders(record.headers());
  Span result = tracing.tracer().nextSpan(extracted);
  // Only tag fresh (unparented) spans that will actually be recorded.
  boolean freshTrace = extracted.context() == null;
  if (freshTrace && !result.isNoop()) addTags(record, result);
  return result;
}
span = tracer.nextSpan(kafkaTracing.extractAndClearHeaders(record.headers())); } else { return delegate.send(record, TracingCallback.create(callback, span, current)); } catch (RuntimeException | Error e) {
/** Builds the configured {@link KafkaTracing} instance from this builder's settings. */
public KafkaTracing build() {
  return new KafkaTracing(this);
}
}
/** Delegates to the callback-accepting overload with no callback, matching KafkaProducer. */
@Override
public Future<RecordMetadata> send(ProducerRecord<String, String> record) {
  return this.send(record, null);
}
/** Entry point for customized configuration: returns a builder bound to the given tracing component. */
public static Builder newBuilder(Tracing tracing) {
  return new Builder(tracing);
}
/** Duration overload: converts the timeout to milliseconds and delegates to {@code poll(long)}. */
public ConsumerRecords<K, V> poll(Duration timeout) {
  long timeoutMillis = timeout.toMillis();
  return poll(timeoutMillis);
}
TraceContextOrSamplingFlags extractAndClearHeaders(Headers headers) { TraceContextOrSamplingFlags extracted = extractor.extract(headers); // clear propagation headers if we were able to extract a span if (!extracted.equals(TraceContextOrSamplingFlags.EMPTY)) { clearHeaders(headers); } return extracted; }
/**
 * Runs the wrapped callback in the scope of the producer span's context, then finishes the
 * span via the superclass — in a finally block so the span completes even if the delegate
 * throws.
 */
@Override
public void onCompletion(RecordMetadata metadata, @Nullable Exception exception) {
  try (Scope scope = current.maybeScope(span.context())) {
    delegate.onCompletion(metadata, exception);
  } finally {
    super.onCompletion(metadata, exception);
  }
}
}
/**
 * Intercepts producer-factory creation and returns a tracing wrapper around the created
 * producer.
 *
 * @param pjp join point whose proceed() yields the original {@link Producer}
 * @return a tracing-enabled producer delegating to the original
 * @throws Throwable if the intercepted factory call fails
 */
@Around("anyProducerFactory()")
public Object wrapProducerFactory(ProceedingJoinPoint pjp) throws Throwable {
  // Wildcard instead of a raw type: capture conversion lets kafkaTracing.producer infer K, V,
  // and we keep compile-time generics checking instead of suppressing it.
  Producer<?, ?> producer = (Producer<?, ?>) pjp.proceed();
  return this.kafkaTracing.producer(producer);
}
/**
 * Builds a byte-array consumer from the given config and wraps it for tracing.
 *
 * <p>NOTE: this mutates the supplied config map by forcing byte-array key/value deserializers.
 */
@Override
public Consumer<byte[], byte[]> getConsumer(Map<String, Object> config) {
  config.put("key.deserializer", ByteArrayDeserializer.class);
  config.put("value.deserializer", ByteArrayDeserializer.class);
  return kafkaTracing.consumer(new KafkaConsumer<>(config));
}