public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null) {
        logger.debug("Error sending message to Kafka {}", exception.getMessage());
    }
    if (logger.isDebugEnabled()) {
        long eventElapsedTime = System.currentTimeMillis() - startTime;
        if (metadata != null) {
            logger.debug("Acked message partition:{} offset:{}", metadata.partition(), metadata.offset());
        }
        logger.debug("Elapsed time for send: {}", eventElapsedTime);
    }
}
}
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null) {
        // Log through the logger rather than printStackTrace so the failure
        // is captured by the application's logging configuration.
        logger.error("Error sending message to Kafka", exception);
        return;
    }
    logger.info("topic: {}; partition: {}; offset: {}",
            metadata.topic(), metadata.partition(), metadata.offset());
}
};
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null) {
        log.trace("Error sending message to Kafka due to " + exception.getMessage());
    }
    if (log.isDebugEnabled()) {
        long batchElapsedTime = System.currentTimeMillis() - startTime;
        if (metadata != null) {
            log.debug("Acked message_no " + index + ": " + metadata.topic() + "-"
                    + metadata.partition() + "-" + metadata.offset() + "-" + batchElapsedTime);
        }
    }
}
}
@Override
public void onCompletion(RecordMetadata md, Exception e) {
    if (e != null) {
        this.failedMessageCount.inc();
        this.failedMessageMeter.mark();
        LOGGER.error(e.getClass().getSimpleName() + " @ " + position + " -- " + key);
        LOGGER.error(e.getLocalizedMessage());
        if (e instanceof RecordTooLargeException) {
            LOGGER.error("Considering raising max.request.size broker-side.");
        } else if (!this.context.getConfig().ignoreProducerError) {
            this.context.terminate(e);
            return;
        }
    } else {
        this.succeededMessageCount.inc();
        this.succeededMessageMeter.mark();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("-> key:" + key + ", partition:" + md.partition() + ", offset:" + md.offset());
            LOGGER.debug("   " + this.json);
            LOGGER.debug("   " + position);
            LOGGER.debug("");
        }
    }
    cc.markCompleted();
}
}
@Override
protected void storeRecord(HistoryRecord record) throws DatabaseHistoryException {
    if (this.producer == null) {
        throw new IllegalStateException("No producer is available. Ensure that 'start()' is called before storing database history records.");
    }
    logger.trace("Storing record into database history: {}", record);
    try {
        ProducerRecord<String, String> produced = new ProducerRecord<>(topicName, PARTITION, null, record.toString());
        Future<RecordMetadata> future = this.producer.send(produced);
        // Flush and then wait ...
        this.producer.flush();
        RecordMetadata metadata = future.get(); // block forever since we have to be sure this gets recorded
        if (metadata != null) {
            logger.debug("Stored record in topic '{}' partition {} at offset {}",
                    metadata.topic(), metadata.partition(), metadata.offset());
        }
    } catch (InterruptedException e) {
        logger.trace("Interrupted before record was written into database history: {}", record);
        Thread.currentThread().interrupt(); // restore the interrupt status rather than clearing it
        throw new DatabaseHistoryException(e);
    } catch (ExecutionException e) {
        throw new DatabaseHistoryException(e);
    }
}
@Override
public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
    onAckCount++;
    if (exception != null) {
        onErrorAckCount++;
        // The length check is just a way to call the topic() method and let it throw
        // if the RecordMetadata's TopicPartition is null.
        if (metadata != null && metadata.topic().length() >= 0) {
            onErrorAckWithTopicSetCount++;
            if (metadata.partition() >= 0)
                onErrorAckWithTopicPartitionSetCount++;
        }
    }
    if (throwExceptionOnAck)
        throw new KafkaException("Injected exception in AppendProducerInterceptor.onAcknowledgement");
}
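Interceptor callbacks like the one above only fire once the interceptor is registered with a producer. A minimal sketch of that wiring, reusing the AppendProducerInterceptor name from the test's exception message; the bootstrap address, serializers, and String key/value types are placeholders, not from the source:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// Register the interceptor so onAcknowledgement() is invoked for every ack.
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, AppendProducerInterceptor.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(props);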
final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
offsets.put(tp, metadata.offset());
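A hedged sketch of the context a fragment like the one above typically lives in: a producer callback that records the last acknowledged offset per partition. The offsets map and the callback wrapper are assumptions for illustration, not from the source:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.common.TopicPartition;

// Hypothetical holder; the source only shows the two lines that update the map.
final Map<TopicPartition, Long> offsets = new ConcurrentHashMap<>();

Callback trackingCallback = (metadata, exception) -> {
    // Only record offsets for successful sends; on failure the offset is not meaningful.
    if (exception == null && metadata != null) {
        final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
        offsets.put(tp, metadata.offset());
    }
};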
@Test
public void testPartitioner() throws Exception {
    PartitionInfo partitionInfo0 = new PartitionInfo(topic, 0, null, null, null);
    PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null);
    Cluster cluster = new Cluster(null, new ArrayList<Node>(0), asList(partitionInfo0, partitionInfo1),
            Collections.<String>emptySet(), Collections.<String>emptySet());
    MockProducer<String, String> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(),
            new StringSerializer(), new StringSerializer());
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "key", "value");
    Future<RecordMetadata> metadata = producer.send(record);
    assertEquals("Partition should be correct", 1, metadata.get().partition());
    producer.clear();
    assertEquals("Clear should erase our history", 0, producer.history().size());
    producer.close();
}
@Test @SuppressWarnings("deprecation") public void testConstructionWithRelativeOffset() { TopicPartition tp = new TopicPartition("foo", 0); long timestamp = 2340234L; int keySize = 3; int valueSize = 5; long baseOffset = 15L; long relativeOffset = 3L; Long checksum = 908923L; RecordMetadata metadata = new RecordMetadata(tp, baseOffset, relativeOffset, timestamp, checksum, keySize, valueSize); assertEquals(tp.topic(), metadata.topic()); assertEquals(tp.partition(), metadata.partition()); assertEquals(timestamp, metadata.timestamp()); assertEquals(baseOffset + relativeOffset, metadata.offset()); assertEquals(checksum.longValue(), metadata.checksum()); assertEquals(keySize, metadata.serializedKeySize()); assertEquals(valueSize, metadata.serializedValueSize()); }
@Test @SuppressWarnings("deprecation") public void testConstructionWithMissingRelativeOffset() { TopicPartition tp = new TopicPartition("foo", 0); long timestamp = 2340234L; int keySize = 3; int valueSize = 5; Long checksum = 908923L; RecordMetadata metadata = new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize); assertEquals(tp.topic(), metadata.topic()); assertEquals(tp.partition(), metadata.partition()); assertEquals(timestamp, metadata.timestamp()); assertFalse(metadata.hasOffset()); assertEquals(-1L, metadata.offset()); assertEquals(checksum.longValue(), metadata.checksum()); assertEquals(keySize, metadata.serializedKeySize()); assertEquals(valueSize, metadata.serializedValueSize()); }
/**
 * Wait for the response to a message being sent.
 *
 * @param future The future for the message being sent.
 * @param properties The configuration properties.
 * @return Metadata about the record that was written to Kafka, or null if the send failed or timed out.
 */
private RecordMetadata waitForResponse(Future<RecordMetadata> future, Properties properties) {
    RecordMetadata record = null;
    int maxWait = getMaxWait(properties);
    try {
        // wait for the record and then render it for the user
        record = future.get(maxWait, TimeUnit.MILLISECONDS);
        LOG.debug("KAFKA_PUT message sent; topic={}, partition={}, offset={}",
                record.topic(), record.partition(), record.offset());
    } catch (TimeoutException | InterruptedException | ExecutionException e) {
        LOG.error("KAFKA_PUT message send failure", e);
    }
    return record;
}
/**
 * Render a view of the {@link RecordMetadata} that resulted from writing
 * messages to Kafka.
 *
 * @param records The record metadata.
 * @param properties The properties.
 * @return Either a list of maps describing each record (the 'rich' view) or a count of the records written.
 */
private Object render(List<RecordMetadata> records, Properties properties) {
    Object view;
    if (MESSAGE_VIEW_RICH.equals(getMessageView(properties))) {
        // build a 'rich' view of the messages that were written
        List<Object> responses = new ArrayList<>();
        for (RecordMetadata record : records) {
            // render the 'rich' view of the record
            Map<String, Object> richView = new HashMap<>();
            richView.put("topic", record.topic());
            richView.put("partition", record.partition());
            richView.put("offset", record.offset());
            richView.put("timestamp", record.timestamp());
            responses.add(richView);
        }
        // the rich view is a list of maps containing metadata about how each message was written
        view = responses;
    } else {
        // otherwise, the view is simply a count of the number of messages written
        view = CollectionUtils.size(records);
    }
    return view;
}
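A hedged sketch of how these two helpers might be composed; the producer, topic, messages, and properties variables are assumptions for illustration, not from the source:

List<RecordMetadata> records = new ArrayList<>();
for (String message : messages) {
    Future<RecordMetadata> future = producer.send(new ProducerRecord<>(topic, message));
    // waitForResponse() returns null on failure, so only successful sends get rendered
    RecordMetadata record = waitForResponse(future, properties);
    if (record != null) {
        records.add(record);
    }
}
Object view = render(records, properties);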
@Override
public void onCompletion(RecordMetadata metaData, Exception exception) {
    if (exception != null) {
        _logger.warn("Exception while sending message.", exception);
    } else {
        _logger.trace("Message sent to partition {} with offset {}.", metaData.partition(), metaData.offset());
    }
}
});
public void onCompletion(RecordMetadata metadata, Exception e) {
    if (e != null) {
        // Log through the logger instead of printStackTrace; on failure the
        // metadata's partition and offset are not meaningful, so log only the exception.
        log.error("Failed to send message to Kafka", e);
    }
}
};
RecordMetadata m = kafkaProducer.send(new ProducerRecord<byte[], byte[]>(
        topic, key.getBytes("UTF-8"), message.getBytes("UTF-8"))).get();
System.out.println("Message produced, offset: " + m.offset());
System.out.println("Message produced, partition: " + m.partition());
System.out.println("Message produced, topic: " + m.topic());
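The unbounded get() above blocks indefinitely if the broker never responds. A bounded variant of the same send, assuming a 10-second wait is acceptable (the timeout value is an assumption, not from the source):

import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

try {
    RecordMetadata m = kafkaProducer.send(new ProducerRecord<byte[], byte[]>(
            topic, key.getBytes(StandardCharsets.UTF_8), message.getBytes(StandardCharsets.UTF_8)))
        .get(10, TimeUnit.SECONDS); // bound the wait instead of blocking forever
    System.out.println("Message produced, offset: " + m.offset());
} catch (InterruptedException | ExecutionException | TimeoutException e) {
    // On timeout the send may still complete later; only the wait was abandoned.
    System.err.println("Send failed or timed out: " + e.getMessage());
}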
@Override
public void onCompletion(RecordMetadata returnMetadata, Exception e) {
    if (null != e) {
        logger.error("Error sending record.", e);
    } else {
        logger.debug("Completed publish to topic: {}, offset: {}, partition: {}",
                returnMetadata.topic(), returnMetadata.offset(), returnMetadata.partition());
    }
}
@RequestMapping(value = "/reservation", method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String reserveInventory(@RequestBody InventoryMutationAction inventory) throws InterruptedException, ExecutionException { LOGGER.info("Received request to reserve Inventory {}", inventory); LocationSku t = new LocationSku(inventory.getLocation(), inventory.getSku()); Future<RecordMetadata> recordfuture= producer.send(new ProducerRecord<>(Schemas.Topics.INVENTORY_RESERVATION.name(), t, inventory.getCount())); RecordMetadata metadata = recordfuture.get(); LOGGER.info("Sent inventory reservation {} to topic {}", inventory, Schemas.Topics.INVENTORY_RESERVATION.name()); return String.format("Inventory Reservation requested on Partition {%d}", metadata.partition()); } }
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null) {
        log.warn("Failed to produce metrics message", exception);
    } else {
        log.debug("Produced metrics message of size {} with offset {} to topic partition {}-{}",
                metadata.serializedValueSize(), metadata.offset(), metadata.topic(), metadata.partition());
    }
}
});
@Override
public void onSuccess(SendResult<String, String> sendResult) {
    ProducerRecord<String, String> producerRecord = sendResult.getProducerRecord();
    RecordMetadata recordMetadata = sendResult.getRecordMetadata();
    log.info("onSuccess(), offset {} partition {} timestamp {} for '{}'=='{}'",
            recordMetadata.offset(), recordMetadata.partition(), recordMetadata.timestamp(),
            producerRecord.key(), producerRecord.value());
}
@Override
public void convertAndSend(SmartCosmosEvent<Object> message) throws SmartCosmosEventException {
    MessageBuilder<SmartCosmosEvent<Object>> builder = MessageBuilder.withPayload(message)
            .setHeader(KafkaHeaders.TOPIC, message.getEventType());
    if (StringUtils.hasText(message.getEventUrn())) {
        builder.setHeader(KafkaHeaders.MESSAGE_KEY, message.getEventUrn());
    }
    ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(builder.build());
    future.addCallback(
            result -> log.info("Event successfully sent to Kafka topic {}, partition {}",
                    result.getRecordMetadata().topic(), result.getRecordMetadata().partition()),
            ex -> log.error("Failed to send event to Kafka", ex));
}
}