/**
 * Creates a {@link KafkaRecord} carrying {@code data} destined for {@code topic}.
 *
 * @param topic the Kafka topic the record targets
 * @param data  the serialized payload bytes
 * @return a new record wrapping the topic and payload
 */
public static KafkaRecord of(final String topic, final byte[] data) {
  final KafkaRecord record = new KafkaRecord(topic, data);
  return record;
}
/**
 * Hands the record to the Kafka producer asynchronously; delivery outcome is
 * reported via {@link LoggingCallback} rather than awaited here.
 */
private void send(final KafkaRecord kafkaRecord) {
  final String topic = kafkaRecord.getKafkaTopic();
  final byte[] payload = kafkaRecord.getKafkaData();
  kafkaProducer.send(new ProducerRecord<String, byte[]>(topic, payload), new LoggingCallback());
}
/**
 * Convenience adapter: wraps the raw topic/payload pair in a {@link KafkaRecord}
 * and delegates to the record-based overload.
 */
@Override
public void send(final String topic, final byte[] message) {
  final KafkaRecord record = KafkaRecord.of(topic, message);
  send(record);
}
/**
 * Sends {@code record} to Kafka, blocking up to {@code KAFKA_SEND_TIMEOUT} seconds
 * for the broker acknowledgement. Delivery is best-effort: failures and timeouts
 * are logged at WARN and otherwise ignored. If no producer is configured
 * ({@code kafkaProducer} is absent) the record is silently dropped at DEBUG level.
 */
public void send(final KafkaRecord record) {
  if (kafkaProducer.isPresent()) {
    try {
      final Future<RecordMetadata> future = kafkaProducer.get().send(
          new ProducerRecord<String, byte[]>(record.getKafkaTopic(), record.getKafkaData()));
      final RecordMetadata metadata = future.get(KAFKA_SEND_TIMEOUT, TimeUnit.SECONDS);
      log.debug("Sent an event to Kafka, meta: {}", metadata);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers up the stack can observe the
      // interruption; swallowing it here would silently cancel a shutdown request.
      Thread.currentThread().interrupt();
      log.warn("Unable to send an event to Kafka", e);
    } catch (ExecutionException | TimeoutException e) {
      log.warn("Unable to send an event to Kafka", e);
    }
  } else {
    log.debug("KafkaProducer isn't set. Not sending anything.");
  }
} }
/** * Set the {@link TaskStatus} for the job identified by {@code jobId}. */ @Override public void setTaskStatus(final JobId jobId, final TaskStatus status) throws InterruptedException { log.debug("setting task status: {}", status); taskStatuses.put(jobId.toString(), status.toJsonBytes()); try { historyWriter.saveHistoryItem(status); } catch (Exception e) { // Log error here and keep going as saving task history is not critical. // This is to prevent bad data in the queue from screwing up the actually important Helios // agent operations. log.error("Error saving task status {} to ZooKeeper: {}", status, e); } final TaskStatusEvent event = new TaskStatusEvent(status, System.currentTimeMillis(), agent); kafkaSender.send(KafkaRecord.of(TaskStatusEvent.KAFKA_TOPIC, event.toJsonBytes())); }
/**
 * Static factory: creates a {@link KafkaRecord} carrying {@code data} for {@code topic}.
 *
 * @param topic the Kafka topic the record targets
 * @param data  the serialized payload bytes
 * @return a new record wrapping the topic and payload
 */
public static KafkaRecord of(final String topic, final byte[] data) { return new KafkaRecord(topic, data); }
.setDeploymentGroupStatus(initialStatus) .build(); kafkaSender.send(KafkaRecord.of(DeploymentGroupEvent.KAFKA_TOPIC, event.toJsonBytes()));
kafkaSender.send(KafkaRecord.of(DeploymentGroupEvent.KAFKA_TOPIC, event.toJsonBytes()));