// NOTE(review): fragment of a larger method — presumably obtains an Optional producer from the
// provider and, when present, registers a KafkaSender for it; confirm against the full method.
kafkaClientProvider.getDefaultProducer(); kafkaProducer.ifPresent(producer -> senders.add(new KafkaSender(producer)));
/** Wraps {@code message} under {@code topic} in a {@link KafkaRecord} and delegates to {@link #send(KafkaRecord)}. */
@Override
public void send(final String topic, final byte[] message) {
  final KafkaRecord record = KafkaRecord.of(topic, message);
  send(record);
}
/** * Set the {@link TaskStatus} for the job identified by {@code jobId}. */ @Override public void setTaskStatus(final JobId jobId, final TaskStatus status) throws InterruptedException { log.debug("setting task status: {}", status); taskStatuses.put(jobId.toString(), status.toJsonBytes()); try { historyWriter.saveHistoryItem(status); } catch (Exception e) { // Log error here and keep going as saving task history is not critical. // This is to prevent bad data in the queue from screwing up the actually important Helios // agent operations. log.error("Error saving task status {} to ZooKeeper: {}", status, e); } final TaskStatusEvent event = new TaskStatusEvent(status, System.currentTimeMillis(), agent); kafkaSender.send(KafkaRecord.of(TaskStatusEvent.KAFKA_TOPIC, event.toJsonBytes())); }
/**
 * Creates the agent model backed by ZooKeeper state and local persistent files.
 *
 * <p>Wires up: a children cache for the host's job configs (with a JobsListener), a
 * ZooKeeper-updating persistent directory for task statuses, a task-history writer, and a
 * Kafka sender built from a String-key / byte-array-value producer.
 *
 * @param host          the hostname used for ZooKeeper paths; must be non-null
 * @param stateDirectory directory holding the locally persisted task config/status/history files
 * @throws IOException          if the local state files cannot be created or read
 * @throws InterruptedException if interrupted while talking to ZooKeeper
 */
public ZooKeeperAgentModel(final ZooKeeperClientProvider provider, final KafkaClientProvider kafkaProvider, final String host, final Path stateDirectory) throws IOException, InterruptedException { // TODO(drewc): we're constructing too many heavyweight things in the ctor, these kinds of // things should be passed in/provider'd/etc. final ZooKeeperClient client = provider.get("ZooKeeperAgentModel_ctor"); this.agent = checkNotNull(host); final Path taskConfigFile = stateDirectory.resolve(TASK_CONFIG_FILENAME); this.tasks = client.pathChildrenCache(Paths.configHostJobs(host), taskConfigFile, Json.type(Task.class)); tasks.addListener(new JobsListener()); final Path taskStatusFile = stateDirectory.resolve(TASK_STATUS_FILENAME); this.taskStatuses = ZooKeeperUpdatingPersistentDirectory.create("agent-model-task-statuses", provider, taskStatusFile, Paths.statusHostJobs(host)); this.historyWriter = new TaskHistoryWriter( host, client, stateDirectory.resolve(TASK_HISTORY_FILENAME)); this.kafkaSender = new KafkaSender( kafkaProvider.getProducer(new StringSerializer(), new ByteArraySerializer())); }
// NOTE(review): fragment — tail of an event-builder chain; the finished event is published to
// the deployment-group Kafka topic as JSON bytes.
.setDeploymentGroupStatus(initialStatus) .build(); kafkaSender.send(KafkaRecord.of(DeploymentGroupEvent.KAFKA_TOPIC, event.toJsonBytes()));
// NOTE(review): fragment — builds a KafkaSender around a String-key / byte-array-value producer,
// matching the serializer pair used elsewhere in this file.
final KafkaSender kafkaSender = new KafkaSender( kafkaClientProvider.getProducer(new StringSerializer(), new ByteArraySerializer()));
// NOTE(review): fragment — publishes the event as JSON bytes to the deployment-group Kafka topic.
kafkaSender.send(KafkaRecord.of(DeploymentGroupEvent.KAFKA_TOPIC, event.toJsonBytes()));