/**
 * Static factory for a {@link ZooKeeperUpdatingPersistentDirectory} backed by the given local
 * state file and ZooKeeper path.
 *
 * @param name      human-readable name for the directory (used for diagnostics).
 * @param client    provider of ZooKeeper clients.
 * @param stateFile local file the directory persists its state to.
 * @param path      ZooKeeper path the directory mirrors.
 */
public static ZooKeeperUpdatingPersistentDirectory create(final String name,
                                                          final ZooKeeperClientProvider client,
                                                          final Path stateFile,
                                                          final String path)
    throws IOException, InterruptedException {
  return new ZooKeeperUpdatingPersistentDirectory(name, client, stateFile, path);
}
/**
 * Returns the {@link TaskStatus}es for all tasks assigned to the current agent.
 *
 * <p>Keys in the backing directory are job-id strings; values are JSON-serialized statuses.
 * A status that fails to deserialize aborts the whole call with a {@link RuntimeException}.
 */
@Override
public Map<JobId, TaskStatus> getTaskStatuses() {
  final Map<JobId, TaskStatus> result = Maps.newHashMap();
  for (final Map.Entry<String, byte[]> cached : this.taskStatuses.entrySet()) {
    try {
      final JobId jobId = JobId.fromString(cached.getKey());
      final TaskStatus taskStatus = Json.read(cached.getValue(), TaskStatus.class);
      result.put(jobId, taskStatus);
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return result;
}
/**
 * Get the {@link TaskStatus} for the job identified by {@code jobId}.
 *
 * @return the deserialized status, or null if none is recorded for the job.
 */
@Override
public TaskStatus getTaskStatus(final JobId jobId) {
  final byte[] bytes = taskStatuses.get(jobId.toString());
  if (bytes != null) {
    try {
      return parse(bytes, TaskStatus.class);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  return null;
}
/**
 * Removes the entry for {@code key}, delegating to the {@code String} overload.
 * Non-String keys cannot be present, so they short-circuit to null.
 */
public byte[] remove(final Object key) throws InterruptedException {
  return (key instanceof String) ? remove((String) key) : null;
}
/**
 * Registers the connection-state listener and starts the reactor, blocking until it is running.
 */
@Override
protected void startUp() throws Exception {
  client("startUp").getConnectionStateListenable().addListener(connectionStateListener);
  reactor.startAsync().awaitRunning();
  // Nudge the reactor once so any state accumulated before start is processed immediately.
  reactor.signal();
}
/**
 * Stops the child services, waiting for each to terminate. History writing is optional and is
 * skipped when no writer was configured.
 */
@Override
protected void shutDown() throws Exception {
  tasks.stopAsync().awaitTerminated();
  taskStatuses.stopAsync().awaitTerminated();
  if (historyWriter == null) {
    return;
  }
  historyWriter.stopAsync().awaitTerminated();
}
/**
 * Starts the child services, waiting for each to be running. The history writer is only started
 * when one was configured.
 */
@Override
protected void startUp() throws Exception {
  tasks.startAsync().awaitRunning();
  taskStatuses.startAsync().awaitRunning();
  final boolean historyEnabled = historyWriter != null;
  if (historyEnabled) {
    historyWriter.startAsync().awaitRunning();
  }
}
/**
 * Returns true while this service has not begun stopping, i.e. its lifecycle state precedes
 * {@code STOPPING} in the state ordering (NEW, STARTING, RUNNING).
 */
private boolean isAlive() {
  // compareTo expresses the same enum ordering as the original ordinal() comparison while
  // avoiding direct ordinal() arithmetic (Effective Java: never derive logic from ordinal()).
  return state().compareTo(STOPPING) < 0;
}
/**
 * Creates the ZooKeeper-backed agent model.
 *
 * <p>Wires up locally-persisted caches of this host's ZooKeeper state: the task configuration
 * cache ({@code tasks}), the task status directory ({@code taskStatuses}), plus an optional
 * task history writer and the event senders used to publish status changes.
 *
 * @param provider             source of ZooKeeper clients.
 * @param host                 hostname of this agent; used to build the per-host ZK paths.
 * @param stateDirectory       local directory holding the caches' persistent state files.
 * @param historyWriter        writer for task history; may be null (history disabled — see the
 *                             null checks in the lifecycle and setTaskStatus methods).
 * @param eventSenders         sinks that receive serialized task status events.
 * @param taskStatusEventTopic topic used when publishing task status events.
 * @throws IOException          if a persistent cache cannot be initialized.
 * @throws InterruptedException if interrupted while setting up ZooKeeper-backed state.
 */
public ZooKeeperAgentModel(final ZooKeeperClientProvider provider, final String host,
                           final Path stateDirectory, final TaskHistoryWriter historyWriter,
                           final List<EventSender> eventSenders,
                           final String taskStatusEventTopic)
    throws IOException, InterruptedException {
  // TODO(drewc): we're constructing too many heavyweight things in the ctor, these kinds of
  // things should be passed in/provider'd/etc.
  final ZooKeeperClient client = provider.get("ZooKeeperAgentModel_ctor");
  this.agent = checkNotNull(host);
  final Path taskConfigFile = stateDirectory.resolve(TASK_CONFIG_FILENAME);
  // Cache of this host's job configuration nodes, persisted to taskConfigFile.
  this.tasks = client.pathChildrenCache(Paths.configHostJobs(host), taskConfigFile,
      Json.type(Task.class));
  tasks.addListener(new JobsListener());
  final Path taskStatusFile = stateDirectory.resolve(TASK_STATUS_FILENAME);
  // Directory mirroring the per-host job status path in ZooKeeper.
  // NOTE(review): exact sync semantics live in ZooKeeperUpdatingPersistentDirectory.
  this.taskStatuses = ZooKeeperUpdatingPersistentDirectory.create("agent-model-task-statuses",
      provider, taskStatusFile, Paths.statusHostJobs(host));
  this.historyWriter = historyWriter;
  this.eventSenders = eventSenders;
  this.taskStatusEventTopic = taskStatusEventTopic;
}
/** * Set the {@link TaskStatus} for the job identified by {@code jobId}. */ @Override public void setTaskStatus(final JobId jobId, final TaskStatus status) throws InterruptedException { log.debug("setting task status: {}", status); taskStatuses.put(jobId.toString(), status.toJsonBytes()); if (historyWriter != null) { try { historyWriter.saveHistoryItem(status); } catch (Exception e) { // Log error here and keep going as saving task history is not critical. // This is to prevent bad data in the queue from screwing up the actually important Helios // agent operations. log.error("Error saving task status {} to ZooKeeper: {}", status, e); } } final TaskStatusEvent event = new TaskStatusEvent(status, System.currentTimeMillis(), agent); final byte[] message = event.toJsonBytes(); for (final EventSender sender : eventSenders) { sender.send(taskStatusEventTopic, message); } }
/**
 * Remove the {@link TaskStatus} for the job identified by {@code jobId}.
 *
 * @throws InterruptedException if interrupted while removing the status entry.
 */
@Override
public void removeTaskStatus(final JobId jobId) throws InterruptedException {
  final String statusKey = jobId.toString();
  taskStatuses.remove(statusKey);
}
/**
 * Hooks up connection-state tracking and brings the reactor online.
 */
@Override
protected void startUp() throws Exception {
  // Listen for connection state changes on a client tagged with this call site.
  client("startUp")
      .getConnectionStateListenable()
      .addListener(connectionStateListener);
  reactor.startAsync().awaitRunning();
  reactor.signal();
}
/**
 * Stops the child services, waiting for each to terminate.
 */
@Override
protected void shutDown() throws Exception {
  tasks.stopAsync().awaitTerminated();
  taskStatuses.stopAsync().awaitTerminated();
  // Guard against a null history writer, matching the null-tolerant handling used by the
  // other lifecycle methods of this class. No-op when a writer is always constructed.
  if (historyWriter != null) {
    historyWriter.stopAsync().awaitTerminated();
  }
}
/**
 * Starts the child services, waiting for each to be running.
 */
@Override
protected void startUp() throws Exception {
  tasks.startAsync().awaitRunning();
  taskStatuses.startAsync().awaitRunning();
  // Guard against a null history writer, matching the null-tolerant handling used by the
  // other lifecycle methods of this class. No-op when a writer is always constructed.
  if (historyWriter != null) {
    historyWriter.startAsync().awaitRunning();
  }
}
/**
 * Returns true while this service has not begun stopping, i.e. its lifecycle state precedes
 * {@code STOPPING} in the state ordering (NEW, STARTING, RUNNING).
 */
private boolean isAlive() {
  // compareTo expresses the same enum ordering as the original ordinal() comparison while
  // avoiding direct ordinal() arithmetic (Effective Java: never derive logic from ordinal()).
  return state().compareTo(STOPPING) < 0;
}
/**
 * Creates the ZooKeeper-backed agent model.
 *
 * <p>Wires up locally-persisted caches of this host's ZooKeeper state: the task configuration
 * cache ({@code tasks}) and the task status directory ({@code taskStatuses}), plus a task
 * history writer and a Kafka sender used to publish status events.
 *
 * @param provider       source of ZooKeeper clients.
 * @param kafkaProvider  source of Kafka producers for status event publishing.
 * @param host           hostname of this agent; used to build the per-host ZK paths.
 * @param stateDirectory local directory holding the caches' persistent state files.
 * @throws IOException          if a persistent cache cannot be initialized.
 * @throws InterruptedException if interrupted while setting up ZooKeeper-backed state.
 */
public ZooKeeperAgentModel(final ZooKeeperClientProvider provider,
                           final KafkaClientProvider kafkaProvider, final String host,
                           final Path stateDirectory)
    throws IOException, InterruptedException {
  // TODO(drewc): we're constructing too many heavyweight things in the ctor, these kinds of
  // things should be passed in/provider'd/etc.
  final ZooKeeperClient client = provider.get("ZooKeeperAgentModel_ctor");
  this.agent = checkNotNull(host);
  final Path taskConfigFile = stateDirectory.resolve(TASK_CONFIG_FILENAME);
  // Cache of this host's job configuration nodes, persisted to taskConfigFile.
  this.tasks = client.pathChildrenCache(Paths.configHostJobs(host), taskConfigFile,
      Json.type(Task.class));
  tasks.addListener(new JobsListener());
  final Path taskStatusFile = stateDirectory.resolve(TASK_STATUS_FILENAME);
  // Directory mirroring the per-host job status path in ZooKeeper.
  // NOTE(review): exact sync semantics live in ZooKeeperUpdatingPersistentDirectory.
  this.taskStatuses = ZooKeeperUpdatingPersistentDirectory.create("agent-model-task-statuses",
      provider, taskStatusFile, Paths.statusHostJobs(host));
  this.historyWriter = new TaskHistoryWriter(
      host, client, stateDirectory.resolve(TASK_HISTORY_FILENAME));
  // Keys are serialized as Strings, values as raw JSON bytes.
  this.kafkaSender = new KafkaSender(
      kafkaProvider.getProducer(new StringSerializer(), new ByteArraySerializer()));
}
/** * Set the {@link TaskStatus} for the job identified by {@code jobId}. */ @Override public void setTaskStatus(final JobId jobId, final TaskStatus status) throws InterruptedException { log.debug("setting task status: {}", status); taskStatuses.put(jobId.toString(), status.toJsonBytes()); try { historyWriter.saveHistoryItem(status); } catch (Exception e) { // Log error here and keep going as saving task history is not critical. // This is to prevent bad data in the queue from screwing up the actually important Helios // agent operations. log.error("Error saving task status {} to ZooKeeper: {}", status, e); } final TaskStatusEvent event = new TaskStatusEvent(status, System.currentTimeMillis(), agent); kafkaSender.send(KafkaRecord.of(TaskStatusEvent.KAFKA_TOPIC, event.toJsonBytes())); }
/**
 * Removes the entry for {@code key} by delegating to the {@code String} overload; any
 * non-String key is absent by construction and yields null.
 */
public byte[] remove(final Object key) throws InterruptedException {
  if (key instanceof String) {
    return remove((String) key);
  }
  return null;
}
/**
 * Builds a {@link ZooKeeperUpdatingPersistentDirectory} that mirrors {@code path} in ZooKeeper
 * and persists locally to {@code stateFile}.
 */
public static ZooKeeperUpdatingPersistentDirectory create(final String name,
    final ZooKeeperClientProvider client, final Path stateFile, final String path)
    throws IOException, InterruptedException {
  final ZooKeeperUpdatingPersistentDirectory directory =
      new ZooKeeperUpdatingPersistentDirectory(name, client, stateFile, path);
  return directory;
}
/**
 * Get the {@link TaskStatus} for the job identified by {@code jobId}.
 *
 * @return the deserialized status, or null if none is recorded for the job.
 */
@Override
public TaskStatus getTaskStatus(final JobId jobId) {
  final byte[] data = taskStatuses.get(jobId.toString());
  if (data == null) {
    return null;
  }
  try {
    return parse(data, TaskStatus.class);
  } catch (IOException e) {
    // Guava's Throwables.propagate is deprecated; for a checked IOException it did exactly
    // this — wrap in RuntimeException — so behavior is unchanged and matches the sibling
    // getTaskStatus implementation.
    throw new RuntimeException(e);
  }
}