@Override
public <T> PersistentPathChildrenCache<T> pathChildrenCache(final String path,
                                                            final Path snapshotFile,
                                                            final JavaType valueType)
    throws IOException, InterruptedException {
  // Persistent cache of the children of {@code path}: entries are deserialized using
  // {@code valueType} and mirrored to {@code snapshotFile} on local disk.
  final PersistentPathChildrenCache<T> cache =
      new PersistentPathChildrenCache<>(client, path, clusterId, snapshotFile, valueType);
  return cache;
}
// NOTE(review): fragment — the enclosing if/else begins before this view, so braces are
// unbalanced here. One branch rebuilds the snapshot from the source via sync(); the other
// presumably reuses a copy of the current snapshot and notifies listeners that the node
// set changed — confirm against the full method.
newSnapshot = sync();
} else {
newSnapshot = Maps.newHashMap(currentSnapshot);
fireNodesChanged();
// NOTE(review): fragment — the loop body continues past this view. Fetches the current
// child node names, logs them at debug for tracing, then processes each child.
final List<String> children = getChildren();
log.debug("children: {}", children);
for (final String child : children) {
@Override
protected void shutDown() throws Exception {
  // Stop each child service and block until it has fully terminated.
  tasks.stopAsync().awaitTerminated();
  taskStatuses.stopAsync().awaitTerminated();
  // The history writer is optional in this variant of the model; skip it when absent.
  if (historyWriter != null) {
    historyWriter.stopAsync().awaitTerminated();
  }
}
/**
 * Returns the tasks (basically, a pair of {@link JobId} and {@link Task}) for the current
 * agent.
 */
@Override
public Map<JobId, Task> getTasks() {
  // Re-key the cached nodes from ZooKeeper path to JobId. A distinct local name is used
  // so it does not shadow the {@code this.tasks} field being read.
  final Map<JobId, Task> result = Maps.newHashMap();
  for (final Map.Entry<String, Task> node : this.tasks.getNodes().entrySet()) {
    result.put(jobIdFromTaskPath(node.getKey()), node.getValue());
  }
  return result;
}
@Override
protected void startUp() throws Exception {
  // Start each child service and block until it is running.
  tasks.startAsync().awaitRunning();
  taskStatuses.startAsync().awaitRunning();
  // The history writer is optional in this variant of the model; skip it when absent.
  if (historyWriter != null) {
    historyWriter.startAsync().awaitRunning();
  }
}
/**
 * Returns true while the service is in a pre-stopping lifecycle state
 * (i.e. any state declared before STOPPING: NEW, STARTING, RUNNING).
 */
private boolean isAlive() {
  // Enum natural order is declaration order, so compareTo() expresses the same check as
  // comparing ordinal() values — without raw ordinal arithmetic, which reviewers flag.
  return state().compareTo(STOPPING) < 0;
}
/**
 * Creates a ZooKeeper-backed agent model for {@code host}.
 *
 * @param provider             source of ZooKeeper clients.
 * @param host                 hostname identifying this agent; must be non-null.
 * @param stateDirectory       local directory where cache snapshot files are kept.
 * @param historyWriter        writer for task history; may be null (startUp/shutDown
 *                             null-check it, so no history is recorded in that case).
 * @param eventSenders         sinks that receive events.
 * @param taskStatusEventTopic topic name used for task status events.
 * @throws IOException          if a persistent cache cannot be initialized.
 * @throws InterruptedException if interrupted while setting up ZooKeeper-backed state.
 */
public ZooKeeperAgentModel(final ZooKeeperClientProvider provider, final String host,
    final Path stateDirectory, final TaskHistoryWriter historyWriter,
    final List<EventSender> eventSenders, final String taskStatusEventTopic)
    throws IOException, InterruptedException {
  // TODO(drewc): we're constructing too many heavyweight things in the ctor, these kinds of
  // things should be passed in/provider'd/etc.
  final ZooKeeperClient client = provider.get("ZooKeeperAgentModel_ctor");
  this.agent = checkNotNull(host);
  // Persistent cache of this host's job assignments, snapshotted to the task config file.
  final Path taskConfigFile = stateDirectory.resolve(TASK_CONFIG_FILENAME);
  this.tasks = client.pathChildrenCache(Paths.configHostJobs(host), taskConfigFile,
      Json.type(Task.class));
  // Listener must be attached before the cache service is started (see startUp).
  tasks.addListener(new JobsListener());
  // Locally persisted directory of task statuses, mirrored up to ZooKeeper.
  final Path taskStatusFile = stateDirectory.resolve(TASK_STATUS_FILENAME);
  this.taskStatuses = ZooKeeperUpdatingPersistentDirectory.create("agent-model-task-statuses",
      provider, taskStatusFile, Paths.statusHostJobs(host));
  this.historyWriter = historyWriter;
  this.eventSenders = eventSenders;
  this.taskStatusEventTopic = taskStatusEventTopic;
}
@Override
protected void shutDown() throws Exception {
  // Stop the backing services one at a time, waiting for each to terminate before
  // moving on to the next.
  tasks.stopAsync().awaitTerminated();
  taskStatuses.stopAsync().awaitTerminated();
  historyWriter.stopAsync().awaitTerminated();
}
/**
 * Returns the tasks (basically, a pair of {@link JobId} and {@link Task}) for the current
 * agent.
 */
@Override
public Map<JobId, Task> getTasks() {
  // Fixes two small issues: the local map previously shadowed the {@code this.tasks}
  // field (forcing the qualified read and inviting mistakes), and the loop variable
  // was not {@code final}, unlike the rest of the file.
  final Map<JobId, Task> result = Maps.newHashMap();
  for (final Map.Entry<String, Task> entry : this.tasks.getNodes().entrySet()) {
    // Node keys are ZooKeeper task paths; re-key by the JobId parsed from the path.
    final JobId id = jobIdFromTaskPath(entry.getKey());
    result.put(id, entry.getValue());
  }
  return result;
}
@Override
protected void startUp() throws Exception {
  // Bring up the backing services one at a time, waiting for each to be fully
  // running before starting the next.
  tasks.startAsync().awaitRunning();
  taskStatuses.startAsync().awaitRunning();
  historyWriter.startAsync().awaitRunning();
}
/**
 * Returns true while the service is in a pre-stopping lifecycle state
 * (i.e. any state declared before STOPPING: NEW, STARTING, RUNNING).
 */
private boolean isAlive() {
  // Enum natural order is declaration order, so compareTo() expresses the same check as
  // comparing ordinal() values — without raw ordinal arithmetic, which reviewers flag.
  return state().compareTo(STOPPING) < 0;
}
/**
 * Creates a ZooKeeper-backed agent model for {@code host}, publishing task status events
 * to Kafka.
 *
 * @param provider       source of ZooKeeper clients.
 * @param kafkaProvider  source of the Kafka producer used for the status event sender.
 * @param host           hostname identifying this agent; must be non-null.
 * @param stateDirectory local directory where cache snapshot and history files are kept.
 * @throws IOException          if a persistent cache cannot be initialized.
 * @throws InterruptedException if interrupted while setting up ZooKeeper-backed state.
 */
public ZooKeeperAgentModel(final ZooKeeperClientProvider provider,
    final KafkaClientProvider kafkaProvider, final String host, final Path stateDirectory)
    throws IOException, InterruptedException {
  // TODO(drewc): we're constructing too many heavyweight things in the ctor, these kinds of
  // things should be passed in/provider'd/etc.
  final ZooKeeperClient client = provider.get("ZooKeeperAgentModel_ctor");
  this.agent = checkNotNull(host);
  // Persistent cache of this host's job assignments, snapshotted to the task config file.
  final Path taskConfigFile = stateDirectory.resolve(TASK_CONFIG_FILENAME);
  this.tasks = client.pathChildrenCache(Paths.configHostJobs(host), taskConfigFile,
      Json.type(Task.class));
  // Listener must be attached before the cache service is started (see startUp).
  tasks.addListener(new JobsListener());
  // Locally persisted directory of task statuses, mirrored up to ZooKeeper.
  final Path taskStatusFile = stateDirectory.resolve(TASK_STATUS_FILENAME);
  this.taskStatuses = ZooKeeperUpdatingPersistentDirectory.create("agent-model-task-statuses",
      provider, taskStatusFile, Paths.statusHostJobs(host));
  // History writer is always constructed in this variant (no null checks elsewhere).
  this.historyWriter = new TaskHistoryWriter(
      host, client, stateDirectory.resolve(TASK_HISTORY_FILENAME));
  this.kafkaSender = new KafkaSender(
      kafkaProvider.getProducer(new StringSerializer(), new ByteArraySerializer()));
}
// NOTE(review): fragment — the enclosing if/else begins before this view, so braces are
// unbalanced here. One branch resynchronizes the snapshot via sync(); the other appears
// to copy the current snapshot map and fire a nodes-changed notification — confirm
// against the full method.
newSnapshot = sync();
} else {
newSnapshot = Maps.newHashMap(currentSnapshot);
fireNodesChanged();
@Override
public <T> PersistentPathChildrenCache<T> pathChildrenCache(final String path,
    final Path snapshotFile, final JavaType valueType)
    throws IOException, InterruptedException {
  // Build a snapshot-backed cache of the children of {@code path}, whose values are
  // deserialized as {@code valueType}.
  return new PersistentPathChildrenCache<>(client, path, clusterId, snapshotFile, valueType);
}
// NOTE(review): fragment — the body of this loop continues past this view. Reads the
// current child node names, logs them at debug level, then iterates over each child.
final List<String> children = getChildren();
log.debug("children: {}", children);
for (final String child : children) {