/**
 * Serializes a cluster job's staged task plan into a {@link JsonObject}.
 *
 * The result carries the job id, cluster id, action, current stage number, and a "stages"
 * array where each stage is an array of task objects (id, taskName, nodeId, service).
 *
 * @param job Job whose plan should be formatted.
 * @return JSON representation of the job's staged tasks.
 * @throws IOException if there was a problem reading a task from the cluster store.
 */
private JsonObject formatJobPlan(ClusterJob job) throws IOException {
  JsonObject out = new JsonObject();
  out.addProperty("id", job.getJobId());
  out.addProperty("clusterId", job.getClusterId());
  out.addProperty("action", job.getClusterAction().name());
  out.addProperty("currentStage", job.getCurrentStageNumber());

  JsonArray allStages = new JsonArray();
  for (Set<String> stagedTaskIds : job.getStagedTasks()) {
    JsonArray stageArray = new JsonArray();
    for (String idStr : stagedTaskIds) {
      // each stage stores task ids; resolve each one to the full task record
      ClusterTask clusterTask = clusterStore.getClusterTask(TaskId.fromString(idStr));
      JsonObject taskObj = new JsonObject();
      taskObj.addProperty("id", clusterTask.getTaskId());
      taskObj.addProperty("taskName", clusterTask.getTaskName().name());
      taskObj.addProperty("nodeId", clusterTask.getNodeId());
      taskObj.addProperty("service", clusterTask.getService());
      stageArray.add(taskObj);
    }
    allStages.add(stageArray);
  }
  out.add("stages", allStages);
  return out;
}
/**
 * Persists the given cluster job to the database, overwriting any previous version.
 *
 * @param clusterJob Job to write.
 * @throws IOException if there was a problem writing the job.
 */
@Override
public void writeClusterJob(ClusterJob clusterJob) throws IOException {
  JobId jobId = JobId.fromString(clusterJob.getJobId());
  long clusterId = Long.parseLong(jobId.getClusterId());
  // try-with-resources returns the pooled connection even if serialization or the put fails,
  // replacing the original nested try/finally.
  try (Connection conn = dbConnectionPool.getConnection()) {
    byte[] jobBytes = dbQueryExecutor.toBytes(clusterJob, ClusterJob.class);
    DBPut jobPut = new ClusterJobDBPut(clusterJob, jobBytes, jobId, clusterId);
    jobPut.executePut(conn);
  } catch (SQLException e) {
    // wrap with the SQLException preserved as the cause
    throw new IOException(e);
  }
}
/** * Sets the status of the given job to {@link ClusterJob.Status#RUNNING} and add it to the queue to be run. * * @param job Job to start. * @param cluster Cluster the job is for. * @throws IOException */ public void startJob(ClusterJob job, Cluster cluster) throws IOException { // TODO: wrap in a transaction LOG.debug("Starting job {} for cluster {}", job.getJobId(), cluster.getId()); job.setJobStatus(ClusterJob.Status.RUNNING); // Note: writing job status as RUNNING, will allow other operations on the job // (like cancel, etc.) to happen in parallel. clusterStore.writeClusterJob(job); callbackQueues.add(cluster.getAccount().getTenantId(), new Element(gson.toJson(new CallbackData(CallbackData.Type.START, cluster, job)))); }
/**
 * Handles a START callback for a job: invokes the cluster callback's onStart hook and, if it
 * allows the job to proceed, enqueues the job id for execution; otherwise fails the job in a
 * way appropriate to the cluster action.
 */
private void onStart(CallbackData callbackData, CallbackContext callbackContext) {
  ClusterJob job = callbackData.getJob();
  Cluster cluster = callbackData.getCluster();
  try {
    if (clusterCallback.onStart(callbackData, callbackContext)) {
      String jobId = callbackData.getJob().getJobId();
      // NOTE(review): 'gElement' is referenced but not defined in this view — presumably it
      // supplies the tenant/job queue name; confirm the intended identifier.
      jobQueues.add(gElement.getQueueName(), new Element(jobId));
      LOG.debug("added job {} to job queue", jobId);
    } else {
      switch (job.getClusterAction()) {
        case CLUSTER_CREATE:
          // a create that never started can be terminated outright; nothing to preserve
          taskService.failJobAndTerminateCluster(job, cluster,
            "Cluster creation stopped by failed start callback.");
          break;
        default:
          // failed to plan means the job should fail, but state has already been changed so the cluster
          // state in the db is inconsistent with reality.
          // TODO: Should revert it here but need versioning or cluster history or something to that effect.
          taskService.failJobAndSetClusterStatus(
            job, cluster, Cluster.Status.INCONSISTENT,
            "Failed to schedule the " + job.getClusterAction() + " operation.");
          break;
      }
    }
  } catch (Exception e) {
    // best-effort: a failure while failing the job is only logged, never rethrown
    LOG.error("Exception failing job {} for cluster {}", job.getJobId(), cluster.getId(), e);
  }
}
}
cluster.setLatestJobId(createJob.getJobId()); clusterStore.writeClusterJob(createJob);
clusterStore.writeClusterJob(clusterJob); jobQueues.add(account.getTenantId(), new Element(clusterJob.getJobId())); } finally { lock.unlock();
job.setStatusMessage("Exception while expanding macros: " + e.getMessage()); jobQueues.add(queueName, new Element(job.getJobId())); break;
clusterJob.getJobId(), clusterId); LOG.error(message); responder.sendError(HttpResponseStatus.CONFLICT, message);
cluster.setLatestJobId(job.getJobId()); cluster.setStatus(Cluster.Status.PENDING); prepareClusterForOperation(cluster, request);
ClusterJob createJob = new ClusterJob(clusterJobId, ClusterAction.ADD_SERVICES, request.getServices(), changedNodeIds); cluster.setLatestJobId(createJob.getJobId()); clusterStore.writeClusterJob(createJob);
JobId clusterJobId = idService.getNewJobId(cluster.getId()); ClusterJob clusterJob = new ClusterJob(clusterJobId, ClusterAction.SOLVE_LAYOUT); cluster.setLatestJobId(clusterJob.getJobId());
LOG.debug("Advancing to next stage {} for job {}", job.getCurrentStageNumber(), job.getJobId()); job.advanceStage(); jobQueues.add(queueName, new Element(jobIdStr));
ClusterJob job = new ClusterJob(jobId, action, addRequest.getServices(), null); job.setJobStatus(ClusterJob.Status.RUNNING); cluster.setLatestJobId(job.getJobId()); cluster.setStatus(Cluster.Status.PENDING); prepareClusterForOperation(cluster, addRequest);
/** * Sets the status of the given job to {@link ClusterJob.Status#COMPLETE} and the status of the given cluster to * {@link co.cask.coopr.cluster.Cluster.Status#ACTIVE}. * * @param job Job to complete. * @param cluster Cluster the job was for. * @throws IOException */ public void completeJob(ClusterJob job, Cluster cluster) throws IOException, IllegalAccessException { job.setJobStatus(ClusterJob.Status.COMPLETE); clusterStore.writeClusterJob(job); LOG.debug("Job {} is complete", job.getJobId()); // Update cluster status if (job.getClusterAction() == ClusterAction.CLUSTER_DELETE) { cluster.setStatus(Cluster.Status.TERMINATED); } else { cluster.setStatus(Cluster.Status.ACTIVE); } clusterStore.writeCluster(cluster); serverStats.getSuccessfulClusterStats().incrementStat(job.getClusterAction()); if (job.getClusterAction() == ClusterAction.CLUSTER_DELETE) { wipeSensitiveFields(cluster); } callbackQueues.add(cluster.getAccount().getTenantId(), new Element(gson.toJson(new CallbackData(CallbackData.Type.SUCCESS, cluster, job)))); }
/**
 * Seeds the stores before each test: an example cluster, its CLUSTER_CREATE job, and two nodes.
 */
@Before
public void beforeTest() throws Exception {
  cluster = Entities.ClusterExample.createCluster();
  JobId createJobId = new JobId(cluster.getId(), 0);
  job = new ClusterJob(createJobId, ClusterAction.CLUSTER_CREATE);
  cluster.setLatestJobId(job.getJobId());

  clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster);
  clusterStore.writeClusterJob(job);
  clusterStore.writeNode(Entities.ClusterExample.NODE1);
  clusterStore.writeNode(Entities.ClusterExample.NODE2);
}
/**
 * An admin account must be able to delete a cluster owned by a different account.
 */
@Test
public void testAdminCanDeleteClustersOwnedByOthers() throws Exception {
  String clusterId = "2";
  Cluster cluster = Entities.ClusterExample.createCluster();
  cluster.setStatus(Cluster.Status.ACTIVE);

  // give the cluster a completed delete job as its latest job
  ClusterJob deleteJob = new ClusterJob(new JobId(clusterId, 1), ClusterAction.CLUSTER_DELETE);
  deleteJob.setJobStatus(ClusterJob.Status.COMPLETE);
  cluster.setLatestJobId(deleteJob.getJobId());

  clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster);
  clusterStore.writeClusterJob(deleteJob);

  assertResponseStatus(doDeleteExternalAPI("/clusters/" + clusterId, ADMIN_HEADERS), HttpResponseStatus.OK);
}
.build(); ClusterJob job = new ClusterJob(new JobId(cluster.getId(), 1), ClusterAction.CLUSTER_CREATE); cluster.setLatestJobId(job.getJobId()); clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster); clusterStore.writeClusterJob(job);
ClusterJob clusterJob = new ClusterJob(new JobId(clusterId, 1), ClusterAction.CLUSTER_DELETE); clusterJob.setJobStatus(ClusterJob.Status.COMPLETE); cluster.setLatestJobId(clusterJob.getJobId()); clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster); clusterStore.writeClusterJob(clusterJob);