/**
 * Cancels this compaction run: kills every still-running Hadoop MR job, then —
 * regardless of kill outcome — shuts down the job executor and, last, closes the
 * completeness verifier if one is present.
 *
 * @throws IOException if querying or killing a Hadoop job fails
 */
@Override
public void cancel() throws IOException {
  try {
    // Best-effort kill of every MR job still tracked as running.
    for (Map.Entry<Dataset, Job> running : MRCompactor.RUNNING_MR_JOBS.entrySet()) {
      Job mrJob = running.getValue();
      if (mrJob.isComplete()) {
        continue;
      }
      LOG.info(String.format("Killing hadoop job %s for dataset %s", mrJob.getJobID(), running.getKey()));
      mrJob.killJob();
    }
  } finally {
    try {
      // Zero-timeout shutdown: do not wait for in-flight tasks.
      ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
    } finally {
      if (this.verifier.isPresent()) {
        this.verifier.get().closeNow();
      }
    }
  }
}
/**
 * Closes this runner: kills the submitted Hadoop MR job if it has not finished,
 * cleans up the working directory, and releases parent resources and the file system.
 *
 * <p>Fix: {@code fs.close()} is now in its own {@code finally} so the file system is
 * released even when {@code super.close()} throws (previously it leaked in that case).
 *
 * @throws IOException if killing the job, cleanup, or closing resources fails
 */
@Override
public void close() throws IOException {
  try {
    if (this.hadoopJobSubmitted && !this.job.isComplete()) {
      LOG.info("Killing the Hadoop MR job for job " + this.jobContext.getJobId());
      this.job.killJob();
    }
  } finally {
    try {
      cleanUpWorkingDirectory();
    } finally {
      try {
        super.close();
      } finally {
        // Ensure the file system is closed even if super.close() throws.
        fs.close();
      }
    }
  }
}
@Override protected void executeCancellation() { try { if (this.hadoopJobSubmitted && !this.job.isComplete()) { LOG.info("Killing the Hadoop MR job for job " + this.jobContext.getJobId()); this.job.killJob(); // Collect final task states. this.taskStateCollectorService.stopAsync().awaitTerminated(); } } catch (IllegalStateException ise) { LOG.error("The Hadoop MR job has not started for job " + this.jobContext.getJobId()); } catch (IOException ioe) { LOG.error("Failed to kill the Hadoop MR job for job " + this.jobContext.getJobId()); } }
500); float lastProgress = progressDone; while (!job.isComplete()) { float newProgress = progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " + job.isSuccessful()); Counters ctrs = job.getCounters(); LOG.debug(Objects.toString(ctrs)); if (job.isComplete() && !job.isSuccessful()) { throw new Exception("DistCp job-id: " + jobID + " failed");
@Override public void cancel(String jobId) throws IOException { JobID id = JobID.forName(jobId); Cluster cluster = new Cluster(this.getConf()); try { Job job = cluster.getJob(id); if (job == null) { LOG.error("No job found for " + id); // should we throw exception return; } if (job.isComplete() || job.isRetired()) { return; } job.killJob(); LOG.debug("Killed copy job " + id); } catch (InterruptedException e) { throw new IOException(e); } }
/**
 * Submits the given MR job, registers it as running, and polls until it completes.
 * If the abort policy flips to {@code ABORT_ASAP} while waiting (input data found to
 * be incomplete), the job is killed and the method returns without error so the
 * compaction can be retried later.
 *
 * <p>Fix: the failure message now says "dataset" to match every other log message in
 * this method (it previously said "topic").
 *
 * @param job the configured but not-yet-submitted Hadoop MR job
 * @throws RuntimeException if the job completes unsuccessfully
 */
private void submitAndWait(Job job) throws ClassNotFoundException, IOException, InterruptedException {
  job.submit();
  MRCompactor.addRunningHadoopJob(this.dataset, job);
  LOG.info(String.format("MR job submitted for dataset %s, input %s, url: %s", this.dataset, getInputPaths(),
      job.getTrackingURL()));
  while (!job.isComplete()) {
    if (this.policy == Policy.ABORT_ASAP) {
      LOG.info(String.format(
          "MR job for dataset %s, input %s killed due to input data incompleteness." + " Will try again later",
          this.dataset, getInputPaths()));
      job.killJob();
      return;
    }
    // Poll instead of blocking on waitForCompletion so the abort policy is re-checked.
    Thread.sleep(MR_JOB_CHECK_COMPLETE_INTERVAL_MS);
  }
  if (!job.isSuccessful()) {
    throw new RuntimeException(String.format("MR job failed for dataset %s, input %s, url: %s", this.dataset,
        getInputPaths(), job.getTrackingURL()));
  }
}
/**
 * Cancels the run: stops the executor immediately, then kills every Hadoop job that
 * has not yet completed.
 *
 * <p>Fix: kill failures were previously only printed via {@code printStackTrace} and
 * then lost. All jobs are still killed best-effort, but the first failure is now
 * rethrown (with later ones attached as suppressed) so callers can react.
 *
 * @throws Exception if killing one or more jobs fails
 */
public void cancel() throws Exception {
  executorService.shutdownNow();
  IOException failure = null;
  for (Job hadoopJob : runningJobs) {
    if (!hadoopJob.isComplete()) {
      try {
        hadoopJob.killJob();
      } catch (IOException e) {
        // Keep attempting the remaining jobs; remember the first failure.
        if (failure == null) {
          failure = e;
        } else {
          failure.addSuppressed(e);
        }
      }
    }
  }
  if (failure != null) {
    throw failure;
  }
}
/**
 * Returns immediately whether the whole job is done yet or not.
 *
 * @return {@code true} if the underlying Hadoop job has completed
 * @throws IOException if the job status cannot be retrieved
 */
public synchronized boolean isComplete() throws IOException {
  return job.isComplete();
}
/**
 * Returns immediately whether the whole job is done yet or not.
 *
 * @return {@code true} if the underlying Hadoop job has completed
 * @throws IOException if the job status cannot be retrieved
 */
public synchronized boolean isComplete() throws IOException {
  return job.isComplete();
}
/**
 * Returns immediately whether the whole job is done yet or not.
 *
 * @return {@code true} if the underlying Hadoop job has completed
 * @throws IOException if the job status cannot be retrieved
 */
public synchronized boolean isComplete() throws IOException {
  return job.isComplete();
}
/**
 * Determines whether the job has completed.
 *
 * <p>Delegates directly to the wrapped Hadoop {@code Job}.
 *
 * @return Whether the job has completed.
 * @throws IOException If there is an error querying the job.
 */
public boolean isComplete() throws IOException {
  return mJob.isComplete();
}
/**
 * Returns immediately whether the whole job is done yet or not.
 *
 * @return {@code true} if the underlying Hadoop job has completed
 * @throws IOException if the job status cannot be retrieved
 */
public synchronized boolean isComplete() throws IOException {
  return job.isComplete();
}
/**
 * Check if the job is completed.
 *
 * <p>Delegates directly to the wrapped Hadoop {@code Job}.
 *
 * @return {@code true} if the job has completed
 * @throws java.io.IOException if the job status cannot be retrieved
 */
@Override
public boolean isComplete() throws IOException {
  return delegateJob.isComplete();
}
/**
 * Returns immediately whether the whole job is done yet or not.
 *
 * <p>Fix: the interrupt flag is restored before the {@code InterruptedException} is
 * wrapped, so callers up the stack can still observe the interruption.
 *
 * @return {@code true} if the underlying Hadoop job has completed
 * @throws IOException if querying the job status fails or the thread was interrupted
 */
public synchronized boolean isComplete() throws IOException {
  try {
    return job.isComplete();
  } catch (InterruptedException ie) {
    // Preserve the interrupt status for the caller.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
/**
 * Requests shutdown: marks the stop flag and kills the MapReduce job if it was
 * launched and has not yet completed.
 *
 * <p>Fix: Guava's {@code Throwables.propagate} is deprecated; for a checked
 * {@code IOException} it simply wraps in {@code RuntimeException}, so that wrapping
 * is now done directly.
 */
@Override
protected void triggerShutdown() {
  try {
    stopRequested = true;
    if (job != null && !job.isComplete()) {
      job.killJob();
    }
  } catch (IOException e) {
    LOG.error("Failed to kill MapReduce job {}", context, e);
    throw new RuntimeException(e);
  }
}
private Job createJob(boolean complete, boolean successful) throws IOException, InterruptedException { // Create a stub Job that responds in a controlled way Job mockJob = mock(Job.class); when(mockJob.getConfiguration()).thenReturn(new Configuration()); when(mockJob.isComplete()).thenReturn(complete); when(mockJob.isSuccessful()).thenReturn(successful); return mockJob; }
/**
 * Requests shutdown: marks the stop flag and kills the MapReduce job if it was
 * launched and has not yet completed.
 *
 * <p>Fix: Guava's {@code Throwables.propagate} is deprecated; for a checked
 * {@code IOException} it simply wraps in {@code RuntimeException}, so that wrapping
 * is now done directly.
 */
@Override
protected void triggerShutdown() {
  try {
    stopRequested = true;
    if (job != null && !job.isComplete()) {
      job.killJob();
    }
  } catch (IOException e) {
    LOG.error("Failed to kill MapReduce job {}", context, e);
    throw new RuntimeException(e);
  }
}
float reduceProg = 0.0f; synchronized (this) { mrJobComplete = mrJob.isComplete(); org.apache.hadoop.mapreduce.JobStatus mrJobStatus = mrJob.getStatus(); mrJobState = mrJobStatus.getState();
private Job createJob(boolean complete, boolean successful) throws IOException, InterruptedException { // Create a stub Job that responds in a controlled way Job mockJob = mock(Job.class); when(mockJob.getConfiguration()).thenReturn(new Configuration()); when(mockJob.isComplete()).thenReturn(complete); when(mockJob.isSuccessful()).thenReturn(successful); return mockJob; }
@Override public Boolean get() { try { return client.getWorkloadJob() != null && client.getWorkloadJob().isComplete(); } catch (IOException | IllegalStateException e) { return false; } } }, 3000, 60000);