/**
 * Shutdown an {@link ExecutorService} gradually, first disabling new task submissions and
 * later cancelling existing tasks.
 *
 * <p>
 * This method calls {@link #shutdownExecutorService(ExecutorService, Optional, long, TimeUnit)}
 * with default timeout time {@link #EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT} and time unit
 * {@link #EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT_TIMEUNIT}.
 * </p>
 *
 * @param executorService the {@link ExecutorService} to shutdown
 * @param logger an {@link Optional} wrapping a {@link Logger} to be used during shutdown
 */
public static void shutdownExecutorService(ExecutorService executorService, Optional<Logger> logger) {
  // Delegate to the full overload using the class-level default grace period.
  shutdownExecutorService(executorService, logger, EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT,
      EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT_TIMEUNIT);
}
/**
 * Closes this instance by shutting down its executor service, allowing in-flight
 * tasks up to five seconds to finish before they are cancelled.
 *
 * @throws IOException declared by the {@code Closeable} contract
 */
@Override
public void close() throws IOException {
  final long gracePeriod = 5L;
  ExecutorsUtils.shutdownExecutorService(_execService, Optional.of(_log), gracePeriod, TimeUnit.SECONDS);
}
/**
 * Service shutdown hook: stops the executor that fetches job specs, using the
 * shared shutdown helper's default grace period.
 */
@Override
protected void shutDown() throws Exception {
  ExecutorsUtils.shutdownExecutorService(this.fetchJobSpecExecutor, Optional.of(LOGGER));
}
}
/**
 * Closes this {@link TaskScheduler}, ensuring that new tasks cannot be created
 * and cancelling existing tasks.
 *
 * @throws IOException if an I/O error occurs
 */
@Override
final void closeImpl() throws IOException {
  // Delegate teardown to the shared helper, which handles graceful then forced shutdown.
  ExecutorsUtils.shutdownExecutorService(this.executorService, Optional.of(LOGGER));
}
/**
 * Service shutdown hook: shuts down the task executor first, then the fork
 * executor. The {@code finally} block guarantees the fork executor is shut down
 * even if the task executor's shutdown throws.
 */
@Override
protected void shutDown() throws Exception {
  LOG.info("Stopping the task executor");
  try {
    ExecutorsUtils.shutdownExecutorService(this.taskExecutor, Optional.of(LOG));
  } finally {
    // Always attempt the second shutdown, regardless of the first one's outcome.
    ExecutorsUtils.shutdownExecutorService(this.forkExecutor, Optional.of(LOG));
  }
}
/**
 * Closes this instance by shutting down its executor service with the shared
 * helper's default grace period.
 *
 * @throws IOException declared by the {@code Closeable} contract
 */
@Override
public void close() throws IOException {
  ExecutorsUtils.shutdownExecutorService(this.exeSvc, Optional.of(LOG));
}
/**
 * Shuts the executor down immediately: a zero-length grace period means pending
 * tasks are cancelled rather than allowed to drain.
 */
public void closeNow() {
  final long noGrace = 0L;
  ExecutorsUtils.shutdownExecutorService(this.exeSvc, Optional.of(LOG), noGrace, TimeUnit.NANOSECONDS);
}
/**
 * Service shutdown hook: stops the task state tracker by shutting down its
 * metrics-updater executor.
 */
@Override
protected void shutDown() throws Exception {
  this.logger.info("Stopping the task state tracker");
  ExecutorsUtils.shutdownExecutorService(this.taskMetricsUpdaterExecutor, Optional.of(this.logger));
}
/**
 * Shuts down the job executor pool used by this instance.
 */
private void shutdownExecutors() {
  LOG.info("Shutting down Executors");
  ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG));
}
@Override public void stopImpl() { this.scheduledTask.get().cancel(false); this.scheduledTask = Optional.absent(); ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(log), 10, TimeUnit.SECONDS); // Report metrics one last time - this ensures any metrics values updated between intervals are reported report(true); }
/**
 * Wait till all registration requested submitted via {@link #register(HiveSpec)} to finish.
 *
 * @throws IOException if any registration failed or was interrupted.
 */
@Override
public void close() throws IOException {
  try {
    for (Map.Entry<String, Future<Void>> entry : this.futures.entrySet()) {
      try {
        entry.getValue().get();
      } catch (ExecutionException ee) {
        // Unwrap the ExecutionException so callers see the registration's own failure cause.
        throw new IOException("Failed to finish registration for " + entry.getKey(), ee.getCause());
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers further up the stack can observe it.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  } finally {
    // Shut the executor down whether or not every registration succeeded.
    ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(log));
  }
}
/**
 * Closes the cleaner: waits for the in-flight cleaning work to signal completion,
 * surfaces any failures collected during cleaning, and finally shuts down the
 * executor and closes held resources.
 *
 * @throws IOException if the wait for cleaning completion is interrupted
 */
@Override
public void close() throws IOException {
  try {
    // Block until the cleaning tasks count down the latch (when one was installed).
    if (this.finishCleanSignal.isPresent()) {
      this.finishCleanSignal.get().await();
    }
    // Any throwable recorded by a cleaning task marks the whole clean as failed;
    // log each one before raising a single aggregate failure.
    if (!this.throwables.isEmpty()) {
      for (Throwable t : this.throwables) {
        LOG.error("Failed clean due to ", t);
      }
      throw new RuntimeException("Clean failed for one or more datasets");
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to IOException.
    Thread.currentThread().interrupt();
    throw new IOException("Not all datasets finish cleanning", e);
  } finally {
    // Executor shutdown and closer cleanup run regardless of success or failure.
    ExecutorsUtils.shutdownExecutorService(this.service, Optional.of(LOG));
    this.closer.close();
  }
}
/**
 * Shuts down this service: stops the consumer (when present), shuts down the
 * executor with a 5-second grace period, and finally shuts down metrics reporting.
 * A metrics shutdown failure is logged but does not abort the shutdown sequence.
 */
@Override
public void shutDown() {
  if (this.consumer != null) {
    this.consumer.shutdown();
  }
  if (this.executor != null) {
    ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(log), 5000, TimeUnit.MILLISECONDS);
  }
  try {
    this.shutdownMetrics();
  } catch (IOException ioe) {
    // Pass the exception to the logger instead of discarding it, so the failure
    // cause (and stack trace) remains diagnosable.
    log.warn("Failed to shutdown metrics for " + this.getClass().getSimpleName(), ioe);
  }
}
/**
 * Service shutdown hook: when the spec-executor consumer is itself a
 * {@link Service}, stops it asynchronously and waits up to the configured timeout
 * for termination; then shuts down the job-spec fetch executor.
 */
@Override
protected void shutDown() throws Exception {
  if (this.specExecutorInstanceConsumer instanceof Service) {
    Service consumerService = (Service) this.specExecutorInstanceConsumer;
    consumerService.stopAsync().awaitTerminated(this.stopTimeoutSeconds, TimeUnit.SECONDS);
  }
  ExecutorsUtils.shutdownExecutorService(this.fetchJobSpecExecutor, Optional.of(LOGGER));
}
}
/**
 * Run the cleaner: submits one {@code CleanerRunner} per state store directory
 * under the root and waits for all of them to complete.
 *
 * @throws IOException if listing the state store root directory fails
 * @throws ExecutionException if a cleaner task fails or the wait is interrupted
 */
public void run() throws IOException, ExecutionException {
  FileStatus[] stateStoreDirs = this.fs.listStatus(this.stateStoreRootDir);
  if (stateStoreDirs == null || stateStoreDirs.length == 0) {
    LOGGER.warn("The state store root directory does not exist or is empty");
    return;
  }
  List<Future<?>> futures = Lists.newArrayList();
  for (FileStatus stateStoreDir : stateStoreDirs) {
    futures.add(this.cleanerRunnerExecutor
        .submit(new CleanerRunner(this.fs, stateStoreDir.getPath(), this.retention, this.retentionTimeUnit)));
  }
  try {
    for (Future<?> future : futures) {
      try {
        future.get();
      } catch (InterruptedException e) {
        // Restore the interrupt status before propagating so callers can observe it.
        Thread.currentThread().interrupt();
        throw new ExecutionException("Thread interrupted", e);
      }
    }
  } finally {
    // Shut the executor down even when a cleaner task fails, to avoid leaking threads.
    ExecutorsUtils.shutdownExecutorService(cleanerRunnerExecutor, Optional.of(LOGGER), 60, TimeUnit.SECONDS);
  }
}
// NOTE(review): fragment of a larger shutdown sequence — the enclosing method is
// outside this view. Shuts down the executor first, then detaches job listeners;
// presumably listener teardown must not race in-flight executor tasks — confirm.
ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(LOGGER));
closeJobListeners();
/**
 * Finds the copyable files under this dataset root: discovers dataset versions via
 * the configured version finder, narrows them with the version selection policy,
 * and generates {@code CopyableFile}s for each selected version in parallel.
 *
 * @param targetFs the target {@link FileSystem} the files will be copied to
 * @param configuration the copy configuration
 * @return the generated copyable files (empty when no dataset version exists)
 * @throws IOException if copyable file generation fails or is interrupted
 */
@Override
public Collection<CopyableFile> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration)
    throws IOException {
  log.info(String.format("Getting copyable files at root path: %s", this.datasetRoot));
  List<TimestampedDatasetVersion> versions = Lists.newArrayList(this.datasetVersionFinder.findDatasetVersions(this));
  if (versions.isEmpty()) {
    log.warn("No dataset version can be found. Ignoring.");
    return Lists.newArrayList();
  }
  Collection<TimestampedDatasetVersion> copyableVersions = this.versionSelectionPolicy.listSelectedVersions(versions);
  // Concurrent queue: generator tasks append from multiple executor threads.
  ConcurrentLinkedQueue<CopyableFile> copyableFileList = new ConcurrentLinkedQueue<>();
  List<Future<?>> futures = Lists.newArrayList();
  for (TimestampedDatasetVersion copyableVersion : copyableVersions) {
    futures.add(this.executor.submit(
        this.getCopyableFileGenetator(targetFs, configuration, copyableVersion, copyableFileList)));
  }
  try {
    for (Future<?> future : futures) {
      future.get();
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to IOException so callers
    // further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException("Failed to generate copyable files.", e);
  } catch (ExecutionException e) {
    throw new IOException("Failed to generate copyable files.", e);
  } finally {
    ExecutorsUtils.shutdownExecutorService(executor, Optional.of(log));
  }
  return copyableFileList;
}
/**
 * Cancels the compaction: kills every still-running Hadoop job registered in
 * {@code MRCompactor.RUNNING_MR_JOBS}, then — regardless of the outcome — shuts
 * down the job executor immediately and closes the verifier when one is present.
 *
 * @throws IOException if killing a Hadoop job fails
 */
@Override
public void cancel() throws IOException {
  try {
    for (Map.Entry<Dataset, Job> entry : MRCompactor.RUNNING_MR_JOBS.entrySet()) {
      Job hadoopJob = entry.getValue();
      if (hadoopJob.isComplete()) {
        continue;
      }
      LOG.info(String.format("Killing hadoop job %s for dataset %s", hadoopJob.getJobID(), entry.getKey()));
      hadoopJob.killJob();
    }
  } finally {
    try {
      // Zero-nanosecond grace period: cancel any outstanding executor work immediately.
      ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
    } finally {
      // Nested finally guarantees the verifier is closed even if the shutdown throws.
      if (this.verifier.isPresent()) {
        this.verifier.get().closeNow();
      }
    }
  }
}
// NOTE(review): fragment — the enclosing try/catch starts outside this view.
// Wraps the underlying failure and always shuts the executor down afterwards.
throw new IOException("Failed to copy file.", e);
} finally {
  ExecutorsUtils.shutdownExecutorService(executor, Optional.of(LOG));
/**
 * Stops the job scheduler: halts the path-alteration detector when file-based job
 * configuration is in use, interrupts every currently executing Quartz job, and
 * finally shuts down the job executor.
 */
@Override
protected void shutDown() throws Exception {
  LOG.info("Stopping the job scheduler");
  boolean usesFileConfig =
      this.properties.containsKey(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY)
          || this.properties.containsKey(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY);
  if (usesFileConfig) {
    this.pathAlterationDetector.stop(1000);
  }
  List<JobExecutionContext> currentExecutions = this.scheduler.getScheduler().getCurrentlyExecutingJobs();
  for (JobExecutionContext execution : currentExecutions) {
    this.scheduler.getScheduler().interrupt(execution.getFireInstanceId());
  }
  ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG));
}