/**
 * Get a new {@link java.util.concurrent.ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
 * to handle uncaught exceptions and the given thread name format.
 *
 * @param logger an {@link com.google.common.base.Optional} wrapping the {@link org.slf4j.Logger} that the
 *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
 * @param nameFormat an {@link com.google.common.base.Optional} wrapping a thread naming format
 * @return a new {@link java.util.concurrent.ThreadFactory}
 */
public static ThreadFactory newThreadFactory(Optional<Logger> logger, Optional<String> nameFormat) {
  // Delegate to the builder-based overload with a plain builder (non-daemon threads).
  ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
  return newThreadFactory(builder, logger, nameFormat);
}
/**
 * Get a new {@link java.util.concurrent.ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
 * to handle uncaught exceptions, with the default thread naming scheme.
 *
 * @param logger an {@link com.google.common.base.Optional} wrapping the {@link org.slf4j.Logger} that the
 *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
 * @return a new {@link java.util.concurrent.ThreadFactory}
 */
public static ThreadFactory newThreadFactory(Optional<Logger> logger) {
  // No explicit name format: created threads keep the factory's default names.
  Optional<String> noNameFormat = Optional.<String>absent();
  return newThreadFactory(logger, noNameFormat);
}
/**
 * Get a new {@link ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
 * to handle uncaught exceptions, uses the given thread name format, and produces daemon threads.
 *
 * @param logger an {@link Optional} wrapping the {@link Logger} that the
 *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
 * @param nameFormat an {@link Optional} wrapping a thread naming format
 * @return a new {@link ThreadFactory} whose threads are daemons
 */
public static ThreadFactory newDaemonThreadFactory(Optional<Logger> logger, Optional<String> nameFormat) {
  // Daemon threads will not keep the JVM alive on shutdown.
  ThreadFactoryBuilder daemonBuilder = new ThreadFactoryBuilder().setDaemon(true);
  return newThreadFactory(daemonBuilder, logger, nameFormat);
}
/**
 * Build a {@link ScheduledThreadPoolExecutor} that updates record-level and byte-level metrics.
 *
 * @return a single-threaded scheduled executor with named, exception-logging threads
 */
private static ScheduledThreadPoolExecutor buildWriterMetricsUpdater() {
  // Single thread is enough for periodic metric refreshes.
  java.util.concurrent.ThreadFactory updaterThreadFactory =
      ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("WriterMetricsUpdater-%d"));
  return new ScheduledThreadPoolExecutor(1, updaterThreadFactory);
}
public TimeBasedLimiter(long timeLimit, TimeUnit timeUnit) { this.timeLimit = timeLimit; this.timeUnit = timeUnit; this.flagFlippingExecutor = new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("TimeBasedThrottler"))); }
public ParallelJobListener(List<JobListener> jobListeners) { this.jobListeners = jobListeners; this.executor = Executors.newCachedThreadPool( ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelJobListener"))); this.completionService = new ExecutorCompletionService<>(this.executor); }
/**
 * Get a single-threaded {@link ExecutorService} whose thread is named after the given logger
 * and whose uncaught exceptions are logged to it.
 *
 * @param log the logger used for both the thread name prefix and uncaught-exception logging
 * @return a new single-threaded executor
 */
public static ExecutorService getDefaultExecutor(Logger log) {
  // Thread names look like "<logger-name>-0", "<logger-name>-1", ...
  String nameFormat = log.getName() + "-%d";
  return Executors.newSingleThreadExecutor(
      ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of(nameFormat)));
}
/**
 * Schedule the sweeper and stability checkers.
 * Idempotent: invoking again after a successful start is a no-op.
 */
public synchronized void start() {
  if (_started.get()) {
    // Already running: scheduling again would duplicate the periodic tasks.
    return;
  }
  _executorService = new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(
      Optional.of(LoggerFactory.getLogger(FineGrainedWatermarkTracker.class))));
  _executorService.scheduleAtFixedRate(_sweeper, 0, _sweepIntervalMillis, TimeUnit.MILLISECONDS);
  _executorService.scheduleAtFixedRate(_stabilityChecker, 0, _stabilityCheckIntervalMillis,
      TimeUnit.MILLISECONDS);
  _started.set(true);
}
/**
 * Start this driver on a dedicated, exception-logging thread and return immediately.
 *
 * @throws JobException declared for subclasses; this implementation does not throw it
 */
protected void startAsync() throws JobException {
  _log.info("Starting " + getClass().getSimpleName());
  // A one-off thread (not a pool) runs the launcher's execution loop.
  java.util.concurrent.ThreadFactory driverThreadFactory =
      ExecutorsUtils.newThreadFactory(Optional.of(_log), Optional.of("job-launcher-execution-driver"));
  Thread driverThread = driverThreadFactory.newThread(this);
  driverThread.start();
}
public ParallelRunner(int threads, FileSystem fs, FailPolicy failPolicy) { this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(threads, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelRunner")))); this.fs = fs; this.failPolicy = failPolicy; }
public AbstractTaskStateTracker(int coreThreadPoolSize, Logger logger) { Preconditions.checkArgument(coreThreadPoolSize > 0, "Thread pool size should be positive"); this.taskMetricsUpdaterExecutor = ExecutorsUtils.loggingDecorator( new ScheduledThreadPoolExecutor(coreThreadPoolSize, ExecutorsUtils.newThreadFactory(Optional.of(logger), Optional.of("TaskStateTracker-%d")))); this.logger = logger; }
/**
 * Create a manager that periodically fetches job configurations.
 *
 * @param eventBus the event bus used to publish configuration changes
 * @param config the application config; may override the refresh interval
 */
public AWSJobConfigurationManager(EventBus eventBus, Config config) {
  super(eventBus, config);
  this.jobConfFiles = Maps.newHashMap();
  // Use the configured refresh interval when present, otherwise the default.
  this.refreshIntervalInSeconds = config.hasPath(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL)
      ? config.getDuration(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL, TimeUnit.SECONDS)
      : DEFAULT_JOB_CONF_REFRESH_INTERVAL;
  this.fetchJobConfExecutor = Executors.newSingleThreadScheduledExecutor(
      ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("FetchJobConfExecutor")));
}
/**
 * Lazily create and cache the executor service used by this metric context.
 * The pool exits the JVM after 5 idle minutes and its threads are named after this context.
 *
 * @return the (possibly newly created) cached {@link ExecutorService}
 */
private synchronized ExecutorService getExecutorService() {
  if (this.executorServiceOptional.isPresent()) {
    return this.executorServiceOptional.get();
  }
  // First use: build an exiting cached pool so it never blocks JVM shutdown.
  ExecutorService service = MoreExecutors.getExitingExecutorService(
      (ThreadPoolExecutor) Executors.newCachedThreadPool(ExecutorsUtils.newThreadFactory(
          Optional.of(LOG), Optional.of("MetricContext-" + getName() + "-%d"))),
      5, TimeUnit.MINUTES);
  this.executorServiceOptional = Optional.of(service);
  return service;
}
/**
 * Create a watermark manager that periodically commits watermarks from multiple writers.
 *
 * @param storage where watermarks are persisted; must not be null
 * @param commitIntervalMillis period between watermark commits, in milliseconds
 * @param logger optional logger; a class-scoped default is used when absent
 */
public MultiWriterWatermarkManager(WatermarkStorage storage, long commitIntervalMillis, Optional<Logger> logger) {
  Preconditions.checkArgument(storage != null, "WatermarkStorage cannot be null");
  _watermarkStorage = storage;
  _commitIntervalMillis = commitIntervalMillis;
  _watermarkAwareWriters = new ConcurrentLinkedQueue<>();
  // Fall back to this class's own logger when the caller did not supply one.
  _logger = logger.or(LoggerFactory.getLogger(MultiWriterWatermarkManager.class));
  _watermarkCommitThreadPool = new ScheduledThreadPoolExecutor(1,
      ExecutorsUtils.newThreadFactory(logger, Optional.of("WatermarkManager-%d")));
  _retrievalStatus = new RetrievalStatus();
  _commitStatus = new CommitStatus();
}
/**
 * Create a watermark manager that commits watermarks reported by a fine-grained tracker.
 *
 * @param storage where watermarks are persisted; must not be null
 * @param watermarkTracker the tracker supplying committable watermarks; must not be null
 * @param commitIntervalMillis period between watermark commits, in milliseconds
 * @param logger optional logger; a class-scoped default is used when absent
 */
public TrackerBasedWatermarkManager(WatermarkStorage storage, FineGrainedWatermarkTracker watermarkTracker,
    long commitIntervalMillis, Optional<Logger> logger) {
  Preconditions.checkArgument(storage != null, "WatermarkStorage cannot be null");
  Preconditions.checkArgument(watermarkTracker != null, "WatermarkTracker cannot be null");
  _watermarkStorage = storage;
  _watermarkTracker = watermarkTracker;
  _commitIntervalMillis = commitIntervalMillis;
  // Fall back to this class's own logger when the caller did not supply one.
  _logger = logger.or(LoggerFactory.getLogger(TrackerBasedWatermarkManager.class));
  _watermarkCommitThreadPool = new ScheduledThreadPoolExecutor(1,
      ExecutorsUtils.newThreadFactory(logger, Optional.of("WatermarkManager-%d")));
  _retrievalStatus = new RetrievalStatus();
  _commitStatus = new CommitStatus();
}
/**
 * Create an asynchronous trash that delegates deletions to a background scaling pool.
 *
 * @param fs the file system whose paths are moved to trash
 * @param properties configuration; may set the deleting-thread cap via {@code MAX_DELETING_THREADS_KEY}
 * @param user the user the proxied trash acts on behalf of
 * @throws IOException if the underlying proxied trash cannot be created
 */
public AsyncTrash(FileSystem fs, Properties properties, String user) throws IOException {
  // Pool size cap comes from configuration when present, otherwise the default.
  int maxDeletingThreads = properties.containsKey(MAX_DELETING_THREADS_KEY)
      ? Integer.parseInt(properties.getProperty(MAX_DELETING_THREADS_KEY))
      : DEFAULT_MAX_DELETING_THREADS;
  this.innerTrash = TrashFactory.createProxiedTrash(fs, properties, user);
  // Exiting + logging wrappers: the pool won't block JVM shutdown, and delete failures get logged.
  this.executor = ExecutorsUtils.loggingDecorator(MoreExecutors.getExitingExecutorService(
      ScalingThreadPoolExecutor.newScalingThreadPool(0, maxDeletingThreads, 100,
          ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("Async-trash-delete-pool-%d")))));
}
protected HiveRegister(State state) { this.props = new HiveRegProps(state); this.hiveDbRootDir = this.props.getDbRootDir(); this.executor = ExecutorsUtils.loggingDecorator( ScalingThreadPoolExecutor.newScalingThreadPool(0, this.props.getNumThreads(), TimeUnit.SECONDS.toMillis(10), ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of(getClass().getSimpleName())))); }
public ParallelRequester(BatchedPermitsRequester container) { this.container = container; this.executorService = Executors.newCachedThreadPool(ExecutorsUtils.newThreadFactory(Optional.<Logger>absent(), Optional.of("parallel-requester-%d"))); }
private RootMetricContext(List<Tag<?>> tags) throws NameConflictException { super(ROOT_METRIC_CONTEXT, null, tags, true); this.innerMetricContexts = Sets.newConcurrentHashSet(); this.referenceQueue = new ReferenceQueue<>(); this.referenceQueueExecutorService = ExecutorsUtils.loggingDecorator(MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("GobblinMetrics-ReferenceQueue"))))); this.referenceQueueExecutorService.scheduleWithFixedDelay(new CheckReferenceQueue(), 0, 2, TimeUnit.SECONDS); this.reporters = Sets.newConcurrentHashSet(); this.reportingStarted = false; addShutdownHook(); }
/** * @param state This is a Job State */ public HiveRegistrationPublisher(State state) { super(state); this.hiveRegister = this.closer.register(HiveRegister.get(state)); this.hivePolicyExecutor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(new HiveRegProps(state).getNumThreads(), ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("HivePolicyExecutor-%d")))); this.metricContext = Instrumented.getMetricContext(state, HiveRegistrationPublisher.class); isPathDedupeEnabled = state.getPropAsBoolean(PATH_DEDUPE_ENABLED, this.DEFAULT_PATH_DEDUPE_ENABLED); }