private synchronized void initialize() { if ( threadCountSet == false ) { if ( query != null ) { Forest[] forests = getForestConfig().listForests(); logger.warn("threadCount not set--defaulting to number of forests ({})", forests.length); withThreadCount(forests.length); } else { int hostCount = clientList.get().size(); logger.warn("threadCount not set--defaulting to number of hosts ({})", hostCount); withThreadCount( hostCount ); } // now we've set the threadCount threadCountSet = true; } // If we are iterating and if we have the thread count to 1, we have a single thread acting as both // consumer and producer of the ThreadPoolExecutor queue. Hence, we produce till the maximum and start // consuming and produce again. Since the thread count is 1, there is no worry about thread utilization. if(getThreadCount() == 1) { isSingleThreaded = true; } logger.info("Starting job batchSize={}, threadCount={}, onUrisReady listeners={}, failure listeners={}", getBatchSize(), getThreadCount(), urisReadyListeners.size(), failureListeners.size()); threadPool = new QueryThreadPoolExecutor(getThreadCount(), this); }
private void shutdownIfAllForestsAreDone() { for ( AtomicBoolean isDone : forestIsDone.values() ) { // if even one isn't done, short-circuit out of this method and don't shutdown if ( isDone.get() == false ) return; } // if we made it this far, all forests are done. let's run the Job // completion listeners and shutdown. if(runJobCompletionListeners.compareAndSet(false, true)) runJobCompletionListeners(); threadPool.shutdown(); }
public void stop() { stopped.set(true); if ( threadPool != null ) threadPool.shutdownNow(); if(jobEndTime == null) jobEndTime = Calendar.getInstance(); if ( query != null ) { for ( AtomicBoolean isDone : forestIsDone.values() ) { // if even one isn't done, log a warning if ( isDone.get() == false ) { logger.warn("QueryBatcher instance \"{}\" stopped before all results were retrieved", getJobName()); break; } } } else { if ( iterator != null && iterator.hasNext() ) { logger.warn("QueryBatcher instance \"{}\" stopped before all results were processed", getJobName()); } } closeAllListeners(); }
private synchronized void startQuerying() { boolean consistentSnapshotFirstQueryHasRun = false; for ( Forest forest : getForestConfig().listForests() ) { QueryTask runnable = new QueryTask(getMoveMgr(), this, forest, query, 1, 1); if ( consistentSnapshot == true && consistentSnapshotFirstQueryHasRun == false ) { // let's run this first time in-line so we'll have the serverTimestamp set // before we launch all the parallel threads runnable.run(); consistentSnapshotFirstQueryHasRun = true; } else { threadPool.execute(runnable); } } }
/** Kicks off iterator-driven processing by submitting a single IteratorTask. */
private void startIterating() {
  IteratorTask task = new IteratorTask(this);
  threadPool.execute(task);
}
/**
 * Reports whether the job has fully stopped: the pool must exist and all of
 * its tasks must have completed following shutdown.
 */
@Override
public boolean isStopped() {
  if ( threadPool == null ) return false;
  return threadPool.isTerminated();
}
private void launchNextTask() { if ( stopped.get() == true ) { // we're stopping, so don't do anything more return; } AtomicBoolean isDone = forestIsDone.get(forest); // we made it to the end, so don't launch anymore tasks if ( isDone.get() == true ) return; long nextStart = start + getBatchSize(); threadPool.execute(new QueryTask(moveMgr, batcher, forest, query, forestBatchNum + 1, nextStart)); } };
/**
 * Blocks until every queued task has finished or the timeout elapses.
 *
 * @param timeout maximum time to wait
 * @param unit    unit of the timeout value
 * @return true if the pool terminated within the timeout, false otherwise
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
@Override
public boolean awaitCompletion(long timeout, TimeUnit unit) throws InterruptedException {
  requireJobStarted();
  boolean terminated = threadPool.awaitTermination(timeout, unit);
  return terminated;
}