Canonical example by Tabnine.
/**
 * Drains the shared queue concurrently on a dedicated fixed-size pool.
 *
 * <p>Submits {@code threadCount} named workers and then shuts the pool
 * down; shutdown() does not block, so this method returns while workers
 * may still be draining.
 *
 * @param threadCount number of concurrent drain workers to start
 */
private void parallelDrainQueue(int threadCount) {
  ExecutorService pool = Executors.newFixedThreadPool(threadCount);
  for (int worker = 0; worker < threadCount; worker++) {
    pool.execute(new NamedRunnable("Crawler %s", worker) {
      @Override
      protected void execute() {
        try {
          drainQueue();
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    });
  }
  // Stop accepting new tasks; already-submitted workers run to completion.
  pool.shutdown();
}
/**
 * Starts the logging clients on the shared executor service.
 *
 * <p>Launches two TCP clients (ports 6666 and 6667) and two UDP clients.
 * NOTE(review): both UDP clients use port 6668 — presumably intentional
 * since multiple UDP senders can target one port, but verify.
 *
 * @throws IOException if any I/O error occurs.
 */
public void start() throws IOException {
  LOGGER.info("Starting logging clients");
  service.execute(new TcpLoggingClient("Client 1", 6666));
  service.execute(new TcpLoggingClient("Client 2", 6667));
  service.execute(new UdpLoggingClient("Client 3", 6668));
  service.execute(new UdpLoggingClient("Client 4", 6668));
}
/**
 * Runs the broker request against every segment concurrently and collects
 * the non-null per-segment results.
 *
 * <p>Fixes over the previous version: the redundant {@code return;} at the
 * end of the catch block is removed, and the ignored result of
 * {@code awaitTermination} is now honored — on timeout the remaining tasks
 * are cancelled so nothing keeps mutating the list after it is returned.
 *
 * @param query the raw query string (currently unused here; kept for callers)
 * @param brokerRequest the parsed request executed against each segment
 * @return a synchronized list of per-segment result tables (possibly empty)
 * @throws InterruptedException if interrupted while waiting for completion
 */
private List<ResultTable> processSegments(final String query, final BrokerRequest brokerRequest)
    throws InterruptedException {
  ExecutorService executorService = Executors.newFixedThreadPool(10);
  final List<ResultTable> resultTables = Collections.synchronizedList(new ArrayList<ResultTable>());
  for (final SegmentQueryProcessor segmentQueryProcessor : _segmentQueryProcessorMap.values()) {
    executorService.execute(new Runnable() {
      @Override
      public void run() {
        try {
          ResultTable resultTable = segmentQueryProcessor.process(brokerRequest);
          if (resultTable != null) {
            resultTables.add(resultTable);
          }
        } catch (Exception e) {
          LOGGER.error("Exception caught while processing segment '{}'.",
              segmentQueryProcessor.getSegmentName(), e);
        }
      }
    });
  }
  executorService.shutdown();
  // If the deadline passes, cancel outstanding work so no task can keep
  // appending to resultTables after we hand the list to the caller.
  if (!executorService.awaitTermination(_timeoutInSeconds, TimeUnit.SECONDS)) {
    executorService.shutdownNow();
  }
  return resultTables;
}
/**
 * Resets the coprocessor jar on each of the given HBase tables in parallel.
 *
 * <p>Worker count is capped at {@code MAX_THREADS} (twice the available
 * processors otherwise). Fix over the previous version: when the latch wait
 * is interrupted, the interrupt flag is restored so callers can observe the
 * interruption instead of it being silently swallowed.
 *
 * @param hbaseAdmin admin handle used by the workers
 * @param hdfsCoprocessorJar HDFS path of the coprocessor jar to install
 * @param tableNames tables to process
 * @return a pair of (successfully processed tables, failed tables)
 * @throws IOException if an I/O error occurs
 */
private static Pair<List<String>, List<String>> resetCoprocessorOnHTables(final Admin hbaseAdmin,
    final Path hdfsCoprocessorJar, List<String> tableNames) throws IOException {
  List<String> processedTables = Collections.synchronizedList(new ArrayList<String>());
  List<String> failedTables = Collections.synchronizedList(new ArrayList<String>());
  int nThread = Runtime.getRuntime().availableProcessors() * 2;
  if (nThread > MAX_THREADS) {
    nThread = MAX_THREADS;
  }
  logger.info("Use {} threads to do upgrade", nThread);
  ExecutorService coprocessorPool = Executors.newFixedThreadPool(nThread);
  CountDownLatch countDownLatch = new CountDownLatch(tableNames.size());
  for (final String tableName : tableNames) {
    coprocessorPool.execute(new ResetCoprocessorWorker(countDownLatch, hbaseAdmin,
        hdfsCoprocessorJar, tableName, processedTables, failedTables));
  }
  try {
    countDownLatch.await();
  } catch (InterruptedException e) {
    logger.error("reset coprocessor failed: ", e);
    // Preserve the interrupt status for the caller; swallowing it hides
    // the cancellation request.
    Thread.currentThread().interrupt();
  }
  coprocessorPool.shutdown();
  return new Pair<>(processedTables, failedTables);
}
/**
 * Blocks until the job executor has scheduled (started running) a task,
 * proving the executor is accepting work.
 *
 * <p>Fix over the previous version: the boolean result of
 * {@code latch.await} was ignored, so a 50-second timeout passed silently
 * and the method returned as if scheduling had succeeded. A timeout now
 * fails loudly.
 *
 * @throws Exception if interrupted, or if no task is scheduled in time
 */
private void waitUntilScheduled() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  JobConfig.getExecutorService().execute(new Runnable() {
    @Override
    public void run() {
      latch.countDown();
    }
  });
  if (!latch.await(50, TimeUnit.SECONDS)) {
    throw new IllegalStateException("Executor did not schedule the probe task within 50 seconds");
  }
}
}
readPool = Executors.newFixedThreadPool(readThreads, new ThreadFactoryBuilder().setNameFormat( "Reader=%d,bindAddress=" + bindAddress.getHostName() + Reader reader = new Reader(); readers[i] = reader; readPool.execute(reader); LOG.info(getName() + ": started " + readThreads + " reader(s) listening on port=" + port);
/**
 * Demonstrates {@link Executors#newSingleThreadExecutor()}: two tasks are
 * guaranteed to run sequentially on the same worker thread, then the pool
 * is shut down and awaited.
 *
 * <p>Fixes over the previous version: the user-visible typo
 * ("to." &rarr; "too.") is corrected, and the interrupt flag is restored
 * when the termination wait is interrupted.
 */
public static void usingSingleThreadExecutor() {
  System.out.println("=== SingleThreadExecutor ===");
  ExecutorService singleThreadExecutor = Executors.newSingleThreadExecutor();
  singleThreadExecutor.execute(() -> System.out.println("Print this."));
  singleThreadExecutor.execute(() -> System.out.println("and this one too."));
  singleThreadExecutor.shutdown();
  try {
    singleThreadExecutor.awaitTermination(4, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    // Re-assert the interrupt so callers can still observe it.
    Thread.currentThread().interrupt();
    e.printStackTrace();
  }
  System.out.println("\n\n");
}
this.numberOfFailures.set(0); this.operationTimes = new long[numRequests]; ExecutorService executor = Executors.newFixedThreadPool(numThreads); final CountDownLatch latch = new CountDownLatch(numRequests); final AtomicInteger index = new AtomicInteger(0); executor.execute(new Runnable() { latch.await(); } catch(InterruptedException e) { e.printStackTrace(); executor.shutdownNow(); try { executor.awaitTermination(3, TimeUnit.SECONDS); } catch(InterruptedException e) {} } finally {
final CountDownLatch beforeFunction = new CountDownLatch(1); executor.execute( new Runnable() { @Override executor.shutdown(); assertTrue(executor.awaitTermination(5, SECONDS));
/**
 * Downloads all configured libraries.
 *
 * <p>Creates the target directory if needed, submits one download task per
 * library, then waits up to one minute for the downloads to finish before
 * forcibly cancelling the remainder.
 *
 * <p>Fix over the previous version: when the termination wait is
 * interrupted, the interrupt flag is restored (it was silently consumed)
 * and outstanding downloads are cancelled.
 */
public void run() {
  if (!directory.isDirectory() && !directory.mkdirs()) {
    GlowServer.logger
        .log(Level.SEVERE, "Could not create libraries directory: " + directory);
  }
  for (Library library : libraries) {
    downloaderService.execute(new LibraryDownloader(library));
  }
  downloaderService.shutdown();
  try {
    if (!downloaderService.awaitTermination(1, TimeUnit.MINUTES)) {
      downloaderService.shutdownNow();
    }
  } catch (InterruptedException e) {
    GlowServer.logger.log(Level.SEVERE, "Library Manager thread interrupted: ", e);
    // Cancel remaining downloads and preserve the interrupt status.
    downloaderService.shutdownNow();
    Thread.currentThread().interrupt();
  }
}
/**
 * Submits {@code job} to the shared network-fetch pool, creating the pool
 * lazily on first use.
 *
 * <p>Fix over the previous version: the lazy initialization was not
 * thread-safe — two threads racing through the null check could each create
 * a pool, leaking one (its threads are non-trivial to reclaim). The method
 * is now {@code synchronized}, which is backward compatible for callers.
 *
 * @param job the task to run on the network pool
 */
public static synchronized void execute(Runnable job) {
  if (fetchExe == null) {
    fetchExe = Executors.newFixedThreadPool(NETWORK_POOL);
  }
  fetchExe.execute(job);
}
final CountDownLatch latch = new CountDownLatch(1); executor.execute(new Runnable() { @Override public void run() { while (!latch.await(1, TimeUnit.MILLISECONDS)) { long now = System.nanoTime(); timeoutNano -= (now - start); timeoutNano -= (System.nanoTime() - start); executor.awaitTermination(timeoutNano, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { return false;
/**
 * Loads ids 1..9 concurrently on a 10-thread pool.
 *
 * <p>Fix over the previous version: {@code awaitTermination} was called
 * without {@code shutdown()}, so it always waited out the full timeout and
 * returned {@code false} with the pool still alive. The pool is now shut
 * down first, so the wait returns as soon as the submitted tasks finish.
 * NOTE(review): {@code range(1, 10)} is exclusive of 10 — confirm skipping
 * id 10 is intentional.
 *
 * @throws InterruptedException if interrupted while waiting for the tasks
 */
@Test
public void testConcurrentLoading() throws InterruptedException {
  ExecutorService pool = Executors.newFixedThreadPool(10);
  IntStream
      .range(1, 10)
      .forEach(id -> pool.execute(() -> testBaz(id)));
  pool.shutdown();
  pool.awaitTermination(1000, TimeUnit.MILLISECONDS);
}
}
/**
 * Runs the given command, capturing stdout and stderr (merged) via a
 * background {@code StreamCollector}.
 *
 * <p>Fixes over the previous version: {@code executor.shutdown()} was never
 * called, so {@code awaitTermination} always blocked for the full 15
 * seconds even after the collector finished; and an interrupt was swallowed
 * without restoring the thread's interrupt flag.
 *
 * @param cmd the command and its arguments
 * @return the process output, or {@code null} on I/O error or interruption
 */
public static String execAndReturn(String[] cmd) {
  ExecutorService executor = Executors.newCachedThreadPool();
  try {
    ProcessBuilder builder = new ProcessBuilder(cmd);
    builder.redirectErrorStream(true);
    Process process = builder.start();
    StreamCollector collector = new StreamCollector(process.getInputStream());
    executor.execute(collector);
    process.waitFor();
    // Without shutdown(), awaitTermination would always wait the full
    // timeout; with it, the wait ends as soon as the collector is done.
    executor.shutdown();
    if (!executor.awaitTermination(15, TimeUnit.SECONDS)) {
      executor.shutdownNow();
      if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
        System.err.println("Stream collector did not terminate.");
      }
    }
    return collector.get();
  } catch (IOException | InterruptedException e) {
    if (e instanceof InterruptedException) {
      // Preserve the interrupt status rather than swallowing it.
      Thread.currentThread().interrupt();
    }
    executor.shutdownNow();
    return null;
  }
}
/**
 * Multi-threaded allocator stress test: one allocator thread per power-of-2
 * allocation size from 8 to 1024 bytes, plus one thread that periodically
 * dumps allocator state, all racing on a single {@code BuddyAllocator}.
 *
 * <p>Fix over the previous version: the executor was never shut down, so
 * its threads leaked across test runs. All tasks are submitted before
 * {@code shutdown()}, so the call cannot reject work.
 */
@Test(timeout = 200000)
public void testMtt() {
  final int baseAllocSizeLog2 = 3, maxAllocSizeLog2 = 10, totalSize = 8192,
      baseAllocSize = 1 << baseAllocSizeLog2, maxAllocSize = 1 << maxAllocSizeLog2;
  // One thread per size class, plus one for the dump task.
  final int threadCount = maxAllocSizeLog2 - baseAllocSizeLog2 + 1;
  final int iterCount = 500;
  final BuddyAllocator a = create(maxAllocSize, 4, totalSize, true, false);
  ExecutorService executor = Executors.newFixedThreadPool(threadCount + 1);
  CountDownLatch cdlIn = new CountDownLatch(threadCount), cdlOut = new CountDownLatch(1);
  @SuppressWarnings("unchecked")
  FutureTask<MttTestCallableResult>[] allocTasks = new FutureTask[threadCount];
  FutureTask<Void> dumpTask = createAllocatorDumpTask(a);
  for (int allocSize = baseAllocSize, i = 0; allocSize <= maxAllocSize; allocSize <<= 1, ++i) {
    allocTasks[i] = new FutureTask<>(new MttTestCallable(
        cdlIn, cdlOut, a, allocSize, totalSize / allocSize, iterCount));
    executor.execute(allocTasks[i]);
  }
  executor.execute(dumpTask);
  // All tasks are submitted; stop accepting work so the pool's threads
  // terminate once the test finishes instead of leaking.
  executor.shutdown();
  runMttTest(a, allocTasks, cdlIn, cdlOut, dumpTask, null, null, totalSize, maxAllocSize);
}
/**
 * Benchmark setup hook: marks the shared consumer as running, arms its
 * stop latch with one count per consumer, and launches {@code consumerCount}
 * copies of the consumer on the executor.
 */
@Setup(Level.Iteration)
public void startConsumers() {
  consumer.isRunning = true;
  consumer.stopped = new CountDownLatch(consumerCount);
  int remaining = consumerCount;
  while (remaining-- > 0) {
    consumerExecutor.execute(consumer);
  }
}
/**
 * Builds a mock {@code ExecutorService} that runs every task submitted via
 * {@code execute} synchronously on the caller's thread and always reports
 * successful termination from {@code awaitTermination}.
 *
 * @return the stubbed same-thread executor
 * @throws InterruptedException never; declared to satisfy the stubbed API
 */
private static ExecutorService sameThreadExecutor() throws InterruptedException {
  ExecutorService executor = immediateExecutor();
  doAnswer(invocation -> {
    Runnable task = invocation.getArgument(0);
    task.run();
    return null;
  }).when(executor).execute(any());
  when(executor.awaitTermination(anyLong(), any())).thenReturn(true);
  return executor;
}
/**
 * Demonstrates {@link Executors#newFixedThreadPool(int)}: submits 20
 * UUID-producing tasks to a 4-thread pool, then a final task that prints
 * each future's result (the collector is queued last, so the producing
 * tasks it waits on are ahead of it and will complete).
 *
 * <p>Fix over the previous version: both {@code InterruptedException}
 * handlers swallowed the interrupt; the thread's interrupt flag is now
 * restored so cancellation remains observable.
 */
public static void usingFixedThreadPool() {
  System.out.println("=== FixedThreadPool ===");
  ExecutorService fixedPool = Executors.newFixedThreadPool(4);
  List<Future<UUID>> uuids = new LinkedList<>();
  for (int i = 0; i < 20; i++) {
    Future<UUID> submitted = fixedPool.submit(() -> {
      UUID randomUUID = UUID.randomUUID();
      System.out.println("UUID " + randomUUID + " from " + Thread.currentThread().getName());
      return randomUUID;
    });
    uuids.add(submitted);
  }
  fixedPool.execute(() -> uuids.forEach((f) -> {
    try {
      System.out.println("Result " + f.get() + " from " + Thread.currentThread().getName());
    } catch (InterruptedException | ExecutionException e) {
      if (e instanceof InterruptedException) {
        // Preserve the interrupt status for the pool thread.
        Thread.currentThread().interrupt();
      }
      e.printStackTrace();
    }
  }));
  fixedPool.shutdown();
  try {
    fixedPool.awaitTermination(4, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    e.printStackTrace();
  }
  System.out.println("\n\n");
}