ThreadPoolExecutor
Code Index

How to use ThreadPoolExecutor in java.util.concurrent

Best Java code snippets using java.util.concurrent.ThreadPoolExecutor (Showing top 20 results out of 18,171)

Refine search

  • BlockingQueue
  • AtomicInteger
  • RejectedExecutionException
  • ReentrantLock
  • AtomicBoolean
  • HashSet
  • Future
origin: wildfly/wildfly

protected ThreadPoolExecutor createThreadPool() {
  ThreadPoolExecutor threadPool=new ThreadPoolExecutor(0, max_pool, pool_thread_keep_alive,
                             TimeUnit.MILLISECONDS, new SynchronousQueue<>());
  ThreadFactory factory=new ThreadFactory() {
    private final AtomicInteger thread_id=new AtomicInteger(1);
    public Thread newThread(final Runnable command) {
      return getThreadFactory().newThread(command, "StreamingStateTransfer-sender-" + thread_id.getAndIncrement());
    }
  };
  threadPool.setRejectedExecutionHandler(new ShutdownRejectedExecutionHandler(threadPool.getRejectedExecutionHandler()));
  threadPool.setThreadFactory(factory);
  return threadPool;
}
origin: apache/incubator-dubbo

  @Override
  public void execute(Runnable command) {
    if (command == null) {
      throw new NullPointerException();
    }
    // do not increment in method beforeExecute!
    submittedTaskCount.incrementAndGet();
    try {
      super.execute(command);
    } catch (RejectedExecutionException rx) {
      // retry to offer the task into queue.
      final TaskQueue queue = (TaskQueue) super.getQueue();
      try {
        if (!queue.retryOffer(command, 0, TimeUnit.MILLISECONDS)) {
          submittedTaskCount.decrementAndGet();
          throw new RejectedExecutionException("Queue capacity is full.", rx);
        }
      } catch (InterruptedException x) {
        submittedTaskCount.decrementAndGet();
        throw new RejectedExecutionException(x);
      }
    } catch (Throwable t) {
      // decrease any way
      submittedTaskCount.decrementAndGet();
      throw t;
    }
  }
}
origin: thinkaurelius/titan

  @Override
  public void close() throws Exception {
    processor.shutdown();
    processor.awaitTermination(shutdownWaitMS,TimeUnit.MILLISECONDS);
    if (!processor.isTerminated()) {
      //log.error("Processor did not terminate in time");
      processor.shutdownNow();
    }

  }
}
origin: apache/incubator-dubbo

@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
  String msg = String.format("Thread pool is EXHAUSTED!" +
          " Thread Name: %s, Pool Size: %d (active: %d, core: %d, max: %d, largest: %d), Task: %d (completed: %d)," +
          " Executor status:(isShutdown:%s, isTerminated:%s, isTerminating:%s), in %s://%s:%d!",
      threadName, e.getPoolSize(), e.getActiveCount(), e.getCorePoolSize(), e.getMaximumPoolSize(), e.getLargestPoolSize(),
      e.getTaskCount(), e.getCompletedTaskCount(), e.isShutdown(), e.isTerminated(), e.isTerminating(),
      url.getProtocol(), url.getIp(), url.getPort());
  logger.warn(msg);
  dumpJStack();
  throw new RejectedExecutionException(msg);
}
origin: skylot/jadx

  @Override
  public Boolean call() throws Exception {
    runJob();
    executor.shutdown();
    return executor.awaitTermination(5, TimeUnit.DAYS);
  }
});
origin: google/sagetv

Set<String> noProgramDetails = new HashSet<>();
Set<String> needSeriesDetails = new HashSet<>();
Set<SDPerson> addedPeople = new HashSet<>();
final AtomicInteger totalPeople = new AtomicInteger(0);
BlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(9);
executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
// ...
executor.execute(runnable);
// ...
executor.shutdown();
executor.awaitTermination(5, TimeUnit.MINUTES);
remainingImports = queue.size();
peopleSize = totalPeople.get();
if (Sage.DBG) System.out.println("SDEPG Imported images for " +
    peopleSize + " pe" + (peopleSize == 1 ? "rson" : "ople") +
    // (remainder of the log statement is elided in this snippet)
origin: fbacchella/jrds

public void collectAll() {
  // ... (condition elided in this snippet)
    return;
  final AtomicInteger counter = new AtomicInteger(0);
  // ...
  try {
    if(isCollectRunning()) {
      List<Future<Object>> scheduled = tpool.invokeAll(toSchedule, getStep() - getTimeout() * 2, TimeUnit.SECONDS);
      running.addAll(scheduled);
      tpool.shutdown();
      tpool.awaitTermination(getStep() - getTimeout() * 2, TimeUnit.SECONDS);
      // ...
      if(!tpool.isTerminated()) {
        // ...
        emergencystop = !tpool.awaitTermination(getTimeout(), TimeUnit.SECONDS);
        // ...
      }
    }
  } catch (InterruptedException e) {
    log(Level.INFO, "Collect interrupted in last chance");
    // ...
    log(Level.INFO, "Some task still alive, needs to be killed");
    tpool.shutdownNow();
    dumpCollectHanged();
    // ...
  } finally {
    synchronized (running) {
      tpool.shutdown();
      // ...
      tpool = null;
      // ... (remainder of the method elided in this snippet)
    }
  }
}
origin: apache/activemq

final void readCheck() {
  int currentCounter = next.getReceiveCounter();
  int previousCounter = lastReceiveCounter.getAndSet(currentCounter);
  if (inReceive.get() || currentCounter != previousCounter) {
    LOG.trace("A receive is in progress, skipping read check.");
    return;
  }
  if (!commandReceived.get() && monitorStarted.get() && !ASYNC_TASKS.isShutdown()) {
    LOG.debug("No message received since last read check for {}. Throwing InactivityIOException.", this);
    try {
      ASYNC_TASKS.execute(new Runnable() {
        @Override
        public void run() {
          // ... (body elided in this snippet)
        }
      });
    } catch (RejectedExecutionException ex) {
      if (!ASYNC_TASKS.isShutdown()) {
        LOG.error("Async read check was rejected from the executor: ", ex);
        throw ex;
      }
    }
  }
}
origin: hibernate/hibernate-search

@Test
public void testPropertiesIndexing() throws InterruptedException {
  SearchIntegrator integrator = sfHolder.getSearchFactory();
  ThreadPoolExecutor threadPool = Executors.newFixedThreadPool( THREAD_NUMBER, "ReadWriteParallelismTest" );
  for ( int i = 0; i < THREAD_NUMBER; i++ ) {
    threadPool.execute( new Task( integrator, i ) );
  }
  threadPool.shutdown();
  //Time to warmup only:
  threadPool.awaitTermination( WARM_UP_SECONDS, TimeUnit.SECONDS );
  System.out.println( "Warmup complete. Start measuring now.." );
  //Start measuring:
  cyclesCompleted.set( 0 );
  long startMeasurementTime = System.nanoTime();
  threadPool.awaitTermination( FULL_RUN_SECONDS, TimeUnit.SECONDS );
  int doneCycles = cyclesCompleted.get();
  long endMeasurementTime = System.nanoTime();
  Assert.assertFalse( "Some failure happened in Task execution", failures.get() );
  long totalTime = endMeasurementTime - startMeasurementTime;
  long millisecondsElapsedTime = TimeUnit.MILLISECONDS.convert( totalTime, TimeUnit.NANOSECONDS );
  System.out.println( "Completed " + doneCycles + " in " + millisecondsElapsedTime + " milliseconds" );
  running.set( false );
}
origin: apache/cxf

ex.execute(r);
if (addWorkerMethod != null
  && !ex.getQueue().isEmpty()
  && this.approxThreadCount < highWaterMark
  && addThreadLock.tryLock()) {
  try {
    mainLock.lock();
    try {
      int ps = this.getPoolSize();
      int sz = executor.getQueue().size();
      int sz2 = this.getActiveCount();
      mainLock.unlock();
origin: pwm-project/pwm

void writeUserOrgChartDetailToCsv(
    final CSVPrinter csvPrinter,
    final UserIdentity userIdentity,
    final int depth
)
{
  final Instant startTime = Instant.now();
  LOGGER.trace( pwmRequest, () -> "beginning csv export starting with user " + userIdentity.toDisplayString() + " and depth of " + depth );
  final ThreadPoolExecutor executor = pwmRequest.getPwmApplication().getPeopleSearchService().getJobExecutor();
  final AtomicInteger rowCounter = new AtomicInteger( 0 );
  final OrgChartExportState orgChartExportState = new OrgChartExportState(
      executor,
      csvPrinter,
      rowCounter,
      Collections.singleton( OrgChartExportState.IncludeData.displayForm )
  );
  final OrgChartCsvRowOutputJob job = new OrgChartCsvRowOutputJob( orgChartExportState, userIdentity, depth, null );
  executor.execute( job );
  final TimeDuration maxDuration = peopleSearchConfiguration.getExportCsvMaxDuration();
  JavaHelper.pause( maxDuration.asMillis(), 1000, o -> ( executor.getQueue().size() + executor.getActiveCount() <= 0 ) );
  final TimeDuration timeDuration = TimeDuration.fromCurrent( startTime );
  LOGGER.trace( pwmRequest, () -> "completed csv export of " + rowCounter.get() + " records in " + timeDuration.asCompactString() );
}
origin: NetApp/NetApp-Hadoop-NFS-Connector

this.readBlockSizeBits = store.getReadSizeBits();
this.splitSize = splitSize;
this.closed = new AtomicBoolean(false);
this.ongoing = new ConcurrentHashMap<>(DEFAULT_PREFETCH_POOL_SIZE);
this.cache = new ConcurrentHashMap<>(DEFAULT_CACHE_SIZE_IN_BLOCKS);
this.statistics
= new StreamStatistics(NFSBufferedInputStream.class + pathString, streamId.getAndIncrement(),
  true);
this.executors = new ThreadPoolExecutor(DEFAULT_PREFETCH_POOL_SIZE, MAX_PREFETCH_POOL_SIZE, 5, TimeUnit.SECONDS,
  new LinkedBlockingDeque<Runnable>(1024), new ThreadPoolExecutor.CallerRunsPolicy());
origin: alipay/sofa-rpc

@Override
public void destroy() {
  if (!started) {
    return;
  }
  int stopTimeout = serverConfig.getStopTimeout();
  if (stopTimeout > 0) { // need to wait before shutting down
    AtomicInteger count = boltServerProcessor.processingCount;
    // there are requests still being processed, or requests waiting in the queue
    if (count.get() > 0 || bizThreadPool.getQueue().size() > 0) {
      long start = RpcRuntimeContext.now();
      if (LOGGER.isInfoEnabled()) {
        LOGGER.info("There are {} call in processing and {} call in queue, wait {} ms to end",
          count, bizThreadPool.getQueue().size(), stopTimeout);
      }
      while ((count.get() > 0 || bizThreadPool.getQueue().size() > 0)
        && RpcRuntimeContext.now() - start < stopTimeout) { // wait for responses to be returned
        try {
          Thread.sleep(10);
        } catch (InterruptedException ignore) {
        }
      }
    } // check for outstanding requests before shutting down?
  }
  // shut down the thread pool
  bizThreadPool.shutdown();
  stop();
}
origin: net.therore/therore-concurrent

private void addThread(Runnable firstTask) {
  FlowControlWrapper w = new FlowControlWrapper(firstTask);
  final ReentrantLock mainLock = this.mainLock;
  mainLock.lock();
  try {
    activeCount.incrementAndGet();
  } finally {
    mainLock.unlock();
  }
  coreExecutorService.execute(w);
}    
origin: com.torodb.torod/db-executor

@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
  try {
    lock.lock();
    try {
      while (queue.size() >= queueSize) {
        insertionsAllowed.await();
      }
      executor.execute(r);
    }
    finally {
      lock.unlock();
    }
  }
  catch (InterruptedException ex) {
    throw new RuntimeException(ex);
  }
}
origin: qunarcorp/qmq

public ActorSystem(String name, int threads, boolean fair) {
  this.name = name;
  this.actorsCount = new AtomicInteger();
  BlockingQueue<Runnable> queue = fair ? new PriorityBlockingQueue<>() : new LinkedBlockingQueue<>();
  this.executor = new ThreadPoolExecutor(threads, threads, 60, TimeUnit.MINUTES, queue, new NamedThreadFactory("actor-sys-" + name));
  this.actors = Maps.newConcurrentMap();
  QMon.dispatchersGauge(name, actorsCount::doubleValue);
  QMon.actorSystemQueueGauge(name, () -> (double) executor.getQueue().size());
}
origin: wycm/zhihu-crawler

  Connection cn = getConnection();
  if (zhiHuDao1.insertUser(cn, u)) {
    parseUserCount.incrementAndGet();
    if (zhiHuHttpClient.getDetailListPageThreadPool().getQueue().size() > 1000) {
      continue;
    }
    // ... (start of a condition elided in this snippet, continued below)
        zhiHuHttpClient.getDetailListPageThreadPool().getActiveCount() == 1) {
      zhiHuHttpClient.getDetailListPageThreadPool().execute(new DetailListPageTask(request, true));
    }
    // ...
  } else if (!Config.dbEnable || zhiHuHttpClient.getDetailListPageThreadPool().getActiveCount() == 1) {
    parseUserCount.incrementAndGet();
    for (int j = 0; j < u.getFollowees() / 20; j++) {
      String nextUrl = String.format(USER_FOLLOWEES_URL, u.getUserToken(), j * 20);
      HttpGet request = new HttpGet(nextUrl);
      zhiHuHttpClient.getDetailListPageThreadPool().execute(new DetailListPageTask(request, true));
    }
  }
origin: Netflix/eureka

/* visible for testing */ boolean doWarmUp() {
  Future future = null;
  try {
    future = threadPoolExecutor.submit(updateTask);
    future.get(warmUpTimeoutMs, TimeUnit.MILLISECONDS);  // block until done or timeout
    return true;
  } catch (Exception e) {
    logger.warn("Best effort warm up failed", e);
  } finally {
    if (future != null) {
      future.cancel(true);
    }
  }
  return false;
}
origin: weibocom/motan

private void rejectMessage(ChannelHandlerContext ctx, NettyMessage msg) {
  if (msg.isRequest()) {
    DefaultResponse response = new DefaultResponse();
    response.setRequestId(msg.getRequestId());
    response.setException(new MotanServiceException("process thread pool is full, reject by server: " + ctx.channel().localAddress(), MotanErrorMsgConstant.SERVICE_REJECT));
    sendResponse(ctx, response);
    LoggerUtil.error("process thread pool is full, reject, active={} poolSize={} corePoolSize={} maxPoolSize={} taskCount={} requestId={}",
        threadPoolExecutor.getActiveCount(), threadPoolExecutor.getPoolSize(), threadPoolExecutor.getCorePoolSize(),
        threadPoolExecutor.getMaximumPoolSize(), threadPoolExecutor.getTaskCount(), msg.getRequestId());
    rejectCounter.incrementAndGet();
  }
}
origin: apache/rocketmq

public DefaultMQProducerImpl(final DefaultMQProducer defaultMQProducer, RPCHook rpcHook) {
  this.defaultMQProducer = defaultMQProducer;
  this.rpcHook = rpcHook;
  this.asyncSenderThreadPoolQueue = new LinkedBlockingQueue<Runnable>(50000);
  this.defaultAsyncSenderExecutor = new ThreadPoolExecutor(
    Runtime.getRuntime().availableProcessors(),
    Runtime.getRuntime().availableProcessors(),
    1000 * 60,
    TimeUnit.MILLISECONDS,
    this.asyncSenderThreadPoolQueue,
    new ThreadFactory() {
      private AtomicInteger threadIndex = new AtomicInteger(0);
      @Override
      public Thread newThread(Runnable r) {
        return new Thread(r, "AsyncSenderExecutor_" + this.threadIndex.incrementAndGet());
      }
    });
}
java.util.concurrent.ThreadPoolExecutor

Javadoc

An ExecutorService that executes each submitted task using one of possibly several pooled threads, normally configured using Executors factory methods.

Thread pools address two different problems: they usually provide improved performance when executing large numbers of asynchronous tasks, due to reduced per-task invocation overhead, and they provide a means of bounding and managing the resources, including threads, consumed when executing a collection of tasks. Each ThreadPoolExecutor also maintains some basic statistics, such as the number of completed tasks.

To be useful across a wide range of contexts, this class provides many adjustable parameters and extensibility hooks. However, programmers are urged to use the more convenient Executors factory methods Executors#newCachedThreadPool (unbounded thread pool, with automatic thread reclamation), Executors#newFixedThreadPool (fixed size thread pool) and Executors#newSingleThreadExecutor (single background thread), which preconfigure settings for the most common usage scenarios. Otherwise, use the following guide when manually configuring and tuning this class.

Core and maximum pool sizes. A ThreadPoolExecutor will automatically adjust the pool size (see #getPoolSize) according to the bounds set by corePoolSize (see #getCorePoolSize) and maximumPoolSize (see #getMaximumPoolSize). When a new task is submitted in method #execute(Runnable), and fewer than corePoolSize threads are running, a new thread is created to handle the request, even if other worker threads are idle. If there are more than corePoolSize but fewer than maximumPoolSize threads running, a new thread will be created only if the queue is full. By setting corePoolSize and maximumPoolSize the same, you create a fixed-size thread pool. By setting maximumPoolSize to an essentially unbounded value such as Integer.MAX_VALUE, you allow the pool to accommodate an arbitrary number of concurrent tasks. Most typically, core and maximum pool sizes are set only upon construction, but they may also be changed dynamically using #setCorePoolSize and #setMaximumPoolSize.

On-demand construction. By default, even core threads are initially created and started only when new tasks arrive, but this can be overridden dynamically using method #prestartCoreThread or #prestartAllCoreThreads. You probably want to prestart threads if you construct the pool with a non-empty queue.

Creating new threads. New threads are created using a ThreadFactory. If not otherwise specified, an Executors#defaultThreadFactory is used, which creates threads that all belong to the same ThreadGroup, with the same NORM_PRIORITY priority and non-daemon status. By supplying a different ThreadFactory, you can alter the thread's name, thread group, priority, daemon status, etc. If a ThreadFactory fails to create a thread when asked, by returning null from newThread, the executor will continue, but might not be able to execute any tasks.

Keep-alive times. If the pool currently has more than corePoolSize threads, excess threads will be terminated if they have been idle for more than the keepAliveTime (see #getKeepAliveTime(TimeUnit)). This provides a means of reducing resource consumption when the pool is not being actively used. If the pool becomes more active later, new threads will be constructed. This parameter can also be changed dynamically using method #setKeepAliveTime(long,TimeUnit). Using a value of Long.MAX_VALUE TimeUnit#NANOSECONDS effectively prevents idle threads from ever terminating prior to shutdown. By default, the keep-alive policy applies only when there are more than corePoolSize threads, but method #allowCoreThreadTimeOut(boolean) can be used to apply this time-out policy to core threads as well, as long as the keepAliveTime value is non-zero.

Queuing. Any BlockingQueue may be used to transfer and hold submitted tasks. The use of this queue interacts with pool sizing (a brief construction sketch follows the list below):

  • If fewer than corePoolSize threads are running, the Executor always prefers adding a new thread rather than queuing.
  • If corePoolSize or more threads are running, the Executor always prefers queuing a request rather than adding a new thread.
  • If a request cannot be queued, a new thread is created unless this would exceed maximumPoolSize, in which case, the task will be rejected.
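As an illustrative sketch of the sizing rules above (the class name, pool sizes, timeout, and queue capacity are arbitrary choices for this example, not taken from any snippet on this page):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SizingSketch {
  public static void main(String[] args) throws InterruptedException {
    // 2 core threads, at most 4 threads in total, 30s keep-alive for the
    // threads above the core, and a bounded queue of 100 tasks. Extra
    // threads are created only once the queue is full.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        2, 4, 30, TimeUnit.SECONDS, new ArrayBlockingQueue<>(100));

    pool.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));

    // Sizes can also be adjusted after construction.
    pool.setMaximumPoolSize(8);

    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}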
There are three general strategies for queuing (a sketch follows this list):
  1. Direct handoffs. A good default choice for a work queue is a SynchronousQueue that hands off tasks to threads without otherwise holding them. Here, an attempt to queue a task will fail if no threads are immediately available to run it, so a new thread will be constructed. This policy avoids lockups when handling sets of requests that might have internal dependencies. Direct handoffs generally require unbounded maximumPoolSizes to avoid rejection of newly submitted tasks. This in turn admits the possibility of unbounded thread growth when commands continue to arrive on average faster than they can be processed.
  2. Unbounded queues. Using an unbounded queue (for example a LinkedBlockingQueue without a predefined capacity) will cause new tasks to wait in the queue when all corePoolSize threads are busy. Thus, no more than corePoolSize threads will ever be created. (And the value of the maximumPoolSize therefore doesn't have any effect.) This may be appropriate when each task is completely independent of others, so tasks cannot affect each other's execution; for example, in a web page server. While this style of queuing can be useful in smoothing out transient bursts of requests, it admits the possibility of unbounded work queue growth when commands continue to arrive on average faster than they can be processed.
  3. Bounded queues. A bounded queue (for example, an ArrayBlockingQueue) helps prevent resource exhaustion when used with finite maximumPoolSizes, but can be more difficult to tune and control. Queue sizes and maximum pool sizes may be traded off for each other: Using large queues and small pools minimizes CPU usage, OS resources, and context-switching overhead, but can lead to artificially low throughput. If tasks frequently block (for example if they are I/O bound), a system may be able to schedule time for more threads than you otherwise allow. Use of small queues generally requires larger pool sizes, which keeps CPUs busier but may encounter unacceptable scheduling overhead, which also decreases throughput.
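The three strategies map onto different BlockingQueue choices passed to the constructor. A minimal sketch, with arbitrary sizes and keep-alive values chosen only for illustration:

import java.util.concurrent.*;

public class QueueChoices {
  public static void main(String[] args) {
    // 1. Direct handoff: tasks are handed straight to a thread; a large
    //    maximumPoolSize avoids rejection while demand is high.
    ThreadPoolExecutor handoff = new ThreadPoolExecutor(
        0, Integer.MAX_VALUE, 60, TimeUnit.SECONDS, new SynchronousQueue<>());

    // 2. Unbounded queue: never more than corePoolSize threads; the
    //    maximumPoolSize value has no effect here.
    ThreadPoolExecutor unbounded = new ThreadPoolExecutor(
        4, 4, 0, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());

    // 3. Bounded queue: trades queue length against pool size and makes
    //    saturation (and therefore rejection) possible.
    ThreadPoolExecutor bounded = new ThreadPoolExecutor(
        4, 16, 30, TimeUnit.SECONDS, new ArrayBlockingQueue<>(256));

    handoff.shutdown();
    unbounded.shutdown();
    bounded.shutdown();
  }
}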
Rejected tasks. New tasks submitted in method #execute(Runnable) will be rejected when the Executor has been shut down, and also when the Executor uses finite bounds for both maximum threads and work queue capacity and is saturated. In either case, the execute method invokes the RejectedExecutionHandler#rejectedExecution(Runnable,ThreadPoolExecutor) method of its RejectedExecutionHandler. Four predefined handler policies are provided (a short sketch follows this list):
  1. In the default ThreadPoolExecutor.AbortPolicy, the handler throws a runtime RejectedExecutionException upon rejection.
  2. In ThreadPoolExecutor.CallerRunsPolicy, the thread that invokes execute itself runs the task. This provides a simple feedback control mechanism that will slow down the rate that new tasks are submitted.
  3. In ThreadPoolExecutor.DiscardPolicy, a task that cannot be executed is simply dropped.
  4. In ThreadPoolExecutor.DiscardOldestPolicy, if the executor is not shut down, the task at the head of the work queue is dropped, and then execution is retried (which can fail again, causing this to be repeated.)
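The predefined policies are static nested classes of ThreadPoolExecutor; they can be passed at construction or installed later with setRejectedExecutionHandler, as several snippets above do. A small sketch follows; the pool parameters are arbitrary and the custom handler is purely illustrative:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RejectionSketch {
  public static void main(String[] args) {
    // Built-in policy: the submitting thread runs the task itself,
    // which naturally throttles producers.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        2, 4, 30, TimeUnit.SECONDS, new ArrayBlockingQueue<>(10),
        new ThreadPoolExecutor.CallerRunsPolicy());

    // Custom handler: RejectedExecutionHandler has a single abstract method,
    // so a lambda works; here we simply log and drop the rejected task.
    RejectedExecutionHandler logAndDrop =
        (Runnable r, ThreadPoolExecutor e) ->
            System.err.println("rejected " + r + " by " + e);
    pool.setRejectedExecutionHandler(logAndDrop);

    pool.shutdown();
  }
}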
It is possible to define and use other kinds of RejectedExecutionHandler classes. Doing so requires some care, especially when policies are designed to work only under particular capacity or queuing policies.

Hook methods. This class provides protected overridable #beforeExecute(Thread,Runnable) and #afterExecute(Runnable,Throwable) methods that are called before and after execution of each task. These can be used to manipulate the execution environment; for example, reinitializing ThreadLocals, gathering statistics, or adding log entries. Additionally, method #terminated can be overridden to perform any special processing that needs to be done once the Executor has fully terminated.

If hook or callback methods throw exceptions, internal worker threads may in turn fail and abruptly terminate.
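Besides the pause/resume example shown later in this Javadoc, a common use of these hooks is per-task timing. A minimal sketch; the class name and fields are illustrative and not part of the JDK:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class TimingThreadPool extends ThreadPoolExecutor {
  private final ThreadLocal<Long> startNanos = new ThreadLocal<>();
  private final AtomicLong totalNanos = new AtomicLong();

  public TimingThreadPool(int core, int max) {
    super(core, max, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
  }

  @Override
  protected void beforeExecute(Thread t, Runnable r) {
    super.beforeExecute(t, r);
    startNanos.set(System.nanoTime());   // record the start time on the worker thread
  }

  @Override
  protected void afterExecute(Runnable r, Throwable t) {
    totalNanos.addAndGet(System.nanoTime() - startNanos.get());
    super.afterExecute(r, t);
  }

  public long totalTaskNanos() {
    return totalNanos.get();
  }
}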

Queue maintenance. Method #getQueue() allows access to the work queue for purposes of monitoring and debugging. Use of this method for any other purpose is strongly discouraged. Two supplied methods, #remove(Runnable) and #purge, are available to assist in storage reclamation when large numbers of queued tasks become cancelled.

Finalization. A pool that is no longer referenced in a program AND has no remaining threads will be shut down automatically. If you would like to ensure that unreferenced pools are reclaimed even if users forget to call #shutdown, then you must arrange that unused threads eventually die, by setting appropriate keep-alive times, using a lower bound of zero core threads, and/or setting #allowCoreThreadTimeOut(boolean).
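A brief sketch of the keep-alive and queue-maintenance calls described above; the pool sizes, timeouts, and class name are illustrative assumptions:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class MaintenanceSketch {
  public static void main(String[] args) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        4, 4, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

    // Let even core threads exit after the keep-alive time, so an idle
    // (and eventually unreferenced) pool can release all of its threads.
    pool.allowCoreThreadTimeOut(true);
    pool.setKeepAliveTime(30, TimeUnit.SECONDS);

    Runnable task = () -> { /* work */ };
    pool.execute(task);

    // Monitoring/debugging only: inspect the backlog, remove a queued
    // task, or purge cancelled Futures from the queue.
    System.out.println("queued: " + pool.getQueue().size());
    pool.remove(task);
    pool.purge();

    pool.shutdown();
  }
}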

Extension example. Most extensions of this class override one or more of the protected hook methods. For example, here is a subclass that adds a simple pause/resume feature:

  
class PausableThreadPoolExecutor extends ThreadPoolExecutor {
  private boolean isPaused;
  private ReentrantLock pauseLock = new ReentrantLock();
  private Condition unpaused = pauseLock.newCondition();

  public PausableThreadPoolExecutor(...) { super(...); }

  protected void beforeExecute(Thread t, Runnable r) {
    super.beforeExecute(t, r);
    pauseLock.lock();
    try {
      while (isPaused) unpaused.await();
    } catch (InterruptedException ie) {
      t.interrupt();
    } finally {
      pauseLock.unlock();
    }
  }

  public void pause() {
    pauseLock.lock();
    try {
      isPaused = true;
    } finally {
      pauseLock.unlock();
    }
  }

  public void resume() {
    pauseLock.lock();
    try {
      isPaused = false;
      unpaused.signalAll();
    } finally {
      pauseLock.unlock();
    }
  }
}

Most used methods

  • <init>
    Creates a new ThreadPoolExecutor with the given initial parameters.
  • execute
Executes the given task sometime in the future. The task may execute in a new thread or in an existing pooled thread.
  • shutdown
Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted (a combined shutdown sketch follows this list).
  • getQueue
Returns the task queue used by this executor. Access to the task queue is intended primarily for debugging and monitoring.
  • submit
  • shutdownNow
Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
  • getActiveCount
  • awaitTermination
  • allowCoreThreadTimeOut
Sets the policy governing whether core threads may time out and terminate if no tasks arrive within the keep-alive time, being replaced if needed when new tasks arrive.
  • isShutdown
  • getMaximumPoolSize
    Returns the maximum allowed number of threads.
  • getPoolSize
    Returns the current number of threads in the pool.
  • getCorePoolSize
  • setCorePoolSize
  • setMaximumPoolSize
  • afterExecute
  • getCompletedTaskCount
  • getTaskCount
  • setRejectedExecutionHandler
  • setThreadFactory
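Several of the snippets above combine shutdown, awaitTermination, and shutdownNow. A typical orderly-shutdown sketch (the timeout value and class name are arbitrary) looks like this:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public final class ShutdownSketch {
  static void shutdownAndAwait(ExecutorService pool) {
    pool.shutdown();                     // stop accepting new tasks
    try {
      if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
        pool.shutdownNow();              // cancel tasks that are still running
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
          System.err.println("Pool did not terminate");
        }
      }
    } catch (InterruptedException e) {
      pool.shutdownNow();                // re-cancel and preserve the interrupt status
      Thread.currentThread().interrupt();
    }
  }
}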
