/**
 * Gets the maximum number of files per tablet from this configuration.
 *
 * @return maximum number of files per tablet
 * @see Property#TABLE_FILE_MAX
 * @see Property#TSERV_SCAN_MAX_OPENFILES
 */
public int getMaxFilesPerTablet() {
  int configuredMax = getCount(Property.TABLE_FILE_MAX);
  if (configuredMax > 0) {
    return configuredMax;
  }
  // No explicit table limit set: derive one from the scan open-file limit.
  int derivedMax = getCount(Property.TSERV_SCAN_MAX_OPENFILES) - 1;
  log.debug("Max files per tablet {}", derivedMax);
  return derivedMax;
}
/**
 * Gets the number of threads used for deleting files.
 *
 * @return number of delete threads, as configured by {@link Property#GC_DELETE_THREADS}
 */
int getNumDeleteThreads() {
  return getConfiguration()
      .getCount(Property.GC_DELETE_THREADS);
}
/**
 * Gets the timer instance. If an instance has already been created, it will have the number of
 * threads supplied when it was constructed, and the size provided by the configuration here is
 * ignored. If a null configuration is supplied, the number of threads defaults to 1.
 *
 * @param conf
 *          configuration from which to get the number of threads
 * @see Property#GENERAL_SIMPLETIMER_THREADPOOL_SIZE
 */
public static synchronized SimpleTimer getInstance(AccumuloConfiguration conf) {
  // Null config falls back to the built-in default pool size.
  int threads = (conf == null) ? DEFAULT_THREAD_POOL_SIZE
      : conf.getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE);
  return getInstance(threads);
}
/**
 * Creates a log sorter backed by a thread pool whose size is taken from
 * {@link Property#TSERV_RECOVERY_MAX_CONCURRENT}.
 *
 * @param context client context used for server interaction
 * @param fs volume manager used to access write-ahead logs
 * @param conf configuration providing pool size and WAL block size
 */
public LogSorter(ClientContext context, VolumeManager fs, AccumuloConfiguration conf) {
  this.context = context;
  this.fs = fs;
  this.conf = conf;
  this.walBlockSize = DfsLogger.getWalBlockSize(conf);
  this.threadPool = new SimpleThreadPool(
      conf.getCount(Property.TSERV_RECOVERY_MAX_CONCURRENT), this.getClass().getName());
}
/**
 * Creates an executor service whose maximum thread count tracks the given property; the
 * supplier re-reads the system configuration on each call so live config changes take effect.
 */
private ExecutorService createEs(Property max, String name, BlockingQueue<Runnable> queue) {
  return createEs(() -> conf.getSystemConfiguration().getCount(max), name, queue,
      OptionalInt.empty());
}
/**
 * Creates an executor service sized by the given property whose core threads time out, so the
 * pool shrinks to zero when idle. The returned service is registered so its size can track
 * later configuration changes.
 */
private ExecutorService createIdlingEs(Property max, String name, long timeout,
    TimeUnit timeUnit) {
  int poolSize = conf.getSystemConfiguration().getCount(max);
  ThreadPoolExecutor pool = new ThreadPoolExecutor(poolSize, poolSize, timeout, timeUnit,
      new LinkedBlockingQueue<>(), new NamingThreadFactory(name));
  // Allow idle core threads to die instead of parking forever.
  pool.allowCoreThreadTimeOut(true);
  return addEs(() -> conf.getSystemConfiguration().getCount(max), name, pool);
}
/**
 * Lazily creates the shared bulk-import thread pool. The pool size is read from the master's
 * configuration only once, at first use; subsequent calls return the cached instance.
 */
private static synchronized ExecutorService getThreadPool(Master master) {
  if (threadPool != null) {
    return threadPool;
  }
  int poolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
  ThreadPoolExecutor bulkImportPool = new SimpleThreadPool(poolSize, "bulk import");
  bulkImportPool.allowCoreThreadTimeOut(true);
  threadPool = new TraceExecutorService(bulkImportPool);
  return threadPool;
}
/**
 * Initializes memory-manager limits from the system configuration: total in-memory map budget,
 * the cap on concurrent minor compactions, and the waiting-queue multiplier.
 */
@Override
public void init(ServerConfiguration conf) {
  this.config = conf;
  AccumuloConfiguration systemConfig = conf.getSystemConfiguration();
  maxMemory = systemConfig.getAsBytes(Property.TSERV_MAXMEM);
  maxConcurrentMincs = systemConfig.getCount(Property.TSERV_MINC_MAXCONCURRENT);
  numWaitingMultiplier = TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
}
private void setupReplication(AccumuloConfiguration aconf) { // Start the thrift service listening for incoming replication requests try { startReplicationService(); } catch (UnknownHostException e) { throw new RuntimeException("Failed to start replication service", e); } // Start the pool to handle outgoing replications final ThreadPoolExecutor replicationThreadPool = new SimpleThreadPool( getConfiguration().getCount(Property.REPLICATION_WORKER_THREADS), "replication task"); replWorker.setExecutor(replicationThreadPool); replWorker.run(); // Check the configuration value for the size of the pool and, if changed, resize the pool Runnable replicationWorkThreadPoolResizer = () -> { int maxPoolSize = aconf.getCount(Property.REPLICATION_WORKER_THREADS); if (replicationThreadPool.getMaximumPoolSize() != maxPoolSize) { log.info("Resizing thread pool for sending replication work from {} to {}", replicationThreadPool.getMaximumPoolSize(), maxPoolSize); replicationThreadPool.setMaximumPoolSize(maxPoolSize); } }; SimpleTimer.getInstance(aconf).schedule(replicationWorkThreadPoolResizer, 10000, 30000); }
// Size the rename worker pool from the master's configuration.
int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move");
// Futures presumably carry a per-file failure (or null on success) — TODO confirm with caller
List<Future<Exception>> results = new ArrayList<>();
"/" + bulkDir.getParent().getName() + "/" + bulkDir.getName()); int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS); SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulkDir move"); List<Future<Boolean>> results = new ArrayList<>();
// Initializes the bloom filter for this file; body continues beyond this view.
private synchronized void initBloomFilter(AccumuloConfiguration acuconf,
    boolean useAccumuloStart) {
  // Expected key count used to size the bloom filter.
  numKeys = acuconf.getCount(Property.TABLE_BLOOM_SIZE);
// Limit on files opened at once while finding a tablet split midpoint (receiver above).
.getCount(Property.TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN);
// Presize for one reader per map file.
ArrayList<FileSKVIterator> readers = new ArrayList<>(mapFiles.size());
// Limit on files opened at once while finding a tablet split midpoint (receiver above).
.getCount(Property.TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN);
// Presize for one reader per map file.
ArrayList<FileSKVIterator> readers = new ArrayList<>(mapFiles.size());
/**
 * Assigns replication work: lazily initializes the work queue and ZooKeeper cache, refreshes
 * the configured queue-size limit, queues new work, and cleans up finished work. The call
 * order is significant — the queue must exist before queued work is initialized.
 */
@Override
public void assignWork() {
  if (workQueue == null) {
    initializeWorkQueue(conf);
  }

  initializeQueuedWork();

  if (zooCache == null) {
    zooCache = new ZooCache(workQueue.getZooReaderWriter());
  }

  // Get the maximum number of entries we want to queue work for (or the default);
  // re-read each pass so configuration changes take effect.
  this.maxQueueSize = conf.getCount(Property.REPLICATION_MAX_WORK_QUEUE);

  // Scan over the work records, adding the work to the queue
  createWork();

  // Keep the state of the work we queued correct
  cleanupFinishedWork();
}
LoggerOperation op = null;
try {
  // WAL replication factor from config; 0 means "defer to the filesystem default".
  short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
  if (replication == 0)
    replication = fs.getDefaultReplication(new Path(logPath));
private HostAndPort startStatsService() { Iface rpcProxy = TraceWrap.service(this); final Processor<Iface> processor; if (context.getThriftServerType() == ThriftServerType.SASL) { Iface tcProxy = TCredentialsUpdatingWrapper.service(rpcProxy, getClass(), getConfiguration()); processor = new Processor<>(tcProxy); } else { processor = new Processor<>(rpcProxy); } int port[] = getConfiguration().getPort(Property.GC_PORT); HostAndPort[] addresses = TServerUtils.getHostAndPorts(this.opts.getAddress(), port); long maxMessageSize = getConfiguration().getAsBytes(Property.GENERAL_MAX_MESSAGE_SIZE); try { ServerAddress server = TServerUtils.startTServer(getConfiguration(), context.getThriftServerType(), processor, this.getClass().getSimpleName(), "GC Monitor Service", 2, getConfiguration().getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), 1000, maxMessageSize, context.getServerSslParams(), context.getSaslParams(), 0, addresses); log.debug("Starting garbage collector listening on " + server.address); return server.address; } catch (Exception ex) { // ACCUMULO-3651 Level changed to error and FATAL added to message for slf4j compatibility log.error("FATAL:", ex); throw new RuntimeException(ex); } }
// Table-level replication setting; a value > 0 overrides the HDFS default (hrep).
int trep = acuconf.getCount(Property.TABLE_FILE_REPLICATION);
int rep = hrep;
if (trep > 0 && trep != hrep) {