public PartitionedWriteAheadEventStore(final RepositoryConfiguration repoConfig, final RecordWriterFactory recordWriterFactory, final RecordReaderFactory recordReaderFactory, final EventReporter eventReporter, final EventFileManager fileManager) { super(repoConfig, eventReporter); this.repoConfig = repoConfig; this.eventReporter = eventReporter; this.filesToCompress = new LinkedBlockingQueue<>(100); final AtomicLong idGenerator = new AtomicLong(0L); this.partitions = createPartitions(repoConfig, recordWriterFactory, recordReaderFactory, idGenerator); this.fileManager = fileManager; // Creates tasks to compress data on rollover if (repoConfig.isCompressOnRollover()) { compressionExecutor = Executors.newFixedThreadPool(repoConfig.getIndexThreadPoolSize(), new NamedThreadFactory("Compress Provenance Logs")); } else { compressionExecutor = null; } }
/**
 * Initializes the store: starts the periodic maintenance task (first run after one
 * minute, then once per minute) and then initializes every partition in turn.
 *
 * @throws IOException if any partition fails to initialize
 */
@Override
public void initialize() throws IOException {
    // Single-threaded scheduler is sufficient; maintenance runs are serialized by design.
    maintenanceExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Provenance Repository Maintenance"));
    maintenanceExecutor.scheduleWithFixedDelay(this::performMaintenance, 1, 1, TimeUnit.MINUTES);

    // Bring each partition online; an IOException here propagates to the caller.
    for (final EventStorePartition partition : getPartitions()) {
        partition.initialize();
    }
}
// NOTE(review): partial view — the method signature begins on a line not shown here.
// Single-worker pool for confusion-matrix generation; timeoutMilliseconds presumably
// bounds the work elsewhere — verify the executor is shut down after use (TODO confirm).
String textFieldName, long timeoutMilliseconds) throws IOException { ExecutorService executorService = Executors.newFixedThreadPool(1, new NamedThreadFactory("confusion-matrix-gen-"));
// NOTE(review): partial view — these assignments sit inside method bodies not shown here.
// Fixed-size pool (core == max == threads, 0ms keep-alive) backed by an UNBOUNDED
// LinkedBlockingQueue, so submissions never reject and queue without limit —
// acceptable for test code (thread name "LuceneTestCase"); verify 'ex' is shut down.
ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("LuceneTestCase"));
ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("LuceneTestCase"));
ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("LuceneTestCase"));