LiveIndexWriterConfig(Analyzer analyzer) {
    this.analyzer = analyzer;

    // Buffering/flush defaults.
    ramBufferSizeMB = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB;
    maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;

    // No warmer and no explicit commit point until configured by the caller.
    mergedSegmentWarmer = null;
    delPolicy = new KeepOnlyLastCommitDeletionPolicy();
    commit = null;

    useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
    openMode = OpenMode.CREATE_OR_APPEND;
    similarity = IndexSearcher.getDefaultSimilarity();
    mergeScheduler = new ConcurrentMergeScheduler();
    indexingChain = DocumentsWriterPerThread.defaultIndexingChain;

    // Fail fast if no default codec is installed; a null codec would only
    // surface much later with a harder-to-diagnose error.
    codec = Codec.getDefault();
    if (codec == null) {
        throw new NullPointerException();
    }

    infoStream = InfoStream.getDefault();
    mergePolicy = new TieredMergePolicy();
    flushPolicy = new FlushByRamOrCountsPolicy();
    readerPooling = IndexWriterConfig.DEFAULT_READER_POOLING;
    indexerThreadPool = new DocumentsWriterPerThreadPool();
    perThreadHardLimitMB = IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB;
}
@Override public void run() { try { if (verbose()) { message(" merge thread: start"); doMerge(writer, merge); if (verbose()) { message(" merge thread: done"); merge(writer, MergeTrigger.MERGE_FINISHED, true); } catch (AlreadyClosedException ace) { handleMergeException(writer.getDirectory(), exc); removeMergeThread(); updateMergeThreads();
initDynamicDefaults(writer); updateMergeThreads(); if (verbose()) { message("now merge"); message(" index: " + writer.segString()); if (maybeStall(writer) == false) { break; if (verbose()) { message(" no more merges pending; now return"); if (verbose()) { message(" consider merge " + writer.segString(merge.segments)); final MergeThread newMergeThread = getMergeThread(writer, merge); mergeThreads.add(newMergeThread); updateIOThrottle(newMergeThread.merge, newMergeThread.rateLimiter); if (verbose()) { message(" launch new thread [" + newMergeThread.getName() + "]"); updateMergeThreads();
/**
 * Returns true when another live merge of comparable size (within 0.3x..3x of
 * this merge's estimated bytes) has already been running for more than three
 * seconds — i.e. merges of this size class appear to be backing up.
 */
private boolean isBacklog(long now, OneMerge merge) {
    final double myMB = bytesToMB(merge.estimatedMergeBytes);
    for (MergeThread other : mergeThreads) {
        final long startNS = other.merge.mergeStartNS;
        // Skip threads that cannot indicate a backlog: dead threads, this very
        // merge, merges that have not started, small merges, or merges that
        // only just started running.
        if (other.isAlive() == false
            || other.merge == merge
            || startNS == -1
            || other.merge.estimatedMergeBytes < MIN_BIG_MERGE_MB * 1024 * 1024
            || nsToSec(now - startNS) <= 3.0) {
            continue;
        }
        final double ratio = bytesToMB(other.merge.estimatedMergeBytes) / myMB;
        if (ratio > 0.3 && ratio < 3.0) {
            return true;
        }
    }
    return false;
}
double mergeMB = bytesToMB(newMerge.estimatedMergeBytes); if (mergeMB < MIN_BIG_MERGE_MB) { boolean newBacklog = isBacklog(now, newMerge); if (isBacklog(now, mergeThread.merge)) { curBacklog = true; break; targetMBPerSec = MAX_MERGE_MB_PER_SEC; if (verbose()) { if (curMBPerSec == targetMBPerSec) { message(String.format(Locale.ROOT, "io throttle: new merge backlog; leave IO rate at ceiling %.1f MB/sec", targetMBPerSec)); } else { message(String.format(Locale.ROOT, "io throttle: new merge backlog; increase IO rate to %.1f MB/sec", targetMBPerSec)); if (verbose()) { message(String.format(Locale.ROOT, "io throttle: current merge backlog; leave IO rate at %.1f MB/sec", targetMBPerSec)); if (verbose()) { if (curMBPerSec == targetMBPerSec) { message(String.format(Locale.ROOT, "io throttle: no merge backlog; leave IO rate at floor %.1f MB/sec", targetMBPerSec)); } else { message(String.format(Locale.ROOT, "io throttle: no merge backlog; decrease IO rate to %.1f MB/sec", targetMBPerSec)); targetMBPerSecChanged();
if (verbose()) { message = new StringBuilder(); message.append(String.format(Locale.ROOT, "updateMergeThreads ioThrottle=%s targetMBPerSec=%.1f MB/sec", doAutoIOThrottle, targetMBPerSec)); double curMBPerSec = rateLimiter.getMBPerSec(); if (verbose()) { long mergeStartNS = merge.mergeStartNS; if (mergeStartNS == -1) { message.append(String.format(Locale.ROOT, "merge thread %s estSize=%.1f MB (written=%.1f MB) runTime=%.1fs (stopped=%.1fs, paused=%.1fs) rate=%s\n", mergeThread.getName(), bytesToMB(merge.estimatedMergeBytes), bytesToMB(rateLimiter.getTotalBytesWritten()), nsToSec(now - mergeStartNS), nsToSec(rateLimiter.getTotalStoppedNS()), nsToSec(rateLimiter.getTotalPausedNS()), rateToString(rateLimiter.getMBPerSec()))); if (verbose()) { message(message.toString());
initMergeThreadPriority(); message("now merge"); message(" index: " + writer.segString()); message(" no more merges pending; now return"); return; while (mergeThreadCount() >= maxThreadCount) { message(" too many merge threads running; stalling..."); try { wait(); message(" consider merge " + merge.segString(dir)); assert mergeThreadCount() < maxThreadCount; final MergeThread merger = getMergeThread(writer, merge); mergeThreads.add(merger); message(" launch new thread [" + merger.getName() + "]"); merger.start();
private synchronized void initDynamicDefaults(IndexWriter writer) throws IOException { if (maxThreadCount == AUTO_DETECT_MERGES_AND_THREADS) { boolean spins = IOUtils.spins(writer.getDirectory()); // Let tests override this to help reproducing a failure on a machine that has a different // core count than the one where the test originally failed: try { String value = System.getProperty(DEFAULT_SPINS_PROPERTY); if (value != null) { spins = Boolean.parseBoolean(value); } } catch (Exception ignored) { // that's fine we might hit a SecurityException etc. here just continue } setDefaultMaxMergesAndThreads(spins); if (verbose()) { message("initDynamicDefaults spins=" + spins + " maxThreadCount=" + maxThreadCount + " maxMergeCount=" + maxMergeCount); } } }
doMerge(merge); handleMergeException(exc);
/**
 * Opens a new Lucene {@link IndexWriter} over {@code indexDirectory}, wrapped in an
 * {@link IndexWriterCount} with an initial reference count of 1.
 *
 * If any step of initialization fails, everything opened so far is closed before the
 * exception propagates, with close failures attached as suppressed exceptions.
 */
private IndexWriterCount createWriter(final File indexDirectory) throws IOException {
    // Resources opened so far; closed if initialization fails partway through.
    final List<Closeable> closeables = new ArrayList<>();
    final Directory directory = FSDirectory.open(indexDirectory);
    closeables.add(directory);

    try {
        final Analyzer analyzer = new StandardAnalyzer();
        closeables.add(analyzer);

        final IndexWriterConfig config = new IndexWriterConfig(LuceneUtil.LUCENE_VERSION, analyzer);

        // Cap concurrent merge work according to the repository configuration.
        final ConcurrentMergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
        final int mergeThreads = repoConfig.getConcurrentMergeThreads();
        mergeScheduler.setMaxMergesAndThreads(mergeThreads, mergeThreads);
        config.setMergeScheduler(mergeScheduler);

        final IndexWriter indexWriter = new IndexWriter(directory, config);
        final EventIndexWriter eventIndexWriter = new LuceneEventIndexWriter(indexWriter, indexDirectory);

        final IndexWriterCount writerCount = new IndexWriterCount(eventIndexWriter, analyzer, directory, 1, false);
        logger.debug("Providing new index writer for {}", indexDirectory);
        return writerCount;
    } catch (final IOException | RuntimeException e) {
        // BUG FIX: the previous version only caught IOException, so any
        // RuntimeException thrown during initialization leaked the opened
        // Directory/Analyzer. Clean up on any failure; precise rethrow keeps
        // the declared "throws IOException" contract intact.
        for (final Closeable closeable : closeables) {
            try {
                closeable.close();
            } catch (final IOException suppressed) {
                e.addSuppressed(suppressed);
            }
        }
        throw e;
    }
}
ConcurrentMergeScheduler cms; if (r.nextBoolean()) { cms = new ConcurrentMergeScheduler(); } else { cms = new ConcurrentMergeScheduler() { @Override protected synchronized boolean maybeStall(IndexWriter writer) { cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount); if (random().nextBoolean()) { cms.disableAutoIOThrottle(); assertFalse(cms.getAutoIOThrottle()); cms.setForceMergeMBPerSec(10 + 10*random().nextDouble()); c.setMergeScheduler(cms); } else { ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); cms.setMaxMergesAndThreads(3, 1); c.setMergeScheduler(cms);
/**
 * Creates the merge thread via the superclass, then renames it so log output
 * identifies the index and shard the merge belongs to.
 */
@Override
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    final MergeThread mergeThread = super.getMergeThread(writer, merge);
    final String shardQualifiedName =
        EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + mergeThread.getName());
    mergeThread.setName(shardQualifiedName);
    return mergeThread;
}
super.doMerge(writer, merge); } finally { long tookMS = TimeValue.nsecToMSec(System.nanoTime() - timeNS);
this.commitWrapper = commitWrapper; final ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); String overrideSpins = System.getProperty(OVERRIDE_SPINS_PROPERTY); if (overrideSpins != null) { cms.setDefaultMaxMergesAndThreads(Boolean.parseBoolean(overrideSpins));
/**
 * Creates a Lucene {@link org.apache.lucene.index.ConcurrentMergeScheduler}, configured
 * from the {@link org.compass.core.lucene.LuceneEnvironment.MergeScheduler.Concurrent}
 * settings (max thread count, merge thread priority).
 */
public MergeScheduler create(LuceneSearchEngineIndexManager indexManager, CompassSettings settings) throws SearchEngineException {
    final ConcurrentMergeScheduler scheduler = new ConcurrentMergeScheduler();
    final int maxThreads =
        settings.getSettingAsInt(LuceneEnvironment.MergeScheduler.Concurrent.MAX_THREAD_COUNT, 3);
    final int threadPriority =
        settings.getSettingAsInt(LuceneEnvironment.MergeScheduler.Concurrent.THREAD_PRIORITY, Thread.NORM_PRIORITY);
    scheduler.setMaxThreadCount(maxThreads);
    scheduler.setMergeThreadPriority(threadPriority);
    return scheduler;
}
}
/**
 * Blocks until every merge thread has finished.
 *
 * If the calling thread is interrupted while waiting we keep waiting (the
 * original behavior), but the interrupt is now remembered and the thread's
 * interrupt status restored before returning, instead of being silently
 * swallowed by an empty catch block.
 */
public synchronized void sync() {
    boolean interrupted = false;
    try {
        while (mergeThreadCount() > 0) {
            message("now wait for threads; currently " + mergeThreads.size() + " still running");
            final int count = mergeThreads.size();
            for (int i = 0; i < count; i++) {
                message("  " + i + ": " + ((MergeThread) mergeThreads.get(i)));
            }
            try {
                wait();
            } catch (InterruptedException ie) {
                // Keep waiting for merges, but don't lose the interrupt.
                interrupted = true;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}

private synchronized int mergeThreadCount() {
/** Closes this scheduler by waiting for all running merge threads to finish. */
@Override
public void close() {
    sync();
}
/**
 * Sets the priority that merge threads run at, and applies it to all merge
 * threads that are currently running.
 *
 * BUG FIX: the previous javadoc said "Return the priority ..." on what is a
 * setter — corrected to describe the actual behavior.
 *
 * @param pri new thread priority, must be within
 *            {@link Thread#MIN_PRIORITY}..{@link Thread#MAX_PRIORITY} inclusive
 * @throws IllegalArgumentException if {@code pri} is outside that range
 */
public synchronized void setMergeThreadPriority(int pri) {
    if (pri > Thread.MAX_PRIORITY || pri < Thread.MIN_PRIORITY) {
        throw new IllegalArgumentException("priority must be in range " + Thread.MIN_PRIORITY + " .. " + Thread.MAX_PRIORITY + " inclusive");
    }
    mergeThreadPriority = pri;

    // Re-prioritize threads already running; new threads pick up the field value.
    final int numThreads = mergeThreadCount();
    for (int i = 0; i < numThreads; i++) {
        final MergeThread merge = (MergeThread) mergeThreads.get(i);
        merge.setThreadPriority(pri);
    }
}