/**
 * Forces merging of every segment that contains deleted documents,
 * blocking until the merges complete.
 *
 * <p>Which segments actually get merged is decided by the configured
 * {@link MergePolicy}; the default {@link TieredMergePolicy} only selects
 * segments whose deleted-doc percentage exceeds 10%.</p>
 *
 * <p>This can be an extremely expensive operation and is rarely warranted.
 * To inspect how many deletions are pending, call
 * {@link IndexReader#numDeletedDocs}.</p>
 *
 * <p><b>NOTE</b>: a new segment is flushed first (if there are indexed
 * documents) and all buffered deletes are applied.</p>
 */
public void forceMergeDeletes() throws IOException {
  // Delegate to the overload, waiting for the merges to finish.
  forceMergeDeletes(true);
}
/**
 * Dispatches index-administration commands taken from the request path.
 *
 * <p>Supported commands: {@code _expunge} (merge away deleted documents
 * without waiting) and {@code _optimize} (force-merge down to a single
 * segment without waiting). Both reply {@code 202 Accepted} with a JSON
 * success body; any other command yields a 400 JSON error.</p>
 *
 * @param req  incoming request; its path carries the command
 * @param resp response to populate
 * @throws IOException   on index or I/O failure
 * @throws JSONException on response-serialization failure
 */
public void admin(final HttpServletRequest req, final HttpServletResponse resp) throws IOException, JSONException {
    final IndexState state = getState(req, resp);
    if (state == null) {
        // getState already reported the problem to the client.
        return;
    }
    final String command = new PathParts(req).getCommand();
    // Constant-first equals keeps this null-safe when no command is present.
    if ("_expunge".equals(command)) {
        logger.info("Expunging deletes from " + state);
        state.writer.forceMergeDeletes(false);
        resp.setStatus(202);
        ServletUtils.sendJsonSuccess(req, resp);
    } else if ("_optimize".equals(command)) {
        logger.info("Optimizing " + state);
        state.writer.forceMerge(1, false);
        resp.setStatus(202);
        ServletUtils.sendJsonSuccess(req, resp);
    } else {
        ServletUtils.sendJsonError(req, resp, 400, "bad_request");
    }
}
// NOTE(review): fragment of a larger force-merge dispatcher; the closing
// braces are outside this view. The asserts document that `upgrade` is
// expected to be mutually exclusive with both branches shown here.
if (onlyExpungeDeletes) {
    assert upgrade == false;
    // Only reclaim space from deleted docs; blocks until merges finish.
    indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
} else if (maxNumSegments <= 0) {
    assert upgrade == false;
/**
 * Merges away all segments holding deleted documents, waiting for the
 * merges to complete before returning.
 *
 * <p>The concrete merge selection is delegated to the {@link MergePolicy};
 * for instance the default {@link TieredMergePolicy} only picks segments
 * with more than 10% deleted documents.</p>
 *
 * <p>Often horribly costly — rarely warranted. Check pending deletions via
 * {@link IndexReader#numDeletedDocs} first.</p>
 *
 * <p><b>NOTE</b>: flushes a new segment first (if documents are buffered)
 * and applies all buffered deletes.</p>
 */
public void forceMergeDeletes() throws IOException {
  forceMergeDeletes(true); // blocking variant
}
/**
 * Convenience overload of {@code forceMergeDeletes(boolean)} that waits
 * for the resulting merges to finish.
 *
 * <p>Segment selection is governed by the {@link MergePolicy}: the default
 * {@link TieredMergePolicy} targets segments whose deleted-document ratio
 * is above 10%.</p>
 *
 * <p>This operation is typically very expensive and seldom justified; use
 * {@link IndexReader#numDeletedDocs} to gauge pending deletions.</p>
 *
 * <p><b>NOTE</b>: first flushes a new segment (if any docs are indexed)
 * and applies every buffered delete.</p>
 */
public void forceMergeDeletes() throws IOException {
  // doWait = true: block until the merge scheduler completes the work.
  forceMergeDeletes(true);
}
/**
 * Forces merging of all segments with deleted documents and blocks until
 * the merges are done; equivalent to {@code forceMergeDeletes(true)}.
 *
 * <p>The {@link MergePolicy} decides which segments qualify — under the
 * default {@link TieredMergePolicy}, only those above 10% deletions.</p>
 *
 * <p>Usually a very costly call; verify it is worthwhile by consulting
 * {@link IndexReader#numDeletedDocs} beforehand.</p>
 *
 * <p><b>NOTE</b>: flushes any buffered documents into a new segment and
 * applies all buffered deletes before merging.</p>
 */
public void forceMergeDeletes() throws IOException {
  forceMergeDeletes(true);
}
/**
 * Times the no-arg force-merge-deletes call with the merge-documents
 * metric; otherwise delegates unchanged to the superclass.
 */
@Override
public void forceMergeDeletes() throws IOException {
    // Ticker records elapsed time when closed by try-with-resources.
    try (Ticker ignored = mergeDocumentsMetric.start()) {
        super.forceMergeDeletes();
    }
}
/**
 * Metric-instrumented variant: wraps the superclass call in the
 * merge-documents timer, passing {@code doWait} through untouched.
 *
 * @param doWait whether to block until merges complete
 */
@Override
public void forceMergeDeletes(boolean doWait) throws IOException {
    try (Ticker ignored = mergeDocumentsMetric.start()) {
        super.forceMergeDeletes(doWait);
    }
}
/**
 * Expunges deleted documents from the disk index.
 *
 * <p>Serialized on {@code _optimizeMonitor} so it cannot overlap other
 * optimize-style operations; the index writer is always released through
 * {@code closeIndexWriter()}, and the disk reader is refreshed afterwards
 * so searches observe the merged segments.</p>
 *
 * @throws IOException on index access failure
 */
public void expungeDeletes() throws IOException {
    log.info("expunging deletes...");
    synchronized (_optimizeMonitor) {
        final BaseSearchIndex<R> searchIndex = getSearchIndex();
        IndexWriter indexWriter = null;
        try {
            indexWriter = searchIndex.openIndexWriter(_analyzer, _similarity);
            indexWriter.forceMergeDeletes();
        } finally {
            // Only close if the writer was actually opened.
            if (indexWriter != null) {
                searchIndex.closeIndexWriter();
            }
        }
        _idxMgr.refreshDiskReader();
    }
    log.info("deletes expunged");
}
/**
 * Merges away deleted documents from the underlying index.
 *
 * <p>Runs under {@code _optimizeMonitor} to exclude concurrent optimize
 * operations. The writer is closed in a {@code finally} block whenever it
 * was opened, and the disk reader is refreshed before returning.</p>
 *
 * @throws IOException if opening the writer or merging fails
 */
public void expungeDeletes() throws IOException {
    log.info("expunging deletes...");
    synchronized (_optimizeMonitor) {
        final BaseSearchIndex<R> index = getSearchIndex();
        IndexWriter w = null;
        try {
            w = index.openIndexWriter(_analyzer, _similarity);
            w.forceMergeDeletes();
        } finally {
            if (w != null) {
                index.closeIndexWriter();
            }
        }
        _idxMgr.refreshDiskReader();
    }
    log.info("deletes expunged");
}
/**
 * Fully optimizes the index: force-merges to a single segment, merges
 * away deleted documents, then prunes files no longer referenced.
 *
 * <p>Guarded by {@code lock}; the acquired writer is released in a
 * {@code finally} block even if a merge step throws.</p>
 */
@Override
public void optimize() throws IOException {
    synchronized (lock) {
        final IndexWriter w = acquireWriter();
        try {
            w.forceMerge(1, true);      // collapse to one segment, blocking
            w.forceMergeDeletes(true);  // reclaim space from deletions
            w.deleteUnusedFiles();      // drop now-orphaned index files
        } finally {
            release(w);
        }
    }
}
// NOTE(review): partial view of a force-merge dispatcher; enclosing method
// and closing braces are not visible here. `upgrade` is asserted to be
// incompatible with both visible branches.
if (onlyExpungeDeletes) {
    assert upgrade == false;
    // Expunge-only path: reclaim deleted docs, blocking until done.
    indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
} else if (maxNumSegments <= 0) {
    assert upgrade == false;
/**
 * Purges deleted documents from the Lucene index stored under the given
 * index node by force-merging segments that contain deletions.
 *
 * <p>Fix: the original leaked the {@link IndexWriter} when
 * {@code forceMergeDeletes()} threw, and never closed the
 * {@link Directory} at all. Both are now managed by try-with-resources,
 * closing writer first, then directory, even on failure.</p>
 *
 * @param idx        index node holding the Lucene data child
 * @param definition index definition used to build the directory and
 *                   writer configuration
 * @throws IOException on directory, writer, or merge failure
 */
private void purgeDeletedDocs(NodeBuilder idx, LuceneIndexDefinition definition) throws IOException {
    // Resources close in reverse declaration order: writer, then directory.
    try (Directory dir = new DefaultDirectoryFactory(null, null)
            .newInstance(definition, idx, FulltextIndexConstants.INDEX_DATA_CHILD_NAME, false);
         IndexWriter writer = new IndexWriter(dir, getIndexWriterConfig(definition, true))) {
        writer.forceMergeDeletes();
    }
}
/**
 * Builds a large fake Lucene index for {@code Detective} using a pool of
 * {@code WORKER_THREADS} filler tasks, then commits, expunges deletes,
 * and force-merges to a single segment.
 *
 * <p>Fixes over the original: the {@code awaitTermination} result was
 * silently ignored — on timeout the code committed and merged while
 * worker tasks could still be writing; it now fails fast instead. The
 * directory, analyzer, and writer also leaked if any step threw; all
 * three are now closed via try-with-resources.</p>
 *
 * @throws InterruptedException if interrupted while awaiting the workers
 * @throws IOException          on index I/O failure
 */
private void buildBigIndex() throws InterruptedException, IOException {
    System.out.println( "Going to create fake index..." );
    Path detectiveIndexPath = getIndexBaseDir().resolve( Detective.class.getCanonicalName() );
    // Closed in reverse order: writer, analyzer, directory — even on failure.
    try ( FSDirectory directory = FSDirectory.open( detectiveIndexPath );
          SimpleAnalyzer analyzer = new SimpleAnalyzer();
          IndexWriter iw = new IndexWriter( directory, new IndexWriterConfig( analyzer ) ) ) {
        IndexFillRunnable filler = new IndexFillRunnable( iw );
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool( WORKER_THREADS );
        for ( int batch = 0; batch <= INDEX_ELEMENTS; batch++ ) {
            executor.execute( filler );
        }
        executor.shutdown();
        // Do not commit/merge unless every filler task has actually finished.
        if ( !executor.awaitTermination( 600, TimeUnit.SECONDS ) ) {
            executor.shutdownNow();
            throw new IllegalStateException( "Index fill workers did not finish within 600 seconds" );
        }
        iw.commit();
        iw.forceMergeDeletes();
        iw.forceMerge( 1 );
    }
    System.out.println( "Index created." );
}
// NOTE(review): fragment — the enclosing method and closing braces lie
// outside this view. Asserts mark `upgrade` as exclusive with both paths.
if (onlyExpungeDeletes) {
    assert upgrade == false;
    // Deleted-docs-only merge; waits for the merges to complete.
    indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
} else if (maxNumSegments <= 0) {
    assert upgrade == false;
// NOTE(review): incomplete excerpt of a force-merge branch selector; the
// remainder of the else-if chain is not shown here.
if (onlyExpungeDeletes) {
    assert upgrade == false;
    // Blocking expunge of segments that contain deleted documents.
    indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
} else if (maxNumSegments <= 0) {
    assert upgrade == false;
// NOTE(review): truncated fragment; closing braces and the rest of the
// chain are outside this view. `upgrade` must be false on these paths.
if (onlyExpungeDeletes) {
    assert upgrade == false;
    // Merge only segments with deletions, blocking until finished.
    indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
} else if (maxNumSegments <= 0) {
    assert upgrade == false;
// NOTE(review): partial snippet of a merge-mode dispatcher; not a complete
// block. Both visible branches assert that `upgrade` is not requested.
if (onlyExpungeDeletes) {
    assert upgrade == false;
    // Expunge deletes synchronously via the index writer.
    indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
} else if (maxNumSegments <= 0) {
    assert upgrade == false;