// Merge all index segments down to one for faster searches, then close the writer
// (closing also releases the index write lock). NOTE(review): if optimize() throws,
// close() is skipped and the writer leaks — confirm an enclosing finally handles this.
writer.optimize(); writer.close();
/**
 * Merges the index down to at most {@code maxNumSegments} segments,
 * blocking until the merge finishes. Calling this with a value of 1 is
 * equivalent to {@link #optimize()}.
 *
 * @param maxNumSegments maximum number of segments allowed to remain in
 *                       the index once optimization completes
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if a low-level I/O error occurs
 */
public void optimize(int maxNumSegments) throws CorruptIndexException, IOException {
    optimize(maxNumSegments, true);
}
/**
 * Optimizes the index so that no more than {@code maxNumSegments}
 * segments remain, waiting for the merge to complete before returning.
 * A value of 1 behaves exactly like {@link #optimize()}.
 *
 * @param maxNumSegments upper bound on the segment count after
 *                       optimization finishes
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if a low-level I/O error occurs
 */
public void optimize(int maxNumSegments) throws CorruptIndexException, IOException {
    // Delegate to the two-argument form, always blocking.
    optimize(maxNumSegments, true);
}
/**
 * Optimizes the whole index down to a single segment, optionally
 * returning before the work has finished.
 *
 * @param doWait if {@code true}, block until the optimize completes;
 *               non-blocking mode is only meaningful with a
 *               {@link MergeScheduler} that runs merges on background
 *               threads
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if a low-level I/O error occurs
 */
public void optimize(boolean doWait) throws CorruptIndexException, IOException {
    optimize(1, doWait);
}
/**
 * Merges all segments of the underlying index into one by delegating to
 * the wrapped {@code writer}.
 *
 * @throws IOException if a low-level I/O error occurs during the merge
 */
public void optimize() throws IOException {
    writer.optimize();
}
/**
 * Same as {@link #optimize()}, but lets the caller choose whether to
 * wait for completion.
 *
 * @param doWait when {@code true} the call blocks until the merge is
 *               done; {@code false} is only useful with a
 *               {@link MergeScheduler} capable of background merges
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if a low-level I/O error occurs
 */
public void optimize(boolean doWait) throws CorruptIndexException, IOException {
    // Full optimize: collapse everything to a single segment.
    optimize(1, doWait);
}
private boolean optimizeIndex() { boolean result = false; try { IndexWriter indexWriter = new IndexWriter(directory, analyzer, false, IndexWriter.MaxFieldLength.UNLIMITED); indexWriter.optimize(); indexWriter.close(); result = true; } catch (IOException e) { //logger.error("Error while trying to optimize index."); } return result; }
protected void closeIndexWriter() throws IOException { log.info("Starting optimize"); // optimize and close the index. writer.optimize(); writer.close(); writer = null; log.info("Optimize complete, index closed"); }
/**
 * Forces a segment merge on the writer, if one is open. A no-op when no
 * writer exists.
 *
 * @throws ARQLuceneException wrapping any {@link IOException} from the
 *         underlying optimize
 */
public void flushWriter() {
    if (indexWriter == null) {
        return; // nothing to flush
    }
    try {
        indexWriter.optimize();
    } catch (IOException ex) {
        throw new ARQLuceneException("flushWriter", ex);
    }
}
/**
 * Finishes processing: optimizes the template index and closes its
 * writer. Failures are printed to stderr and otherwise ignored,
 * preserving the best-effort shutdown contract.
 */
@Override
public void endProcess() {
    super.endProcess();
    try {
        try {
            templateWriter.optimize();
        } finally {
            // Always attempt the close so the index write lock is
            // released even when the optimize fails.
            templateWriter.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * End-of-run hook: merges the template index segments and shuts the
 * writer down. Any exception is reported to stderr and swallowed
 * (best-effort shutdown).
 */
@Override
public void endProcess() {
    super.endProcess();
    try {
        try {
            templateWriter.optimize();
        } finally {
            // Close even if optimize threw, so the write lock is freed.
            templateWriter.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Finishes processing: optimizes the category index and closes its
 * writer. Failures are printed to stderr and otherwise ignored,
 * preserving the best-effort shutdown contract.
 */
@Override
public void endProcess() {
    super.endProcess();
    try {
        try {
            categoryWriter.optimize();
        } finally {
            // Always attempt the close so the index write lock is
            // released even when the optimize fails.
            categoryWriter.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Finishes processing: optimizes the disambiguation index and closes
 * its writer. Failures are printed to stderr and otherwise ignored,
 * preserving the best-effort shutdown contract.
 */
@Override
public void endProcess() {
    super.endProcess();
    try {
        try {
            disambiguationWriter.optimize();
        } finally {
            // Always attempt the close so the index write lock is
            // released even when the optimize fails.
            disambiguationWriter.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Command-line entry point: opens the Lucene index directory named by
 * the first argument and runs a full optimize on it.
 *
 * @param args args[0] is the filesystem path of the index to optimize
 */
public static void main(String[] args) {
    String index = args[0];
    try {
        IndexWriter i = new IndexWriter(FSDirectory.open(new File(index)),
                new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
        try {
            i.optimize();
        } finally {
            // Close even if the optimize failed, releasing the write lock.
            i.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
}
/**
 * Optimizes the index, closes the writer, and logs the elapsed time.
 *
 * @throws IOException if the optimize or close fails
 */
public void close() throws IOException {
    logger.info("Optimizing index and closing...");
    Stopwatch stopwatch = Stopwatch.start();
    try {
        indexWriter.optimize();
    } finally {
        // Close even when the optimize fails, so the write lock is released.
        indexWriter.close();
    }
    logger.info("Optimized in "+ stopwatch.click()+" ms");
}
/**
 * Rebuilds the stop-search Lucene index from every stop in the DAO,
 * then notifies the refresh service that the data changed.
 *
 * @throws IOException if the index cannot be written
 * @throws ParseException declared for caller compatibility
 */
private void buildIndex() throws IOException, ParseException {
    IndexWriter writer = new IndexWriter(_bundle.getStopSearchIndexPath(),
            new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    try {
        for (Stop stop : _dao.getAllStops()) {
            Document document = getStopAsDocument(stop);
            writer.addDocument(document);
        }
        writer.optimize();
    } finally {
        // Always release the index write lock, even if indexing failed.
        writer.close();
    }
    _refreshService.refresh(RefreshableResources.STOP_SEARCH_DATA);
}
/**
 * Rebuilds the route-search Lucene index from every route collection in
 * the DAO, then notifies the refresh service that the data changed.
 *
 * @throws IOException if the index cannot be written
 * @throws ParseException declared for caller compatibility
 */
private void buildIndex() throws IOException, ParseException {
    IndexWriter writer = new IndexWriter(_bundle.getRouteSearchIndexPath(),
            new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    try {
        for (RouteCollection routeCollection : _whereDao.getAllRouteCollections()) {
            List<Document> documents = getRouteCollectionAsDocuments(routeCollection);
            for (Document document : documents)
                writer.addDocument(document);
        }
        writer.optimize();
    } finally {
        // Always release the index write lock, even if indexing failed.
        writer.close();
    }
    _refreshService.refresh(RefreshableResources.ROUTE_COLLECTION_SEARCH_DATA);
}
/**
 * Rebuilds the stop-search Lucene index from the transit graph,
 * combining each stop entry with its narrative, then signals the
 * refresh service.
 *
 * @throws IOException if the index cannot be written
 * @throws ParseException declared for caller compatibility
 */
private void buildIndex() throws IOException, ParseException {
    IndexWriter writer = new IndexWriter(_bundle.getStopSearchIndexPath(),
            new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    try {
        for (StopEntry stopEntry : _transitGraphDao.getAllStops()) {
            StopNarrative narrative = _narrativeService.getStopForId(stopEntry.getId());
            Document document = getStopAsDocument(stopEntry, narrative);
            writer.addDocument(document);
        }
        writer.optimize();
    } finally {
        // Always release the index write lock, even if indexing failed.
        writer.close();
    }
    _refreshService.refresh(RefreshableResources.STOP_SEARCH_DATA);
}
/**
 * Rebuilds the stop-search Lucene index: indexes every stop in the
 * transit graph together with its narrative, optimizes, and then asks
 * the refresh service to reload the search data.
 *
 * @throws IOException if the index cannot be written
 * @throws ParseException declared for caller compatibility
 */
private void buildIndex() throws IOException, ParseException {
    IndexWriter writer = new IndexWriter(_bundle.getStopSearchIndexPath(),
            new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    try {
        for (StopEntry stopEntry : _transitGraphDao.getAllStops()) {
            StopNarrative narrative = _narrativeService.getStopForId(stopEntry.getId());
            Document document = getStopAsDocument(stopEntry, narrative);
            writer.addDocument(document);
        }
        writer.optimize();
    } finally {
        // Close unconditionally so the index write lock is freed.
        writer.close();
    }
    _refreshService.refresh(RefreshableResources.STOP_SEARCH_DATA);
}
/**
 * Merges the contents of the given indexes into this index and then
 * optimizes it.
 *
 * <p>The supplied {@link IndexReader}s are left open; closing them is
 * the caller's responsibility.
 *
 * @param readers readers over the indexes to merge in
 * @throws IOException if an error occurs while adding or optimizing
 */
void addIndexes(IndexReader[] readers) throws IOException {
    getIndexWriter().addIndexes(readers);
    getIndexWriter().optimize();
}