/**
 * Creates a new {@link IndexWriterCount} (initial count 1, not marked for close) backed by a
 * fresh Lucene {@link IndexWriter} over the given index directory.
 *
 * <p>The Directory and Analyzer are tracked in a list of Closeables so they can be released
 * if any later construction step fails.
 *
 * @param indexDirectory directory on disk that holds (or will hold) the Lucene index
 * @return a new IndexWriterCount wrapping the opened writer
 * @throws IOException if the directory or writer cannot be opened
 */
private IndexWriterCount createWriter(final File indexDirectory) throws IOException {
    final List<Closeable> closeables = new ArrayList<>();
    final Directory directory = FSDirectory.open(indexDirectory);
    closeables.add(directory);

    try {
        final Analyzer analyzer = new StandardAnalyzer();
        closeables.add(analyzer);

        final IndexWriterConfig config = new IndexWriterConfig(LuceneUtil.LUCENE_VERSION, analyzer);

        // Allow the configured number of merges to run concurrently.
        final ConcurrentMergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
        final int mergeThreads = repoConfig.getConcurrentMergeThreads();
        mergeScheduler.setMaxMergesAndThreads(mergeThreads, mergeThreads);
        config.setMergeScheduler(mergeScheduler);

        final IndexWriter indexWriter = new IndexWriter(directory, config);
        final EventIndexWriter eventIndexWriter = new LuceneEventIndexWriter(indexWriter, indexDirectory);

        final IndexWriterCount writerCount = new IndexWriterCount(eventIndexWriter, analyzer, directory, 1, false);
        logger.debug("Providing new index writer for {}", indexDirectory);
        return writerCount;
    } catch (final IOException | RuntimeException e) {
        // BUG FIX: the original caught only IOException, so a RuntimeException thrown by
        // any constructor above leaked the already-open Directory/Analyzer. Clean up on
        // either failure kind, preserving the original exception type for callers.
        for (final Closeable closeable : closeables) {
            try {
                closeable.close();
            } catch (final IOException suppressed) {
                e.addSuppressed(suppressed);
            }
        }
        throw e;
    }
}
/**
 * Builds the standard {@link IndexWriterConfig} for this store: bounded buffering,
 * snapshot-able keep-only-last-commit deletion policy, compound files, an optionally
 * overridden per-field postings format, and a log-byte-size merge policy.
 *
 * @param analyzer the analyzer the writer will use
 * @return a fully configured IndexWriterConfig
 */
public static IndexWriterConfig standard( Analyzer analyzer )
{
    IndexWriterConfig config = new IndexWriterConfig( analyzer );

    // Buffering / flushing thresholds.
    config.setMaxBufferedDocs( MAX_BUFFERED_DOCS );
    config.setMaxBufferedDeleteTerms( MAX_BUFFERED_DELETE_TERMS );
    config.setRAMBufferSizeMB( STANDARD_RAM_BUFFER_SIZE_MB );

    // Keep only the last commit, but allow snapshots of it; store segments compound.
    config.setIndexDeletionPolicy( new SnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy() ) );
    config.setUseCompoundFile( true );

    // Optionally substitute the block-tree-ords postings format for every field.
    config.setCodec( new Lucene54Codec()
    {
        @Override
        public PostingsFormat getPostingsFormatForField( String field )
        {
            PostingsFormat inherited = super.getPostingsFormatForField( field );
            return CODEC_BLOCK_TREE_ORDS_POSTING_FORMAT ? blockTreeOrdsPostingsFormat : inherited;
        }
    } );

    if ( CUSTOM_MERGE_SCHEDULER )
    {
        config.setMergeScheduler( new PooledConcurrentMergeScheduler() );
    }

    LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy();
    policy.setNoCFSRatio( MERGE_POLICY_NO_CFS_RATIO );
    policy.setMinMergeMB( MERGE_POLICY_MIN_MERGE_MB );
    policy.setMergeFactor( MERGE_POLICY_MERGE_FACTOR );
    config.setMergePolicy( policy );

    return config;
}
// Attach the merge scheduler to the writer config (fragment — iwc/mergeScheduler defined elsewhere).
iwc.setMergeScheduler(mergeScheduler);
/** * Method that return {@link IndexWriterConfig} properly configured in order to * work in google app engine environment. * @param analyzer The analyzer to use * @return An {@link IndexWriterConfig} properly configured */ @SuppressWarnings("resource")//SerialMergeScheduler is Closable public static IndexWriterConfig getIndexWriterConfig(Analyzer analyzer) { final IndexWriterConfig config = new IndexWriterConfig(analyzer); config.setMergeScheduler(new SerialMergeScheduler()); return config; }
/** Builds the writer config: serial merging and at most two concurrent thread states. */
private static IndexWriterConfig createIndexConfig(Analyzer analyzer) {
    final IndexWriterConfig writerConfig = new IndexWriterConfig(LUCENE_VERSION, analyzer);
    writerConfig.setMaxThreadStates(2);
    writerConfig.setMergeScheduler(new SerialMergeScheduler());
    return writerConfig;
}
/**
 * Opens an {@link IndexWriter} over the given directory with a log-byte-size merge
 * policy (non-compound segment files) and serial merging.
 *
 * @param location    directory the index lives in
 * @param analyzer    analyzer used to tokenize documents
 * @param mergeFactor merge factor applied to the merge policy
 * @return the newly opened writer
 * @throws IOException if the writer cannot be created
 */
public static IndexWriter createIndexWriter(Directory location, Analyzer analyzer, int mergeFactor)
        throws IOException {
    LOG.info("Creating IndexWriter with:\nDirectory: " + location + "\nAnalyzer: " + analyzer
            + "\nMerge Factor: " + mergeFactor);

    LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy();
    policy.setMergeFactor(mergeFactor);
    policy.setUseCompoundFile(false);

    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    config.setMergePolicy(policy);
    config.setMergeScheduler(new SerialMergeScheduler());

    return new IndexWriter(location, config);
}
public static IndexWriterConfig defaultConfig() { final IndexWriterConfig config = new IndexWriterConfig( Version.LUCENE_46, new NexusAnalyzer() ); // default open mode is CreateOrAppend which suits us config.setRAMBufferSizeMB( 2.0 ); // old default config.setMergeScheduler( new SerialMergeScheduler() ); // merging serially config.setWriteLockTimeout(IndexWriterConfig.WRITE_LOCK_TIMEOUT); return config; } }
public static IndexWriterConfig defaultConfig() { final IndexWriterConfig config = new IndexWriterConfig( new NexusAnalyzer() ); // default open mode is CreateOrAppend which suits us config.setRAMBufferSizeMB( 2.0 ); // old default config.setMergeScheduler( new SerialMergeScheduler() ); // merging serially config.setWriteLockTimeout( IndexWriterConfig.WRITE_LOCK_TIMEOUT ); return config; } }
public static IndexWriterConfig defaultConfig() { final IndexWriterConfig config = new IndexWriterConfig( new NexusAnalyzer() ); // default open mode is CreateOrAppend which suits us config.setRAMBufferSizeMB( 2.0 ); // old default config.setMergeScheduler( new SerialMergeScheduler() ); // merging serially config.setWriteLockTimeout( IndexWriterConfig.WRITE_LOCK_TIMEOUT ); return config; } }
/** * Create as new IndexWriter using the passed in IndexWriterConfig as a template, but still applies some late changes: * we need to override the MergeScheduler to handle background errors, and a new instance needs to be created for each * new IndexWriter. * Also each new IndexWriter needs a new MergePolicy. */ private IndexWriter createNewIndexWriter() throws IOException { final IndexWriterConfig indexWriterConfig = createWriterConfig(); //Each writer config can be attached only once to an IndexWriter LogByteSizeMergePolicy newMergePolicy = indexParameters.getNewMergePolicy(); //TODO make it possible to configure a different policy? indexWriterConfig.setMergePolicy( newMergePolicy ); MergeScheduler mergeScheduler = new ConcurrentMergeScheduler( this.errorHandler, this.indexName ); indexWriterConfig.setMergeScheduler( mergeScheduler ); return new IndexWriter( directoryProvider.getDirectory(), indexWriterConfig ); }
/** * Create as new IndexWriter using the passed in IndexWriterConfig as a template, but still applies some late changes: * we need to override the MergeScheduler to handle background errors, and a new instance needs to be created for each * new IndexWriter. * Also each new IndexWriter needs a new MergePolicy. */ private IndexWriter createNewIndexWriter() throws IOException { final IndexWriterConfig indexWriterConfig = createWriterConfig(); //Each writer config can be attached only once to an IndexWriter LogByteSizeMergePolicy newMergePolicy = indexParameters.getNewMergePolicy(); //TODO make it possible to configure a different policy? indexWriterConfig.setMergePolicy( newMergePolicy ); MergeScheduler mergeScheduler = new ConcurrentMergeScheduler( this.errorHandler, this.indexName ); indexWriterConfig.setMergeScheduler( mergeScheduler ); return new IndexWriter( directoryProvider.getDirectory(), indexWriterConfig ); }
/**
 * Opens an {@link IndexWriter} on the given directory.
 *
 * @param directory       the index directory
 * @param maxMergeDocs    cap on documents per merged segment
 * @param useSerialMerger when true, installs the class-level merge scheduler
 *                        (presumably a serial one — confirm at the field's declaration)
 * @return the opened writer
 * @throws IOException if the writer cannot be created
 */
public static IndexWriter openWriter(Directory directory, int maxMergeDocs, boolean useSerialMerger)
        throws IOException {
    final IndexWriterConfig config = new IndexWriterConfig(analyzer);
    if (useSerialMerger) {
        config.setMergeScheduler(mergeScheduler);
    }

    final LogMergePolicy policy = new LogByteSizeMergePolicy();
    policy.setMaxMergeDocs(maxMergeDocs);
    config.setMergePolicy(policy);

    return new IndexWriter(directory, config);
}
/**
 * (Re)opens the index: tears down any previous writer/directory, clears a stale
 * write lock left by an unclean shutdown, opens a Lucene 3.6 writer with serial
 * merging and a 2 MB RAM buffer, and refreshes the searcher manager.
 */
protected void start(final TimelineConfiguration configuration) throws IOException {
    // Release any previously held resources before re-opening.
    closeIndexWriter();
    if (directory != null) {
        directory.close();
    }

    directory = openFSDirectory(configuration.getIndexDirectory());

    // An existing index may still carry a write lock from a crashed process.
    if (IndexReader.indexExists(directory) && IndexWriter.isLocked(directory)) {
        IndexWriter.unlock(directory);
    }

    final IndexWriterConfig config =
            new IndexWriterConfig(Version.LUCENE_36, new StandardAnalyzer(Version.LUCENE_36));
    config.setRAMBufferSizeMB(2.0);
    config.setMergeScheduler(new SerialMergeScheduler());

    indexWriter = new IndexWriter(directory, config);
    indexWriter.commit();

    searcherManager = new SearcherManager(indexWriter, false, new SearcherFactory());
    generation++;
}
/**
 * Returns the cached {@link IndexWriter}, opening one on first use with the Zoie
 * merge policy (non-compound), the shared merge scheduler, reader pooling disabled,
 * and a 3 MB RAM buffer.
 */
@Override
public IndexWriter openIndexWriter(Analyzer analyzer, Similarity similarity) throws IOException {
    // Reuse the existing writer if one is already open.
    if (_indexWriter != null) {
        return _indexWriter;
    }

    ZoieMergePolicy policy = new ZoieMergePolicy();
    policy.setMergePolicyParams(_mergePolicyParams);
    policy.setUseCompoundFile(false);

    IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_43, analyzer);
    writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    writerConfig.setMergeScheduler(_mergeScheduler);
    writerConfig.setMergePolicy(policy);
    writerConfig.setReaderPooling(false);
    writerConfig.setRAMBufferSizeMB(3);
    if (similarity != null) {
        writerConfig.setSimilarity(similarity);
    }

    _indexWriter = new IndexWriter(_directory, writerConfig);
    return _indexWriter;
}
/**
 * Lazily opens (and then caches) the {@link IndexWriter} for this directory:
 * Zoie merge policy without compound files, shared merge scheduler, no reader
 * pooling, 3 MB RAM buffer, and an optional caller-supplied similarity.
 */
@Override
public IndexWriter openIndexWriter(Analyzer analyzer, Similarity similarity) throws IOException {
    if (_indexWriter != null) {
        return _indexWriter; // already open — hand back the cached instance
    }

    ZoieMergePolicy zoiePolicy = new ZoieMergePolicy();
    zoiePolicy.setMergePolicyParams(_mergePolicyParams);
    zoiePolicy.setUseCompoundFile(false);

    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_43, analyzer);
    cfg.setOpenMode(OpenMode.CREATE_OR_APPEND);
    cfg.setMergeScheduler(_mergeScheduler);
    cfg.setMergePolicy(zoiePolicy);
    cfg.setReaderPooling(false);
    if (similarity != null) {
        cfg.setSimilarity(similarity);
    }
    cfg.setRAMBufferSizeMB(3);

    IndexWriter opened = new IndexWriter(_directory, cfg);
    _indexWriter = opened;
    return opened;
}
/**
 * Returns the cached {@link IndexWriter}, opening one on first call (Lucene 3.4):
 * Zoie merge policy without compound files, shared merge scheduler, no reader
 * pooling, 3 MB RAM buffer, optional similarity.
 */
public IndexWriter openIndexWriter(Analyzer analyzer, Similarity similarity) throws IOException {
    if (_indexWriter != null) {
        return _indexWriter;
    }

    ZoieMergePolicy zoiePolicy = new ZoieMergePolicy();
    zoiePolicy.setMergePolicyParams(_mergePolicyParams);
    zoiePolicy.setUseCompoundFile(false);

    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_34, analyzer);
    cfg.setOpenMode(OpenMode.CREATE_OR_APPEND);
    cfg.setMergeScheduler(_mergeScheduler);
    cfg.setMergePolicy(zoiePolicy);
    cfg.setReaderPooling(false);
    if (similarity != null) {
        cfg.setSimilarity(similarity);
    }
    cfg.setRAMBufferSizeMB(3);

    _indexWriter = new IndexWriter(_directory, cfg);
    return _indexWriter;
}
/** * Opens an index modifier. * @param analyzer Analyzer * @return IndexModifer instance */ public IndexWriter openIndexWriter(Analyzer analyzer,Similarity similarity) throws IOException { if(_indexWriter != null) return _indexWriter; Directory directory = _dirMgr.getDirectory(true); log.info("opening index writer at: "+_dirMgr.getPath()); ZoieMergePolicy mergePolicy = new ZoieMergePolicy(); mergePolicy.setMergePolicyParams(_mergePolicyParams); // hao: autocommit is set to false with this constructor IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_34,analyzer); config.setOpenMode(OpenMode.CREATE_OR_APPEND); config.setIndexDeletionPolicy(_deletionPolicy); config.setMergeScheduler(_mergeScheduler); config.setMergePolicy(mergePolicy); config.setReaderPooling(false); if (similarity!=null){ config.setSimilarity(similarity); } config.setRAMBufferSizeMB(5); IndexWriter idxWriter = new IndexWriter(directory,config); _indexWriter = idxWriter; return idxWriter; }
private void open() throws IOException { // create it? boolean createIt = !DirectoryReader.indexExists(this._directory); // read only? boolean readonly = isReadOnly(this._directory); if (readonly) { this._writer = null; this._reader = new ReaderManager(this._directory); this._searcher = new SearcherManager(this._directory, FACTORY); } else { // create writer IndexWriterConfig config = new IndexWriterConfig(this._analyzer); ConcurrentMergeScheduler merger = new ConcurrentMergeScheduler(); // merger.setMaxMergesAndThreads(maxMergeCount, maxThreadCount); config.setMergeScheduler(merger); if (createIt) config.setOpenMode(OpenMode.CREATE); this._writer = new IndexWriter(this._directory, config); if (createIt) this._writer.commit(); boolean applyAllDeletes = true; // create searcher this._searcher = new SearcherManager(this._writer, applyAllDeletes, FACTORY); // create reader this._reader = new ReaderManager(this._writer, applyAllDeletes); } // add it to list of opened indexes OpenIndexManager.add(this); // set state to clean state(State.CLEAN); }
IndexWriter createWriter(boolean create) throws IOException { try { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close iwc.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND); iwc.setIndexDeletionPolicy(deletionPolicy); // with tests.verbose, lucene sets this up: plumb to align with filesystem stream boolean verbose = false; try { verbose = Boolean.parseBoolean(System.getProperty("tests.verbose")); } catch (Exception ignore) { } iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger)); iwc.setMergeScheduler(mergeScheduler); MergePolicy mergePolicy = config().getMergePolicy(); // Give us the opportunity to upgrade old segments while performing // background merges mergePolicy = new ElasticsearchMergePolicy(mergePolicy); iwc.setMergePolicy(mergePolicy); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh return new IndexWriter(store.directory(), iwc); } catch (LockObtainFailedException ex) { logger.warn("could not lock IndexWriter", ex); throw ex; } }
private IndexWriterConfig getIndexWriterConfig() { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); iwc.setIndexDeletionPolicy(combinedDeletionPolicy); // with tests.verbose, lucene sets this up: plumb to align with filesystem stream boolean verbose = false; try { verbose = Boolean.parseBoolean(System.getProperty("tests.verbose")); } catch (Exception ignore) { } iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger)); iwc.setMergeScheduler(mergeScheduler); MergePolicy mergePolicy = config().getMergePolicy(); // Give us the opportunity to upgrade old segments while performing // background merges mergePolicy = new ElasticsearchMergePolicy(mergePolicy); iwc.setMergePolicy(mergePolicy); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh if (config().getIndexSort() != null) { iwc.setIndexSort(config().getIndexSort()); } return iwc; }