// NOTE(review): fragment — iwc is declared and further configured outside this view.
// Wrap the current merge policy so future merges rewrite segments via the upgrade path.
iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
// Keep only the most recent commit point; older commit points become deletable.
iwc.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
/**
 * Builds the standard {@link IndexWriterConfig} for this index: buffering limits,
 * a snapshot-capable last-commit deletion policy, compound files, codec selection,
 * an optional pooled merge scheduler, and a log-byte-size merge policy.
 *
 * @param analyzer analyzer applied to all indexed fields
 * @return a fully configured writer config
 */
public static IndexWriterConfig standard( Analyzer analyzer )
{
    IndexWriterConfig config = new IndexWriterConfig( analyzer );
    config.setMaxBufferedDocs( MAX_BUFFERED_DOCS );
    config.setMaxBufferedDeleteTerms( MAX_BUFFERED_DELETE_TERMS );
    // SnapshotDeletionPolicy lets backups pin a commit while normal operation keeps only the last one.
    config.setIndexDeletionPolicy( new SnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy() ) );
    config.setUseCompoundFile( true );
    config.setRAMBufferSizeMB( STANDARD_RAM_BUFFER_SIZE_MB );
    config.setCodec( new Lucene54Codec()
    {
        @Override
        public PostingsFormat getPostingsFormatForField( String field )
        {
            // Optionally route every field to the block-tree-ords postings format.
            PostingsFormat defaultFormat = super.getPostingsFormatForField( field );
            return CODEC_BLOCK_TREE_ORDS_POSTING_FORMAT ? blockTreeOrdsPostingsFormat : defaultFormat;
        }
    } );
    if ( CUSTOM_MERGE_SCHEDULER )
    {
        config.setMergeScheduler( new PooledConcurrentMergeScheduler() );
    }
    LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy();
    policy.setNoCFSRatio( MERGE_POLICY_NO_CFS_RATIO );
    policy.setMinMergeMB( MERGE_POLICY_MIN_MERGE_MB );
    policy.setMergeFactor( MERGE_POLICY_MERGE_FACTOR );
    config.setMergePolicy( policy );
    return config;
}
private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openMode, final Directory dir, final IndexCommit commit) throws IOException { assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; IndexWriterConfig iwc = new IndexWriterConfig(null) .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setIndexCommit(commit) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here // we also don't specify a codec here and merges should use the engines for this index .setMergePolicy(NoMergePolicy.INSTANCE) .setOpenMode(openMode); return new IndexWriter(dir, iwc); }
/**
 * Opens the index in APPEND mode and commits a freshly generated history UUID into the
 * commit user data, optionally advancing the local checkpoint to the last commit's
 * max_seqno so the safe-commit invariant holds.
 *
 * @param indexDirectory        directory of the Lucene index to stamp
 * @param terminal              terminal used to report the new history UUID
 * @param updateLocalCheckpoint when true, sets local_checkpoint to the commit's max_seqno
 * @throws IOException on any Lucene I/O failure
 */
protected void addNewHistoryCommit(Directory indexDirectory, Terminal terminal, boolean updateLocalCheckpoint)
        throws IOException {
    final String historyUUID = UUIDs.randomBase64UUID();
    terminal.println("Marking index with the new history uuid : " + historyUUID);
    // commit the new history id
    final IndexWriterConfig iwc = new IndexWriterConfig(null)
        // we don't want merges to happen here - we call maybe merge on the engine
        // later once we stared it up otherwise we would need to wait for it here
        // we also don't specify a codec here and merges should use the engines for this index
        .setCommitOnClose(false)
        .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
        .setMergePolicy(NoMergePolicy.INSTANCE)
        .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    // IndexWriter acquires directory lock by its own
    try (IndexWriter indexWriter = new IndexWriter(indexDirectory, iwc)) {
        // Start from the existing commit's user data so unrelated keys are preserved.
        final Map<String, String> userData = new HashMap<>();
        indexWriter.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue()));
        if (updateLocalCheckpoint) {
            // In order to have a safe commit invariant, we have to assign the global checkpoint to the max_seqno of the last commit.
            // We can only safely do it because we will generate a new history uuid this shard.
            final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet());
            // Also advances the local checkpoint of the last commit to its max_seqno.
            userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(commitInfo.maxSeqNo));
        }
        // commit the new history id
        userData.put(Engine.HISTORY_UUID_KEY, historyUUID);
        indexWriter.setLiveCommitData(userData.entrySet());
        indexWriter.commit();
    }
}
/** * This method removes all lucene files from the given directory. It will first try to delete all commit points / segments * files to ensure broken commits or corrupted indices will not be opened in the future. If any of the segment files can't be deleted * this operation fails. */ public static void cleanLuceneIndex(Directory directory) throws IOException { try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (final String file : directory.listAll()) { if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { directory.deleteFile(file); // remove all segment_N files } } } try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... { // do nothing and close this will kick of IndexFileDeleter which will remove all pending files } }
// NOTE(review): fragment — indexWriterConfig, index and w are declared outside this view.
// Merge by document count, coalescing up to 1000 segments per merge level.
LogDocMergePolicy logDocMergePolicy = new LogDocMergePolicy();
logDocMergePolicy.setMergeFactor(1000);
indexWriterConfig.setMergePolicy(logDocMergePolicy);
w = new IndexWriter(index, indexWriterConfig);
// Raise the RAM buffer after construction; applies to subsequent indexing.
w.getConfig().setRAMBufferSizeMB(32);
// NOTE(review): fragment — continuation of a chained IndexWriterConfig builder started outside this view.
.setMergePolicy(NoMergePolicy.INSTANCE) // merging handled elsewhere
.setOpenMode(IndexWriterConfig.OpenMode.APPEND); // requires an existing index
// Apply the index sort only when one is configured (body continues outside this view).
if (indexSort != null) {
// NOTE(review): fragment — continuation of a chained IndexWriterConfig builder started outside this view.
.setIndexCommit(cp) // open the writer on this specific commit point
.setCommitOnClose(false) // caller controls when/if a commit happens
.setMergePolicy(NoMergePolicy.INSTANCE) // keep the commit's segments untouched
.setOpenMode(IndexWriterConfig.OpenMode.APPEND))) {
// NOTE(review): fragment — indexWriterConfig, index and w are declared outside this view.
// Merge by document count, coalescing up to 1000 segments per merge level.
LogDocMergePolicy logDocMergePolicy = new LogDocMergePolicy();
logDocMergePolicy.setMergeFactor(1000);
indexWriterConfig.setMergePolicy(logDocMergePolicy);
w = new IndexWriter(index, indexWriterConfig);
// NOTE(review): fragment — the statement that this soft-deletes retention wrapper completes begins outside this view.
new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
// Wrap the merge policy so Elasticsearch-specific merge behaviour applies on top of retention.
iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
iwc.setSimilarity(engineConfig.getSimilarity());
// Indexing buffer size comes from the engine configuration (MB fraction).
iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
/**
 * Re-opens the index writer when it was previously closed; no-op when already open.
 * Builds a fresh writer with a Lucene 4.3 StandardAnalyzer and the Zoie merge policy,
 * refreshes the reader, and only then clears the closed flag.
 *
 * @throws IOException if the writer cannot be (re)opened
 */
@Override
public void open() throws IOException {
    if (!_closed) {
        return; // already open
    }
    final IndexWriterConfig config =
        new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
    config.setMergePolicy(new ZoieMergePolicy());
    config.setOpenMode(OpenMode.CREATE_OR_APPEND);
    _idxWriter = new IndexWriter(_dir, config);
    updateReader();
    _closed = false;
}
/**
 * Re-opens the index writer when it was previously closed; no-op when already open.
 * Builds a fresh writer with a Lucene 3.4 StandardAnalyzer and the Zoie merge policy,
 * refreshes the reader, and only then clears the closed flag.
 *
 * @throws IOException if the writer cannot be (re)opened
 */
public void open() throws IOException {
    if (!_closed) {
        return; // nothing to do
    }
    final IndexWriterConfig config =
        new IndexWriterConfig(Version.LUCENE_34, new StandardAnalyzer(Version.LUCENE_34));
    config.setMergePolicy(new ZoieMergePolicy());
    config.setOpenMode(OpenMode.CREATE_OR_APPEND);
    _idxWriter = new IndexWriter(_dir, config);
    updateReader();
    _closed = false;
}
/**
 * Lazily re-opens the index writer if closed. When open already, this returns
 * immediately. On re-open: Lucene 4.3 StandardAnalyzer, Zoie merge policy,
 * CREATE_OR_APPEND mode, then a reader refresh before marking the index open.
 *
 * @throws IOException if the writer cannot be (re)opened
 */
@Override
public void open() throws IOException {
    if (!_closed) {
        return;
    }
    final IndexWriterConfig writerConfig =
        new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
    writerConfig.setMergePolicy(new ZoieMergePolicy());
    writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    _idxWriter = new IndexWriter(_dir, writerConfig);
    updateReader();
    _closed = false;
}
/** Override this to customize index settings, e.g. which * codec to use. */ protected IndexWriterConfig getIndexWriterConfig(Analyzer indexAnalyzer, IndexWriterConfig.OpenMode openMode) { IndexWriterConfig iwc = new IndexWriterConfig(indexAnalyzer); iwc.setOpenMode(openMode); // This way all merged segments will be sorted at // merge time, allow for per-segment early termination // when those segments are searched: iwc.setMergePolicy(new SortingMergePolicy(iwc.getMergePolicy(), SORT)); return iwc; }
/**
 * Builds the default {@link IndexWriter}: keyword analysis, a tiered merge policy
 * limited to four segments per tier, and CREATE_OR_APPEND open mode.
 *
 * @param directory directory the writer operates on
 * @return a new writer over {@code directory}
 * @throws IOException if the writer cannot be created
 */
static IndexWriter defaultIndexWriter(Directory directory) throws IOException {
    final TieredMergePolicy tieredPolicy = new TieredMergePolicy();
    tieredPolicy.setSegmentsPerTier(4);
    final IndexWriterConfig config = new IndexWriterConfig(new KeywordAnalyzer());
    config.setMergePolicy(tieredPolicy);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    return new IndexWriter(directory, config);
}
@Override protected IndexWriterConfig getWriterConfig() { final IndexWriterConfig writerConfig = super.getWriterConfig(); // NEXUS-5380 force use of compound lucene index file to postpone "Too many open files" final TieredMergePolicy mergePolicy = new TieredMergePolicy(); mergePolicy.setUseCompoundFile(true); mergePolicy.setNoCFSRatio(1.0); writerConfig.setMergePolicy(mergePolicy); return writerConfig; }
/**
 * Lazily creates the {@link IndexWriter} for this index definition, recording the
 * latest directory generation at creation time; subsequent calls return the cached
 * writer.
 *
 * <p>NOTE(review): no synchronization here — appears to assume single-threaded
 * access; confirm with callers.
 *
 * @return the (possibly newly created) writer
 * @throws IOException if the directory or writer cannot be created
 */
IndexWriter getWriter() throws IOException {
    if (writer != null) {
        return writer;
    }
    final long start = PERF_LOGGER.start();
    directory = directoryFactory.newInstance(definition, definitionBuilder, dirName, reindex);
    final IndexWriterConfig iwConfig = getIndexWriterConfig(definition, directoryFactory.remoteDirectory(), writerConfig);
    iwConfig.setMergePolicy(definition.getMergePolicy());
    writer = new IndexWriter(directory, iwConfig);
    genAtStart = getLatestGeneration(directory);
    log.trace("IndexWriterConfig for index [{}] is {}", definition.getIndexPath(), iwConfig);
    PERF_LOGGER.end(start, -1, "Created IndexWriter for directory {}", definition);
    return writer;
}
/**
 * Opens an {@link IndexWriter} using a byte-size log merge policy capped at
 * {@code maxMergeDocs} documents per merged segment.
 *
 * @param directory       directory to write into
 * @param maxMergeDocs    upper bound on documents in any merged segment
 * @param useSerialMerger when true, installs the shared serial merge scheduler
 * @throws IOException if the writer cannot be opened
 */
public static IndexWriter openWriter(Directory directory, int maxMergeDocs, boolean useSerialMerger)
        throws IOException {
    final IndexWriterConfig config = new IndexWriterConfig(analyzer);
    if (useSerialMerger) {
        config.setMergeScheduler(mergeScheduler);
    }
    final LogMergePolicy policy = new LogByteSizeMergePolicy();
    policy.setMaxMergeDocs(maxMergeDocs);
    config.setMergePolicy(policy);
    return new IndexWriter(directory, config);
}
/**
 * Convenience overload that builds a {@link RandomIndexWriter} from a default test
 * config: the given codec, a fresh log merge policy, and {@code DefaultSimilarity}.
 *
 * @throws IOException if the writer cannot be created
 */
protected static RandomIndexWriter newRandomIndexWriter(final Directory dir, final Analyzer analyzer,
        final Codec codec) throws IOException {
    final IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
        .setCodec(codec)
        .setMergePolicy(newLogMergePolicy())
        .setSimilarity(new DefaultSimilarity());
    return newRandomIndexWriter(dir, analyzer, codec, config);
}
/**
 * Delegates to the full overload with a default test writer config (given codec,
 * log merge policy, {@code DefaultSimilarity}).
 *
 * @throws IOException if the writer cannot be created
 */
protected static RandomIndexWriter newRandomIndexWriter(final Directory dir, final Analyzer analyzer,
        final Codec codec) throws IOException {
    final IndexWriterConfig defaults = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
    defaults.setCodec(codec);
    defaults.setMergePolicy(newLogMergePolicy());
    defaults.setSimilarity(new DefaultSimilarity());
    return newRandomIndexWriter(dir, analyzer, codec, defaults);
}