writer.addIndexes(sources); if (split) { writer.deleteDocuments(new ShardSplittingQuery(indexMetaData, shardId, hasNested));
/** Calls {@link IndexWriter#addIndexes(Directory...)} and * returns the generation that reflects this change. */ public long addIndexes(Directory... dirs) throws IOException { writer.addIndexes(dirs); // Return gen as of when indexing finished: return indexingGen.get(); }
/** Calls {@link IndexWriter#addIndexes(CodecReader...)} * and returns the generation that reflects this change. */ public long addIndexes(CodecReader... readers) throws IOException { writer.addIndexes(readers); // Return gen as of when indexing finished: return indexingGen.get(); }
/** Calls {@link IndexWriter#addIndexes(CodecReader...)} * and returns the generation that reflects this change. */ public long addIndexes(CodecReader... readers) throws IOException { writer.addIndexes(readers); // Return gen as of when indexing finished: return indexingGen.get(); }
/** Calls {@link IndexWriter#addIndexes(Directory...)} and * returns the generation that reflects this change. */ public long addIndexes(Directory... dirs) throws IOException { writer.addIndexes(dirs); // Return gen as of when indexing finished: return indexingGen.get(); }
/**
 * Invoked when crawling completes: closes the temporary writer and merges the
 * temporary index into the main index, then discards the temporary writer.
 * <p>
 * Fix: the main {@link IndexWriter} is now closed in a {@code finally} block,
 * so it is not leaked (and the index write lock is not left held) when
 * {@code addIndexes} throws. I/O failures are still logged rather than
 * propagated, preserving the original best-effort contract.
 */
public void crawlerFinished() {
    try {
        tempWriter.close();
        IndexWriter indexWriter = new IndexWriter(indexDir, null, true);
        try {
            indexWriter.addIndexes(new Directory[] { tempIndexDir });
        } finally {
            // Always release the writer, even if the merge above failed.
            indexWriter.close();
        }
    } catch (IOException e) {
        log.error("Error", e);
    }
    // Drop the reference regardless of outcome, as before.
    tempWriter = null;
}
/**
 * Rewrites the index in {@code srcDir} into a fresh index under
 * {@code destFolder}, committing the result.
 *
 * @param srcDir     the source index to copy segments from
 * @param destFolder the filesystem folder that receives the rewritten index
 * @throws IOException if opening, writing, or committing fails
 */
static void rewriteIndex(Directory srcDir, File destFolder) throws IOException {
    final StandardAnalyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
    final IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
    // Both the destination directory and the writer are released automatically.
    try (FSDirectory destDir = FSDirectory.open(destFolder);
         IndexWriter writer = new IndexWriter(destDir, config)) {
        writer.addIndexes(srcDir);
        writer.commit();
    }
}
/**
 * Closes all temporary writers and merges their temporary indices into the
 * writer provided.
 * <p>
 * NOTE(review): the previous documentation claimed the target index "will be
 * optimized" when the merge completes, but no optimize/forceMerge call is made
 * here — callers that need an optimized index must trigger that themselves.
 *
 * @param writer the destination writer that receives every temporary index
 * @throws IOException if closing a temporary writer or adding indexes fails
 */
public void merge(IndexWriter writer) throws IOException {
    // Fix: use generics and for-each instead of raw List/Iterator with casts,
    // and drop the redundant (Directory[]) cast on toArray.
    final List<Directory> indexDirectories = new LinkedList<Directory>();
    for (Object key : writers.keySet()) {
        WriterData writerData = (WriterData) writers.get(key);
        // The temporary writer must be closed before its directory is merged.
        writerData.writer.close();
        indexDirectories.add(writerData.dir);
    }
    writer.addIndexes(indexDirectories.toArray(new Directory[indexDirectories.size()]));
}
/**
 * Replaces the entire contents of this index with the staged index found at
 * {@code srcPath}: all existing documents are deleted, the staged segments are
 * added, and the result is committed.
 *
 * @param srcPath filesystem path of the staging index to copy from
 * @throws IOException if the staging directory cannot be opened or the
 *                     delete/add/commit sequence fails
 */
public void copyStagingContent(String srcPath) throws IOException {
    try (FSDirectory stagingDir = FSDirectory.open(new File(srcPath))) {
        final IndexWriter writer = getWriter();
        // Wipe first, then import the staged segments, then make it durable.
        writer.deleteAll();
        writer.addIndexes(stagingDir);
        writer.commit();
    }
}
}
/**
 * Command-line entry point: merges two or more source indexes into a single
 * destination index and force-merges the result down to one segment.
 * <p>
 * Fix: the destination directory, the writer, and every source
 * {@link Directory} are now closed in {@code finally} blocks instead of being
 * leaked (the writer was previously never closed on an exception path, leaving
 * the destination's write lock held).
 *
 * @param args args[0] is the destination index path; args[1..] are sources
 * @throws IOException if any directory cannot be opened or the merge fails
 */
public static void main(String[] args) throws IOException {
    if (args.length < 3) {
        System.err.println("Usage: IndexMergeTool <mergedIndex> <index1> <index2> [index3] ...");
        System.exit(1);
    }
    FSDirectory mergedIndex = FSDirectory.open(Paths.get(args[0]));
    IndexWriter writer = new IndexWriter(mergedIndex,
            new IndexWriterConfig(null).setOpenMode(OpenMode.CREATE));
    Directory[] indexes = new Directory[args.length - 1];
    try {
        for (int i = 1; i < args.length; i++) {
            indexes[i - 1] = FSDirectory.open(Paths.get(args[i]));
        }
        System.out.println("Merging...");
        writer.addIndexes(indexes);
        System.out.println("Full merge...");
        writer.forceMerge(1);
    } finally {
        writer.close();
        // The array may be partially filled if an open above failed.
        for (Directory index : indexes) {
            if (index != null) {
                index.close();
            }
        }
        mergedIndex.close();
    }
    System.out.println("Done.");
}
public static void main(String[] args) throws IOException { if (args.length < 3) { System.err.println("Usage: IndexMergeTool <mergedIndex> <index1> <index2> [index3] ..."); System.exit(1); } // Try to use hardlinks to source segments, if possible. Directory mergedIndex = new HardlinkCopyDirectoryWrapper(FSDirectory.open(Paths.get(args[0]))); IndexWriter writer = new IndexWriter(mergedIndex, new IndexWriterConfig(null).setOpenMode(OpenMode.CREATE)); Directory[] indexes = new Directory[args.length - 1]; for (int i = 1; i < args.length; i++) { indexes[i - 1] = FSDirectory.open(Paths.get(args[i])); } System.out.println("Merging..."); writer.addIndexes(indexes); System.out.println("Full merge..."); writer.forceMerge(1); writer.close(); System.out.println("Done."); }
/**
 * Sorts the documents of the {@code input} index by {@code field} and writes
 * the sorted index to {@code output}.
 * <p>
 * Fix: the source {@link IndexReader} was never closed, and the writer was not
 * closed when {@code addIndexes} threw; both are now released via
 * {@code finally} blocks.
 *
 * @param input  the index to read and sort
 * @param output the destination directory for the sorted index
 * @param field  the field whose values determine document order
 * @throws IOException if reading, sorting, or writing fails
 */
public void sort(Directory input, Directory output, String field) throws IOException {
    LOG.info("IndexSorter: starting.");
    long start = System.currentTimeMillis();
    IndexReader reader = IndexReader.open(input, true);
    try {
        SortingReader sorter = new SortingReader(reader, oldToNew(reader, field));
        IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_31,
                new WhitespaceAnalyzer(Version.LUCENE_31));
        IndexWriter writer = new IndexWriter(output, cfg);
        try {
            writer.addIndexes(new IndexReader[] { sorter });
        } finally {
            writer.close();
        }
    } finally {
        reader.close();
    }
    long end = System.currentTimeMillis();
    LOG.info("IndexSorter: done, " + (end - start) + " total milliseconds");
}
/** * End of blocklet notification */ public void onBlockletEnd(int blockletId) throws IOException { // close ram writer ramIndexWriter.close(); // add ram index data into disk indexWriter.addIndexes(ramDir); // delete this ram data ramDir.close(); if (storeBlockletWise) { flushCache(cache, getIndexColumns(), indexWriter, storeBlockletWise); indexWriter.close(); indexWriter = null; } }
/**
 * Builds a new index in {@code target} containing only the documents of
 * {@code reader} that pass (or, when {@code negateFilter} is true, fail) the
 * {@code preserveFilter}.
 *
 * NOTE(review): the success-flag idiom below is deliberate — on failure the
 * writer is closed with exception-suppressing semantics so the original
 * exception propagates untouched; do not replace with try-with-resources,
 * which would attach close failures as suppressed exceptions instead.
 */
private void createIndex(IndexWriterConfig config, Directory target, IndexReader reader, Filter preserveFilter, boolean negateFilter) throws IOException {
    boolean success = false;
    IndexWriter w = new IndexWriter(target, config);
    try {
        // The filtering reader presents only the preserved documents.
        w.addIndexes(new DocumentFilteredIndexReader(reader, preserveFilter, negateFilter));
        success = true;
    } finally {
        if (success) {
            // Normal path: a close failure here should be reported.
            IOUtils.close(w);
        } else {
            // Failure path: don't let a close failure mask the real exception.
            IOUtils.closeWhileHandlingException(w);
        }
    }
}
void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... sources) throws IOException { target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), new IndexWriterConfig(null) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here // we also don't specify a codec here and merges should use the engines for this index .setMergePolicy(NoMergePolicy.INSTANCE) .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) { writer.addIndexes(sources); writer.commit(); } }
/**
 * Adds the input index to the benchmark's writer, either directly from the
 * directory or by wrapping every leaf reader as a {@link CodecReader}.
 * <p>
 * Fix: replaced the C-style array declaration {@code CodecReader leaves[]}
 * with the idiomatic {@code CodecReader[] leaves}.
 *
 * @return 1, the number of work units performed
 */
@Override
public int doLogic() throws Exception {
    IndexWriter writer = getRunData().getIndexWriter();
    if (useAddIndexesDir) {
        // Fast path: let the writer copy segments straight from the directory.
        writer.addIndexes(inputDir);
    } else {
        // Slow path: re-add the index leaf-by-leaf through codec readers.
        try (IndexReader r = DirectoryReader.open(inputDir)) {
            CodecReader[] leaves = new CodecReader[r.leaves().size()];
            int i = 0;
            for (LeafReaderContext leaf : r.leaves()) {
                leaves[i++] = SlowCodecReaderWrapper.wrap(leaf.reader());
            }
            writer.addIndexes(leaves);
        }
    }
    return 1;
}
/**
 * Merges the provided indexes into this index. After this completes, the
 * index is optimized.
 * <p>
 * The provided IndexReaders are not closed.
 *
 * @param readers the readers of indexes to add.
 * @throws IOException if an error occurs while adding indexes.
 */
void addIndexes(IndexReader[] readers) throws IOException {
    // Merge in the new segments first...
    getIndexWriter().addIndexes(readers);
    // ...then optimize so the index ends up fully merged.
    // NOTE(review): getIndexWriter() is called twice here on purpose; whether
    // it may return different instances is not visible from this file.
    getIndexWriter().optimize();
}
/**
 * Merges the provided indexes into this index. After this completes, the
 * index is optimized.
 * <p>
 * The provided IndexReaders are not closed.
 *
 * @param readers the readers of indexes to add.
 * @throws IOException if an error occurs while adding indexes.
 */
void addIndexes(IndexReader[] readers) throws IOException {
    // Add all incoming readers, then force an optimize pass.
    getIndexWriter().addIndexes(readers);
    // NOTE(review): the getter is invoked twice, matching the sibling variant
    // of this method; caching it in a local would change its call count.
    getIndexWriter().optimize();
}
/**
 * Merges the provided indexes into this index and then lets the writer kick
 * off whatever merges its merge policy deems necessary.
 * <p/>
 * NOTE: despite what earlier documentation claimed, the index is NOT forcibly
 * optimized here — {@code maybeMerge()} only schedules policy-driven merges,
 * unlike the sibling variant that calls {@code optimize()}.
 * <p/>
 * The provided IndexReaders are not closed.
 *
 * @param readers the readers of indexes to add.
 * @throws IOException if an error occurs while adding indexes.
 */
void addIndexes(IndexReader[] readers) throws IOException {
    getIndexWriter().addIndexes(readers);
    getIndexWriter().maybeMerge();
}
/**
 * Merges the data of {@code mergedIndex} into this index, optionally recording
 * {@code commitUserData} as live commit data, then performs an NRT commit and
 * returns the resulting index status. Only callable on the master.
 *
 * NOTE(review): lock ordering is deliberate — this index's WRITE semaphore is
 * taken before the merged index's READ semaphore; do not reorder.
 *
 * @param mergedIndex    the index whose data directory is merged into this one
 * @param commitUserData optional user data attached to the commit; may be null
 * @throws IOException if the merge or commit fails
 */
final IndexStatus merge(final IndexInstance mergedIndex, final Map<String, String> commitUserData) throws IOException {
    checkIsMaster();
    // Exclusive access to this index for the duration of the merge + commit.
    try (final ReadWriteSemaphores.Lock writeLock = readWriteSemaphores.acquireWriteSemaphore()) {
        writerAndSearcher.write((indexWriter, taxonomyWriter) -> {
            // Shared access to the source index only while its segments are read.
            try (final ReadWriteSemaphores.Lock readLock = mergedIndex.readWriteSemaphores.acquireReadSemaphore()) {
                indexWriter.addIndexes(mergedIndex.dataDirectory);
                if (commitUserData != null)
                    indexWriter.setLiveCommitData(commitUserData.entrySet());
            }
            return null;
        });
        nrtCommit();
        return getIndexStatus();
    }
}