/**
 * Flushes any pending index changes and releases the index writer.
 *
 * <p>Fix: the original called {@code closeIndexWriter()} only after a successful
 * commit, so a failed {@link IndexWriter#commit()} leaked the writer (and its
 * write lock). The close now runs in a {@code finally} block, matching the
 * behavior of {@code close()}-on-shutdown expected by Lucene.
 *
 * @throws RuntimeException wrapping any {@link IOException} raised by the commit
 */
@Override
public void finishUpdating() {
  try {
    if (indexWriter != null) {
      try {
        indexWriter.commit();
      } catch (IOException e) {
        // Preserve the original contract: surface commit failures unchecked.
        throw new RuntimeException(e);
      }
    }
  } finally {
    // Always release the writer, even when the commit failed.
    closeIndexWriter();
  }
}
@Override public void addPatterns(Map<String, Map<Integer, Set<E>>> pats) { try { setIndexWriter(); for(Map.Entry<String, Map<Integer, Set<E>>> en: pats.entrySet()){ //String sentence = StringUtils.joinWords(en.getValue(), " "); addPatterns(en.getKey(), en.getValue(), false); } indexWriter.commit(); //closeIndexWriter(); } catch (IOException e) { throw new RuntimeException(e); } }
/**
 * Commits pending documents and reports progress since the previous commit.
 *
 * <p>The counters are snapshotted before the Lucene commit so the returned
 * delta reflects exactly what this commit made durable.
 *
 * @return the number of documents indexed since the last commit
 * @throws IOException if the underlying Lucene commit fails
 */
@Override
public long commit() throws IOException {
  // Snapshot both counters up front; totalIndexed may keep moving concurrently.
  final long previousTotal = lastCommitTotalIndexed.get();
  final long commitTotal = totalIndexed.get();
  indexWriter.commit();
  // Arm the next commit deadline relative to now.
  commitStats.set(new CommitStats(0, System.nanoTime() + maxCommitNanos));
  lastCommitTotalIndexed.set(commitTotal);
  return commitTotal - previousTotal;
}
private void addPatterns(String id, Map<Integer, Set<E>> p, boolean commit) { try{ setIndexWriter(); Document doc = new Document(); doc.add(new StringField("sentid", id, Field.Store.YES)); doc.add(new Field("patterns", getBytes(p), LuceneFieldType.NOT_INDEXED)); indexWriter.addDocument(doc); if(commit){ indexWriter.commit(); //closeIndexWriter(); } }catch(IOException e){ throw new RuntimeException(e); } }
/** * Commit the changes. */ public synchronized void commit() throws IOException { writer.commit(); if (counter != 0) { counters.put(searcher, counter); counter = 0; } else { closeSearcher(searcher); } // recreate Searcher with the IndexWriter's reader. searcher = new IndexSearcher(IndexReader.open(writer, true)); }
@Override public void add(Map<String, DataInstance> sentences, boolean addProcessedText) { try { this.setIndexWriter(); for(Map.Entry<String, DataInstance> en: sentences.entrySet()){ //String sentence = StringUtils.joinWords(en.getValue(), " "); add(en.getValue().getTokens(), en.getKey(), addProcessedText); } indexWriter.commit(); closeIndexWriter(); } catch (IOException e) { throw new RuntimeException(e); } }
/**
 * Adds one entity's properties as a Lucene document, committing every
 * {@code commitBatchSize} additions to bound the uncommitted backlog.
 *
 * @param id         entity id; becomes the document identity
 * @param properties property key/value pairs to index on the document
 * @throws RuntimeException wrapping any {@link IOException} from Lucene
 */
@Override
public void add( long id, Map<String, Object> properties )
{
    try
    {
        Document document = IndexType.newDocument( entityId( id ) );
        for ( Map.Entry<String, Object> property : properties.entrySet() )
        {
            addSingleProperty( id, document, property.getKey(), property.getValue() );
        }
        writer.addDocument( document );
        updateCount++;
        // Batch commits: flush once per commitBatchSize documents.
        if ( updateCount == commitBatchSize )
        {
            writer.commit();
            updateCount = 0;
        }
    }
    catch ( IOException e )
    {
        throw new RuntimeException( e );
    }
}
/**
 * Makes the index dirty by adding an empty document and committing it
 * through the reference's writer.
 *
 * @param indexReference holder of the target {@link IndexWriter}
 * @throws IOException if the add or commit fails
 */
private static void writeSomething( IndexReference indexReference ) throws IOException
{
    IndexWriter target = indexReference.getWriter();
    target.addDocument( new Document() );
    target.commit();
}
/**
 * Forces every index's pending changes to durable storage. A read-only
 * store is a no-op. Any commit failure is rethrown unchecked, tagged with
 * the identifier of the index that failed.
 */
void force()
{
    if ( readOnly )
    {
        return;
    }
    for ( IndexReference index : getAllIndexes() )
    {
        try
        {
            IndexWriter indexWriter = index.getWriter();
            indexWriter.commit();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( "Unable to commit changes to " + index.getIdentifier(), e );
        }
    }
}
/**
 * Writes a small fixture document (one stored field, one unstored field)
 * into the index and commits it.
 *
 * @param writer destination index writer
 * @throws IOException if the add or commit fails
 */
private static void insertRandomDocuments( IndexWriter writer ) throws IOException
{
    Document fixture = new Document();
    fixture.add( new StringField( "a", "b", Field.Store.YES ) );
    fixture.add( new StringField( "c", "d", Field.Store.NO ) );
    writer.addDocument( fixture );
    writer.commit();
}
/**
 * Commits all index partitions.
 *
 * @param merge also merge all segments together. This should be done before reading term frequencies.
 * @throws IOException on Lucene I/O error.
 */
public void flush( boolean merge ) throws IOException
{
    for ( AbstractIndexPartition partition : getPartitions() )
    {
        IndexWriter partitionWriter = partition.getIndexWriter();
        partitionWriter.commit();
        if ( merge )
        {
            // Collapse to a single segment so term statistics are index-wide.
            partitionWriter.forceMerge( 1 );
        }
    }
}
/**
 * Commits pending changes and refreshes the searcher manager so readers
 * observe the new commit point. Commit duration is always recorded in
 * {@code stats}, even when the commit fails.
 *
 * @throws IOException if the commit or refresh fails
 */
@Override
public synchronized void commit() throws IOException {
  final long commitStart = stats.startCommit();
  try {
    writer.commit();
    searcherManager.maybeRefresh();
  } finally {
    // Record timing regardless of outcome.
    stats.endCommit(commitStart);
  }
}
/**
 * Finalizes the index: writes analysis settings, runs the completer between
 * a two-phase {@code prepareCommit()}/{@code commit()}, and rolls back the
 * prepared-but-uncommitted state if anything fails in between.
 *
 * @throws IOException      if writing settings, preparing, or committing fails
 * @throws RuntimeException if the completer fails; the pending commit is rolled back
 */
private void finishWriting() throws IOException {
    // Tracks whether prepareCommit() has succeeded but commit() has not yet
    // run — the only window in which rollback() is required on failure.
    boolean hasPendingCommit = false;
    try {
        writeAnalysisSettings();

        writer.prepareCommit();
        hasPendingCommit = true;

        int n = completer.complete();
        LOGGER.log(Level.FINE, "completed {0} object(s)", n);

        // Just before commit(), reset the `hasPendingCommit' flag,
        // since after commit() is called, there is no need for
        // rollback() regardless of success.
        hasPendingCommit = false;
        writer.commit();
    } catch (RuntimeException|IOException e) {
        if (hasPendingCommit) {
            writer.rollback();
        }
        LOGGER.log(Level.WARNING, "An error occurred while finishing writer and completer", e);
        throw e;
    }
}
/**
 * Creates a fresh Lucene {@link Directory} under a random sub-folder of the
 * given root, seeded with one random document so it is a valid, non-empty index.
 *
 * @param rootFolder parent folder for the new index directory
 * @return an open directory containing a committed single-document index
 * @throws IOException on any file-system or Lucene failure
 */
private Directory createRandomLuceneDir( File rootFolder ) throws IOException
{
    File indexFolder = createRandomFolder( rootFolder );
    Directory luceneDirectory = directoryFactory.open( indexFolder );
    // Writer is scoped to the seeding step; the directory itself stays open.
    try ( IndexWriter seedWriter = new IndexWriter( luceneDirectory, IndexWriterConfigs.standard() ) )
    {
        seedWriter.addDocument( randomDocument() );
        seedWriter.commit();
    }
    return luceneDirectory;
}
index.getWriter().commit(); commit = deletionPolicy.snapshot();
indexWriter.commit(); } catch (DatabaseException ex) { LOGGER.debug("", ex);
final IndexWriter indexWriter = writer.getIndexWriter(); indexWriter.deleteDocuments(term); indexWriter.commit(); final int docsLeft = indexWriter.numDocs(); deleteDir = docsLeft <= 0;
writer.commit();
/**
 * Verifies that a writable partition accepts a document and, after a commit
 * plus refresh, exposes it through an acquired searcher.
 */
@Test
void createWritablePartition() throws Exception
{
    WritableIndexPartitionFactory factory = new WritableIndexPartitionFactory( IndexWriterConfigs::standard );
    try ( AbstractIndexPartition partition = factory.createPartition( testDirectory.directory(), directory ) )
    {
        try ( IndexWriter writer = partition.getIndexWriter() )
        {
            // Write and commit a single empty document, then make it visible.
            writer.addDocument( new Document() );
            writer.commit();
            partition.maybeRefreshBlocking();

            try ( PartitionSearcher partitionSearcher = partition.acquireSearcher() )
            {
                assertEquals( 1, partitionSearcher.getIndexSearcher().getIndexReader().numDocs(),
                        "We should be able to see newly added document " );
            }
        }
    }
}
w.commit(); if (infoStream.isEnabled(LOG_PREFIX)) { infoStream.message(LOG_PREFIX, "Committed upgraded metadata to index.");