/**
 * Starts writing stored fields for {@code docID}, first padding an empty
 * document for every docID skipped since the last written document so the
 * writer stays aligned with the docID sequence.
 */
void startDocument(int docID) throws IOException {
  assert lastDoc < docID;
  initStoredFieldsWriter();
  // Emit an empty document for each skipped docID; on loop exit lastDoc == docID.
  for (lastDoc++; lastDoc < docID; lastDoc++) {
    writer.startDocument();
    writer.finishDocument();
  }
  writer.startDocument();
}
// Copy one document from the sub-reader: open a new document in this writer,
// replay its stored fields through sub.visitor, then close the document.
startDocument(); sub.reader.visitDocument(sub.docID, sub.visitor); finishDocument();
// Flushes the buffered stored fields, rewriting them in sorted order when a
// sort map is present; always removes the temporary files when done.
@Override
void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
  super.flush(state, sortMap);
  if (sortMap == null) {
    // we're lucky the index is already sorted, just rename the temporary file and return
    for (Map.Entry<String, String> entry : tmpDirectory.getTemporaryFiles().entrySet()) {
      tmpDirectory.rename(entry.getValue(), entry.getKey());
    }
    return;
  }
  // Docs were flushed out of order: read them back from the temporary
  // directory and re-write them into the real directory in sorted order.
  StoredFieldsReader reader = docWriter.codec.storedFieldsFormat()
      .fieldsReader(tmpDirectory, state.segmentInfo, state.fieldInfos, IOContext.DEFAULT);
  StoredFieldsReader mergeReader = reader.getMergeInstance();
  StoredFieldsWriter sortWriter = docWriter.codec.storedFieldsFormat()
      .fieldsWriter(state.directory, state.segmentInfo, IOContext.DEFAULT);
  try {
    reader.checkIntegrity();
    CopyVisitor visitor = new CopyVisitor(sortWriter);
    for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
      sortWriter.startDocument();
      // sortMap.newToOld maps the target (sorted) docID back to the doc's
      // position in the temporary, unsorted file.
      mergeReader.visitDocument(sortMap.newToOld(docID), visitor);
      sortWriter.finishDocument();
    }
    sortWriter.finish(state.fieldInfos, state.segmentInfo.maxDoc());
  } finally {
    // Close reader and writer first, then delete the temporary files even if
    // the copy above threw.
    IOUtils.close(reader, sortWriter);
    IOUtils.deleteFiles(tmpDirectory, tmpDirectory.getTemporaryFiles().values());
  }
}
/**
 * Begins a stored-fields document for {@code docID}. Any docIDs between the
 * previously written document and {@code docID} are filled with empty
 * documents so docIDs remain dense.
 */
void startDocument(int docID) throws IOException {
  assert lastDoc < docID;
  initStoredFieldsWriter();
  // Write an empty document for every gap docID preceding the target.
  while (lastDoc + 1 < docID) {
    lastDoc++;
    writer.startDocument();
    writer.finishDocument();
  }
  // Advance the high-water mark to the document being started.
  lastDoc = docID;
  writer.startDocument();
}
/**
 * Opens a new document in the stored fields writer. Any throwable raised
 * while starting the document is wrapped in an {@code AbortingException},
 * aborting the whole segment.
 */
private void startStoredFields() throws IOException, AbortingException {
  try {
    initStoredFieldsWriter();
    storedFieldsWriter.startDocument();
  } catch (Throwable t) {
    // A failure here leaves the stored fields stream in an unknown state,
    // so the segment must be aborted rather than continued.
    throw AbortingException.wrap(t);
  }
  // Only count the document once the writer has accepted it.
  lastStoredDocID++;
}
/**
 * Calls {@code StoredFieldsWriter.startDocument()} on a lazily initialized
 * writer; any exception is rethrown wrapped as an {@code AbortingException}
 * because the segment cannot be trusted after a partial start.
 */
private void startStoredFields() throws IOException, AbortingException {
  try {
    initStoredFieldsWriter();
    storedFieldsWriter.startDocument();
  } catch (Throwable cause) {
    throw AbortingException.wrap(cause);
  }
  // Increment only after a successful start, keeping the counter accurate
  // when the writer throws.
  lastStoredDocID++;
}
// Transfer a single document: begin it in this writer, stream the stored
// fields from the sub-reader via its visitor, then finish the document.
startDocument(); sub.reader.visitDocument(sub.docID, sub.visitor); finishDocument();
/** Merges in the stored fields from the readers in * <code>mergeState</code>. The default implementation skips * over deleted documents, and uses {@link #startDocument()}, * {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(FieldInfos, int)}, * returning the number of documents that were written. * Implementations can override this method for more sophisticated * merging (bulk-byte copying, etc). */ public int merge(MergeState mergeState) throws IOException { int docCount = 0; for (int i=0;i<mergeState.storedFieldsReaders.length;i++) { StoredFieldsReader storedFieldsReader = mergeState.storedFieldsReaders[i]; storedFieldsReader.checkIntegrity(); MergeVisitor visitor = new MergeVisitor(mergeState, i); int maxDoc = mergeState.maxDocs[i]; Bits liveDocs = mergeState.liveDocs[i]; for (int docID=0;docID<maxDoc;docID++) { if (liveDocs != null && !liveDocs.get(docID)) { // skip deleted docs continue; } startDocument(); storedFieldsReader.visitDocument(docID, visitor); finishDocument(); docCount++; } } finish(mergeState.mergeFieldInfos, docCount); return docCount; }
/** Merges in the stored fields from the readers in * <code>mergeState</code>. The default implementation skips * over deleted documents, and uses {@link #startDocument()}, * {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(FieldInfos, int)}, * returning the number of documents that were written. * Implementations can override this method for more sophisticated * merging (bulk-byte copying, etc). */ public int merge(MergeState mergeState) throws IOException { int docCount = 0; for (int i=0;i<mergeState.storedFieldsReaders.length;i++) { StoredFieldsReader storedFieldsReader = mergeState.storedFieldsReaders[i]; storedFieldsReader.checkIntegrity(); MergeVisitor visitor = new MergeVisitor(mergeState, i); int maxDoc = mergeState.maxDocs[i]; Bits liveDocs = mergeState.liveDocs[i]; for (int docID=0;docID<maxDoc;docID++) { if (liveDocs != null && !liveDocs.get(docID)) { // skip deleted docs continue; } startDocument(); storedFieldsReader.visitDocument(docID, visitor); finishDocument(); docCount++; } } finish(mergeState.mergeFieldInfos, docCount); return docCount; }
// Flushes buffered stored fields. With no sort map the temp files are simply
// renamed into place; otherwise docs are copied into sorted order and the
// temporary files are always deleted afterwards.
@Override
void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
  super.flush(state, sortMap);
  if (sortMap == null) {
    // we're lucky the index is already sorted, just rename the temporary file and return
    for (Map.Entry<String, String> entry : tmpDirectory.getTemporaryFiles().entrySet()) {
      tmpDirectory.rename(entry.getValue(), entry.getKey());
    }
    return;
  }
  // Re-read the unsorted flush output through the codec's reader...
  StoredFieldsReader reader = docWriter.codec.storedFieldsFormat()
      .fieldsReader(tmpDirectory, state.segmentInfo, state.fieldInfos, IOContext.DEFAULT);
  StoredFieldsReader mergeReader = reader.getMergeInstance();
  // ...and write the docs into the real directory in their sorted positions.
  StoredFieldsWriter sortWriter = docWriter.codec.storedFieldsFormat()
      .fieldsWriter(state.directory, state.segmentInfo, IOContext.DEFAULT);
  try {
    reader.checkIntegrity();
    CopyVisitor visitor = new CopyVisitor(sortWriter);
    for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
      sortWriter.startDocument();
      // newToOld(docID) gives the doc's location in the unsorted temp file.
      mergeReader.visitDocument(sortMap.newToOld(docID), visitor);
      sortWriter.finishDocument();
    }
    sortWriter.finish(state.fieldInfos, state.segmentInfo.maxDoc());
  } finally {
    // Clean up unconditionally: close the codec reader/writer, then drop the
    // temporary files even when the sorted copy failed.
    IOUtils.close(reader, sortWriter);
    IOUtils.deleteFiles(tmpDirectory, tmpDirectory.getTemporaryFiles().values());
  }
}