startDocument(); sub.reader.visitDocument(sub.docID, sub.visitor); finishDocument(); docCount++; finish(mergeState.mergeFieldInfos, docCount); return docCount;
/**
 * Merges the stored fields of every segment being merged into the new segment.
 *
 * @return the total number of documents across all of the readers
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
private int mergeFields() throws IOException {
  // try-with-resources guarantees the writer is closed even if merge() throws
  try (StoredFieldsWriter writer =
      codec.storedFieldsFormat().fieldsWriter(directory, mergeState.segmentInfo, context)) {
    return writer.merge(mergeState);
  }
}
/**
 * Writes the currently captured field to the wrapped writer.
 * NOTE(review): {@code this} is passed as the field value, so this visitor
 * presumably implements IndexableField and exposes the buffered value — confirm
 * against the enclosing class declaration.
 */
void write() throws IOException {
  writer.writeField(currentField, this);
}
}
/**
 * Begins a new stored document for {@code docID}, first emitting an empty
 * document for every id skipped since the last document written so that
 * document ids stay dense.
 */
void startDocument(int docID) throws IOException {
  assert lastDoc < docID;
  initStoredFieldsWriter();
  // Fill any holes between the previously written doc and docID with empty docs.
  for (int doc = lastDoc + 1; doc < docID; doc++) {
    writer.startDocument();
    writer.finishDocument();
  }
  lastDoc = docID;
  writer.startDocument();
}
/** Delegates end-of-document bookkeeping to the underlying writer. */
void finishDocument() throws IOException {
  writer.finishDocument();
}
/**
 * Finishes the stored fields for this flush and releases the writer.
 * {@code sortMap} is unused here; subclasses that sort the index override this.
 */
void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
  try {
    writer.finish(state.fieldInfos, state.segmentInfo.maxDoc());
  } finally {
    // Always close and clear the writer, even when finish() throws.
    IOUtils.close(writer);
    writer = null;
  }
}
/**
 * Calls {@link StoredFieldsWriter#startDocument()}, aborting the segment if it
 * hits any exception.
 */
private void startStoredFields() throws IOException, AbortingException {
  try {
    // presumably lazy-initializes the writer on first use — TODO confirm
    initStoredFieldsWriter();
    storedFieldsWriter.startDocument();
  } catch (Throwable th) {
    // A failure here leaves the writer in an unknown state: abort the whole segment.
    throw AbortingException.wrap(th);
  }
  // Only advance the counter once startDocument() has succeeded.
  lastStoredDocID++;
}
/**
 * Begins a new stored document for {@code docID}, first emitting an empty
 * document for every id skipped since the last document written so that
 * document ids stay dense.
 */
void startDocument(int docID) throws IOException {
  assert lastDoc < docID;
  initStoredFieldsWriter();
  // Fill holes: one empty start/finish pair per skipped docID.
  while (++lastDoc < docID) {
    writer.startDocument();
    writer.finishDocument();
  }
  writer.startDocument();
}
/** Delegates end-of-document bookkeeping to the underlying writer. */
void finishDocument() throws IOException {
  writer.finishDocument();
}
/**
 * Finishes the stored fields for this flush and releases the writer.
 * {@code sortMap} is unused here; subclasses that sort the index override this.
 */
void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
  try {
    writer.finish(state.fieldInfos, state.segmentInfo.maxDoc());
  } finally {
    // Always close and clear the writer, even when finish() throws.
    IOUtils.close(writer);
    writer = null;
  }
}
/**
 * Calls {@link StoredFieldsWriter#startDocument()}, aborting the segment if it
 * hits any exception.
 */
private void startStoredFields() throws IOException, AbortingException {
  try {
    // presumably lazy-initializes the writer on first use — TODO confirm
    initStoredFieldsWriter();
    storedFieldsWriter.startDocument();
  } catch (Throwable th) {
    // A failure here leaves the writer in an unknown state: abort the whole segment.
    throw AbortingException.wrap(th);
  }
  // Only advance the counter once startDocument() has succeeded.
  lastStoredDocID++;
}
@Override void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException { super.flush(state, sortMap); if (sortMap == null) { // we're lucky the index is already sorted, just rename the temporary file and return for (Map.Entry<String, String> entry : tmpDirectory.getTemporaryFiles().entrySet()) { tmpDirectory.rename(entry.getValue(), entry.getKey()); } return; } StoredFieldsReader reader = docWriter.codec.storedFieldsFormat() .fieldsReader(tmpDirectory, state.segmentInfo, state.fieldInfos, IOContext.DEFAULT); StoredFieldsReader mergeReader = reader.getMergeInstance(); StoredFieldsWriter sortWriter = docWriter.codec.storedFieldsFormat() .fieldsWriter(state.directory, state.segmentInfo, IOContext.DEFAULT); try { reader.checkIntegrity(); CopyVisitor visitor = new CopyVisitor(sortWriter); for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) { sortWriter.startDocument(); mergeReader.visitDocument(sortMap.newToOld(docID), visitor); sortWriter.finishDocument(); } sortWriter.finish(state.fieldInfos, state.segmentInfo.maxDoc()); } finally { IOUtils.close(reader, sortWriter); IOUtils.deleteFiles(tmpDirectory, tmpDirectory.getTemporaryFiles().values()); } }
/**
 * Writes the currently captured field via this writer's own writeField.
 * NOTE(review): {@code this} is passed as the field value, so this visitor
 * presumably implements IndexableField and exposes the buffered value — confirm
 * against the enclosing class declaration.
 */
void write() throws IOException {
  writeField(currentField, this);
}
}
subs.add(new CompressingStoredFieldsMergeSub(storedFieldsReader, mergeState.docMaps[i], mergeState.maxDocs[i])); } else { return super.merge(mergeState);
/**
 * Calls {@link StoredFieldsWriter#finishDocument()}, aborting the segment if it
 * hits any exception.
 */
private void finishStoredFields() throws IOException, AbortingException {
  try {
    storedFieldsWriter.finishDocument();
  } catch (Throwable th) {
    // A failure here leaves the writer in an unknown state: abort the whole segment.
    throw AbortingException.wrap(th);
  }
}
/** Merges in the stored fields from the readers in * <code>mergeState</code>. The default implementation skips * over deleted documents, and uses {@link #startDocument()}, * {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(FieldInfos, int)}, * returning the number of documents that were written. * Implementations can override this method for more sophisticated * merging (bulk-byte copying, etc). */ public int merge(MergeState mergeState) throws IOException { int docCount = 0; for (int i=0;i<mergeState.storedFieldsReaders.length;i++) { StoredFieldsReader storedFieldsReader = mergeState.storedFieldsReaders[i]; storedFieldsReader.checkIntegrity(); MergeVisitor visitor = new MergeVisitor(mergeState, i); int maxDoc = mergeState.maxDocs[i]; Bits liveDocs = mergeState.liveDocs[i]; for (int docID=0;docID<maxDoc;docID++) { if (liveDocs != null && !liveDocs.get(docID)) { // skip deleted docs continue; } startDocument(); storedFieldsReader.visitDocument(docID, visitor); finishDocument(); docCount++; } } finish(mergeState.mergeFieldInfos, docCount); return docCount; }
/** Forwards a single stored field to the delegate writer. */
void writeField(FieldInfo info, IndexableField field) throws IOException {
  writer.writeField(info, field);
}
/**
 * Merges the stored fields of every segment being merged into the new segment.
 *
 * @return the total number of documents across all of the readers
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
private int mergeFields() throws IOException {
  // try-with-resources guarantees the writer is closed even if merge() throws
  try (StoredFieldsWriter writer =
      codec.storedFieldsFormat().fieldsWriter(directory, mergeState.segmentInfo, context)) {
    return writer.merge(mergeState);
  }
}