// Writes all postings, grouping fields by the per-field PostingsFormat chosen
// for them and handing each group to that format's FieldsConsumer. Opened
// consumers are tracked in toClose; on failure they are closed without
// masking the original exception.
@Override
public void write(Fields fields) throws IOException {
  Map<PostingsFormat, FieldsGroup> formatToGroups = buildFieldsGroupMapping(fields);

  // Write postings
  boolean success = false;
  try {
    for (Map.Entry<PostingsFormat, FieldsGroup> ent : formatToGroups.entrySet()) {
      PostingsFormat format = ent.getKey();
      final FieldsGroup group = ent.getValue();

      // Exposes only the fields from this group:
      Fields maskedFields =
          new FilterFields(fields) {
            @Override
            public Iterator<String> iterator() {
              // Restrict iteration to this group's field names so the format
              // only sees fields it is responsible for.
              return group.fields.iterator();
            }
          };

      FieldsConsumer consumer = format.fieldsConsumer(group.state);
      // Track for later closing; consumers stay open until the caller closes
      // this writer (presumably so shared output files remain writable) —
      // TODO confirm against enclosing class.
      toClose.add(consumer);
      consumer.write(maskedFields);
    }
    success = true;
  } finally {
    if (!success) {
      // Best-effort close of everything opened so far; suppresses secondary
      // exceptions so the original failure propagates.
      IOUtils.closeWhileHandlingException(toClose);
    }
  }
}
/**
 * Merges the postings from every reader in <code>mergeState</code> into this
 * consumer. The default implementation remaps around deleted documents and
 * delegates to {@link #write(Fields)}; subclasses may override this method to
 * perform more sophisticated merging (bulk-byte copying, etc).
 */
public void merge(MergeState mergeState) throws IOException {
  final int readerCount = mergeState.fieldsProducers.length;
  final List<Fields> subFields = new ArrayList<>(readerCount);
  final List<ReaderSlice> subSlices = new ArrayList<>(readerCount);

  int base = 0;
  for (int i = 0; i < readerCount; i++) {
    final FieldsProducer producer = mergeState.fieldsProducers[i];
    // Verify this segment's checksums before consuming any of its postings.
    producer.checkIntegrity();

    final int docCount = mergeState.maxDocs[i];
    subSlices.add(new ReaderSlice(base, docCount, i));
    subFields.add(producer);
    base += docCount;
  }

  // View all sub-readers as one Fields, then remap doc IDs (skipping
  // deletions) before writing.
  final MultiFields multi =
      new MultiFields(
          subFields.toArray(Fields.EMPTY_ARRAY), subSlices.toArray(ReaderSlice.EMPTY_ARRAY));
  write(new MappedMultiFields(mergeState, multi));
}
boolean success = false; try { consumer.write(fields); success = true; } finally {
// Writes all postings, grouping fields by the per-field PostingsFormat chosen
// for them and handing each group to that format's FieldsConsumer. Opened
// consumers are tracked in toClose; on failure they are closed without
// masking the original exception.
@Override
public void write(Fields fields) throws IOException {
  Map<PostingsFormat, FieldsGroup> formatToGroups = buildFieldsGroupMapping(fields);

  // Write postings
  boolean success = false;
  try {
    for (Map.Entry<PostingsFormat, FieldsGroup> ent : formatToGroups.entrySet()) {
      PostingsFormat format = ent.getKey();
      final FieldsGroup group = ent.getValue();

      // Exposes only the fields from this group:
      Fields maskedFields =
          new FilterFields(fields) {
            @Override
            public Iterator<String> iterator() {
              // Restrict iteration to this group's field names so the format
              // only sees fields it is responsible for.
              return group.fields.iterator();
            }
          };

      FieldsConsumer consumer = format.fieldsConsumer(group.state);
      // Track for later closing; consumers stay open until the caller closes
      // this writer (presumably so shared output files remain writable) —
      // TODO confirm against enclosing class.
      toClose.add(consumer);
      consumer.write(maskedFields);
    }
    success = true;
  } finally {
    if (!success) {
      // Best-effort close of everything opened so far; suppresses secondary
      // exceptions so the original failure propagates.
      IOUtils.closeWhileHandlingException(toClose);
    }
  }
}
/** Merges in the fields from the readers in
 * <code>mergeState</code>. The default implementation skips
 * and maps around deleted documents, and calls {@link #write(Fields)}.
 * Implementations can override this method for more sophisticated
 * merging (bulk-byte copying, etc). */
public void merge(MergeState mergeState) throws IOException {
  final List<Fields> fields = new ArrayList<>();
  final List<ReaderSlice> slices = new ArrayList<>();

  // Assign each reader a contiguous docID range [docBase, docBase+maxDoc).
  int docBase = 0;
  for(int readerIndex=0;readerIndex<mergeState.fieldsProducers.length;readerIndex++) {
    final FieldsProducer f = mergeState.fieldsProducers[readerIndex];
    final int maxDoc = mergeState.maxDocs[readerIndex];
    // Verify checksums before reading any postings from this segment.
    f.checkIntegrity();
    slices.add(new ReaderSlice(docBase, maxDoc, readerIndex));
    fields.add(f);
    docBase += maxDoc;
  }

  // MultiFields presents all sub-readers as one Fields; MappedMultiFields
  // remaps doc IDs around deletions before the merged view is written.
  Fields mergedFields = new MappedMultiFields(mergeState,
                                              new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
                                                              slices.toArray(ReaderSlice.EMPTY_ARRAY)));
  write(mergedFields);
}
/** Merges in the fields from the readers in
 * <code>mergeState</code>. The default implementation skips
 * and maps around deleted documents, and calls {@link #write(Fields)}.
 * Implementations can override this method for more sophisticated
 * merging (bulk-byte copying, etc). */
public void merge(MergeState mergeState) throws IOException {
  final List<Fields> fields = new ArrayList<>();
  final List<ReaderSlice> slices = new ArrayList<>();

  // Lay the readers out end-to-end in docID space; each slice records the
  // base offset for its reader.
  int docBase = 0;
  for(int readerIndex=0;readerIndex<mergeState.fieldsProducers.length;readerIndex++) {
    final FieldsProducer f = mergeState.fieldsProducers[readerIndex];
    final int maxDoc = mergeState.maxDocs[readerIndex];
    // Fail fast on a corrupt segment before merging its postings.
    f.checkIntegrity();
    slices.add(new ReaderSlice(docBase, maxDoc, readerIndex));
    fields.add(f);
    docBase += maxDoc;
  }

  // Wrap all sub-readers into a single deletion-aware merged view and write it.
  Fields mergedFields = new MappedMultiFields(mergeState,
                                              new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
                                                              slices.toArray(ReaderSlice.EMPTY_ARRAY)));
  write(mergedFields);
}
/** Merges in the fields from the readers in
 * <code>mergeState</code>. The default implementation skips
 * and maps around deleted documents, and calls {@link #write(Fields)}.
 * Implementations can override this method for more sophisticated
 * merging (bulk-byte copying, etc). */
public void merge(MergeState mergeState) throws IOException {
  final List<Fields> fields = new ArrayList<>();
  final List<ReaderSlice> slices = new ArrayList<>();

  // docBase accumulates each reader's maxDoc so sub-readers occupy disjoint
  // docID ranges in the merged view.
  int docBase = 0;
  for(int readerIndex=0;readerIndex<mergeState.fieldsProducers.length;readerIndex++) {
    final FieldsProducer f = mergeState.fieldsProducers[readerIndex];
    final int maxDoc = mergeState.maxDocs[readerIndex];
    // Checksum-verify the incoming segment before consuming it.
    f.checkIntegrity();
    slices.add(new ReaderSlice(docBase, maxDoc, readerIndex));
    fields.add(f);
    docBase += maxDoc;
  }

  // Merge all sub-readers into one Fields view, remapped around deletions,
  // then delegate to the normal write path.
  Fields mergedFields = new MappedMultiFields(mergeState,
                                              new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
                                                              slices.toArray(ReaderSlice.EMPTY_ARRAY)));
  write(mergedFields);
}
consumer.write(maskedFields);
consumer.write(maskedFields);
delegateFieldsConsumer.write(fields);
// Writes the delegate postings first, then builds a completion lookup per
// field: every term is fed to a CompletionTermWriter, and if the writer
// produces a lookup (finish returns true) its start offset and weight range
// are recorded in seenFields keyed by field name.
@Override
public void write(Fields fields) throws IOException {
  delegateFieldsConsumer.write(fields);
  for (String field : fields) {
    CompletionTermWriter termWriter = new CompletionTermWriter();
    Terms terms = fields.terms(field);
    // NOTE(review): terms is dereferenced unconditionally — presumably every
    // iterated field is guaranteed to have terms here; confirm against caller.
    TermsEnum termsEnum = terms.iterator();

    // write terms
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
      termWriter.write(term, termsEnum);
    }

    // store lookup, if needed
    // Capture the dictionary offset BEFORE finish() writes, so the metadata
    // points at the start of this field's lookup data.
    long filePointer = dictOut.getFilePointer();
    if (termWriter.finish(dictOut)) {
      seenFields.put(field, new CompletionMetaData(filePointer,
                                                   termWriter.minWeight,
                                                   termWriter.maxWeight,
                                                   termWriter.type));
    }
  }
}
delegateFieldsConsumer.write(fields);
// Flushes all buffered postings for this segment: collects the per-field
// writers that saw any terms, sorts them by field name, applies deletes, and
// streams the resulting Fields into the codec's FieldsConsumer. The consumer
// is always closed; exceptions during close are suppressed only on the
// failure path so the original error propagates.
@Override
public void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
  super.flush(fieldsToFlush, state);

  // Gather all fields that saw any postings:
  List<FreqProxTermsWriterPerField> allFields = new ArrayList<>();
  for (TermsHashPerField f : fieldsToFlush.values()) {
    final FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) f;
    if (perField.bytesHash.size() > 0) {
      // Terms must be in sorted order before being exposed via FreqProxFields.
      perField.sortPostings();
      assert perField.fieldInfo.getIndexOptions() != IndexOptions.NONE;
      allFields.add(perField);
    }
  }

  // Sort by field name
  CollectionUtil.introSort(allFields);
  Fields fields = new FreqProxFields(allFields);
  // Deletes are resolved before writing so the consumer never sees them.
  applyDeletes(state, fields);

  FieldsConsumer consumer = state.segmentInfo.getCodec().postingsFormat().fieldsConsumer(state);
  boolean success = false;
  try {
    consumer.write(fields);
    success = true;
  } finally {
    if (success) {
      IOUtils.close(consumer);
    } else {
      // Suppress close-time exceptions so the write failure is what surfaces.
      IOUtils.closeWhileHandlingException(consumer);
    }
  }
}
// Flushes all buffered postings for this segment: collects the per-field
// writers that saw any terms, sorts them by field name, applies deletes, and
// streams the resulting Fields into the codec's FieldsConsumer. The consumer
// is always closed; exceptions during close are suppressed only on the
// failure path so the original error propagates.
@Override
public void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
  super.flush(fieldsToFlush, state);

  // Gather all fields that saw any postings:
  List<FreqProxTermsWriterPerField> allFields = new ArrayList<>();
  for (TermsHashPerField f : fieldsToFlush.values()) {
    final FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) f;
    if (perField.bytesHash.size() > 0) {
      // Terms must be in sorted order before being exposed via FreqProxFields.
      perField.sortPostings();
      assert perField.fieldInfo.getIndexOptions() != IndexOptions.NONE;
      allFields.add(perField);
    }
  }

  // Sort by field name
  CollectionUtil.introSort(allFields);
  Fields fields = new FreqProxFields(allFields);
  // Deletes are resolved before writing so the consumer never sees them.
  applyDeletes(state, fields);

  FieldsConsumer consumer = state.segmentInfo.getCodec().postingsFormat().fieldsConsumer(state);
  boolean success = false;
  try {
    consumer.write(fields);
    success = true;
  } finally {
    if (success) {
      IOUtils.close(consumer);
    } else {
      // Suppress close-time exceptions so the write failure is what surfaces.
      IOUtils.closeWhileHandlingException(consumer);
    }
  }
}
boolean success = false; try { consumer.write(fields); success = true; } finally {