@Override
public void checkIntegrity() throws IOException {
  in.checkIntegrity();
}
}
@Override
public long ramBytesUsed() {
  return in.ramBytesUsed();
}
@Override
public Terms terms(String field) throws IOException {
  FieldsProducer fieldsProducer = fields.get(field);
  return fieldsProducer == null ? null : fieldsProducer.terms(field);
}
FieldsReader(FieldsReader other) throws IOException {
  Map<FieldsProducer,FieldsProducer> oldToNew = new IdentityHashMap<>();
  // First clone all formats
  for (Map.Entry<String,FieldsProducer> ent : other.formats.entrySet()) {
    FieldsProducer values = ent.getValue().getMergeInstance();
    formats.put(ent.getKey(), values);
    oldToNew.put(ent.getValue(), values);
  }

  // Then rebuild fields:
  for (Map.Entry<String,FieldsProducer> ent : other.fields.entrySet()) {
    FieldsProducer producer = oldToNew.get(ent.getValue());
    assert producer != null;
    fields.put(ent.getKey(), producer);
  }

  segment = other.segment;
}
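// The constructor above copies each distinct delegate exactly once and then re-points
// every field at its clone. Below is a hedged, self-contained sketch of that
// identity-mapping pattern (class and types are illustrative, not part of Lucene):
// several keys may share one value object, so values are cloned per instance via an
// IdentityHashMap and the key map is rebuilt afterwards.
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;

final class SharedValueCloneSketch {
  static Map<String, StringBuilder> deepCopy(Map<String, StringBuilder> original) {
    Map<StringBuilder, StringBuilder> oldToNew = new IdentityHashMap<>();
    Map<String, StringBuilder> copy = new HashMap<>();
    for (Map.Entry<String, StringBuilder> ent : original.entrySet()) {
      // clone each distinct value once, even if many keys share it
      StringBuilder cloned = oldToNew.computeIfAbsent(ent.getValue(), StringBuilder::new);
      copy.put(ent.getKey(), cloned);
    }
    return copy;
  }
}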
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  FieldsProducer postings = PostingsFormat.forName("Lucene50").fieldsProducer(state);
  if (state.context.context != IOContext.Context.MERGE) {
    FieldsProducer loadedPostings;
    try {
      postings.checkIntegrity();
      loadedPostings = new DirectFields(state, postings, minSkipCount, lowFreqCutoff);
    } finally {
      postings.close();
    }
    return loadedPostings;
  } else {
    // Don't load postings for merge:
    return postings;
  }
}
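// In the non-merge branch above, the delegate producer is closed in a finally block
// because DirectFields copies all postings onto the heap, so the on-disk reader is
// not needed afterwards and must still be released if loading fails. A hedged,
// self-contained sketch of the same eager-load-then-release idiom, using plain
// java.io rather than Lucene types:
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;

final class EagerLoadSketch {
  static byte[] loadAll(Path path) throws IOException {
    byte[] loaded;
    InputStream in = Files.newInputStream(path);
    try {
      loaded = in.readAllBytes(); // copy the whole resource onto the heap
    } finally {
      in.close(); // release the source whether or not loading succeeded
    }
    return loaded;
  }
}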
@Override
public int size() {
  return delegateProducer.size();
}
@Override
public Iterator<String> iterator() {
  return delegateProducer.iterator();
}
@Override
public void close() throws IOException {
  in.close();
}
/**
 * Test the term index.
 * @lucene.experimental
 */
public static Status.TermIndexStatus testPostings(CodecReader reader, PrintStream infoStream, boolean verbose, boolean failFast, Version version) throws IOException {
  // TODO: we should go and verify term vectors match, if
  // crossCheckTermVectors is on...
  Status.TermIndexStatus status;
  final int maxDoc = reader.maxDoc();

  try {
    if (infoStream != null) {
      infoStream.print(" test: terms, freq, prox...");
    }

    final Fields fields = reader.getPostingsReader().getMergeInstance();
    final FieldInfos fieldInfos = reader.getFieldInfos();
    status = checkFields(fields, reader.getLiveDocs(), maxDoc, fieldInfos, true, false, infoStream, verbose, version);
  } catch (Throwable e) {
    if (failFast) {
      throw IOUtils.rethrowAlways(e);
    }
    msg(infoStream, "ERROR: " + e);
    status = new Status.TermIndexStatus();
    status.error = e;
    if (infoStream != null) {
      e.printStackTrace(infoStream);
    }
  }

  return status;
}
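// Hedged usage sketch: assuming this method is CheckIndex.testPostings (as in stock
// Lucene) and that each leaf of a DirectoryReader is a CodecReader, the check can be
// run per segment as below. The index path, the verbose/failFast flags, and the
// Version passed are illustrative choices, not requirements.
import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class TestPostingsUsageSketch {
  public static void main(String[] args) throws IOException {
    try (Directory dir = FSDirectory.open(Paths.get(args[0]));
         DirectoryReader reader = DirectoryReader.open(dir)) {
      for (LeafReaderContext leaf : reader.leaves()) {
        CodecReader codecReader = (CodecReader) leaf.reader();
        CheckIndex.Status.TermIndexStatus status =
            CheckIndex.testPostings(codecReader, System.out, false, false, Version.LATEST);
        if (status.error != null) {
          System.out.println("postings check failed for one segment: " + status.error);
        }
      }
    }
  }
}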
@Override
public int size() {
  return delegateProducer.size();
}
@Override
public Iterator<String> iterator() {
  return delegateProducer.iterator();
}
@Override
public void close() throws IOException {
  delegateFieldsProducer.close();
}
@Override
public void checkIntegrity() throws IOException {
  for (FieldsProducer producer : formats.values()) {
    producer.checkIntegrity();
  }
}
fields = fields.getMergeInstance();
@Override
public Terms terms(String field) throws IOException {
  if (!filtered.contains(field)) {
    throw new IllegalArgumentException("The field named '" + field + "' is not accessible in the current " +
        "merge context, available ones are: " + filtered);
  }
  return in.terms(field);
}
@Override
public long ramBytesUsed() {
  long ramBytesUsed = BASE_RAM_BYTES_USED;
  ramBytesUsed += fields.size() * 2L * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
  ramBytesUsed += formats.size() * 2L * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
  for (Map.Entry<String,FieldsProducer> entry : formats.entrySet()) {
    ramBytesUsed += entry.getValue().ramBytesUsed();
  }
  return ramBytesUsed;
}
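// Rough worked example (illustrative numbers only): with 20 fields mapped onto 2
// per-field formats and 4-byte compressed object references, the two map terms above
// contribute 20 * 2 * 4 + 2 * 2 * 4 = 176 bytes, on top of BASE_RAM_BYTES_USED and
// whatever each delegate producer reports for itself.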
@Override
public int size() {
  return delegateFieldsProducer.size();
}
@Override
public Iterator<String> iterator() {
  return delegateFieldsProducer.iterator();
}
@Override
public void close() throws IOException {
  in.close();
}
/** Merges in the fields from the readers in
 *  <code>mergeState</code>. The default implementation skips
 *  and maps around deleted documents, and calls {@link #write(Fields)}.
 *  Implementations can override this method for more sophisticated
 *  merging (bulk-byte copying, etc). */
public void merge(MergeState mergeState) throws IOException {
  final List<Fields> fields = new ArrayList<>();
  final List<ReaderSlice> slices = new ArrayList<>();

  int docBase = 0;

  for (int readerIndex = 0; readerIndex < mergeState.fieldsProducers.length; readerIndex++) {
    final FieldsProducer f = mergeState.fieldsProducers[readerIndex];

    final int maxDoc = mergeState.maxDocs[readerIndex];
    f.checkIntegrity();
    slices.add(new ReaderSlice(docBase, maxDoc, readerIndex));
    fields.add(f);
    docBase += maxDoc;
  }

  Fields mergedFields = new MappedMultiFields(mergeState,
                                              new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
                                                              slices.toArray(ReaderSlice.EMPTY_ARRAY)));
  write(mergedFields);
}
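// Hedged sketch, assuming the write(Fields) variant referenced in the javadoc above:
// a consumer that only needs a single pass over the merged, deletion-mapped view can
// rely on the default merge() and implement write() alone. The class and what it
// "writes" (a term count) are illustrative, not a real codec component.
import java.io.IOException;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

final class CountingFieldsConsumerSketch extends FieldsConsumer {
  long termCount;

  @Override
  public void write(Fields fields) throws IOException {
    // merge() above hands us one logical Fields over all segments being merged,
    // with deleted documents already skipped and doc IDs remapped.
    for (String field : fields) {
      Terms terms = fields.terms(field);
      if (terms == null) {
        continue;
      }
      TermsEnum termsEnum = terms.iterator();
      while (termsEnum.next() != null) {
        termCount++;
      }
    }
  }

  @Override
  public void close() throws IOException {
    // nothing to release in this sketch
  }
}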