/** Record the given document. */
public void addDoc(int docId) {
  bits.add(docId);
}
if (matchingTerms == null) {
  docs = termsEnum.postings(docs, PostingsEnum.NONE);
  builder.add(docs);
} else if (matchingTerms.size() < threshold) {
  // Below the threshold: just remember the term so it can be replayed later.
  matchingTerms.add(new TermAndState(field, termsEnum));
} else {
  // Threshold exceeded: fall back to collecting doc ids, replaying the
  // terms gathered so far into the builder.
  builder = new DocIdSetBuilder(reader.maxDoc(), terms);
  docs = termsEnum.postings(docs, PostingsEnum.NONE);
  builder.add(docs);
  for (TermAndState t : matchingTerms) {
    t.termsEnum.seekExact(t.term, t.state);
    docs = t.termsEnum.postings(docs, PostingsEnum.NONE);
    builder.add(docs);
  }
  matchingTerms = null;
}
/** Returns a per-segment DocIdSet containing the matching docs for the specified slice. */
private DocIdSet build(LeafReader reader) throws IOException {
  final DocIdSetBuilder builder = new DocIdSetBuilder(reader.maxDoc());
  final Terms terms = reader.terms(getField());
  final TermsEnum te = terms.iterator();
  PostingsEnum docsEnum = null;
  for (BytesRef term = te.next(); term != null; term = te.next()) {
    // Use a fixed seed instead of term.hashCode(), otherwise this query may return
    // inconsistent results when running on another replica (StringHelper sets its
    // default seed at startup with the current time).
    int hashCode = StringHelper.murmurhash3_x86_32(term, SEED);
    if (contains(hashCode)) {
      docsEnum = te.postings(docsEnum, PostingsEnum.NONE);
      builder.add(docsEnum);
    }
  }
  return builder.build();
}
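The slice-membership test `contains(hashCode)` is not shown above. A minimal sketch, assuming the slice is described by a hypothetical `id`/`max` pair (slice index and slice count); these names are placeholders, not taken from the snippet:

// Hypothetical slice check: a term belongs to this slice when its fixed-seed
// hash, reduced modulo the number of slices (max), equals the slice id.
private boolean contains(int hashCode) {
  // Math.floorMod keeps the result non-negative even for negative hash codes.
  return Math.floorMod(hashCode, max) == id;
}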
protected void collectDocs(DocIdSetBuilder docSetBuilder) throws IOException {
  assert termsEnum != null;
  postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
  docSetBuilder.add(postingsEnum);
}
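A possible caller sketch for `collectDocs`, assuming the instance field `termsEnum` has been positioned first; `reader` and the term value are illustrative only:

// Sketch: position the enum on a term, then collect its postings.
DocIdSetBuilder builder = new DocIdSetBuilder(reader.maxDoc());
if (termsEnum.seekExact(new BytesRef("value"))) { // hypothetical term
  collectDocs(builder);
}
DocIdSet matches = builder.build();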
/** Adds all docs from the current block. */
private int addAll(QueryState state) throws IOException {
  // How many values are stored in this leaf cell:
  int count = state.in.readVInt();
  state.docs.grow(count);
  for (int i = 0; i < count; i++) {
    int docID = state.in.readInt();
    state.docs.add(docID);
  }
  return count;
}
@Override
protected DocIdSet finish() throws IOException {
  if (exactIsEmpty) {
    exactDocIdSet = null;
  } else {
    exactDocIdSet = exactBuilder.build();
  }
  if (approxIsEmpty) {
    approxDocIdSet = exactDocIdSet; // optimization
  } else {
    if (exactDocIdSet != null) {
      // The approximation must be a superset of the exact matches.
      approxBuilder.add(exactDocIdSet.iterator());
    }
    approxDocIdSet = approxBuilder.build();
  }
  return null; // unused in this weird re-use of AVPTQ
}
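A hedged sketch of how the two sets built above might be consumed: docs in `exactDocIdSet` are sure hits, while the remaining docs in `approxDocIdSet` still need a per-doc check. `verify(...)` and `collect(...)` are hypothetical, not part of the snippet:

// Sketch only: assumes approxDocIdSet is non-null (i.e. something matched).
DocIdSetIterator approx = approxDocIdSet.iterator();
DocIdSetIterator exact =
    exactDocIdSet == null ? DocIdSetIterator.empty() : exactDocIdSet.iterator();
for (int doc = approx.nextDoc();
     doc != DocIdSetIterator.NO_MORE_DOCS;
     doc = approx.nextDoc()) {
  // A doc also present in the exact set is a sure hit; otherwise verify it.
  boolean sureHit = exact.docID() == doc
      || (exact.docID() < doc && exact.advance(doc) == doc);
  if (sureHit || verify(doc)) { // verify(...) is a hypothetical second phase
    collect(doc);               // collect(...) is hypothetical as well
  }
}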
/**
 * Adds docs from the current block, filtering each hit against the query min/max. This
 * is only needed on the boundary blocks.
 */
private int addSome(QueryState state) throws IOException {
  int hitCount = 0;
  // How many points are stored in this leaf cell:
  int count = state.in.readVInt();
  state.docs.grow(count);
  for (int i = 0; i < count; i++) {
    int docID = state.in.readInt();
    state.sndv.setDocument(docID);
    // How many values this doc has:
    int docValueCount = state.sndv.count();
    for (int j = 0; j < docValueCount; j++) {
      long value = state.sndv.valueAt(j);
      if (value >= state.minValueIncl && value <= state.maxValueIncl) {
        state.docs.add(docID);
        hitCount++;
        // Stop processing values for this doc:
        break;
      }
    }
  }
  return hitCount;
}
/** Returns a per-segment DocIdSet containing the matching docs for the specified slice. */
private DocIdSet build(LeafReader reader) throws IOException {
  final DocIdSetBuilder builder = new DocIdSetBuilder(reader.maxDoc());
  final Terms terms = reader.terms(getField());
  final TermsEnum te = terms.iterator();
  PostingsEnum docsEnum = null;
  for (BytesRef term = te.next(); term != null; term = te.next()) {
    int hashCode = term.hashCode();
    if (contains(hashCode)) {
      docsEnum = te.postings(docsEnum, PostingsEnum.NONE);
      builder.add(docsEnum);
    }
  }
  return builder.build();
}
public DocIdSet getDocIdSet(LeafReaderContext context) throws IOException {
  final Terms terms = context.reader().terms(field);
  // make sure the field exists
  if (terms == null) return null;
  final BytesRefTermsSet termsSet = this.getTermsSet();
  // make sure there are terms to filter on
  if (termsSet == null || termsSet.isEmpty()) return null;
  SeekingTermSetTermsEnum termsEnum = new SeekingTermSetTermsEnum(terms.iterator(), termsSet);
  DocIdSetBuilder builder = new DocIdSetBuilder(context.reader().maxDoc());
  PostingsEnum docs = null;
  while (termsEnum.next() != null) {
    docs = termsEnum.postings(docs, PostingsEnum.NONE);
    builder.add(docs);
  }
  return builder.build();
}
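For context, a minimal sketch of consuming the per-segment set returned above; nothing here is part of the snippet itself:

DocIdSet set = getDocIdSet(context);
if (set != null) {
  DocIdSetIterator it = set.iterator();
  if (it != null) { // a DocIdSet may expose a null iterator when empty
    for (int doc = it.nextDoc();
         doc != DocIdSetIterator.NO_MORE_DOCS;
         doc = it.nextDoc()) {
      // doc is segment-local; add context.docBase for an index-wide id
    }
  }
}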
private static final class QueryState {
  final IndexInput in;
  byte[] scratch = new byte[16];
  final ByteArrayDataInput scratchReader = new ByteArrayDataInput(scratch);
  final DocIdSetBuilder docs;
  final int xMin;
  final int xMax;
  final int yMin;
  final int yMax;
  final int zMin;
  final int zMax;
  final ValueFilter valueFilter;

  public QueryState(IndexInput in, int maxDoc,
                    int xMin, int xMax,
                    int yMin, int yMax,
                    int zMin, int zMax,
                    ValueFilter valueFilter) {
    this.in = in;
    this.docs = new DocIdSetBuilder(maxDoc);
    this.xMin = xMin;
    this.xMax = xMax;
    this.yMin = yMin;
    this.yMax = yMax;
    this.zMin = zMin;
    this.zMax = zMax;
    this.valueFilter = valueFilter;
  }
}
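A hedged construction sketch for this variant of QueryState (note the `addAll`/`addSome` snippets earlier in this section use a slightly different variant with value bounds instead of x/y/z bounds); `dir`, `fileName`, the bounds, and `valueFilter` are all placeholders:

IndexInput in = dir.openInput(fileName, IOContext.READ); // placeholder file
QueryState state = new QueryState(in, reader.maxDoc(),
    xMin, xMax, yMin, yMax, zMin, zMax, valueFilter);
// ... traverse the tree, adding matching docs into state.docs ...
DocIdSet result = state.docs.build(); // freeze the collected doc ids
in.close();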