if (matchingTerms == null) { docs = termsEnum.postings(docs, PostingsEnum.NONE); builder.add(docs); } else if (matchingTerms.size() < threshold) { matchingTerms.add(new TermAndState(field, termsEnum)); } else { assert matchingTerms.size() == threshold; builder = new DocIdSetBuilder(reader.maxDoc(), terms); docs = termsEnum.postings(docs, PostingsEnum.NONE); builder.add(docs); for (TermAndState t : matchingTerms) { t.termsEnum.seekExact(t.term, t.state); docs = t.termsEnum.postings(docs, PostingsEnum.NONE); builder.add(docs); } else { assert builder != null; return new WeightOrDocIdSet(builder.build());
/**
 * Grows the backing {@code DocIdSetBuilder} by {@code count} docs and refreshes
 * the cached bulk adder so subsequent additions go through the new buffer.
 */
@Override
public void grow(int count) {
  this.adder = this.result.grow(count);
}
/**
 * Build a {@link DocIdSet} from the accumulated doc IDs.
 *
 * <p>Single-use: the internal buffers and bit set are released in the
 * {@code finally} block, so this builder cannot be reused afterwards.
 */
public DocIdSet build() {
  try {
    if (bitSet != null) {
      // Dense case: we already upgraded to a bit set. Estimate iteration cost
      // as number of distinct docs: counter tracks added values, divided by
      // the average number of values per doc.
      assert counter >= 0;
      final long cost = Math.round(counter / numValuesPerDoc);
      return new BitDocIdSet(bitSet, cost);
    } else {
      // Sparse case: merge all int buffers into one, radix-sort using only as
      // many bits as maxDoc requires, then deduplicate if the same doc may
      // have been added more than once.
      Buffer concatenated = concat(buffers);
      LSBRadixSorter sorter = new LSBRadixSorter();
      sorter.sort(PackedInts.bitsRequired(maxDoc - 1), concatenated.array, concatenated.length);
      final int l;
      if (multivalued) {
        l = dedup(concatenated.array, concatenated.length);
      } else {
        assert noDups(concatenated.array, concatenated.length);
        l = concatenated.length;
      }
      assert l <= concatenated.length;
      // Sentinel terminator so the iterator can stop without bounds checks.
      // NOTE(review): assumes concat() left at least one spare slot — confirm.
      concatenated.array[l] = DocIdSetIterator.NO_MORE_DOCS;
      return new IntArrayDocIdSet(concatenated.array, l);
    }
  } finally {
    // Release memory eagerly and make accidental reuse fail fast.
    this.buffers = null;
    this.bitSet = null;
  }
}
/**
 * Reserve space and return a {@link BulkAdder} object that can be used to
 * add up to {@code numDocs} documents.
 */
public BulkAdder grow(int numDocs) {
  if (bitSet != null) {
    // Already dense: the bit set can hold any doc id, just track the count.
    counter += numDocs;
    return adder;
  }
  final long projected = (long) totalAllocated + numDocs;
  if (projected <= threshold) {
    // Still sparse enough for the buffered representation to pay off.
    ensureBufferCapacity(numDocs);
  } else {
    // Too many ids for buffers: switch to a bit set and start counting.
    upgradeToBitSet();
    counter += numDocs;
  }
  return adder;
}
private void ensureBufferCapacity(int numDocs) { if (buffers.isEmpty()) { addBuffer(additionalCapacity(numDocs)); return; } Buffer current = buffers.get(buffers.size() - 1); if (current.array.length - current.length >= numDocs) { // current buffer is large enough return; } if (current.length < current.array.length - (current.array.length >>> 3)) { // current buffer is less than 7/8 full, resize rather than waste space growBuffer(current, additionalCapacity(numDocs)); } else { addBuffer(additionalCapacity(numDocs)); } }
// Build the per-segment result set from the collected point values and wrap it
// in a constant-score scorer (docs either match or they don't; no ranking).
// NOTE(review): nothing visible here populates `result` between construction
// and build() — confirm the PointValues intersection that fills it runs nearby.
DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field); return new ConstantScoreScorer(this, score(), result.build().iterator());
// Point-values visitor called once per (doc, value) pair in index order.
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
  // Reject values outside the query range. The same array is passed as both
  // arguments — presumably compare() takes (min, max) and this treats the
  // single value as a degenerate [value, value] cell; TODO confirm contract.
  if (compare(packedValue, packedValue) != PointValues.Relation.CELL_CROSSES_QUERY) {
    remaining --;
    return;
  }
  long bucket = bucketFunction.applyAsLong(packedValue);
  // Values arrive in sorted order (see early-termination note below), so a
  // bucket change means the previous bucket is complete and can be flushed.
  if (first == false && bucket != lastBucket) {
    final DocIdSet docIdSet = bucketDocsBuilder.build();
    if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) &&
        // lower bucket is inclusive
        lowerBucket != lastBucket) {
      // this bucket does not have any competitive composite buckets,
      // we can early terminate the collection because the remaining buckets are guaranteed
      // to be greater than this bucket.
      throw new CollectionTerminatedException();
    }
    // Start a fresh builder for the new bucket, pre-sized with the number of
    // values still expected to arrive.
    bucketDocsBuilder = new DocIdSetBuilder(maxDoc);
    assert remaining > 0;
    adder = bucketDocsBuilder.grow(remaining);
  }
  lastBucket = bucket;
  first = false;
  adder.add(docID);
  remaining --;
}
/** Creates a doc recorder sized for a segment of {@code maxDoc} documents. */
Docs(int maxDoc) {
  this.bits = new DocIdSetBuilder(maxDoc);
}
/**
 * Return the {@link DocIdSet} which contains all the recorded docs.
 *
 * <p>Note: {@code {@see ...}} is not a valid inline Javadoc tag; {@code {@link}}
 * is the correct inline cross-reference form.
 */
public DocIdSet getDocIdSet() {
  return bits.build();
}
}
/** Adds all docs from the current block. */ private int addAll(QueryState state) throws IOException { // How many values are stored in this leaf cell: int count = state.in.readVInt(); state.docs.grow(count); for(int i=0;i<count;i++) { int docID = state.in.readInt(); state.docs.add(docID); } return count; }
// Finalizes collection. Results are handed back through the
// exactDocIdSet/approxDocIdSet fields rather than the return value — see the
// note on the deliberate null return below.
@Override
protected DocIdSet finish() throws IOException {
  if (exactIsEmpty) {
    // Keep null (not an empty set) so callers can skip the exact phase.
    exactDocIdSet = null;
  } else {
    exactDocIdSet = exactBuilder.build();
  }
  if (approxIsEmpty) {
    // No approximate-only hits were collected, so the approximate set equals
    // the exact set; reuse it instead of building a second set.
    approxDocIdSet = exactDocIdSet;//optimization
  } else {
    // The approximate set must contain every exact hit as well, so fold the
    // exact matches into the approximate builder before building.
    if (exactDocIdSet != null) {
      approxBuilder.add(exactDocIdSet.iterator());
    }
    approxDocIdSet = approxBuilder.build();
  }
  return null;//unused in this weird re-use of AVPTQ
}
/** Record the given document. */
public void addDoc(int docId) {
  this.bits.add(docId);
}
grow((int) Math.min(Integer.MAX_VALUE, iter.cost())); growBuffer(bufferSize+1); upgradeToBitSet(); for (int doc = iter.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iter.nextDoc()) { bitSet.set(doc);
/**
 * Reserve space so that this builder can hold {@code numDocs} MORE documents.
 */
public void grow(int numDocs) {
  if (bitSet == null) {
    // Widen BEFORE adding: `bufferSize + numDocs` in int arithmetic can exceed
    // Integer.MAX_VALUE and wrap negative, which would slip past the threshold
    // check and call growBuffer() with a bogus size. The long declaration of
    // newLength shows widening was the intent all along.
    final long newLength = (long) bufferSize + numDocs;
    if (newLength < threshold) {
      growBuffer((int) newLength);
    } else {
      // Past the sparse threshold: switch to the dense bit-set representation.
      upgradeToBitSet();
    }
  }
  // Once dense (bitSet != null) no reservation is needed: a bit set already
  // addresses every possible doc id.
}
/**
 * Expert: build a {@link DocIdSet} with a hint on the cost that the resulting
 * {@link DocIdSet} would have.
 *
 * <p>A {@code costHint} of -1 selects the single-argument {@link BitDocIdSet}
 * constructor instead of forwarding the hint. Single-use: internal state is
 * released in the {@code finally} block.
 */
public DocIdSet build(long costHint) {
  try {
    if (bitSet != null) {
      // Dense case: wrap the bit set, forwarding the cost hint when provided.
      if (costHint == -1) {
        return new BitDocIdSet(bitSet);
      } else {
        return new BitDocIdSet(bitSet, costHint);
      }
    } else {
      // Sparse case: sort the accumulated doc IDs, then drop duplicates.
      LSBRadixSorter sorter = new LSBRadixSorter();
      sorter.sort(buffer, 0, bufferSize);
      final int l = dedup(buffer, bufferSize);
      assert l <= bufferSize;
      // Ensure room for the terminating sentinel before writing it; the
      // sentinel lets the iterator stop without per-step bounds checks.
      buffer = ArrayUtil.grow(buffer, l + 1);
      buffer[l] = DocIdSetIterator.NO_MORE_DOCS;
      return new IntArrayDocIdSet(buffer, l);
    }
  } finally {
    // Release memory eagerly and make accidental reuse fail fast.
    this.buffer = null;
    this.bufferSize = 0;
    this.bitSet = null;
  }
}
// Build the per-segment result set from the collected point values and wrap it
// in a constant-score scorer (docs either match or they don't; no ranking).
// NOTE(review): nothing visible here populates `result` between construction
// and build() — confirm the PointValues intersection that fills it runs nearby.
DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field); return new ConstantScoreScorer(this, score(), result.build().iterator());
// Point-values visitor called once per (doc, value) pair in index order.
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
  // Reject values outside the query range. The same array is passed as both
  // arguments — presumably compare() takes (min, max) and this treats the
  // single value as a degenerate [value, value] cell; TODO confirm contract.
  if (compare(packedValue, packedValue) != PointValues.Relation.CELL_CROSSES_QUERY) {
    remaining --;
    return;
  }
  long bucket = bucketFunction.applyAsLong(packedValue);
  // Values arrive in sorted order (see early-termination note below), so a
  // bucket change means the previous bucket is complete and can be flushed.
  if (first == false && bucket != lastBucket) {
    final DocIdSet docIdSet = bucketDocsBuilder.build();
    if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) &&
        // lower bucket is inclusive
        lowerBucket != lastBucket) {
      // this bucket does not have any competitive composite buckets,
      // we can early terminate the collection because the remaining buckets are guaranteed
      // to be greater than this bucket.
      throw new CollectionTerminatedException();
    }
    // Start a fresh builder for the new bucket, pre-sized with the number of
    // values still expected to arrive.
    bucketDocsBuilder = new DocIdSetBuilder(maxDoc);
    assert remaining > 0;
    adder = bucketDocsBuilder.grow(remaining);
  }
  lastBucket = bucket;
  first = false;
  adder.add(docID);
  remaining --;
}
/** Creates a doc recorder sized for a segment of {@code maxDoc} documents. */
public Docs(int maxDoc) {
  this.bits = new DocIdSetBuilder(maxDoc);
}
/**
 * Return the {@link DocIdSet} which contains all the recorded docs.
 *
 * <p>Note: {@code {@see ...}} is not a valid inline Javadoc tag; {@code {@link}}
 * is the correct inline cross-reference form.
 */
public DocIdSet getDocIdSet() {
  return bits.build();
}
}