/**
 * Builds a {@link Scorer} for {@code query} over the single given segment reader.
 * Query caching is disabled because the scorer is used for a one-shot pass.
 *
 * @param query  the query to score; rewritten before weight creation
 * @param reader the segment to score against
 * @return a scorer, or {@code null} if no document in the segment can match
 * @throws IOException on low-level index errors
 */
private static Scorer getScorer(Query query, CodecReader reader) throws IOException {
  final IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null); // one-shot use: caching would only add overhead
  final Query rewritten = searcher.rewrite(query);
  final Weight weight = searcher.createWeight(rewritten, false, 1.0f);
  return weight.scorer(reader.getContext());
}
/**
 * Expert: low-level method that computes an {@link Explanation} describing how
 * {@code doc} scored against {@code weight}.
 *
 * <p>Intended for developing Similarity implementations. Computing an
 * explanation is as expensive as running the query over the entire index, so
 * it should not be produced for every hit. Applications should call
 * {@link IndexSearcher#explain(Query, int)} instead.
 *
 * @param weight the weight to explain against
 * @param doc    a searcher-wide (top-level) document id
 * @return the explanation, or a no-match explanation when the doc is deleted
 * @throws BooleanQuery.TooManyClauses If a query would exceed
 *         {@link BooleanQuery#getMaxClauseCount()} clauses.
 * @throws IOException on low-level index errors
 */
protected Explanation explain(Weight weight, int doc) throws IOException {
  final int leafIndex = ReaderUtil.subIndex(doc, leafContexts);
  final LeafReaderContext leaf = leafContexts.get(leafIndex);
  // Translate the searcher-wide doc id into the segment-local id space.
  final int segmentDoc = doc - leaf.docBase;
  final Bits liveDocs = leaf.reader().getLiveDocs();
  if (liveDocs != null && !liveDocs.get(segmentDoc)) {
    return Explanation.noMatch("Document " + doc + " is deleted");
  }
  return weight.explain(leaf, segmentDoc);
}
/**
 * Delegates term extraction to every sub-weight, accumulating into {@code terms}.
 */
@Override
public void extractTerms(Set<Term> terms) {
  for (Weight subWeight : weights) {
    subWeight.extractTerms(terms);
  }
}
// NOTE(review): this looks like the interior of a query-cache bulkScorer() method
// (LRUQueryCache-style wrapper) but the hunk is NOT syntactically complete: the
// enclosing method header, the `try { lock.lock(); ... }` section, and several
// closing braces are missing, and `return in.bulkScorer(context);` appears
// duplicated back-to-back twice (the second occurrence would be unreachable).
// Presumably two versions of the same method were spliced together — TODO
// recover the original method from version control before editing further.
if (in.isCacheable(context) == false) { return in.bulkScorer(context); return in.bulkScorer(context); final IndexReader.CacheHelper cacheHelper = context.reader().getCoreCacheHelper(); if (cacheHelper == null) { return in.bulkScorer(context); return in.bulkScorer(context); docIdSet = get(in.getQuery(), context, cacheHelper); } finally { lock.unlock(); if (policy.shouldCache(in.getQuery())) { docIdSet = cache(context); putIfAbsent(in.getQuery(), context, docIdSet, cacheHelper); } else { return in.bulkScorer(context);
// NOTE(review): same corruption pattern as the bulkScorer hunk above, here for
// scorerSupplier(): duplicated back-to-back `return in.scorerSupplier(context);`
// statements (second unreachable), a dangling `} finally { lock.unlock();`
// without its `try`, and unbalanced braces. This fragment cannot compile as-is;
// restore the full method from the upstream source before making changes.
if (in.isCacheable(context) == false) { return in.scorerSupplier(context); return in.scorerSupplier(context); final IndexReader.CacheHelper cacheHelper = context.reader().getCoreCacheHelper(); if (cacheHelper == null) { return in.scorerSupplier(context); return in.scorerSupplier(context); docIdSet = get(in.getQuery(), context, cacheHelper); } finally { lock.unlock(); if (policy.shouldCache(in.getQuery())) { docIdSet = cache(context); putIfAbsent(in.getQuery(), context, docIdSet, cacheHelper); } else { return in.scorerSupplier(context);
/**
 * Builds a {@link BitSetProducer} that, per leaf, materialises the set of
 * parent (non-nested) documents as a {@link BitSet}.
 *
 * @param indexVersionCreated index version used to pick the non-nested filter
 * @return a producer yielding {@code null} for leaves with no parent docs
 */
private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
  return context -> {
    final Query parentFilter = Queries.newNonNestedFilter(indexVersionCreated);
    final IndexReaderContext topLevel = ReaderUtil.getTopLevelContext(context);
    final IndexSearcher searcher = new IndexSearcher(topLevel);
    searcher.setQueryCache(null); // evaluated once per leaf; caching not useful
    final Weight weight = searcher.createNormalizedWeight(parentFilter, false);
    final Scorer scorer = weight.scorer(context);
    if (scorer == null) {
      return null;
    }
    return BitSet.of(scorer.iterator(), context.reader().maxDoc());
  };
}
}
// NOTE(review): incomplete hunk — appears to build debug output (rewritten query
// string plus per-term docFreq map) and then run a search, but the `if` and the
// `for` loop are never closed before the dangling `} else {`, and the
// surrounding method (where `q`, `terms`, `freqs`, `td`, `skip`, `limit`,
// `sort` are declared) is not visible. The `td = searcher.search(...)` calls
// presumably belong outside the rewrite block — TODO confirm against the
// original file before editing.
if (getBooleanParameter(req, "rewrite")) { final Query rewritten_q = q.rewrite(searcher .getIndexReader()); queryRow.put("rewritten_q", rewritten_q.toString()); final Weight weight = rewritten_q.createWeight(searcher, false); weight.extractTerms(terms); for (final Object term : terms) { final int freq = searcher.getIndexReader().docFreq((Term) term); freqs.put(term.toString(), freq); td = searcher.search(q, skip + limit); } else { td = searcher.search(q, skip + limit, sort);
// NOTE(review): incomplete hunk from a delete-by-query apply path. Two
// different `while` headers appear spliced back-to-back
// (`!= NO_MORE_DOCS` vs `< limit`) — only one can be the real loop condition;
// the `< limit` form presumably belongs to the sorted-segment branch guarded by
// `segState.rld.sortMap != null`. Braces are unbalanced and the enclosing
// method signature (declaring `limit`, `docID`, `delCount`, `segState`,
// `privateSegment`) is not visible. TODO reconstruct from the original source.
limit = Integer.MAX_VALUE; final IndexSearcher searcher = new IndexSearcher(readerContext.reader()); searcher.setQueryCache(null); query = searcher.rewrite(query); final Weight weight = searcher.createWeight(query, false, 1); final Scorer scorer = weight.scorer(readerContext); if (scorer != null) { final DocIdSetIterator it = scorer.iterator(); if (segState.rld.sortMap != null && limit != Integer.MAX_VALUE) { assert privateSegment != null; while ((docID = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { while ((docID = it.nextDoc()) < limit) { if (segState.rld.delete(docID)) { delCount++;
/**
 * Returns a {@link DocIdSetIterator} over the inner (nested) documents of the
 * given leaf, or {@code null} when no inner document matches.
 *
 * @param ctx the leaf reader context to iterate over
 * @throws IOException on low-level index errors
 */
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
  final IndexReaderContext topLevel = ReaderUtil.getTopLevelContext(ctx);
  final IndexSearcher searcher = new IndexSearcher(topLevel);
  final Weight weight = searcher.createNormalizedWeight(innerQuery, false);
  final Scorer scorer = weight.scorer(ctx);
  if (scorer == null) {
    return null;
  }
  return scorer.iterator();
}
}
// NOTE(review): this hunk begins mid-parameter-list (`LongConsumer onNewSeqNo)`
// — the method name and earlier parameters are cut off) and its braces are
// unbalanced: the `if (scorer == null) { continue;` guard is never closed, and
// the method body runs past the end of the hunk. Semantics (scanning docs in a
// seq_no range and reading their seq_no doc values) are plausible but cannot be
// fully confirmed from here — TODO restore the complete method before editing.
LongConsumer onNewSeqNo) throws IOException { final DirectoryReader reader = Lucene.wrapAllDocsLive(directoryReader); final IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); final Query query = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo); final Weight weight = searcher.createWeight(query, false, 1.0f); for (LeafReaderContext leaf : reader.leaves()) { final Scorer scorer = weight.scorer(leaf); if (scorer == null) { continue; final DocIdSetIterator docIdSetIterator = scorer.iterator(); final NumericDocValues seqNoDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.NAME); int docId; while ((docId = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { if (seqNoDocValues == null || seqNoDocValues.advanceExact(docId) == false) { throw new IllegalStateException("seq_no doc_values not found for doc_id=" + docId);
// NOTE(review): incomplete hunk with no enclosing method visible; `readerUpto`,
// `readerContext`, `endDoc`, `scorer`, `targetDoc`, and `hit` are declared
// elsewhere. Also note `weight.scorer(readerContext)` may return null (no
// matches in the leaf) and the following `scorer.docID()` would then NPE —
// presumably the missing surrounding code guards against that; TODO confirm
// against the original source. Braces are unbalanced (`} else {` dangles).
List<LeafReaderContext> leaves = searcher.getIndexReader().leaves(); Query rewritten = searcher.rewrite(query); Weight weight = searcher.createWeight(rewritten, true, 1); readerUpto++; readerContext = leaves.get(readerUpto); endDoc = readerContext.docBase + readerContext.reader().maxDoc(); scorer = weight.scorer(readerContext); int actualDoc = scorer.docID(); if (actualDoc < targetDoc) { actualDoc = scorer.iterator().advance(targetDoc); hit.score = combine(hit.score, true, scorer.score()); } else {
/** * Check whether there is one or more documents matching the provided query. */ public static boolean exists(IndexSearcher searcher, Query query) throws IOException { final Weight weight = searcher.createNormalizedWeight(query, false); // the scorer API should be more efficient at stopping after the first // match than the bulk scorer API for (LeafReaderContext context : searcher.getIndexReader().leaves()) { final Scorer scorer = weight.scorer(context); if (scorer == null) { continue; } final Bits liveDocs = context.reader().getLiveDocs(); final DocIdSetIterator iterator = scorer.iterator(); for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) { if (liveDocs == null || liveDocs.get(doc)) { return true; } } } return false; }
// NOTE(review): incomplete hunk — the two `if (needsScores)` sections appear to
// come from different parts of one method (setup vs. the per-doc loop), with
// the intervening code and several closing braces missing. `docIdSetIterator`,
// `docID`, `weight`, `entry`, and `subCollector` are declared elsewhere. The
// asserts suggest the scorer iterator is expected to stay behind-or-at the
// driving iterator and land exactly on docID after advance — TODO confirm once
// the full method is restored.
DocIdSetIterator scorerIt = null; if (needsScores) { Scorer scorer = weight.scorer(entry.context); if (scorer != null) { scorerIt = scorer.iterator(); subCollector.setScorer(scorer); while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { if (needsScores) { assert scorerIt != null && scorerIt.docID() < docID; scorerIt.advance(docID); assert scorerIt.docID() == docID;
private static void suggest(IndexSearcher searcher, CompletionQuery query, TopSuggestDocsCollector collector) throws IOException { query = (CompletionQuery) query.rewrite(searcher.getIndexReader()); Weight weight = query.createWeight(searcher, collector.needsScores(), 1f); for (LeafReaderContext context : searcher.getIndexReader().leaves()) { BulkScorer scorer = weight.bulkScorer(context); if (scorer != null) { try { scorer.score(collector.getLeafCollector(context), context.reader().getLiveDocs()); } catch (CollectionTerminatedException e) { // collection was terminated prematurely // continue with the following leaf } } } } }
// NOTE(review): incomplete hunk from a boolean bulk-scorer path handling
// prohibited (MUST_NOT) clauses. An orphaned ternary tail
// `? prohibited.get(0) : new DisjunctionSumScorer(...)` has lost its condition
// and target variable (presumably `Scorer prohibitedScorer = prohibited.size()
// == 1 ? ... : ...;` — TODO confirm), and the loop/if structure around
// `cIter.next()` is missing its closing braces. Do not edit until the full
// method is recovered.
costThreshold = context.reader().maxDoc() / 3; BooleanClause c = cIter.next(); if (c.isProhibited()) { Scorer scorer = w.scorer(context); if (scorer != null) { prohibited.add(scorer); ? prohibited.get(0) : new DisjunctionSumScorer(this, prohibited, false); if (prohibitedScorer.twoPhaseIterator() != null) { return new ReqExclBulkScorer(positiveScorer, prohibitedScorer.iterator());
// NOTE(review): this `explain` override is NOT complete — the `for` loop header
// that should introduce the index variable `i` over `filterWeights` is missing,
// the early-return `if` is never closed, and a dangling `} else {` precedes the
// scorer-advance section. The visible logic (skip non-matching sub-query,
// consult per-function filter bits, advance a FunctionFactorScorer to the doc)
// matches a function-score explain, but the method cannot compile as shown —
// TODO restore the full body from the original file before changing it.
@Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { Explanation expl = subQueryWeight.explain(context, doc); if (!expl.isMatch()) { return expl; if (filterWeights[i] != null) { final Bits docSet = Lucene.asSequentialAccessBits( context.reader().maxDoc(), filterWeights[i].scorerSupplier(context)); if (docSet.get(doc) == false) { continue; } else { FunctionFactorScorer scorer = functionScorer(context); int actualDoc = scorer.iterator().advance(doc); assert (actualDoc == doc); double score = scorer.computeScore(doc, expl.getValue());
/**
 * Creates the {@link FunctionFactorScorer} for one leaf: the sub-query scorer
 * plus, per score function, its leaf-level function and the bit set of
 * documents its filter accepts (match-all when the function has no filter).
 *
 * @param context the leaf to build the scorer for
 * @return the combined scorer, or {@code null} if the sub-query has no matches
 * @throws IOException on low-level index errors
 */
private FunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
  final Scorer subQueryScorer = subQueryWeight.scorer(context);
  if (subQueryScorer == null) {
    return null;
  }
  final int maxDoc = context.reader().maxDoc();
  final LeafScoreFunction[] leafFunctions = new LeafScoreFunction[functions.length];
  final Bits[] docSets = new Bits[functions.length];
  for (int i = 0; i < functions.length; i++) {
    leafFunctions[i] = functions[i].getLeafScoreFunction(context);
    // Unfiltered functions apply to every doc; filtered ones get random-access bits.
    docSets[i] = filterWeights[i] == null
        ? new Bits.MatchAllBits(maxDoc)
        : Lucene.asSequentialAccessBits(maxDoc, filterWeights[i].scorerSupplier(context));
  }
  return new FunctionFactorScorer(this, subQueryScorer, scoreMode, functions, maxBoost,
      leafFunctions, docSets, combineFunction, needsScores);
}
/**
 * Eagerly runs the wrapped weight's bulk scorer over the whole leaf and
 * materialises the matching doc ids as a cacheable {@link DocIdSet}.
 *
 * @param context the leaf to evaluate
 * @return the matching doc ids, or {@link DocIdSet#EMPTY} when nothing matches
 * @throws IOException on low-level index errors
 */
private DocIdSet cache(LeafReaderContext context) throws IOException {
  final BulkScorer bulkScorer = in.bulkScorer(context);
  if (bulkScorer == null) {
    return DocIdSet.EMPTY;
  }
  return cacheImpl(bulkScorer, context.reader().maxDoc());
}
/**
 * Returns {@link Matches} for a specific document, or {@code null} if the
 * document does not match the parent query.
 *
 * A query match that contains no position information (for example, a Point or
 * DocValues query) will return {@link MatchesUtils#MATCH_WITH_NO_TERMS}
 *
 * @param context the reader's context to create the {@link Matches} for
 * @param doc     the document's id relative to the given context's reader
 * @lucene.experimental
 */
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  final Scorer scorer = scorer(context);
  if (scorer == null) {
    return null;
  }
  final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
  final boolean matched;
  if (twoPhase != null) {
    // Two-phase: the approximation must land on doc AND the exact check pass.
    matched = twoPhase.approximation().advance(doc) == doc && twoPhase.matches();
  } else {
    matched = scorer.iterator().advance(doc) == doc;
  }
  return matched ? MatchesUtils.MATCH_WITH_NO_TERMS : null;
}