/** Reports whether positions were indexed, delegating to the wrapped instance. */
@Override
public boolean hasPositions() {
  return in.hasPositions();
}
/**
 * Sole constructor.
 *
 * @param subs The {@link Terms} instances of all sub-readers.
 * @param subSlices A parallel array (matching {@code subs}) describing the sub-reader slices.
 */
public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
  this.subs = subs;
  this.subSlices = subSlices;
  assert subs.length > 0 : "inefficient: don't use MultiTerms over one sub";

  // A feature is available on the merged view only if every sub supports it
  // (AND-folded); payloads are the exception and start from OR-folding below.
  boolean allFreqs = true;
  boolean allOffsets = true;
  boolean allPositions = true;
  boolean anyPayloads = false;
  for (Terms sub : subs) {
    allFreqs &= sub.hasFreqs();
    allOffsets &= sub.hasOffsets();
    allPositions &= sub.hasPositions();
    anyPayloads |= sub.hasPayloads();
  }

  hasFreqs = allFreqs;
  hasOffsets = allOffsets;
  hasPositions = allPositions;
  // Payloads are only usable if all subs have positions, and at least one sub has payloads.
  hasPayloads = allPositions && anyPayloads;
}
/**
 * Returns match positions for this synonym set in {@code doc}, or falls back to the
 * generic implementation when the field has no indexed positions.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  String field = terms[0].field();
  Terms fieldTerms = context.reader().terms(field);
  if (fieldTerms == null || !fieldTerms.hasPositions()) {
    // No positional data for this field in this segment: use the default behavior.
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(
      field,
      () ->
          DisjunctionMatchesIterator.fromTerms(
              context, doc, getQuery(), field, Arrays.asList(SynonymQuery.this.terms)));
}
/**
 * Returns match positions for the expanded terms in {@code doc}; {@code null} when the
 * field is absent from the segment, and the generic fallback when positions are missing.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  final Terms fieldTerms = context.reader().terms(query.field);
  if (fieldTerms == null) {
    // Field not present in this segment.
    return null;
  }
  if (!fieldTerms.hasPositions()) {
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(
      query.field,
      () ->
          DisjunctionMatchesIterator.fromTermsEnum(
              context, doc, query, query.field, query.getTermsEnum(fieldTerms)));
}
/**
 * Returns match positions for the stored term set in {@code doc}, delegating to the
 * generic implementation when the field has no positional data.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  Terms fieldTerms = context.reader().terms(field);
  if (fieldTerms == null || !fieldTerms.hasPositions()) {
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(
      field,
      () ->
          DisjunctionMatchesIterator.fromTermsEnum(
              context, doc, getQuery(), field, termData.iterator()));
}
// Phrase matching needs per-term positions; fail fast with a descriptive error when the
// field was indexed without them rather than producing silently wrong results.
if (fieldTerms.hasPositions() == false) { throw new IllegalStateException("field \"" + field + "\" was indexed without position data;" + " cannot run MultiPhraseQuery (phrase=" + getQuery() + ")");
// Snapshot which index features (positions/payloads/offsets) exist for this field's terms
// so subsequent code can branch without repeated calls.
final boolean hasPositions = terms.hasPositions(); final boolean hasPayloads = terms.hasPayloads(); final boolean hasOffsets = terms.hasOffsets();
// Positions are mandatory for phrase execution; surface a clear error instead of failing later.
} else if (!fieldTerms.hasPositions()) { throw new IllegalStateException("field \"" +field + "\" was indexed without position data; cannot run CustomPhraseQuery (phrase=" + this.getQuery() + ")");
// Cross-checks term-vector positions against the inverted-index postings: for every term
// occurrence, the vector's position must equal the corresponding postings position when
// both sides carry positional data; any mismatch indicates index corruption.
final boolean hasProx = terms.hasOffsets() || terms.hasPositions(); BytesRef term = null; while ((term = termsEnum.next()) != null) { for (int i = 0; i < tf; i++) { int pos = postings.nextPosition(); if (postingsTerms.hasPositions()) { int postingsPos = postingsDocs.nextPosition(); if (terms.hasPositions() && pos != postingsPos) { throw new RuntimeException("vector term=" + term + " field=" + field + " doc=" + j + ": pos=" + pos + " differs from postings pos=" + postingsPos);
/**
 * Returns offset-based match positions for this term in {@code doc}; {@code null} when the
 * term is absent from the segment, and the generic fallback when positions are missing.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  TermsEnum termsEnum = getTermsEnum(context);
  if (termsEnum == null) {
    // Term does not occur in this segment.
    return null;
  }
  if (!context.reader().terms(term.field()).hasPositions()) {
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(
      term.field(),
      () -> {
        PostingsEnum postings = termsEnum.postings(null, PostingsEnum.OFFSETS);
        if (postings.advance(doc) != doc) {
          return null;
        }
        return new TermMatchesIterator(getQuery(), postings);
      });
}
// PhraseQuery cannot run without per-term positions; report the offending field and phrase.
if (fieldTerms.hasPositions() == false) { throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run PhraseQuery (phrase=" + getQuery() + ")");
@Override public Spans getSpans(final LeafReaderContext context, Postings requiredPostings) throws IOException { assert termContext.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context); final TermState state = termContext.get(context.ord); if (state == null) { // term is not present in that reader assert context.reader().docFreq(term) == 0 : "no termstate found but term exists in reader term=" + term; return null; } final Terms terms = context.reader().terms(term.field()); if (terms == null) return null; if (terms.hasPositions() == false) throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run SpanTermQuery (term=" + term.text() + ")"); final TermsEnum termsEnum = terms.iterator(); termsEnum.seekExact(term.bytes(), state); final PostingsEnum postings = termsEnum.postings(null, requiredPostings.getRequiredPostings()); float positionsCost = termPositionsCost(termsEnum) * PHRASE_TO_SPAN_TERM_POSITIONS_COST; return new TermSpans(getSimScorer(context), postings, term, positionsCost); } }
// Capture the field's indexed features once; later per-position decoding branches on these.
final boolean hasPositions = terms.hasPositions(); final boolean hasOffsets = terms.hasOffsets(); final boolean hasPayloads = terms.hasPayloads();
// Feature availability flags for this terms instance, read once up front.
final boolean hasPositions = terms.hasPositions(); final boolean hasOffsets = terms.hasOffsets(); final boolean hasPayloads = terms.hasPayloads();
private void initMemory(Terms curTerms, int termFreq) { // init memory for performance reasons if (curTerms.hasPositions()) { currentPositions = ArrayUtil.grow(currentPositions, termFreq); } if (curTerms.hasOffsets()) { currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq); currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq); } if (curTerms.hasPayloads()) { currentPayloads = new BytesArray[termFreq]; } }
/**
 * Serializes the buffered per-position values (position, offsets, payload) for one term
 * into {@code builder}. Emits nothing when the field carries none of those features.
 *
 * @param builder output builder; one object per token is appended under {@code TOKENS}
 * @param curTerms terms source describing which features are present
 * @param termFreq number of buffered positions to emit
 * @throws IOException if the builder fails
 */
private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
  if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
    return;
  }
  builder.startArray(FieldStrings.TOKENS);
  for (int i = 0; i < termFreq; i++) {
    builder.startObject();
    if (curTerms.hasPositions()) {
      builder.field(FieldStrings.POS, currentPositions[i]);
    }
    if (curTerms.hasOffsets()) {
      builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
      builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
    }
    // A position may legitimately have no payload: initValues stores null in that slot,
    // so guard against null before dereferencing to avoid an NPE.
    if (curTerms.hasPayloads() && currentPayloads[i] != null && currentPayloads[i].length() > 0) {
      BytesRef bytesRef = currentPayloads[i].toBytesRef();
      builder.field(FieldStrings.PAYLOAD, bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    builder.endObject();
  }
  builder.endArray();
}
/**
 * Fills the scratch buffers with this term's per-position data (position, offsets,
 * payload) read from {@code posEnum}, one entry per occurrence.
 *
 * @param curTerms terms source describing which features are present
 * @param posEnum postings positioned on the current document
 * @param termFreq number of positions to consume
 * @throws IOException on read failure
 */
private void initValues(Terms curTerms, PostingsEnum posEnum, int termFreq) throws IOException {
  for (int slot = 0; slot < termFreq; slot++) {
    // nextPosition() must be called once per occurrence even if positions are not stored,
    // so offsets and payloads advance in lockstep.
    int nextPos = posEnum.nextPosition();
    if (curTerms.hasPositions()) {
      currentPositions[slot] = nextPos;
    }
    if (curTerms.hasOffsets()) {
      currentStartOffset[slot] = posEnum.startOffset();
      currentEndOffset[slot] = posEnum.endOffset();
    }
    if (curTerms.hasPayloads()) {
      BytesRef curPayload = posEnum.getPayload();
      currentPayloads[slot] =
          (curPayload == null) ? null : new BytesArray(curPayload.bytes, 0, curPayload.length);
    }
  }
}
// A feature is emitted only when the caller requested it AND the term vector actually stores it.
boolean positions = flags.contains(Flag.Positions) && fieldTermVector.hasPositions(); boolean offsets = flags.contains(Flag.Offsets) && fieldTermVector.hasOffsets(); boolean payloads = flags.contains(Flag.Payloads) && fieldTermVector.hasPayloads();
/**
 * Returns match positions for the expanded terms in {@code doc}. Yields {@code null} when
 * the field is missing from this segment and defers to the generic implementation when
 * the field lacks positional data.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  final Terms indexTerms = context.reader().terms(query.field);
  if (indexTerms == null) {
    return null;
  }
  if (!indexTerms.hasPositions()) {
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(
      query.field,
      () ->
          DisjunctionMatchesIterator.fromTermsEnum(
              context, doc, query, query.field, query.getTermsEnum(indexTerms)));
}
/**
 * Returns match positions for the stored term set in {@code doc}; falls back to the
 * generic implementation when the field is absent or has no positions.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  Terms indexTerms = context.reader().terms(field);
  boolean positionsAvailable = indexTerms != null && indexTerms.hasPositions();
  if (!positionsAvailable) {
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(
      field,
      () ->
          DisjunctionMatchesIterator.fromTermsEnum(
              context, doc, getQuery(), field, termData.iterator()));
}