@Override public boolean hasOffsets() { return in.hasOffsets(); }
/**
 * Sole constructor.
 *
 * @param subs The {@link Terms} instances of all sub-readers.
 * @param subSlices A parallel array (matching {@code subs}) describing the sub-reader slices.
 */
public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
  this.subs = subs;
  this.subSlices = subSlices;
  assert subs.length > 0 : "inefficient: don't use MultiTerms over one sub";

  boolean _hasFreqs = true;
  boolean _hasOffsets = true;
  boolean _hasPositions = true;
  boolean _hasPayloads = false;
  for (int i = 0; i < subs.length; i++) {
    _hasFreqs &= subs[i].hasFreqs();
    _hasOffsets &= subs[i].hasOffsets();
    _hasPositions &= subs[i].hasPositions();
    _hasPayloads |= subs[i].hasPayloads();
  }

  hasFreqs = _hasFreqs;
  hasOffsets = _hasOffsets;
  hasPositions = _hasPositions;
  hasPayloads = hasPositions && _hasPayloads; // if all subs have positions, and at least one has payloads
}
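A minimal sketch of how the merged capability flags are typically consumed. It assumes the parallel subs/slices arrays have already been collected from the per-segment readers (how they are gathered is not shown here), and the method name is hypothetical.

import java.io.IOException;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.ReaderSlice;
import org.apache.lucene.index.Terms;

// Sketch only: 'subs' and 'slices' are assumed to be the parallel per-segment arrays
// that the constructor above expects.
static void describeMergedView(Terms[] subs, ReaderSlice[] slices) throws IOException {
  Terms merged = new MultiTerms(subs, slices);
  // Freqs, offsets and positions are advertised only if *every* sub-reader has them.
  System.out.println("freqs:     " + merged.hasFreqs());
  System.out.println("offsets:   " + merged.hasOffsets());
  System.out.println("positions: " + merged.hasPositions());
  // Payloads additionally require positions on all subs and payloads on at least one sub.
  System.out.println("payloads:  " + merged.hasPayloads());
}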
final boolean hasPositions = terms.hasPositions();
final boolean hasPayloads = terms.hasPayloads();
final boolean hasOffsets = terms.hasOffsets();
final boolean hasProx = terms.hasOffsets() || terms.hasPositions();
BytesRef term = null;
while ((term = termsEnum.next()) != null) {
  if (startOffset != -1 && endOffset != -1 && postingsTerms.hasOffsets()) {
    int postingsStartOffset = postingsDocs.startOffset();
    int postingsEndOffset = postingsDocs.endOffset();
final boolean hasOffsets = terms.hasOffsets();
final boolean hasPayloads = terms.hasPayloads();
assert !hasPayloads || hasPositions;
private void initMemory(Terms curTerms, int termFreq) {
  // init memory for performance reasons
  if (curTerms.hasPositions()) {
    currentPositions = ArrayUtil.grow(currentPositions, termFreq);
  }
  if (curTerms.hasOffsets()) {
    currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq);
    currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq);
  }
  if (curTerms.hasPayloads()) {
    currentPayloads = new BytesArray[termFreq];
  }
}
private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
  if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
    return;
  }
  builder.startArray(FieldStrings.TOKENS);
  for (int i = 0; i < termFreq; i++) {
    builder.startObject();
    if (curTerms.hasPositions()) {
      builder.field(FieldStrings.POS, currentPositions[i]);
    }
    if (curTerms.hasOffsets()) {
      builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
      builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
    }
    // null guard: initValues stores null when a position carries no payload
    if (curTerms.hasPayloads() && currentPayloads[i] != null && currentPayloads[i].length() > 0) {
      BytesRef bytesRef = currentPayloads[i].toBytesRef();
      builder.field(FieldStrings.PAYLOAD, bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    builder.endObject();
  }
  builder.endArray();
}
private void initValues(Terms curTerms, PostingsEnum posEnum, int termFreq) throws IOException {
  for (int j = 0; j < termFreq; j++) {
    int nextPos = posEnum.nextPosition();
    if (curTerms.hasPositions()) {
      currentPositions[j] = nextPos;
    }
    if (curTerms.hasOffsets()) {
      currentStartOffset[j] = posEnum.startOffset();
      currentEndOffset[j] = posEnum.endOffset();
    }
    if (curTerms.hasPayloads()) {
      BytesRef curPayload = posEnum.getPayload();
      if (curPayload != null) {
        currentPayloads[j] = new BytesArray(curPayload.bytes, 0, curPayload.length);
      } else {
        currentPayloads[j] = null;
      }
    }
  }
}
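These three helpers only make sense in a fixed per-term order: grow the buffers, copy the current term's positions/offsets/payloads out of the PostingsEnum, then serialize them. Below is a hedged sketch of that driver loop; the XContentBuilder and the field's term-vector Terms are assumed to come from the surrounding term-vectors response code, and the method name is hypothetical.

// Sketch only: assumes this lives in the same class as the helpers above, with
// 'fieldTerms' being the term-vector Terms for one field and 'builder' the response builder.
private void buildFieldTokens(XContentBuilder builder, Terms fieldTerms) throws IOException {
  TermsEnum termsEnum = fieldTerms.iterator();
  PostingsEnum posEnum = null;
  BytesRef term;
  while ((term = termsEnum.next()) != null) {
    // ask for everything; the has*() checks in the helpers skip whatever is absent
    posEnum = termsEnum.postings(posEnum, PostingsEnum.ALL);
    posEnum.nextDoc();                          // a term vector holds a single pseudo-document
    int termFreq = posEnum.freq();
    builder.startObject(term.utf8ToString());
    initMemory(fieldTerms, termFreq);           // grow the reusable buffers
    initValues(fieldTerms, posEnum, termFreq);  // copy positions, offsets, payloads
    buildValues(builder, fieldTerms, termFreq); // emit the "tokens" array
    builder.endObject();
  }
}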
@Override public boolean hasOffsets() { return delegateTerms.hasOffsets(); }
boolean offsets = flags.contains(Flag.Offsets) && fieldTermVector.hasOffsets();
boolean payloads = flags.contains(Flag.Payloads) && fieldTermVector.hasPayloads();
/**
 * Returns a token stream generated from a {@link Terms}. This can be used to feed the
 * highlighter with a pre-parsed token stream. The {@link Terms} must have offsets available.
 * If there are no positions available, all tokens will have position increments reflecting
 * adjacent tokens, or coincident when terms share a start offset. If there are stopwords
 * filtered from the index, you probably want to ensure term vectors have positions so that
 * phrase queries won't match across stopwords.
 *
 * @throws IllegalArgumentException if no offsets are available
 */
@Deprecated // maintenance reasons LUCENE-6445
public static TokenStream getTokenStream(final Terms tpv) throws IOException {
  if (!tpv.hasOffsets()) {
    throw new IllegalArgumentException("Highlighting requires offsets from the TokenStream.");
    // TokenStreamFromTermVector can handle a lack of offsets if there are positions. But
    // highlighters require offsets, so we insist here.
  }
  return new TokenStreamFromTermVector(tpv, -1); // TODO propagate maxStartOffset; see LUCENE-6445
}
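A short usage sketch of this deprecated entry point: fetch the term-vector Terms for a document and feed the resulting TokenStream to a Highlighter. The field name "body", the method name, and the already-configured Highlighter are assumptions for illustration, not from the source.

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.TokenSources;

// Sketch only: "body" is a hypothetical field name; the Highlighter is assumed to be
// configured elsewhere (e.g. with a QueryScorer for the query being highlighted).
static String highlightFromTermVector(IndexReader reader, Highlighter highlighter,
                                      int docId, String storedText)
    throws IOException, InvalidTokenOffsetsException {
  Terms tv = reader.getTermVector(docId, "body");
  if (tv == null || !tv.hasOffsets()) {
    return null; // no term vector with offsets was indexed for this field
  }
  TokenStream ts = TokenSources.getTokenStream(tv); // throws IllegalArgumentException without offsets
  return highlighter.getBestFragment(ts, storedText);
}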