private void fillTerm() {
  final int termLength = prefix + suffix;
  ste.term.setLength(termLength);
  ste.term.grow(termLength);
  System.arraycopy(suffixBytes, startBytePos, ste.term.bytes(), prefix, suffix);
}

@Override
public int nextDoc() throws IOException {
  int docID = docsWithField.nextDoc();
  if (docID != NO_MORE_DOCS) {
    int length = Math.toIntExact(lengthsIterator.next());
    value.grow(length); // ensure capacity in case the builder was not pre-sized elsewhere
    value.setLength(length);
    bytesIterator.readBytes(value.bytes(), 0, length);
  }
  return docID;
}

private void readTermBytes(int prefix, int suffix) throws IOException {
  // grow first so the suffix can be read directly into the array after the shared prefix
  builder.grow(prefix + suffix);
  input.readBytes(builder.bytes(), prefix, suffix);
  builder.setLength(prefix + suffix);
}

@Override
public long ramBytesUsed() {
  return super.ramBytesUsed()
      + offsets.ramBytesUsed()
      + lengths.ramBytesUsed()
      + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
      + 2 * Integer.BYTES
      + 3 * RamUsageEstimator.NUM_BYTES_OBJECT_REF
      + values.bytes().length;
}

/**
 * Return an array that contains the min and max values for the [offset, offset+length)
 * interval of the given {@link BytesRef}s.
 */
private static BytesRef[] computeMinMax(int count, IntFunction<BytesRef> packedValues, int offset, int length) {
  assert length > 0;
  BytesRefBuilder min = new BytesRefBuilder();
  BytesRefBuilder max = new BytesRefBuilder();
  BytesRef first = packedValues.apply(0);
  min.copyBytes(first.bytes, first.offset + offset, length);
  max.copyBytes(first.bytes, first.offset + offset, length);
  for (int i = 1; i < count; ++i) {
    BytesRef candidate = packedValues.apply(i);
    if (FutureArrays.compareUnsigned(min.bytes(), 0, length, candidate.bytes,
        candidate.offset + offset, candidate.offset + offset + length) > 0) {
      min.copyBytes(candidate.bytes, candidate.offset + offset, length);
    } else if (FutureArrays.compareUnsigned(max.bytes(), 0, length, candidate.bytes,
        candidate.offset + offset, candidate.offset + offset + length) < 0) {
      max.copyBytes(candidate.bytes, candidate.offset + offset, length);
    }
  }
  return new BytesRef[] {min.get(), max.get()};
}

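// Usage sketch (not from the original source; the data is made up): three 4-byte
// packed values, asking for the min/max of the 2-byte slice starting at offset 1.
// Assumes org.apache.lucene.util.BytesRef and visibility of computeMinMax.
BytesRef[] packed = {
  new BytesRef(new byte[] {1, 9, 3, 0}),
  new BytesRef(new byte[] {1, 2, 8, 0}),
  new BytesRef(new byte[] {1, 5, 5, 0})
};
BytesRef[] minMax = computeMinMax(packed.length, i -> packed[i], 1, 2);
// minMax[0] holds {2, 8} (unsigned-smallest slice), minMax[1] holds {9, 3}
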
public void nextLeaf() {
  //if (DEBUG) System.out.println("  frame.next ord=" + ord + " nextEnt=" + nextEnt + " entCount=" + entCount);
  assert nextEnt != -1 && nextEnt < entCount : "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
  nextEnt++;
  suffix = suffixesReader.readVInt();
  startBytePos = suffixesReader.getPosition();
  ste.term.setLength(prefix + suffix);
  ste.term.grow(ste.term.length());
  suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
  ste.termExists = true;
}

/**
 * Returns the <i>n'th</i> element of this {@link BytesRefArray}
 * @param spare a spare {@link BytesRefBuilder} instance
 * @param index the element's index to retrieve
 * @return the <i>n'th</i> element of this {@link BytesRefArray}
 */
public BytesRef get(BytesRefBuilder spare, int index) {
  FutureObjects.checkIndex(index, lastElement);
  int offset = offsets[index];
  int length = index == lastElement - 1 ? currentOffset - offset : offsets[index + 1] - offset;
  spare.grow(length);
  spare.setLength(length);
  pool.readBytes(offset, spare.bytes(), 0, spare.length());
  return spare.get();
}

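// Usage sketch (hypothetical, not from the original source): append two entries
// and fetch one back; the spare builder receives a copy of the stored bytes.
// Assumes org.apache.lucene.util.{BytesRef, BytesRefArray, BytesRefBuilder, Counter}.
BytesRefArray array = new BytesRefArray(Counter.newCounter());
array.append(new BytesRef("foo"));
array.append(new BytesRef("bar"));
BytesRefBuilder spare = new BytesRefBuilder();
BytesRef second = array.get(spare, 1); // -> "bar"
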
/**
 * Reads the next entry into the internal {@link BytesRef}. The internal
 * storage is resized if needed.
 *
 * @return Returns <code>null</code> if EOF occurred when trying to read
 * the header of the next sequence, otherwise the entry just read.
 * @throws EOFException if the file ends before the full sequence is read.
 */
public BytesRef next() throws IOException {
  if (in.getFilePointer() >= end) {
    return null;
  }
  short length = in.readShort(); // sequence lengths are encoded as shorts
  ref.grow(length);
  ref.setLength(length);
  in.readBytes(ref.bytes(), 0, length);
  return ref.get();
}

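// Typical read loop over such a reader (a sketch; 'reader' and 'consume' are
// hypothetical names): next() returns null at EOF, and the returned BytesRef
// aliases the internal builder, so deep-copy it if it must outlive the next call.
for (BytesRef entry = reader.next(); entry != null; entry = reader.next()) {
  consume(BytesRef.deepCopyOf(entry));
}
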
// resize the backing array if needed, then build the upper bound for the linear
// scan: the current seek prefix followed by the max interval byte
if (linearUpperBound.bytes.length < length) {
  linearUpperBound.bytes = new byte[length];
}
System.arraycopy(seekBytesRef.bytes(), 0, linearUpperBound.bytes, 0, position);
linearUpperBound.bytes[position] = (byte) maxInterval;
linearUpperBound.length = length;

@Override
public int nextPosition() throws IOException {
  assert posLeft > 0;
  posLeft--;
  int code = posReader.readVInt();
  pos += code >>> 1;
  if ((code & 1) != 0) {
    // the low bit signals that this position has a payload
    hasPayload = true;
    payload.setLength(posReader.readVInt());
    payload.grow(payload.length());
    posReader.readBytes(payload.bytes(), 0, payload.length());
  } else {
    hasPayload = false;
  }
  if (readOffsets) {
    startOffset += posReader.readVInt();
    endOffset = startOffset + posReader.readVInt();
  }
  return pos;
}

ste.term.setLength(prefix + suffix);
ste.term.grow(ste.term.length());
suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
if ((code & 1) == 0) {

payload.grow(payloadLength); // ensure capacity in case the builder was not pre-sized upstream
positions.readBytes(payload.bytes(), 0, payloadLength);
payload.setLength(payloadLength);
thisPayload = payload.get();

public static byte[] toUTF8Bytes(CharSequence charSequence, BytesRefBuilder spare) {
  spare.copyChars(charSequence);
  return Arrays.copyOf(spare.bytes(), spare.length());
}

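// Usage sketch (not from the original source): the spare builder is reused to
// avoid per-call allocation; the returned array is an exact-length UTF-8 copy
// that is safe to retain.
BytesRefBuilder spare = new BytesRefBuilder();
byte[] utf8 = toUTF8Bytes("héllo wörld", spare);
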
/**
 * Computes a strong hash value for small files. Note that this method should
 * only be used for files smaller than 1MB.
 */
public static void hashFile(BytesRefBuilder fileHash, InputStream in, long size) throws IOException {
  final int len = (int) Math.min(1024 * 1024, size); // for safety we limit this to 1MB
  fileHash.grow(len);
  fileHash.setLength(len);
  final int readBytes = Streams.readFully(in, fileHash.bytes(), 0, len);
  assert readBytes == len : Integer.toString(readBytes) + " != " + Integer.toString(len);
  assert fileHash.length() == len : Integer.toString(fileHash.length()) + " != " + Integer.toString(len);
}

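// Usage sketch (hypothetical): per the code above, hashFile copies the first
// min(1MB, size) bytes of the stream into the builder rather than computing a
// digest, so the "hash" is the raw content itself. Assumes
// java.io.ByteArrayInputStream and java.nio.charset.StandardCharsets.
byte[] blob = "small file contents".getBytes(StandardCharsets.UTF_8);
BytesRefBuilder fileHash = new BytesRefBuilder();
hashFile(fileHash, new ByteArrayInputStream(blob), blob.length);
BytesRef contentHash = fileHash.get(); // aliases the builder's backing array
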
@Override
void copyCurrent(int slot) {
  values = bigArrays.grow(values, slot + 1);
  valueBuilders = bigArrays.grow(valueBuilders, slot + 1);
  BytesRefBuilder builder = valueBuilders.get(slot);
  // remember the old capacity so only the delta is reported to the circuit breaker
  int byteSize = builder == null ? 0 : builder.bytes().length;
  if (builder == null) {
    builder = new BytesRefBuilder();
    valueBuilders.set(slot, builder);
  }
  if (missingBucket && currentValue == null) {
    values.set(slot, null);
  } else {
    assert currentValue != null;
    builder.copyBytes(currentValue);
    breakerConsumer.accept(builder.bytes().length - byteSize);
    values.set(slot, builder.get());
  }
}

private void writeInfos(final StreamInput input) throws IOException {
  // reads per-position info (positions, offsets, payloads) from the stream
  // into the pre-sized arrays
  for (int i = 0; i < freq; i++) {
    if (hasPositions) {
      positions[i] = input.readVInt();
    }
    if (hasOffsets) {
      startOffsets[i] = input.readVInt();
      endOffsets[i] = input.readVInt();
    }
    if (hasPayloads) {
      int payloadLength = input.readVInt();
      if (payloads[i] == null) {
        payloads[i] = new BytesRefBuilder();
      }
      payloads[i].grow(payloadLength);
      input.readBytes(payloads[i].bytes(), 0, payloadLength);
      payloads[i].setLength(payloadLength);
    }
  }
}

public void writeText(Text text) throws IOException {
  if (!text.hasBytes()) {
    // no cached bytes: encode the string as UTF-8 via the spare builder
    final String string = text.string();
    spare.copyChars(string);
    writeInt(spare.length());
    write(spare.bytes(), 0, spare.length());
  } else {
    BytesReference bytes = text.bytes();
    writeInt(bytes.length());
    bytes.writeTo(this);
  }
}

/**
 * This is a BWC layer to ensure we update the snapshot metadata with the corresponding
 * hashes before we compare them. The new logic for StoreFileMetaData reads the entire
 * {@code .si} and {@code segments.n} files to strengthen the comparison of the files
 * on a per-segment / per-commit level.
 */
private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer,
                                                 final BlobStoreIndexShardSnapshot.FileInfo fileInfo,
                                                 Store.MetadataSnapshot snapshot) throws Exception {
  final StoreFileMetaData metadata;
  if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
    if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
      // we have a hash - check if our repo has a hash too, otherwise we have to calculate it.
      // we might have multiple parts even though the file is small... make sure we read all of it.
      try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
        BytesRefBuilder builder = new BytesRefBuilder();
        Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
        BytesRef hash = fileInfo.metadata().hash(); // reset the file info's metadata hash
        assert hash.length == 0;
        hash.bytes = builder.bytes();
        hash.offset = 0;
        hash.length = builder.length();
      }
    }
  }
}

spare.grow(termVectorSize);
perFieldTermVectorInput.readBytes(spare.bytes(), 0, termVectorSize);
spare.setLength(termVectorSize);
if (hasTermStatistic) {