public NodeHash(FST<T> fst, FST.BytesReader in) {
  // 16 initial slots, 1<<27 values per page, 8 start bits, compact packing
  table = new PagedGrowableWriter(16, 1 << 27, 8, PackedInts.COMPACT);
  mask = 15; // size - 1; hashing relies on the size staying a power of two
  this.fst = fst;
  this.in = in;
}
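The constants above work together: with a power-of-two table size, mask = size - 1 lets a hash be reduced to a slot with a single AND instead of a modulo. A minimal standalone sketch of that probe pattern, using a linear probe purely for illustration (the real NodeHash may step differently) and the convention from the constructor that 0 marks an empty slot:

import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedGrowableWriter;

// Sketch only: illustrates the mask-based probe; not NodeHash's actual logic.
class ProbeSketch {
  PagedGrowableWriter table = new PagedGrowableWriter(16, 1 << 27, 8, PackedInts.COMPACT);
  long mask = 15; // size - 1, valid only while size stays a power of two

  /** Returns the slot holding address, or the first empty slot (stored value 0).
   *  Assumes the table is never full, which rehashing guarantees. */
  long slotFor(long hash, long address) {
    long pos = hash & mask; // power-of-two size: AND replaces modulo
    while (true) {
      long v = table.get(pos);
      if (v == 0 || v == address) {
        return pos;
      }
      pos = (pos + 1) & mask; // linear probe for illustration
    }
  }
}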
public NumericDocValuesFieldUpdates(long delGen, String field, int maxDoc) {
  super(maxDoc, delGen, field, DocValuesType.NUMERIC);
  values = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
}
public BinaryDocValuesFieldUpdates(long delGen, String field, int maxDoc) {
  super(maxDoc, delGen, field, DocValuesType.BINARY);
  offsets = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  lengths = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  values = new BytesRefBuilder();
}
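Both update buffers start every PagedGrowableWriter at size 1 with 1 bit per value and rely on growth as updates arrive. Below is a hedged sketch of how the binary variant's parallel arrays plausibly advance on each append; the real class also records doc IDs and finishes with a sort, which the sketch omits:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedGrowableWriter;

// Hedged sketch: fields mirror the constructor above; size and add() are assumptions.
class BinaryUpdateBufferSketch {
  static final int PAGE_SIZE = 1024;
  PagedGrowableWriter offsets = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  PagedGrowableWriter lengths = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  BytesRefBuilder values = new BytesRefBuilder();
  int size;

  void add(BytesRef value) {
    if (size == offsets.size()) {       // out of room: grow both parallel arrays
      offsets = offsets.grow(size + 1);
      lengths = lengths.grow(size + 1);
    }
    offsets.set(size, values.length()); // where this value starts in the shared buffer
    lengths.set(size, value.length);    // and how many bytes it spans
    values.append(value);               // all values share one append-only byte buffer
    size++;
  }
}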
@Override
protected PagedGrowableWriter newUnfilledCopy(long newSize) {
  // same page size, bits, and overhead ratio; the trailing false leaves the
  // pages unfilled so the growth path can copy the old pages over instead
  return new PagedGrowableWriter(newSize, pageSize(), bitsPerValue, acceptableOverheadRatio, false);
}
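Callers never invoke newUnfilledCopy directly; it is the allocation hook the paged-writer growth path uses. From user code, the same mechanism is reached through grow(), as this small demo shows:

import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedGrowableWriter;

public class GrowDemo {
  public static void main(String[] args) {
    PagedGrowableWriter w = new PagedGrowableWriter(1, 1024, 1, PackedInts.FAST);
    // grow() returns a writer of at least the requested size, carrying the
    // old pages over and adding fresh capacity with some headroom.
    w = w.grow(100_000);
    w.set(99_999, 42); // bits per value widen on demand, page by page
    System.out.println(w.get(99_999) + " / size=" + w.size());
  }
}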
private void rehash() throws IOException {
  final PagedGrowableWriter oldTable = table;
  // double the table; start at bitsRequired(count) bits, widening on demand
  table = new PagedGrowableWriter(2 * oldTable.size(), 1 << 30, PackedInts.bitsRequired(count), PackedInts.COMPACT);
  mask = table.size() - 1;
  // every entry must be re-inserted: its slot depends on the new mask
  for (long idx = 0; idx < oldTable.size(); idx++) {
    final long address = oldTable.get(idx);
    if (address != 0) { // 0 marks an empty slot
      addNew(address);
    }
  }
}
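rehash() doubles the table and re-inserts every non-zero address, since each entry's slot depends on the new mask. The new writer starts at PackedInts.bitsRequired(count) bits per value, which is just a starting width; the writer widens on demand. A quick check of what that helper returns:

import org.apache.lucene.util.packed.PackedInts;

public class BitsRequiredDemo {
  public static void main(String[] args) {
    // bitsRequired(n) = bits needed to represent n in binary (minimum 1)
    System.out.println(PackedInts.bitsRequired(15));      // 4
    System.out.println(PackedInts.bitsRequired(16));      // 5
    System.out.println(PackedInts.bitsRequired(1L << 30)); // 31
  }
}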
OrdinalsStore(int maxDoc, int startBitsPerValue, float acceptableOverheadRatio) {
  this.startBitsPerValue = startBitsPerValue;
  this.acceptableOverheadRatio = acceptableOverheadRatio;
  positions = new PagedGrowableWriter(maxDoc, PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
  firstOrdinals = new GrowableWriter(startBitsPerValue, maxDoc, acceptableOverheadRatio);
  // Over-allocate so we never need to resize these arrays: 24 levels is
  // enough to store several million ordinals per doc.
  ordinals = new PagedGrowableWriter[24];
  nextLevelSlices = new PagedGrowableWriter[24];
  sizes = new int[24];
  Arrays.fill(sizes, 1); // reserve the 1st slice on every level
}
/** Allocate a new slice and return its ID. */
private long newSlice(int level) {
  final long newSlice = sizes[level]++;
  // Lazily allocate ordinals
  if (ordinals[level] == null) {
    ordinals[level] = new PagedGrowableWriter(8L * numSlots(level), PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
  } else {
    ordinals[level] = ordinals[level].grow(sizes[level] * numSlots(level));
    if (nextLevelSlices[level] != null) {
      nextLevelSlices[level] = nextLevelSlices[level].grow(sizes[level]);
    }
  }
  return newSlice;
}
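newSlice bumps the per-level counter, allocates the level's ordinals array on first use (with room for 8 slices), and otherwise grows it to fit the new slice. The helpers numSlots, startOffset, and slotsMask are not part of these excerpts; the sketch below is one plausible layout consistent with the calls here and in nonFirstLevel further down, not the actual implementation:

// Hedged reading of the layout helpers used above; NOT the source implementation.
class SliceLayoutSketch {
  /** Slots per slice at a level; assume slice widths double as levels go up. */
  static int numSlots(int level) {
    return 1 << level;
  }

  /** Slices of one level laid out back to back: slice N starts at N * width. */
  static long startOffset(int level, long slice) {
    return slice * (long) numSlots(level);
  }

  /** With power-of-two widths, "end of slice" becomes a mask test. */
  static long slotsMask(int level) {
    return numSlots(level) - 1;
  }
}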
private int firstLevel(int docID, long ordinal) {
  // 0 or 1 ordinal
  if (firstOrdinals.get(docID) == 0L) {
    firstOrdinals.set(docID, ordinal + 1);
    return 1;
  } else {
    final long newSlice = newSlice(1);
    if (firstNextLevelSlices == null) {
      firstNextLevelSlices = new PagedGrowableWriter(firstOrdinals.size(), PAGE_SIZE, 3, acceptableOverheadRatio);
    }
    firstNextLevelSlices.set(docID, newSlice);
    final long offset = startOffset(1, newSlice);
    ordinals[1].set(offset, ordinal + 1);
    // current position is on the 1st level and not allocated yet
    positions.set(docID, position(1, offset));
    return 2;
  }
}
private int nonFirstLevel(int docID, long ordinal, long position) {
  int level = level(position);
  long offset = offset(position, level);
  assert offset != 0L;
  if (((offset + 1) & slotsMask(level)) == 0L) {
    // reached the end of the slice, allocate a new one on the next level
    final long newSlice = newSlice(level + 1);
    if (nextLevelSlices[level] == null) {
      nextLevelSlices[level] = new PagedGrowableWriter(sizes[level], PAGE_SIZE, 1, acceptableOverheadRatio);
    }
    nextLevelSlices[level].set(sliceID(level, offset), newSlice);
    ++level;
    offset = startOffset(level, newSlice);
    assert (offset & slotsMask(level)) == 0L;
  } else {
    // just go to the next slot
    ++offset;
  }
  ordinals[level].set(offset, ordinal + 1);
  final long newPosition = position(level, offset);
  positions.set(docID, newPosition);
  return numOrdinals(level, offset);
}
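firstLevel and nonFirstLevel round-trip a packed position long stored per document in positions: level(position) and offset(position, level) must invert position(level, offset). The encoding is not part of these excerpts; one plausible packing, labeled as an assumption:

// One plausible packing for position(level, offset); an assumption, not the source.
class PositionCodecSketch {
  static final int LEVEL_BITS = 5; // levels 0..23 fit in 5 bits

  static long position(int level, long offset) {
    return (offset << LEVEL_BITS) | level;
  }

  static int level(long position) {
    return (int) (position & ((1 << LEVEL_BITS) - 1));
  }

  static long offset(long position, int level) {
    return position >>> LEVEL_BITS; // level is implied by the low bits here
  }
}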
public BinaryDocValuesFieldUpdates(String field, int maxDoc) {
  super(field, DocValuesType.BINARY);
  bitsPerValue = PackedInts.bitsRequired(maxDoc - 1);
  docs = new PagedMutable(1, PAGE_SIZE, bitsPerValue, PackedInts.COMPACT);
  offsets = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  lengths = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  values = new BytesRefBuilder();
  size = 0;
}
public NumericDocValuesFieldUpdates(String field, int maxDoc) {
  super(field, DocValuesType.NUMERIC);
  bitsPerValue = PackedInts.bitsRequired(maxDoc - 1);
  docs = new PagedMutable(1, PAGE_SIZE, bitsPerValue, PackedInts.COMPACT);
  values = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  size = 0;
}
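These older variants (without delGen) additionally keep doc IDs in a PagedMutable sized COMPACT at exactly bitsRequired(maxDoc - 1) bits, since a doc ID never exceeds maxDoc - 1, while the value arrays start at 1 bit and widen as needed. A hedged sketch of the numeric add path under those assumptions:

import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedGrowableWriter;
import org.apache.lucene.util.packed.PagedMutable;

// Hedged sketch of the add path for the numeric variant above; add() is assumed.
class NumericUpdateBufferSketch {
  static final int PAGE_SIZE = 1024;
  final int bitsPerValue;
  PagedMutable docs;
  PagedGrowableWriter values;
  int size;

  NumericUpdateBufferSketch(int maxDoc) {
    bitsPerValue = PackedInts.bitsRequired(maxDoc - 1); // doc IDs are < maxDoc
    docs = new PagedMutable(1, PAGE_SIZE, bitsPerValue, PackedInts.COMPACT);
    values = new PagedGrowableWriter(1, PAGE_SIZE, 1, PackedInts.FAST);
  }

  void add(int doc, long value) {
    if (size == docs.size()) {   // grow both parallel arrays together
      docs = docs.grow(size + 1);
      values = values.grow(size + 1);
    }
    docs.set(size, doc);         // fixed width: COMPACT, bitsRequired(maxDoc - 1)
    values.set(size, value);     // growable width: starts at 1 bit
    size++;
  }
}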