/**
 * Estimates the total in-memory footprint of one row, in bytes. The estimate sums:
 * <ul>
 * <li> a rough fixed overhead per Map entry
 * <li> the estimated size of the {@link IncrementalIndexRow} key itself
 * <li> the maximum space the row's aggregators may occupy
 * </ul>
 *
 * @param key                          TimeAndDims key whose footprint is estimated
 * @param maxBytesPerRowForAggregators upper bound on bytes used by all aggregators for one row
 *
 * @return estimated size of the row in bytes
 */
private long estimateRowSizeInBytes(IncrementalIndexRow key, long maxBytesPerRowForAggregators)
{
  final long keyFootprint = key.estimateBytesInMemory();
  return ROUGH_OVERHEAD_PER_MAP_ENTRY + keyFootprint + maxBytesPerRowForAggregators;
}
/**
 * Looks up the row index previously stored under {@code key}.
 *
 * @param key row key to look up
 * @return the stored row index, or {@link IncrementalIndexRow#EMPTY_ROW_INDEX} when the key is absent
 */
@Override
public int getPriorIndex(IncrementalIndexRow key)
{
  final IncrementalIndexRow existing = facts.get(key);
  if (existing == null) {
    return IncrementalIndexRow.EMPTY_ROW_INDEX;
  }
  return existing.getRowIndex();
}
/**
 * Static factory building an {@link IncrementalIndexRow} that carries a precomputed
 * dimensions-key size, so callers can account for the key's memory footprint.
 *
 * @param timestamp          row timestamp in millis
 * @param dims               dimension values for the row
 * @param dimensionDescsList descriptors for the dimensions in {@code dims}
 * @param dimsKeySize        precomputed size of the dims key, in bytes
 *
 * @return a new row keyed by time and dims, with the given key size recorded
 */
static IncrementalIndexRow createTimeAndDimswithDimsKeySize(
    long timestamp,
    Object[] dims,
    List<IncrementalIndex.DimensionDesc> dimensionDescsList,
    long dimsKeySize
)
{
  return new IncrementalIndexRow(timestamp, dims, dimensionDescsList, dimsKeySize);
}
@Override public int putIfAbsent(IncrementalIndexRow key, int rowIndex) { // setRowIndex() must be called before facts.putIfAbsent() for visibility of rowIndex from concurrent readers. key.setRowIndex(rowIndex); IncrementalIndexRow prev = facts.putIfAbsent(key, key); return prev == null ? IncrementalIndexRow.EMPTY_ROW_INDEX : prev.getRowIndex(); }
@Override public int putIfAbsent(IncrementalIndexRow key, int rowIndex) { Long time = key.getTimestamp(); Deque<IncrementalIndexRow> rows = facts.get(time); if (rows == null) { facts.putIfAbsent(time, new ConcurrentLinkedDeque<>()); // in race condition, rows may be put by other thread, so always get latest status from facts rows = facts.get(time); } // setRowIndex() must be called before rows.add() for visibility of rowIndex from concurrent readers. key.setRowIndex(rowIndex); rows.add(key); // always return EMPTY_ROW_INDEX to indicate that we always add new row return IncrementalIndexRow.EMPTY_ROW_INDEX; }
getFacts().iterator(descending), incrementalIndexRow -> { final int rowOffset = incrementalIndexRow.getRowIndex(); Object[] theDims = incrementalIndexRow.getDims(); return new MapBasedRow(incrementalIndexRow.getTimestamp(), theVals);
final Object[] dims = row.getDims();
/**
 * Returns the timestamp of the row the cursor currently points at, as a long.
 */
@Override
public long getLong()
{
  final long timestamp = currEntry.getTimestamp();
  return timestamp;
}
truncated = gran.bucketStart(row.getTimestamp()).getMillis(); IncrementalIndexRow incrementalIndexRow = IncrementalIndexRow.createTimeAndDimswithDimsKeySize( Math.max(truncated, minTimestamp), dims,
getFacts().iterator(descending), incrementalIndexRow -> { final int rowOffset = incrementalIndexRow.getRowIndex(); Object[] theDims = incrementalIndexRow.getDims(); return new MapBasedRow(incrementalIndexRow.getTimestamp(), theVals);
@Override public int putIfAbsent(IncrementalIndexRow key, int rowIndex) { // setRowIndex() must be called before facts.putIfAbsent() for visibility of rowIndex from concurrent readers. key.setRowIndex(rowIndex); IncrementalIndexRow prev = facts.putIfAbsent(key, key); return prev == null ? IncrementalIndexRow.EMPTY_ROW_INDEX : prev.getRowIndex(); }
@Override public int putIfAbsent(IncrementalIndexRow key, int rowIndex) { Long time = key.getTimestamp(); Deque<IncrementalIndexRow> rows = facts.get(time); if (rows == null) { facts.putIfAbsent(time, new ConcurrentLinkedDeque<>()); // in race condition, rows may be put by other thread, so always get latest status from facts rows = facts.get(time); } // setRowIndex() must be called before rows.add() for visibility of rowIndex from concurrent readers. key.setRowIndex(rowIndex); rows.add(key); // always return EMPTY_ROW_INDEX to indicate that we always add new row return IncrementalIndexRow.EMPTY_ROW_INDEX; }
/**
 * Returns the timestamp of the smallest key in the sorted facts map.
 *
 * @throws UnsupportedOperationException if the facts are not sorted
 */
@Override
public long getMinTimeMillis()
{
  if (!sortFacts) {
    throw new UnsupportedOperationException("can't get minTime from unsorted facts data.");
  }
  final ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow> sortedFacts =
      (ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow>) facts;
  return sortedFacts.firstKey().getTimestamp();
}
truncated = gran.bucketStart(row.getTimestamp()).getMillis(); IncrementalIndexRow incrementalIndexRow = IncrementalIndexRow.createTimeAndDimswithDimsKeySize( Math.max(truncated, minTimestamp), dims,
/**
 * Advances the cursor to the next row accepted by {@code filterMatcher}, without
 * throwing on thread interruption: if the current thread is interrupted, the method
 * returns early, leaving {@code done} unset so the caller can decide what to do.
 * When the underlying iterator is exhausted without finding a match, the cursor is
 * marked done.
 */
@Override
public void advanceUninterruptibly()
{
  // Nothing left to read: mark the cursor finished immediately.
  if (!baseIter.hasNext()) {
    done = true;
    return;
  }
  while (baseIter.hasNext()) {
    // "Uninterruptibly" means no exception is thrown; we still stop scanning on interrupt.
    if (Thread.currentThread().isInterrupted()) {
      return;
    }
    IncrementalIndexRow entry = baseIter.next();
    // Skip entries past the cursor's max row index (presumably rows added after the
    // cursor was created — confirm against beyondMaxRowIndex()'s definition).
    if (beyondMaxRowIndex(entry.getRowIndex())) {
      continue;
    }
    // Publish the candidate row first: filterMatcher reads the current entry.
    currEntry.set(entry);
    if (filterMatcher.matches()) {
      return;
    }
  }
  done = true;
}
@Test
public void testIncrementalIndexRowSizeArr()
{
  // Small on-heap index whose byte accounting we want to verify.
  IncrementalIndex indexUnderTest = new IncrementalIndex.Builder()
      .setSimpleTestingIndexSchema(new CountAggregatorFactory("cnt"))
      .setMaxRowCount(10000)
      .setMaxBytesInMemory(1000)
      .buildOnheap();
  long now = System.currentTimeMillis();
  // One scalar string dim plus one two-element string-array dim.
  IncrementalIndex.IncrementalIndexRowResult rowResult = indexUnderTest.toIncrementalIndexRow(toMapRow(
      now + 1,
      "billy",
      "A",
      "joe",
      Arrays.asList("A", "B")
  ));
  IncrementalIndexRow indexRow = rowResult.getIncrementalIndexRow();
  Assert.assertEquals(50, indexRow.estimateBytesInMemory());
}
final Object[] dims = row.getDims();
/**
 * Returns the timestamp of the largest key in the sorted facts map.
 *
 * @throws UnsupportedOperationException if the facts are not sorted
 */
@Override
public long getMaxTimeMillis()
{
  if (!sortFacts) {
    throw new UnsupportedOperationException("can't get maxTime from unsorted facts data.");
  }
  final ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow> sortedFacts =
      (ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow>) facts;
  return sortedFacts.lastKey().getTimestamp();
}
/**
 * Returns the keys of the sorted facts map whose position falls between sentinel
 * rows built from {@code timeStart} (inclusive) and {@code timeEnd} (exclusive),
 * optionally iterated in descending order.
 *
 * @param descending iterate the range from largest to smallest key when true
 * @param timeStart  inclusive lower bound, millis
 * @param timeEnd    exclusive upper bound, millis
 *
 * @throws UnsupportedOperationException if the facts are not sorted
 */
@Override
public Iterable<IncrementalIndexRow> timeRangeIterable(boolean descending, long timeStart, long timeEnd)
{
  if (!sortFacts) {
    throw new UnsupportedOperationException("can't get timeRange from unsorted facts data.");
  }
  // Sentinel keys with empty dims bound the range; subMap is inclusive-start, exclusive-end.
  IncrementalIndexRow lowerBound = new IncrementalIndexRow(timeStart, new Object[]{}, dimensionDescsList);
  IncrementalIndexRow upperBound = new IncrementalIndexRow(timeEnd, new Object[]{}, dimensionDescsList);
  final ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow> sortedFacts =
      (ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow>) facts;
  ConcurrentNavigableMap<IncrementalIndexRow, IncrementalIndexRow> bounded = sortedFacts.subMap(lowerBound, upperBound);
  final Map<IncrementalIndexRow, IncrementalIndexRow> rangeMap = descending ? bounded.descendingMap() : bounded;
  return rangeMap.keySet();
}