this.minTimestamp = incrementalIndexSchema.getMinTimestamp();
this.gran = incrementalIndexSchema.getGran();
this.rollup = incrementalIndexSchema.isRollup();
this.virtualColumns = incrementalIndexSchema.getVirtualColumns();
this.metrics = incrementalIndexSchema.getMetrics();
this.rowTransformers = new CopyOnWriteArrayList<>();
this.deserializeComplexMetrics = deserializeComplexMetrics;

this.metadata = new Metadata(
    null,
    getCombiningAggregators(metrics),
    incrementalIndexSchema.getTimestampSpec(),
    this.gran,
    this.rollup
);

DimensionsSpec dimensionsSpec = incrementalIndexSchema.getDimensionsSpec();
this.dimensionDescs = Maps.newLinkedHashMap();
public IncrementalIndexSchema build()
{
  return new IncrementalIndexSchema(
      minTimestamp,
      timestampSpec,
      gran,
      virtualColumns,
      dimensionsSpec,
      metrics,
      rollup
  );
}
}
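In practice a schema is usually constructed through the Builder rather than this constructor. A minimal usage sketch follows; the timestamp, granularity, and single count metric are illustrative choices, not taken from this code:

// Minimal sketch of using the Builder; concrete values are illustrative.
IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
    .withMinTimestamp(DateTimes.of("2020-01-01").getMillis()) // illustrative start time
    .withQueryGranularity(Granularities.NONE)                 // keep row timestamps as-is
    .withMetrics(new CountAggregatorFactory("count"))         // hypothetical single metric
    .withRollup(true)
    .build();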
FireHydrant lastHydrant = hydrants.get(numHydrants - 1);
newCount = lastHydrant.getCount() + 1;
if (!indexSchema.getDimensionsSpec().hasCustomDimensions()) {
  Map<String, ColumnCapabilitiesImpl> oldCapabilities;
  if (lastHydrant.hasSwapped()) {
/**
 * Gives the estimated max aggregator size per row. It is assumed that every aggregator will have enough overhead
 * for its own object header and for a pointer to a selector, so we add an overhead factor of 16 additional bytes
 * per object.
 * These 16 bytes (128 bits) are the object metadata for a 64-bit JVM process and consist of:
 * <ul>
 * <li>Class pointer which describes the object type: 64 bits
 * <li>Flags which describe the state of the object, including the hashcode: 64 bits
 * </ul>
 * The total size estimation consists of:
 * <ul>
 * <li>metrics length: Integer.BYTES * len
 * <li>maxAggregatorIntermediateSize: getMaxIntermediateSizeWithNulls per aggregator + overhead factor (16 bytes)
 * </ul>
 *
 * @param incrementalIndexSchema schema whose aggregators are measured
 *
 * @return long max aggregator size in bytes
 */
private static long getMaxBytesPerRowForAggregators(IncrementalIndexSchema incrementalIndexSchema)
{
  long maxAggregatorIntermediateSize = Integer.BYTES * incrementalIndexSchema.getMetrics().length;
  maxAggregatorIntermediateSize += Arrays.stream(incrementalIndexSchema.getMetrics())
                                         .mapToLong(
                                             aggregator -> aggregator.getMaxIntermediateSizeWithNulls()
                                                           + Long.BYTES * 2
                                         )
                                         .sum();
  return maxAggregatorIntermediateSize;
}
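As a worked example of this estimate (the schema and both intermediate sizes are hypothetical, e.g. a count and a doubleSum aggregator that each report 8 bytes):

// Hypothetical worked example of getMaxBytesPerRowForAggregators: two metrics,
// each with an 8-byte max intermediate size.
long len = 2;                          // number of metrics
long base = Integer.BYTES * len;       // 4 * 2 = 8 bytes for the metrics length term
long perAgg = 8 + Long.BYTES * 2;      // intermediate size + 16-byte object overhead = 24
long estimate = base + perAgg * len;   // 8 + 48 = 56 bytes per row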
OffheapIncrementalIndex(
    IncrementalIndexSchema incrementalIndexSchema,
    boolean deserializeComplexMetrics,
    boolean reportParseExceptions,
    boolean concurrentEventAdd,
    boolean sortFacts,
    int maxRowCount,
    NonBlockingPool<ByteBuffer> bufferPool
)
{
  super(incrementalIndexSchema, deserializeComplexMetrics, reportParseExceptions, concurrentEventAdd);
  this.maxRowCount = maxRowCount;
  this.bufferPool = bufferPool;

  this.facts = incrementalIndexSchema.isRollup()
               ? new RollupFactsHolder(sortFacts, dimsComparator(), getDimensions())
               : new PlainFactsHolder(sortFacts, dimsComparator());

  // check that the buffer pool gives buffers that can hold at least one row's aggregators
  ResourceHolder<ByteBuffer> bb = bufferPool.take();
  if (bb.get().capacity() < aggsTotalSize) {
    bb.close();
    throw new IAE("bufferPool buffers capacity must be >= [%s]", aggsTotalSize);
  }
  aggBuffers.add(bb);
}
private static IncrementalIndex makeIncrementalIndex(
    Bucket theBucket,
    AggregatorFactory[] aggs,
    HadoopDruidIndexerConfig config,
    Iterable<String> oldDimOrder,
    Map<String, ColumnCapabilitiesImpl> oldCapabilities
)
{
  final HadoopTuningConfig tuningConfig = config.getSchema().getTuningConfig();
  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withMinTimestamp(theBucket.time.getMillis())
      .withTimestampSpec(config.getSchema().getDataSchema().getParser().getParseSpec().getTimestampSpec())
      .withDimensionsSpec(config.getSchema().getDataSchema().getParser())
      .withQueryGranularity(config.getSchema().getDataSchema().getGranularitySpec().getQueryGranularity())
      .withMetrics(aggs)
      .withRollup(config.getSchema().getDataSchema().getGranularitySpec().isRollup())
      .build();

  IncrementalIndex newIndex = new IncrementalIndex.Builder()
      .setIndexSchema(indexSchema)
      .setReportParseExceptions(!tuningConfig.isIgnoreInvalidRows()) // only used by OffheapIncrementalIndex
      .setMaxRowCount(tuningConfig.getRowFlushBoundary())
      .setMaxBytesInMemory(TuningConfigs.getMaxBytesInMemoryOrDefault(tuningConfig.getMaxBytesInMemory()))
      .buildOnheap();

  if (oldDimOrder != null && !indexSchema.getDimensionsSpec().hasCustomDimensions()) {
    newIndex.loadDimensionIterable(oldDimOrder, oldCapabilities);
  }

  return newIndex;
}
Iterables.toArray(
    Iterables.transform(
        Arrays.asList(schema.getMetrics()),
        new Function<AggregatorFactory, AggregatorFactory>()
OnheapIncrementalIndex(
    IncrementalIndexSchema incrementalIndexSchema,
    boolean deserializeComplexMetrics,
    boolean reportParseExceptions,
    boolean concurrentEventAdd,
    boolean sortFacts,
    int maxRowCount,
    long maxBytesInMemory
)
{
  super(incrementalIndexSchema, deserializeComplexMetrics, reportParseExceptions, concurrentEventAdd);
  this.maxRowCount = maxRowCount;
  this.maxBytesInMemory = maxBytesInMemory == 0 ? Long.MAX_VALUE : maxBytesInMemory;
  this.facts = incrementalIndexSchema.isRollup()
               ? new RollupFactsHolder(sortFacts, dimsComparator(), getDimensions())
               : new PlainFactsHolder(sortFacts, dimsComparator());
  maxBytesPerRowForAggregators = getMaxBytesPerRowForAggregators(incrementalIndexSchema);
}
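One detail worth noting: a maxBytesInMemory of 0 is treated as "no byte-based limit". A minimal sketch of that sentinel mapping, with a hypothetical helper name:

// Sketch of the sentinel handling above; effectiveMaxBytes is a hypothetical helper.
static long effectiveMaxBytes(long maxBytesInMemory)
{
  // 0 disables the byte-based limit, so in practice only maxRowCount bounds the index
  return maxBytesInMemory == 0 ? Long.MAX_VALUE : maxBytesInMemory;
}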
    null,
    null
);

IncrementalIndexSchema schema = new IncrementalIndexSchema(
    0,
    new TimestampSpec("ds", "auto", null),
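The snippet above is truncated mid-call. For reference, a complete invocation following the seven-argument order from Builder.build() might look like the sketch below; everything after the TimestampSpec is an illustrative fill-in, not recovered from the truncated code:

// Hedged sketch of a full construction, matching build()'s argument order:
// (minTimestamp, timestampSpec, gran, virtualColumns, dimensionsSpec, metrics, rollup).
// The granularity, dimensions spec, metric, and rollup flag are assumptions.
IncrementalIndexSchema schema = new IncrementalIndexSchema(
    0,
    new TimestampSpec("ds", "auto", null),
    Granularities.MINUTE,
    VirtualColumns.EMPTY,
    new DimensionsSpec(null, null, null),
    new AggregatorFactory[]{new CountAggregatorFactory("count")},
    true
);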