IncrementalIndexSchema

How to use IncrementalIndexSchema in org.apache.druid.segment.incremental

Best Java code snippets using org.apache.druid.segment.incremental.IncrementalIndexSchema (showing representative results out of 315)
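Before the individual results, here is a minimal sketch of building a schema with the Builder, assembled from the Builder and constructor calls that appear in the snippets below. The class and method names IncrementalIndexSchemaExample and buildExampleSchema are hypothetical, and the timestamp column "ds", the granularity, and the count aggregator are illustrative values rather than the API's defaults.

import org.apache.druid.data.input.impl.TimestampSpec;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;

public class IncrementalIndexSchemaExample
{
 public static IncrementalIndexSchema buildExampleSchema()
 {
  // Builder methods as used in makeIncrementalIndex() further down this page;
  // the concrete values here are placeholders, not recommendations.
  return new IncrementalIndexSchema.Builder()
    .withMinTimestamp(0L)
    .withTimestampSpec(new TimestampSpec("ds", "auto", null))
    .withQueryGranularity(Granularities.NONE)
    .withMetrics(new AggregatorFactory[]{new CountAggregatorFactory("count")})
    .withRollup(true)
    .build();
 }
}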

origin: apache/incubator-druid

// Excerpt (truncated by the snippet extractor): the schema supplies the index's core settings.
this.minTimestamp = incrementalIndexSchema.getMinTimestamp();
this.gran = incrementalIndexSchema.getGran();
this.rollup = incrementalIndexSchema.isRollup();
this.virtualColumns = incrementalIndexSchema.getVirtualColumns();
this.metrics = incrementalIndexSchema.getMetrics();
this.rowTransformers = new CopyOnWriteArrayList<>();
this.deserializeComplexMetrics = deserializeComplexMetrics;
// ... (a constructor call is truncated here; the lines below are its remaining arguments)
  null,
  getCombiningAggregators(metrics),
  incrementalIndexSchema.getTimestampSpec(),
  this.gran,
  this.rollup
// ...
DimensionsSpec dimensionsSpec = incrementalIndexSchema.getDimensionsSpec();
this.dimensionDescs = Maps.newLinkedHashMap();
origin: apache/incubator-druid

 public IncrementalIndexSchema build()
 {
  return new IncrementalIndexSchema(
    minTimestamp,
    timestampSpec,
    gran,
    virtualColumns,
    dimensionsSpec,
    metrics,
    rollup
  );
 }
}
origin: apache/incubator-druid

// Excerpt (truncated by the snippet extractor): the previous hydrant's column capabilities are
// only carried over when the schema does not declare explicit dimensions.
FireHydrant lastHydrant = hydrants.get(numHydrants - 1);
newCount = lastHydrant.getCount() + 1;
if (!indexSchema.getDimensionsSpec().hasCustomDimensions()) {
 Map<String, ColumnCapabilitiesImpl> oldCapabilities;
 if (lastHydrant.hasSwapped()) {
  // ... (remainder of the snippet elided)
origin: apache/incubator-druid

/**
 * Gives an estimated max size per row for all aggregators. It is assumed that every aggregator will have enough
 * overhead for its own object header and for a pointer to a selector, so an overhead factor of 16 additional bytes
 * is added for each object.
 * These 16 bytes (128 bits) are the object metadata for a 64-bit JVM process and consist of:
 * <ul>
 * <li>Class pointer which describes the object type: 64 bits
 * <li>Flags which describe the state of the object, including the hashcode: 64 bits
 * </ul>
 * The total size estimate consists of:
 * <ul>
 * <li> metrics length: Integer.BYTES * len
 * <li> maxAggregatorIntermediateSize: getMaxIntermediateSize per aggregator + overhead factor (16 bytes)
 * </ul>
 *
 * @param incrementalIndexSchema schema whose aggregators are measured
 *
 * @return long max aggregator size in bytes
 */
private static long getMaxBytesPerRowForAggregators(IncrementalIndexSchema incrementalIndexSchema)
{
 long maxAggregatorIntermediateSize = Integer.BYTES * incrementalIndexSchema.getMetrics().length;
 maxAggregatorIntermediateSize += Arrays.stream(incrementalIndexSchema.getMetrics())
                     .mapToLong(aggregator -> aggregator.getMaxIntermediateSizeWithNulls() + Long.BYTES * 2)
                     .sum();
 return maxAggregatorIntermediateSize;
}
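As a worked example with hypothetical sizes: for two metrics whose getMaxIntermediateSizeWithNulls() returns 8 and 40 bytes, the method returns Integer.BYTES * 2 + (8 + 16) + (40 + 16) = 8 + 24 + 56 = 88 bytes reserved per row for aggregators.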
origin: apache/incubator-druid

OffheapIncrementalIndex(
  IncrementalIndexSchema incrementalIndexSchema,
  boolean deserializeComplexMetrics,
  boolean reportParseExceptions,
  boolean concurrentEventAdd,
  boolean sortFacts,
  int maxRowCount,
  NonBlockingPool<ByteBuffer> bufferPool
)
{
 super(incrementalIndexSchema, deserializeComplexMetrics, reportParseExceptions, concurrentEventAdd);
 this.maxRowCount = maxRowCount;
 this.bufferPool = bufferPool;
 this.facts = incrementalIndexSchema.isRollup() ? new RollupFactsHolder(sortFacts, dimsComparator(), getDimensions())
                         : new PlainFactsHolder(sortFacts, dimsComparator());
 //check that stupid pool gives buffers that can hold at least one row's aggregators
 ResourceHolder<ByteBuffer> bb = bufferPool.take();
 if (bb.get().capacity() < aggsTotalSize) {
  bb.close();
  throw new IAE("bufferPool buffers capacity must be >= [%s]", aggsTotalSize);
 }
 aggBuffers.add(bb);
}
origin: apache/incubator-druid

private static IncrementalIndex makeIncrementalIndex(
  Bucket theBucket,
  AggregatorFactory[] aggs,
  HadoopDruidIndexerConfig config,
  Iterable<String> oldDimOrder,
  Map<String, ColumnCapabilitiesImpl> oldCapabilities
)
{
 final HadoopTuningConfig tuningConfig = config.getSchema().getTuningConfig();
 final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
   .withMinTimestamp(theBucket.time.getMillis())
   .withTimestampSpec(config.getSchema().getDataSchema().getParser().getParseSpec().getTimestampSpec())
   .withDimensionsSpec(config.getSchema().getDataSchema().getParser())
   .withQueryGranularity(config.getSchema().getDataSchema().getGranularitySpec().getQueryGranularity())
   .withMetrics(aggs)
   .withRollup(config.getSchema().getDataSchema().getGranularitySpec().isRollup())
   .build();
 IncrementalIndex newIndex = new IncrementalIndex.Builder()
   .setIndexSchema(indexSchema)
   .setReportParseExceptions(!tuningConfig.isIgnoreInvalidRows()) // only used by OffHeapIncrementalIndex
   .setMaxRowCount(tuningConfig.getRowFlushBoundary())
   .setMaxBytesInMemory(TuningConfigs.getMaxBytesInMemoryOrDefault(tuningConfig.getMaxBytesInMemory()))
   .buildOnheap();
 if (oldDimOrder != null && !indexSchema.getDimensionsSpec().hasCustomDimensions()) {
  newIndex.loadDimensionIterable(oldDimOrder, oldCapabilities);
 }
 return newIndex;
}
origin: apache/incubator-druid

// Excerpt (truncated by the snippet extractor): transform each of the schema's AggregatorFactory entries.
Iterables.toArray(
  Iterables.transform(
    Arrays.asList(schema.getMetrics()),
    new Function<AggregatorFactory, AggregatorFactory>()
    // ... (anonymous Function body and closing calls elided)
origin: apache/incubator-druid

OnheapIncrementalIndex(
  IncrementalIndexSchema incrementalIndexSchema,
  boolean deserializeComplexMetrics,
  boolean reportParseExceptions,
  boolean concurrentEventAdd,
  boolean sortFacts,
  int maxRowCount,
  long maxBytesInMemory
)
{
 super(incrementalIndexSchema, deserializeComplexMetrics, reportParseExceptions, concurrentEventAdd);
 this.maxRowCount = maxRowCount;
 this.maxBytesInMemory = maxBytesInMemory == 0 ? Long.MAX_VALUE : maxBytesInMemory;
 this.facts = incrementalIndexSchema.isRollup() ? new RollupFactsHolder(sortFacts, dimsComparator(), getDimensions())
                         : new PlainFactsHolder(sortFacts, dimsComparator());
 maxBytesPerRowForAggregators = getMaxBytesPerRowForAggregators(incrementalIndexSchema);
}
origin: apache/incubator-druid

// ... (end of a preceding argument list, truncated by the snippet extractor)
  null, null
);
IncrementalIndexSchema schema = new IncrementalIndexSchema(
  0,
  new TimestampSpec("ds", "auto", null),
  // ... (remaining constructor arguments elided)
org.apache.druid.segment.incremental.IncrementalIndexSchema

Most used methods

  • getDimensionsSpec
  • <init>
  • getMetrics
  • getGran
  • getMinTimestamp
  • getTimestampSpec
  • getVirtualColumns
  • isRollup
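
A short sketch reading these fields back from a schema instance, here the one returned by the hypothetical buildExampleSchema() from the sketch near the top of this page; the local variable types are inferred from the constructor excerpt above and may need adjusting against the exact Druid version in use.

IncrementalIndexSchema schema = IncrementalIndexSchemaExample.buildExampleSchema();

long minTimestamp = schema.getMinTimestamp();           // lower bound for row timestamps
Granularity gran = schema.getGran();                    // query granularity applied at ingest
boolean rollup = schema.isRollup();                     // whether identical dimension rows are combined
VirtualColumns virtualColumns = schema.getVirtualColumns();
AggregatorFactory[] metrics = schema.getMetrics();      // aggregators materialized per row
TimestampSpec timestampSpec = schema.getTimestampSpec();
DimensionsSpec dimensionsSpec = schema.getDimensionsSpec();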
