/**
 * Wraps a query as a GroupBy Query.
 *
 * @param query The query to wrap.
 */
public SqlAggregationQuery(DruidAggregationQuery<?> query) {
    // Delegate to the full constructor, copying every component of the wrapped query unchanged.
    this(
            query.getDataSource(),
            query.getGranularity(),
            query.getDimensions(),
            query.getFilter(),
            query.getAggregations(),
            query.getPostAggregations(),
            query.getIntervals(),
            query.getContext()
    );
}
/**
 * Generate a query that calculates the even weight of the response cardinality of the given query.
 *
 * @param query Query to calculate the weighted response cardinality of
 * @param weight Weight to apply to each response row
 */
public WeightEvaluationQuery(DruidAggregationQuery<?> query, int weight) {
    super(
            // Inner query emits a constant "count" of `weight` per result row of the original query
            makeInnerQuery(query, weight),
            AllGranularity.INSTANCE,
            // No dimensions, filter, or having: the outer query only totals the inner weights
            Collections.<Dimension>emptyList(),
            (Filter) null,
            (Having) null,
            // Sum the per-row weights into a single "count" metric
            Collections.<Aggregation>singletonList(new LongSumAggregation("count", "count")),
            Collections.<PostAggregation>emptyList(),
            query.getIntervals(),
            // Only group-by queries carry a limit spec; strip its column clauses since those
            // columns don't exist on this weight query
            query.getQueryType() == DefaultQueryType.GROUP_BY ? stripColumnsFromLimitSpec(query) : null
    );
}
/**
 * Produce the schema-defining columns for a given druid query.
 *
 * @param druidQuery The query being modelled.
 *
 * @return A stream of columns based on the signature of the Druid Query.
 */
public Stream<Column> buildSchemaColumns(DruidAggregationQuery<?> druidQuery) {
    // Pass through to druid query to allow for possible behavior customization on injected DruidResponseParsers.
    return druidQuery.buildSchemaColumns();
}
}
/**
 * Get all the dimensions from Filtered Aggregations of a filtered metric.
 *
 * @return Set of dimensions which are part of Filtered Aggregations in a filtered logical metric
 */
@JsonIgnore
default Set<Dimension> getMetricDimensions() {
    // LinkedHashSet de-duplicates while keeping dimensions in aggregation-declaration order.
    Set<Dimension> dependentDimensions = new LinkedHashSet<>();
    for (Aggregation aggregation : getInnermostQuery().getAggregations()) {
        dependentDimensions.addAll(aggregation.getDependentDimensions());
    }
    return dependentDimensions;
}
/**
 * Retrieve a simplified list of intervals aligned to the time buckets of the query which are partially or fully
 * volatile.
 *
 * @param query The query whose time bucketed intervals will be checked for volatility
 * @param factSource The fact source table whose intervals are being retrieved
 *
 * @return A simplified interval list of volatile intervals
 *
 * @deprecated Exists solely for backwards compatibility.
 * {@link VolatileIntervalsService#getVolatileIntervals(Granularity, List, PhysicalTable)} should be used instead
 */
@Deprecated
default SimplifiedIntervalList getVolatileIntervals(DruidAggregationQuery<?> query, PhysicalTable factSource) {
    // Adapter: unpack the query's granularity and intervals, then delegate to the replacement overload.
    return getVolatileIntervals(query.getGranularity(), query.getIntervals(), factSource);
}
}
// NOTE(review): this snippet appears truncated — the closing brace of the early-return `if` block
// is missing in this view, and the enclosing method signature plus the remaining switch cases are
// not visible. Tokens are preserved as-is.
DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();
// Each row of the inner query contributes a constant "count" equal to the requested weight.
postAggregations = Collections.singletonList(new ConstantPostAggregation("count", weight));
// Only the built-in query types are supported; bail out for custom query types.
if (!(innerQuery.getQueryType() instanceof DefaultQueryType)) {
    return null;
DefaultQueryType innerQueryType = (DefaultQueryType) innerQuery.getQueryType();
switch (innerQueryType) {
    case GROUP_BY:
        // Rebuild the innermost group-by with the weight-counting aggregations/postAggregations,
        // dropping the having clause (it references metrics that no longer exist here).
        GroupByQuery inner = new GroupByQuery(
                innerQuery.getDataSource(),
                innerQuery.getGranularity(),
                innerQuery.getDimensions(),
                innerQuery.getFilter(),
                (Having) null,
                aggregations,
                postAggregations,
                innerQuery.getIntervals(),
                // Limit spec columns refer to the original metrics, so strip them
                stripColumnsFromLimitSpec(innerQuery)
        );
// NOTE(review): fragment — the enclosing method signature is not visible, the argument list of
// Math.min(...) has been lost, and the dimension-cardinality stream pipeline is cut off before its
// terminal operation. Tokens are preserved as-is.
DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();
// Weight scales with how many sketch aggregations the innermost query carries.
int sketchWeight = Utils.getSubsetByType(innerQuery.getAggregations(), SketchAggregation.class).size();
// No sketches means nothing to estimate.
if (sketchWeight == 0) {
    return 0;
// Number of granularity-sized buckets the query's intervals slice into.
long periods = IntervalUtils.countSlicedIntervals(innerQuery.getIntervals(), innerQuery.getGranularity());
long cardinalityWeight;
if (innerQuery.getQueryType() == DefaultQueryType.TOP_N) {
    // TopN queries are bounded — presumably capped by the topN threshold vs. the dimension
    // cardinality, but the Math.min arguments are missing from this view; confirm against the repo.
    TopNQuery topNQuery = (TopNQuery) innerQuery;
    cardinalityWeight = Math.min(
    );
} else {
    // Combine the cardinalities of the grouped dimensions, ignoring unknown (non-positive)
    // cardinalities; the terminal reduction is outside this view.
    cardinalityWeight = innerQuery.getDimensions().stream()
            .mapToLong(Dimension::getCardinality)
            .filter(cardinality -> cardinality > 0)
/**
 * Publish this query's volatile intervals (if any) into the response context, then delegate the
 * request to the next handler in the chain.
 *
 * @param context  The request context
 * @param request  The API request being processed
 * @param druidQuery  The query whose intervals are checked for volatility
 * @param response  The response processor; must be a {@code MappingResponseProcessor}
 *
 * @return the result of the next handler in the chain
 */
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Downstream processing requires mapping capability; fail fast on misconfiguration.
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Volatile data request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Gather the volatile intervals. A volatile interval in one data source makes that interval volatile overall.
    // The physical table is looked up by the innermost query's data source name.
    SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals(
            druidQuery.getGranularity(),
            druidQuery.getIntervals(),
            physicalTableDictionary.get(
                    druidQuery
                            .getInnermostQuery()
                            .getDataSource()
                            .getPhysicalTable()
                            .getName()
            )
    );

    // Only attach the context key when there is something volatile to report.
    if (!volatileIntervals.isEmpty()) {
        ResponseContext responseContext = response.getResponseContext();
        responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals);
    }

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
/**
 * Produce the schema-defining columns for a given druid query.
 *
 * @return A stream of columns based on the signature of the Druid Query.
 */
default Stream<Column> buildSchemaColumns() {
    // Dimension columns come first, preserving the query's dimension order.
    Stream<Column> dimensionColumns = getDimensions().stream()
            .map(DimensionColumn::new);

    // Metric columns follow: one per aggregation name, then one per post-aggregation name.
    Stream<Column> metricColumns = Stream.concat(
            getAggregations().stream().map(Aggregation::getName),
            getPostAggregations().stream().map(PostAggregation::getName)
    ).map(MetricColumn::new);

    return Stream.concat(dimensionColumns, metricColumns);
}
}
// NOTE(review): fragment — the enclosing method signature and the declarations of `sorts`,
// `builder`, `timestampColumn`, and `apiToFieldMapper` are outside this view; the group-by `if`
// block is also missing its closing brace here. Tokens are preserved as-is.
// Grouped columns = one field per dimension plus one per date-part function of the query grain.
int timePartFunctions = sqlTimeConverter.timeGrainToDatePartFunctions(druidQuery.getGranularity()).size();
int groupBys = druidQuery.getDimensions().size() + timePartFunctions;
if (druidQuery.getQueryType().equals(GROUP_BY)) {
    GroupByQuery groupByQuery = (GroupByQuery) druidQuery;
    LimitSpec limitSpec = groupByQuery.getLimitSpec();
// Sort by timestamp first, then by the date-part fields, which sit after the dimension fields
// in the builder's field list.
sorts.add(builder.field(timestampColumn));
sorts.addAll(builder.fields().subList(druidQuery.getDimensions().size(), groupBys));
List<RexNode> unorderedDimensions = druidQuery.getDimensions().stream()
        .map(Dimension::getApiName)
        .map(apiToFieldMapper)
// NOTE(review): garbled fragment — the start of the enclosing method signature and the receiver of
// `.stream()` (presumably some partitioning of queryIntervals) are missing from this view, as is
// the declaration of `queries`. Tokens are preserved as-is.
final ResponseProcessor response
) {
    Granularity granularity = druidQuery.getGranularity();
    List<Interval> queryIntervals = druidQuery.getIntervals();
            .stream()
            // Fan out: one copy of the query per interval, preserving interval order.
            .forEachOrdered(interval -> queries.add(
                    druidQuery.withAllIntervals(Collections.singletonList(interval)))
            );
/**
 * Intervals are sourced from the wrapped inner query.
 *
 * @return the intervals of the inner query this query delegates to
 */
@Override
@JsonIgnore
public List<Interval> getIntervals() {
    return getInnerQueryUnchecked().getIntervals();
}
/**
 * Gets the timezone of the backing table for the given druid query.
 *
 * @param druidQuery The druid query to find the timezone for
 *
 * @return the {@link DateTimeZone} of the physical table for this query.
 */
private DateTimeZone getTimeZone(DruidAggregationQuery<?> druidQuery) {
    // The timezone lives on the physical table's time grain, reached via the query's data source.
    return druidQuery.getDataSource()
            .getPhysicalTable()
            .getSchema()
            .getTimeGrain()
            .getTimeZone();
}
// NOTE(review): fragment — the start of the method signature, the other parameters, and the loop
// body are outside this view. Tokens are preserved as-is.
String[] row
) throws IOException {
    // Leading result columns are the date-part values derived from the query's grain.
    int lastTimeIndex = sqlTimeConverter.timeGrainToDatePartFunctions(druidQuery.getGranularity()).size();
    int columnCount = columnToColumnName.size();
    // Evaluate each post-aggregation of the query; how the result is consumed is not visible here.
    for (PostAggregation postAggregation : druidQuery.getPostAggregations()) {
        Number postAggResult = postAggregationEvaluator.calculate(
                postAggregation,
/**
 * Decorate the query context with the configured uncovered-intervals limit and pass the request to
 * the next handler, wrapping the response processor so Druid partial-data information is handled.
 *
 * @param context  The request context
 * @param request  The API request being processed
 * @param druidQuery  The query to decorate
 * @param response  The response processor to wrap
 *
 * @return the result of the next handler in the chain
 */
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    return next.handleRequest(
            context,
            request,
            // Ask Druid to report up to `druidUncoveredIntervalLimit` uncovered intervals.
            druidQuery.withContext(
                    druidQuery.getContext().withUncoveredIntervalsLimit(druidUncoveredIntervalLimit)
            ),
            new DruidPartialDataResponseProcessor(response)
    );
}
}
/**
 * Compute an id for the set of segments backing the given query, if segment metadata is available.
 *
 * @param query The query whose backing segments should be identified
 *
 * @return a hash over the segments covering the query's requested intervals, or empty when no
 *         segment metadata exists for the query's data sources
 */
@Override
public Optional<Long> getSegmentSetId(DruidAggregationQuery<?> query) {
    // Gather the data source names backing the query
    Set<DataSourceName> dataSourceNames = query.getInnermostQuery()
            .getDataSource()
            .getPhysicalTable()
            .getDataSourceNames()
            .stream()
            .collect(Collectors.toSet());

    // Get all the segments for the data sources of the query's physical tables
    Set<SortedMap<DateTime, Map<String, SegmentInfo>>> tableSegments = dataSourceMetadataService.getSegments(
            dataSourceNames
    );

    // Check if we have no tables with segments
    if (tableSegments.isEmpty()) {
        LOG.warn(DRUID_METADATA_SEGMENTS_MISSING.logFormat(dataSourceNames));
        return Optional.empty();
    }

    // Get requested intervals, then their segments, and sum their hash codes into a long.
    // NOTE(review): `requestedIntervalsQueryExtractionFunctions.get(query.getClass())` returns null
    // (and NPEs on `.apply`) if the concrete query class is not registered — confirm every query
    // type reaching this method has an entry in the map.
    return getSegmentHash(
            requestedIntervalsQueryExtractionFunctions.get(query.getClass()).apply(query).stream()
                    .flatMap(interval -> tableSegments.stream()
                            .map(segments -> segments.subMap(interval.getStart(), interval.getEnd()))
                    )
    );
}
/**
 * Granularity is sourced from the wrapped inner query.
 *
 * @return the granularity of the inner query this query delegates to
 */
@Override
@JsonIgnore
public Granularity getGranularity() {
    return getInnerQueryUnchecked().getGranularity();
}
/**
 * Aggregations are sourced from the wrapped inner query.
 *
 * @return the aggregations of the inner query this query delegates to
 */
@Override
@JsonIgnore
public Set<Aggregation> getAggregations() {
    return getInnerQueryUnchecked().getAggregations();
}
/**
 * The filter is sourced from the wrapped inner query.
 *
 * @return the filter of the inner query this query delegates to
 */
@Override
@JsonIgnore
public Filter getFilter() {
    return getInnerQueryUnchecked().getFilter();
}
/**
 * Dimensions are sourced from the wrapped inner query.
 *
 * @return the dimensions of the inner query this query delegates to
 */
@Override
@JsonIgnore
public Collection<Dimension> getDimensions() {
    return getInnerQueryUnchecked().getDimensions();
}