/**
 * Returns a copy of this query with its query context replaced.
 *
 * @param context  The query context to attach to the copy.
 *
 * @return a new GroupByQuery identical to this one except for the context.
 */
@Override
public GroupByQuery withContext(QueryContext context) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
} // CHECKSTYLE:ON
/**
 * Returns a copy of this query that reads from the given data source.
 *
 * @param dataSource  The data source the copy should query against.
 *
 * @return a new GroupByQuery identical to this one except for the data source.
 */
@Override
public GroupByQuery withDataSource(DataSource dataSource) {
    return new GroupByQuery(
            dataSource,
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Strip the columns from the LimitSpec on the query and return it, if present.
 *
 * @param query  Query to strip the columns from within the LimitSpec
 *
 * @return the cleaned LimitSpec if there is one, or null when the query has no LimitSpec
 */
private static LimitSpec stripColumnsFromLimitSpec(DruidFactQuery query) {
    // Cast once; the caller guarantees this is a GroupByQuery (the only fact query type with a LimitSpec here).
    LimitSpec limitSpec = ((GroupByQuery) query).getLimitSpec();
    if (limitSpec == null) {
        return null;
    }
    return limitSpec.withColumns(new LinkedHashSet<>());
}
}
/**
 * Returns a copy of this query with the given intervals applied at every level of nesting.
 *
 * @param intervals  The intervals to set on this query and on every nested inner query.
 *
 * @return a new GroupByQuery whose whole query stack uses the given intervals.
 */
@Override
public GroupByQuery withAllIntervals(Collection<Interval> intervals) {
    @SuppressWarnings("unchecked")
    Optional<DruidFactQuery<?>> innerQuery = (Optional<DruidFactQuery<?>>) this.dataSource.getQuery();
    if (!innerQuery.isPresent()) {
        // No nested query: only this level needs updating.
        return withIntervals(intervals);
    }
    // Recurse into the inner query first, then set the intervals on this level too.
    return withDataSource(new QueryDataSource(innerQuery.get().withAllIntervals(intervals)))
            .withIntervals(intervals);
}
/**
 * Returns a copy of this query whose deepest nested query reads from the given data source.
 *
 * @param dataSource  The data source to install at the innermost level of the query stack.
 *
 * @return a new GroupByQuery with the innermost data source replaced.
 */
@Override
public GroupByQuery withInnermostDataSource(DataSource dataSource) {
    @SuppressWarnings("unchecked")
    Optional<DruidFactQuery<?>> innerQuery = (Optional<DruidFactQuery<?>>) this.dataSource.getQuery();
    if (innerQuery.isPresent()) {
        // Delegate the replacement to the nested query and re-wrap it.
        return withDataSource(new QueryDataSource(innerQuery.get().withInnermostDataSource(dataSource)));
    }
    // This query is already the innermost level.
    return withDataSource(dataSource);
}
/**
 * Gets the collection of having filters to be applied from the druid query.
 *
 * @param builder  The RelBuilder created with Calcite.
 * @param druidQuery  The query to find the having filter from.
 * @param apiToFieldMapper  The mapping from api to physical name.
 *
 * @return the collection of equivalent filters for calcite.
 */
protected Collection<RexNode> getHavingFilter(
        RelBuilder builder,
        DruidAggregationQuery<?> druidQuery,
        ApiToFieldMapper apiToFieldMapper
) {
    RexNode havingNode = null;
    if (druidQuery.getQueryType().equals(GROUP_BY)) {
        Having having = ((GroupByQuery) druidQuery).getHaving();
        if (having != null) {
            havingNode = havingEvaluator.evaluateHaving(having, builder, apiToFieldMapper);
        }
    }
    // NOTE(review): when there is no having clause this returns a singleton list containing
    // null rather than an empty list — presumably callers tolerate a null element; confirm
    // before "fixing" this to Collections.emptyList().
    return Collections.singletonList(havingNode);
}
/**
 * Issues a row-limited group-by over the single given dimension against the SQL backend,
 * delivering results through the success callback built for that dimension.
 *
 * @param dimension  The dimension whose values are being loaded.
 * @param dataSource  The data source backing the dimension's physical table.
 */
@Override
public void query(Dimension dimension, DataSource dataSource) {
    // NOTE(review): sqlTable is never read afterwards — the cast looks like a deliberate
    // fail-fast check that the source table is SQL-backed; confirm before removing.
    SqlPhysicalTable sqlTable = (SqlPhysicalTable) dataSource.getPhysicalTable().getSourceTable();
    SuccessCallback successCallback = buildSuccessCallback(dimension);
    // Ungrouped-by-time query over just this dimension: no filter, no having,
    // no aggregations/post-aggregations, capped at ROW_LIMIT rows.
    GroupByQuery groupByQuery = new GroupByQuery(
            dataSource,
            AllGranularity.INSTANCE,
            Collections.singletonList(dimension),
            null,
            null,
            Collections.emptyList(),
            Collections.emptyList(),
            Collections.singletonList(INTERVAL),
            new LimitSpec(Utils.asLinkedHashSet(), OptionalInt.of(ROW_LIMIT))
    );
    sqlBackedClient.executeQuery(groupByQuery, successCallback, failureCallback);
}
/**
 * Gets the number of rows to limit results to for a Group by Query. Otherwise no limit is applied.
 *
 * @param druidQuery  The query to get the row limit from.
 *
 * @return the number of rows to include in the results, or {@code NO_LIMIT} when none applies.
 */
protected int getLimit(DruidAggregationQuery<?> druidQuery) {
    if (!druidQuery.getQueryType().equals(GROUP_BY)) {
        return NO_LIMIT;
    }
    LimitSpec limitSpec = ((GroupByQuery) druidQuery).getLimitSpec();
    if (limitSpec == null) {
        return NO_LIMIT;
    }
    return limitSpec.getLimit().orElse(NO_LIMIT);
}
/**
 * Returns a copy of this query restricted to the given intervals.
 *
 * @param intervals  The intervals the copy should cover.
 *
 * @return a new GroupByQuery identical to this one except for the intervals.
 */
@Override
public GroupByQuery withIntervals(Collection<Interval> intervals) {
    // NOTE(review): unlike every other with* method here, the trailing flag is 'true' —
    // presumably changing intervals invalidates some cached/derived state; confirm intent.
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            true
    );
}
return new GroupByQuery( dataSource, mergedGranularity,
if (druidQuery.getQueryType().equals(GROUP_BY)) { GroupByQuery groupByQuery = (GroupByQuery) druidQuery; LimitSpec limitSpec = groupByQuery.getLimitSpec(); if (limitSpec != null) { limitSpec.getColumns()
/**
 * Returns a copy of this query with the given limit specification.
 *
 * @param limitSpec  The limit specification for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the limit spec.
 */
public GroupByQuery withLimitSpec(LimitSpec limitSpec) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
case TOP_N: TopNQuery topNQuery = (TopNQuery) innerQuery; GroupByQuery transformed = new GroupByQuery( new UnionDataSource(topNQuery.getDataSource().getPhysicalTable()), topNQuery.getGranularity(),
/**
 * Returns a copy of this query with the given row filter.
 *
 * @param filter  The filter for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the filter.
 */
@Override
public GroupByQuery withFilter(Filter filter) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Returns a copy of this query grouping by the given dimensions.
 *
 * @param dimensions  The grouping dimensions for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the dimensions.
 */
public GroupByQuery withDimensions(Collection<Dimension> dimensions) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Returns a copy of this query with the given having clause.
 *
 * @param having  The having clause for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the having clause.
 */
public GroupByQuery withHaving(Having having) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Returns a copy of this query bucketed at the given granularity.
 *
 * @param granularity  The time granularity for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the granularity.
 */
@Override
public GroupByQuery withGranularity(Granularity granularity) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Returns a copy of this query computing the given aggregations.
 *
 * @param aggregations  The aggregations for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the aggregations.
 */
@Override
public GroupByQuery withAggregations(Collection<Aggregation> aggregations) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Returns a copy of this query computing the given post-aggregations.
 *
 * @param postAggregations  The post-aggregations for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the post-aggregations.
 */
@Override
public GroupByQuery withPostAggregations(Collection<PostAggregation> postAggregations) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}
/**
 * Returns a copy of this query ordered by the given limit specification.
 *
 * @param limitSpec  The ordering/limit specification for the copy.
 *
 * @return a new GroupByQuery identical to this one except for the ordering.
 */
public GroupByQuery withOrderBy(LimitSpec limitSpec) {
    return new GroupByQuery(
            getDataSource(),
            granularity,
            dimensions,
            filter,
            having,
            aggregations,
            postAggregations,
            intervals,
            limitSpec,
            context,
            false
    );
}