/**
 * Covariant override of the interface default that narrows the innermost query to a
 * TemplateDruidQuery.
 *
 * @return the innermost query, cast to TemplateDruidQuery
 */
@Override
public TemplateDruidQuery getInnermostQuery() {
    return (TemplateDruidQuery) DruidAggregationQuery.super.getInnermostQuery();
}
/**
 * Retrieves a set of Druid metric names associated with the query.
 *
 * @return set of Druid metric names
 */
@JsonIgnore
default Set<String> getDependentFieldNames() {
    return getInnermostQuery().getAggregations().stream()
            .map(Aggregation::getFieldName)
            .filter(it -> !it.isEmpty())
            .collect(Collectors.toSet());
}
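// Hypothetical usage sketch (not from the source): how a validator might use
// getDependentFieldNames() to spot dependent metrics with no backing physical column.
// The findMissingFields name and the physicalColumns parameter are assumptions.
public static Set<String> findMissingFields(TemplateDruidQuery query, Set<String> physicalColumns) {
    // Any dependent field name absent from the physical columns cannot be satisfied
    return query.getDependentFieldNames().stream()
            .filter(fieldName -> !physicalColumns.contains(fieldName))
            .collect(Collectors.toSet());
}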
/**
 * Get all the dimensions from Filtered Aggregations of a filtered metric.
 *
 * @return Set of dimensions which are part of Filtered Aggregations in a filtered logical metric
 */
@JsonIgnore
default Set<Dimension> getMetricDimensions() {
    return getInnermostQuery().getAggregations().stream()
            .map(Aggregation::getDependentDimensions)
            .flatMap(Set::stream)
            .collect(Collectors.toCollection(LinkedHashSet::new));
}
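// Hypothetical usage sketch (not from the source): collecting the API names of the
// dimensions that filtered aggregations depend on, e.g. for a join-ability check.
// getMetricDimensionApiNames is an assumed helper name.
public static Set<String> getMetricDimensionApiNames(TemplateDruidQuery query) {
    return query.getMetricDimensions().stream()
            .map(Dimension::getApiName)
            .collect(Collectors.toSet());
}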
/**
 * Evaluate a Druid query for expensive aggregations that could bring down Druid.
 *
 * @param query  the Druid query to evaluate
 *
 * @return a weight evaluation query for the given query
 */
public static WeightEvaluationQuery makeWeightEvaluationQuery(DruidAggregationQuery<?> query) {
    // Get the inner-most query for evaluation
    DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();

    // Weight the query by the number of sketch aggregations it carries
    int weight = Utils.getSubsetByType(innerQuery.getAggregations(), SketchAggregation.class).size();

    return new WeightEvaluationQuery(innerQuery, weight);
}
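// Hypothetical pre-flight check (not from the source): a query whose inner-most query
// carries no sketch aggregations has weight 0, so the weight-check round trip could be
// skipped entirely. isWeightCheckNeeded is an assumed helper name.
public static boolean isWeightCheckNeeded(DruidAggregationQuery<?> query) {
    DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();
    return !Utils.getSubsetByType(innerQuery.getAggregations(), SketchAggregation.class).isEmpty();
}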
@Override
public Optional<Long> getSegmentSetId(DruidAggregationQuery<?> query) {
    // Gather the data source names backing the query
    Set<DataSourceName> dataSourceNames = query.getInnermostQuery()
            .getDataSource()
            .getPhysicalTable()
            .getDataSourceNames()
            .stream()
            .collect(Collectors.toSet());

    // Get all the segments for the data sources of the query's physical tables
    Set<SortedMap<DateTime, Map<String, SegmentInfo>>> tableSegments = dataSourceMetadataService.getSegments(
            dataSourceNames
    );

    // Check if we have no tables with segments
    if (tableSegments.isEmpty()) {
        LOG.warn(DRUID_METADATA_SEGMENTS_MISSING.logFormat(dataSourceNames));
        return Optional.empty();
    }

    // Get requested intervals, then their segments, and sum their hash codes into a long
    return getSegmentHash(
            requestedIntervalsQueryExtractionFunctions.get(query.getClass()).apply(query).stream()
                    .flatMap(interval -> tableSegments.stream()
                            .map(segments -> segments.subMap(interval.getStart(), interval.getEnd()))
                    )
    );
}
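// Hypothetical caller sketch (not from the source): folding the segment set id into a
// cache key so cached responses roll over when segment metadata changes. The
// querySigningService field and buildCacheKey name are assumptions.
private String buildCacheKey(DruidAggregationQuery<?> query) {
    // No segment metadata means the response should not be cached under a stable key
    return querySigningService.getSegmentSetId(query)
            .map(segmentSetId -> query.getInnermostQuery().hashCode() + ":" + segmentSetId)
            .orElse(null);
}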
DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();
final WeightCheckResponseProcessor weightCheckResponse = new WeightCheckResponseProcessor(response);
final DruidAggregationQuery<?> weightEvaluationQuery = queryWeightUtil.makeWeightEvaluationQuery(druidQuery);
Granularity granularity = druidQuery.getInnermostQuery().getGranularity();
final long queryRowLimit = queryWeightUtil.getQueryWeightThreshold(granularity);
druidQuery.getInnermostQuery().getDataSource().getPhysicalTable().getAvailableIntervals(),
new SimplifiedIntervalList(request.getIntervals()),
request.getGranularity()
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Volatile data request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Gather the volatile intervals. A volatile interval in one data source makes that interval volatile overall.
    SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals(
            druidQuery.getGranularity(),
            druidQuery.getIntervals(),
            physicalTableDictionary.get(
                    druidQuery
                            .getInnermostQuery()
                            .getDataSource()
                            .getPhysicalTable()
                            .getName()
            )
    );

    if (!volatileIntervals.isEmpty()) {
        ResponseContext responseContext = response.getResponseContext();
        responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals);
    }

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
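// Hypothetical downstream read (not from the source): a response mapper recovering the
// volatile intervals stashed in the response context above, to flag partial data.
// readVolatileIntervals is an assumed helper, not a Fili API.
private static SimplifiedIntervalList readVolatileIntervals(ResponseContext responseContext) {
    // An absent key means none of the requested intervals were volatile
    Object intervals = responseContext.get(VOLATILE_INTERVALS_CONTEXT_KEY.getName());
    return intervals == null ? new SimplifiedIntervalList() : (SimplifiedIntervalList) intervals;
}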
DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();