@Override
@JsonIgnore
public Granularity getGranularity() {
    return getInnerQueryUnchecked().getGranularity();
}
/**
 * Retrieve a simplified list of intervals aligned to the time buckets of the query which are partially or fully
 * volatile.
 *
 * @param query The query whose time bucketed intervals will be checked for volatility
 * @param factSource The fact source table whose intervals are being retrieved
 *
 * @return A simplified interval list of volatile intervals
 *
 * @deprecated Exists solely for backwards compatibility.
 * {@link VolatileIntervalsService#getVolatileIntervals(Granularity, List, PhysicalTable)} should be used instead
 */
@Deprecated
default SimplifiedIntervalList getVolatileIntervals(DruidAggregationQuery<?> query, PhysicalTable factSource) {
    return getVolatileIntervals(query.getGranularity(), query.getIntervals(), factSource);
}
}
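A minimal sketch of a call site migrated off the deprecated overload, assuming a VolatileIntervalsService instance, the query, and the fact source PhysicalTable are already in hand; import paths follow the usual Fili package layout and may need adjusting.

import com.yahoo.bard.webservice.druid.model.query.DruidAggregationQuery;
import com.yahoo.bard.webservice.table.PhysicalTable;
import com.yahoo.bard.webservice.util.SimplifiedIntervalList;

// Hypothetical helper: forwards the granularity and intervals from the query
// instead of handing the query itself to the deprecated overload.
public final class VolatileIntervalsMigrationSketch {
    public static SimplifiedIntervalList volatileIntervalsFor(
            VolatileIntervalsService service,
            DruidAggregationQuery<?> query,
            PhysicalTable factSource
    ) {
        // Old form: service.getVolatileIntervals(query, factSource);
        return service.getVolatileIntervals(query.getGranularity(), query.getIntervals(), factSource);
    }
}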
/**
 * Indicate if the weight check query can be skipped based on heuristics.
 *
 * @param query Query to test
 *
 * @return true if the weight check query does not need to be run
 */
public boolean skipWeightCheckQuery(DruidAggregationQuery<?> query) {
    try {
        long worstCaseRows = WeightEvaluationQuery.getWorstCaseWeightEstimate(query);
        double skipThreshold = getQueryWeightThreshold(query.getGranularity()) / weightCheckBypassFactor;
        return worstCaseRows <= skipThreshold;
    } catch (ArithmeticException ignored) {
        // We got a really big estimate, so don't skip the check
        LOG.debug("worst case weight larger than {}", Long.MAX_VALUE);
        return false;
    }
}
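A self-contained sketch of the arithmetic behind the skip decision, using hypothetical numbers for the worst-case estimate, the per-granularity threshold, and the weightCheckBypassFactor.

// Worked example of the skip heuristic with hypothetical values.
public final class SkipWeightCheckSketch {
    public static void main(String[] args) {
        long worstCaseRows = 50_000L;             // assumed worst-case weight estimate
        double queryWeightThreshold = 100_000d;   // assumed threshold for the query's granularity
        double weightCheckBypassFactor = 10d;     // assumed bypass factor

        // The check is skipped only when the worst case fits under the
        // threshold shrunk by the bypass factor.
        double skipThreshold = queryWeightThreshold / weightCheckBypassFactor; // 10,000
        boolean skip = worstCaseRows <= skipThreshold;

        System.out.println("skip weight check query: " + skip); // false: 50,000 > 10,000
    }
}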
/**
 * Collects all the time columns and dimensions to be grouped on.
 *
 * @param builder The RelBuilder created with Calcite.
 * @param druidQuery The query to find grouping columns from.
 * @param apiToFieldMapper The mapping from api to physical name.
 * @param timestampColumn The name of the timestamp column in the database.
 *
 * @return all columns which should be grouped on.
 */
protected List<RexNode> getAllGroupByColumns(
        RelBuilder builder,
        DruidAggregationQuery<?> druidQuery,
        ApiToFieldMapper apiToFieldMapper,
        String timestampColumn
) {
    List<RexNode> timeFilters = sqlTimeConverter.buildGroupBy(
            builder,
            druidQuery.getGranularity(),
            timestampColumn
    );
    List<RexNode> dimensionFields = getDimensionFields(builder, druidQuery, apiToFieldMapper);

    List<RexNode> allGroupBys = new ArrayList<>(timeFilters.size() + dimensionFields.size());
    allGroupBys.addAll(timeFilters);
    allGroupBys.addAll(dimensionFields);
    return allGroupBys;
}
/**
 * Given an array of strings (a row from a {@link java.sql.ResultSet}) and the
 * {@link Granularity} used to make groupBy statements on time, it will parse out a {@link DateTime}
 * for the row which represents the beginning of the interval it was grouped on.
 *
 * @param offset the last column before the date fields.
 * @param recordValues The results returned by SQL from which the time columns are read.
 * @param druidQuery The original druid query which was built by calling
 * {@link #buildGroupBy(RelBuilder, Granularity, String)}.
 *
 * @return the datetime for the start of the interval.
 */
public DateTime getIntervalStart(int offset, String[] recordValues, DruidAggregationQuery<?> druidQuery) {
    List<SqlDatePartFunction> times = timeGrainToDatePartFunctions(druidQuery.getGranularity());

    DateTimeZone timeZone = getTimeZone(druidQuery);

    if (times.isEmpty()) {
        throw new UnsupportedOperationException("Can't parse DateTime if no times were grouped on.");
    }

    MutableDateTime mutableDateTime = new MutableDateTime(0, 1, 1, 0, 0, 0, 0, timeZone);

    for (int i = 0; i < times.size(); i++) {
        int value = Integer.parseInt(recordValues[offset + i]);
        SqlDatePartFunction fn = times.get(i);
        setDateTime(value, fn, mutableDateTime);
    }

    return mutableDateTime.toDateTime();
}
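A hypothetical, self-contained illustration of the parsing step, assuming a MONTH-like grain whose time columns are (year, month) and a row in which two non-time columns precede them; the real code resolves the date part functions from the query's granularity instead of hard-coding them.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;

// Hypothetical row layout: two metric columns followed by year and month.
public final class IntervalStartSketch {
    public static void main(String[] args) {
        String[] recordValues = {"12345.0", "678.9", "2016", "7"};
        int offset = 2; // the time columns start at index 2

        // Start from a zeroed-out date and set only the grouped-on fields.
        MutableDateTime mutableDateTime = new MutableDateTime(0, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC);
        mutableDateTime.setYear(Integer.parseInt(recordValues[offset]));
        mutableDateTime.setMonthOfYear(Integer.parseInt(recordValues[offset + 1]));

        DateTime intervalStart = mutableDateTime.toDateTime();
        System.out.println(intervalStart); // 2016-07-01T00:00:00.000Z
    }
}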
int timePartFunctions = sqlTimeConverter.timeGrainToDatePartFunctions(druidQuery.getGranularity()).size();
int groupBys = druidQuery.getDimensions().size() + timePartFunctions;
        String[] row
) throws IOException {
    int lastTimeIndex = sqlTimeConverter.timeGrainToDatePartFunctions(druidQuery.getGranularity()).size();
    int columnCount = columnToColumnName.size();
        final ResponseProcessor response
) {
    Granularity granularity = druidQuery.getGranularity();
    List<Interval> queryIntervals = druidQuery.getIntervals();
long periods = IntervalUtils.countSlicedIntervals(innerQuery.getIntervals(), innerQuery.getGranularity());
long cardinalityWeight;
/**
 * Wraps a query as a GroupBy Query.
 *
 * @param query The query to wrap.
 */
public SqlAggregationQuery(DruidAggregationQuery<?> query) {
    this(
            query.getDataSource(),
            query.getGranularity(),
            query.getDimensions(),
            query.getFilter(),
            query.getAggregations(),
            query.getPostAggregations(),
            query.getIntervals(),
            query.getContext()
    );
}
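A brief usage sketch, assuming SqlAggregationQuery is on the classpath: any aggregation query already in hand can be re-shaped into the GroupBy form before SQL translation.

import com.yahoo.bard.webservice.druid.model.query.DruidAggregationQuery;

// Hypothetical helper: wrap an arbitrary aggregation query as a GroupBy-style query.
public final class SqlQueryWrappingSketch {
    public static SqlAggregationQuery asGroupBy(DruidAggregationQuery<?> druidQuery) {
        // The data source, granularity, dimensions, filter, aggregations,
        // post-aggregations, intervals, and context all carry over unchanged.
        return new SqlAggregationQuery(druidQuery);
    }
}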
final WeightCheckResponseProcessor weightCheckResponse = new WeightCheckResponseProcessor(response);
final DruidAggregationQuery<?> weightEvaluationQuery = queryWeightUtil.makeWeightEvaluationQuery(druidQuery);
Granularity granularity = druidQuery.getInnermostQuery().getGranularity();
final long queryRowLimit = queryWeightUtil.getQueryWeightThreshold(granularity);
if (AllGranularity.INSTANCE.equals(druidQuery.getGranularity())) {
    timestamp = druidQuery.getIntervals().get(0).getStart();
} else {
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Volatile data request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Gather the volatile intervals. A volatile interval in one data source makes that interval volatile overall.
    SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals(
            druidQuery.getGranularity(),
            druidQuery.getIntervals(),
            physicalTableDictionary.get(
                    druidQuery
                            .getInnermostQuery()
                            .getDataSource()
                            .getPhysicalTable()
                            .getName()
            )
    );

    if (!volatileIntervals.isEmpty()) {
        ResponseContext responseContext = response.getResponseContext();
        responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals);
    }

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
GroupByQuery inner = new GroupByQuery(
        innerQuery.getDataSource(),
        innerQuery.getGranularity(),
        innerQuery.getDimensions(),
        innerQuery.getFilter(),