@Override
public Optional<Long> getSegmentSetId(DruidAggregationQuery<?> query) {
    // Gather the data source names backing the query
    Set<DataSourceName> dataSourceNames = query.getInnermostQuery()
            .getDataSource()
            .getPhysicalTable()
            .getDataSourceNames()
            .stream()
            .collect(Collectors.toSet());

    // Get all the segments for the data sources of the query's physical tables
    Set<SortedMap<DateTime, Map<String, SegmentInfo>>> tableSegments = dataSourceMetadataService.getSegments(
            dataSourceNames
    );

    // Check if we have no tables with segments
    if (tableSegments.isEmpty()) {
        LOG.warn(DRUID_METADATA_SEGMENTS_MISSING.logFormat(dataSourceNames));
        return Optional.empty();
    }

    // Get requested intervals, then their segments, and sum their hash codes into a long
    return getSegmentHash(
            requestedIntervalsQueryExtractionFunctions.get(query.getClass()).apply(query).stream()
                    .flatMap(interval -> tableSegments.stream()
                            .map(segments -> segments.subMap(interval.getStart(), interval.getEnd()))
                    )
    );
}
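// A minimal sketch (an assumption, not the confirmed implementation) of the
// getSegmentHash helper the method above delegates to: fold each requested
// segment sub-map's hashCode into a single long, as the comment above describes.
private Optional<Long> getSegmentHash(Stream<SortedMap<DateTime, Map<String, SegmentInfo>>> segmentMaps) {
    return segmentMaps
            .map(SortedMap::hashCode)    // each sub-map of segments contributes its hash code
            .map(Integer::longValue)     // widen to long before summing
            .reduce(Long::sum);          // an empty stream yields Optional.empty()
}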
/**
 * Gets the timezone of the backing table for the given druid query.
 *
 * @param druidQuery  The druid query to find the timezone for
 *
 * @return the {@link DateTimeZone} of the physical table for this query.
 */
private DateTimeZone getTimeZone(DruidAggregationQuery<?> druidQuery) {
    return druidQuery.getDataSource()
            .getPhysicalTable()
            .getSchema()
            .getTimeGrain()
            .getTimeZone();
}
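// Hypothetical use of the helper above: shift a result timestamp into the
// backing table's time zone before bucketing. `resultTimestamp` and
// `druidQuery` are assumed local names, not from the source.
DateTime aligned = resultTimestamp.withZone(getTimeZone(druidQuery));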
        query.getDataSource().getPhysicalTable().getAvailableIntervals()
);
ApiToFieldMapper aliasMaker = new ApiToFieldMapper(druidQuery.getDataSource().getPhysicalTable().getSchema());
/**
 * Wraps a query as a GroupBy Query.
 *
 * @param query  The query to wrap.
 */
public SqlAggregationQuery(DruidAggregationQuery<?> query) {
    this(
            query.getDataSource(),
            query.getGranularity(),
            query.getDimensions(),
            query.getFilter(),
            query.getAggregations(),
            query.getPostAggregations(),
            query.getIntervals(),
            query.getContext()
    );
}
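// Hypothetical call site: normalize whatever aggregation query arrived
// (timeseries, topN, ...) into GroupBy form before SQL conversion.
// `originalQuery` is an assumed variable name.
SqlAggregationQuery asGroupBy = new SqlAggregationQuery(originalQuery);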
/**
 * Builds the druid query as sql and returns it as a string.
 *
 * @param druidQuery  The query to convert to sql.
 * @param apiToFieldMapper  The mapping between api and physical names for the query.
 *
 * @return the sql equivalent of the query.
 */
public String buildSqlQuery(DruidAggregationQuery<?> druidQuery, ApiToFieldMapper apiToFieldMapper) {
    SqlPhysicalTable sqlTable = (SqlPhysicalTable) druidQuery.getDataSource()
            .getPhysicalTable()
            .getSourceTable();

    LOG.debug(
            "Querying table {} with schema {} using timestampColumn {}",
            sqlTable.getName(),
            sqlTable.getSchemaName(),
            sqlTable.getTimestampColumn()
    );

    RelNode query = convertDruidQueryToRelNode(druidQuery, apiToFieldMapper, sqlTable);
    RelToSqlConverter relToSql = calciteHelper.getNewRelToSqlConverter();
    SqlPrettyWriter sqlWriter = calciteHelper.getNewSqlWriter();
    return writeSql(sqlWriter, relToSql, query);
}
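// Hypothetical call site, combining the alias mapper construction shown
// elsewhere in this class with the SQL rendering above; `sqlConverter` is
// an assumed instance name.
ApiToFieldMapper aliasMaker = new ApiToFieldMapper(druidQuery.getDataSource().getPhysicalTable().getSchema());
String sql = sqlConverter.buildSqlQuery(druidQuery, aliasMaker);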
        ResponseProcessor response
) {
    boolean isSqlBacked = druidQuery.getDataSource()
            .getPhysicalTable()
            .getSourceTable() instanceof SqlPhysicalTable;
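// Hedged sketch of how the flag above might steer routing: SQL-backed tables
// take the SQL path, everything else falls through to the next handler in the
// chain. `sqlRequestHandler` and `next` are assumed field names, not confirmed API.
if (isSqlBacked) {
    return sqlRequestHandler.handleRequest(context, request, druidQuery, response);
}
return next.handleRequest(context, request, druidQuery, response);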
druidQuery.getInnermostQuery().getDataSource().getPhysicalTable().getAvailableIntervals(),
new SimplifiedIntervalList(request.getIntervals()),
request.getGranularity()
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Volatile data request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Gather the volatile intervals. A volatile interval in one data source makes that interval volatile overall.
    SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals(
            druidQuery.getGranularity(),
            druidQuery.getIntervals(),
            physicalTableDictionary.get(
                    druidQuery
                            .getInnermostQuery()
                            .getDataSource()
                            .getPhysicalTable()
                            .getName()
            )
    );

    if (!volatileIntervals.isEmpty()) {
        ResponseContext responseContext = response.getResponseContext();
        responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals);
    }

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
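// Hypothetical downstream consumer: a response mapper can read the volatile
// intervals back out of the ResponseContext under the same key stored above.
SimplifiedIntervalList storedVolatileIntervals =
        (SimplifiedIntervalList) responseContext.get(VOLATILE_INTERVALS_CONTEXT_KEY.getName());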
case GROUP_BY:
    GroupByQuery inner = new GroupByQuery(
            innerQuery.getDataSource(),
            innerQuery.getGranularity(),
            innerQuery.getDimensions(),