private RelNode copyNodeScan(RelNode scan) {
  final RelNode newScan;
  if (scan instanceof DruidQuery) {
    final DruidQuery dq = (DruidQuery) scan;
    // Ideally we should use the HiveRelNode convention. However, since the Volcano
    // planner throws in that case because DruidQuery does not implement the interface,
    // we set it as Bindable. Currently, we do not use conventions in Hive, hence this
    // should be fine.
    // TODO: If we want to make use of conventions (e.g., while directly generating the
    // operator tree instead of the AST), this should be changed.
    newScan = DruidQuery.create(optCluster,
        optCluster.traitSetOf(BindableConvention.INSTANCE),
        scan.getTable(), dq.getDruidTable(),
        ImmutableList.<RelNode>of(dq.getTableScan()));
  } else {
    newScan = new HiveTableScan(optCluster,
        optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) scan.getTable(),
        ((RelOptHiveTable) scan.getTable()).getName(),
        null, false, false);
  }
  return newScan;
}
private static RelNode project(DruidQuery dq, ImmutableBitSet fieldsUsed,
    Set<RelDataTypeField> extraFields, RelBuilder relBuilder) {
  final int fieldCount = dq.getRowType().getFieldCount();
  if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount))
      && extraFields.isEmpty()) {
    final RexBuilder rexBuilder = dq.getCluster().getRexBuilder();
    final List<RelDataTypeField> fields = dq.getRowType().getFieldList();
    // ...
  hts = ((HiveJdbcConverter) scan).getTableScan().getHiveTableScan();
} else if (scan instanceof DruidQuery) {
  hts = (HiveTableScan) ((DruidQuery) scan).getTableScan();
} else {
  hts = (HiveTableScan) scan;
}
// ...
if (scan instanceof DruidQuery) {
  // Pass the generated Druid query to the storage handler via table properties.
  final DruidQuery dq = (DruidQuery) scan;
  propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_JSON + "\"")
      .add(HiveParser.StringLiteral, "\"" + SemanticAnalyzer.escapeSQLString(
          dq.getQueryString()) + "\""));
  propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_FIELD_NAMES + "\"")
      .add(HiveParser.StringLiteral,
          "\"" + dq.getRowType().getFieldNames().stream().map(Object::toString)
              .collect(Collectors.joining(",")) + "\""));
  propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_FIELD_TYPES + "\"")
      .add(HiveParser.StringLiteral,
          "\"" + dq.getRowType().getFieldList().stream()
              .map(e -> TypeConverter.convert(e.getType()).getTypeName())
              .collect(Collectors.joining(",")) + "\""));
  propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_TYPE + "\"")
      .add(HiveParser.StringLiteral, "\"" + dq.getQueryType().getQueryName() + "\""));
} else if (scan instanceof HiveJdbcConverter) {
  HiveJdbcConverter jdbcConverter = (HiveJdbcConverter) scan;
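For orientation, these properties surface on the resulting table node roughly as follows; a hedged illustration assuming the usual string values of the Constants keys, with an abbreviated JSON body and placeholder column names:

// TOK_TABLEPROPERTY  "druid.query.json"   "{\"queryType\":\"scan\",...}"
// TOK_TABLEPROPERTY  "druid.fieldNames"   "robot,page,delta"
// TOK_TABLEPROPERTY  "druid.fieldTypes"   "boolean,string,float"
// TOK_TABLEPROPERTY  "druid.query.type"   "scan"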
Schema(DruidQuery dq) {
  HiveTableScan hts = (HiveTableScan) dq.getTableScan();
  String tabName = hts.getTableAlias();
  for (RelDataTypeField field : dq.getRowType().getFieldList()) {
    add(new ColumnInfo(tabName, field.getName()));
  }
}
/** Extends a DruidQuery by appending a new relational operator to its stack. */
public static DruidQuery extendQuery(DruidQuery query, RelNode r) {
  final ImmutableList.Builder<RelNode> builder = ImmutableList.builder();
  return DruidQuery.create(query.getCluster(),
      r.getTraitSet().replace(query.getConvention()), query.getTable(),
      query.druidTable, query.intervals,
      builder.addAll(query.rels).add(r).build(),
      query.getOperatorConversionMap());
}
/** Extends a DruidQuery by replacing its list of intervals. */
public static DruidQuery extendQuery(DruidQuery query, List<Interval> intervals) {
  return DruidQuery.create(query.getCluster(), query.getTraitSet(),
      query.getTable(), query.druidTable, intervals, query.rels);
}
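A hedged usage sketch of the second overload: after a filter rule extracts timestamp bounds, the query's intervals can be narrowed. The `query` local and the literal dates are assumptions for illustration, not from the source:

// Hypothetical caller, e.g. inside a filter push-down rule: replace the
// default eternity interval with the range recovered from the predicate.
List<Interval> filtered = ImmutableList.of(
    new Interval(new DateTime("2015-01-01"), new DateTime("2016-01-01")));
DruidQuery narrowed = DruidQuery.extendQuery(query, filtered);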
final RexBuilder rexBuilder = cluster.getRexBuilder();
if (!DruidQuery.isValidSignature(query.signature() + 'f')) {
  return;
}
final RexNode cond = simplify.simplify(filter.getCondition());
// Split the condition into conjunctions Druid can evaluate and ones it cannot.
for (RexNode e : RelOptUtil.conjunctions(cond)) {
  if (query.isValidFilter(e)) {
    validPreds.add(e);
  } else {
    nonValidPreds.add(e);
  }
}
// Locate the timestamp column so interval predicates can be extracted.
int timestampFieldIdx = -1;
for (int i = 0; i < query.getRowType().getFieldCount(); i++) {
  if (query.druidTable.timestampFieldName.equals(
      query.getRowType().getFieldList().get(i).getName())) {
    timestampFieldIdx = i;
    break;
  }
}
// ...
final RelNode newFilter = filter.copy(filter.getTraitSet(), Util.last(query.rels),
    RexUtil.composeConjunction(rexBuilder, triple.getMiddle(), false));
newDruidQuery = DruidQuery.extendQuery(query, newFilter);
newDruidQuery = DruidQuery.extendQuery((DruidQuery) newDruidQuery, intervals);
    List<Integer> collationIndexes, List<Direction> collationDirections,
    ImmutableBitSet numericCollationIndexes, Integer fetch, Project postProject) {
  final CalciteConnectionConfig config = getConnectionConfig();
  QueryType queryType = QueryType.SCAN;
  final Translator translator = new Translator(druidTable, rowType, config.timeZone());
  // ...
  final String originalFieldName = druidTable.getRowType(getCluster().getTypeFactory())
      .getFieldList().get(ref.getIndex()).getName();
  if (originalFieldName.equals(druidTable.timestampFieldName)) {
    // ...
  }
  // Translate each aggregate call into its Druid JSON counterpart.
  final JsonAggregation jsonAggregation =
      getJsonAggregation(fieldNames, agg.right, agg.left, projects, translator);
  aggregations.add(jsonAggregation);
  builder.add(jsonAggregation.name);
  // Projections on top of the aggregate become Druid post-aggregations.
  DruidQuery.JsonPostAggregation jsonPost =
      getJsonPostAggregation(fieldName, rex, postProject.getInput());
  postAggs.add(jsonPost);
  // Serialize a timeseries query ...
  generator.writeBooleanField("descending", timeSeriesDirection != null
      && timeSeriesDirection == Direction.DESCENDING);
  writeField(generator, "granularity", finalGranularity);
  writeFieldIf(generator, "filter", jsonFilter);
  writeField(generator, "aggregations", aggregations);
  writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
  writeField(generator, "intervals", intervals);
  // ... or a topN query, ordered on the metric of its single dimension.
  writeField(generator, "granularity", finalGranularity);
  writeField(generator, "dimension", dimensions.get(0));
  generator.writeStringField("metric", fieldNames.get(collationIndexes.get(0)));
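The generator calls above emit the skeleton of a Druid timeseries request; a hedged illustration of the resulting JSON shape, with all values being placeholders rather than output captured from this code:

// {
//   "queryType": "timeseries",
//   "descending": false,
//   "granularity": "day",
//   "filter": { "type": "selector", "dimension": "page", "value": "JB" },
//   "aggregations": [ { "type": "longSum", "name": "$f1", "fieldName": "added" } ],
//   "intervals": [ "1900-01-09T00:00:00.000/2992-01-10T00:00:00.000" ]
// }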
@Override public void onMatch(RelOptRuleCall call) {
  final Filter filter = call.rel(0);
  final DruidQuery query = call.rel(1);
  if (!DruidQuery.isValidSignature(query.signature() + 'h')) {
    return;
  }
  final RexNode cond = filter.getCondition();
  final DruidJsonFilter druidJsonFilter = DruidJsonFilter
      .toDruidFilters(cond, query.getTopNode().getRowType(), query);
  if (druidJsonFilter != null) {
    final RelNode newFilter = filter
        .copy(filter.getTraitSet(), Util.last(query.rels), filter.getCondition());
    final DruidQuery newDruidQuery = DruidQuery.extendQuery(query, newFilter);
    call.transformTo(newDruidQuery);
  }
}
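Each operator absorbed into a DruidQuery contributes one character to its signature ('s' scan, 'p' project, 'f' filter, 'a' aggregate, 'l' sort/limit), with 'h' denoting the post-aggregate filter checked here. A minimal sketch of the guard, assuming package-level access to isValidSignature as the rules have:

// A DruidQuery holding [TableScan, Filter, Aggregate] has signature "sfa";
// this rule asks whether a HAVING-style filter may be stacked on top of it.
boolean canAddHaving = DruidQuery.isValidSignature("sfa" + 'h');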
final DruidJsonFilter jsonFilter = computeFilter(filter, this);
// ...
Pair<List<String>, List<VirtualColumn>> projectResult = computeProjectAsScan(
    project, project.getInput().getRowType(), this);
scanColumnNames = projectResult.left;
// ...
Pair<List<DimensionSpec>, List<VirtualColumn>> projectGroupSet = computeProjectGroupSet(
    project, groupSet, aggInputRowType, this);
// ...
final List<JsonAggregation> aggregations =
    computeDruidJsonAgg(aggCalls, aggNames, project, this);
for (JsonAggregation jsonAgg : aggregations) {
  // ...
}
if (postProject != null) {
  final List<String> postProjectDimListBuilder = new ArrayList<>();
  final RelDataType postAggInputRowType = getCluster().getTypeFactory()
      .createStructType(
          Pair.right(postProject.getInput().getRowType().getFieldList()),
          aggregateStageFieldNames);
  // ...
}
limit = computeSort(fetch, collationIndexes, collationDirections,
    numericCollationIndexes, queryOutputFieldNames);
// Try the most specific Druid query type first, falling back as needed.
final String timeSeriesQueryString = planAsTimeSeries(groupByKeyDims, jsonFilter,
    virtualColumnList, aggregations, postAggs, limit, havingJsonFilter);
if (timeSeriesQueryString != null) {
  // ...
}
final String topNQuery = planAsTopN(groupByKeyDims, jsonFilter,
    virtualColumnList, aggregations, postAggs, limit, havingJsonFilter);
if (topNQuery != null) {
public static ASTNode table(RelNode scan) {
  HiveTableScan hts;
  if (scan instanceof DruidQuery) {
    hts = (HiveTableScan) ((DruidQuery) scan).getTableScan();
  } else {
    hts = (HiveTableScan) scan;
  }
  // ...
  propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_JSON + "\"")
      .add(HiveParser.StringLiteral, "\"" + SemanticAnalyzer.escapeSQLString(
          dq.getQueryString()) + "\""));
  propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_TYPE + "\"")
      .add(HiveParser.StringLiteral, "\"" + dq.getQueryType().getQueryName() + "\""));
final TableScan scan = new HiveTableScan(cluster,
    cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
    viewTable.getTableName(), null, false, false);
tableRel = DruidQuery.create(cluster,
    cluster.traitSetOf(BindableConvention.INSTANCE), optTable, druidTable,
    ImmutableList.<RelNode>of(scan), ImmutableMap.of());
} else {
public void onMatch(RelOptRuleCall call) {
  final Aggregate aggregate = call.rel(0);
  final DruidQuery query = call.rel(1);
  final RelNode topDruidNode = query.getTopNode();
  final Project project = topDruidNode instanceof Project ? (Project) topDruidNode : null;
  if (!DruidQuery.isValidSignature(query.signature() + 'a')) {
    return;
  }
  // GROUPING SETS are not supported by Druid.
  if (aggregate.indicator || aggregate.getGroupSets().size() != 1) {
    return;
  }
  // Bail out if the group keys cannot be expressed as Druid dimensions.
  if (DruidQuery.computeProjectGroupSet(project, aggregate.getGroupSet(),
      query.table.getRowType(), query) == null) {
    return;
  }
  final List<String> aggNames = Util.skip(aggregate.getRowType().getFieldNames(),
      aggregate.getGroupSet().cardinality());
  // Bail out if any aggregate call has no Druid counterpart.
  if (DruidQuery.computeDruidJsonAgg(aggregate.getAggCallList(), aggNames, project,
      query) == null) {
    return;
  }
  final RelNode newAggregate = aggregate.copy(aggregate.getTraitSet(),
      ImmutableList.of(query.getTopNode()));
  call.transformTo(DruidQuery.extendQuery(query, newAggregate));
}
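For context, a hedged sketch of how such rules reach the planner; the field names below match Calcite's DruidRules, but the registration site and the `planner` local are assumptions for illustration:

// Hypothetical wiring: enable Druid push-down rules on a planner instance.
planner.addRule(DruidRules.FILTER);
planner.addRule(DruidRules.AGGREGATE);
planner.addRule(DruidRules.SORT);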
@Override public RelDataType deriveRowType() {
  // Field types come from the top of the rel stack; field names from the query spec.
  return getCluster().getTypeFactory().createStructType(
      Pair.right(Util.last(rels).getRowType().getFieldList()),
      getQuerySpec().fieldNames);
}
public void onMatch(RelOptRuleCall call) {
  final Sort sort = call.rel(0);
  final DruidQuery query = call.rel(1);
  if (!DruidQuery.isValidSignature(query.signature() + 'l')) {
    return;
  }
  // Either it is:
  // - a pure limit above a query of type scan, or
  // - a sort plus limit on a dimension/metric of the Druid group-by query
  if (sort.offset != null && RexLiteral.intValue(sort.offset) != 0) {
    // offset is not supported by Druid
    return;
  }
  if (query.getQueryType() == QueryType.SCAN && !RelOptUtil.isPureLimit(sort)) {
    return;
  }
  final RelNode newSort = sort.copy(sort.getTraitSet(),
      ImmutableList.of(Util.last(query.rels)));
  call.transformTo(DruidQuery.extendQuery(query, newSort));
}
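A hedged restatement of the two guards as a single predicate, assuming the `sort` and `query` locals are in scope as above:

// Pushable iff there is no real offset, and a SCAN query only takes a
// pure limit (no sort keys), while group-by queries may sort and limit.
boolean offsetOk = sort.offset == null || RexLiteral.intValue(sort.offset) == 0;
boolean pushable = offsetOk
    && (query.getQueryType() != QueryType.SCAN || RelOptUtil.isPureLimit(sort));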
private static String getTblAlias(RelNode rel) {
  if (null == rel) {
    return null;
  }
  if (rel instanceof HiveTableScan) {
    return ((HiveTableScan) rel).getTableAlias();
  }
  if (rel instanceof DruidQuery) {
    DruidQuery dq = (DruidQuery) rel;
    return ((HiveTableScan) dq.getTableScan()).getTableAlias();
  }
  if (rel instanceof Project) {
    return null;
  }
  if (rel.getInputs().size() == 1) {
    return getTblAlias(rel.getInput(0));
  }
  return null;
}
} else if (call.getOperands().size() == 1) {
  // case: FLOOR(value)
  return DruidQuery.format("floor(%s)", druidExpression);
} else if (call.getOperands().size() == 2) {
  // case: FLOOR(value TO timeunit); use the session time zone if available
  tz = TimeZone.getTimeZone(druidQuery.getConnectionConfig().timeZone());
} else {
  tz = DateTimeUtils.UTC_ZONE;
protected CalciteConnectionConfig getConnectionConfig() {
  return getCluster().getPlanner().getContext().unwrap(CalciteConnectionConfig.class);
}
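The planner context acts as a typed lookup: per Calcite's Context contract, unwrap returns null when no CalciteConnectionConfig was registered. A defensive caller might therefore look like this hedged sketch (the "UTC" fallback is an assumption, not from the source):

// Hypothetical caller guarding against a context without a config entry.
CalciteConnectionConfig config = getConnectionConfig();
String timeZone = config != null ? config.timeZone() : "UTC";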
public QueryType getQueryType() {
  return getQuerySpec().queryType;
}