/**
 * Wraps the given relational expression in a {@code JdbcDrel} carrying the
 * Drill logical trait, after first converting the input to this rule's
 * in-trait (simplified).
 */
@Override
public RelNode convert(RelNode in) {
  final RelTraitSet drillTraits = in.getTraitSet().replace(DrillRel.DRILL_LOGICAL);
  final RelTraitSet inputTraits = in.getTraitSet().replace(this.getInTrait()).simplify();
  final RelNode convertedInput = convert(in, inputTraits);
  return new JdbcDrel(in.getCluster(), drillTraits, convertedInput);
}
/**
 * Creates a HiveSortExchange.
 *
 * @param input        input relational expression
 * @param distribution distribution specification
 * @param collation    collation specification
 * @param joinKeys     join keys specification
 */
public static HiveSortExchange create(RelNode input, RelDistribution distribution,
    RelCollation collation, ImmutableList<RexNode> joinKeys) {
  final RelOptCluster cluster = input.getCluster();
  // Canonize both traits so identical specifications share one instance.
  final RelDistribution canonicalDistribution =
      RelDistributionTraitDef.INSTANCE.canonize(distribution);
  final RelCollation canonicalCollation =
      RelCollationTraitDef.INSTANCE.canonize(collation);
  final RelTraitSet traits = RelTraitSet.createEmpty()
      .plus(canonicalDistribution)
      .plus(canonicalCollation);
  return new HiveSortExchange(cluster, traits, input,
      canonicalDistribution, canonicalCollation, joinKeys);
}
/**
 * Constructs a physical JDBC prel, eagerly rendering the wrapped subtree to a
 * SQL string in the plugin's dialect.
 *
 * @param cluster  planning cluster
 * @param traitSet traits for this node
 * @param prel     intermediate prel whose input is translated to SQL
 */
public JdbcPrel(RelOptCluster cluster, RelTraitSet traitSet, JdbcIntermediatePrel prel) {
  super(cluster, traitSet);
  final RelNode input = prel.getInput();
  rows = input.estimateRowCount(cluster.getMetadataQuery());
  convention = (DrillJdbcConvention) input.getTraitSet().getTrait(ConventionTraitDef.INSTANCE);

  // Generate SQL for the subtree; SubsetRemover strips planner subsets first.
  final SqlDialect dialect = convention.getPlugin().getDialect();
  final JdbcImplementor implementor =
      new JdbcImplementor(dialect, (JavaTypeFactory) getCluster().getTypeFactory());
  final JdbcImplementor.Result result =
      implementor.visitChild(0, input.accept(new SubsetRemover()));
  sql = result.asStatement().toSqlString(dialect).getSql();
  rowType = input.getRowType();
}
private RelNode convertToStormRel(RelNode relNode) throws RelConversionException { RelTraitSet traitSet = relNode.getTraitSet(); traitSet = traitSet.simplify(); // PlannerImpl.transform() optimizes RelNode with ruleset return planner.transform(STORM_REL_CONVERSION_RULES, traitSet.plus(StreamsLogicalConvention.INSTANCE), relNode); }
/**
 * Builds a trait set that layers the cluster's default traits on top of the
 * traits taken from an input expression.
 *
 * @param cluster         planning cluster supplying the default traits
 * @param traitsFromInput traits inherited from the input expression
 * @return merged trait set; never null
 */
public static RelTraitSet getDefaultTraitSet(RelOptCluster cluster, RelTraitSet traitsFromInput) {
  return RelTraitSet.createEmpty()
      .merge(traitsFromInput)
      .merge(getDefaultTraitSet(cluster));
}
}
/**
 * Runs the given rule set over {@code root} to produce a physical plan in the
 * Enumerable convention. No materializations or lattices are supplied.
 */
private static RelNode toPhysicalPlan(RelNode root, RuleSet rules) {
  final RelOptPlanner planner = root.getCluster().getPlanner();
  final RelTraitSet desired = planner.emptyTraitSet().replace(EnumerableConvention.INSTANCE);
  final Program program = Programs.of(rules);
  return program.run(planner, root, desired,
      ImmutableList.<RelOptMaterialization>of(), ImmutableList.<RelOptLattice>of());
}
@Test public void testRelDistributionSatisfy() { RelDistribution distribution1 = RelDistributions.hash(ImmutableList.of(0)); RelDistribution distribution2 = RelDistributions.hash(ImmutableList.of(1)); RelTraitSet traitSet = RelTraitSet.createEmpty(); RelTraitSet simpleTrait1 = traitSet.plus(distribution1); RelTraitSet simpleTrait2 = traitSet.plus(distribution2); RelTraitSet compositeTrait = traitSet.replace(RelDistributionTraitDef.INSTANCE, ImmutableList.of(distribution1, distribution2)); assertThat(compositeTrait.satisfies(simpleTrait1), is(true)); assertThat(compositeTrait.satisfies(simpleTrait2), is(true)); assertThat(distribution1.compareTo(distribution2), is(-1)); assertThat(distribution2.compareTo(distribution1), is(1)); //noinspection EqualsWithItself assertThat(distribution2.compareTo(distribution2), is(0)); } }
// NOTE(review): this snippet appears truncated/garbled — the assignment that
// produced the inverse field mapping, the declarations of 'cast', 'binding',
// 'map' and 'fieldCollations', and several closing braces are missing. Code
// is kept byte-for-byte; restore the full rule body before use.
public void onMatch(RelOptRuleCall call) {
  final HiveProject project = call.rel(0);
  final HiveSortLimit sort = call.rel(1);
  final RelOptCluster cluster = project.getCluster();
  // Orphaned expression tail — presumably the end of a
  // RelOptUtil.permutation(...) call whose result was inverted; TODO confirm.
  project.getProjects(), project.getInput().getRowType()).inverse();
  Set<Integer> needed = new HashSet<>();
  for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
    // Binds the (undeclared) 'cast' call against the remapped collation,
    // apparently to test monotonicity of the sort key expression.
    RexCallBinding.create(cluster.getTypeFactory(), cast,
        ImmutableList.of(RexUtil.apply(map, sort.getCollation())));
    if (cast.getOperator().getMonotonicity(binding) == SqlMonotonicity.NOT_MONOTONIC) {
      // Non-monotonic sort key: transformation is not valid, give up.
      return;
  // Build the new collation under the Hive convention, copy the project onto
  // the sort's input, and re-create the sort above it.
  RelTraitSet traitSet = sort.getCluster().traitSetOf(HiveRelNode.CONVENTION);
  RelCollation newCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
  final RelNode newProject = project.copy(sort.getInput().getTraitSet(),
      ImmutableList.<RelNode>of(sort.getInput()));
  final HiveSortLimit newSort = sort.copy(newProject.getTraitSet(),
      newProject, newCollation, sort.offset, sort.fetch);
  call.transformTo(newSort);
// NOTE(review): truncated/garbled fragment — the statement that received the
// trailing argument list "rowType, tabMetaData, ..." (apparently a
// RelOptHiveTable constructor), the enclosing if/else structure, and the tail
// of the second RelOptHiveTable call are missing. Kept byte-for-byte.
String dataSource = tabMetaData.getParameters().get(Constants.DRUID_DATA_SOURCE);
Set<String> metrics = new HashSet<>();
RexBuilder rexBuilder = cluster.getRexBuilder();
RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
List<RelDataType> druidColTypes = new ArrayList<>();
// Orphaned argument tail of a missing constructor/method call — TODO restore.
rowType, tabMetaData, nonPartitionColumns, partitionColumns, virtualCols, conf,
    partitionCache, colStatsCache, noColsMissingStats);
// Hive scan over the Druid-backed table, then wrapped into a DruidQuery in
// the Bindable convention.
final TableScan scan = new HiveTableScan(cluster,
    cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
    null == tableAlias ? tabMetaData.getTableName() : tableAlias,
    getAliasId(tableAlias, qb),
    HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP),
    qb.isInsideView() || qb.getAliasInsideView().contains(tableAlias.toLowerCase()));
tableRel = DruidQuery.create(cluster, cluster.traitSetOf(BindableConvention.INSTANCE),
    optTable, druidTable, ImmutableList.of(scan),
    DruidSqlOperatorConverter.getDefaultMap());
} else {
// Non-Druid branch: the RelOptHiveTable construction is cut off mid-argument
// list before the JDBC converter is built — TODO restore missing arguments.
optTable = new RelOptHiveTable(relOptSchema, relOptSchema.getTypeFactory(),
    fullyQualifiedTabName,
tableRel = new HiveJdbcConverter(cluster,
    jdbcTableRel.getTraitSet().replace(HiveRelNode.CONVENTION), jdbcTableRel, jc, url, user);
/**
 * Extends a DruidQuery by appending one more relational operator to its
 * internal stack, preserving the query's convention on the new node's traits.
 */
public static DruidQuery extendQuery(DruidQuery query, RelNode r) {
  final ImmutableList.Builder<RelNode> stack = ImmutableList.builder();
  stack.addAll(query.rels).add(r);
  return DruidQuery.create(query.getCluster(),
      r.getTraitSet().replace(query.getConvention()), query.getTable(),
      query.druidTable, query.intervals, stack.build());
}
public RelNode convert(RelNode rel) { final Aggregate agg = (Aggregate) rel; if (agg.getGroupSets().size() != 1) { // GROUPING SETS not supported; see // [CALCITE-734] Push GROUPING SETS to underlying SQL via JDBC adapter return null; } final RelTraitSet traitSet = agg.getTraitSet().replace(out); try { return new JdbcAggregate(rel.getCluster(), traitSet, convert(agg.getInput(), out), agg.indicator, agg.getGroupSet(), agg.getGroupSets(), agg.getAggCallList()); } catch (InvalidRelException e) { LOGGER.debug(e.toString()); return null; } } }
/**
 * Replaces a {@code SampleCrel} with the equivalent {@code SampleRel} after
 * converting its input to the logical convention.
 */
@Override
public void onMatch(RelOptRuleCall call) {
  final SampleCrel sample = call.rel(0);
  final RelNode input = sample.getInput();
  final RelTraitSet logicalTraits = input.getTraitSet().plus(Rel.LOGICAL).simplify();
  final RelNode convertedInput = convert(input, logicalTraits);
  call.transformTo(new SampleRel(sample.getCluster(),
      convertedInput.getTraitSet().plus(Rel.LOGICAL), convertedInput));
}
}
public void onMatch(RelOptRuleCall call) { final HiveSortLimit sort = call.rel(0); final HiveProject project = call.rel(1); // Determine mapping between project input and output fields. If sort // relies on non-trivial expressions, we can't push. final Mappings.TargetMapping map = RelOptUtil.permutation( project.getProjects(), project.getInput().getRowType()); for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) { if (map.getTargetOpt(fc.getFieldIndex()) < 0) { return; } } // Create new collation final RelCollation newCollation = RelCollationTraitDef.INSTANCE.canonize( RexUtil.apply(map, sort.getCollation())); // New operators final HiveSortLimit newSort = sort.copy(sort.getTraitSet().replace(newCollation), project.getInput(), newCollation, sort.offset, sort.fetch); final RelNode newProject = project.copy(sort.getTraitSet(), ImmutableList.<RelNode>of(newSort)); call.transformTo(newProject); }
// NOTE(review): truncated/garbled fragment — the enclosing method,
// declarations of 'cluster', 'planner', 'calciteGenPlan', 'calcitePreCboPlan',
// 'desiredTraits', 'hepPlanner' and 'rootRel', and the closing brace of the
// 'if' are all missing. Kept byte-for-byte; restore before use.
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
// Install the executor on the generated plan's planner — presumably for
// constant folding during optimization; TODO confirm against full method.
calciteGenPlan.getCluster().getPlanner().setExecutor(executorProvider);
// Force the pre-CBO plan into the desired traits only when they differ.
if (!calcitePreCboPlan.getTraitSet().equals(desiredTraits)) {
  rootRel = hepPlanner.changeTraits(calcitePreCboPlan, desiredTraits);
// NOTE(review): truncated/garbled fragment — the enclosing method and the
// declarations of 'cluster', 'list', 'mdProvider', 'calcitePreCboPlan',
// 'hepPlanner', 'rootRel', 'calciteMdProvider', 'materialization' and
// 'calciteOptimizedPlan' are missing, as is the 'if' closing brace. Kept
// byte-for-byte; restore before use.
HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry,
    corrScalarRexSQWithAgg);
RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
list.add(mdProvider.getMetadataProvider());
// Desired traits: Hive convention with an empty collation.
RelTraitSet desiredTraits = optCluster
    .traitSetOf(HiveRelNode.CONVENTION, RelCollations.EMPTY);
if (!calcitePreCboPlan.getTraitSet().equals(desiredTraits)) {
  rootRel = hepPlanner.changeTraits(calcitePreCboPlan, desiredTraits);
// Register metadata, materialized views, and the MV filter-scan rule before
// handing the plan root to the planner.
planner.registerMetadataProviders(Lists.newArrayList(calciteMdProvider));
planner.addMaterialization(materialization);
planner.addRule(HiveMaterializedViewFilterScanRule.INSTANCE);
planner.setRoot(calciteOptimizedPlan);
/**
 * Converts a relational expression to a given set of traits, if it does not
 * already have those traits.
 *
 * @param rel      relational expression to convert
 * @param toTraits desired traits
 * @return a relational expression with the desired traits; never null
 */
public static RelNode convert(RelNode rel, RelTraitSet toTraits) {
  final RelOptPlanner planner = rel.getCluster().getPlanner();

  // When the target set is wider than the expression's current set,
  // propagate the extra traits down the tree first.
  if (rel.getTraitSet().size() < toTraits.size()) {
    new RelTraitPropagationVisitor(planner, toTraits).go(rel);
  }

  // Overlay each non-null desired trait onto the expression's traits.
  RelTraitSet merged = rel.getTraitSet();
  for (int i = 0; i < toTraits.size(); i++) {
    final RelTrait desired = toTraits.getTrait(i);
    if (desired != null) {
      merged = merged.replace(i, desired);
    }
  }

  return rel.getTraitSet().matches(merged) ? rel : planner.changeTraits(rel, merged);
}
/**
 * Converts one trait of a relational expression, if it does not
 * already have that trait.
 *
 * @param rel     relational expression to convert
 * @param toTrait desired trait
 * @return a relational expression with the desired trait; never null
 */
public static RelNode convert(RelNode rel, RelTrait toTrait) {
  final RelOptPlanner planner = rel.getCluster().getPlanner();
  final RelTraitSet target = toTrait == null
      ? rel.getTraitSet()
      : rel.getTraitSet().replace(toTrait);
  if (rel.getTraitSet().matches(target)) {
    return rel;
  }
  return planner.changeTraits(rel, target.simplify());
}
// NOTE(review): truncated/garbled fragment — the enclosing method, the guard
// around the leading 'return;', the statement receiving the orphaned argument
// list before 'call.transformTo(newScan);', the 'if' condition, and 'newProj'
// are all missing. Kept byte-for-byte; restore before use.
return;
RelTraitSet newTraits = call.getPlanner().emptyTraitSet();
// NOTE(review): the result of plus() is discarded here; RelTraitSet is
// immutable in Calcite, so this line likely has no effect — confirm intent.
newTraits.plus(trait);
final ScanPrel newScan = new ScanPrel(scan.getCluster(),
    newTraits.plus(Prel.DRILL_PHYSICAL),
    groupScan.clone(columnInfo.getFields()),
    columnInfo.createNewRowType(project.getInput().getCluster().getTypeFactory()),
    scan.getTable());
// Orphaned argument tail — presumably the end of a project/prel copy call.
project.getTraitSet().plus(Prel.DRILL_PHYSICAL), newScan, newProjects,
call.transformTo(newScan);
} else {
call.transformTo(newProj);
/**
 * Wraps the expression in an {@code OLAPToEnumerableConverter} whose traits
 * carry this rule's out-convention.
 */
@Override
public RelNode convert(RelNode rel) {
  final RelTraitSet converted = rel.getTraitSet().replace(getOutConvention());
  return new OLAPToEnumerableConverter(rel.getCluster(), converted, rel);
}
@Override public void onMatch(RelOptRuleCall call) { final Sort sort = call.rel(0); if (sort.offset == null && sort.fetch == null) { return; } RelTraitSet origTraitSet = sort.getTraitSet(); RelTraitSet traitSet = origTraitSet.replace(OLAPRel.CONVENTION).simplify(); RelNode input = sort.getInput(); if (!sort.getCollation().getFieldCollations().isEmpty()) { // Create a sort with the same sort key, but no offset or fetch. input = sort.copy(sort.getTraitSet(), input, sort.getCollation(), null, null); } RelNode x = convert(input, input.getTraitSet().replace(OLAPRel.CONVENTION)); call.transformTo(new OLAPLimitRel(sort.getCluster(), traitSet, x, sort.offset, sort.fetch)); }