public JdbcPrel(RelOptCluster cluster, RelTraitSet traitSet, JdbcIntermediatePrel prel) {
  super(cluster, traitSet);
  final RelNode input = prel.getInput();
  rows = input.estimateRowCount(cluster.getMetadataQuery());
  convention = (DrillJdbcConvention) input.getTraitSet().getTrait(ConventionTraitDef.INSTANCE);

  // Generate SQL for the tree.
  final SqlDialect dialect = convention.getPlugin().getDialect();
  final JdbcImplementor jdbcImplementor = new JdbcImplementor(
      dialect,
      (JavaTypeFactory) getCluster().getTypeFactory());
  final JdbcImplementor.Result result =
      jdbcImplementor.visitChild(0, input.accept(new SubsetRemover()));
  sql = result.asStatement().toSqlString(dialect).getSql();
  rowType = input.getRowType();
}
if (rel.getTraitSet().getTrait(ConventionTraitDef.INSTANCE) == Convention.NONE) {
  return costFactory.makeInfiniteCost();
}
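// A minimal sketch (assumed context, not from the source) of where such a guard
// typically lives: a planner cost callback that prices purely logical nodes out
// of consideration, since nodes still in Convention.NONE cannot be executed.
// The method name and the `planner`, `mq`, and `costFactory` references are
// illustrative; Calcite imports from org.apache.calcite.plan.* are assumed.
public RelOptCost computeCost(RelNode rel, RelOptPlanner planner, RelMetadataQuery mq) {
  if (rel.getTraitSet().getTrait(ConventionTraitDef.INSTANCE) == Convention.NONE) {
    return costFactory.makeInfiniteCost();
  }
  return rel.computeSelfCost(planner, mq);
}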
public ScanPrelBase(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table,
    StoragePluginId pluginId, TableMetadata dataset, List<SchemaPath> projectedColumns,
    double observedRowcountAdjustment) {
  super(cluster, traitSet, table, pluginId, dataset, projectedColumns, observedRowcountAdjustment);
  assert traitSet.getTrait(ConventionTraitDef.INSTANCE) != Rel.LOGICAL;
}
private FilesystemScanDrel(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table,
    StoragePluginId pluginId, TableMetadata dataset, List<SchemaPath> projectedColumns,
    ParquetScanFilter filter, double observedRowcountAdjustment) {
  super(cluster, traitSet, table, pluginId, dataset, projectedColumns, observedRowcountAdjustment);
  assert traitSet.getTrait(ConventionTraitDef.INSTANCE) == Rel.LOGICAL;
  this.filter = filter;
}
public HiveScanDrel(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table,
    StoragePluginId pluginId, TableMetadata dataset, List<SchemaPath> projectedColumns,
    double observedRowcountAdjustment, ScanFilter filter) {
  super(cluster, traitSet, table, pluginId, dataset, projectedColumns, observedRowcountAdjustment);
  assert traitSet.getTrait(ConventionTraitDef.INSTANCE) == Rel.LOGICAL;
  this.filter = filter;
}
/**
 * Returns whether a given kind of trait is enabled.
 */
public <T extends RelTrait> boolean isEnabled(RelTraitDef<T> traitDef) {
  return getTrait(traitDef) != null;
}
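// A hedged usage sketch (not from the source): `rel` stands in for any
// RelNode in scope. isEnabled() makes the null-check on getTrait() explicit
// before the trait is read.
RelTraitSet traits = rel.getTraitSet();
if (traits.isEnabled(ConventionTraitDef.INSTANCE)) {
  // getTrait() is guaranteed non-null here, since isEnabled() returned true.
  Convention convention = traits.getTrait(ConventionTraitDef.INSTANCE);
}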
  @Override
  public boolean useAbstractConvertersForConversion(RelTraitSet fromTraits, RelTraitSet toTraits) {
    return canConvertConvention((Convention) toTraits.getTrait(this.getTraitDef()));
  }
};
@Override
public RelNode convertChild(DrillProjectRel project, RelNode rel) throws RuntimeException {
  DrillDistributionTrait childDist = rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
  RelCollation childCollation = rel.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE);
  DrillDistributionTrait newDist = convertDist(childDist, distributionMap);
  RelCollation newCollation = convertRelCollation(childCollation, collationMap);
  RelTraitSet newProjectTraits = newTraitSet(Prel.DRILL_PHYSICAL, newDist, newCollation);
  return new ProjectPrel(project.getCluster(), newProjectTraits, rel, project.getProjects(),
      project.getRowType());
}
@Override
public RelNode convertChild(ProjectRel project, RelNode rel) throws RuntimeException {
  DistributionTrait childDist = rel.getTraitSet().getTrait(DistributionTraitDef.INSTANCE);
  RelCollation childCollation = rel.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE);
  DistributionTrait newDist = convertDist(childDist, distributionMap);
  RelCollation newCollation = convertRelCollation(childCollation, collationMap);
  RelTraitSet newProjectTraits = newTraitSet(Prel.PHYSICAL, newDist, newCollation);
  return new ProjectPrel(project.getCluster(), newProjectTraits, rel, project.getProjects(),
      project.getRowType());
}
public PhysicalOperator addMetadata(Prel originalPrel, PhysicalOperator op) {
  op.setOperatorId(opIdMap.get(originalPrel).getAsSingleInt());
  op.setCost(originalPrel.getCostForParallelization());
  // A singleton distribution means the operator runs in a single minor fragment.
  if (originalPrel.getTraitSet().getTrait(DistributionTraitDef.INSTANCE) == DistributionTrait.SINGLETON) {
    op.setAsSingle();
  }
  return op;
}
@Override
public RelNode convertChild(final JoinRel join, final RelNode rel) throws InvalidRelException {
  DistributionTrait toDist = rel.getTraitSet().getTrait(DistributionTraitDef.INSTANCE);
  RelTraitSet newTraitsLeft = newTraitSet(Prel.PHYSICAL, collationLeft, toDist);
  RelNode newLeft = convert(left, newTraitsLeft);
  return new MergeJoinPrel(join.getCluster(), newTraitsLeft, newLeft, convertedRight,
      joinCondition, join.getJoinType());
}
@Override
public RelNode convertChild(final JoinRel join, final RelNode rel) throws InvalidRelException {
  DistributionTrait toDist = rel.getTraitSet().getTrait(DistributionTraitDef.INSTANCE);
  RelTraitSet newTraitsLeft = newTraitSet(Prel.PHYSICAL, toDist);
  RelNode newLeft = convert(left, newTraitsLeft);
  return new NestedLoopJoinPrel(join.getCluster(), newTraitsLeft, newLeft, convertedRight,
      joinCondition, join.getJoinType());
}
@Override
public RelNode convertChild(final DrillJoin join, final RelNode rel) throws InvalidRelException {
  DrillDistributionTrait toDist = rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
  RelTraitSet newTraitsLeft = newTraitSet(Prel.DRILL_PHYSICAL, collationLeft, toDist);
  RelNode newLeft = convert(left, newTraitsLeft);
  return new MergeJoinPrel(join.getCluster(), newTraitsLeft, newLeft, convertedRight,
      joinCondition, join.getJoinType());
}
@Override
public RelNode convertChild(final JoinRel join, final RelNode rel) throws InvalidRelException {
  DistributionTrait toDist = rel.getTraitSet().getTrait(DistributionTraitDef.INSTANCE);
  RelTraitSet newTraitsLeft = newTraitSet(Prel.PHYSICAL, toDist);
  RelNode newLeft = convert(left, newTraitsLeft);
  return new HashJoinPrel(join.getCluster(), newTraitsLeft, newLeft, convertedRight,
      joinCondition, join.getJoinType());
}
private FilesystemScanDrel(FilesystemScanDrel that, ParquetScanFilter filter) {
  super(that.getCluster(), that.getTraitSet(), that.getTable(), that.getPluginId(),
      that.getTableMetadata(), that.getProjectedColumns(), that.getObservedRowcountAdjustment());
  assert traitSet.getTrait(ConventionTraitDef.INSTANCE) == Rel.LOGICAL;
  this.filter = filter;
}
@Override
public RelNode convertChild(final DrillJoin join, final RelNode rel) throws InvalidRelException {
  DrillDistributionTrait toDist = rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
  RelTraitSet newTraitsLeft = newTraitSet(Prel.DRILL_PHYSICAL, toDist);
  RelNode newLeft = convert(left, newTraitsLeft);
  return new HashJoinPrel(join.getCluster(), newTraitsLeft, newLeft, convertedRight,
      joinCondition, join.getJoinType(), join.isSemiJoin());
}
@Override
public RelNode convertChild(DrillWriterRelBase writer, RelNode rel) throws RuntimeException {
  DrillDistributionTrait childDist = rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);

  // Create the Writer with the child's distribution because the degree of parallelism for the
  // writer should correspond to the number of child minor fragments. The Writer itself is not
  // concerned with the collation of the child. Note that the Writer's output RowType consists of
  // {fragment_id varchar(255), number_of_records_written bigint}, which is very different from
  // the child's output RowType.
  return new WriterPrel(writer.getCluster(),
      writer.getTraitSet().plus(childDist).plus(Prel.DRILL_PHYSICAL),
      rel, writer.getCreateTableEntry());
}
private FilesystemScanDrel(FilesystemScanDrel that, TableMetadata newDatasetPointer) {
  super(that.getCluster(), that.getTraitSet(),
      new RelOptNamespaceTable(newDatasetPointer, that.getCluster()), that.getPluginId(),
      newDatasetPointer, that.getProjectedColumns(), that.getObservedRowcountAdjustment());
  assert traitSet.getTrait(ConventionTraitDef.INSTANCE) == Rel.LOGICAL;
  this.filter = that.getFilter();
}