// Resolves the table alias of the scan feeding this RelNode; stops at Projects and
// at nodes with more than one input.
private static String getTblAlias(RelNode rel) {
  if (null == rel) {
    return null;
  }
  if (rel instanceof HiveTableScan) {
    return ((HiveTableScan) rel).getTableAlias();
  }
  if (rel instanceof DruidQuery) {
    DruidQuery dq = (DruidQuery) rel;
    return ((HiveTableScan) dq.getTableScan()).getTableAlias();
  }
  if (rel instanceof Project) {
    return null;
  }
  if (rel.getInputs().size() == 1) {
    return getTblAlias(rel.getInput(0));
  }
  return null;
}
// Builds the Schema for a DruidQuery: every output column is attributed to the alias of
// the HiveTableScan wrapped by the Druid query.
Schema(DruidQuery dq) {
  HiveTableScan hts = (HiveTableScan) dq.getTableScan();
  String tabName = hts.getTableAlias();
  for (RelDataTypeField field : dq.getRowType().getFieldList()) {
    add(new ColumnInfo(tabName, field.getName()));
  }
}
// Same alias resolution, additionally unwrapping HiveJdbcConverter nodes down to their
// underlying HiveTableScan.
private static String getTblAlias(RelNode rel) {
  if (null == rel) {
    return null;
  }
  if (rel instanceof HiveTableScan) {
    return ((HiveTableScan) rel).getTableAlias();
  }
  if (rel instanceof DruidQuery) {
    DruidQuery dq = (DruidQuery) rel;
    return ((HiveTableScan) dq.getTableScan()).getTableAlias();
  }
  if (rel instanceof HiveJdbcConverter) {
    HiveJdbcConverter conv = (HiveJdbcConverter) rel;
    return conv.getTableScan().getHiveTableScan().getTableAlias();
  }
  if (rel instanceof Project) {
    return null;
  }
  if (rel.getInputs().size() == 1) {
    return getTblAlias(rel.getInput(0));
  }
  return null;
}
private RelNode copyNodeScan(RelNode scan) {
  final RelNode newScan;
  if (scan instanceof DruidQuery) {
    final DruidQuery dq = (DruidQuery) scan;
    // Ideally we should use HiveRelNode convention. However, since Volcano planner
    // throws in that case because DruidQuery does not implement the interface,
    // we set it as Bindable. Currently, we do not use convention in Hive, hence that
    // should be fine.
    // TODO: If we want to make use of convention (e.g., while directly generating operator
    // tree instead of AST), this should be changed.
    newScan = DruidQuery.create(optCluster, optCluster.traitSetOf(BindableConvention.INSTANCE),
        scan.getTable(), dq.getDruidTable(),
        ImmutableList.<RelNode>of(dq.getTableScan()));
  } else {
    newScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) scan.getTable(), ((RelOptHiveTable) scan.getTable()).getName(),
        null, false, false);
  }
  return newScan;
}
}
// Recreates the scan side of the materialization (Druid-backed or plain Hive table scan)
// and returns a fresh RelOptMaterialization over it.
@Override
public RelOptMaterialization apply(RelOptMaterialization materialization) {
  final RelNode viewScan = materialization.tableRel;
  final RelNode newViewScan;
  if (viewScan instanceof DruidQuery) {
    final DruidQuery dq = (DruidQuery) viewScan;
    newViewScan = DruidQuery.create(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
        viewScan.getTable(), dq.getDruidTable(),
        ImmutableList.<RelNode>of(dq.getTableScan()));
  } else {
    newViewScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) viewScan.getTable(), viewScan.getTable().getQualifiedName().get(0),
        null, false, false);
  }
  return new RelOptMaterialization(newViewScan, materialization.queryRel, null);
}
}
  hts = ((HiveJdbcConverter) scan).getTableScan().getHiveTableScan();
} else if (scan instanceof DruidQuery) {
  hts = (HiveTableScan) ((DruidQuery) scan).getTableScan();
} else {
  hts = (HiveTableScan) scan;
public static ASTNode table(RelNode scan) {
  HiveTableScan hts;
  if (scan instanceof DruidQuery) {
    hts = (HiveTableScan) ((DruidQuery) scan).getTableScan();
  } else {
    hts = (HiveTableScan) scan;
// Returns the underlying TableScan, unwrapping a DruidQuery when necessary.
private TableScan transferTableScan(RelNode relNode) {
  if (isDruidQuery(relNode)) {
    return ((DruidQuery) relNode).getTableScan();
  } else {
    return (TableScan) relNode;
  }
}
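// The isDruidQuery(...) check used above is not part of this excerpt. A minimal sketch of
// what it is assumed to do, consistent with the cast in transferTableScan(); the real
// helper may carry additional conditions.
private boolean isDruidQuery(RelNode relNode) {
  return relNode instanceof DruidQuery;
}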
// Handle a DruidQuery by visiting the table scan it wraps.
public Result visit(DruidQuery e) {
  return visit(e.getTableScan());
}
// Likewise, visit the DruidQuery's underlying table scan instead of the wrapper itself.
public RelNode visit(DruidQuery druidQuery) {
  return visit(druidQuery.getTableScan());
}
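// A minimal consolidated sketch of the unwrap pattern repeated throughout the snippets
// above: given a RelNode that may be a HiveTableScan, a DruidQuery, or a HiveJdbcConverter,
// return the underlying HiveTableScan. The helper name unwrapHiveTableScan is hypothetical;
// it relies only on accessors already shown above.
private static HiveTableScan unwrapHiveTableScan(RelNode rel) {
  if (rel instanceof HiveTableScan) {
    return (HiveTableScan) rel;
  }
  if (rel instanceof DruidQuery) {
    // DruidQuery keeps the original scan; callers above cast it to HiveTableScan.
    return (HiveTableScan) ((DruidQuery) rel).getTableScan();
  }
  if (rel instanceof HiveJdbcConverter) {
    return ((HiveJdbcConverter) rel).getTableScan().getHiveTableScan();
  }
  return null;
}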