    rowType, viewTable, nonPartitionColumns, partitionColumns,
    new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(),
    new AtomicInteger());
DruidTable druidTable =
    new DruidTable(new DruidSchema(address, address, false),
        dataSource, RelDataTypeImpl.proto(rowType), metrics,
        DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals, null, null);
@Override public boolean rolledUpColumnValidInsideAgg(String column,
    SqlCall call, SqlNode parent, CalciteConnectionConfig config) {
  assert isRolledUp(column);
  // Our rolled up columns are only allowed in COUNT(DISTINCT ...) aggregate functions.
  // We only allow this when approximate results are acceptable.
  return ((config != null
          && config.approximateDistinctCount()
          && isCountDistinct(call))
      || call.getOperator() == SqlStdOperatorTable.APPROX_COUNT_DISTINCT)
      && call.getOperandList().size() == 1 // for COUNT(a_1, a_2, ... a_n), n must be 1
      && isValidParentKind(parent);
}
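// Illustrative queries (table and column names are hypothetical, not from
// the source); suppose "user_id" is a rolled-up hyperUnique column:
//
//   SELECT APPROX_COUNT_DISTINCT("user_id") FROM "wiki"  -- always accepted
//   SELECT COUNT(DISTINCT "user_id") FROM "wiki"         -- accepted only when
//       CalciteConnectionConfig.approximateDistinctCount() is enabled
//   SELECT COUNT(DISTINCT "user_id", "page") FROM "wiki" -- rejected: more
//       than one operand
//   SELECT SUM("user_id") FROM "wiki"                    -- rejected: not an
//       (approximate) distinct-count aggregate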
@Override public Table load(@Nonnull String tableName) throws Exception {
  final Map<String, SqlTypeName> fieldMap = new LinkedHashMap<>();
  final Set<String> metricNameSet = new LinkedHashSet<>();
  final Map<String, List<ComplexMetric>> complexMetrics = new HashMap<>();
  connection.metadata(tableName, DruidTable.DEFAULT_TIMESTAMP_COLUMN, null,
      fieldMap, metricNameSet, complexMetrics);
  return DruidTable.create(DruidSchema.this, tableName, null, fieldMap,
      metricNameSet, DruidTable.DEFAULT_TIMESTAMP_COLUMN, complexMetrics);
}
}));
ComplexMetric complexMetric = druidTable.resolveComplexMetric(only, aggCall);
private boolean isValidParentKind(SqlNode node) {
  return node.getKind() == SqlKind.SELECT
      || node.getKind() == SqlKind.FILTER
      || isSupportedPostAggOperation(node.getKind());
}
/** Returns the {@link ComplexMetric} that is mapped from the given
 * <code>alias</code>, provided that it exists and is used in the expected
 * context with the given {@link AggregateCall}; otherwise returns
 * <code>null</code>. */
public ComplexMetric resolveComplexMetric(String alias, AggregateCall call) {
  List<ComplexMetric> potentialMetrics = getComplexMetricsFrom(alias);
  // It's possible that multiple complex metrics match the AggregateCall,
  // but for now we only return the first that matches.
  for (ComplexMetric complexMetric : potentialMetrics) {
    if (complexMetric.canBeUsed(call)) {
      return complexMetric;
    }
  }
  return null;
}
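// Minimal usage sketch (the alias and the fallback are illustrative, not
// from the source): resolve COUNT(DISTINCT "user_id") to its backing
// complex metric, falling back to the plain column name when none applies.
ComplexMetric metric = druidTable.resolveComplexMetric("user_id", aggCall);
String druidColumn = metric != null ? metric.getMetricName() : "user_id";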
private Table table(String tableName, DruidConnectionImpl connection) {
  final Map<String, SqlTypeName> fieldMap = new LinkedHashMap<>();
  final Set<String> metricNameSet = new LinkedHashSet<>();
  final Map<String, List<ComplexMetric>> complexMetrics = new HashMap<>();
  connection.metadata(tableName, DruidTable.DEFAULT_TIMESTAMP_COLUMN, null,
      fieldMap, metricNameSet, complexMetrics);
  return DruidTable.create(DruidSchema.this, tableName, null, fieldMap,
      metricNameSet, DruidTable.DEFAULT_TIMESTAMP_COLUMN, complexMetrics);
}
}
ComplexMetric metric = druidTable.resolveComplexMetric(complexName, aggCall);
DruidTable druidTable =
    new DruidTable(new DruidSchema(address, address, false),
        dataSource, RelDataTypeImpl.proto(rowType), metrics,
        DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
final TableScan scan = new HiveTableScan(cluster,
    cluster.traitSetOf(HiveRelNode.CONVENTION),
/** Creates a {@link DruidTable} by using the given
 * {@link DruidConnectionImpl} to populate the other parameters. The
 * parameters may be partially populated.
 *
 * @param druidSchema Druid schema
 * @param dataSourceName Data source name in Druid, also the table name
 * @param intervals Intervals, or null to use the default
 * @param fieldMap Partially populated map of fields (dimensions plus metrics)
 * @param metricNameSet Partially populated set of metric names
 * @param timestampColumnName Name of the timestamp column, or null
 * @param connection Connection used to find column definitions; must not be null
 * @param complexMetrics List of complex metrics in Druid (thetaSketch,
 *        hyperUnique)
 *
 * @return A table
 */
static Table create(DruidSchema druidSchema, String dataSourceName,
    List<Interval> intervals, Map<String, SqlTypeName> fieldMap,
    Set<String> metricNameSet, String timestampColumnName,
    DruidConnectionImpl connection,
    Map<String, List<ComplexMetric>> complexMetrics) {
  assert connection != null;
  connection.metadata(dataSourceName, timestampColumnName, intervals,
      fieldMap, metricNameSet, complexMetrics);
  return DruidTable.create(druidSchema, dataSourceName, intervals, fieldMap,
      metricNameSet, timestampColumnName, complexMetrics);
}
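// This overload differs from the copy-based create below only in that it
// first asks the connection to fill in any dimensions, metrics, and complex
// metrics missing from the partially populated arguments, then delegates.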
ComplexMetric complexMetric = druidQuery.druidTable.resolveComplexMetric(fieldName, aggCall);
DruidConnectionImpl connection =
    new DruidConnectionImpl(druidSchema.url,
        druidSchema.url.replace(":8082", ":8081"));
return DruidTable.create(druidSchema, dataSourceName, intervals,
    fieldBuilder, metricNameBuilder, timestampColumnName, connection,
    complexMetrics);
} else {
  return DruidTable.create(druidSchema, dataSourceName, intervals,
      fieldBuilder, metricNameBuilder, timestampColumnName, complexMetrics);
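// Note: the replace(":8082", ":8081") call derives the coordinator URL from
// the broker URL, relying on Druid's default ports (8082 for the broker,
// 8081 for the coordinator); a deployment with non-default ports would need
// an explicitly configured coordinator URL.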
/** Creates a {@link DruidTable} by copying the given parameters.
 *
 * @param druidSchema Druid schema
 * @param dataSourceName Data source name in Druid, also the table name
 * @param intervals Intervals, or null to use the default
 * @param fieldMap Fully populated map of fields (dimensions plus metrics)
 * @param metricNameSet Fully populated set of metric names
 * @param timestampColumnName Name of the timestamp column, or null
 * @param complexMetrics List of complex metrics in Druid (thetaSketch,
 *        hyperUnique)
 *
 * @return A table
 */
static Table create(DruidSchema druidSchema, String dataSourceName,
    List<Interval> intervals, Map<String, SqlTypeName> fieldMap,
    Set<String> metricNameSet, String timestampColumnName,
    Map<String, List<ComplexMetric>> complexMetrics) {
  final ImmutableMap<String, SqlTypeName> fields = ImmutableMap.copyOf(fieldMap);
  return new DruidTable(druidSchema, dataSourceName,
      new MapRelProtoDataType(fields, timestampColumnName),
      ImmutableSet.copyOf(metricNameSet), timestampColumnName, intervals,
      complexMetrics, fieldMap);
}
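// Usage sketch (data source, columns, and values are illustrative, not from
// the source): build a table over a "wiki" data source with one dimension
// and one metric, letting the default interval apply.
final Map<String, SqlTypeName> fieldMap = new LinkedHashMap<>();
fieldMap.put("__time", SqlTypeName.TIMESTAMP); // timestamp column
fieldMap.put("page", SqlTypeName.VARCHAR);     // dimension
fieldMap.put("added", SqlTypeName.BIGINT);     // metric
Table table = DruidTable.create(druidSchema, "wiki", null, fieldMap,
    ImmutableSet.of("added"), "__time", new HashMap<>());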