/**
 * Builds the planner {@link FrameworkConfig} for this schema.
 *
 * <p>When user-defined functions are registered, the standard operator table
 * is chained with a {@link CalciteCatalogReader} so UDFs declared in the
 * schema are resolvable during validation; otherwise only the default
 * configuration is produced. (The "FrameWork" spelling is part of the public
 * API and is preserved.)
 *
 * @return a framework configuration rooted at {@code schema}
 */
public FrameworkConfig buildFrameWorkConfig() {
  if (!hasUdf) {
    return Frameworks.newConfigBuilder().defaultSchema(schema).build();
  }
  final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
      CalciteSchema.from(schema),
      Collections.emptyList(),
      typeFactory,
      new CalciteConnectionConfigImpl(new Properties()));
  final List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(catalogReader);
  return Frameworks.newConfigBuilder()
      .defaultSchema(schema)
      .operatorTable(new ChainedSqlOperatorTable(operatorTables))
      .build();
}
/**
 * Handles an optimized tile scan: records that an optimized table scan was
 * found, resolves the schema backing the tile's table through a catalog
 * reader, and registers the drivers for that schema.
 *
 * <p>Note: {@code getRootSchma()} is the (misspelled) accessor declared
 * elsewhere in this class; the name is preserved here.
 */
private void visitQuarkTileScan(QuarkTileScan node) {
  foundOptimizedTableScan.set(true);
  final QuarkTile tile = node.getQuarkTile();
  final CalciteCatalogReader reader = new CalciteCatalogReader(
      CalciteSchema.from(getRootSchma()),
      false,
      context.getDefaultSchemaPath(),
      getTypeFactory());
  final CalciteSchema tileSchema =
      reader.getTable(tile.tableName).unwrap(CalciteSchema.class);
  dsBuilder.addAll(getDrivers(tileSchema));
}
// NOTE(review): fragment -- the enclosing method starts before this chunk.
final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
// Catalog reader over the root schema with an empty default schema path.
CalciteCatalogReader reader = new CalciteCatalogReader(prepareContext.getRootSchema(), ImmutableList.of(), typeFactory, prepareContext.config());
// BUG?(review): this SqlIdentifier is constructed and immediately discarded;
// it is never assigned to myFuncIdentifier, which the lookups below use.
// Presumably this should read
// "SqlIdentifier myFuncIdentifier = new SqlIdentifier(...)" -- confirm
// against the full method.
new SqlIdentifier(Lists.newArrayList(schemaName, funcName), null, SqlParserPos.ZERO, null);
reader.lookupOperatorOverloads(myFuncIdentifier, SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, SqlSyntax.FUNCTION, operatorList);
reader.lookupOperatorOverloads(myFuncIdentifier, SqlFunctionCategory.USER_DEFINED_FUNCTION, SqlSyntax.FUNCTION, operatorList);
// BUG?(review): same pattern -- result discarded, anotherFuncIdentifier is
// never assigned here. Confirm.
new SqlIdentifier(Lists.newArrayList(schemaName, anotherName), null, SqlParserPos.ZERO, null);
reader.lookupOperatorOverloads(anotherFuncIdentifier, SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, SqlSyntax.FUNCTION, operatorList);
/**
 * Resolves the table identified by {@code names} for member access,
 * delegating to {@link #getTable(List)}.
 *
 * @param names fully-qualified table name components
 * @return whatever {@code getTable} resolves for the given names
 */
public Prepare.PreparingTable getTableForMember(List<String> names) {
  final Prepare.PreparingTable resolved = getTable(names);
  return resolved;
}
/**
 * Adds to {@code operatorList} every operator whose name matches
 * {@code opName} and whose kind matches {@code category}. Only
 * {@link SqlSyntax#FUNCTION} lookups are handled; other syntaxes return
 * without modifying the list. A null category matches every function.
 */
public void lookupOperatorOverloads(final SqlIdentifier opName, SqlFunctionCategory category, SqlSyntax syntax, List<SqlOperator> operatorList) {
  if (syntax != SqlSyntax.FUNCTION) {
    return;
  }
  for (Function function : getFunctionsFrom(opName.names)) {
    // Table-valued kinds (macros and table functions) match table-function
    // categories; everything else matches scalar/aggregate categories.
    final boolean tableKind =
        function instanceof TableMacro || function instanceof TableFunction;
    final boolean matches =
        category == null || (category.isTableFunction() ? tableKind : !tableKind);
    if (matches) {
      operatorList.add(toOp(opName, function));
    }
  }
}
// NOTE(review): fragment -- the enclosing method starts before and continues
// after this chunk; the assignment target for the OperandTypes.family(...)
// call (presumably "typeChecker") appears to sit on an earlier line.
// Operand checker that treats the i-th operand as optional when the
// underlying function's i-th parameter is marked optional.
OperandTypes.family(typeFamilies, i -> function.getParameters().get(i).isOptional());
final List<RelDataType> paramTypes = toSql(typeFactory, argTypes);
if (function instanceof ScalarFunction) {
  // Scalar UDF: return type inferred from the ScalarFunction implementation.
  return new SqlUserDefinedFunction(name, infer((ScalarFunction) function), InferTypes.explicit(argTypes), typeChecker, paramTypes, function);
} else if (function instanceof AggregateFunction) {
  // Aggregate UDF; the two booleans and Optionality.FORBIDDEN correspond to
  // the requiresOrder/requiresOver/requiresGroupOrder ctor parameters --
  // confirm against the Calcite version in use.
  return new SqlUserDefinedAggFunction(name, infer((AggregateFunction) function), InferTypes.explicit(argTypes), typeChecker, (AggregateFunction) function, false, false, Optionality.FORBIDDEN, typeFactory);
// NOTE(review): fragment -- the try block opened here continues past this chunk.
try {
  // Parse the SQL text into an AST.
  SqlNode sqlNode = sqlParser.parseQuery();
  // Catalog reader that resolves names against the given schema path.
  CalciteCatalogReader catalogReader = createCatalogReader().withSchemaPath(schemaPath);
/**
 * Looks up a named (user-defined) type in the root schema.
 *
 * @param typeName possibly-qualified identifier of the type
 * @return the resolved relational type, or {@code null} when no type entry
 *     with that name exists
 */
public RelDataType getNamedType(SqlIdentifier typeName) {
  final CalciteSchema.TypeEntry entry =
      SqlValidatorUtil.getTypeEntry(getRootSchema(), typeName);
  return entry == null ? null : entry.getType().apply(typeFactory);
}
// NOTE(review): fragment -- the assignment target for this constructor call
// (presumably "CalciteCatalogReader calciteCatalogReader =") sits on a line
// before this chunk.
new CalciteCatalogReader(schema.root(), false, queryContext.getDefaultSchemaPath(), queryContext.getTypeFactory());
// Resolve the schema that contains the view's backing table.
CalciteSchema viewSchema = calciteCatalogReader.getTable(tableName)
    .unwrap(CalciteSchema.class);
assert viewSchema != null;
// NOTE(review): fragment -- the enclosing method starts before this chunk.
final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
// Catalog reader over the root schema with an empty default schema path.
CalciteCatalogReader reader = new CalciteCatalogReader(prepareContext.getRootSchema(), ImmutableList.of(), typeFactory, prepareContext.config());
// BUG?(review): this SqlIdentifier is constructed and immediately discarded;
// it is never assigned to myFuncIdentifier, which the lookups below use.
// Presumably this should read
// "SqlIdentifier myFuncIdentifier = new SqlIdentifier(...)" -- confirm
// against the full method.
new SqlIdentifier(Lists.newArrayList(schemaName, funcName), null, SqlParserPos.ZERO, null);
reader.lookupOperatorOverloads(myFuncIdentifier, SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, SqlSyntax.FUNCTION, operatorList);
reader.lookupOperatorOverloads(myFuncIdentifier, SqlFunctionCategory.USER_DEFINED_FUNCTION, SqlSyntax.FUNCTION, operatorList);
// BUG?(review): same pattern -- result discarded, anotherFuncIdentifier is
// never assigned here. Confirm.
new SqlIdentifier(Lists.newArrayList(schemaName, anotherName), null, SqlParserPos.ZERO, null);
reader.lookupOperatorOverloads(anotherFuncIdentifier, SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, SqlSyntax.FUNCTION, operatorList);
/**
 * Member-table lookup; identical to a plain {@link #getTable(List)} call.
 *
 * @param names fully-qualified table name components
 * @return the table resolved by {@code getTable}
 */
public Prepare.PreparingTable getTableForMember(List<String> names) {
  return this.getTable(names);
}
/**
 * Collects into {@code operatorList} the operators matching {@code opName}
 * and {@code category}. Non-{@link SqlSyntax#FUNCTION} syntaxes are ignored;
 * a null category acts as a wildcard.
 */
public void lookupOperatorOverloads(final SqlIdentifier opName, SqlFunctionCategory category, SqlSyntax syntax, List<SqlOperator> operatorList) {
  if (syntax != SqlSyntax.FUNCTION) {
    return;
  }
  for (Function candidate : getFunctionsFrom(opName.names)) {
    final boolean isTableKind =
        candidate instanceof TableMacro || candidate instanceof TableFunction;
    final boolean accepted;
    if (category == null) {
      accepted = true;                       // wildcard: every function matches
    } else if (category.isTableFunction()) {
      accepted = isTableKind;                // table-function category
    } else {
      accepted = !isTableKind;               // scalar/aggregate category
    }
    if (accepted) {
      operatorList.add(toOp(opName, candidate));
    }
  }
}
// NOTE(review): fragment -- the enclosing method starts before and continues
// after this chunk; the assignment target for the OperandTypes.family(...)
// call (presumably "typeChecker") appears to sit on an earlier line.
// Operand checker that treats the i-th operand as optional when the
// underlying function's i-th parameter is marked optional.
OperandTypes.family(typeFamilies, i -> function.getParameters().get(i).isOptional());
final List<RelDataType> paramTypes = toSql(typeFactory, argTypes);
if (function instanceof ScalarFunction) {
  // Scalar UDF: return type inferred from the ScalarFunction implementation.
  return new SqlUserDefinedFunction(name, infer((ScalarFunction) function), InferTypes.explicit(argTypes), typeChecker, paramTypes, function);
} else if (function instanceof AggregateFunction) {
  // NOTE(review): this SqlUserDefinedAggFunction constructor omits the
  // Optionality argument that a sibling snippet in this file passes
  // (Optionality.FORBIDDEN) -- likely an older Calcite overload; confirm
  // against the Calcite version this file compiles against.
  return new SqlUserDefinedAggFunction(name, infer((AggregateFunction) function), InferTypes.explicit(argTypes), typeChecker, (AggregateFunction) function, false, false, typeFactory);
} else if (function instanceof TableMacro) {
// BUG?(review): the reader returned by withSchemaPath(...) is discarded here;
// CalciteCatalogReader.withSchemaPath returns a NEW reader rather than
// mutating the receiver, so this statement has no effect. The "catalogReader"
// used below must come from elsewhere. If the schema path was meant to apply,
// this should read
// "catalogReader = createCatalogReader().withSchemaPath(schemaPath);" -- confirm.
createCatalogReader().withSchemaPath(schemaPath);
final SqlValidator validator = new CalciteSqlValidator(operatorTable, catalogReader, typeFactory,
/**
 * Resolves {@code typeName} to a type registered in the root schema.
 *
 * @param typeName identifier of the type to resolve
 * @return the corresponding {@link RelDataType}, or {@code null} if no such
 *     type entry exists
 */
public RelDataType getNamedType(SqlIdentifier typeName) {
  CalciteSchema.TypeEntry typeEntry =
      SqlValidatorUtil.getTypeEntry(getRootSchema(), typeName);
  if (typeEntry == null) {
    return null;
  }
  return typeEntry.getType().apply(typeFactory);
}
/**
 * Creates a planner for the given schema: registers convention and collation
 * trait definitions, chains the standard operator table with a catalog reader
 * (so schema-registered functions resolve), and applies the Storm rule sets
 * and type system.
 *
 * @param schema root schema the planner validates and plans against
 */
public QueryPlanner(SchemaPlus schema) {
  final List<RelTraitDef> traitDefs = new ArrayList<>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelCollationTraitDef.INSTANCE);

  final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
      CalciteSchema.from(schema),
      Collections.emptyList(),
      typeFactory,
      new CalciteConnectionConfigImpl(new Properties()));
  final List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(catalogReader);

  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(schema)
      .operatorTable(new ChainedSqlOperatorTable(operatorTables))
      .traitDefs(traitDefs)
      .context(Contexts.EMPTY_CONTEXT)
      .ruleSets(StreamsStormRuleSets.getRuleSets())
      .costFactory(null)
      .typeSystem(StormRelDataTypeSystem.STORM_REL_DATATYPE_SYSTEM)
      .build();
  this.planner = Frameworks.getPlanner(config);
}
// NOTE(review): method of an anonymous class whose declaration starts before
// this chunk; the trailing "};" closes that anonymous class.
@Override
public Table createTable(CalciteSchema schema, String viewSql, List<String> viewSchemaPath) {
  assert nzTile.tableName != null;
  // Catalog reader rooted at the given schema, using the query context's
  // default schema path.
  CalciteCatalogReader calciteCatalogReader = new CalciteCatalogReader(
      schema.root(), false, queryContext.getDefaultSchemaPath(), queryContext.getTypeFactory());
  // Schema that contains the tile's backing table.
  CalciteSchema tileSchema = calciteCatalogReader.getTable(nzTile.tableName)
      .unwrap(CalciteSchema.class);
  assert tileSchema != null;
  // Look up the concrete table entry (case-sensitive: false).
  CalciteSchema.TableEntry tileTEntry = tileSchema.getTable(
      Util.last(nzTile.tableName), false);
  assert tileTEntry != null;
  // Wrap the tile, its row type, and its schema path into a QuarkTileTable.
  return new QuarkTileTable(nzTile, calciteCatalogReader,
      tileTEntry.getTable().getRowType(queryContext.getTypeFactory()),
      Schemas.path(rootSchema, nzTile.alias),
      (QuarkTable) tileTEntry.getTable());
}
};
/** * If schema is not indicated (only one element in the list) or schema is default temporary workspace, * we need to check among session temporary tables in default temporary workspace first. * If temporary table is found and temporary tables usage is allowed, its table instance will be returned, * otherwise search will be conducted in original workspace. * * @param names list of schema and table names, table name is always the last element * @return table instance, null otherwise * @throws UserException if temporary tables usage is disallowed */ @Override public Prepare.PreparingTable getTable(final List<String> names) { String originalTableName = session.getOriginalTableNameFromTemporaryTable(names.get(names.size() - 1)); if (originalTableName != null) { if (!allowTemporaryTables) { throw UserException .validationError() .message("Temporary tables usage is disallowed. Used temporary table name: [%s].", originalTableName) .build(logger); } } Prepare.PreparingTable table = super.getTable(names); DrillTable unwrap; // add session options if found table is Drill table if (table != null && (unwrap = table.unwrap(DrillTable.class)) != null) { unwrap.setOptions(session.getOptions()); } return table; }
// BUG?(review): the reader returned by withSchemaPath(...) is discarded here;
// CalciteCatalogReader.withSchemaPath returns a NEW reader rather than
// mutating the receiver, so this statement has no effect. The "catalogReader"
// used below must come from elsewhere. If the schema path was meant to apply,
// this should read
// "catalogReader = createCatalogReader().withSchemaPath(schemaPath);" -- confirm.
createCatalogReader().withSchemaPath(schemaPath);
final SqlValidator validator = new CalciteSqlValidator(operatorTable, catalogReader, typeFactory,
/**
 * Returns a new catalog reader that resolves unqualified names against
 * {@code schemaPath} first, falling back to the empty (root) path. This
 * reader itself is not modified.
 *
 * @param schemaPath schema path to try first during name resolution
 * @return a fresh reader sharing this reader's root schema, name matcher,
 *     type factory and config
 */
public CalciteCatalogReader withSchemaPath(List<String> schemaPath) {
  final ImmutableList<List<String>> schemaPaths =
      ImmutableList.of(schemaPath, ImmutableList.of());
  return new CalciteCatalogReader(rootSchema, nameMatcher, schemaPaths, typeFactory, config);
}