/**
 * Builds the Calcite root schema for Druid SQL, registering the native Druid
 * schema, the INFORMATION_SCHEMA (which introspects the root), and the system
 * tables schema.
 *
 * @param druidSchema      schema exposing Druid datasources
 * @param systemSchema     schema exposing system tables
 * @param authorizerMapper used by INFORMATION_SCHEMA to filter visible objects
 * @return the populated root {@link SchemaPlus}
 */
public static SchemaPlus createRootSchema(
    final DruidSchema druidSchema,
    final SystemSchema systemSchema,
    final AuthorizerMapper authorizerMapper
)
{
  // createRootSchema(false, false): no metadata schema, no caching wrapper.
  final SchemaPlus root = CalciteSchema.createRootSchema(false, false).plus();
  root.add(DruidSchema.NAME, druidSchema);
  root.add(InformationSchema.NAME, new InformationSchema(root, authorizerMapper));
  root.add(SystemSchema.NAME, systemSchema);
  return root;
}
// Registers each of this schema's sub-schemas under the supplied SchemaPlus wrapper.
void setHolder(SchemaPlus plusOfThis) {
  for (String subSchemaName : getSubSchemaNames()) {
    plusOfThis.add(subSchemaName, getSubSchema(subSchemaName));
  }
}
// Registers each known-existing sub-schema under the supplied SchemaPlus wrapper.
// Uses getSubSchemaKnownExists since the names come straight from getSubSchemaNames().
void setHolder(SchemaPlus plusOfThis) {
  for (String subSchemaName : getSubSchemaNames()) {
    plusOfThis.add(subSchemaName, getSubSchemaKnownExists(subSchemaName));
  }
}
// Mounts every child schema under the given wrapper and recurses so the
// whole schema subtree gets wired to its SchemaPlus holder.
void setHolder(SchemaPlus plusOfThis) {
  for (String childName : getSubSchemaNames()) {
    CapitalizingJdbcSchema child = getSubSchema(childName);
    child.setHolder(plusOfThis.add(childName, child));
  }
}
// Attaches each child schema to the supplied wrapper, then recurses into the
// child so its own descendants are registered as well.
void setHolder(SchemaPlus plusOfThis) {
  for (String childName : getSubSchemaNames()) {
    final CapitalizingJdbcSchema childSchema = getSubSchema(childName);
    final SchemaPlus childHolder = plusOfThis.add(childName, childSchema);
    childSchema.setHolder(childHolder);
  }
}
// Exposes this plugin's tables by mounting a fresh OpenTSDB schema under the parent.
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  parent.add(getName(), new OpenTSDBSchema(getName()));
}
/**
 * Registers a user-defined function declared by a CREATE FUNCTION statement.
 *
 * <p>A scalar UDF is detected by the presence of an {@code evaluate} method on the
 * named class; an aggregate UDF by the presence of an {@code add} method. The
 * function is registered in the schema under the upper-cased function name.
 *
 * @param sqlCreateFunction the parsed CREATE FUNCTION node
 * @throws ClassNotFoundException        if the UDF class cannot be loaded
 * @throws UnsupportedOperationException if a 'USING JAR' clause is present (not implemented)
 * @throws IllegalArgumentException      if the class exposes neither 'evaluate' nor 'add'
 */
public void interpretCreateFunction(SqlCreateFunction sqlCreateFunction) throws ClassNotFoundException {
  if (sqlCreateFunction.jarName() != null) {
    throw new UnsupportedOperationException("UDF 'USING JAR' not implemented");
  }
  final String className = sqlCreateFunction.className();
  final Function function;
  // Scalar UDFs expose an "evaluate" method; aggregate UDFs expose "add".
  final Method evaluateMethod = findMethod(className, "evaluate");
  if (evaluateMethod != null) {
    function = ScalarFunctionImpl.create(evaluateMethod);
  } else if (findMethod(className, "add") != null) {
    function = AggregateFunctionImpl.create(Class.forName(className));
  } else {
    // IllegalArgumentException is a RuntimeException, so existing callers still catch it;
    // including the class name makes the failure diagnosable.
    throw new IllegalArgumentException(
        "Invalid scalar or aggregate function: class " + className
            + " defines neither an 'evaluate' nor an 'add' method");
  }
  schema.add(sqlCreateFunction.functionName().toUpperCase(), function);
  hasUdf = true;
}
// Mounts the HBase schema under the parent and hands it its SchemaPlus wrapper
// so it can register its own sub-schemas.
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  final HBaseSchema hbaseSchema = new HBaseSchema(getName());
  hbaseSchema.setHolder(parent.add(getName(), hbaseSchema));
}
// Mounts the Kudu tables schema under the parent and gives it back its wrapper
// so it can wire up children.
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  final KuduTables kuduTables = new KuduTables(getName());
  kuduTables.setHolder(parent.add(getName(), kuduTables));
}
// Mounts the Kafka message schema under the parent and hands it its wrapper
// so it can register nested schemas.
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  final KafkaMessageSchema kafkaSchema = new KafkaMessageSchema(plugin, getName());
  kafkaSchema.setHolder(parent.add(getName(), kafkaSchema));
}
// Mounts the JDBC catalog schema under the parent and passes its wrapper back
// so it can attach catalog/schema children.
@Override
public void registerSchemas(SchemaConfig config, SchemaPlus parent) {
  final JdbcCatalogSchema catalogSchema = new JdbcCatalogSchema(getName());
  catalogSchema.setHolder(parent.add(getName(), catalogSchema));
}
/**
 * Prepares a SQL statement over a virtual "FLOWFILE" table backed by the given flow file.
 *
 * <p>Fix: the original leaked the connection obtained from {@code connectionSupplier}
 * whenever table setup or {@code prepareStatement} threw — the connection is now
 * closed on failure, with any close failure attached as a suppressed exception.
 *
 * @param sql                 the query to prepare
 * @param connectionSupplier  supplies a fresh CalciteConnection to own
 * @param session             process session used to read the flow file
 * @param flowFile            flow file whose records back the FLOWFILE table
 * @param recordReaderFactory factory for reading the flow file's records
 * @return a CachedStatement owning the prepared statement, table, and connection
 * @throws SQLException if statement preparation fails
 */
private CachedStatement buildCachedStatement(final String sql, final Supplier<CalciteConnection> connectionSupplier,
        final ProcessSession session, final FlowFile flowFile, final RecordReaderFactory recordReaderFactory)
        throws SQLException {

    final CalciteConnection connection = connectionSupplier.get();
    try {
        final SchemaPlus rootSchema = connection.getRootSchema();
        final FlowFileTable<?, ?> flowFileTable = new FlowFileTable<>(session, flowFile, recordReaderFactory, getLogger());
        rootSchema.add("FLOWFILE", flowFileTable);
        // Disable caching so each query sees the current flow file content.
        rootSchema.setCacheEnabled(false);

        final PreparedStatement stmt = connection.prepareStatement(sql);
        return new CachedStatement(stmt, flowFileTable, connection);
    } catch (final SQLException | RuntimeException e) {
        // Don't leak the connection if preparation fails.
        try {
            connection.close();
        } catch (final SQLException closeFailure) {
            e.addSuppressed(closeFailure);
        }
        throw e;
    }
}
// Registers the Hive schema under the parent. When Drill impersonation is on,
// a per-user metastore client is fetched from the loading cache so metadata
// access happens as the query user; otherwise the shared process-user client is used.
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  DrillHiveMetaStoreClient metastoreClient = processUserMetastoreClient;
  if (isDrillImpersonationEnabled) {
    try {
      metastoreClient = metaStoreClientLoadingCache.get(schemaConfig.getUserName());
    } catch (final ExecutionException e) {
      throw new IOException("Failure setting up Hive metastore client.", e);
    }
  }
  final HiveSchema hiveSchema = new HiveSchema(schemaConfig, metastoreClient, getName());
  hiveSchema.setHolder(parent.add(getName(), hiveSchema));
}
// Expose the flow file as the virtual "FLOWFILE" table and disable schema
// caching so each query reflects the current flow file content.
rootSchema.add("FLOWFILE", flowFileTable); rootSchema.setCacheEnabled(false);
/**
 * Handles a CREATE TABLE statement: builds the table definition, resolves its
 * backing data source, and registers both.
 *
 * <p>Fix: validation (duplicate name, missing data source) now happens BEFORE the
 * schema is mutated; the original called {@code schema.add(...)} first, so a failed
 * CREATE TABLE left a partially-registered table behind. Also drops the redundant
 * null check before {@code instanceof} (instanceof is null-safe).
 *
 * @param n the parsed CREATE TABLE node
 * @throws RuntimeException if the table is already defined, or no data source
 *                          matches the declared location URI
 */
public void interpretCreateTable(SqlCreateTable n) {
  // Reject duplicates before mutating any state.
  if (dataSources.containsKey(n.tableName())) {
    throw new RuntimeException("Duplicated definition for table " + n.tableName());
  }

  CompilerUtil.TableBuilderInfo builder = new CompilerUtil.TableBuilderInfo(typeFactory);
  List<FieldInfo> fields = new ArrayList<>();
  for (ColumnDefinition col : n.fieldList()) {
    builder.field(col.name(), col.type(), col.constraint());
    RelDataType dataType = col.type().deriveType(typeFactory);
    Class<?> javaType = (Class<?>) typeFactory.getJavaClass(dataType);
    // instanceof is null-safe, so no separate null check is required.
    boolean isPrimary = col.constraint() instanceof ColumnConstraint.PrimaryKey;
    fields.add(new FieldInfo(col.name(), javaType, isPrimary));
  }

  if (n.parallelism() != null) {
    builder.parallelismHint(n.parallelism());
  }

  // Resolve the data source before touching the schema so a failure leaves
  // no partial registration.
  ISqlStreamsDataSource ds = DataSourcesRegistry.constructStreamsDataSource(
      n.location(), n.inputFormatClass(), n.outputFormatClass(), n.properties(), fields);
  if (ds == null) {
    throw new RuntimeException("Failed to find data source for " + n.tableName() + " URI: " + n.location());
  }

  Table table = builder.build();
  schema.add(n.tableName(), table);
  dataSources.put(n.tableName(), ds);
}
/**
 * Plans the given SQL over a dummy streaming table (registered as both FOO and
 * BAR) with a MYPLUS scalar UDF, printing the plan and returning the planner state.
 */
public static CalciteState sqlOverDummyTable(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

  // Build the streaming table once and mount it under two names.
  Table dummyTable = new CompilerUtil.TableBuilderInfo(typeFactory)
      .field("ID", SqlTypeName.INTEGER,
          new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
      .field("NAME", typeFactory.createType(String.class))
      .field("ADDR", typeFactory.createType(String.class))
      .build()
      .stream();
  rootSchema.add("FOO", dummyTable);
  rootSchema.add("BAR", dummyTable);
  rootSchema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));

  StreamsRel tree = new QueryPlanner(rootSchema).getPlan(sql);
  System.out.println(StormRelUtils.explain(tree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(rootSchema, tree);
}
/**
 * Plans the given SQL over a dummy group-by table (registered as both FOO and
 * BAR) with MYSTATICSUM / MYSUM aggregate UDFs, printing the plan and returning
 * the planner state.
 */
public static CalciteState sqlOverDummyGroupByTable(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

  // Build the streaming table once and mount it under two names.
  Table groupByTable = new CompilerUtil.TableBuilderInfo(typeFactory)
      .field("ID", SqlTypeName.INTEGER,
          new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
      .field("GRPID", SqlTypeName.INTEGER)
      .field("NAME", typeFactory.createType(String.class))
      .field("ADDR", typeFactory.createType(String.class))
      .field("AGE", SqlTypeName.INTEGER)
      .field("SCORE", SqlTypeName.INTEGER)
      .build()
      .stream();
  rootSchema.add("FOO", groupByTable);
  rootSchema.add("BAR", groupByTable);
  rootSchema.add("MYSTATICSUM", AggregateFunctionImpl.create(MyStaticSumFunction.class));
  rootSchema.add("MYSUM", AggregateFunctionImpl.create(MySumFunction.class));

  StreamsRel tree = new QueryPlanner(rootSchema).getPlan(sql);
  System.out.println(StormRelUtils.explain(tree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(rootSchema, tree);
}
/**
 * Plans the given SQL over two streaming tables (EMP, DEPT) suitable for
 * simple equi-join queries, printing the plan and returning the planner state.
 */
public static CalciteState sqlOverSimpleEquiJoinTables(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

  Table empTable = new CompilerUtil.TableBuilderInfo(typeFactory)
      .field("EMPID", SqlTypeName.INTEGER,
          new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
      .field("EMPNAME", SqlTypeName.VARCHAR)
      .field("DEPTID", SqlTypeName.INTEGER)
      .build()
      .stream();
  Table deptTable = new CompilerUtil.TableBuilderInfo(typeFactory)
      .field("DEPTID", SqlTypeName.INTEGER,
          new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
      .field("DEPTNAME", SqlTypeName.VARCHAR)
      .build()
      .stream();
  rootSchema.add("EMP", empTable);
  rootSchema.add("DEPT", deptTable);

  StreamsRel tree = new QueryPlanner(rootSchema).getPlan(sql);
  System.out.println(StormRelUtils.explain(tree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(rootSchema, tree);
}
// Mounts an empty schema (named per the JSON model) under the current mutable
// schema, applies its lookup path if declared, then populates its contents.
public void visit(JsonMapSchema jsonSchema) {
  checkRequiredAttributes(jsonSchema, "name");
  final SchemaPlus mounted =
      currentMutableSchema("schema").add(jsonSchema.name, new AbstractSchema());
  if (jsonSchema.path != null) {
    mounted.setPath(stringListList(jsonSchema.path));
  }
  populateSchema(jsonSchema, mounted);
}