public static RelRoot expandView(NamespaceKey path, final String viewOwner, final String queryString,
    final List<String> context, final SqlConverter sqlConverter) {
  ViewExpansionToken token = null;
  try {
    //RelDataType rowType = view.getRowType(cluster.getTypeFactory());
    token = sqlConverter.getViewExpansionContext().reserveViewExpansionToken(viewOwner);
    final DremioCatalogReader catalog;
    if (viewOwner != null) {
      catalog = sqlConverter.getCatalogReader().withSchemaPathAndUser(viewOwner, context);
    } else {
      catalog = sqlConverter.getCatalogReader().withSchemaPath(context);
    }
    final SqlConverter newConverter = new SqlConverter(sqlConverter, catalog);
    final SqlNode parsedNode = newConverter.parse(queryString);
    final SqlNode validatedNode = newConverter.validate(parsedNode);
    final RelRootPlus root = newConverter.toConvertibleRelRoot(validatedNode, true);
    if (path == null) {
      return root;
    }
    // We need to make sure that if an inner expansion is context sensitive, we consider the current
    // expansion context sensitive even if it isn't locally.
    final boolean contextSensitive = root.isContextSensitive() || ExpansionNode.isContextSensitive(root.rel);
    return new RelRoot(ExpansionNode.wrap(path, root.rel, contextSensitive), root.validatedRowType,
        root.kind, root.fields, root.collation);
  } finally {
    if (token != null) {
      token.release();
    }
  }
}
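// Illustrative usage sketch, not taken from the source: the view path, owner, view SQL,
// and schema context below are assumed values showing how a caller might expand a view.
NamespaceKey viewPath = new NamespaceKey(ImmutableList.of("myspace", "my_view")); // assumed view path
RelRoot expanded = expandView(
    viewPath,
    "view_owner",                  // expand with the view owner's privileges
    "SELECT * FROM myspace.t",     // the stored view definition (assumed)
    ImmutableList.of("myspace"),   // default schema context for name resolution
    sqlConverter);                 // an existing SqlConverter for the outer query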
public static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent) {
  final SqlConverter parser = new SqlConverter(parent, parent.getCatalogReader().withSchemaPath(ImmutableList.of()));
  final LogicalPlanDeserializer deserializer = parser.getSerializerFactory()
      .getDeserializer(parser.getCluster(), parser.getCatalogReader(), parser.getFunctionImplementationRegistry());
  return deserializer.deserialize(planBytes);
}
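// Minimal round-trip sketch (assumed inputs): planBytes would come from a prior
// serialization of a plan; loadStoredPlanBytes() is a hypothetical helper, and
// sqlConverter is assumed to be the current query's converter.
byte[] planBytes = loadStoredPlanBytes();
RelNode plan = deserializePlan(planBytes, sqlConverter);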
public DremioSqlToRelConverter(
    SqlConverter sqlConverter,
    SqlValidator validator,
    SqlRexConvertletTable convertletTable,
    Config config) {
  super(new NoOpExpander(), validator, sqlConverter.getCatalogReader(), sqlConverter.getCluster(),
      convertletTable, config);
  this.sqlConverter = sqlConverter;
}
protected ElasticsearchGroupScan generate(String sql) throws Exception {
  AttemptObserver observer = new PassthroughQueryObserver(ExecTest.mockUserClientConnection(null));
  SqlConverter converter = new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      context.getMaterializationProvider(),
      context.getFunctionRegistry(),
      context.getSession(),
      observer,
      context.getCatalog(),
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
  SqlNode node = converter.parse(sql);
  SqlHandlerConfig config = new SqlHandlerConfig(context, converter, observer, null);
  NormalHandler handler = new NormalHandler();
  PhysicalPlan plan = handler.getPlan(config, sql, node);
  List<PhysicalOperator> operators = plan.getSortedOperators();
  ElasticsearchGroupScan scan = find(operators);
  assertNotNull("Physical plan does not contain an elasticsearch scan for query: " + sql, scan);
  return scan;
}
private CommandRunner<?> getSqlCommand(String sql, boolean isPrepare) {
  try {
    // The original snippet was truncated mid-call; the remaining constructor arguments
    // are assumed to follow the standard pattern used by the other SqlConverter call
    // sites in this section.
    final SqlConverter parser = new SqlConverter(
        context.getPlannerSettings(),
        context.getOperatorTable(),
        context,
        context.getMaterializationProvider(),
        context.getFunctionRegistry(),
        context.getSession(),
        observer,
        context.getCatalog(),
        context.getSubstitutionProviderFactory(),
        context.getConfig(),
        context.getScanResult());
    final DremioCatalogReader reader = parser.getCatalogReader();
    final Catalog catalog = context.getCatalog();
    final SqlNode sqlNode = parser.parse(sql);
    final SqlHandlerConfig config = new SqlHandlerConfig(context, parser, observer, parser.getMaterializations());
    // ... (rest of the method elided in the original snippet)
  }
  // ... (catch/finally elided in the original snippet)
}
ExpansionHelper(QueryContext context) {
  this.context = Preconditions.checkNotNull(context, "query context required");
  converter = new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      MaterializationDescriptorProvider.EMPTY,
      context.getFunctionRegistry(),
      context.getSession(),
      AbstractAttemptObserver.NOOP,
      context.getCatalog(),
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
}
observer.planValidated(RecordWriter.SCHEMA.toCalciteRecordType(config.getConverter().getCluster().getTypeFactory()),
    compact, watch.elapsed(TimeUnit.MILLISECONDS));
// ... (intervening code elided in the original snippet)
final RelNode doubleWriter = SqlHandlerUtil.storeQueryResultsIfNeeded(config.getConverter().getParserConfig(),
    config.getContext(), writerDrel);
private SqlNode parseQueryInternal(SqlConverter converter, String sql) {
  try (TimedBlock b = time("parse")) {
    return converter.parse(sql);
  }
}
@Override
public RelOptCluster getCluster() {
  return parent.getCluster();
}
/**
 * Returns the Dremio logical RelNode tree for a SELECT statement when it is executed or explained directly.
 *
 * @param config the handler configuration supplying the converter and query context
 * @param relNode the root RelNode of the Calcite logical plan
 * @param validatedRowType the row type carrying the final field names; a rename project may be placed on top of the root
 * @return the Dremio logical plan rooted at a ScreenRel
 * @throws RelConversionException
 * @throws SqlUnsupportedException
 */
public static Rel convertToDrel(SqlHandlerConfig config, RelNode relNode, RelDataType validatedRowType)
    throws RelConversionException, SqlUnsupportedException {
  Rel convertedRelNode = convertToDrel(config, relNode);
  // Put a non-trivial top project in place to ensure the final output field names are preserved, when necessary.
  convertedRelNode = addRenamedProject(config, convertedRelNode, validatedRowType);
  convertedRelNode = SqlHandlerUtil.storeQueryResultsIfNeeded(config.getConverter().getParserConfig(),
      config.getContext(), convertedRelNode);
  return new ScreenRel(convertedRelNode.getCluster(), convertedRelNode.getTraitSet(), convertedRelNode);
}
public DremioCatalogReader getCatalogReader() {
  return sqlConverter.getCatalogReader();
}
private void validateFormattedIs(String sql, SqlParserPos pos, String expected) {
  String formatted = SqlConverter.formatSQLParsingError(sql, pos);
  assertEquals(expected, formatted);
}
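// Illustrative call (assumed inputs): format a parse error marker for line 1, column 8
// of the query text. The exact expected string depends on formatSQLParsingError's
// output format, so no assertion is shown here.
String formatted = SqlConverter.formatSQLParsingError("SELECT FROM t", new SqlParserPos(1, 8));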
final SqlConverter converter = new SqlConverter(
    queryContext.getPlannerSettings(),
    queryContext.getOperatorTable(),
    queryContext.getConfig(),
    queryContext.getScanResult());
final SqlNode node = converter.parse(sql);
final SqlHandlerConfig config = new SqlHandlerConfig(queryContext, converter, observer, null);
private SqlConverter getNewConverter(QueryContext context, SqlQuery query, AttemptObserver observerForSubstitution) {
  Catalog catalog = context.getCatalog();
  final List<String> sqlContext = query.getContext();
  if (sqlContext != null) {
    NamespaceKey path = new NamespaceKey(sqlContext);
    try {
      catalog = catalog.resolveCatalog(path);
    } catch (Exception e) {
      throw UserException.validationError(e)
          .message("Unable to resolve schema path [%s]. Failure resolving [%s] portion of path.", sqlContext, path)
          .build(logger);
    }
  }
  return new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      context.getMaterializationProvider(),
      context.getFunctionRegistry(),
      context.getSession(),
      observerForSubstitution,
      catalog,
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
}
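// Illustrative call (names assumed): the returned converter resolves unqualified
// table names against the query's context, so with a context of ["myspace"] a
// reference to "t" is looked up as myspace.t.
SqlConverter contextualConverter = getNewConverter(queryContext, sqlQuery, attemptObserver);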
observer.planValidated(RecordWriter.SCHEMA.toCalciteRecordType(config.getConverter().getCluster().getTypeFactory()),
    materialize, watch.elapsed(TimeUnit.MILLISECONDS));
// ... (elided in the original snippet: construction of writerDrel, ending with initial.getRowType())
final RelNode doubleWriter = SqlHandlerUtil.storeQueryResultsIfNeeded(config.getConverter().getParserConfig(),
    config.getContext(), writerDrel);
@Override
public RelOptCluster getCluster() {
  return sqlConverter.getCluster();
}
private static Rel convertToDrel(
    SqlHandlerConfig config,
    RelNode relNode,
    Catalog catalog,
    NamespaceKey key,
    WriterOptions options,
    RelDataType queryRowType,
    final Map<String, Object> storageOptions)
    throws RelConversionException, SqlUnsupportedException {
  Rel convertedRelNode = PrelTransformer.convertToDrel(config, relNode);
  // Put a non-trivial top project in place to ensure the final output field names are preserved, when necessary.
  // Only insert the project when the field count from the child is the same as that of the queryRowType.
  convertedRelNode = new WriterRel(convertedRelNode.getCluster(),
      convertedRelNode.getCluster().traitSet().plus(Rel.LOGICAL),
      convertedRelNode,
      catalog.createNewTable(key, options, storageOptions),
      queryRowType);
  convertedRelNode = SqlHandlerUtil.storeQueryResultsIfNeeded(config.getConverter().getParserConfig(),
      config.getContext(), convertedRelNode);
  return new ScreenRel(convertedRelNode.getCluster(), convertedRelNode.getTraitSet(), convertedRelNode);
}
private RelNode expandSchemaPath(final List<String> path) {
  final DremioCatalogReader catalog = parent.getCatalogReader();
  final RelOptTable table = catalog.getTable(path);
  if (table == null) {
    return null;
  }
  ToRelContext context = new ToRelContext() {
    @Override
    public RelOptCluster getCluster() {
      return parent.getCluster();
    }

    @Override
    public RelRoot expandView(RelDataType rowType, String queryString, List<String> schemaPath, List<String> viewPath) {
      return null;
    }
  };
  NamespaceTable newTable = table.unwrap(NamespaceTable.class);
  if (newTable != null) {
    return newTable.toRel(context, table);
  }
  throw new IllegalStateException("Unable to expand path for table: " + table);
}
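// Minimal sketch (assumed path): expand a multi-part table path into its scan RelNode.
// Per the method above, this returns null when the catalog cannot resolve the path and
// throws IllegalStateException when the resolved table is not a NamespaceTable.
RelNode scan = expandSchemaPath(ImmutableList.of("myspace", "my_table")); // hypothetical table path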
final QueryContext queryContext = new QueryContext(session(), context, UserBitShared.QueryId.getDefaultInstance());
final AttemptObserver observer = new PassthroughQueryObserver(ExecTest.mockUserClientConnection(null));
final SqlConverter converter = new SqlConverter(
    queryContext.getPlannerSettings(),
    queryContext.getOperatorTable(),
    queryContext.getConfig(),
    queryContext.getScanResult());
final SqlNode node = converter.parse(sql);
final SqlHandlerConfig config = new SqlHandlerConfig(queryContext, converter, observer, null);
@Override
public DremioMaterialization getMaterializationFor(SqlConverter converter) {
  final CopyWithCluster copier = new CopyWithCluster(converter.getCluster());
  final DremioMaterialization copied = materialization.accept(copier);
  copier.validate();
  return copied;
}
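// Hypothetical usage (provider is an assumed holder of the method above): obtain a copy
// of the cached materialization whose RelNodes belong to this converter's RelOptCluster,
// so it can safely participate in substitution for the current query.
DremioMaterialization rehosted = provider.getMaterializationFor(converter);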