/**
 * Convenience overload that generates work units (plan fragments) using state pulled
 * from this instance's {@code queryContext}.
 *
 * <p>This assumes {@code queryContext} is non-null and fails fast otherwise.
 * NOTE(review): values read from the context here (e.g. {@code getQueryContextInfo()})
 * may differ from what they were at construction time, since the context can change
 * between the constructor and this call — callers should not rely on ctor-time state.
 *
 * @param options     session/query option list propagated to the fragments
 * @param reader      reader used to serialize/deserialize the physical plan
 * @param rootNode    root of the fragment tree to materialize
 * @param planningSet parallelization decisions for each fragment
 * @return the generated plan fragments
 * @throws ExecutionSetupException if fragment generation fails
 */
private List<PlanFragment> generateWorkUnit(
    OptionList options,
    PhysicalPlanReader reader,
    Fragment rootNode,
    PlanningSet planningSet) throws ExecutionSetupException {
  Preconditions.checkNotNull(queryContext);
  return generateWorkUnit(options, queryContext.getCurrentEndpoint(), queryContext.getQueryId(), reader, rootNode,
      planningSet, queryContext.getSession(), queryContext.getQueryContextInfo(), queryContext.getFunctionRegistry());
}
/**
 * Builds the prepared-statement response for this plan.
 *
 * <p>The response carries the schema of the plan's root operator, the prepared-statement
 * state, the query id, and the session's catalog name.
 */
@Override
public CreatePreparedStatementResp execute() {
  final String catalogName = context.getSession().getCatalogName();
  return PreparedStatementProvider.build(
      plan.getRoot().getSchema(context.getFunctionRegistry()),
      state,
      context.getQueryId(),
      catalogName);
}
ExpansionHelper(QueryContext context) { this.context = Preconditions.checkNotNull(context, "query context required"); converter = new SqlConverter( context.getPlannerSettings(), context.getOperatorTable(), context, MaterializationDescriptorProvider.EMPTY, context.getFunctionRegistry(), context.getSession(), AbstractAttemptObserver.NOOP, context.getCatalog(), context.getSubstitutionProviderFactory(), context.getConfig(), context.getScanResult()); }
final BatchSchema childSchema = child.getSchema(creator.getContext().getFunctionRegistry()); LogicalExpression expr = ExpressionTreeMaterializer.materializeAndCheckErrors(ne.getExpr(), childSchema, creator.getContext().getFunctionRegistry()); final LogicalExpression expr = ExpressionTreeMaterializer.materializeAndCheckErrors(ne.getExpr(), childSchema, creator.getContext().getFunctionRegistry());
@Override public Prel visitPrel(Prel prel, Void value) throws IOException { if (prel instanceof LimitPrel) { LimitPrel limit = (LimitPrel) prel; if(isLimit0(limit.getFetch())){ PhysicalOperator op = PrelTransformer.convertToPop(config, prel); BatchSchema schema = op.getSchema(config.getContext().getFunctionRegistry()); // make sure to remove any selection vector modes since we're now the leaf node. schema = schema.clone(SelectionVectorMode.NONE); return new EmptyPrel(prel.getCluster(), prel.getTraitSet(), prel.getRowType(), schema); } } List<RelNode> children = new ArrayList<>(); for(Prel child : prel){ children.add(child.accept(this, null)); } return (Prel) prel.copy(prel.getTraitSet(), children); }
/**
 * Builds a Mockito-mocked {@link QueryContext} backed by the given {@link SabotContext}.
 *
 * <p>Fix: {@code getSession()} was previously stubbed twice — first with a session carrying
 * the option manager, then overridden with a fresh session that only enabled complex types.
 * With Mockito, the later stub wins, so the session that backed {@code sessionOptions} was
 * silently discarded. A single session now carries both settings and is stubbed once.
 *
 * @param dbContext source of option manager, persistence, catalog and config
 * @return a mocked query context suitable for planning tests
 * @throws Exception if any collaborator construction fails
 */
protected QueryContext mockQueryContext(SabotContext dbContext) throws Exception {
  // One session serves both purposes: it backs the option-manager chain below AND is
  // what getSession() returns, with complex-type support enabled.
  final UserSession userSession = UserSession.Builder.newBuilder()
      .withOptionManager(dbContext.getOptionManager())
      .setSupportComplexTypes(true)
      .build();
  final SessionOptionManager sessionOptions = (SessionOptionManager) userSession.getOptions();
  final QueryOptionManager queryOptions = new QueryOptionManager(sessionOptions);
  final ExecutionControls executionControls = new ExecutionControls(queryOptions, NodeEndpoint.getDefaultInstance());
  final OperatorTable table = new OperatorTable(FUNCTIONS());
  final LogicalPlanPersistence lp = dbContext.getLpPersistence();
  final CatalogService registry = dbContext.getCatalogService();

  final QueryContext context = Mockito.mock(QueryContext.class);
  when(context.getSession()).thenReturn(userSession);
  when(context.getLpPersistence()).thenReturn(lp);
  when(context.getCatalogService()).thenReturn(registry);
  when(context.getFunctionRegistry()).thenReturn(FUNCTIONS());
  when(context.getCurrentEndpoint()).thenReturn(NodeEndpoint.getDefaultInstance());
  when(context.getActiveEndpoints()).thenReturn(ImmutableList.of(NodeEndpoint.getDefaultInstance()));
  when(context.getPlannerSettings()).thenReturn(
      new PlannerSettings(dbContext.getConfig(), queryOptions, dbContext.getClusterResourceInformation()));
  when(context.getOptions()).thenReturn(queryOptions);
  when(context.getConfig()).thenReturn(DEFAULT_SABOT_CONFIG);
  when(context.getOperatorTable()).thenReturn(table);
  when(context.getAllocator()).thenReturn(allocator);
  when(context.getExecutionControls()).thenReturn(executionControls);
  when(context.getMaterializationProvider()).thenReturn(Mockito.mock(MaterializationDescriptorProvider.class));
  return context;
}
/**
 * Creates a manager for one query attempt: records identity and collaborators, then
 * registers the observers that capture resource allocation, plan details, and timing.
 *
 * @param queryId                 id of the query being managed
 * @param context                 query context (source of non-default options and registries)
 * @param tunnelCreator           creates coordinator-to-executor tunnels
 * @param completionListener      notified when the query completes
 * @param prepareId               holder for a prepared-statement id, if any
 * @param observers               attempt observers to register with
 * @param verboseProfiles         whether captured plans include verbose profiles
 * @param includeDatasetProfiles  whether captured plans include dataset profiles
 * @param catalog                 catalog for the attempt
 */
public QueryManager(
    final QueryId queryId,
    final QueryContext context,
    final CoordToExecTunnelCreator tunnelCreator,
    final CompletionListener completionListener,
    final Pointer<QueryId> prepareId,
    final AttemptObservers observers,
    final boolean verboseProfiles,
    final boolean includeDatasetProfiles,
    final Catalog catalog) {
  // Plain field captures.
  this.queryId = queryId;
  this.context = context;
  this.tunnelCreator = tunnelCreator;
  this.completionListener = completionListener;
  this.prepareId = prepareId;
  this.catalog = catalog;
  this.nonDefaultOptions = context.getNonDefaultOptions();

  // Observer wiring: resource allocation results, plan capture, then timing marks.
  this.resourceAllocationResultObserver = new ResourceAllocationResultObserver();
  observers.add(resourceAllocationResultObserver);

  this.capturer = new PlanCaptureAttemptObserver(
      verboseProfiles,
      includeDatasetProfiles,
      context.getFunctionRegistry(),
      context.getAccelerationManager().newPopulator());
  observers.add(capturer);
  observers.add(new TimeMarker());
}
/**
 * Builds the physical hash-join operator for this join node.
 *
 * @param creator   physical plan creator (context, metadata)
 * @param left      left (probe-side) input rel
 * @param right     right (build-side) input rel
 * @param leftKeys  join key ordinals on the left input
 * @param rightKeys join key ordinals on the right input
 * @return the hash-join operator with metadata attached
 * @throws IOException if child operator creation fails
 */
private PhysicalOperator getHashJoinPop(PhysicalPlanCreator creator, RelNode left, RelNode right,
    List<Integer> leftKeys, List<Integer> rightKeys) throws IOException {
  final List<String> outputFieldNames = getRowType().getFieldNames();
  assert isUnique(outputFieldNames);

  final List<String> leftFieldNames = left.getRowType().getFieldNames();
  final List<String> rightFieldNames = right.getRowType().getFieldNames();

  // Materialize the children first (left before right, as before).
  final PhysicalOperator leftOp = ((Prel) left).getPhysicalOperator(creator);
  final PhysicalOperator rightOp = ((Prel) right).getPhysicalOperator(creator);
  final JoinRelType joinType = this.getJoinType();

  final List<JoinCondition> conditions = Lists.newArrayList();
  buildJoinConditions(conditions, leftFieldNames, rightFieldNames, leftKeys, rightKeys);

  // Vectorized hash join requires both the option to be on and the conditions/inputs
  // to be vectorizable; the option check short-circuits first.
  final boolean vectorize =
      creator.getContext().getOptions().getOption(ExecConstants.ENABLE_VECTORIZED_HASHJOIN)
          && canVectorize(creator.getContext().getFunctionRegistry(), leftOp, rightOp, conditions);

  final HashJoinPOP hashJoin = new HashJoinPOP(leftOp, rightOp, conditions, joinType, vectorize);
  return creator.addMetadata(this, hashJoin);
}
/**
 * Creates an {@link OperatorContextImpl} for running a direct command with the given writer.
 *
 * <p>The context gets its own child allocator (no reservation, unlimited) and a fragment
 * handle with major/minor fragment ids fixed at 0.
 *
 * @param writer sink for the command's output
 * @return a new operator context owned by the caller
 */
private OperatorContextImpl createContext(Writer writer) {
  // Dedicated child allocator for this command: 0 reservation, no upper limit.
  BufferAllocator allocator = context.getAllocator().newChildAllocator("direct-command", 0, Long.MAX_VALUE);
  final OperatorStats stats = new OperatorStats(new OpProfileDef(0,0,0), allocator);
  // NOTE(review): many positional args below are null/defaults; the same allocator is
  // passed twice — presumably once as the operator allocator and once as the fragment
  // output allocator. TODO confirm against OperatorContextImpl's constructor signature.
  final OperatorContextImpl oc = new OperatorContextImpl(
      context.getConfig(),
      // Synthetic fragment handle: this query's id, fragment 0/0.
      FragmentHandle.newBuilder().setQueryId(context.getQueryId()).setMajorFragmentId(0).setMinorFragmentId(0).build(),
      writer, allocator, allocator, null, stats, null, null,
      context.getFunctionRegistry(), null, context.getOptions(), context.getNamespaceService(),
      null, NodeDebugContextProvider.NOOP,
      60000, // timeout in millis — TODO confirm units against the constructor
      null, ImmutableList.of());
  return oc;
}
}
new SplitUpComplexExpressions.SplitUpComplexExpressionsVisitor( context.getOperatorTable(), context.getFunctionRegistry()), null);
queryContext.getSession(), queryContextInformation, queryContext.getFunctionRegistry());
/**
 * Builds a {@code SqlConverter} for the given query, first resolving the catalog against
 * the query's default schema path (if the query supplies one).
 *
 * @param query                   the SQL query, possibly carrying a default schema context
 * @param observerForSubstitution observer notified of substitution events
 * @return a converter wired from the query context and the (possibly resolved) catalog
 */
private SqlConverter getNewConverter(QueryContext context, SqlQuery query, AttemptObserver observerForSubstitution) {
  Catalog catalog = context.getCatalog();

  // If the query carries a default schema path, re-root the catalog at it.
  final List<String> defaultSchemaPath = query.getContext();
  if (defaultSchemaPath != null) {
    final NamespaceKey key = new NamespaceKey(defaultSchemaPath);
    try {
      catalog = catalog.resolveCatalog(key);
    } catch (Exception failure) {
      throw UserException.validationError(failure)
          .message("Unable to resolve schema path [%s]. Failure resolving [%s] portion of path.",
              defaultSchemaPath, key)
          .build(logger);
    }
  }

  return new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      context.getMaterializationProvider(),
      context.getFunctionRegistry(),
      context.getSession(),
      observerForSubstitution,
      catalog,
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
}
queryContext, queryContext.getMaterializationProvider(), queryContext.getFunctionRegistry(), queryContext.getSession(), observer,
/**
 * Plans the given SQL through the normal handler and returns the Elasticsearch group
 * scan found in the resulting physical plan, failing the test if none is present.
 *
 * @param sql the query to plan
 * @return the elasticsearch scan operator from the plan
 * @throws Exception if parsing or planning fails
 */
protected ElasticsearchGroupScan generate(String sql) throws Exception {
  final AttemptObserver observer = new PassthroughQueryObserver(ExecTest.mockUserClientConnection(null));
  final SqlConverter converter = new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      context.getMaterializationProvider(),
      context.getFunctionRegistry(),
      context.getSession(),
      observer,
      context.getCatalog(),
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());

  final SqlNode node = converter.parse(sql);
  final SqlHandlerConfig config = new SqlHandlerConfig(context, converter, observer, null);
  final PhysicalPlan plan = new NormalHandler().getPlan(config, sql, node);

  final ElasticsearchGroupScan scan = find(plan.getSortedOperators());
  assertNotNull("Physical plan does not contain an elasticsearch scan for query: " + sql, scan);
  return scan;
}
queryContext, queryContext.getMaterializationProvider(), queryContext.getFunctionRegistry(), queryContext.getSession(), observer,
context, context.getMaterializationProvider(), context.getFunctionRegistry(), context.getSession(), observer,