@Override public boolean matches(RelOptRuleCall call) { LimitPrel limitPrel = call.rel(0); ProjectPrel projectPrel = call.rel(1); // pushdown only apply limit but not offset, // so if getFetch() return null no need to run this rule. // Do not push across Project containing CONVERT_FROMJSON for limit 0 queries. For limit 0 queries, this would // mess up the schema since Convert_FromJson() is different from other regular functions in that it only knows // the output schema after evaluation is performed. When input has 0 row, Drill essentially does not have a way // to know the output type. if (!limitPrel.isPushDown() && (limitPrel.getFetch() != null) && (!DrillRelOptUtil.isLimit0(limitPrel.getFetch()) || !DrillRelOptUtil.isProjectOutputSchemaUnknown(projectPrel))) { return true; } return false; } };
/**
 * Computes and caches index statistics for the given condition when the scan is
 * backed by a {@link DbGroupScan} and nothing is cached for that condition yet.
 *
 * @return true if statistics were (re)populated, false otherwise
 */
public boolean initialize(RexNode condition, DrillScanRelBase scanRel, IndexCallContext context) {
  GroupScan scan = IndexPlanUtils.getGroupScan(scanRel);
  PlannerSettings settings = PrelUtil.getPlannerSettings(scanRel.getCluster().getPlanner());
  rowKeyJoinBackIOFactor = settings.getIndexRowKeyJoinCostFactor();
  if (!(scan instanceof DbGroupScan)) {
    return false;
  }
  String conditionAsStr = convertRexToString(condition, scanRel.getRowType());
  if (statsCache.get(conditionAsStr) != null) {
    // Stats for this exact condition are already cached; nothing to do.
    return false;
  }
  IndexCollection indexes = ((DbGroupScan) scan).getSecondaryIndexCollection(scanRel);
  populateStats(condition, indexes, scanRel, context);
  logger.info("index_plan_info: initialize: scanRel #{} and groupScan {} got fulltable {}, statsCache: {}, fiStatsCache: {}",
      scanRel.getId(), System.identityHashCode(scan), fullTableScanPayload, statsCache, fIStatsCache);
  return true;
}
return fullTableScanPayload.getLeadingRowCount(); } else if (condition != null) { conditionAsStr = convertRexToString(condition, scanRel.getRowType()); payloadMap = statsCache.get(conditionAsStr); if (payloadMap != null) {
DrillRelOptUtil.getFieldsInformation(scan.getRowType(), project.getProjects()); if (columnInfo == null || Utilities.isStarQuery(columnInfo.getFields()) || !groupScan.canPushdownProjects(columnInfo.getFields())) { return; groupScan.clone(columnInfo.getFields()), columnInfo.createNewRowType(project.getInput().getCluster().getTypeFactory()), scan.getTable()); newProjects.add(n.accept(columnInfo.getInputReWriter()));
RexBuilder builder = scanRel.getCluster().getRexBuilder(); PlannerSettings settings = PrelUtil.getSettings(scanRel.getCluster()); newRowType = FunctionalIndexHelper.rewriteFunctionalRowType(scanRel, context, functionInfo); } else { newRowType = scanRel.getRowType(); StatisticsPayload payload = new MapRDBStatisticsPayload(rowCount, leadingRowCount, avgRowSize); addToCache(idxCondition, idx, context, payload, jTabGrpScan, scanRel, newRowType); addBaseConditions(idxCondition, payload, false, baseConditionMap, scanRel.getRowType()); RexNode idxTotColCondition = IndexPlanUtils.getTotalFilter(idxLeadColCondition, idxTotRemColCondition, builder); FunctionalIndexInfo functionInfo = idx.getFunctionalInfo(); RelDataType newRowType = scanRel.getRowType(); if (functionInfo.hasFunctional()) { newRowType = FunctionalIndexHelper.rewriteFunctionalRowType(scanRel, context, functionInfo); convertToLogicalExpression(condition, scanRel.getRowType(), settings, builder)), null, scanRel); addToCache(condition, null, null, new MapRDBStatisticsPayload(rowCount, ftsLeadingKeyPayload.getRowCount(), ftsPayload.getAvgRowSize()), jTabGrpScan, scanRel, scanRel.getRowType()); ftsPayload.getAvgRowSize()), jTabGrpScan, scanRel, scanRel.getRowType());
/**
 * Checks whether every input's row type is compatible with this union's row type.
 *
 * @param compareNames   whether field names must also match
 * @param allowSubstring whether substring-compatible types are accepted
 * @return true if all inputs are compatible with the union row type
 */
public boolean isCompatible(boolean compareNames, boolean allowSubstring) {
  final RelDataType unionType = getRowType();
  return getInputs().stream()
      .allMatch(input -> DrillRelOptUtil.areRowTypesCompatible(
          input.getRowType(), unionType, compareNames, allowSubstring));
}
// Correlation variables receive the visitor's default handling via doUnknown().
@Override
public Boolean visitCorrelVariable(RexCorrelVariable correlVariable) {
  return doUnknown(correlVariable);
}
/**
 * Transforms the given expression by remapping each old field index to its new
 * index as given by {@code corrMap}.
 *
 * @param builder RexBuilder from the planner
 * @param expr    RexNode to be transformed
 * @param corrMap mapping from old field index to new field index
 * @return the transformed expression with field references rewritten
 */
public static RexNode transformExpr(RexBuilder builder, RexNode expr, Map<Integer, Integer> corrMap) {
  DrillRelOptUtil.RexFieldsTransformer transformer = new DrillRelOptUtil.RexFieldsTransformer(builder, corrMap);
  return transformer.go(expr);
}
@Override public void onMatch(RelOptRuleCall call) { final ProjectPrel project = call.rel(1); final LimitPrel limit = call.rel(0); RelNode child = project.getInput(); final RelNode limitUnderProject = new LimitPrel(child.getCluster(), child.getTraitSet(), child, limit.getOffset(), limit.getFetch()); final RelNode newProject = new ProjectPrel(project.getCluster(), project.getTraitSet(), limitUnderProject, project.getProjects(), project.getRowType()); if (DrillRelOptUtil.isProjectFlatten(project)) { //Preserve limit above the project since Flatten can produce more rows. Also mark it so we do not fire the rule again. child = newProject; final RelNode limitAboveProject = new LimitPrel(child.getCluster(), child.getTraitSet(), child, limit.getOffset(), limit.getFetch(), true); call.transformTo(limitAboveProject); } else { call.transformTo(newProject); } }
/**
 * Returns the collation list of the underlying sort, or null when there is no sort.
 */
public List<RelCollation> getCollationList() {
  return (sort == null) ? null : sort.getCollationList();
}
protected RelOptCost computeLogicalJoinCost(RelOptPlanner planner, RelMetadataQuery mq) {
  // During Logical Planning, although we don't care much about the actual physical join that will
  // be chosen, we do care about which table - bigger or smaller - is chosen as the right input
  // of the join since that is important at least for hash join and we don't currently have
  // hybrid-hash-join that can swap the inputs dynamically. The Calcite planner's default cost of a join
  // is the same whether the bigger table is used as left input or right. In order to overcome that,
  // we will use the Hash Join cost as the logical cost such that cardinality of left and right inputs
  // is considered appropriately.
  return computeHashJoinCost(planner, mq);
}
/**
 * Returns the collation of the underlying sort, or null when there is no sort.
 */
public RelCollation getCollation() {
  return (sort == null) ? null : sort.getCollation();
}
protected RelOptCost computeLogicalAggCost(RelOptPlanner planner, RelMetadataQuery mq) {
  // Similar to Join cost estimation, use HashAgg cost during the logical planning.
  return computeHashAggCost(planner, mq);
}
/**
 * Returns the {@link GroupScan} held by the given scan rel node.
 */
public static GroupScan getGroupScan(DrillScanRelBase relNode) {
  return relNode.getGroupScan();
}
/**
 * Row-count estimate for a limit rel: delegates to the rel's own estimate.
 */
public double getRowCount(DrillLimitRelBase rel, RelMetadataQuery mq) {
  return rel.estimateRowCount(mq);
}
// Literals receive the visitor's default handling via doUnknown().
@Override
public Boolean visitLiteral(RexLiteral literal) {
  return doUnknown(literal);
}
// Window (OVER) expressions receive the visitor's default handling via doUnknown().
@Override
public Boolean visitOver(RexOver over) {
  return doUnknown(over);
}
// Local references receive the visitor's default handling via doUnknown().
@Override
public Boolean visitLocalRef(RexLocalRef localRef) {
  return doUnknown(localRef);
}
// Dynamic parameters receive the visitor's default handling via doUnknown().
@Override
public Boolean visitDynamicParam(RexDynamicParam dynamicParam) {
  return doUnknown(dynamicParam);
}
// Field accesses receive the visitor's default handling via doUnknown().
@Override
public Boolean visitFieldAccess(RexFieldAccess fieldAccess) {
  return doUnknown(fieldAccess);
}