@Override public void onMatch(RelOptRuleCall call) { final ProjectPrel project = call.rel(1); final LimitPrel limit = call.rel(0); RelNode child = project.getInput(); final RelNode limitUnderProject = new LimitPrel(child.getCluster(), child.getTraitSet(), child, limit.getOffset(), limit.getFetch()); final RelNode newProject = new ProjectPrel(project.getCluster(), project.getTraitSet(), limitUnderProject, project.getProjects(), project.getRowType()); if (DrillRelOptUtil.isProjectFlatten(project)) { //Preserve limit above the project since Flatten can produce more rows. Also mark it so we do not fire the rule again. child = newProject; final RelNode limitAboveProject = new LimitPrel(child.getCluster(), child.getTraitSet(), child, limit.getOffset(), limit.getFetch(), true); call.transformTo(limitAboveProject); } else { call.transformTo(newProject); } }
@Override public boolean matches(RelOptRuleCall call) { final LimitPrel limit = call.rel(0); // We do not fire this rule if fetch() is null (indicating we have to fetch all the // remaining rows starting from offset. return !limit.isPushDown() && limit.getFetch() != null; } };
private GroupScan getGroupScanWithLimit(GroupScan groupScan, LimitPrel limit) { final int offset = limit.getOffset() != null ? Math.max(0, RexLiteral.intValue(limit.getOffset())) : 0; final int fetch = Math.max(0, RexLiteral.intValue(limit.getFetch())); // Scan Limit uses conservative approach: use offset 0 and fetch = parent limit offset + parent limit fetch. if (groupScan instanceof JsonTableGroupScan) { JsonTableGroupScan jsonTableGroupScan = (JsonTableGroupScan) groupScan; return (jsonTableGroupScan.clone(jsonTableGroupScan.getScanSpec()).applyLimit(offset + fetch)); } else if (groupScan instanceof BinaryTableGroupScan) { BinaryTableGroupScan binaryTableGroupScan = (BinaryTableGroupScan) groupScan; final HBaseScanSpec oldScanSpec = binaryTableGroupScan.getHBaseScanSpec(); final HBaseScanSpec newScanSpec = new HBaseScanSpec(oldScanSpec.getTableName(), oldScanSpec.getStartRow(), oldScanSpec.getStopRow(), oldScanSpec.getFilter()); return new BinaryTableGroupScan(binaryTableGroupScan.getUserName(), binaryTableGroupScan.getStoragePlugin(), binaryTableGroupScan.getFormatPlugin(), newScanSpec, binaryTableGroupScan.getColumns(), binaryTableGroupScan.getTableStats()).applyLimit(offset + fetch); } return null; }
@Override
public Prel prepareForLateralUnnestPipeline(List<RelNode> children) {
  // Rebuild this limit over the supplied child as a partitioned limit
  // (last constructor argument = true), keeping offset/fetch/pushdown state.
  final RelNode newInput = children.get(0);
  return new LimitPrel(getCluster(), this.traitSet, newInput, getOffset(), getFetch(), isPushDown(), true);
}
}
@Override public void onMatch(RelOptRuleCall call) { final LimitPrel limit = (LimitPrel) call.rel(0); final ExchangePrel exchangePrel = (ExchangePrel) call.rel(1); RelNode child = exchangePrel.getInput(); final int offset = limit.getOffset() != null ? Math.max(0, RexLiteral.intValue(limit.getOffset())) : 0; final int fetch = Math.max(0, RexLiteral.intValue(limit.getFetch())); // child Limit uses conservative approach: use offset 0 and fetch = parent limit offset + parent limit fetch. final RexNode childFetch = limit.getCluster().getRexBuilder().makeExactLiteral(BigDecimal.valueOf(offset + fetch)); final RelNode limitUnderExchange = new LimitPrel(child.getCluster(), child.getTraitSet(), child, null, childFetch); final RelNode newExch = exchangePrel.copy(exchangePrel.getTraitSet(), ImmutableList.of(limitUnderExchange)); final RelNode limitAboveExchange = new LimitPrel(limit.getCluster(), limit.getTraitSet(), newExch, limit.getOffset(), limit.getFetch(), true); call.transformTo(limitAboveExchange); }
@Override
public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
  // Rebuild this limit with the requested traits and (single) input,
  // carrying over offset, fetch, and both the pushdown and partition flags.
  final RelNode onlyInput = sole(inputs);
  return new LimitPrel(getCluster(), traitSet, onlyInput, offset, fetch, isPushDown(), isPartitioned);
}
private Prel transformTopNToSortAndLimit(List<RelNode> children, RelTraitSet traits, RelCollation collationTrait) { SortPrel sortprel = new SortPrel(this.getCluster(), traits, children.get(0), collationTrait); RexNode offset = this.getCluster().getRexBuilder().makeExactLiteral(BigDecimal.valueOf(0), this.getCluster().getTypeFactory().createSqlType(SqlTypeName.INTEGER)); RexNode limit = this.getCluster().getRexBuilder().makeExactLiteral(BigDecimal.valueOf(this.limit), this.getCluster().getTypeFactory().createSqlType(SqlTypeName.INTEGER)); //SMEX is not needed here because Lateral/Unnest pipeline doesn't support exchanges. LimitPrel limitPrel = new LimitPrel(this.getCluster(), traits, sortprel, offset, limit, false, true); return limitPrel; } }
@Override public boolean matches(RelOptRuleCall call) { boolean topNEnabled = PrelUtil.getPlannerSettings(call.getPlanner()).getOptions().getOption(PlannerSettings.TOPN.getOptionName()).bool_val; if (!topNEnabled) { return false; } else { // If no limit is defined it doesn't make sense to use TopN since it could use unbounded memory in this case. // We should use the sort and limit operator in this case. // This also fixes DRILL-6474 final LimitPrel limit = call.rel(0); return limit.getFetch() != null; } }
@Override
public Iterator<Prel> iterator() {
  // Iterate over this node's single input.
  final RelNode onlyChild = getInput();
  return PrelUtil.iter(onlyChild);
}
@Override public void onMatch(RelOptRuleCall call) { final LimitPrel limit = call.rel(0); final SingleMergeExchangePrel smex = call.rel(1); final SortPrel sort = call.rel(2); // First offset to include into results (inclusive). Null implies it is starting from offset 0 int offset = limit.getOffset() != null ? Math.max(0, RexLiteral.intValue(limit.getOffset())) : 0; int fetch = Math.max(0, RexLiteral.intValue(limit.getFetch())); final TopNPrel topN = new TopNPrel(limit.getCluster(), sort.getTraitSet(), sort.getInput(), offset + fetch, sort.getCollation()); final LimitPrel newLimit = new LimitPrel(limit.getCluster(), limit.getTraitSet(), new SingleMergeExchangePrel(smex.getCluster(), smex.getTraitSet(), topN, sort.getCollation()), limit.getOffset(), limit.getFetch()); call.transformTo(newLimit); }
private GroupScan getGroupScanWithLimit(GroupScan groupScan, LimitPrel limit) { final int offset = limit.getOffset() != null ? Math.max(0, RexLiteral.intValue(limit.getOffset())) : 0; final int fetch = Math.max(0, RexLiteral.intValue(limit.getFetch())); // Scan Limit uses conservative approach: use offset 0 and fetch = parent limit offset + parent limit fetch. if (groupScan instanceof JsonTableGroupScan) { JsonTableGroupScan jsonTableGroupScan = (JsonTableGroupScan) groupScan; return (jsonTableGroupScan.clone(jsonTableGroupScan.getScanSpec()).applyLimit(offset + fetch)); } else if (groupScan instanceof BinaryTableGroupScan) { BinaryTableGroupScan binaryTableGroupScan = (BinaryTableGroupScan) groupScan; final HBaseScanSpec oldScanSpec = binaryTableGroupScan.getHBaseScanSpec(); final HBaseScanSpec newScanSpec = new HBaseScanSpec(oldScanSpec.getTableName(), oldScanSpec.getStartRow(), oldScanSpec.getStopRow(), oldScanSpec.getFilter()); return new BinaryTableGroupScan(binaryTableGroupScan.getUserName(), binaryTableGroupScan.getStoragePlugin(), binaryTableGroupScan.getFormatPlugin(), newScanSpec, binaryTableGroupScan.getColumns(), binaryTableGroupScan.getTableStats()).applyLimit(offset + fetch); } return null; }
@Override
public void onMatch(RelOptRuleCall call) {
  final DrillLimitRel limitRel = (DrillLimitRel) call.rel(0);
  final RelNode input = limitRel.getInput();

  // The physical limit runs as a singleton; convert its input to match.
  final RelTraitSet inputTraits = input.getTraitSet()
      .plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON);
  final RelNode physicalInput = convert(input, inputTraits);

  final RelTraitSet limitTraits = limitRel.getTraitSet()
      .plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON);
  final LimitPrel physicalLimit = new LimitPrel(limitRel.getCluster(), limitTraits,
      physicalInput, limitRel.getOffset(), limitRel.getFetch());
  call.transformTo(physicalLimit);
}
@Override public PhysicalOperator getPhysicalOperator(PhysicalPlanCreator creator) throws IOException { Prel child = (Prel) this.getInput(); PhysicalOperator childPOP = child.getPhysicalOperator(creator); // First offset to include into results (inclusive). Null implies it is starting from offset 0 int first = offset != null ? Math.max(0, RexLiteral.intValue(offset)) : 0; // Last offset to stop including into results (exclusive), translating fetch row counts into an offset. // Null value implies including entire remaining result set from first offset Integer last = fetch != null ? Math.max(0, RexLiteral.intValue(fetch)) + first : null; Limit limit; if (isPartitioned) { limit = new PartitionLimit(childPOP, first, last, DrillRelOptUtil.IMPLICIT_COLUMN); } else { limit = new Limit(childPOP, first, last); } return creator.addMetadata(this, limit); }
/**
 * Pushes {@code limit} onto the left side of a row-key join, optionally
 * re-creating {@code project} (when non-null) on top of the rewritten join.
 * Pushdown is best-effort: any failure is logged and the original plan kept.
 */
protected void doPushLimitIntoRowKeyJoin(RelOptRuleCall call, LimitPrel limit, final ProjectPrel project, RowKeyJoinPrel join) {
  final RelNode newChild;
  try {
    RelNode left = join.getLeft();
    RelNode right = join.getRight();
    // Copy the limit's offset/fetch onto the left (row-key producing) input.
    final RelNode limitOnLeft = new LimitPrel(left.getCluster(), left.getTraitSet(), left,
        limit.getOffset(), limit.getFetch());
    RowKeyJoinPrel newJoin = new RowKeyJoinPrel(join.getCluster(), join.getTraitSet(), limitOnLeft,
        right, join.getCondition(), join.getJoinType());
    if (project != null) {
      // Re-create the original project on top of the rewritten join.
      final ProjectPrel newProject = new ProjectPrel(project.getCluster(), project.getTraitSet(),
          newJoin, project.getProjects(), project.getRowType());
      newChild = newProject;
    } else {
      newChild = newJoin;
    }
    call.transformTo(newChild);
    // Parameterized logging: the message (and join.toString()) is only built
    // when debug logging is actually enabled.
    logger.debug("pushLimitIntoRowKeyJoin: Pushed limit on left side of Join {}", join);
  } catch (Exception e) {
    // Pushdown is only an optimization; swallow the failure so planning continues.
    logger.warn("pushLimitIntoRowKeyJoin: Exception while trying limit pushdown!", e);
  }
}
}
@Override public boolean matches(RelOptRuleCall call) { LimitPrel limitPrel = call.rel(0); ProjectPrel projectPrel = call.rel(1); // pushdown only apply limit but not offset, // so if getFetch() return null no need to run this rule. // Do not push across Project containing CONVERT_FROMJSON for limit 0 queries. For limit 0 queries, this would // mess up the schema since Convert_FromJson() is different from other regular functions in that it only knows // the output schema after evaluation is performed. When input has 0 row, Drill essentially does not have a way // to know the output type. if (!limitPrel.isPushDown() && (limitPrel.getFetch() != null) && (!DrillRelOptUtil.isLimit0(limitPrel.getFetch()) || !DrillRelOptUtil.isProjectOutputSchemaUnknown(projectPrel))) { return true; } return false; } };
// Wrap the rewritten plan in a new limit carrying the original offset/fetch,
// adding the index collation and DRILL_PHYSICAL traits to the trait set.
// NOTE(review): the enclosing method is not visible in this chunk — presumably
// `rel` is the original limit node and `indexContext` describes the chosen
// index; confirm against the surrounding code.
newRel = new LimitPrel(newRel.getCluster(), newRel.getTraitSet().plus(indexContext.getCollation()).plus(Prel.DRILL_PHYSICAL), newRel, IndexPlanUtils.getOffset(rel), IndexPlanUtils.getFetch(rel));
@Override public void onMatch(RelOptRuleCall call) { final ProjectPrel project = call.rel(1); final LimitPrel limit = call.rel(0); RelNode child = project.getInput(); final RelNode limitUnderProject = new LimitPrel(child.getCluster(), child.getTraitSet(), child, limit.getOffset(), limit.getFetch()); final RelNode newProject = new ProjectPrel(project.getCluster(), project.getTraitSet(), limitUnderProject, project.getProjects(), project.getRowType()); if (DrillRelOptUtil.isProjectFlatten(project)) { //Preserve limit above the project since Flatten can produce more rows. Also mark it so we do not fire the rule again. child = newProject; final RelNode limitAboveProject = new LimitPrel(child.getCluster(), child.getTraitSet(), child, limit.getOffset(), limit.getFetch(), true); call.transformTo(limitAboveProject); } else { call.transformTo(newProject); } }
@Override public boolean matches(RelOptRuleCall call) { final ScanPrel scan = call.rel(1); final LimitPrel limit = call.rel(0); // pushdown only apply limit but not offset, // so if getFetch() return null no need to run this rule. if (scan.getGroupScan().supportsLimitPushdown() && !limit.isPushDown() && limit.getFetch() != null) { if ((scan.getGroupScan() instanceof JsonTableGroupScan && ((JsonTableGroupScan) scan.getGroupScan()).isIndexScan()) || (scan.getGroupScan() instanceof RestrictedJsonTableGroupScan)) { return true; } } return false; } };
/**
 * Pushes {@code limit} onto the left input of a row-key join; when
 * {@code project} is non-null it is rebuilt above the new join. Any failure
 * is logged and swallowed so the original (un-pushed) plan remains valid.
 */
protected void doPushLimitIntoRowKeyJoin(RelOptRuleCall call, LimitPrel limit, final ProjectPrel project, RowKeyJoinPrel join) {
  final RelNode newChild;
  try {
    RelNode left = join.getLeft();
    RelNode right = join.getRight();
    // The limit applies to the left (row-key producing) side of the join.
    final RelNode limitOnLeft = new LimitPrel(left.getCluster(), left.getTraitSet(), left,
        limit.getOffset(), limit.getFetch());
    RowKeyJoinPrel newJoin = new RowKeyJoinPrel(join.getCluster(), join.getTraitSet(), limitOnLeft,
        right, join.getCondition(), join.getJoinType());
    if (project != null) {
      final ProjectPrel newProject = new ProjectPrel(project.getCluster(), project.getTraitSet(),
          newJoin, project.getProjects(), project.getRowType());
      newChild = newProject;
    } else {
      newChild = newJoin;
    }
    call.transformTo(newChild);
    // Use SLF4J parameterized logging instead of string concatenation so the
    // message and join.toString() are only computed when debug is enabled.
    logger.debug("pushLimitIntoRowKeyJoin: Pushed limit on left side of Join {}", join);
  } catch (Exception e) {
    // Best-effort optimization: log and keep the original plan on failure.
    logger.warn("pushLimitIntoRowKeyJoin: Exception while trying limit pushdown!", e);
  }
}
}
@Override public boolean matches(RelOptRuleCall call) { final LimitPrel limit = call.rel(0); // We do not fire this rule if fetch() is null (indicating we have to fetch all the // remaining rows starting from offset. return !limit.isPushDown() && limit.getFetch() != null; } };