/**
 * Cost is the total estimated row count across all member rels; CPU and IO
 * components are not modeled (reported as zero).
 */
@Override
public RelOptCost computeSelfCost(final RelOptPlanner planner, final RelMetadataQuery mq) {
  final double totalRowCount = rels.stream()
      .mapToDouble(rel -> mq.getRowCount(rel))
      .sum();
  return planner.getCostFactory().makeCost(totalRowCount, 0, 0);
}
/**
 * Cost tracks the source rel's estimated row count, inflated by a constant
 * factor of 10; CPU and IO are reported as zero.
 */
@Override
public RelOptCost computeSelfCost(final RelOptPlanner planner, final RelMetadataQuery mq) {
  final double sourceRowCount = mq.getRowCount(sourceRel);
  final RelOptCost baseCost = planner.getCostFactory().makeCost(sourceRowCount, 0, 0);
  return baseCost.multiplyBy(10);
}
}
/**
 * Discounts the parent implementation's cost to 5% and keeps only its
 * row-count component, zeroing CPU and IO.
 */
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  final RelOptCost discounted = super.computeSelfCost(planner, mq).multiplyBy(0.05);
  final double rows = discounted.getRows();
  return planner.getCostFactory().makeCost(rows, 0, 0);
}
/**
 * Since the project under an aggregate may have its expressions reduced by
 * {@link org.apache.kylin.query.optrule.AggregateProjectReduceRule}, the number
 * of projected expressions is folded into the cost so the reduced project wins.
 *
 * A RexOver is made much more expensive so the plan is transformed into
 * {@link org.apache.kylin.query.relnode.OLAPWindowRel} by the rules in
 * {@link org.apache.calcite.rel.rules.ProjectToWindowRule}.
 */
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  final boolean hasRexOver = RexOver.containsOver(getProjects(), null);
  // Heavy penalty for windowed expressions; otherwise scale by expression count.
  final int overPenalty = hasRexOver ? 50 : 1;
  final RelOptCost scaled = super.computeSelfCost(planner, mq)
      .multiplyBy(.05)
      .multiplyBy(getProjects().size() * overPenalty)
      .plus(planner.getCostFactory().makeCost(0.1 * caseCount, 0, 0));
  // Keep only the row-count component of the combined estimate.
  return planner.getCostFactory().makeCost(scaled.getRows(), 0, 0);
}
/**
 * Join cost model: the sum of the two inputs' estimated cardinalities; CPU and
 * IO components are not modeled here.
 */
@Override
public RelOptCost getCost(HiveJoin join) {
  // Obtain the metadata query from the rel's cluster rather than the deprecated
  // RelMetadataQuery.instance(): a fresh instance bypasses the planner's
  // metadata cache, and the sibling getCost implementation in this source
  // already uses the cluster's query — keep the two consistent.
  final RelMetadataQuery mq = join.getCluster().getMetadataQuery();
  final double leftRCount = mq.getRowCount(join.getLeft());
  final double rightRCount = mq.getRowCount(join.getRight());
  return HiveCost.FACTORY.makeCost(leftRCount + rightRCount, 0.0, 0.0);
}
// Sort-merge path: IO estimate from streaming the sorted inputs; rows/CPU
// computed earlier. NOTE(review): mid-method fragment — enclosing signature
// not visible in this chunk.
final double ioCost = algoUtils.computeSortMergeIOCost(relationInfos); return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
/**
 * Join cost model: the sum of the two inputs' estimated cardinalities; CPU and
 * IO components are not modeled (reported as zero).
 */
@Override
public RelOptCost getCost(HiveJoin join) {
  final RelMetadataQuery mq = join.getCluster().getMetadataQuery();
  final double totalRowCount = mq.getRowCount(join.getLeft()) + mq.getRowCount(join.getRight());
  return HiveCost.FACTORY.makeCost(totalRowCount, 0.0, 0.0);
}
// Row-count-only cost; CPU and IO zeroed. NOTE(review): mid-method fragment —
// enclosing signature not visible in this chunk.
return planner.getCostFactory().makeCost(cost, 0, 0);
// Assemble the final cost from cardinality, CPU and IO computed above.
// NOTE(review): mid-method fragment — enclosing signature not visible here.
return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
// Map-join path: IO estimate accounts for the streamed side and parallelism.
// NOTE(review): mid-method fragment — enclosing signature not visible here.
final double ioCost = algoUtils.computeMapJoinIOCost(relationInfos, streaming, parallelism); return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
// Assemble the final cost from cardinality, CPU and IO computed above.
// NOTE(review): mid-method fragment — enclosing signature not visible here.
return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
@Override public RelOptCost getAggregateCost(HiveAggregate aggregate) { if (aggregate.isBucketedInput()) { return HiveCost.FACTORY.makeZeroCost(); } else { final RelMetadataQuery mq = aggregate.getCluster().getMetadataQuery(); // 1. Sum of input cardinalities final Double rCount = mq.getRowCount(aggregate.getInput()); if (rCount == null) { return null; } // 2. CPU cost = sorting cost final double cpuCost = algoUtils.computeSortCPUCost(rCount); // 3. IO cost = cost of writing intermediary results to local FS + // cost of reading from local FS for transferring to GBy + // cost of transferring map outputs to GBy operator final Double rAverageSize = mq.getAverageRowSize(aggregate.getInput()); if (rAverageSize == null) { return null; } final double ioCost = algoUtils.computeSortIOCost(new Pair<Double,Double>(rCount,rAverageSize)); // 4. Result return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost); } }
@Override public RelOptCost getAggregateCost(HiveAggregate aggregate) { if (aggregate.isBucketedInput()) { return HiveCost.FACTORY.makeZeroCost(); } else { RelMetadataQuery mq = RelMetadataQuery.instance(); // 1. Sum of input cardinalities final Double rCount = mq.getRowCount(aggregate.getInput()); if (rCount == null) { return null; } // 2. CPU cost = sorting cost final double cpuCost = algoUtils.computeSortCPUCost(rCount); // 3. IO cost = cost of writing intermediary results to local FS + // cost of reading from local FS for transferring to GBy + // cost of transferring map outputs to GBy operator final Double rAverageSize = mq.getAverageRowSize(aggregate.getInput()); if (rAverageSize == null) { return null; } final double ioCost = algoUtils.computeSortIOCost(new Pair<Double,Double>(rCount,rAverageSize)); // 4. Result return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost); } }
// Sort-merge path: IO estimate from streaming the sorted inputs; rows/CPU
// computed earlier. NOTE(review): mid-method fragment — enclosing signature
// not visible in this chunk.
final double ioCost = algoUtils.computeSortMergeIOCost(relationInfos); return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
// Assemble the final cost from cardinality, CPU and IO computed above.
// NOTE(review): mid-method fragment — enclosing signature not visible here.
return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
// Map-join path: IO estimate accounts for the streamed side and parallelism.
// NOTE(review): mid-method fragment — enclosing signature not visible here.
final double ioCost = algoUtils.computeMapJoinIOCost(relationInfos, streaming, parallelism); return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
// Assemble the final cost from cardinality, CPU and IO computed above.
// NOTE(review): mid-method fragment — enclosing signature not visible here.
return HiveCost.FACTORY.makeCost(rCount, cpuCost, ioCost);
/**
 * This intermediate rel has no intrinsic cost; its estimate is made inversely
 * proportional to the number of children so that plans with more pushdown
 * (more children under this node) are preferred. A childless node is
 * unplannable and priced as infinite.
 */
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  final int childCount = nodes.size();
  if (childCount == 0) {
    return planner.getCostFactory().makeInfiniteCost();
  }
  final RelOptCost huge = planner.getCostFactory().makeCost(Integer.MAX_VALUE, Integer.MAX_VALUE, 0);
  return huge.multiplyBy(1.0 / childCount);
}
/** Reports a fixed nominal cost of one row, one CPU unit and one IO unit. */
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  return planner.getCostFactory().makeCost(1, 1, 1);
}
}
/** Reports a fixed nominal cost of one row, one CPU unit and one IO unit. */
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  return planner.getCostFactory().makeCost(1, 1, 1);
}
}