/**
 * Attempts to push the limit into the group scan. If the group scan can absorb the limit,
 * the call is transformed to a new scan (with the original project re-created on top when
 * present); otherwise the plan is left untouched.
 *
 * @param call      the rule call being transformed
 * @param limit     the limit to push down
 * @param project   optional project between the limit and the scan, may be null
 * @param scan      the physical scan below the limit
 * @param groupScan the scan's group scan
 */
protected void doPushLimitIntoGroupScan(RelOptRuleCall call, LimitPrel limit,
    final ProjectPrel project, ScanPrel scan, GroupScan groupScan) {
  try {
    final GroupScan newGroupScan = getGroupScanWithLimit(groupScan, limit);
    if (newGroupScan == null) {
      return; // group scan could not absorb the limit ==> no transformation
    }
    final ScanPrel newScan = new ScanPrel(scan.getCluster(), scan.getTraitSet(), newGroupScan,
        scan.getRowType(), scan.getTable());
    final RelNode newChild;
    if (project != null) {
      // Re-create the project on top of the new scan so the plan shape is preserved.
      final ProjectPrel newProject = new ProjectPrel(project.getCluster(), project.getTraitSet(),
          newScan, project.getProjects(), project.getRowType());
      newChild = newProject;
    } else {
      newChild = newScan;
    }
    call.transformTo(newChild);
    // Parameterized logging avoids building the message string when DEBUG is disabled.
    logger.debug("pushLimitIntoGroupScan: Converted to a new ScanPrel {}", newScan.getGroupScan());
  } catch (Exception e) {
    // Limit pushdown is a best-effort optimization; on any failure keep the original plan.
    logger.warn("pushLimitIntoGroupScan: Exception while trying limit pushdown!", e);
  }
}
// Rule applies only to MapR-DB binary-table or JSON-table scans; any other group scan
// is rejected before delegating to the superclass' matching logic.
@Override public boolean matches(RelOptRuleCall call) {
  // Operand 1 of this rule's pattern is the scan.
  final ScanPrel scan = (ScanPrel) call.rel(1);
  if (scan.getGroupScan() instanceof BinaryTableGroupScan || scan.getGroupScan() instanceof JsonTableGroupScan) {
    return super.matches(call);
  }
  return false;
} }; // NOTE(review): closes an anonymous class whose declaration is outside this view
@Override
protected Object clone() throws CloneNotSupportedException {
  // Clone by constructing a fresh ScanPrel; the group scan itself is also duplicated
  // so the copy does not share mutable scan state with this node.
  return new ScanPrel(this.getCluster(),
      this.getTraitSet(),
      getCopy(this.getGroupScan()),
      this.rowType,
      this.getTable());
}
@Override public RelOptCost computeSelfCost(final RelOptPlanner planner, RelMetadataQuery mq) { final PlannerSettings settings = PrelUtil.getPlannerSettings(planner); final ScanStats stats = this.getGroupScan().getScanStats(settings); final int columnCount = this.getRowType().getFieldCount(); if(PrelUtil.getSettings(getCluster()).useDefaultCosting()) { return planner.getCostFactory().makeCost(stats.getRecordCount() * columnCount, stats.getCpuCost(), stats.getDiskCost()); } // double rowCount = RelMetadataQuery.getRowCount(this); double rowCount = stats.getRecordCount(); // As DRILL-4083 points out, when columnCount == 0, cpuCost becomes zero, // which makes the costs of HiveScan and HiveDrillNativeParquetScan the same double cpuCost = rowCount * Math.max(columnCount, 1); // For now, assume cpu cost is proportional to row count. // If a positive value for CPU cost is given multiply the default CPU cost by given CPU cost. if (stats.getCpuCost() > 0) { cpuCost *= stats.getCpuCost(); } double ioCost = stats.getDiskCost(); DrillCostFactory costFactory = (DrillCostFactory)planner.getCostFactory(); return costFactory.makeCost(rowCount, cpuCost, ioCost, 0); }
// NOTE(review): this fragment looks truncated -- braces do not balance within the visible
// span (the early 'return;' is never closed), so the control flow below cannot be fully
// verified from here.
final PlannerSettings settings = PrelUtil.getPlannerSettings(indexContext.call.getPlanner());
DbGroupScan groupScan = (DbGroupScan)indexContext.scan.getGroupScan();
boolean isIndexScan = groupScan.isIndexScan();
if (!isIndexScan) {
  // Register this index as a candidate; the column count comes from the lower project
  // when one exists, otherwise from the scan's own row type.
  selector.addIndex(indexDesc, true,
      indexContext.lowerProject != null ?
          indexContext.lowerProject.getRowType().getFieldCount() :
          indexContext.scan.getRowType().getFieldCount());
  // A sort is expected to be present in this context -- fail fast otherwise.
  Preconditions.checkNotNull(indexContext.getSort());
  // No field collations on the scan ==> the index cannot help satisfy the sort.
  if (indexContext.scan.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE).getFieldCollations().size() == 0) {
    return;
  // NOTE(review): missing '}' here in the visible text -- presumably lost in extraction.
  RelNode finalRel = indexContext.scan.copy(indexContext.scan.getTraitSet(), indexContext.scan.getInputs());
  if (indexContext.lowerProject != null) {
    List<RelNode> inputs = Lists.newArrayList();
/** Estimates this scan's row count directly from the group scan's statistics. */
@Override
public double estimateRowCount(RelMetadataQuery mq) {
  final PlannerSettings plannerSettings = PrelUtil.getPlannerSettings(getCluster());
  final double estimatedRows = this.getGroupScan().getScanStats(plannerSettings).getRecordCount();
  // Identity hash identifies which group scan instance produced the estimate.
  logger.debug("#{}.estimateRowCount get rowCount {} from groupscan {}",
      this.getId(), estimatedRows, System.identityHashCode(this.getGroupScan()));
  return estimatedRows;
}
// NOTE(review): fragment -- the enclosing method and the loop's closing brace are not
// visible here.
// Combine the index condition with the remainder condition into one filter expression.
coveringCondition = IndexPlanUtils.getTotalFilter(indexCondition, remainderCondition, indexScanPrel.getCluster().getRexBuilder());
// Rewrite functional-index expressions so they reference the index scan's row type.
RexNode newIndexCondition = rewriteFunctionalCondition(coveringCondition, indexScanPrel.getRowType(), functionInfo);
RelTraitSet indexFilterTraitSet = indexScanPrel.getTraitSet();
// Place the rewritten filter directly on top of the index scan.
FilterPrel indexFilterPrel = new FilterPrel(indexScanPrel.getCluster(), indexFilterTraitSet, indexScanPrel, newIndexCondition);
DrillParseContext parseContxt = new DrillParseContext(PrelUtil.getPlannerSettings(newProject.getCluster()));
// Rewrite each project expression against the index scan's (possibly renamed) columns.
for(RexNode projectRex: newProject.getProjects()) {
  RexNode newRex = IndexPlanUtils.rewriteFunctionalRex(indexContext, parseContxt, null, origScan, projectRex, indexScanPrel.getRowType(), functionInfo);
  newProjects.add(newRex);
// NOTE(review): fragment -- opening/closing context is not visible in this view.
// Copy the primary table's statistics onto the index scan so that costing of the
// index plan is comparable with the original plan.
((IndexGroupScan)indexScanPrel.getGroupScan()).setStatistics(((DbGroupScan)IndexPlanUtils.getGroupScan(origScan)).getStatistics());
RelTraitSet indexScanTraitSet = indexScanPrel.getTraitSet();
// Rewrite each project expression against the index scan's row type.
for(RexNode projectRex: newProject.getProjects()) {
  RexNode newRex = IndexPlanUtils.rewriteFunctionalRex(indexContext, parseContxt, null, origScan, projectRex, indexScanPrel.getRowType(), functionInfo);
  newProjects.add(newRex);
/** Records a scan's parallelization and affinity properties, then adds it. */
public void addScan(ScanPrel prel) {
  // The fragment can never run wider than what the scan itself supports.
  if (prel.getGroupScan().getMaxParallelizationWidth() < maxWidth) {
    maxWidth = prel.getGroupScan().getMaxParallelizationWidth();
  }
  // A minimum width above one means the scan must be split across sub-scans.
  isMultiSubScan = prel.getGroupScan().getMinParallelizationWidth() > 1;
  distributionAffinity = prel.getDistributionAffinity();
  add(prel);
}
/** Converts a logical Drill scan into a physical ScanPrel with the proper distribution. */
@Override
public void onMatch(RelOptRuleCall call) {
  final DrillScanRel logicalScan = (DrillScanRel) call.rel(0);
  GroupScan groupScan = logicalScan.getGroupScan();

  // A scan that can run wider than one fragment, or that has hard node affinity,
  // must be randomly distributed; everything else stays on a single fragment.
  final boolean distributed = groupScan.getMaxParallelizationWidth() > 1
      || groupScan.getDistributionAffinity() == DistributionAffinity.HARD;
  DrillDistributionTrait partition =
      distributed ? DrillDistributionTrait.RANDOM_DISTRIBUTED : DrillDistributionTrait.SINGLETON;

  final RelTraitSet traits = logicalScan.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(partition);
  call.transformTo(new ScanPrel(logicalScan.getCluster(), traits, groupScan,
      logicalScan.getRowType(), logicalScan.getTable()));
}
// NOTE(review): fragment -- the enclosing method is not visible here.
// Fetch the field at ordinal 'idx' from the scan's row type.
RelDataTypeField f = indexContext.scan.getRowType().getFieldList().get(idx);
// Strip backtick quoting from the field name (replaceAll takes a regex, but the
// backtick is a literal character, so all occurrences are removed).
String pathSeg = f.getName().replaceAll("`", "");
// Break the unquoted name into its dotted path segments.
final String[] segs = pathSeg.split("\\.");
private boolean buildAndCheckCollation(IndexProperties indexProps) { IndexDescriptor indexDesc = indexProps.getIndexDesc(); FunctionalIndexInfo functionInfo = indexDesc.getFunctionalInfo(); RelCollation inputCollation; // for the purpose of collation we can assume that a covering index scan would provide // the collation property that would be relevant for non-covering as well ScanPrel indexScanPrel = IndexPlanUtils.buildCoveringIndexScan(indexContext.getScan(), indexDesc.getIndexGroupScan(), indexContext, indexDesc); inputCollation = indexScanPrel.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE); // we don't create collation for Filter because it will inherit the child's collation if (indexContext.hasLowerProject()) { inputCollation = IndexPlanUtils.buildCollationProject(indexContext.getLowerProject().getProjects(), null, indexContext.getScan(), functionInfo,indexContext); } if (indexContext.hasUpperProject()) { inputCollation = IndexPlanUtils.buildCollationProject(indexContext.getUpperProject().getProjects(), indexContext.getLowerProject(), indexContext.getScan(), functionInfo, indexContext); } if ((inputCollation != null) && (inputCollation.satisfies(indexContext.getCollation()))) { return true; } return false; }
// Rule applies only to MapR-DB binary-table or JSON-table scans; any other group scan
// is rejected before delegating to the superclass' matching logic.
@Override public boolean matches(RelOptRuleCall call) {
  // Operand 2 of this rule's pattern is the scan (a project sits between filter and scan).
  final ScanPrel scan = (ScanPrel) call.rel(2);
  if (scan.getGroupScan() instanceof BinaryTableGroupScan || scan.getGroupScan() instanceof JsonTableGroupScan) {
    return super.matches(call);
  }
  return false;
} }; // NOTE(review): closes an anonymous class whose declaration is outside this view
public static ScanPrel buildCoveringIndexScan(DrillScanRelBase origScan, IndexGroupScan indexGroupScan, IndexCallContext indexContext, IndexDescriptor indexDesc) { FunctionalIndexInfo functionInfo = indexDesc.getFunctionalInfo(); //to record the new (renamed)paths added List<SchemaPath> rewrittenPaths = Lists.newArrayList(); DbGroupScan dbGroupScan = (DbGroupScan) getGroupScan(origScan); indexGroupScan.setColumns( rewriteFunctionColumn(dbGroupScan.getColumns(), functionInfo, rewrittenPaths)); DrillDistributionTrait partition = scanIsPartition(getGroupScan(origScan))? DrillDistributionTrait.RANDOM_DISTRIBUTED : DrillDistributionTrait.SINGLETON; RelDataType newRowType = FunctionalIndexHelper.rewriteFunctionalRowType(origScan, indexContext, functionInfo, rewrittenPaths); // add a default collation trait otherwise Calcite runs into a ClassCastException, which at first glance // seems like a Calcite bug RelTraitSet indexScanTraitSet = origScan.getTraitSet().plus(Prel.DRILL_PHYSICAL). plus(RelCollationTraitDef.INSTANCE.getDefault()).plus(partition); // Create the collation traits for index scan based on the index columns under the // condition that the index actually has collation property (e.g hash indexes don't) if (indexDesc.getCollation() != null) { RelCollation collationTrait = buildCollationCoveringIndexScan(indexDesc, indexContext); indexScanTraitSet = indexScanTraitSet.plus(collationTrait); } ScanPrel indexScanPrel = new ScanPrel(origScan.getCluster(), indexScanTraitSet, indexGroupScan, newRowType, origScan.getTable()); return indexScanPrel; }
// NOTE(review): fragment -- the enclosing method is not visible here.
// Look up a field by name in the scan's row type; presumably leftFieldName refers to
// the left side of a join -- verify against the caller.
RelDataType scanRowType = scanPrel.getRowType();
// getField(name, caseSensitive, elideRecord) returns null when the field is absent,
// which would NPE on the next line -- assumes the name is known to exist. TODO confirm.
RelDataTypeField field = scanRowType.getField(leftFieldName, true, true);
int index = field.getIndex();
@Override public void onMatch(RelOptRuleCall call) { final ScanPrel scan = call.rel(1); final FilterPrel filter = call.rel(0); final RexNode condition = filter.getCondition(); LogicalExpression conditionExp = DrillOptiq.toDrill(new DrillParseContext(PrelUtil.getPlannerSettings(call.getPlanner())), scan, condition); KafkaGroupScan groupScan = (KafkaGroupScan) scan.getGroupScan(); logger.info("Partitions ScanSpec before pushdown: " + groupScan.getPartitionScanSpecList()); KafkaPartitionScanSpecBuilder builder = new KafkaPartitionScanSpecBuilder(groupScan, conditionExp); List<KafkaPartitionScanSpec> newScanSpec = null; newScanSpec = builder.parseTree(); builder.close(); //Close consumer //No pushdown if(newScanSpec == null) { return; } logger.info("Partitions ScanSpec after pushdown: " + newScanSpec); GroupScan newGroupScan = groupScan.cloneWithNewSpec(newScanSpec); final ScanPrel newScanPrel = new ScanPrel(scan.getCluster(), filter.getTraitSet(), newGroupScan, scan.getRowType(), scan.getTable()); call.transformTo(filter.copy(filter.getTraitSet(), ImmutableList.of(newScanPrel))); }
/** Dispatches the filter pushdown to the binary-table or JSON-table implementation. */
@Override
public void onMatch(RelOptRuleCall call) {
  final FilterPrel filter = call.rel(0);
  final ScanPrel scan = call.rel(1);
  final RexNode condition = filter.getCondition();

  // matches() guarantees the scan is one of the two MapR-DB group scan types.
  if (scan.getGroupScan() instanceof JsonTableGroupScan) {
    JsonTableGroupScan jsonGroupScan = (JsonTableGroupScan) scan.getGroupScan();
    doPushFilterIntoJsonGroupScan(call, filter, null, scan, jsonGroupScan, condition);
  } else {
    assert (scan.getGroupScan() instanceof BinaryTableGroupScan);
    BinaryTableGroupScan binaryGroupScan = (BinaryTableGroupScan) scan.getGroupScan();
    doPushFilterIntoBinaryGroupScan(call, filter, null, scan, binaryGroupScan, condition);
  }
}
protected void doPushFilterToScan(final RelOptRuleCall call, final FilterPrel filter, final ProjectPrel project, final ScanPrel scan, final HBaseGroupScan groupScan, final RexNode condition) { final LogicalExpression conditionExp = DrillOptiq.toDrill(new DrillParseContext(PrelUtil.getPlannerSettings(call.getPlanner())), scan, condition); final HBaseFilterBuilder hbaseFilterBuilder = new HBaseFilterBuilder(groupScan, conditionExp); final HBaseScanSpec newScanSpec = hbaseFilterBuilder.parseTree(); if (newScanSpec == null) { return; //no filter pushdown ==> No transformation. } final HBaseGroupScan newGroupsScan = new HBaseGroupScan(groupScan.getUserName(), groupScan.getStoragePlugin(), newScanSpec, groupScan.getColumns()); newGroupsScan.setFilterPushedDown(true); final ScanPrel newScanPrel = new ScanPrel(scan.getCluster(), filter.getTraitSet(), newGroupsScan, scan.getRowType(), scan.getTable()); // Depending on whether is a project in the middle, assign either scan or copy of project to childRel. final RelNode childRel = project == null ? newScanPrel : project.copy(project.getTraitSet(), ImmutableList.of(newScanPrel)); if (hbaseFilterBuilder.isAllExpressionsConverted()) { /* * Since we could convert the entire filter condition expression into an HBase filter, * we can eliminate the filter operator altogether. */ call.transformTo(childRel); } else { call.transformTo(filter.copy(filter.getTraitSet(), ImmutableList.of(childRel))); } }
/** Pushes the project into the group scan, but only for JSON table scans. */
@Override
public void onMatch(RelOptRuleCall call) {
  final ProjectPrel project = call.rel(0);
  final ScanPrel scan = call.rel(1);

  // Guard clause: only JSON table group scans understand this pushdown.
  if (!(scan.getGroupScan() instanceof JsonTableGroupScan)) {
    return;
  }
  JsonTableGroupScan jsonGroupScan = (JsonTableGroupScan) scan.getGroupScan();
  doPushProjectIntoGroupScan(call, project, scan, jsonGroupScan);
}
// NOTE(review): this fragment looks truncated/garbled -- braces do not balance within the
// visible span and at least one condition has lost its 'if (' prefix; verify against the
// original file.
DrillRelOptUtil.getFieldsInformation(scan.getRowType(), project.getProjects());
// Bail out when nothing can be pushed: unresolvable columns, a star query, or a group
// scan that refuses projection pushdown.
if (columnInfo == null || Utilities.isStarQuery(columnInfo.getFields())
    || !groupScan.canPushdownProjects(columnInfo.getFields())) {
// Copy every non-collation trait from the original scan.
for (RelTrait trait : scan.getTraitSet()) {
  if (!(trait instanceof RelCollation)) {
    // NOTE(review): the result of plus() is discarded here; RelTraitSet is immutable, so as
    // written this line has no effect -- an assignment was presumably lost in extraction.
    newTraits.plus(trait);
// New scan carries the pruned column set and a row type rebuilt for those columns.
final ScanPrel newScan = new ScanPrel(scan.getCluster(), newTraits.plus(Prel.DRILL_PHYSICAL),
    groupScan.clone(columnInfo.getFields()),
    columnInfo.createNewRowType(project.getInput().getCluster().getTypeFactory()), scan.getTable());
// NOTE(review): condition prefix missing before this call -- likely part of an 'if (... &&'.
sameRowTypeProjectionsFields(project.getRowType(), newScan.getRowType())) {
  call.transformTo(newScan);
} else {