@Override
public double getOption(DoubleValidator validator) {
  return getOptionSafe(validator).getFloatVal();
}
public double getMinimumCostPerSplit(SourceType sourceType) {
  // prefer a per-source override; otherwise fall back to the global default
  if (SOURCES_WITH_MIN_COST.contains(sourceType.value().toLowerCase())) {
    OptionValue value = options.getOption(
        String.format("planner.%s.min_cost_per_split", sourceType.value().toLowerCase()));
    if (value != null) {
      return value.getFloatVal();
    }
  }
  return options.getOption(DEFAULT_SCAN_MIN_COST);
}
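// For illustration, a minimal sketch of the per-source key the method above
// resolves; the source name "s3" is a hypothetical example, not taken from the
// surrounding code.
static String exampleMinCostKey() {
  return String.format("planner.%s.min_cost_per_split", "s3");
  // -> "planner.s3.min_cost_per_split"; when that option is unset,
  // DEFAULT_SCAN_MIN_COST applies instead
}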
public double getNestedLoopJoinFactor() {
  return options.getOption(NESTEDLOOPJOIN_FACTOR.getOptionName()).getFloatVal();
}
@Override
public void validate(OptionValue v) {
  super.validate(v);
  if (v.getFloatVal() > max || v.getFloatVal() < min) {
    throw UserException.validationError()
        .message(String.format("Option %s must be between %f and %f.", getOptionName(), min, max))
        .build(logger);
  }
}
}
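// For context, a self-contained sketch of the range-check pattern the validator
// above implements; RangeCheckedOption and its fields are hypothetical stand-ins,
// not the actual Dremio validator types.
final class RangeCheckedOption {
  private final String name;
  private final double min;
  private final double max;

  RangeCheckedOption(String name, double min, double max) {
    this.name = name;
    this.min = min;
    this.max = max;
  }

  // mirrors validate() above: reject values outside [min, max]
  double validate(double value) {
    if (value > max || value < min) {
      throw new IllegalArgumentException(
          String.format("Option %s must be between %f and %f.", name, min, max));
    }
    return value;
  }
}
// e.g. new RangeCheckedOption("planner.example.factor", 0, 100).validate(10)
// passes, while validate(150) throws.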
@Override
public double getOption(DoubleValidator validator) {
  return getOption(validator.getOptionName()).getFloatVal();
}
public double getHashJoinSwapMarginFactor() {
  // the option is stored as a percentage; convert it to a fraction
  return options.getOption(HASH_JOIN_SWAP_MARGIN_FACTOR.getOptionName()).getFloatVal() / 100d;
}
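// Hypothetical use of the fraction returned above: with an option value of 10,
// the method yields 0.10. The sketch below is illustrative only; shouldSwap and
// its parameters are not the actual Dremio call site.
static boolean shouldSwap(double leftRowCount, double rightRowCount, double marginFraction) {
  // swap the join inputs only when the right side is smaller than the left by
  // more than the margin, e.g. rightRowCount < leftRowCount * 0.90 for a 10% margin
  return rightRowCount < leftRowCount * (1 - marginFraction);
}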
public double getRowCountEstimateFactor() {
  return options.getOption(JOIN_ROW_COUNT_ESTIMATE_FACTOR.getOptionName()).getFloatVal();
}
@Override
public OptionValueWrapper next() {
  final OptionValue value = mergedOptions.next();
  // classify the option: set at boot time, still at its default, or explicitly changed
  final Status status;
  if (value.getType() == OptionType.BOOT) {
    status = Status.BOOT;
  } else {
    final OptionValue def = fragmentOptions.getValidator(value.getName()).getDefault();
    if (value.equalsIgnoreType(def)) {
      status = Status.DEFAULT;
    } else {
      status = Status.CHANGED;
    }
  }
  return new OptionValueWrapper(value.getName(), value.getKind(), value.getType(),
      value.getNumVal(), value.getStringVal(), value.getBoolVal(), value.getFloatVal(), status);
}
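// A hypothetical consumer of the iterator above, filtering for options that were
// explicitly changed from their defaults. The public field names on
// OptionValueWrapper are assumed here for illustration (and java.util.Iterator
// is assumed imported).
static void printChangedOptions(Iterator<OptionValueWrapper> it) {
  while (it.hasNext()) {
    final OptionValueWrapper wrapper = it.next();
    if (wrapper.status == Status.CHANGED) { // status as assigned in next() above
      System.out.println(wrapper.name + " differs from its default");
    }
  }
}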
public SpillManager(SabotConfig sabotConfig, OptionManager optionManager, String id,
    Configuration hadoopConf, SpillService spillService, String caller) {
  final List<String> directories = new ArrayList<>(sabotConfig.getStringList(ExecConstants.SPILL_DIRS));
  if (directories.isEmpty()) {
    throw UserException.dataWriteError().message("No spill locations specified.").build(logger);
  }
  this.id = id;
  this.caller = caller;
  this.hadoopConf = hadoopConf;
  this.spillService = spillService;
  // load options, falling back to the validators' defaults when no OptionManager is available
  if (optionManager != null) {
    this.minDiskSpacePercentage = optionManager.getOption(ExecConstants.SPILL_DISK_SPACE_LIMIT_PERCENTAGE);
    this.minDiskSpace = optionManager.getOption(ExecConstants.SPILL_DISK_SPACE_LIMIT_BYTES);
    this.healthCheckInterval = optionManager.getOption(ExecConstants.SPILL_DISK_SPACE_CHECK_INTERVAL);
  } else {
    this.minDiskSpacePercentage = ExecConstants.SPILL_DISK_SPACE_LIMIT_PERCENTAGE.getDefault().getFloatVal();
    this.minDiskSpace = ExecConstants.SPILL_DISK_SPACE_LIMIT_BYTES.getDefault().getNumVal();
    this.healthCheckInterval = ExecConstants.SPILL_DISK_SPACE_CHECK_INTERVAL.getDefault().getNumVal();
  }
  try {
    spillService.makeSpillSubdirs(id);
  } catch (UserException e) {
    throw UserException.dataWriteError(e)
        .addContext("Caller", caller)
        .build(logger);
  }
}
@SuppressWarnings("rawtypes") private Setting toSetting(OptionValue option){ // display the value if it is the whitelist or has been set. final boolean showOutsideWhitelist = options.isSet(option.getName()); switch(option.getKind()){ case BOOLEAN: return new Setting.BooleanSetting(option.getName(), option.getBoolVal(), showOutsideWhitelist); case DOUBLE: return new Setting.FloatSetting(option.getName(), option.getFloatVal(), showOutsideWhitelist); case LONG: return new Setting.IntegerSetting(option.getName(), option.getNumVal(), showOutsideWhitelist); case STRING: return new Setting.TextSetting(option.getName(), option.getStringVal(), showOutsideWhitelist); default: throw new IllegalStateException("Unable to handle kind " + option.getKind()); } } }
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return super.computeSelfCost(planner).multiplyBy(.1);
  }
  final RelNode child = this.getInput();
  double inputRows = mq.getRowCount(child);
  int numGroupByFields = this.getGroupCount();
  int numAggrFields = this.aggCalls.size();
  // cpu cost of hashing each grouping key
  double cpuCost = DremioCost.HASH_CPU_COST * numGroupByFields * inputRows;
  // add cpu cost for computing the aggregate functions
  cpuCost += DremioCost.FUNC_CPU_COST * numAggrFields * inputRows;
  // assume in-memory for now until we enforce operator-level memory constraints
  double diskIOCost = 0;
  // TODO: use distinct row count and account for hash table template overhead
  double factor = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.HASH_AGG_TABLE_FACTOR_KEY).getFloatVal();
  long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();
  // memory for the hash table: key columns + hash values + links
  double memCost = ((fieldWidth * numGroupByFields) + IntHolder.WIDTH + IntHolder.WIDTH)
      * inputRows * factor;
  Factory costFactory = (Factory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0 /* network cost */, memCost);
}
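// To make the memory formula above concrete, a worked example with assumed
// numbers (all illustrative): fieldWidth = 8 bytes, two group-by keys,
// IntHolder.WIDTH = 4 as a stand-in, one million input rows, table factor = 1.1.
static double exampleMemCost() {
  final long fieldWidth = 8;
  final int numGroupByFields = 2;
  final int intHolderWidth = 4; // stand-in for IntHolder.WIDTH
  final double inputRows = 1_000_000d;
  final double factor = 1.1;
  // (8 * 2 + 4 + 4) * 1e6 * 1.1 = 24 * 1e6 * 1.1 = 2.64e7 bytes, about 26 MB
  return ((fieldWidth * numGroupByFields) + intHolderWidth + intHolderWidth)
      * inputRows * factor;
}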
double factor = PrelUtil.getPlannerSettings(planner).getOptions()
    .getOption(ExecConstants.HASH_JOIN_TABLE_FACTOR_KEY).getFloatVal();
long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
    .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();