/**
 * Creates a new costs object from the given network and disk cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 */
public Costs(double networkCost, double diskCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
}
/**
 * Creates a new costs object from the given network, disk, and CPU cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 * @param cpuCost The cost for CPU operations.
 */
public Costs(double networkCost, double diskCost, double cpuCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
	setCpuCost(cpuCost);
}
/**
 * Adds the cost of reading a file input: the file's size in bytes of disk cost,
 * or the UNKNOWN marker when no size estimate is available.
 *
 * @param fileSizeInBytes Estimated file size, or a negative value if unknown.
 * @param costs The costs object to add the computed costs to.
 */
@Override
public void addFileInputCost(long fileSizeInBytes, Costs costs) {
	if (fileSizeInBytes < 0) {
		// no estimate available - mark the disk cost as unknown
		costs.setDiskCost(Costs.UNKNOWN);
	} else {
		costs.addDiskCost(fileSizeInBytes);
	}
	// heuristic: reading a file always contributes one base cost unit
	costs.addHeuristicDiskCost(HEURISTIC_COST_BASE);
}
@Override public void addLocalSortCost(EstimateProvider estimates, Costs costs) { final long s = estimates.getEstimatedOutputSize(); // we assume a two phase merge sort, so all in all 2 I/O operations per block if (s <= 0) { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } else { costs.addDiskCost(2 * s); costs.addCpuCost((long) (s * SORTING_CPU_FACTOR)); } costs.addHeuristicDiskCost(2 * HEURISTIC_COST_BASE); costs.addHeuristicCpuCost((long) (HEURISTIC_COST_BASE * SORTING_CPU_FACTOR)); }
@Override public void addBlockNestedLoopsCosts(EstimateProvider outerSide, EstimateProvider innerSide, long blockSize, Costs costs, int costWeight) { long is = innerSide.getEstimatedOutputSize(); long os = outerSide.getEstimatedOutputSize(); if (is > 0 && os > 0) { long loops = Math.max(os / blockSize, 1); costs.addDiskCost(loops * is); costs.addCpuCost((long) (loops * is * MATERIALIZATION_CPU_FACTOR)); } else { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } // hack: assume 1k loops (much cheaper than the streamed variant!) costs.addHeuristicDiskCost(HEURISTIC_COST_BASE * 1000); costs.addHeuristicCpuCost((long) (HEURISTIC_COST_BASE * 1000 * MATERIALIZATION_CPU_FACTOR)); costs.multiplyWith(costWeight); }
@Override public void addArtificialDamCost(EstimateProvider estimates, long bufferSize, Costs costs) { final long s = estimates.getEstimatedOutputSize(); // we assume spilling and re-reading if (s <= 0) { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } else { costs.addDiskCost(2 * s); costs.setCpuCost((long) (s * MATERIALIZATION_CPU_FACTOR)); } costs.addHeuristicDiskCost(2 * HEURISTIC_COST_BASE); costs.addHeuristicCpuCost((long) (HEURISTIC_COST_BASE * MATERIALIZATION_CPU_FACTOR)); } }
/** * Calculates the costs for the cached variant of the hybrid hash join. * We are assuming by default that half of the cached hash table fit into memory. */ @Override public void addCachedHybridHashCosts(EstimateProvider buildSideInput, EstimateProvider probeSideInput, Costs costs, int costWeight) { if (costWeight < 1) { throw new IllegalArgumentException("The cost weight must be at least one."); } long bs = buildSideInput.getEstimatedOutputSize(); long ps = probeSideInput.getEstimatedOutputSize(); if (bs > 0 && ps > 0) { long overall = 2*bs + costWeight*ps; costs.addDiskCost(overall); costs.addCpuCost((long) (overall * HASHING_CPU_FACTOR)); } else { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } // one time the build side plus cost-weight time the probe side costs.addHeuristicDiskCost((1 + costWeight) * HEURISTIC_COST_BASE); costs.addHeuristicCpuCost((long) ((1 + costWeight) * HEURISTIC_COST_BASE * HASHING_CPU_FACTOR)); }
@Override public void addHybridHashCosts(EstimateProvider buildSideInput, EstimateProvider probeSideInput, Costs costs, int costWeight) { long bs = buildSideInput.getEstimatedOutputSize(); long ps = probeSideInput.getEstimatedOutputSize(); if (bs > 0 && ps > 0) { long overall = 2*bs + ps; costs.addDiskCost(overall); costs.addCpuCost((long) (overall * HASHING_CPU_FACTOR)); } else { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } costs.addHeuristicDiskCost(2 * HEURISTIC_COST_BASE); costs.addHeuristicCpuCost((long) (2 * HEURISTIC_COST_BASE * HASHING_CPU_FACTOR)); // cost weight applies to everything costs.multiplyWith(costWeight); }
@Override public void addStreamedNestedLoopsCosts(EstimateProvider outerSide, EstimateProvider innerSide, long bufferSize, Costs costs, int costWeight) { long is = innerSide.getEstimatedOutputSize(); long oc = outerSide.getEstimatedNumRecords(); if (is > 0 && oc >= 0) { // costs, if the inner side cannot be cached if (is > bufferSize) { costs.addDiskCost(oc * is); } costs.addCpuCost((long) (oc * is * MATERIALIZATION_CPU_FACTOR)); } else { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } // hack: assume 100k loops (should be expensive enough) costs.addHeuristicDiskCost(HEURISTIC_COST_BASE * 100000); costs.addHeuristicCpuCost((long) (HEURISTIC_COST_BASE * 100000 * MATERIALIZATION_CPU_FACTOR)); costs.multiplyWith(costWeight); }
/**
 * Creates a new costs object from the given network and disk cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 */
public Costs(double networkCost, double diskCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
}
/**
 * Creates a new costs object from the given network and disk cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 */
public Costs(double networkCost, double diskCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
}
/**
 * Creates a new costs object from the given network and disk cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 */
public Costs(double networkCost, double diskCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
}
/**
 * Creates a new costs object from the given network, disk, and CPU cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 * @param cpuCost The cost for CPU operations.
 */
public Costs(double networkCost, double diskCost, double cpuCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
	setCpuCost(cpuCost);
}
/**
 * Adds the cost of reading a file input: the file's size in bytes of disk cost,
 * or the UNKNOWN marker when no size estimate is available.
 *
 * @param fileSizeInBytes Estimated file size, or a negative value if unknown.
 * @param costs The costs object to add the computed costs to.
 */
@Override
public void addFileInputCost(long fileSizeInBytes, Costs costs) {
	if (fileSizeInBytes < 0) {
		// no estimate available - mark the disk cost as unknown
		costs.setDiskCost(Costs.UNKNOWN);
	} else {
		costs.addDiskCost(fileSizeInBytes);
	}
	// heuristic: reading a file always contributes one base cost unit
	costs.addHeuristicDiskCost(HEURISTIC_COST_BASE);
}
/**
 * Adds the cost of reading a file input: the file's size in bytes of disk cost,
 * or the UNKNOWN marker when no size estimate is available.
 *
 * @param fileSizeInBytes Estimated file size, or a negative value if unknown.
 * @param costs The costs object to add the computed costs to.
 */
@Override
public void addFileInputCost(long fileSizeInBytes, Costs costs) {
	if (fileSizeInBytes < 0) {
		// no estimate available - mark the disk cost as unknown
		costs.setDiskCost(Costs.UNKNOWN);
	} else {
		costs.addDiskCost(fileSizeInBytes);
	}
	// heuristic: reading a file always contributes one base cost unit
	costs.addHeuristicDiskCost(HEURISTIC_COST_BASE);
}
/**
 * Creates a new costs object from the given network, disk, and CPU cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 * @param cpuCost The cost for CPU operations.
 */
public Costs(double networkCost, double diskCost, double cpuCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
	setCpuCost(cpuCost);
}
/**
 * Adds the cost of reading a file input: the file's size in bytes of disk cost,
 * or the UNKNOWN marker when no size estimate is available.
 *
 * @param fileSizeInBytes Estimated file size, or a negative value if unknown.
 * @param costs The costs object to add the computed costs to.
 */
@Override
public void addFileInputCost(long fileSizeInBytes, Costs costs) {
	if (fileSizeInBytes < 0) {
		// no estimate available - mark the disk cost as unknown
		costs.setDiskCost(Costs.UNKNOWN);
	} else {
		costs.addDiskCost(fileSizeInBytes);
	}
	// heuristic: reading a file always contributes one base cost unit
	costs.addHeuristicDiskCost(HEURISTIC_COST_BASE);
}
/**
 * Creates a new costs object from the given network, disk, and CPU cost values.
 *
 * @param networkCost The network cost, in bytes to be transferred.
 * @param diskCost The cost for disk, in bytes to be written and read.
 * @param cpuCost The cost for CPU operations.
 */
public Costs(double networkCost, double diskCost, double cpuCost) {
	setNetworkCost(networkCost);
	setDiskCost(diskCost);
	setCpuCost(cpuCost);
}
@Override public void addLocalSortCost(EstimateProvider estimates, Costs costs) { final long s = estimates.getEstimatedOutputSize(); // we assume a two phase merge sort, so all in all 2 I/O operations per block if (s <= 0) { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } else { costs.addDiskCost(2 * s); costs.addCpuCost((long) (s * SORTING_CPU_FACTOR)); } costs.addHeuristicDiskCost(2 * HEURISTIC_COST_BASE); costs.addHeuristicCpuCost((long) (HEURISTIC_COST_BASE * SORTING_CPU_FACTOR)); }
@Override public void addLocalSortCost(EstimateProvider estimates, Costs costs) { final long s = estimates.getEstimatedOutputSize(); // we assume a two phase merge sort, so all in all 2 I/O operations per block if (s <= 0) { costs.setDiskCost(Costs.UNKNOWN); costs.setCpuCost(Costs.UNKNOWN); } else { costs.addDiskCost(2 * s); costs.addCpuCost((long) (s * SORTING_CPU_FACTOR)); } costs.addHeuristicDiskCost(2 * HEURISTIC_COST_BASE); costs.addHeuristicCpuCost((long) (HEURISTIC_COST_BASE * SORTING_CPU_FACTOR)); }