/**
 * Aggregates the current error rate across every registered metric collector.
 *
 * @return the sum of {@code errorRate()} over all collectors in {@code collectorMap}
 */
public static double currentErrorRate() {
  return collectorMap.values()
      .stream()
      .mapToDouble(collector -> collector.errorRate())
      .sum();
}
public void testEquivalentStreams() { // For datasets of many double values created from an array, we test many combinations of finite // and non-finite values: for (ManyValues values : ALL_MANY_VALUES) { double[] array = values.asArray(); Stats stats = Stats.of(array); // instance methods on Stats vs on instance methods on DoubleStream assertThat(stats.count()).isEqualTo(stream(array).count()); assertEquivalent(stats.mean(), stream(array).average().getAsDouble()); assertEquivalent(stats.sum(), stream(array).sum()); assertEquivalent(stats.max(), stream(array).max().getAsDouble()); assertEquivalent(stats.min(), stream(array).min().getAsDouble()); // static method on Stats vs on instance method on DoubleStream assertEquivalent(Stats.meanOf(array), stream(array).average().getAsDouble()); // instance methods on Stats vs instance methods on DoubleSummaryStatistics DoubleSummaryStatistics streamStats = stream(array).summaryStatistics(); assertThat(stats.count()).isEqualTo(streamStats.getCount()); assertEquivalent(stats.mean(), streamStats.getAverage()); assertEquivalent(stats.sum(), streamStats.getSum()); assertEquivalent(stats.max(), streamStats.getMax()); assertEquivalent(stats.min(), streamStats.getMin()); } }
/**
 * Builds the {@code IndexInfo} from the accumulated partition sizes.
 *
 * @return index statistics, or {@link Optional#empty()} when no partitions were recorded
 * @throws IllegalStateException if the partition sizes do not sum to the expected row count
 */
public Optional<IndexInfo> build() {
    List<Integer> partitions = partitionsSizes.build();
    // isEmpty() instead of size() == 0 — clearer intent, same behavior.
    if (partitions.isEmpty()) {
        return Optional.empty();
    }
    // Safe: partitions is non-empty, so average() is always present.
    double avgSize = partitions.stream().mapToLong(Integer::longValue).average().getAsDouble();
    double squaredDifferences = partitions.stream().mapToDouble(size -> Math.pow(size - avgSize, 2)).sum();
    checkState(partitions.stream().mapToLong(Integer::longValue).sum() == rowsNumber,
            "Total number of rows in index does not match number of rows in partitions within that index");
    return Optional.of(new IndexInfo(rowsNumber, sizeInBytes, squaredDifferences, partitions.size()));
}
}
/**
 * Total available memory on fragmented supervisors, counting only positive amounts.
 *
 * <p>NOTE(review): the sum is truncated to a whole number before being returned as a
 * double (historical behavior via {@code Double.intValue()}); confirm callers rely on
 * this truncation.
 *
 * @return truncated sum of positive available memory across fragmented nodes
 */
private double fragmentedMemory() {
    double total = nodeIdToResources.get().values().parallelStream()
            .filter(this::isFragmented)
            .mapToDouble(SupervisorResources::getAvailableMem)
            .filter(x -> x > 0)
            .sum();
    // Primitive sum avoids the original's boxing into Double; the (int) narrowing cast
    // is exactly what Double.intValue() did.
    return (int) total;
}
/**
 * Total available CPU on fragmented supervisors, counting only positive amounts.
 *
 * @return truncated sum of positive available CPU across fragmented nodes
 */
private int fragmentedCpu() {
    double total = nodeIdToResources.get().values().parallelStream()
            .filter(this::isFragmented)
            .mapToDouble(SupervisorResources::getAvailableCpu)
            .filter(x -> x > 0)
            .sum();
    // Primitive sum avoids the original's boxing into Double; the (int) narrowing cast
    // is exactly what Double.intValue() did.
    return (int) total;
}
/**
 * Trains the classifier on pairwise candidate-action examples for {@code NUM_EPOCHS}
 * passes, then logs the normalized training cost.
 *
 * @param examples grouped pairwise training examples; flattened before training
 */
private void trainPolicy(List<List<Pair<CandidateAction, CandidateAction>>> examples) {
    List<Pair<CandidateAction, CandidateAction>> flattenedExamples = new ArrayList<>();
    // Plain loop instead of stream().forEach used for its side effect (anti-pattern).
    for (List<Pair<CandidateAction, CandidateAction>> group : examples) {
        flattenedExamples.addAll(group);
    }
    for (int epoch = 0; epoch < NUM_EPOCHS; epoch++) {
        // Reshuffle each epoch so the online learner sees a fresh ordering.
        Collections.shuffle(flattenedExamples, random);
        flattenedExamples.forEach(classifier::learn);
    }
    // NOTE(review): if flattenedExamples is empty, the division below yields NaN in the
    // log message — presumably callers never pass an empty example set; confirm.
    double totalCost = flattenedExamples.stream()
            .mapToDouble(e -> classifier.bestAction(e).cost)
            .sum();
    Redwood.log("scoref.train",
            String.format("Training cost: %.4f", 100 * totalCost / flattenedExamples.size()));
}
/**
 * Calculates the portion of the first value to all values passed.
 *
 * @param n the values in the set; must contain at least one element
 * @return the ratio of {@code n[0]} to the sum of all {@code n}, or 0 if the result
 *         is {@link Double#NaN}
 */
public static double portion( double... n ) {
    assert n.length > 0;
    double first = n[0];
    if ( numbersEqual( first, 0 ) ) {
        return 0d;
    }
    double total = Arrays.stream(n).sum();
    double ratio = first / total;
    // Honor the documented contract: a NaN result (e.g. NaN inputs) collapses to 0
    // instead of propagating. The original code never performed this check.
    return Double.isNaN( ratio ) ? 0d : ratio;
}
/** Sums every per-assignment cost into a single total. */
double totalAssignmentCost() {
  return assignmentCost().stream().mapToDouble(Double::doubleValue).sum();
}
/** Sums every per-item original cost into a single total. */
double totalOriginalCost() {
  return originalCost().stream().mapToDouble(Double::doubleValue).sum();
}
/**
 * Cost model: the total estimated row count across all input rels; CPU and I/O are
 * reported as zero.
 */
@Override
public RelOptCost computeSelfCost(final RelOptPlanner planner, final RelMetadataQuery mq) {
  double totalRowCount = rels.stream().mapToDouble(mq::getRowCount).sum();
  return planner.getCostFactory().makeCost(totalRowCount, 0, 0);
}
/**
 * Total error rate across all topic sensors.
 *
 * @return the sum of every error-rate stat value reported by each sensor record
 */
@Override
public double errorRate() {
  // flatMap replaces the original forEach + addAll pattern, which built an
  // intermediate list purely through side effects.
  return topicSensors.values()
      .stream()
      .flatMap(record -> record.errorRateStats().stream())
      .mapToDouble(TopicSensors.Stat::getValue)
      .sum();
}
/** Sums the named stat (error or non-error flavor) across every registered collector. */
public static double aggregateStat(final String name, final boolean isError) {
  return collectorMap.values()
      .stream()
      .mapToDouble(collector -> collector.aggregateStat(name, isError))
      .sum();
}
// Total fragmented memory across all supervisors; negative reported values are
// clamped to 0 before summing.
@Override protected Double transform(ClusterSummary clusterSummary) { return clusterSummary.get_supervisors().stream()
    // Filtered negative value
    .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_mem(), 0)) .sum(); } });
// Total fragmented CPU across all supervisors; negative reported values are
// clamped to 0 before summing.
@Override protected Double transform(ClusterSummary clusterSummary) { return clusterSummary.get_supervisors().stream()
    // Filtered negative value
    .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_cpu(), 0)) .sum(); } });
/** JMH benchmark: parallel unboxed sum over the boxed JDK double list. */
@Benchmark
public double parallel_lazy_collectDoubleSum_jdk() {
  return this.doublesJDK.parallelStream().mapToDouble(Double::doubleValue).sum();
}
/**
 * Returns the estimated data size in bytes for the given output symbols.
 * An unknown value is represented by {@link Double#NaN}.
 *
 * @param outputSymbols symbols whose output size is estimated; must not be null
 * @param types provider used to resolve each symbol's type
 */
public double getOutputSizeInBytes(Collection<Symbol> outputSymbols, TypeProvider types) {
    requireNonNull(outputSymbols, "outputSymbols is null");
    return outputSymbols
            .stream()
            .mapToDouble(outputSymbol ->
                    getOutputSizeForSymbol(getSymbolStatistics(outputSymbol), types.get(outputSymbol)))
            .sum();
}
/** Total CPU scheduled across all worker slots in the assignment. */
private static double getCpuUsed(SchedulerAssignment assignment) {
    return assignment.getScheduledResources()
            .values()
            .stream()
            .mapToDouble(resources -> resources.get_cpu())
            .sum();
}
/**
 * Sums the values of every stat matching {@code name} across the supplied sensors.
 *
 * @param name stat name to match exactly
 * @param isError whether to read the error-flavored stats from each sensor
 * @param sensors the sensors to aggregate over
 */
public static <T> double aggregateStat(
    final String name,
    final boolean isError,
    final Collection<TopicSensors<T>> sensors) {
  return sensors.stream()
      .flatMap(sensor -> sensor.stats(isError).stream())
      .filter(stat -> stat.name().equals(name))
      .mapToDouble(TopicSensors.Stat::getValue)
      .sum();
}
/** Total memory (on-heap plus off-heap) scheduled across all worker slots. */
private static double getMemoryUsed(SchedulerAssignment assignment) {
    return assignment.getScheduledResources()
            .values()
            .stream()
            .mapToDouble(resources -> resources.get_mem_on_heap() + resources.get_mem_off_heap())
            .sum();
}
/**
 * Estimates the fraction of rows whose partitioning-key value is NULL.
 *
 * <p>Rows are attributed per partition: partitions whose key for {@code column} is NULL
 * contribute their known row count, falling back to {@code averageRowsPerPartition}
 * when statistics are missing.
 *
 * @param column the partitioning-key column
 * @param partitions all partitions under consideration
 * @param statistics per-partition statistics, keyed by partition id
 * @param averageRowsPerPartition fallback row count for partitions without statistics
 * @param rowCount total row count; a value of 0 yields a nulls fraction of 0
 */
@VisibleForTesting
static double calculateNullsFractionForPartitioningKey(
        HiveColumnHandle column,
        List<HivePartition> partitions,
        Map<String, PartitionStatistics> statistics,
        double averageRowsPerPartition,
        double rowCount) {
    if (rowCount == 0) {
        return 0;
    }
    double nullRows = partitions.stream()
            .filter(partition -> partition.getKeys().get(column).isNull())
            .map(HivePartition::getPartitionId)
            .mapToDouble(partitionId -> getPartitionRowCount(partitionId, statistics).orElse(averageRowsPerPartition))
            .sum();
    return normalizeFraction(nullRows / rowCount);
}