public Optional<IndexInfo> build() { List<Integer> partitions = partitionsSizes.build(); if (partitions.size() == 0) { return Optional.empty(); } double avgSize = partitions.stream().mapToLong(Integer::longValue).average().getAsDouble(); double squaredDifferences = partitions.stream().mapToDouble(size -> Math.pow(size - avgSize, 2)).sum(); checkState(partitions.stream().mapToLong(Integer::longValue).sum() == rowsNumber, "Total number of rows in index does not match number of rows in partitions within that index"); return Optional.of(new IndexInfo(rowsNumber, sizeInBytes, squaredDifferences, partitions.size())); } }
/**
 * Sums the positive available memory of every fragmented supervisor.
 * The total is truncated to a whole number before being returned.
 */
private double fragmentedMemory() {
    double total = nodeIdToResources.get().values().parallelStream()
            .filter(this::isFragmented)
            .mapToDouble(SupervisorResources::getAvailableMem)
            .filter(mem -> mem > 0)
            .sum();
    // Truncation matches the original Double.intValue() conversion
    return (int) total;
}
/**
 * Sums the positive available CPU of every fragmented supervisor,
 * truncated to a whole number.
 */
private int fragmentedCpu() {
    double total = nodeIdToResources.get().values().parallelStream()
            .filter(this::isFragmented)
            .mapToDouble(SupervisorResources::getAvailableCpu)
            .filter(cpu -> cpu > 0)
            .sum();
    // Truncation matches the original Double.intValue() conversion
    return (int) total;
}
/**
 * Trains the pairwise-ranking classifier on the provided examples.
 * Examples are flattened, then learned from in a freshly shuffled order each epoch;
 * finally the average best-action cost is logged.
 *
 * @param examples grouped pairs of candidate actions to learn from
 */
private void trainPolicy(List<List<Pair<CandidateAction, CandidateAction>>> examples) {
    // Flatten with a plain loop rather than a side-effecting stream.forEach(addAll);
    // the result must stay mutable because Collections.shuffle modifies it in place.
    List<Pair<CandidateAction, CandidateAction>> flattenedExamples = new ArrayList<>();
    for (List<Pair<CandidateAction, CandidateAction>> group : examples) {
        flattenedExamples.addAll(group);
    }
    for (int epoch = 0; epoch < NUM_EPOCHS; epoch++) {
        // Reshuffle each epoch so the online learner does not fit the example order
        Collections.shuffle(flattenedExamples, random);
        flattenedExamples.forEach(classifier::learn);
    }
    double totalCost = flattenedExamples.stream()
            .mapToDouble(e -> classifier.bestAction(e).cost)
            .sum();
    Redwood.log("scoref.train",
            String.format("Training cost: %.4f", 100 * totalCost / flattenedExamples.size()));
}
/** Sums every per-assignment cost into a single total. */
double totalAssignmentCost() {
    double total = 0;
    for (double cost : assignmentCost()) {
        total += cost;
    }
    return total;
}
/** Sums every original cost into a single total. */
double totalOriginalCost() {
    return originalCost().stream()
            .mapToDouble(Double::doubleValue)
            .sum();
}
/**
 * {@inheritDoc}
 *
 * <p>The cost is the total estimated row count across the member rels;
 * CPU and I/O components are reported as zero.
 */
@Override
public RelOptCost computeSelfCost(final RelOptPlanner planner, final RelMetadataQuery mq) {
    double totalRowCount = rels.stream().mapToDouble(mq::getRowCount).sum();
    return planner.getCostFactory().makeCost(totalRowCount, 0, 0);
}
/**
 * Aggregates the named statistic across every registered collector.
 *
 * @param name    name of the statistic to aggregate
 * @param isError whether to aggregate the error variant of the statistic
 * @return the sum of the statistic over all collectors
 */
public static double aggregateStat(final String name, final boolean isError) {
    return collectorMap.values()
            .stream()
            .mapToDouble(collector -> collector.aggregateStat(name, isError))
            .sum();
}
/**
 * Reads the four pairwise score thresholds from the properties.
 * If the property is a comma-separated list of exactly four values, those are used;
 * otherwise a single threshold (default 0.35) is applied to all four slots.
 *
 * @param props configuration properties
 * @return an array of exactly four thresholds
 */
public static double[] pairwiseScoreThresholds(Properties props) {
    String thresholdsProp = props.getProperty("coref.statistical.pairwiseScoreThresholds");
    if (thresholdsProp != null) {
        String[] parts = thresholdsProp.split(",");
        if (parts.length == 4) {
            double[] thresholds = new double[4];
            for (int i = 0; i < 4; i++) {
                thresholds[i] = Double.parseDouble(parts[i]);
            }
            return thresholds;
        }
    }
    double fallback = PropertiesUtils.getDouble(
            props, "coref.statistical.pairwiseScoreThresholds", 0.35);
    double[] thresholds = new double[4];
    Arrays.fill(thresholds, fallback);
    return thresholds;
}
@Override
protected Double transform(ClusterSummary clusterSummary) {
    // Total fragmented memory across all supervisors in the cluster summary;
    // negative per-supervisor values are clamped to zero so they cannot offset the sum.
    return clusterSummary.get_supervisors().stream()
            //Filtered negative value
            .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_mem(), 0))
            .sum();
}
});
@Override
protected Double transform(ClusterSummary clusterSummary) {
    // Total fragmented CPU across all supervisors in the cluster summary;
    // negative per-supervisor values are clamped to zero so they cannot offset the sum.
    return clusterSummary.get_supervisors().stream()
            //Filtered negative value
            .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_cpu(), 0))
            .sum();
}
});
/**
 * Returns the estimated output data size in bytes for the given symbols.
 * An unknown value is represented by {@link Double#NaN}.
 *
 * @param outputSymbols symbols whose estimated sizes are accumulated; must not be null
 * @param types         provides the type for each symbol
 * @return the summed per-symbol size estimate ({@code NaN} propagates through the sum)
 */
public double getOutputSizeInBytes(Collection<Symbol> outputSymbols, TypeProvider types) {
    requireNonNull(outputSymbols, "outputSymbols is null");
    double totalSize = 0;
    for (Symbol symbol : outputSymbols) {
        totalSize += getOutputSizeForSymbol(getSymbolStatistics(symbol), types.get(symbol));
    }
    return totalSize;
}
/** Total CPU requested by every scheduled worker in the given assignment. */
private static double getCpuUsed(SchedulerAssignment assignment) {
    return assignment.getScheduledResources()
            .values()
            .stream()
            .mapToDouble(resources -> resources.get_cpu())
            .sum();
}
/**
 * Aggregates the value of the named statistic across a collection of topic sensors.
 *
 * @param name    statistic name to match
 * @param isError whether to read the error-side statistics
 * @param sensors sensors whose matching statistics are summed
 * @param <T>     sensor payload type
 * @return the summed value of every statistic whose name matches
 */
public static <T> double aggregateStat(
    final String name, final boolean isError, final Collection<TopicSensors<T>> sensors) {
  double total = 0;
  for (TopicSensors<T> sensor : sensors) {
    for (TopicSensors.Stat stat : sensor.stats(isError)) {
      if (stat.name().equals(name)) {
        total += stat.getValue();
      }
    }
  }
  return total;
}
/**
 * Decides whether one additional writer task should be scheduled.
 * Always returns 1 when nothing is scheduled yet; otherwise returns 1 only when
 * at least half of the running source tasks report an over-utilized output buffer
 * AND the writers have already produced at least the minimum size per scheduled node.
 */
private int getNewTaskCount() {
    if (scheduledNodes.isEmpty()) {
        return 1;
    }
    // Fraction (0.0–1.0) of not-yet-done source tasks whose output buffer is over-utilized
    double overutilizedFraction = sourceTasksProvider.get().stream()
            .filter(task -> !task.getState().isDone())
            .mapToDouble(task -> task.isOutputBufferOverutilized() ? 1.0 : 0.0)
            .average()
            .orElse(0.0);
    long totalWrittenBytes = writerTasksProvider.get().stream()
            .map(TaskStatus::getPhysicalWrittenDataSize)
            .mapToLong(DataSize::toBytes)
            .sum();
    if ((overutilizedFraction >= 0.5) && (totalWrittenBytes >= (writerMinSizeBytes * scheduledNodes.size()))) {
        return 1;
    }
    return 0;
}
/** Total memory (on-heap plus off-heap) requested by every worker in the assignment. */
private static double getMemoryUsed(SchedulerAssignment assignment) {
    return assignment.getScheduledResources()
            .values()
            .stream()
            .mapToDouble(resources -> resources.get_mem_on_heap() + resources.get_mem_off_heap())
            .sum();
}
/**
 * Estimates the fraction of rows whose partitioning-key value is NULL.
 * Rows from partitions keyed by NULL are counted; a partition with no known
 * row count contributes the average partition size instead.
 *
 * @param column                 the partitioning-key column
 * @param partitions             partitions to inspect
 * @param statistics             per-partition statistics keyed by partition id
 * @param averageRowsPerPartition fallback row count for partitions with unknown statistics
 * @param rowCount               total row count; a value of 0 yields a fraction of 0
 * @return the normalized NULLs fraction
 */
@VisibleForTesting
static double calculateNullsFractionForPartitioningKey(
        HiveColumnHandle column,
        List<HivePartition> partitions,
        Map<String, PartitionStatistics> statistics,
        double averageRowsPerPartition,
        double rowCount)
{
    if (rowCount == 0) {
        return 0;
    }
    double estimatedNullsCount = 0;
    for (HivePartition partition : partitions) {
        // Only partitions whose key value for this column is NULL contribute
        if (!partition.getKeys().get(column).isNull()) {
            continue;
        }
        estimatedNullsCount += getPartitionRowCount(partition.getPartitionId(), statistics)
                .orElse(averageRowsPerPartition);
    }
    return normalizeFraction(estimatedNullsCount / rowCount);
}
/**
 * Creates a {@link VectorGenerator} whose vectors have feature values produced by the
 * given pseudorandom producers, one producer per dimension.
 *
 * @param producers Feature value producers; must be non-empty.
 * @return Vector generator.
 */
public static VectorGenerator vectorize(RandomProducer... producers) {
    A.notEmpty(producers, "producers");
    // Each call to the generator samples every producer once and packs the values into a vector
    return () -> VectorUtils.of(Arrays.stream(producers).mapToDouble(Supplier::get).toArray());
}
}
/** {@inheritDoc} */
@Override
public Vector get() {
    // Draw one random value and feed the same sample to every per-dimension
    // generator, so all coordinates of the produced vector are derived from it.
    Double t = randomProducer.get();
    return VectorUtils.of(perDimensionGenerators.stream()
            .mapToDouble(f -> f.apply(t)).toArray());
}
}
/**
 * Extracts the {@code "coordinates"} array of the given JSON element
 * as a primitive double array.
 */
private static double[] coordinatesAsArray( JsonNode element )
{
    JsonNode coordinates = element.get( "coordinates" );
    return Iterables.stream( coordinates )
                    .mapToDouble( JsonNode::asDouble )
                    .toArray();
}