Collectors.groupingByConcurrent((x) -> { try { doLink(x); /* … */ } /* … */ })

Collectors.groupingByConcurrent((x) -> { try { doDelete(x); /* … */ } /* … */ })

Collectors.groupingByConcurrent((x) -> { try { doRename(x); /* … */ } /* … */ })

bySuccess = parallelizer.getForkJoinPool().submit(() ->
        args.works.parallelStream().collect(
                Collectors.groupingByConcurrent((x) -> {
                    int tries = 0;
                    Ctags pctags = null;
                    // …
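These first four fragments are truncated in the original, but they share one pattern: classify each work item by whether an I/O action succeeds (doLink, doDelete, doRename, or, apparently, ctags processing), with the parallel stream submitted to an explicit ForkJoinPool so its workers stay confined to that pool. A minimal, self-contained sketch of that pattern, in which doWork and the sample data are stand-ins for the original calls:

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Collectors;

public class BySuccess {
    // Stand-in for doLink/doDelete/doRename in the fragments above.
    static void doWork(String item) throws Exception {
        if (item.endsWith("!")) throw new Exception("cannot process " + item);
    }

    public static void main(String[] args) throws Exception {
        List<String> works = List.of("a", "b!", "c");
        ForkJoinPool pool = new ForkJoinPool(2);
        // Submitting the terminal operation to an explicit pool keeps the
        // parallel stream's work off the common pool.
        ConcurrentMap<Boolean, List<String>> bySuccess = pool.submit(() ->
                works.parallelStream().collect(
                        Collectors.groupingByConcurrent(x -> {
                            try {
                                doWork(x);
                                return true;   // success bucket
                            } catch (Exception e) {
                                return false;  // failure bucket
                            }
                        }))).get();
        pool.shutdown();
        System.out.println(bySuccess); // e.g. {false=[b!], true=[a, c]}
    }
}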
… Collectors.groupingByConcurrent(Function.identity(), Collectors.<String>counting()));
bh.consume(counts);
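This fragment pairs an identity classifier with a counting downstream to build a frequency map (bh is most likely a JMH Blackhole, and the surrounding benchmark is cut off). A complete, minimal sketch of the same collector, with illustrative data:

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.stream.Collectors;

public class WordCounts {
    public static void main(String[] args) {
        List<String> words = List.of("a", "b", "a", "c", "b", "a");
        // Group equal strings together and count each group; the concurrent
        // variant accumulates into one shared ConcurrentMap instead of
        // merging per-thread maps, so entry order is unspecified.
        ConcurrentMap<String, Long> counts = words.parallelStream()
                .collect(Collectors.groupingByConcurrent(
                        Function.identity(), Collectors.counting()));
        System.out.println(counts); // e.g. {a=3, b=2, c=1}
    }
}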
private TreeMap<SortingKey, List<String>> mapLogs(ArrayList<String> sourceCol) {
    TreeMap<SortingKey, List<String>> mapped = sourceCol.stream()
            .parallel()
            .filter(n -> RgxOpManager.anyMatch(n, eventPatterns, integerFilters, integerCompTypes))
            .collect(Collectors.collectingAndThen(
                    Collectors.groupingByConcurrent(this::createSortingKey),
                    map -> {
                        TreeMap<SortingKey, List<String>> tm = new TreeMap<>();
                        map.entrySet().forEach(n -> tm.put(n.getKey(), n.getValue()));
                        return tm;
                    }));
    return mapped;
}
private TreeMap<SortingKey, List<String>> finalReduction(Collection<String> sourceCol) {
    // First pass: group concurrently, then re-sort the groups by key.
    TreeMap<SortingKey, List<String>> mapped = sourceCol.parallelStream()
            .collect(Collectors.collectingAndThen(
                    Collectors.groupingByConcurrent(this::createSortingKey),
                    map -> {
                        TreeMap<SortingKey, List<String>> tm = new TreeMap<>();
                        map.entrySet().forEach(n -> tm.put(n.getKey(), n.getValue()));
                        return tm;
                    }));
    // Second pass: flatten the sorted groups, cap the total number of
    // entries, and rebuild the sorted grouping from the surviving lines.
    TreeMap<SortingKey, List<String>> rgxResult = mapped.entrySet()
            .stream()
            .sequential()
            .flatMap(n -> n.getValue().stream())
            .limit(limit)
            .collect(Collectors.collectingAndThen(
                    Collectors.groupingByConcurrent(this::createSortingKey),
                    map -> {
                        TreeMap<SortingKey, List<String>> tm = new TreeMap<>();
                        map.entrySet().forEach(n -> tm.put(n.getKey(), n.getValue()));
                        return tm;
                    }));
    return rgxResult;
}
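Both methods above copy the concurrent map into a TreeMap entry by entry; since TreeMap has a Map-accepting constructor, the finisher can simply be TreeMap::new. A minimal sketch of that variant, with one-letter string prefixes standing in for the original SortingKey:

import java.util.List;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class SortedGroups {
    public static void main(String[] args) {
        List<String> logs = List.of("b-1", "a-1", "a-2", "c-1");
        // groupingByConcurrent yields an unordered ConcurrentMap; copying it
        // into a TreeMap in the finisher restores a deterministic key order.
        TreeMap<String, List<String>> byPrefix = logs.parallelStream()
                .collect(Collectors.collectingAndThen(
                        Collectors.groupingByConcurrent(s -> s.substring(0, 1)),
                        TreeMap::new));
        // Keys print in sorted order; element order within a group is not guaranteed.
        System.out.println(byPrefix); // e.g. {a=[a-1, a-2], b=[b-1], c=[c-1]}
    }
}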
… .collect(Collectors.groupingByConcurrent(Person::getAge));
System.out.println(personByAgeConcurrent);
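A self-contained version of this fragment might look like the following, where the Person record is a stand-in for whatever type the original uses:

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;

public class GroupByAge {
    record Person(String name, int age) {
        int getAge() { return age; }
    }

    public static void main(String[] args) {
        List<Person> persons = List.of(
                new Person("Ann", 30), new Person("Bob", 30), new Person("Cem", 41));
        // One bucket per distinct age, accumulated into a shared ConcurrentMap.
        ConcurrentMap<Integer, List<Person>> personByAgeConcurrent =
                persons.parallelStream()
                        .collect(Collectors.groupingByConcurrent(Person::getAge));
        System.out.println(personByAgeConcurrent);
    }
}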
public static ConcurrentMap<String, Long> getResourceUsage(boolean globalConcurrencyEnabled,
                                                           List<InstanceState> activeStates,
                                                           Set<WorkflowInstance> timedOutInstances,
                                                           WorkflowResourceDecorator resourceDecorator,
                                                           Map<WorkflowId, Workflow> workflows) {
    return activeStates.parallelStream()
            .filter(entry -> !timedOutInstances.contains(entry.workflowInstance()))
            .filter(entry -> isConsumingResources(entry.runState().state()))
            .flatMap(instanceState -> pairWithResources(globalConcurrencyEnabled, instanceState, workflows, resourceDecorator))
            .collect(groupingByConcurrent(
                    ResourceWithInstance::resource,
                    ConcurrentHashMap::new,
                    counting()));
}
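This is the three-argument overload: classifier, explicit concurrent-map factory, and downstream collector. A stripped-down sketch of the same shape, with made-up resource names:

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static java.util.stream.Collectors.counting;
import static java.util.stream.Collectors.groupingByConcurrent;

public class ResourceCounts {
    public static void main(String[] args) {
        List<String> resources = List.of("cpu", "mem", "cpu", "cpu");
        ConcurrentMap<String, Long> usage = resources.parallelStream()
                .collect(groupingByConcurrent(
                        r -> r,                 // classifier: the resource id itself
                        ConcurrentHashMap::new, // explicit concurrent map factory
                        counting()));           // downstream: occurrences per resource
        System.out.println(usage); // e.g. {cpu=3, mem=1}
    }
}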
private Stream<Optional<GeneModelPhenotypeMatch>> getBestModelsByGene(List<GeneModelPhenotypeMatch> organismModels) {
    if (options.isBenchmarkingEnabled()) {
        return organismModels.parallelStream()
                .filter(model -> model.getScore() > 0)
                // catch hit to known disease-gene association for purposes of benchmarking i.e to simulate novel gene discovery performance
                .filter(model -> !options.isBenchmarkHit(model))
                .collect(groupingByConcurrent(GeneModelPhenotypeMatch::getEntrezGeneId,
                        maxBy(comparingDouble(GeneModelPhenotypeMatch::getScore))))
                .values()
                .stream();
    }
    return organismModels.parallelStream()
            .filter(model -> model.getScore() > 0)
            .collect(groupingByConcurrent(GeneModelPhenotypeMatch::getEntrezGeneId,
                    maxBy(comparingDouble(GeneModelPhenotypeMatch::getScore))))
            .values()
            .stream();
}
/**
 * Saves the final clustering for each data point.
 */
protected void saveFinalClustering() {
    if (clusters.size() > 0) {
        List<? extends DataType> data = getData();
        assignments = assignDataToClusters(data);
        clusters.forEach(cluster -> cluster.getMembers().clear());
        IntStream.range(0, assignments.length).parallel()
                .mapToObj(Integer::valueOf)
                .collect(groupingByConcurrent(idx -> assignments[idx]))
                .forEach((clusterIdx, clusterPoints) ->
                        clusters.get(clusterIdx).getMembers()
                                .addAll(clusterPoints.stream()
                                        .map(idx -> data.get(idx))
                                        .collect(toList())));
    }
}
/**
 * Returns a new instance.
 *
 * @param snapshot not null
 * @param fileConfig not null
 * @return a new instance, not null
 */
public static SignatureManager from(Snapshot snapshot, FileConfig fileConfig) {
    logger.trace("<< from() < dsPrsId: {} udid: {} snapshot: {} fileConfig: {}",
            snapshot.dsPrsID(), snapshot.backupUDID(), snapshot.snapshotID(), fileConfig);
    CloudFileWriter cloudWriter = CloudFileWriter.from(snapshot, fileConfig);
    Map<ByteString, Set<ICloud.MBSFile>> signatures = snapshot.files().stream()
            .collect(Collectors.groupingByConcurrent(ICloud.MBSFile::getSignature, Collectors.toSet()));
    long totalBytes = signatures.values().stream()
            .flatMap(Set::stream)
            .mapToLong(ICloud.MBSFile::getSize)
            .sum();
    Lock lock = new ReentrantLock();
    SignatureManager instance =
            new SignatureManager(signatures, cloudWriter, lock, totalBytes, new AtomicLong(0), new AtomicLong(0));
    logger.trace(">> from() > {}", instance);
    return instance;
}
private ImmutableMap<Node<VALUETYPE>, ImmutableList<Edge<VALUETYPE>>> createEdgeCache(
        ImmutableCollection<Node<VALUETYPE>> nodes,
        ImmutableCollection<Edge<VALUETYPE>> edges,
        Function<? super Edge<VALUETYPE>, ? extends Node<VALUETYPE>> mappingFunction) {
    final Map<Node<VALUETYPE>, List<Edge<VALUETYPE>>> optimizedEdges = edges.stream()
            .collect(Collectors.groupingByConcurrent(mappingFunction, Collectors.toList()));
    nodes.forEach(node -> optimizedEdges.putIfAbsent(node, new ArrayList<>()));
    return new ImmutableHashMap<>(
            optimizedEdges.keySet().stream().collect(
                    Collectors.toMap(
                            node -> node,
                            node -> new ImmutableArrayList<>(optimizedEdges.get(node)))));
}
@Override
public Stream<PhivePriorityResult> prioritise(List<String> hpoIds, List<Gene> genes) {
    logger.info("Starting {}", PRIORITY_TYPE);
    List<PhenotypeTerm> hpoPhenotypeTerms = priorityService.makePhenotypeTermsFromHpoIds(hpoIds);
    PhenotypeMatcher humanMousePhenotypeMatcher =
            priorityService.getPhenotypeMatcherForOrganism(hpoPhenotypeTerms, Organism.MOUSE);
    Set<Integer> wantedGeneIds = genes.stream()
            .map(Gene::getEntrezGeneID)
            .collect(ImmutableSet.toImmutableSet());
    Set<GeneModel> modelsToScore = priorityService.getModelsForOrganism(Organism.MOUSE).stream()
            .filter(model -> wantedGeneIds.contains(model.getEntrezGeneId()))
            .collect(ImmutableSet.toImmutableSet());
    List<GeneModelPhenotypeMatch> scoredModels = scoreModels(humanMousePhenotypeMatcher, modelsToScore);
    // n.b. this will contain models but with a phenotype score of zero
    Map<Integer, Optional<GeneModelPhenotypeMatch>> geneModelPhenotypeMatches = scoredModels.parallelStream()
            // .filter(model -> model.getScore() > 0)
            .collect(groupingByConcurrent(GeneModelPhenotypeMatch::getEntrezGeneId,
                    maxBy(comparingDouble(GeneModelPhenotypeMatch::getScore))));
    return genes.stream().map(getPhivePriorityResult(geneModelPhenotypeMatches));
}
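This snippet, like getBestModelsByGene above, pairs groupingByConcurrent with maxBy, so each group's value is an Optional holding the best-scoring element. A minimal sketch of that combination, with toy scores:

import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentMap;
import static java.util.Comparator.comparingDouble;
import static java.util.stream.Collectors.groupingByConcurrent;
import static java.util.stream.Collectors.maxBy;

public class BestPerGene {
    record Match(int geneId, double score) {}

    public static void main(String[] args) {
        List<Match> matches = List.of(
                new Match(1, 0.4), new Match(1, 0.9), new Match(2, 0.7));
        // maxBy wraps each group's maximum in an Optional; every key present
        // in the map has at least one element, but the wrapper is part of
        // the collector's signature either way.
        ConcurrentMap<Integer, Optional<Match>> best = matches.parallelStream()
                .collect(groupingByConcurrent(
                        Match::geneId,
                        maxBy(comparingDouble(Match::score))));
        System.out.println(best);
        // e.g. {1=Optional[Match[geneId=1, score=0.9]], 2=Optional[Match[geneId=2, score=0.7]]}
    }
}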
private ConcurrentMap<String, List<String>> mapLogs(ArrayList<String> sourceCol, String filename) {
    ConcurrentMap<String, List<String>> mapResults = null;
    if (groupType == GroupTypes.BY_PROPS) {
        mapResults = sourceCol.parallelStream()
                .unordered()
                .filter(n -> RgxOpManager.anyMatch(n, eventPatterns, integerFilters, integerCompTypes))
                .collect(Collectors.groupingByConcurrent(
                        input -> RgxOpManager.getEventProperty(input, eventPatterns, cleanPropsRgx, groupPropsRgx)));
    } else if (groupType == GroupTypes.BY_FILE_NAMES) {
        mapResults = sourceCol.parallelStream()
                .unordered()
                .filter(n -> RgxOpManager.anyMatch(n, eventPatterns, integerFilters, integerCompTypes))
                .collect(Collectors.groupingByConcurrent(n -> filename));
    }
    return mapResults;
}
if (isParallel()
        && downstream.characteristics().contains(Characteristics.UNORDERED)
        && mapSupplier.get() instanceof ConcurrentMap) {
    return (M) collect(Collectors.groupingByConcurrent(keyMapper,
            (Supplier<? extends ConcurrentMap<K, D>>) mapSupplier, mapping));
}
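This fragment appears to be a stream-library fast path: it dispatches to Collectors.groupingByConcurrent only when the pipeline is parallel, the downstream collector is marked UNORDERED (so it tolerates nondeterministic accumulation order), and the caller-supplied factory actually produces a ConcurrentMap, which groupingByConcurrent requires; otherwise a plain groupingBy would be needed for correctness.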
public void testObjCollectorGroupBy() {
    Cache<Integer, String> cache = getCache(0);
    int range = 10;
    // First populate the cache with a bunch of values
    IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
    assertEquals(range, cache.size());
    CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
    ConcurrentMap<Boolean, List<Map.Entry<Integer, String>>> grouped = createStream(entrySet)
            .collect(() -> Collectors.groupingByConcurrent(k -> k.getKey() % 2 == 0));
    grouped.get(true).parallelStream().forEach(e -> assertTrue(e.getKey() % 2 == 0));
    grouped.get(false).parallelStream().forEach(e -> assertTrue(e.getKey() % 2 == 1));
}
public static double[][] learnKMeans(int k, DataStream<DataInstance> data) {
    setK(k);
    Attributes atts = data.getAttributes();
    double[][] centroids = new double[getK()][atts.getNumberOfAttributes()];
    // Seed the centroids with the first k instances.
    AtomicInteger index = new AtomicInteger();
    data.stream().limit(getK()).forEach(dataInstance ->
            centroids[index.getAndIncrement()] = dataInstance.toArray());
    data.restart();
    boolean change = true;
    while (change) {
        // Assign each instance to its nearest centroid and average the
        // members of each cluster concurrently.
        Map<Integer, Averager> newCentroidsAv = data.parallelStream(batchSize)
                .map(instance -> Pair.newPair(centroids, instance))
                .collect(Collectors.groupingByConcurrent(Pair::getClusterID,
                        Collectors.reducing(new Averager(atts.getNumberOfAttributes()),
                                p -> new Averager(p.getDataInstance()),
                                Averager::combine)));
        // Move each centroid to its cluster's mean and measure how far it moved.
        double error = IntStream.rangeClosed(0, centroids.length - 1).mapToDouble(i -> {
            double distance = Pair.getED(centroids[i], newCentroidsAv.get(i).average());
            centroids[i] = newCentroidsAv.get(i).average();
            return distance;
        }).average().getAsDouble();
        if (error < epsilon) {
            change = false;
        }
        data.restart();
    }
    return centroids;
}
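The k-means update above uses the three-argument reducing collector as the downstream for groupingByConcurrent. A self-contained sketch of that combination, summing toy values per cluster (all names here are illustrative):

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import static java.util.stream.Collectors.groupingByConcurrent;
import static java.util.stream.Collectors.reducing;

public class GroupedSums {
    record Sample(int cluster, double value) {}

    public static void main(String[] args) {
        List<Sample> samples = List.of(
                new Sample(0, 1.0), new Sample(0, 3.0), new Sample(1, 2.0));
        // reducing(identity, mapper, op) folds each group as it accumulates:
        // here it sums the values assigned to each cluster id.
        ConcurrentMap<Integer, Double> sums = samples.parallelStream()
                .collect(groupingByConcurrent(
                        Sample::cluster,
                        reducing(0.0, Sample::value, Double::sum)));
        System.out.println(sums); // e.g. {0=4.0, 1=2.0}
    }
}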