Refine search
/** Hammers the factory from many threads at once to prove metric registration is race-free. */
@Test
public void testShouldVerifyNoDuplicateMetricRegistered() {
    final MetricRegistry metricRegistry = new MetricRegistry();
    final MetricBuilderFactory metricBuilderFactory =
            new MetricBuilderFactory(() -> new MetricBuilder(metricRegistry));
    // 501 concurrent registrations of the same metric name must all succeed without collision
    IntStream.range(0, 501)
             .parallel()
             .forEach(attempt ->
                     metricBuilderFactory.newMetric("metric_name").buildCounter().incSuccess());
}
}
/**
 * Fills {@code target} with the target values for every array in the current minibatch.
 * Runs the per-sample lookups in parallel when parallel preprocessing is enabled.
 */
@Override
public void getNextTarget(Tensor target) {
    // one index per array in the current minibatch
    IntStream indices = IntStream.range(0, currentArrays.size());
    if (properties.getParallelPreprocessing()) {
        indices = indices.parallel();
    }
    indices.forEach(pos -> {
        float[] values =
                getNextTarget(augmentedToRaw.get(arrayToAugmented.get(currentArrays.get(pos))));
        if (values != null) {
            // copy this sample's target into its slot of the output tensor
            System.arraycopy(
                    values, 0,
                    target.getElements(),
                    target.getStartIndex() + pos * getTargetDimensions(),
                    values.length);
        }
    });
}
@GenerateMicroBenchmark // BEGIN parallel public Map<Integer, Double> parallelDiceRolls() { double fraction = 1.0 / N; return IntStream.range(0, N) // <1> .parallel() // <2> .mapToObj(twoDiceThrows()) // <3> .collect(groupingBy(side -> side, // <4> summingDouble(n -> fraction))); // <5> } // END parallel
/**
 * Computes the bitwise union of the supplied bitmaps.
 *
 * @param bitmaps the input bitmaps
 * @return a new bitmap containing the union
 */
public static RoaringBitmap or(RoaringBitmap... bitmaps) {
    SortedMap<Short, List<Container>> grouped = groupByKey(bitmaps);
    int sliceCount = grouped.size();
    short[] keys = new short[sliceCount];
    Container[] values = new Container[sliceCount];
    List<List<Container>> slices = new ArrayList<>(sliceCount);
    int cursor = 0;
    for (Map.Entry<Short, List<Container>> entry : grouped.entrySet()) {
        keys[cursor] = entry.getKey();
        slices.add(entry.getValue());
        ++cursor;
    }
    // union each key's containers independently, in parallel
    IntStream.range(0, sliceCount)
             .parallel()
             .forEach(pos -> values[pos] = or(slices.get(pos)));
    return new RoaringBitmap(new RoaringArray(keys, values, sliceCount));
}
/**
 * Computes the bitwise union of the supplied bitmaps.
 *
 * @param bitmaps the input bitmaps
 * @return a new mutable bitmap containing the union
 */
public static MutableRoaringBitmap or(ImmutableRoaringBitmap... bitmaps) {
    SortedMap<Short, List<MappeableContainer>> grouped = groupByKey(bitmaps);
    int sliceCount = grouped.size();
    short[] keys = new short[sliceCount];
    MappeableContainer[] values = new MappeableContainer[sliceCount];
    List<List<MappeableContainer>> slices = new ArrayList<>(sliceCount);
    int cursor = 0;
    for (Map.Entry<Short, List<MappeableContainer>> entry : grouped.entrySet()) {
        keys[cursor] = entry.getKey();
        slices.add(entry.getValue());
        ++cursor;
    }
    // union each key's containers independently, in parallel
    IntStream.range(0, sliceCount)
             .parallel()
             .forEach(pos -> values[pos] = or(slices.get(pos)));
    return new MutableRoaringBitmap(new MutableRoaringArray(keys, values, sliceCount));
}
private static Container or(List<Container> containers) { int parallelism; // if there are few enough containers it's possible no bitmaps will be materialised if (containers.size() < 16) { Container result = containers.get(0).clone(); for (int i = 1; i < containers.size(); ++i) { result = result.lazyIOR(containers.get(i)); } return result.repairAfterLazy(); } // heuristic to save memory if the union is large and likely to end up as a bitmap if (containers.size() < 512 || (parallelism = availableParallelism()) == 1) { Container result = new BitmapContainer(new long[1 << 10], -1); for (Container container : containers) { result = result.lazyIOR(container); } return result.repairAfterLazy(); } // we have an enormous slice (probably skewed), parallelise it int partitionSize = (containers.size() + parallelism - 1) / parallelism; return IntStream.range(0, parallelism) .parallel() .mapToObj(i -> containers.subList(i * partitionSize, Math.min((i + 1) * partitionSize, containers.size()))) .collect(OR); }
// NOTE(review): this snippet is truncated — the catch/try/if blocks are never closed,
// the enclosing method signature is not visible, and the trailing
// `taskIndices.forEach(...)` (presumably the non-private-pool fallback path) is
// unreachable as written. Review against the full file before changing.
Preconditions.checkArgument(parallelism >= 1); // at least one worker required
Objects.requireNonNull(task);
IntStream range = IntStream.range(0, numTasks);
// only go parallel when more than one worker was requested
IntStream taskIndices = parallelism > 1 ? range.parallel() : range;
if (parallelism > 1 && privatePool) {
    // dedicated pool so the shared ForkJoinPool.commonPool is not saturated
    ForkJoinPool pool = new ForkJoinPool(parallelism);
    try {
        pool.submit(() -> taskIndices.forEach(task::accept)).get();
    } catch (InterruptedException e) {
        // NOTE(review): interrupt flag is not restored before rethrowing — consider
        // Thread.currentThread().interrupt(); also the pool is never shut down here (leak)
        throw new IllegalStateException(e);
        taskIndices.forEach(task::accept);
private static MappeableContainer or(List<MappeableContainer> containers) { int parallelism; // if there are few enough containers it's possible no bitmaps will be materialised if (containers.size() < 16) { MappeableContainer result = containers.get(0).clone(); for (int i = 1; i < containers.size(); ++i) { result = result.lazyIOR(containers.get(i)); } return result.repairAfterLazy(); } // heuristic to save memory if the union is large and likely to end up as a bitmap if (containers.size() < 512 || (parallelism = availableParallelism()) == 1) { MappeableContainer result = new MappeableBitmapContainer(LongBuffer.allocate(1 << 10), -1); for (MappeableContainer container : containers) { result = result.lazyIOR(container); } return result.repairAfterLazy(); } // we have an enormous slice (probably skewed), parallelise it int partitionSize = (containers.size() + parallelism - 1) / parallelism; return IntStream.range(0, parallelism) .parallel() .mapToObj(i -> containers.subList(i * partitionSize, Math.min((i + 1) * partitionSize, containers.size()))) .collect(OR); }
@Test public void shouldBeThreadSafe() { // When: final List<CompletableFuture<Void>> futures = IntStream.range(1, 11).parallel() .mapToObj(idx -> { final CompletableFuture<Void> f = futureStore.getFutureForSequenceNumber(idx); if (idx % 10 == 0) { futureStore.completeFuturesUpToAndIncludingSequenceNumber(idx); } return f; }) .collect(Collectors.toList()); // Then: assertThat(futures.stream().allMatch(CompletableFuture::isDone), is(true)); }
/**
 * Splits {@code data} into at most {@code partitions} contiguous, non-empty sub-lists
 * of roughly equal size, optionally appending {@code tombstone} as a final sentinel element.
 *
 * @param data       the elements to partition; copied defensively, so the returned
 *                   sub-lists are views over an internal snapshot
 * @param partitions requested partition count; values below 1 are treated as 1
 *                   (previously a negative value produced an empty stream,
 *                   silently dropping every element)
 * @param tombstone  sentinel appended after the last partition, or {@code null} for none
 * @return a stream of non-empty partitions, followed by the tombstone if supplied
 */
public static Stream<List<Object>> partitionSubList(List<Object> data, int partitions, List<Object> tombstone) {
    if (partitions <= 0) partitions = 1; // guard: also covers negative counts, not just zero
    List<Object> list = new ArrayList<>(data);
    int total = list.size();
    // ceiling division so the last partition absorbs the remainder; at least 1 to avoid empty ranges
    int batchSize = Math.max((int) Math.ceil((double) total / partitions), 1);
    Stream<List<Object>> stream = IntStream.range(0, partitions).parallel()
            .mapToObj(part -> list.subList(
                    Math.min(part * batchSize, total),
                    Math.min((part + 1) * batchSize, total)))
            .filter(partition -> !partition.isEmpty());
    return tombstone == null ? stream : Stream.concat(stream, Stream.of(tombstone));
}
/**
 * Fills {@code input} with the next minibatch: takes one preprocessed array per
 * minibatch slot from the producer queue, then copies each into its slice of the tensor.
 * Copying runs in parallel when parallel preprocessing is enabled.
 */
@Override
public void getNextInput(Tensor input) {
    // minibatch size is the leading dimension of the input tensor
    int mb = input.getDimensions()[0];
    currentArrays.clear();
    for (int i = 0; i < mb; i++) {
        try {
            if (getValues().size() == 0) {
                createArrays();
            }
            currentArrays.add(getValues().take());
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag so callers can observe the interruption;
            // previously it was swallowed entirely.
            // NOTE(review): an interrupted take() leaves currentArrays short of mb
            // entries, so the copy loop below would throw IndexOutOfBoundsException —
            // pre-existing behavior, confirm intended handling with the owner.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
    IntStream stream = IntStream.range(0, mb);
    if (properties.getParallelPreprocessing()) {
        stream = stream.parallel();
    }
    stream.forEach(i -> {
        float[] a = currentArrays.get(i);
        // remember which raw sample landed in which minibatch slot
        rawMBPosition.put(i, augmentedToRaw.get(arrayToAugmented.get(a)));
        System.arraycopy(a, 0, input.getElements(),
                input.getStartIndex() + i * getInputDimensions(), a.length);
    });
}
/**
 * Partitions {@code list} into consecutive batches of at most {@code batchSize} elements.
 *
 * @param list      the elements to partition; the returned batches are sub-list views of it
 * @param batchSize maximum elements per batch; values below 1 are treated as 1
 *                  (previously 0 threw ArithmeticException and a negative value
 *                  silently produced an empty stream, dropping every element)
 * @return a stream of consecutive batches covering the whole list
 */
private Stream<List<Object>> partitionList(@Name("values") List list, @Name("batchSize") int batchSize) {
    // effectively-final copy: guards the divide-by-zero and keeps the lambda capture legal
    final int size = batchSize > 0 ? batchSize : 1;
    int total = list.size();
    // ceiling division: number of pages needed to cover the whole list
    int pages = (total + size - 1) / size;
    return IntStream.range(0, pages).parallel().boxed()
            .map(page -> {
                int from = page * size;
                return list.subList(from, Math.min(from + size, total));
            });
}