/**
 * Runs {@code numTasks} copies of {@code task} on a shared thread pool, with no cap
 * on how many execute simultaneously.
 *
 * @param numTasks number of invocations of {@code task}; all may run at once
 * @param task work to perform; receives the invocation's index in [0, numTasks)
 */
public static void doInParallel(int numTasks, Consumer<Integer> task) {
  doInParallel(numTasks, numTasks, false, task);
}
/**
 * Runs {@code numTasks} tasks through {@link ExecUtils#doInParallel} and reports the
 * greatest number of tasks ever observed executing at the same moment. Each task
 * registers itself in a shared set, sleeps one second, then deregisters; the peak
 * set size is the observed concurrency.
 *
 * @param numTasks total number of tasks to run
 * @param parallelism requested degree of parallelism
 * @param privatePool whether a private (rather than shared) pool is used
 * @return maximum number of concurrently active tasks observed
 */
private static int maxActive(int numTasks, int parallelism, boolean privatePool) {
  Set<Integer> running = new HashSet<>();
  AtomicInteger peak = new AtomicInteger();
  ExecUtils.doInParallel(numTasks, parallelism, privatePool, index -> {
    synchronized (running) {
      running.add(index);
      // Record a new high-water mark while still holding the lock, so the
      // set size read here is consistent with the add above.
      peak.accumulateAndGet(running.size(), Math::max);
    }
    sleepSeconds(1);
    synchronized (running) {
      running.remove(index);
    }
  });
  return peak.get();
}
/**
 * Verifies that concurrent {@code setVector} calls are all recorded: 16 workers each
 * add 10,000 vectors under distinct IDs drawn from a shared counter, after which the
 * structure must hold exactly 160,000 entries.
 */
@Test
public void testConcurrent() throws Exception {
  PartitionedFeatureVectors fv = new PartitionedFeatureVectors(NUM_PARTITIONS, getExecutor());
  AtomicInteger counter = new AtomicInteger();
  int numWorkers = 16;
  int numIterations = 10000;
  ExecUtils.doInParallel(numWorkers, worker -> {
    for (int iteration = 0; iteration < numIterations; iteration++) {
      // getAndIncrement guarantees every vector gets a unique ID across workers
      int id = counter.getAndIncrement();
      fv.setVector(Integer.toString(id), new float[] { id });
    }
  });
  long expected = (long) numIterations * numWorkers;
  assertEquals(expected, fv.size());
  assertEquals(expected, counter.get());
}
/**
 * Verifies concurrent adds and removes: 16 workers each store 10,000 vectors under
 * unique counter-derived IDs, the totals are checked, and then the same workers
 * concurrently remove every vector, leaving the partition empty.
 */
@Test
public void testConcurrent() throws Exception {
  FeatureVectorsPartition fv = new FeatureVectorsPartition();
  AtomicInteger counter = new AtomicInteger();
  int numWorkers = 16;
  int numIterations = 10000;
  ExecUtils.doInParallel(numWorkers, worker -> {
    for (int iteration = 0; iteration < numIterations; iteration++) {
      // Unique ID per stored vector, shared across all workers
      int id = counter.getAndIncrement();
      fv.setVector(Integer.toString(id), new float[] { id });
    }
  });
  long expected = (long) numIterations * numWorkers;
  assertEquals(expected, fv.size());
  assertEquals(expected, counter.get());
  ExecUtils.doInParallel(numWorkers, worker -> {
    for (int iteration = 0; iteration < numIterations; iteration++) {
      // decrementAndGet walks the counter back down, visiting each stored ID
      // exactly once across all workers
      fv.removeVector(Integer.toString(counter.decrementAndGet()));
    }
  });
  assertEquals(0, fv.size());
}
@Test public void testRecommendLoad() throws Exception { AtomicLong count = new AtomicLong(); Mean meanReqTimeNanos = new Mean(); long start = System.nanoTime(); int workers = LoadTestALSModelFactory.WORKERS; ExecUtils.doInParallel(workers, workers, true, i -> { RandomGenerator random = RandomManager.getRandom(Integer.toString(i).hashCode() ^ System.nanoTime()); for (int j = 0; j < LoadTestALSModelFactory.REQS_PER_WORKER; j++) { String userID = "U" + random.nextInt(LoadTestALSModelFactory.USERS); long callStart = System.nanoTime(); target("/recommend/" + userID).request() .accept(MediaType.APPLICATION_JSON_TYPE).get(LIST_ID_VALUE_TYPE); long timeNanos = System.nanoTime() - callStart; if (j > 0) { // Ignore first iteration's time as 'burn in' synchronized (meanReqTimeNanos) { meanReqTimeNanos.increment(timeNanos); } } long currentCount = count.incrementAndGet(); if (currentCount % 100 == 0) { log(currentCount, meanReqTimeNanos, start); } } }); int totalRequests = workers * LoadTestALSModelFactory.REQS_PER_WORKER; log(totalRequests, meanReqTimeNanos, start); }
/**
 * Convenience overload: executes {@code task} {@code numTasks} times on a shared
 * thread pool, allowing all executions to proceed simultaneously.
 *
 * @param numTasks how many times to invoke {@code task}; all may run concurrently
 * @param task work to execute, given its invocation index in [0, numTasks)
 */
public static void doInParallel(int numTasks, Consumer<Integer> task) {
  doInParallel(numTasks, numTasks, false, task);
}