/** Returns a queue seeded with {@code items} sequential integers starting at zero. */
static MpscGrowableArrayQueue<Integer> makePopulated(int items) {
  MpscGrowableArrayQueue<Integer> queue = new MpscGrowableArrayQueue<>(4, FULL_SIZE);
  for (int value = 0; value < items; value++) {
    queue.offer(value);
  }
  return queue;
}
}
/** Polling an empty queue must yield {@code null} rather than blocking or throwing. */
@Test(dataProvider = "empty")
public void poll_whenEmpty(MpscGrowableArrayQueue<Integer> buffer) {
  Integer head = buffer.poll();
  assertThat(head, is(nullValue()));
}
/** Races one producer against one consumer; all items must transfer and the queue end empty. */
@Test(dataProvider = "empty")
public void oneProducer_oneConsumer(MpscGrowableArrayQueue<Integer> buffer) {
  AtomicInteger ready = new AtomicInteger();
  AtomicInteger done = new AtomicInteger();

  // Producer: spins until every element has been accepted by the queue
  ConcurrentTestHarness.execute(() -> {
    ready.incrementAndGet();
    Awaits.await().untilAtomic(ready, is(2));
    for (int value = 0; value < PRODUCE; value++) {
      while (!buffer.offer(value)) {}
    }
    done.incrementAndGet();
  });

  // Consumer: spins until every element has been observed
  ConcurrentTestHarness.execute(() -> {
    ready.incrementAndGet();
    Awaits.await().untilAtomic(ready, is(2));
    for (int taken = 0; taken < PRODUCE; taken++) {
      while (buffer.poll() == null) {}
    }
    done.incrementAndGet();
  });

  Awaits.await().untilAtomic(done, is(2));
  assertThat(buffer.size(), is(0));
}
/**
 * When the write buffer is already full, {@code afterWrite} must fall back to performing the
 * maintenance work itself: every queued task runs, followed by the caller's task.
 */
@Test(dataProvider = "caches")
@CacheSpec(compute = Compute.SYNC, implementation = Implementation.Caffeine,
    population = Population.FULL, maximumSize = Maximum.FULL)
public void afterWrite_drainFullWriteBuffer(Cache<Integer, Integer> cache, CacheContext context) {
  BoundedLocalCache<Integer, Integer> localCache = asBoundedLocalCache(cache);
  localCache.drainStatus = PROCESSING_TO_IDLE;

  int[] processed = { 0 };
  Runnable pendingTask = () -> processed[0]++;

  // Saturate the write buffer, recording how many tasks it accepted
  int[] expectedCount = { 0 };
  while (localCache.writeBuffer().offer(pendingTask)) {
    expectedCount[0]++;
  }

  int[] triggered = { 0 };
  Runnable triggerTask = () -> triggered[0] = 1 + expectedCount[0];
  localCache.afterWrite(triggerTask);

  assertThat(processed[0], is(expectedCount[0]));
  assertThat(triggered[0], is(expectedCount[0] + 1));
}
@Parameterized.Parameters public static Collection<Object[]> parameters() { ArrayList<Object[]> list = new ArrayList<Object[]>(); // MPSC size 1 list.add(makeQueue(0, 1, 4, Ordering.FIFO, new MpscGrowableArrayQueue<>(2, 4))); // MPSC size SIZE list.add(makeQueue(0, 1, SIZE, Ordering.FIFO, new MpscGrowableArrayQueue<>(8, SIZE))); return list; }
/** A pre-populated queue must report the populated element count. */
@Test(dataProvider = "populated")
public void size_whenPopulated(MpscGrowableArrayQueue<Integer> buffer) {
  int size = buffer.size();
  assertThat(size, is(POPULATED_SIZE));
}
/** Races many producers against one consumer; all items must transfer and the queue end empty. */
@Test(dataProvider = "empty")
public void manyProducers_oneConsumer(MpscGrowableArrayQueue<Integer> buffer) {
  AtomicInteger ready = new AtomicInteger();
  AtomicInteger done = new AtomicInteger();

  // Single consumer drains everything produced across all producers
  ConcurrentTestHarness.execute(() -> {
    ready.incrementAndGet();
    Awaits.await().untilAtomic(ready, is(NUM_PRODUCERS + 1));
    for (int taken = 0; taken < (NUM_PRODUCERS * PRODUCE); taken++) {
      while (buffer.poll() == null) {}
    }
    done.incrementAndGet();
  });

  // Each producer spins until its elements are accepted
  ConcurrentTestHarness.timeTasks(NUM_PRODUCERS, () -> {
    ready.incrementAndGet();
    Awaits.await().untilAtomic(ready, is(NUM_PRODUCERS + 1));
    for (int value = 0; value < PRODUCE; value++) {
      while (!buffer.offer(value)) {}
    }
    done.incrementAndGet();
  });

  Awaits.await().untilAtomic(done, is(NUM_PRODUCERS + 1));
  assertThat(buffer.size(), is(0));
}
/**
 * Performs the post-processing work required after a write.
 * <p>
 * When writes are buffered, the task is enqueued onto the write buffer and a drain is scheduled.
 * If the buffer stays full across all retries, the maintenance work is performed synchronously
 * on the calling thread so that progress is guaranteed even when the executor is saturated.
 *
 * @param task the pending operation to be applied
 */
void afterWrite(Runnable task) {
  if (buffersWrites()) {
    // Retry a bounded number of times, nudging the drain along on each failed offer
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer().offer(task)) {
        scheduleAfterWrite();
        return;
      }
      scheduleDrainBuffers();
    }

    // The maintenance task may be scheduled but not running due to all of the executor's threads
    // being busy. If all of the threads are writing into the cache then no progress can be made
    // without assistance.
    try {
      performCleanUp(task);
    } catch (RuntimeException e) {
      // Best-effort: a failed cleanup must not propagate to the writer
      logger.log(Level.SEVERE, "Exception thrown when performing the maintenance task", e);
    }
  } else {
    scheduleAfterWrite();
  }
}
/** An empty queue must report a size of zero. */
@Test(dataProvider = "empty")
public void size_whenEmpty(MpscGrowableArrayQueue<Integer> buffer) {
  int size = buffer.size();
  assertThat(size, is(0));
}
/** Drains the write buffer, running at most {@code WRITE_BUFFER_MAX} pending tasks per call. */
@GuardedBy("evictionLock")
void drainWriteBuffer() {
  if (buffersWrites()) {
    for (int drained = 0; drained < WRITE_BUFFER_MAX; drained++) {
      Runnable pending = writeBuffer().poll();
      if (pending == null) {
        return;
      }
      pending.run();
    }
  }
}
/** * Performs the post-processing work required after a write. * * @param task the pending operation to be applied */ void afterWrite(Runnable task) { if (buffersWrites()) { for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) { if (writeBuffer().offer(task)) { scheduleAfterWrite(); return; } scheduleDrainBuffers(); } // The maintenance task may be scheduled but not running due to all of the executor's threads // being busy. If all of the threads are writing into the cache then no progress can be made // without assistance. try { performCleanUp(task); } catch (RuntimeException e) { logger.log(Level.SEVERE, "Exception thrown when performing the maintenance task", e); } } else { scheduleAfterWrite(); } }
/** A write-side task handed to {@code afterWrite} must run, leaving the buffer empty. */
@Test(dataProvider = "caches")
@CacheSpec(compute = Compute.SYNC, implementation = Implementation.Caffeine,
    population = Population.EMPTY, maximumSize = Maximum.FULL)
public void exceedsMaximumBufferSize_onWrite(Cache<Integer, Integer> cache, CacheContext context) {
  BoundedLocalCache<Integer, Integer> localCache = asBoundedLocalCache(cache);

  boolean[] ran = new boolean[1];
  localCache.afterWrite(() -> ran[0] = true);

  assertThat(ran[0], is(true));
  assertThat(localCache.writeBuffer().size(), is(0));
}
@Override @SuppressWarnings("FutureReturnValueIgnored") public void clear() { evictionLock.lock(); try { long now = expirationTicker().read(); // Apply all pending writes Runnable task; while (buffersWrites() && (task = writeBuffer().poll()) != null) { task.run(); } // Discard all entries for (Node<K, V> node : data.values()) { removeNode(node, now); } // Discard all pending reads readBuffer.drainTo(e -> {}); } finally { evictionLock.unlock(); } }
/** Runs maintenance repeatedly until the cache's write buffer has been fully consumed. */
private void drain(BoundedLocalCache<K, V> cache) {
  boolean pending;
  do {
    cache.cleanUp();
    pending = cache.buffersWrites() && (cache.writeBuffer().size() > 0);
  } while (pending);
}
/** With no consumer, the queue's size must equal the number of successful offers. */
@Test(dataProvider = "empty")
public void manyProducers_noConsumer(MpscGrowableArrayQueue<Integer> buffer) {
  AtomicInteger accepted = new AtomicInteger();
  ConcurrentTestHarness.timeTasks(NUM_PRODUCERS, () -> {
    for (int value = 0; value < PRODUCE; value++) {
      if (buffer.offer(value)) {
        accepted.incrementAndGet();
      }
    }
  });
  assertThat(buffer.size(), is(accepted.get()));
}