/**
 * Conditionally schedules the asynchronous maintenance task after a write operation. If the
 * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
 * is already processing then it is set to transition to REQUIRED upon completion so that a new
 * execution is triggered by the next operation.
 */
void scheduleAfterWrite() {
  // Lock-free state machine: loop until a terminal transition is made or observed. Another
  // thread may change the status concurrently, so transitions use compare-and-swap.
  for (;;) {
    switch (drainStatus()) {
      case IDLE:
        // Best-effort CAS: if it fails another thread already advanced the status, and the
        // schedule below is still safe to perform.
        casDrainStatus(IDLE, REQUIRED);
        scheduleDrainBuffers();
        return;
      case REQUIRED:
        scheduleDrainBuffers();
        return;
      case PROCESSING_TO_IDLE:
        // A drain is in flight; ask it to transition to REQUIRED when it completes so that
        // this write is eventually processed. On CAS failure, re-read the status and retry.
        if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
          return;
        }
        continue;
      case PROCESSING_TO_REQUIRED:
        // A drain is in flight and already flagged to run again; nothing more to do.
        return;
      default:
        throw new IllegalStateException();
    }
  }
}
/**
 * Performs the maintenance work, blocking until the lock is acquired. Any exception thrown,
 * such as by {@link CacheWriter#delete}, is propagated to the caller.
 *
 * @param task an additional pending task to run, or {@code null} if not present
 */
void performCleanUp(@Nullable Runnable task) {
  evictionLock.lock();
  try {
    maintenance(task);
  } finally {
    evictionLock.unlock();
  }

  // If more work arrived while draining and the common pool is in use, reschedule eagerly;
  // a custom executor is instead triggered by a subsequent operation.
  boolean rescheduleRequired =
      (drainStatus() == REQUIRED) && (executor == ForkJoinPool.commonPool());
  if (rescheduleRequired) {
    scheduleDrainBuffers();
  }
}
/** Verifies the drain-status transitions performed when scheduling the maintenance task. */
@Test
public void scheduleDrainBuffers() {
  Executor executor = Mockito.mock(Executor.class);
  BoundedLocalCache<?, ?> cache = new BoundedLocalCache<Object, Object>(
      Caffeine.newBuilder().executor(executor), /* loader */ null, /* async */ false) {};

  // Each entry maps an initial drain status to the status expected after scheduling
  Map<Integer, Integer> transitions = ImmutableMap.of(
      IDLE, PROCESSING_TO_IDLE,
      REQUIRED, PROCESSING_TO_IDLE,
      PROCESSING_TO_IDLE, PROCESSING_TO_IDLE,
      PROCESSING_TO_REQUIRED, PROCESSING_TO_REQUIRED);
  for (Map.Entry<Integer, Integer> transition : transitions.entrySet()) {
    Integer initialStatus = transition.getKey();
    Integer expectedStatus = transition.getValue();

    cache.drainStatus = initialStatus;
    cache.scheduleDrainBuffers();
    assertThat(cache.drainStatus, is(expectedStatus));

    // A status change implies the maintenance task was submitted to the executor
    if (!initialStatus.equals(expectedStatus)) {
      Mockito.verify(executor).execute(any());
      Mockito.reset(executor);
    }
  }
}
/**
 * Remaps the value for {@code key} only when a live (non-null, unexpired) mapping is present,
 * recording stats and delegating the locked update to {@link #remap}.
 */
@Override
public @Nullable V computeIfPresent(K key,
    BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
  requireNonNull(key);
  requireNonNull(remappingFunction);

  // An optimistic fast path to avoid unnecessary locking
  Object lookupKey = nodeFactory.newLookupKey(key);
  @Nullable Node<K, V> node = data.get(lookupKey);
  long now;
  if (node == null) {
    return null;
  } else if ((node.getValue() == null)
      // The ticker is only read when the value is non-null; if the entry is dead the inline
      // assignment is skipped by short-circuit evaluation and `now` is never used.
      || hasExpired(node, (now = expirationTicker().read()))) {
    // Dead or expired entry: schedule maintenance to evict it and report absent
    scheduleDrainBuffers();
    return null;
  }

  BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
      statsAware(remappingFunction, /* recordMiss */ false, /* recordLoad */ true);
  return remap(key, lookupKey, statsAwareRemappingFunction,
      new long[] { now }, /* computeIfAbsent */ false);
}
/** * Performs the post-processing work required after a write. * * @param task the pending operation to be applied */ void afterWrite(Runnable task) { if (buffersWrites()) { for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) { if (writeBuffer().offer(task)) { scheduleAfterWrite(); return; } scheduleDrainBuffers(); } // The maintenance task may be scheduled but not running due to all of the executor's threads // being busy. If all of the threads are writing into the cache then no progress can be made // without assistance. try { performCleanUp(task); } catch (RuntimeException e) { logger.log(Level.SEVERE, "Exception thrown when performing the maintenance task", e); } } else { scheduleAfterWrite(); } }
/**
 * Verifies that scheduling the maintenance task does not block the scheduling thread even when
 * another thread holds the eviction lock (i.e. a drain is already in progress).
 */
@Test(dataProvider = "caches")
@CacheSpec(compute = Compute.SYNC, implementation = Implementation.Caffeine,
    population = Population.EMPTY, maximumSize = Maximum.FULL)
public void drain_nonblocking(Cache<Integer, Integer> cache, CacheContext context) {
  BoundedLocalCache<Integer, Integer> localCache = asBoundedLocalCache(cache);
  AtomicBoolean done = new AtomicBoolean();
  Runnable task = () -> {
    localCache.lazySetDrainStatus(REQUIRED);
    localCache.scheduleDrainBuffers();
    // Reaching this point proves scheduleDrainBuffers() returned without blocking
    done.set(true);
  };
  // Hold the eviction lock while the background task schedules a drain
  localCache.evictionLock.lock();
  try {
    ConcurrentTestHarness.execute(task);
    await().untilTrue(done);
  } finally {
    localCache.evictionLock.unlock();
  }
}
/**
 * Performs the post-processing work required after a read.
 *
 * @param node the entry in the page replacement policy
 * @param now the current time, in nanoseconds
 * @param recordHit if the hit count should be incremented
 */
void afterRead(Node<K, V> node, long now, boolean recordHit) {
  if (recordHit) {
    statsCounter().recordHits(1);
  }

  // The read buffer is only consulted when it is in use; a full buffer means the drain
  // cannot be delayed any further. Short-circuiting skips the offer when reads are not
  // buffered, matching the original evaluation order.
  boolean bufferFull = !skipReadBuffer() && (readBuffer.offer(node) == Buffer.FULL);
  if (shouldDrainBuffers(!bufferFull)) {
    scheduleDrainBuffers();
  }
  refreshIfNeeded(node, now);
}
/**
 * Returns the live value mapped to {@code key}, or {@code null} when absent, expired, or its
 * (possibly weakly/softly held) value has been collected; optionally records hit/miss stats.
 */
@Override
public @Nullable V getIfPresent(Object key, boolean recordStats) {
  Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
  if (node == null) {
    if (recordStats) {
      statsCounter().recordMisses(1);
    }
    return null;
  }

  V value = node.getValue();
  long now = expirationTicker().read();
  // Treat an expired entry, or a collected value (when value collection is enabled), as a
  // miss and schedule maintenance so that the dead entry is evicted
  if (hasExpired(node, now) || (collectValues() && (value == null))) {
    if (recordStats) {
      statsCounter().recordMisses(1);
    }
    scheduleDrainBuffers();
    return null;
  }

  if (!isComputingAsync(node)) {
    @SuppressWarnings("unchecked")
    K castedKey = (K) key;
    // Refresh the access-based and variable expiration policies for this read
    setAccessTime(node, now);
    setVariableTime(node, expireAfterRead(node, castedKey, value, expiry(), now));
  }
  afterRead(node, now, recordStats);
  return value;
}
/**
 * Conditionally schedules the asynchronous maintenance task after a write operation. If the
 * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
 * is already processing then it is set to transition to REQUIRED upon completion so that a new
 * execution is triggered by the next operation.
 */
void scheduleAfterWrite() {
  while (true) {
    int status = drainStatus();
    if (status == IDLE) {
      // Best-effort transition; even on a racy failure scheduling the drain remains safe
      casDrainStatus(IDLE, REQUIRED);
      scheduleDrainBuffers();
      return;
    } else if (status == REQUIRED) {
      scheduleDrainBuffers();
      return;
    } else if (status == PROCESSING_TO_IDLE) {
      // Request that the in-flight drain runs again when it completes
      if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
        return;
      }
      // Lost the race with another transition; re-read the status and retry
    } else if (status == PROCESSING_TO_REQUIRED) {
      return;
    } else {
      throw new IllegalStateException();
    }
  }
}
@Override public @Nullable V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { requireNonNull(key); requireNonNull(remappingFunction); // A optimistic fast path to avoid unnecessary locking Object lookupKey = nodeFactory.newLookupKey(key); @Nullable Node<K, V> node = data.get(lookupKey); long now; if (node == null) { return null; } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) { scheduleDrainBuffers(); return null; } BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction = statsAware(remappingFunction, /* recordMiss */ false, /* recordLoad */ true); return remap(key, lookupKey, statsAwareRemappingFunction, new long[] { now }, /* computeIfAbsent */ false); }
/**
 * Performs the post-processing work required after a write.
 *
 * @param task the pending operation to be applied
 */
void afterWrite(Runnable task) {
  if (buffersWrites()) {
    // Retry the enqueue a bounded number of times, nudging the drain on each rejection
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer().offer(task)) {
        scheduleAfterWrite();
        return;
      }
      scheduleDrainBuffers();
    }

    // The maintenance task may be scheduled but not running due to all of the executor's threads
    // being busy. If all of the threads are writing into the cache then no progress can be made
    // without assistance.
    try {
      performCleanUp(task);
    } catch (RuntimeException e) {
      logger.log(Level.SEVERE, "Exception thrown when performing the maintenance task", e);
    }
  } else {
    // Writes are applied directly; only the maintenance scheduling is needed
    scheduleAfterWrite();
  }
}
/**
 * Performs the post-processing work required after a read.
 *
 * @param node the entry in the page replacement policy
 * @param now the current time, in nanoseconds
 * @param recordHit if the hit count should be incremented
 */
void afterRead(Node<K, V> node, long now, boolean recordHit) {
  if (recordHit) {
    statsCounter().recordHits(1);
  }
  // The drain may be delayed unless reads are buffered and the buffer just filled up;
  // short-circuiting skips the offer entirely when the read buffer is not in use
  boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
  if (shouldDrainBuffers(delayable)) {
    scheduleDrainBuffers();
  }
  refreshIfNeeded(node, now);
}
/**
 * Returns the live value mapped to {@code key}, or {@code null} when absent or expired;
 * optionally records hit/miss statistics.
 */
@Override
public @Nullable V getIfPresent(Object key, boolean recordStats) {
  Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
  if (node == null) {
    if (recordStats) {
      statsCounter().recordMisses(1);
    }
    return null;
  }

  long now = expirationTicker().read();
  if (hasExpired(node, now)) {
    if (recordStats) {
      statsCounter().recordMisses(1);
    }
    // Schedule maintenance so the expired entry is evicted
    scheduleDrainBuffers();
    return null;
  }

  @SuppressWarnings("unchecked")
  K castedKey = (K) key;
  V value = node.getValue();

  if (!isComputingAsync(node)) {
    // Refresh the variable and access-based expiration policies for this read
    setVariableTime(node, expireAfterRead(node, castedKey, value, expiry(), now));
    setAccessTime(node, now);
  }
  afterRead(node, now, recordStats);
  return value;
}