/** Empties this component by delegating to the underlying cache. */
public void clear() {
    cache.clear();
}
/**
 * Removes every mapping, delegating to the backing cache.
 *
 * @see java.util.Map#clear()
 */
@Override
public void clear() {
    cache.clear();
}
/**
 * Test helper: empties the given cache, then zeroes the counting store's
 * load/contains counters so the next assertions start from a clean slate.
 * (Clear runs first so any store activity it causes is also reset.)
 */
private void reset(Cache<?, ?> cache, CountingStore countingStore) {
    cache.clear();
    countingStore.numLoads = 0;
    countingStore.numContains = 0;
}
/**
 * Handles a flush-all style request: clears the cache either immediately or
 * after the delay carried in {@code params}, then writes the acknowledgement
 * unless the client asked for no reply.
 *
 * NOTE(review): presumably implements the memcached {@code flush_all} command —
 * confirm against the protocol handler this belongs to.
 *
 * @param b            buffer holding any request parameters still to be parsed
 * @param ch           channel the response is written to
 * @param isReadParams whether parameters must first be read from {@code b}
 * @throws IOException propagated from parameter parsing / response writing
 */
private void flushAll(ByteBuf b, Channel ch, boolean isReadParams) throws IOException {
    if (isReadParams) {
        // Populates `params` (delay, noReply) from the request buffer.
        readParameters(ch, b);
    }
    int flushDelay = params == null ? 0 : params.flushDelay;
    if (flushDelay == 0) {
        // No delay requested: clear synchronously.
        cache.clear();
    } else {
        // Delayed flush: schedule the clear instead of blocking this thread.
        // (Direct lambda replaces the redundant Consumer indirection.)
        scheduler.schedule(() -> cache.clear(), toMillis(flushDelay), TimeUnit.MILLISECONDS);
    }
    // "noReply" suppresses the acknowledgement; writeResponse handles null.
    Object ret = params == null || !params.noReply ? OK : null;
    writeResponse(ch, ret);
}
// Clears the default cache; Void return with `return null` matches a
// PrivilegedAction<Void>-style callback. NOTE(review): the enclosing anonymous
// class and the call that executes it start outside this view — confirm there.
@Override
public Void run() {
    cacheManager.getCache().clear();
    return null;
}
});
// Clears the default cache; Void return with `return null` matches a
// PrivilegedAction<Void>-style callback. NOTE(review): the enclosing anonymous
// class and the call that executes it start outside this view — confirm there.
@Override
public Void run() {
    cacheManager.getCache().clear();
    return null;
}
});
/**
 * TestNG cleanup run after each test method: empties the cache and its
 * backing store (each only when initialized) so tests stay isolated.
 */
@AfterMethod
public void afterMethod() throws PersistenceException {
    if (cache != null) {
        cache.clear();
    }
    if (store != null) {
        store.clear();
    }
}
// Clears the default cache; Void return with `return null` matches a
// PrivilegedAction<Void>-style callback. NOTE(review): the enclosing anonymous
// class and the call that executes it start outside this view — confirm there.
@Override
public Void run() {
    cacheManager.getCache().clear();
    return null;
}
});
// TestNG cleanup: rolls back any transaction the test left open, then empties
// the cache. NOTE(review): the inline "// try to suspend? tm.suspend();"
// comment makes it ambiguous in this collapsed view whether suspend() actually
// executes inside the catch (both parses are brace-balanced) — confirm against
// the formatted source before reformatting.
@AfterMethod public void clearUp() throws SystemException { if (tm.getTransaction() != null) { try { tm.rollback(); } catch (Exception ignored) { // try to suspend? tm.suspend(); } } cache.clear(); }
// Clears the default cache; Void return with `return null` matches a
// PrivilegedAction<Void>-style callback. NOTE(review): the enclosing anonymous
// class and the call that executes it start outside this view — confirm there.
@Override
public Void run() {
    cacheManager.getCache().clear();
    return null;
}
});
@Override protected void teardown() { if (cache != null) { // a proper cache.clear() should ensure indexes and stores are cleared too if present // this is better and more complete than the cleanup performed by the superclass cache.clear(); } super.teardown(); }
/**
 * Post-test cleanup (runs even on failure): empties the default cache when a
 * manager exists, then deletes the on-disk index directory.
 */
@AfterMethod(alwaysRun = true)
public void clearCache() {
    if (cacheManager != null) {
        cacheManager.getCache().clear();
    }
    Util.recursiveFileRemove(indexDir);
}
/**
 * Spins up indexing and querying nodes, forms the cluster, warms it up, clears
 * the warmup data, then runs and summarizes the mixed read/write test.
 */
@Test
public void testQueryWithWrites() throws Exception {
    // mapToObj avoids the boxed().map(...) round-trip through Integer.
    nodes.addAll(range(0, getIndexingNodes())
        .mapToObj(i -> new IndexingNode(getIndexThreadsPerNode(), globalCounter))
        .collect(Collectors.toList()));
    nodes.addAll(range(0, getQueryingNodes())
        .mapToObj(i -> new QueryingNode(getQueryThreadsPerNode(), globalCounter, getQueryType()))
        .collect(Collectors.toList()));
    nodes.forEach(Node::addToCluster);
    waitForClusterToForm();
    warmup();
    // Drop warmup data so measurements start from an empty cache.
    nodes.get(0).cacheManager.getCache().clear();
    summarizeReadWriteTest(runTests());
}
/** Per-test cleanup: wipes cache contents and resets its statistics. */
@AfterMethod
public void cleanCache() {
    cache.clear();
    cache.getAdvancedCache().getStats().reset();
}
/** Verifies that clear() leaves the cache empty after a batch of puts. */
public void testSizeAfterClear() {
    final int entries = 10;
    for (int key = 0; key < entries; key++) {
        cache.put(key, "value" + key);
    }
    cache.clear();
    assertTrue(cache.isEmpty());
}
/**
 * clear() on one node must empty every cache in the cluster and remove the
 * prepared keys from all backing stores.
 */
public void testClear() throws Exception {
    prepareClearTest();
    c1.clear();
    // Every clustered cache must now be empty...
    for (Cache<Object, String> node : caches) {
        assert node.isEmpty();
    }
    // ...and the stores must no longer hold the keys written by the setup.
    for (int i = 0; i < 5; i++) {
        assertRemovedFromStores("k" + i);
    }
}
/**
 * A put through the cache must replace stale state planted directly in the
 * store: after the put, the old entry must no longer be present there.
 */
public void testCleanStoreOnPut() throws Exception {
    Cache<String, String> testCache = cacheManager.getCache(CACHE_NAME);
    testCache.clear();
    putIntoStore("key", "oldValue");
    testCache.put("key", "value");
    assertFalse(isEntryInStore("key"));
}
// Verifies that clear() removes expired entries too: even the entry that
// expired when the mocked time source advanced must be gone from the data
// container afterwards.
// NOTE(review): put(key, value, -1, null, maxIdle, MILLISECONDS) — lifespan -1
// (presumably immortal) with a null lifespan unit; confirm the overload
// tolerates null there and that the args are (lifespan, unit, maxIdle, unit).
public void testExpiredEntriesCleared() {
    cache.put("key-" + 0, "value-" + 1, -1, null, 0, TimeUnit.MILLISECONDS);
    cache.put("key-" + 1, "value-" + 1, -1, null, 1, TimeUnit.MILLISECONDS);
    // This should expire 1 of the entries
    timeService.advance(1);
    cache.clear();
    // sizeIncludingExpired() also counts entries not yet purged by expiration,
    // so 0 proves clear() truly emptied the container.
    assertEquals(0, cache.getAdvancedCache().getDataContainer().sizeIncludingExpired());
}
}
// Cross-site clear(): a clear issued on site LON must eventually remove the
// key from the remote backup site, even while a blocking interceptor delays
// the backup operation.
public void testClear() throws Exception {
    doPutWithDisabledBlockingInterceptor();
    cache("LON", 1).clear();
    // NOTE(review): the await() result is discarded — a timeout here would only
    // surface indirectly via the assertions below; confirm that is intended.
    blockingInterceptor.invocationReceivedLatch.await(20000, TimeUnit.MILLISECONDS);
    // The local site is already empty...
    assertNull(cache("LON", 0).get("k"));
    assertNull(cache("LON", 1).get("k"));
    // ...but the backup still holds the value while the interceptor blocks.
    assertEquals("v", backup("LON").get("k"));
    // Release the interceptor, then wait for the clear to reach the backup.
    blockingInterceptor.waitingLatch.countDown();
    eventuallyEquals(null, () -> backup("LON").get("k"));
}
// Writes on the origin site, verifies replication to the backup, then checks
// that clearing the origin propagates to the backup site.
private void testPutAndClear(String site) {
    String key = key(site);
    String val = val(site);
    cache(site, 0).put(key, val);
    // The value must have been replicated to the backup site.
    assertEquals(backup(site).get(key), val);
    cache(site, 0).clear();
    // NOTE(review): key+1 concatenates "1" onto the key string — presumably a
    // probe that a never-written key stays absent, but confirm it is not a
    // typo for a second key written elsewhere.
    assertNull(backup(site).get(key+1));
    assertNull(backup(site).get(key));
}