/** {@inheritDoc} */
@Override public String toString() {
    // Delegate to the standard string builder with the cache name and current size.
    String cacheName = name();
    int cacheSize = size();

    return S.toString(GridCacheAdapter.class, this, "name", cacheName, "size", cacheSize);
}
// NOTE(review): backup counter is assigned the total off-heap entry count — presumably every
// off-heap entry tracked here counts as a backup in this context; confirm against the caller.
// Both int and long size snapshots are taken; sizeLong() avoids int overflow on huge caches.
offHeapBackupEntriesCnt = offHeapEntriesCnt; size = cctx.cache().size(); sizeLong = cctx.cache().sizeLong();
/** {@inheritDoc} */ @Override public void removeAll() throws IgniteCheckedException { assert ctx.isLocal(); // We do batch and recreate cursor because removing using a single cursor // will cause it to reinitialize on each merged page. List<K> keys = new ArrayList<>(Math.min(REMOVE_ALL_KEYS_BATCH, size())); do { Iterator<CacheDataRow> it = ctx.offheap().cacheIterator(ctx.cacheId(), true, true, null, null, null); while (it.hasNext() && keys.size() < REMOVE_ALL_KEYS_BATCH) keys.add((K)it.next().key()); removeAll(keys); keys.clear(); } while (!isEmpty()); }
// Condition polled (up to the test timeout) until the expected total entry count is observed.
// NOTE(review): assumes cnt is the expected number of entries across the regular and near
// caches — confirm against the surrounding waitForCondition call, which is out of view here.
@Override public boolean apply() { // Every node contains either near, backup, or primary. return cnt == cache.size() + near.nearSize(); } }, getTestTimeout()));
/** * Split clearLocally all task into multiple runnables. * * @param srv Whether to clear server cache. * @param near Whether to clear near cache. * @param readers Whether to clear readers. * @return Split runnables. */ public List<GridCacheClearAllRunnable<K, V>> splitClearLocally(boolean srv, boolean near, boolean readers) { if ((isNear() && near) || (!isNear() && srv)) { int keySize = size(); int cnt = Math.min(keySize / CLEAR_ALL_SPLIT_THRESHOLD + (keySize % CLEAR_ALL_SPLIT_THRESHOLD != 0 ? 1 : 0), Runtime.getRuntime().availableProcessors()); if (cnt == 0) cnt = 1; // Still perform cleanup since there could be entries in swap. GridCacheVersion obsoleteVer = ctx.versions().next(); List<GridCacheClearAllRunnable<K, V>> res = new ArrayList<>(cnt); for (int i = 0; i < cnt; i++) res.add(new GridCacheClearAllRunnable<>(this, obsoleteVer, i, cnt, readers)); return res; } else return null; }
/** * @param ignite1 Grid 1. * @param ignite2 Grid 2. * @throws Exception If failed. */ private void checkCachesConsistency(Ignite ignite1, Ignite ignite2) throws Exception { IgniteKernal g1 = (IgniteKernal)ignite1; IgniteKernal g2 = (IgniteKernal)ignite2; GridCacheAdapter<Integer, Object> cache1 = g1.internalCache(DEFAULT_CACHE_NAME); GridCacheAdapter<Integer, Object> cache2 = g2.internalCache(DEFAULT_CACHE_NAME); // Sleeping to allow the cache sizes to settle down. U.sleep(3000); info("Cache1 size: " + cache1.size(ALL_PEEK_MODES)); info("Cache2 size: " + cache2.size(ALL_PEEK_MODES)); assert cache1.size(ALL_PEEK_MODES) == cache2.size(ALL_PEEK_MODES) : "Sizes do not match [s1=" + cache1.size(ALL_PEEK_MODES) + ", s2=" + cache2.size(ALL_PEEK_MODES) + ']'; for (Integer key : cache1.keySet()) { Object e = cache1.localPeek(key, new CachePeekMode[] {CachePeekMode.ONHEAP}); if (e != null) assert cache2.containsKey(key) : "Cache2 does not contain key: " + key; } }
/**
 * @throws Exception If test failed.
 */
@SuppressWarnings({"TooBroadScope"})
@Test public void testNodeRestart() throws Exception {
    Assume.assumeFalse("https://issues.apache.org/jira/browse/IGNITE-10082", MvccFeatureChecker.forcedMvcc());

    final int keyCnt = 1000;
    final int retries = 20;

    Ignite g0 = startGrid(0);
    Ignite g1 = startGrid(1);

    // Populate the cache through the first node.
    for (int key = 0; key < keyCnt; key++)
        g0.cache(DEFAULT_CACHE_NAME).put(key, key);

    // Both initial nodes must report the full data set.
    assertEquals(keyCnt, ((IgniteKernal)g0).internalCache(DEFAULT_CACHE_NAME).size());
    assertEquals(keyCnt, ((IgniteKernal)g1).internalCache(DEFAULT_CACHE_NAME).size());

    // Repeatedly join and stop a third node; each join must preload all entries.
    for (int attempt = 0; attempt < retries; attempt++) {
        info("Starting additional grid node...");

        Ignite g2 = startGrid(2);

        assertEquals(keyCnt, ((IgniteKernal)g2).internalCache(DEFAULT_CACHE_NAME).size());

        info("Stopping additional grid node...");

        stopGrid(2);
    }
}
/**
 * Checks that a read-through {@code get} populates both the near and DHT caches
 * from the underlying store.
 *
 * @throws Exception If failed.
 */
@Test public void testReadThrough() throws Exception {
    IgniteCache<Integer, String> near = jcache();
    GridCacheAdapter<Integer, String> dht = dht();

    String s = near.get(1);

    assert s != null;

    // Fix: expected value goes first in assertEquals (was reversed), so failure
    // messages report expected/actual correctly.
    assertEquals("1", s);

    // Size asserted twice in the original — kept to verify the read did not alter it.
    assertEquals(1, near.size());
    assertEquals(1, near.size());

    String d = localPeek(dht, 1);

    assert d != null;

    assertEquals("1", d);

    assert dht.size() == 1;
    assert dht.size() == 1;

    // The value must have been served by the store.
    assert store.hasValue(1);
}
/**
 * Test Optimistic repeatable read write-through.
 *
 * @throws Exception If failed.
 */
@Test public void testOptimisticTxWriteThrough() throws Exception {
    IgniteCache<Object, Object> near = jcache();
    GridCacheAdapter<Integer, String> dht = dht();

    try (Transaction tx = grid().transactions().txStart(OPTIMISTIC, REPEATABLE_READ)) {
        near.put(2, "2");
        near.put(3, "3");

        // Uncommitted values are visible inside the transaction via the near cache.
        assert "2".equals(near.get(2));
        assert "3".equals(near.get(3));

        // But nothing has been written through to the DHT entry yet.
        GridCacheEntryEx dhtEntry = dht.peekEx(2);

        assert dhtEntry == null || dhtEntry.rawGet() == null : "Invalid entry: " + dhtEntry;

        tx.commit();
    }

    // After commit the values are visible from both caches.
    assert "2".equals(near.get(2));
    assert "3".equals(near.get(3));

    assert "2".equals(dht.get(2));
    assert "3".equals(dht.get(3));

    // NOTE(review): duplicated size assertions preserved from the original —
    // presumably verifying the preceding reads left sizes unchanged.
    assertEquals(2, near.size());
    assertEquals(2, near.size());

    assertEquals(2, dht.size());
    assertEquals(2, dht.size());
}
// Snapshot of the IGFS data cache entry count.
int dataSize = getDataCache(igfs).size();
/** @throws Exception If failed. */
@Test public void testNearEnabledTwoNodes() throws Exception {
    gridCnt = 2;

    startGridsMultiThreaded(gridCnt);

    try {
        final int cnt = 100;

        // Write all keys from every node via a broadcast closure.
        grid(0).compute().broadcast(new IgniteCallable<Object>() {
            @IgniteInstanceResource
            private Ignite ignite;

            @Override public Object call() throws Exception {
                IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

                for (int key = 0; key < cnt; key++)
                    cache.put(key, Integer.toString(key));

                return true;
            }
        });

        // Each node must hold every entry, with an empty near cache.
        for (int node = 0; node < gridCnt; node++) {
            assertEquals(cnt, internalCache(node).size());
            assertEquals(0, near(node).nearSize());
        }
    }
    finally {
        stopAllGrids();
    }
}
// Current cache size appended to the diagnostic message built on the preceding lines.
", cacheSize=" + cache.size() + ']');
/** {@inheritDoc} */
@Override public String toString() {
    // Standard name/size representation via the shared string builder.
    return S.toString(GridCacheAdapter.class, this,
        "name", name(),
        "size", size());
}
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void removeAll() throws IgniteCheckedException { assert ctx.isLocal(); // We do batch and recreate cursor because removing using a single cursor // will cause it to reinitialize on each merged page. List<K> keys = new ArrayList<>(Math.min(REMOVE_ALL_KEYS_BATCH, size())); do { for (Iterator<CacheDataRow> it = ctx.offheap().cacheIterator(ctx.cacheId(), true, true, null, null); it.hasNext() && keys.size() < REMOVE_ALL_KEYS_BATCH; ) keys.add((K)it.next().key()); removeAll(keys); keys.clear(); } while (!isEmpty()); }
/** * Split clearLocally all task into multiple runnables. * * @param srv Whether to clear server cache. * @param near Whether to clear near cache. * @param readers Whether to clear readers. * @return Split runnables. */ public List<GridCacheClearAllRunnable<K, V>> splitClearLocally(boolean srv, boolean near, boolean readers) { if ((isNear() && near) || (!isNear() && srv)) { int keySize = size(); int cnt = Math.min(keySize / CLEAR_ALL_SPLIT_THRESHOLD + (keySize % CLEAR_ALL_SPLIT_THRESHOLD != 0 ? 1 : 0), Runtime.getRuntime().availableProcessors()); if (cnt == 0) cnt = 1; // Still perform cleanup since there could be entries in swap. GridCacheVersion obsoleteVer = ctx.versions().next(); List<GridCacheClearAllRunnable<K, V>> res = new ArrayList<>(cnt); for (int i = 0; i < cnt; i++) res.add(new GridCacheClearAllRunnable<>(this, obsoleteVer, i, cnt, readers)); return res; } else return null; }
// Current cache size appended to the diagnostic message built on the preceding lines.
", cacheSize=" + cache.size() + ']');