/** * Handle topology change and clear supply context map of outdated contexts. */ void onTopologyChanged() { synchronized (scMap) { Iterator<T3<UUID, Integer, AffinityTopologyVersion>> it = scMap.keySet().iterator(); Collection<UUID> aliveNodes = grp.shared().discovery().aliveServerNodes().stream() .map(ClusterNode::id) .collect(Collectors.toList()); while (it.hasNext()) { T3<UUID, Integer, AffinityTopologyVersion> t = it.next(); if (!aliveNodes.contains(t.get1())) { // Clear all obsolete contexts. clearContext(scMap.get(t), log); it.remove(); if (log.isDebugEnabled()) log.debug("Supply context removed [grp=" + grp.cacheOrGroupName() + ", demander=" + t.get1() + "]"); } } } }
/**
 * Unblocks indexing previously blocked for the given node.
 *
 * @param nodeId Node ID.
 */
@SuppressWarnings("ConstantConditions")
private static void unblockIndexing(UUID nodeId) {
    // Remove the blocker so subsequent indexing on this node proceeds unblocked.
    T3<CountDownLatch, AtomicBoolean, CountDownLatch> removed = BLOCKS.remove(nodeId);

    assertNotNull(removed);

    CountDownLatch unblockLatch = removed.get1();

    unblockLatch.countDown();
}
/**
 * Unblocks indexing previously blocked for the given node.
 *
 * @param nodeId Node ID.
 */
@SuppressWarnings("ConstantConditions")
private static void unblockIndexing(UUID nodeId) {
    // Removing the entry both retires the blocker and yields its latch.
    T3<CountDownLatch, AtomicBoolean, CountDownLatch> blockState = BLOCKS.remove(nodeId);

    assertNotNull(blockState);

    blockState.get1().countDown();
}
/** {@inheritDoc} */
@Override protected Object process(List<?> row) throws IgniteCheckedException {
    T3<Object, Object, Object> updated = plan.processRowForUpdate(row);

    // Only the first and third components are propagated to the caller.
    return new IgniteBiTuple<>(updated.get1(), updated.get3());
}
}
/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override protected void afterTest() throws Exception {
    GridQueryProcessor.idxCls = null;

    // Release any indexing threads still parked on their latches.
    BLOCKS.values().forEach(block -> block.get1().countDown());

    BLOCKS.clear();

    stopAllGrids();

    super.afterTest();
}
/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override protected void afterTest() throws Exception {
    GridQueryProcessor.idxCls = null;

    // Unblock every node still waiting, then drop the bookkeeping.
    for (T3<CountDownLatch, AtomicBoolean, CountDownLatch> blockState : BLOCKS.values()) {
        CountDownLatch unblockLatch = blockState.get1();

        unblockLatch.countDown();
    }

    BLOCKS.clear();

    stopAllGrids();

    super.afterTest();
}
/**
 * Reconstructs object on unmarshalling.
 *
 * @return Reconstructed object.
 * @throws ObjectStreamException Thrown in case of unmarshalling error.
 */
protected Object readResolve() throws ObjectStreamException {
    try {
        // Stash carries (kernal context, name, group) captured during writeReplace.
        T3<GridKernalContext, String, String> stashed = stash.get();

        GridKernalContext ctx = stashed.get1();

        return ctx.dataStructures().set(stashed.get2(), stashed.get3(), null);
    }
    catch (IgniteCheckedException e) {
        throw U.withCause(new InvalidObjectException(e.getMessage()), e);
    }
    finally {
        // Always clean the thread-local stash to avoid leaking the context.
        stash.remove();
    }
}
/**
 * Reconstructs object on unmarshalling.
 *
 * @return Reconstructed object.
 * @throws ObjectStreamException Thrown in case of unmarshalling error.
 */
protected Object readResolve() throws ObjectStreamException {
    try {
        // Stash carries (kernal context, name, group) captured during writeReplace.
        T3<GridKernalContext, String, String> stashed = stash.get();

        GridKernalContext ctx = stashed.get1();

        return ctx.dataStructures().queue(stashed.get2(), stashed.get3(), 0, null);
    }
    catch (IgniteCheckedException e) {
        throw U.withCause(new InvalidObjectException(e.getMessage()), e);
    }
    finally {
        // Always clean the thread-local stash to avoid leaking the context.
        stash.remove();
    }
}
/**
 * @param in Input to read from.
 * @return Read entry.
 * @throws IOException If failed.
 * @throws IgniteCheckedException If failed.
 */
DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException {
    // First byte marks whether the payload is encrypted.
    if (in.readByte() != ENCRYPTED)
        return readPlainDataEntry(in);

    if (encSpi == null) {
        // No encryption SPI available: consume the payload without decrypting.
        skipEncryptedRecord(in, false);

        return new EncryptedDataEntry();
    }

    T3<ByteBufferBackedDataInput, Integer, RecordType> decrypted = readEncryptedData(in, false);

    if (decrypted.get1() == null)
        return null;

    return readPlainDataEntry(decrypted.get1());
}
/**
 * @param row Row.
 * @return Resulting entry.
 * @throws IgniteCheckedException If failed.
 */
public Object processRowForTx(List<?> row) throws IgniteCheckedException {
    switch (mode()) {
        case INSERT:
        case MERGE:
            return processRow(row);

        case UPDATE:
            T3<Object, Object, Object> updated = processRowForUpdate(row);

            // Only the key and the new value are relevant for the tx entry.
            return new IgniteBiTuple<>(updated.get1(), updated.get3());

        case DELETE:
            // For DELETE the row holds just the key.
            return row.get(0);

        default:
            throw new UnsupportedOperationException(String.valueOf(mode()));
    }
}
/**
 * Verifies that the listener received every expected event with the expected value,
 * then resets both the expectations and the listener state.
 *
 * @param expEvts Expected events.
 * @param lsnr Listener.
 */
private void checkEvents(List<T3<Object, Object, Object>> expEvts, CacheEventListener1 lsnr) {
    for (T3<Object, Object, Object> expected : expEvts) {
        Object key = expected.get1();

        CacheEntryEvent<?, ?> actual = lsnr.evts.get(key);

        assertNotNull("No event for key: " + key, actual);
        assertEquals("Unexpected value: " + actual, expected.get2(), actual.getValue());
    }

    expEvts.clear();

    lsnr.evts.clear();
}
/**
 * Stops the named timer for the current thread, recording the stop time and
 * updating the maximum observed duration.
 *
 * @param name Timer name to stop.
 */
@SuppressWarnings("ConstantConditions")
private void stopTimer(String name) {
    ConcurrentMap<String, T3<Long, Long, Long>> threadTimers = timers.get(Thread.currentThread());

    T3<Long, Long, Long> timer = threadTimers.get(name);

    assert timer != null;

    long now = System.currentTimeMillis();

    // Tuple layout: (start time, stop time, max duration).
    timer.set2(now);

    long prevMax = timer.get3() == null ? 0 : timer.get3();

    timer.set3(Math.max(prevMax, now - timer.get1()));
}
@Override public Object call() throws Exception {
    Random rnd = new Random();

    // Dummy 1 KB payload stored under every key.
    byte[] val = new byte[1024];

    // Per-thread operation counter, folded into the shared total at the end.
    long locTotalOpCnt = 0;

    while (!done.get()) {
        // Perform puts in batches of 500 between shared-counter updates.
        for (int i = 0; i < 500; i++) {
            T3<Integer, Integer, byte[]> key = randomKey(rnd);

            map.put(key.get1(), key.get2(), key.get3(), val);
        }

        locTotalOpCnt += 500;

        opCnt.addAndGet(500);
    }

    totalOpCnt.addAndGet(locTotalOpCnt);

    return null;
}
}, threadCnt);
/**
 * Resumes message delivery: stops filtering and flushes every queued message.
 */
private synchronized void resume() {
    msgCls = null;

    // Deliver everything that was held back while sending was suspended.
    for (T3<ClusterNode, Message, IgniteInClosure> held : queue)
        super.sendMessage(held.get1(), held.get2(), held.get3());

    queue.clear();
}
}
/**
 * Verifies that the listener received the expected events, optionally tolerating
 * lost events, then resets both the expectations and the listener state.
 *
 * @param expEvts Expected events.
 * @param lsnr Listener.
 * @param allowLoseEvt Whether event loss is tolerated.
 * @throws Exception If failed.
 */
private void checkEvents(final List<T3<Object, Object, Object>> expEvts, final CacheEventListener3 lsnr,
    boolean allowLoseEvt) throws Exception {
    if (!allowLoseEvt) {
        // Every expected event must eventually arrive.
        assert GridTestUtils.waitForCondition(new PA() {
            @Override public boolean apply() {
                return lsnr.evts.size() == expEvts.size();
            }
        }, 2000L);
    }

    for (T3<Object, Object, Object> expected : expEvts) {
        Object key = expected.get1();

        CacheEntryEvent<?, ?> actual = lsnr.evts.get(key);

        assertNotNull("No event for key: " + key, actual);
        assertEquals("Unexpected value: " + actual, expected.get2(), actual.getValue());

        if (allowLoseEvt)
            lsnr.evts.remove(key);
    }

    if (allowLoseEvt)
        assert lsnr.evts.isEmpty(); // Nothing unexpected should remain.

    expEvts.clear();

    lsnr.evts.clear();
    lsnr.keys.clear();
}
@Override public Object call() throws Exception {
    while (!stop.get()) {
        T3<Long, Long, Long> t;

        try {
            t = lock.snapshot();
        }
        catch (IgniteException e) {
            // Expected failure injected by the test; verify the message and retry.
            assertEquals(oops, e.getMessage());

            continue;
        }

        // Snapshot invariant: third component equals the sum of the first two.
        assertEquals(t.get3().longValue(), t.get1() + t.get2());
    }

    return null;
}
}, 8, "snapshot");
/** * Await indexing. * * @param nodeId Node ID. */ @SuppressWarnings("ConstantConditions") private static void awaitIndexing(UUID nodeId) { T3<CountDownLatch, AtomicBoolean, CountDownLatch> blocker = BLOCKS.get(nodeId); if (blocker != null) { assertTrue(blocker.get2().compareAndSet(false, true)); blocker.get3().countDown(); while (true) { try { blocker.get1().await(); break; } catch (InterruptedException e) { // No-op. } } } }
/** {@inheritDoc} */ @Override public WALRecord readRecord(RecordType type, ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException { if (type == ENCRYPTED_RECORD) { if (encSpi == null) { T2<Integer, RecordType> knownData = skipEncryptedRecord(in, true); //This happen on offline WAL iteration(we don't have encryption keys available). return new EncryptedRecord(knownData.get1(), knownData.get2()); } T3<ByteBufferBackedDataInput, Integer, RecordType> clData = readEncryptedData(in, true); //This happen during startup. On first WAL iteration we restore only metastore. //So, no encryption keys available. See GridCacheDatabaseSharedManager#readMetastore if (clData.get1() == null) return new EncryptedRecord(clData.get2(), clData.get3()); return readPlainRecord(clData.get3(), clData.get1(), true); } return readPlainRecord(type, in, false); }
/**
 * Drops the index described by the given tuple.
 *
 * @param ignite Node to run the DDL statement on.
 * @param pair Tuple of (cache name, table name, index name) — presumably; inferred
 *     from the matching createIndex, confirm against callers.
 */
private void destroyIndex(IgniteEx ignite, T3<String, String, String> pair) {
    IgniteCache<?, ?> cache = ignite.getOrCreateCache(pair.get1());

    // Renamed from the misleading 'createIdxQryStr' — this builds a DROP statement.
    String dropIdxQryStr = String.format("DROP INDEX %s", pair.get3());

    cache.query(new SqlFieldsQuery(dropIdxQryStr)).getAll();
}
/**
 * Creates an index on the {@code city_id} column of the given table.
 *
 * @param ignite Node to run the DDL statement on.
 * @param pair Tuple of (cache name, table name, index name).
 */
private void createIndex(IgniteEx ignite, T3<String, String, String> pair) {
    IgniteCache<?, ?> cache = ignite.getOrCreateCache(pair.get1());

    // Fix: the format string used '%S' (uppercase conversion) for the index name,
    // while the matching DROP INDEX uses '%s'. The uppercase conversion was almost
    // certainly a typo and could make the created name disagree with the dropped one.
    String createIdxQryStr = String.format("CREATE INDEX %s on %s (city_id)", pair.get3(), pair.get2());

    cache.query(new SqlFieldsQuery(createIdxQryStr)).getAll();
}