/** {@inheritDoc} Supplies an empty stash tuple for each thread on first access. */ @Override protected T3<GridKernalContext, String, String> initialValue() { return new T3<>(); } };
/**
 * Reconstructs object on unmarshalling.
 *
 * @return Reconstructed object.
 * @throws ObjectStreamException Thrown in case of unmarshalling error.
 */
protected Object readResolve() throws ObjectStreamException {
    try {
        // Stash holds (kernal context, queue name, cache name) captured during serialization.
        T3<GridKernalContext, String, String> tup = stash.get();

        return tup.get1().dataStructures().queue(tup.get2(), tup.get3(), 0, null);
    }
    catch (IgniteCheckedException e) {
        throw U.withCause(new InvalidObjectException(e.getMessage()), e);
    }
    finally {
        // Always clear the thread-local stash to avoid leaking the kernal context.
        stash.remove();
    }
}
/**
 * @param name Timer name to stop.
 */
@SuppressWarnings("ConstantConditions")
private void stopTimer(String name) {
    ConcurrentMap<String, T3<Long, Long, Long>> threadTimers = timers.get(Thread.currentThread());

    T3<Long, Long, Long> timer = threadTimers.get(name);

    // startTimer() must have been called on this thread for this name.
    assert timer != null;

    long now = System.currentTimeMillis();

    timer.set2(now);

    // Track the maximum observed duration for this timer name.
    long prevMax = timer.get3() == null ? 0 : timer.get3();

    timer.set3(Math.max(prevMax, now - timer.get1()));
}
/** * Handle topology change and clear supply context map of outdated contexts. */ void onTopologyChanged() { synchronized (scMap) { Iterator<T3<UUID, Integer, AffinityTopologyVersion>> it = scMap.keySet().iterator(); Collection<UUID> aliveNodes = grp.shared().discovery().aliveServerNodes().stream() .map(ClusterNode::id) .collect(Collectors.toList()); while (it.hasNext()) { T3<UUID, Integer, AffinityTopologyVersion> t = it.next(); if (!aliveNodes.contains(t.get1())) { // Clear all obsolete contexts. clearContext(scMap.get(t), log); it.remove(); if (log.isDebugEnabled()) log.debug("Supply context removed [grp=" + grp.cacheOrGroupName() + ", demander=" + t.get1() + "]"); } } } }
/**
 * @param expEvts Expected events.
 * @param lsnr Listener.
 */
private void checkEvents(List<T3<Object, Object, Object>> expEvts, CacheEventListener1 lsnr) {
    for (T3<Object, Object, Object> expected : expEvts) {
        Object key = expected.get1();

        CacheEntryEvent<?, ?> evt = lsnr.evts.get(key);

        assertNotNull("No event for key: " + key, evt);
        assertEquals("Unexpected value: " + evt, expected.get2(), evt.getValue());
    }

    // Reset state so the listener can be reused for the next round of checks.
    expEvts.clear();
    lsnr.evts.clear();
}
/**
 * @param name Timer name to start.
 */
private void startTimer(String name) {
    // computeIfAbsent replaces the manual get/putIfAbsent race-handling dance
    // with the atomic standard-library idiom (same semantics, less code).
    ConcurrentMap<String, T3<Long, Long, Long>> m =
        timers.computeIfAbsent(Thread.currentThread(), k -> new ConcurrentHashMap<>());

    T3<Long, Long, Long> t = m.computeIfAbsent(name, k -> new T3<>());

    // get1 = start timestamp, get2 = stop timestamp (reset to 0 until stopTimer runs).
    t.set1(System.currentTimeMillis());
    t.set2(0L);
}
/** * @param e Event * @param expVals expected value * @return {@code True} if entries has the same key, value and oldValue. If cache start without backups * than oldValue ignoring in comparison. */ private boolean equalOldValue(CacheEntryEvent<?, ?> e, T3<Object, Object, Object> expVals) { return (e.getOldValue() == null && expVals.get3() == null) // Both null || (e.getOldValue() != null && expVals.get3() != null // Equals && e.getOldValue().equals(expVals.get3())) || (backups == 0); // If we start without backup than oldValue might be lose. }
/**
 * @param row Row.
 * @return Resulting entry.
 * @throws IgniteCheckedException If failed.
 */
public Object processRowForTx(List<?> row) throws IgniteCheckedException {
    switch (mode()) {
        case INSERT:
        case MERGE:
            return processRow(row);

        case UPDATE: {
            T3<Object, Object, Object> updated = processRowForUpdate(row);

            // Only the key and the new value are needed by the caller.
            return new IgniteBiTuple<>(updated.get1(), updated.get3());
        }

        case DELETE:
            // For deletes the row consists of the key only.
            return row.get(0);

        default:
            throw new UnsupportedOperationException(String.valueOf(mode()));
    }
}
/**
 * Unblock indexing.
 *
 * @param nodeId Node ID.
 */
@SuppressWarnings("ConstantConditions")
private static void unblockIndexing(UUID nodeId) {
    T3<CountDownLatch, AtomicBoolean, CountDownLatch> b = BLOCKS.remove(nodeId);

    assertNotNull(b);

    // Release the latch so the blocked indexing thread can proceed.
    b.get1().countDown();
}
/**
 * @param expEvts Expected events.
 * @param lsnr Listener.
 * @throws Exception If failed.
 */
private void checkEvents(final List<T3<Object, Object, Object>> expEvts, final CacheEventListener3 lsnr,
    boolean allowLoseEvt) throws Exception {
    if (!allowLoseEvt) {
        // All expected events must arrive when losses are not allowed.
        assert GridTestUtils.waitForCondition(new PA() {
            @Override public boolean apply() {
                return lsnr.evts.size() == expEvts.size();
            }
        }, 2000L);
    }

    for (T3<Object, Object, Object> expected : expEvts) {
        Object key = expected.get1();

        CacheEntryEvent<?, ?> evt = lsnr.evts.get(key);

        assertNotNull("No event for key: " + key, evt);
        assertEquals("Unexpected value: " + evt, expected.get2(), evt.getValue());

        if (allowLoseEvt)
            lsnr.evts.remove(key);
    }

    // In lossy mode every received event must have been matched above.
    if (allowLoseEvt)
        assert lsnr.evts.isEmpty();

    expEvts.clear();
    lsnr.evts.clear();
    lsnr.keys.clear();
}
/**
 * Reconstructs object on unmarshalling.
 *
 * @return Reconstructed object.
 * @throws ObjectStreamException Thrown in case of unmarshalling error.
 */
protected Object readResolve() throws ObjectStreamException {
    try {
        // Stash holds (kernal context, set name, cache name) captured during serialization.
        T3<GridKernalContext, String, String> stashed = stash.get();

        return stashed.get1().dataStructures().set(stashed.get2(), stashed.get3(), null);
    }
    catch (IgniteCheckedException e) {
        throw U.withCause(new InvalidObjectException(e.getMessage()), e);
    }
    finally {
        // Always clear the thread-local stash to avoid leaking the kernal context.
        stash.remove();
    }
}
/** {@inheritDoc} Supplies an empty stash tuple for each thread on first access. */ @Override protected T3<GridKernalContext, String, String> initialValue() { return new T3<>(); } };
/**
 * Unblock indexing.
 *
 * @param nodeId Node ID.
 */
@SuppressWarnings("ConstantConditions")
private static void unblockIndexing(UUID nodeId) {
    T3<CountDownLatch, AtomicBoolean, CountDownLatch> blockState = BLOCKS.remove(nodeId);

    // The node must have been registered as blocked before unblocking.
    assertNotNull(blockState);

    // Count down the entry latch to let the waiting indexing thread continue.
    blockState.get1().countDown();
}