/**
 * Creates a new, empty set using the default initial capacity,
 * load factor and concurrency level of the backing set.
 */
public GridConcurrentWeakHashSet() {
    store = new GridConcurrentHashSet<>();
}
/**
 * Creates a new, empty set using the default initial capacity,
 * load factor and concurrency level of the backing set.
 */
public GridConcurrentPhantomHashSet() {
    store = new GridConcurrentHashSet<>();
}
/**
 * Creates a new, empty set sized for the given number of elements,
 * using default load factor and concurrency level.
 *
 * @param initCap The initial capacity. The implementation performs
 *      internal sizing to accommodate this many elements.
 * @throws IllegalArgumentException If the initial capacity is negative.
 */
public GridConcurrentPhantomHashSet(int initCap) {
    store = new GridConcurrentHashSet<>(initCap);
}
/**
 * Creates a new, empty set sized for the given number of elements,
 * using default load factor and concurrency level.
 *
 * @param initCap The initial capacity. The implementation performs
 *      internal sizing to accommodate this many elements.
 * @throws IllegalArgumentException If the initial capacity is negative.
 */
public GridConcurrentWeakHashSet(int initCap) {
    store = new GridConcurrentHashSet<>(initCap);
}
/** {@inheritDoc} */
@Override public GridConcurrentHashSet call() {
    // Diamond operator avoids the raw-type constructor invocation (and its
    // unchecked warning). The raw return type itself is dictated by the
    // overridden signature and therefore left unchanged.
    return new GridConcurrentHashSet<>();
}
/**
 * Creates a new, empty set with explicit sizing parameters.
 *
 * @param initCap The initial capacity. The implementation performs
 *      internal sizing to accommodate this many elements.
 * @param loadFactor The load factor threshold used to control resizing;
 *      resizing may occur when the average number of elements per bin
 *      exceeds this value.
 * @param conLevel The estimated number of concurrently updating threads;
 *      the implementation performs internal sizing to accommodate them.
 * @throws IllegalArgumentException If the initial capacity is negative,
 *      or the load factor or concurrency level is non-positive.
 */
public GridConcurrentWeakHashSet(int initCap, float loadFactor, int conLevel) {
    store = new GridConcurrentHashSet<>(initCap, loadFactor, conLevel);
}
/**
 * Creates a new, empty set with explicit sizing parameters.
 *
 * @param initCap The initial capacity. The implementation performs
 *      internal sizing to accommodate this many elements.
 * @param loadFactor The load factor threshold used to control resizing;
 *      resizing may occur when the average number of elements per bin
 *      exceeds this value.
 * @param conLevel The estimated number of concurrently updating threads;
 *      the implementation performs internal sizing to accommodate them.
 * @throws IllegalArgumentException If the initial capacity is negative,
 *      or the load factor or concurrency level is non-positive.
 */
public GridConcurrentPhantomHashSet(int initCap, float loadFactor, int conLevel) {
    store = new GridConcurrentHashSet<>(initCap, loadFactor, conLevel);
}
/**
 * Adds a transaction to the set of remaining transactions, lazily
 * creating the set on first use.
 *
 * @param tx Transaction to add.
 */
public void addTx(IgniteInternalTx tx) {
    // NOTE(review): the lazy initialization below is not atomic — two
    // concurrent callers could each create a set and one addition would be
    // lost. Presumably this method is invoked from a single thread (or under
    // an external lock); confirm against the callers.
    if (remainingTxs == null)
        remainingTxs = new GridConcurrentHashSet<>();

    remainingTxs.add(tx);
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    // Reset per-test state: allow message flow and start with an empty
    // collection of observed ensured messages.
    blockMsgs = false;

    receivedEnsuredMsgs = new GridConcurrentHashSet<>();
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    // Fresh ID set and zeroed counter for each test run.
    ids = new GridConcurrentHashSet<>();

    res = new AtomicInteger();
}
/**
 * Loads a batch of {@link DataStreamerEntry} objects.
 *
 * @param entries Entries to load.
 * @param fut Future associated with this batch.
 */
private void loadData(Collection<? extends DataStreamerEntry> entries, GridFutureAdapter fut) {
    Collection<KeyCacheObjectWrapper> keys = null;

    // Key tracking is only needed for multi-entry batches; presize the set
    // to the batch size to avoid resizing.
    if (entries.size() > 1) {
        keys = new GridConcurrentHashSet<>(entries.size());

        for (DataStreamerEntry entry : entries)
            keys.add(new KeyCacheObjectWrapper(entry.getKey()));
    }

    load0(entries, fut, keys, 0, null, null);
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    // Clear any addresses left registered by a previous test.
    Collection<InetSocketAddress> registered = IP_FINDER.getRegisteredAddresses();

    if (!F.isEmpty(registered))
        IP_FINDER.unregisterAddresses(registered);

    srvIdx.set(0);
    clientIdx.set(0);

    srvNodeIds = new GridConcurrentHashSet<>();
    clientNodeIds = new GridConcurrentHashSet<>();

    clientsPerSrv = 2;
}
/** {@inheritDoc} */
@Override public GridMultiCollectionWrapper<FullPageId> beginCheckpoint() throws IgniteException {
    // No segments yet — nothing can be dirty, so return an empty wrapper.
    if (segments == null)
        return new GridMultiCollectionWrapper<>(Collections.<FullPageId>emptyList());

    Collection[] collections = new Collection[segments.length];

    for (int i = 0; i < segments.length; i++) {
        Segment seg = segments[i];

        // A non-null checkpoint-pages set means the previous checkpoint has
        // not been finished for this segment yet.
        if (seg.segCheckpointPages != null)
            throw new IgniteException("Failed to begin checkpoint (it is already in progress).");

        // Hand the current dirty-page set over to the checkpoint and start
        // collecting newly dirtied pages in a fresh concurrent set.
        collections[i] = seg.segCheckpointPages = seg.dirtyPages;

        seg.dirtyPages = new GridConcurrentHashSet<>();
    }

    memMetrics.resetDirtyPages();

    if (throttlingPlc != ThrottlingPolicy.DISABLED)
        writeThrottle.onBeginCheckpoint();

    return new GridMultiCollectionWrapper<>(collections);
}
// Fresh, thread-safe set — presumably tracks tasks executed during the run;
// confirm against the surrounding code.
executedTasks = new GridConcurrentHashSet<>();
/**
 * Thread-safe set of observed message IDs.
 * NOTE(review): declared with the concrete type rather than {@code Set<Long>};
 * fine if implementation-specific methods are used elsewhere — verify.
 */
private final GridConcurrentHashSet<Long> msgIds = new GridConcurrentHashSet<>();
/** Thread-safe collection of NIO sessions. */
final Collection<GridNioSession> sesSet = new GridConcurrentHashSet<>();
/** Thread-safe collection of received messages. */
final Collection<Object> rcvMsgs = new GridConcurrentHashSet<>();
/** Thread-safe collection of received messages. */
final Collection<Object> rcvMsgs = new GridConcurrentHashSet<>();
TcpDiscoverySpi coordDisco = (TcpDiscoverySpi)coord.configuration().getDiscoverySpi(); Set<TcpDiscoveryAbstractMessage> sentEnsuredMsgs = new GridConcurrentHashSet<>(); coordDisco.addSendMessageListener(msg -> { if (coordDisco.ensured(msg))
/**
 * Checks that every ensured discovery message sent by the coordinator is
 * eventually received, even when nodes fail while message forwarding is
 * blocked.
 *
 * @throws Exception If failed.
 */
@Test
public void testDeliveryAllFailedMessagesInCorrectOrder() throws Exception {
    IgniteEx coord = startGrid("coordinator");

    TcpDiscoverySpi coordDisco = (TcpDiscoverySpi)coord.configuration().getDiscoverySpi();

    Set<TcpDiscoveryAbstractMessage> sentEnsuredMsgs = new GridConcurrentHashSet<>();

    // Record every ensured message the coordinator sends, so it can be
    // compared with what was actually received.
    coordDisco.addSendMessageListener(msg -> {
        if (coordDisco.ensured(msg))
            sentEnsuredMsgs.add(msg);
    });

    // Node which receives messages but will not forward them around the ring.
    IgniteEx receiver = startGrid("receiver");

    // Node which will be failed first.
    IgniteEx dummy = startGrid("dummy");

    // Node which should receive all fail messages in any case.
    startGrid("listener");

    // Discard messages generated during topology build-up; only the fail
    // messages below are of interest.
    sentEnsuredMsgs.clear();
    receivedEnsuredMsgs.clear();

    blockMsgs = true;

    log.info("Sending fail node messages");

    coord.context().discovery().failNode(dummy.localNode().id(), "Dummy node failed");
    coord.context().discovery().failNode(receiver.localNode().id(), "Receiver node failed");

    boolean delivered = GridTestUtils.waitForCondition(() -> {
        log.info("Waiting for messages delivery");

        return receivedEnsuredMsgs.equals(sentEnsuredMsgs);
    }, 5000);

    assertTrue("Sent: " + sentEnsuredMsgs + "; received: " + receivedEnsuredMsgs, delivered);
}