/**
 * Constructs an empty set, delegating to a backing {@code GridConcurrentHashSet}
 * with its default initial capacity, load factor, and concurrencyLevel.
 */
public GridConcurrentPhantomHashSet() {
    store = new GridConcurrentHashSet<>();
}
/**
 * Creates and registers a data streamer future for the given topology version.
 *
 * @param topVer Topology version.
 * @return Future.
 */
public GridFutureAdapter addDataStreamerFuture(AffinityTopologyVersion topVer) {
    DataStreamerFuture res = new DataStreamerFuture(topVer);

    // Each future instance is new, so registration must always succeed.
    boolean added = dataStreamerFuts.add(res);

    assert added;

    return res;
}
/** {@inheritDoc} */
@Override public int write(ByteBuffer srcBuf, long position) throws IOException {
    // Reusable per-holder future — presumably a thread-local; verify against
    // the holder's declaration.
    ChannelOpFuture fut = holder.get();
    fut.reset();

    // Track the in-flight operation so it can be observed/awaited elsewhere.
    asyncFuts.add(fut);

    // Asynchronous positional write; 'fut' doubles as the completion handler.
    ch.write(srcBuf, position, null, fut);

    try {
        // Block until the async write completes; result is the write outcome.
        return fut.getUninterruptibly();
    }
    catch (IgniteCheckedException e) {
        // Re-wrap into the checked type declared by this I/O interface.
        throw new IOException(e);
    }
    finally {
        // Always untrack, whether the write succeeded or failed.
        asyncFuts.remove(fut);
    }
}
/** {@inheritDoc} */
@Override public void close() throws IOException {
    // Idempotent: subsequent calls are no-ops.
    if (closed)
        return;

    try {
        boolean released = locks.remove(lockName);

        if (!released)
            throw new AlreadyClosedException("Lock was already released: " + this);
    }
    finally {
        // Mark closed even if the release above failed.
        closed = true;
    }
}
// Collected field indexes; the set is concurrent and may change size at any time.
final GridConcurrentHashSet<Integer> fields = new GridConcurrentHashSet<>();

// Expected columns, one slot per currently-collected field.
QueryField[] expCols = new QueryField[fields.size()];

// Use a zero-length array: on a concurrent set, presizing via fields.size()
// races with toArray() — if elements are removed in between, the presized
// array would be padded with trailing nulls per the Collection.toArray
// contract. The zero-length form always allocates an exact-size array.
Integer[] sorted = fields.toArray(new Integer[0]);
// NOTE(review): as shown here, 'nodes' is freshly created and empty, so this
// assertion can never pass in isolation — presumably the set is populated by
// job execution / a listener between these two lines in the full test; verify
// against the original source.
final GridConcurrentHashSet nodes = new GridConcurrentHashSet();

assertTrue("Node get no jobs.", nodes.contains(g.name()));
/** {@inheritDoc} */
@Override public void onUpdated(Iterable<CacheEntryEvent<?, ?>> evts) {
    try {
        for (CacheEntryEvent<?, ?> evt : evts) {
            // Snapshot the field: it is nulled below once the count reaches 0.
            CountDownLatch latch = this.latch;

            log.info("Received cache event [evt=" + evt + ", left=" +
                (latch != null ? latch.getCount() : null) + ']');

            // Record event by key and remember which keys were seen.
            this.evts.put(evt.getKey(), evt);

            keys.add((Integer)evt.getKey());

            if (allEvts != null)
                allEvts.add(evt);

            // An event arriving after the latch was exhausted (or before one
            // was installed) is unexpected.
            assertTrue(latch != null);
            assertTrue(latch.getCount() > 0);

            latch.countDown();

            // Last expected event: reset state for the next round.
            if (latch.getCount() == 0) {
                this.latch = null;

                keys.clear();
            }
        }
    }
    catch (Throwable e) {
        // Deliberate best-effort: record the failure for the test thread
        // instead of rethrowing from the listener callback.
        err = true;

        log.error("Unexpected error", e);
    }
}
}
/** {@inheritDoc} */
@Override public int size() {
    // Purge collected phantom entries so the count reflects live elements only.
    removeStale();

    int cnt = store.size();

    return cnt;
}
/**
 * Check TX cache: verifies that entry-processor invocations (plain and
 * resource-injected) produce the expected events on the expected nodes.
 */
private void checkTx0() {
    caches[0].invoke(key1, new Transformer());

    checkEventNodeIdsStrict(Transformer.class.getName(), idsForKeys(key1));

    assert evts.isEmpty();

    caches[0].invokeAll(keys, new Transformer());

    checkEventNodeIdsStrict(Transformer.class.getName(), idsForKeys(key1, key2));

    assert evts.isEmpty();

    caches[0].invoke(key1, new TransformerWithInjection());

    checkEventNodeIdsStrict(TransformerWithInjection.class.getName(), idsForKeys(key1));

    assert evts.isEmpty();

    caches[0].invokeAll(keys, new TransformerWithInjection());

    checkEventNodeIdsStrict(TransformerWithInjection.class.getName(), idsForKeys(key1, key2));

    // NOTE(review): unlike every preceding invocation, no 'assert evts.isEmpty()'
    // follows this final invokeAll — confirm whether the omission is intentional.
}
/** {@inheritDoc} */
@Override public boolean isEmpty() {
    // Drop stale entries first so emptiness reflects live elements only.
    removeStale();

    boolean empty = store.isEmpty();

    return empty;
}
/** {@inheritDoc} */
@Override public void clear() {
    // Delegate straight to the backing store; no stale-entry sweep needed.
    store.clear();
}
/**
 * Creates a new set with the same elements as the given collection. The
 * backing map is created with an initial capacity equal to the size of the
 * given collection, and a default load factor and concurrencyLevel.
 *
 * <p>(Previous javadoc claimed "twice the number of mappings or 11, whichever
 * is greater" — that text matches the JDK {@code ConcurrentHashMap(Map)}
 * constructor, not the {@code c.size()} capacity actually passed here.)
 *
 * @param c Collection whose elements are added to the new set.
 */
public GridConcurrentHashSet(Collection<E> c) {
    super(new ConcurrentHashMap<E, E>(c.size()));

    addAll(c);
}
/** {@inheritDoc} */
@Override public boolean onDone(@Nullable Void res, @Nullable Throwable err) {
    boolean first = super.onDone(res, err);

    // Deregister this future on the first (and only effective) completion.
    if (first)
        dataStreamerFuts.remove(this);

    return first;
}
// NOTE(review): as shown here, 'nodes' is freshly created and empty, so this
// assertion can never pass in isolation — presumably the set is populated by
// job execution / a listener between these two lines in the full test; verify
// against the original source.
final GridConcurrentHashSet nodes = new GridConcurrentHashSet();

assertTrue("Node get no jobs.", nodes.contains(g.name()));
/** {@inheritDoc} */
@Override public int size() {
    // Sweep stale entries before delegating, so dead elements are not counted.
    removeStale();

    return store.size();
}
/** {@inheritDoc} */
@Override public boolean isEmpty() {
    // Sweep stale entries before delegating, so dead elements do not count.
    removeStale();

    return store.isEmpty();
}