// Fragment of a larger flush routine (enclosing method not visible here).
// Drop the batch we are about to persist from the pending-mutation queue,
// then write it to Cassandra via the retrying insert helper.
// NOTE(review): assumes `rows` were drained from mutationQ.right, so removing them
// here cannot lose mutations if robustInsert fails — confirm with the enclosing method.
mutationQ.right.removeAll(rows);
CassandraUtils.robustInsert(CassandraUtils.consistency, rows.toArray(new RowMutation[] {}));
// Fragment of a channel-cleanup loop (enclosing method not visible here).
// Forget the creation timestamp of the channel that was just closed,
// then drop every closed channel from this partition and add them to the running total.
// NOTE(review): presumably iterates once per closedChannel in closedChannels — confirm in caller.
channelId2Creation.remove(closedChannel.channel.getId());
partition.removeAll(closedChannels);
closedCount += closedChannels.size();
/**
 * Removes every element of {@code c} from the companion map {@code m} and bulk-removes
 * them from the backing queue {@code q}.
 *
 * <p>Synchronized so the map and queue are updated atomically with respect to other
 * synchronized operations on this object.
 *
 * @param c elements to remove; elements absent from the queue are ignored
 * @return {@code true} if the backing queue changed as a result of the call
 */
@Override
public synchronized boolean removeAll(Collection<?> c) {
    // Evict each element from the map first, then delegate the bulk removal to the queue.
    for (Iterator<?> it = c.iterator(); it.hasNext(); ) {
        m.remove(it.next());
    }
    return q.removeAll(c);
}
/**
 * Bulk-removes the given entries from the underlying publish queue and then
 * re-checks the queue size invariant via {@code sizeCheck()}.
 *
 * @param list entries to discard; entries not present in the queue are ignored
 */
public void removeAll(List<PublishQueueEntry<T, V>> list) {
    // Remove first, then validate: sizeCheck() must observe the post-removal size.
    queue.removeAll(list);
    sizeCheck();
}
/**
 * Drains the phantom-reference queue and folds each collected object into the heap
 * summary counters, then removes the drained references from the tracking sets.
 *
 * <p>The first poll happens outside the lock so the common empty-queue case pays no
 * synchronization cost; the remainder of the drain runs under {@code summaryData}'s lock.
 */
private void cleanReferenceQueue() {
    ObjectPhantomReference reference = (ObjectPhantomReference) referenceQueue.poll();
    if (reference == null) {
        // nothing to do avoid locking
        return;
    }
    // Batch drained references locally so the tracking sets are updated in two bulk
    // removeAll calls instead of once per reference.
    Set<ObjectPhantomReference> collectedNewReferences = new HashSet<>();
    Set<ObjectPhantomReference> collectedProcessedReferences = new HashSet<>();
    synchronized (summaryData) {
        do {
            HeapSummary counter = getSummary(summaryData, reference.language, reference.metaObject);
            long bytesDiff = reference.computeBytesDiff();
            if (reference.processed) {
                // Object was already accounted as alive: reverse that accounting now
                // that it has been collected.
                counter.aliveInstances--;
                counter.aliveBytes -= bytesDiff;
                collectedProcessedReferences.add(reference);
            } else {
                // object never was processed alive
                counter.totalInstances++;
                counter.totalBytes += bytesDiff;
                collectedNewReferences.add(reference);
            }
        } while ((reference = (ObjectPhantomReference) referenceQueue.poll()) != null);
        // note that ConcurrentLinkedQueue actually supports doing this
        // the iterator does not throw a ConcurrentModificationException
        newReferences.removeAll(collectedNewReferences);
        processedReferences.removeAll(collectedProcessedReferences);
    }
}
// Shuts the browser pool down: stops the booter (if enabled), closes every browser in
// both the available and taken sets, and resets the thread-local current-browser holder.
@Override
public void run() {
    Queue<EmbeddedBrowser> deleteList = new LinkedList<EmbeddedBrowser>();
    if (useBooting()) {
        booter.shutdown();
    }
    // Close each idle browser; the finally block guarantees it is queued for removal
    // even when close() throws.
    // NOTE(review): an unchecked exception from close() still propagates and aborts this
    // loop, leaving later browsers unclosed — confirm whether close() can throw here.
    for (EmbeddedBrowser b : available) {
        try {
            b.close();
        } finally {
            deleteList.add(b);
        }
    }
    // Remove via an intermediate list rather than clear(), so only browsers we actually
    // visited are dropped.
    available.removeAll(deleteList);
    deleteList = new LinkedList<EmbeddedBrowser>();
    for (EmbeddedBrowser b : taken) {
        try {
            b.close();
        } finally {
            deleteList.add(b);
        }
    }
    taken.removeAll(deleteList);
    // Discard all per-thread browser references in one step.
    currentBrowser = new ThreadLocal<EmbeddedBrowser>();
    assert (available.isEmpty());
    assert (taken.isEmpty());
} });
public void run(Timeout timeout) throws Exception { if (isClosed.get()) return; try { if (LOGGER.isDebugEnabled()) for (String key : poolsPerKey.keySet()) { LOGGER.debug("Entry count for : {} : {}", key, poolsPerKey.get(key).size()); } long start = millisTime(); int closedCount = 0; int totalCount = 0; for (ConcurrentLinkedQueue<IdleChannel> pool : poolsPerKey.values()) { // store in intermediate unsynchronized lists to minimize the impact on the ConcurrentLinkedQueue if (LOGGER.isDebugEnabled()) totalCount += pool.size(); List<IdleChannel> closedChannels = closeChannels(expiredChannels(pool, start)); pool.removeAll(closedChannels); int poolClosedCount = closedChannels.size(); closedCount += poolClosedCount; } long duration = millisTime() - start; LOGGER.debug("Closed {} connections out of {} in {}ms", closedCount, totalCount, duration); } catch (Throwable t) { LOGGER.error("uncaught exception!", t); } scheduleNewIdleChannelDetector(timeout.getTask()); } }
// Fragment of a channel-cleanup loop (enclosing method not visible here).
// Forget the creation timestamp of the channel that was just closed,
// then drop every closed channel from this partition and add them to the running total.
// NOTE(review): presumably iterates once per closedChannel in closedChannels — confirm in caller.
channelId2Creation.remove(closedChannel.channel.getId());
partition.removeAll(closedChannels);
closedCount += closedChannels.size();
// Fragment of a channel-cleanup loop (enclosing method not visible here).
// Remove the closed channel's creation timestamp, keyed via the channelId(...) helper,
// then drop every closed channel from this partition and add them to the running total.
// NOTE(review): presumably iterates once per closedChannel in closedChannels — confirm in caller.
channelId2Creation.remove(channelId(closedChannel.channel));
partition.removeAll(closedChannels);
closedCount += closedChannels.size();
// Fragment of a channel-cleanup loop (enclosing method not visible here).
// Remove the closed channel's creation timestamp, keyed via the channelId(...) helper,
// then drop every closed channel from this partition and add them to the running total.
// NOTE(review): presumably iterates once per closedChannel in closedChannels — confirm in caller.
channelId2Creation.remove(channelId(closedChannel.channel));
partition.removeAll(closedChannels);
closedCount += closedChannels.size();
// Fragment of a channel-cleanup loop (enclosing method not visible here).
// Forget the creation timestamp of the channel that was just closed,
// then drop every closed channel from this partition and add them to the running total.
// NOTE(review): presumably iterates once per closedChannel in closedChannels — confirm in caller.
channelId2Creation.remove(closedChannel.channel.getId());
partition.removeAll(closedChannels);
closedCount += closedChannels.size();
// Fragment of a channel-cleanup loop (enclosing method not visible here).
// Forget the creation timestamp of the channel that was just closed,
// then drop every closed channel from this partition and add them to the running total.
// NOTE(review): presumably iterates once per closedChannel in closedChannels — confirm in caller.
channelId2Creation.remove(closedChannel.channel.getId());
partition.removeAll(closedChannels);
closedCount += closedChannels.size();