Refine search
/**
 * Creates a window manager that reports lifecycle transitions to the given listener.
 *
 * @param lifecycleListener receives window open/expiry notifications
 */
public WindowManager(WindowLifecycleListener<T> lifecycleListener)
{
    windowLifecycleListener = lifecycleListener;
    // Fair lock: waiting threads acquire in arrival order.
    lock = new ReentrantLock(true);
    queue = new ConcurrentLinkedQueue<>();
    prevWindowEvents = new HashSet<>();
    expiredEvents = new ArrayList<>();
    eventsSinceLastExpiry = new AtomicInteger();
}
// Drains and emits every large event currently queued, stopping at a sentinel so that
// events enqueued concurrently after this call starts are left for the next round.
// The sentinel is compared by reference identity (hence the ArrayEquality suppression):
// LARGE_EVENTS_STOP is a distinguished array instance, not an equal-by-content value.
@SuppressWarnings("ArrayEquality") private void emitLargeEvents()
{
    if (largeEventsToEmit.isEmpty()) {
        return;
    }
    // Don't try to emit large events until exhaustion, to avoid starvation of "normal" batches, if large event
    // posting rate is too high, though it should never happen in practice.
    largeEventsToEmit.add(LARGE_EVENTS_STOP);
    for (byte[] largeEvent; (largeEvent = largeEventsToEmit.poll()) != LARGE_EVENTS_STOP; ) {
        emitLargeEvent(largeEvent);
        // Keep the approximate bookkeeping counters in step with the drain.
        approximateBuffersToEmitCount.decrementAndGet();
        approximateLargeEventsToEmitCount.decrementAndGet();
        approximateEventsToEmitCount.decrementAndGet();
    }
}
public void put(Node node, ClientRequest request) { // the lock protects the put from a concurrent removal of the queue for the node synchronized (unsent) { ConcurrentLinkedQueue<ClientRequest> requests = unsent.get(node); if (requests == null) { requests = new ConcurrentLinkedQueue<>(); unsent.put(node, requests); } requests.add(request); } }
private void writeLargeEvent(byte[] eventBytes) { // It's better to drop the oldest, not latest event, but dropping the oldest is not easy to implement, because // LARGE_EVENTS_STOP could be added into the queue concurrently. So just not adding the latest event. // >, not >=, because largeEventsToEmit could contain LARGE_EVENTS_STOP if (approximateBuffersToEmitCount.get() > config.getBatchQueueSizeLimit()) { log.error( "largeEventsToEmit queue size reached the limit [%d], dropping the latest large event", config.getBatchQueueSizeLimit() ); } else { largeEventsToEmit.add(eventBytes); approximateBuffersToEmitCount.incrementAndGet(); approximateLargeEventsToEmitCount.incrementAndGet(); approximateEventsToEmitCount.incrementAndGet(); } wakeUpEmittingThread(); }
/**
 * Returns a buffer to write into: a pooled one when available, otherwise a fresh allocation.
 */
private byte[] acquireBuffer() {
    byte[] pooled = buffersToReuse.poll();
    if (pooled != null) {
        // Took one out of the reuse pool; keep the approximate pool size in sync.
        approximateBuffersToReuseCount.decrementAndGet();
        return pooled;
    }
    // Pool empty: allocate, then count the allocation (after the alloc succeeds).
    byte[] fresh = new byte[bufferSize];
    allocatedBuffers.incrementAndGet();
    return fresh;
}
// NOTE(review): this span is a truncated fragment of a reactive-streams test — the
// for-loop body below is never closed in the visible text, and the subscription that
// would populate `c` and `threads` is missing. Preserved verbatim; do not assume it
// compiles on its own.
AtomicInteger c = new AtomicInteger();
ConcurrentLinkedQueue<Thread> threads = new ConcurrentLinkedQueue<Thread>();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
// Block until the stream terminates, then verify it completed without error.
ts.awaitTerminalEvent();
ts.assertNoErrors();
System.out.println("testSubscribeOnScheduling => Received: " + ts.valueCount() + " Emitted: " + c.get());
assertEquals(num, ts.valueCount());
// Presumably bounds in-flight emission by the prefetch window — TODO confirm the *4 rationale.
assertTrue(c.get() < Flowable.bufferSize() * 4);
Thread first = null;
for (Thread t : threads) {
System.out.println("testSubscribeOnScheduling => Number of batch requests seen: " + threads.size());
assertTrue(threads.size() > 1);
System.out.println("-------------------------------------------------------------------------------------------");
// Appends an event to this bucket: store it, bump the element counter, and flag
// the bucket as modified so downstream flushing picks it up. Order matters —
// setModified() runs only after the event is visible in the collection.
void add(Event<T> event) { events.add(event); size.incrementAndGet(); setModified(); }
/**
 * Adds a region result to the segment cache unless the accumulated size would exceed
 * the configured cap.
 *
 * <p>Fix: the original read {@code totalResultSize.get()} separately after
 * {@code addAndGet}, so a concurrent put could make the checked/logged value differ
 * from this thread's reservation. Using the value returned by {@code addAndGet}
 * makes the reserve-and-check a single atomic read.
 *
 * @param result serialized region result to cache
 */
public void putRegionResult(byte[] result) {
    long newTotal = totalResultSize.addAndGet(result.length);
    if (newTotal > maxSegmentCacheSize) {
        // Deliberately no rollback of the counter: once the cap is crossed, all
        // subsequent puts are rejected too ("stop put"), matching original behavior.
        logger.info("stop put result to cache, since the result size:{} is larger than configured size:{}",
                newTotal, maxSegmentCacheSize);
        return;
    }
    queue.offer(result);
}
/**
 * Offers {@code t} to the bounded queue, tracking the accepted-element count.
 * Elements are dropped once capacity is reached; that is acceptable for the
 * ThreadPoolExecutor because dropped messages will get retransmitted.
 *
 * @param t element to enqueue
 * @return true if the element was accepted
 */
public boolean offer(T t) {
    boolean accepted = super.offer(t);
    if (accepted) {
        count.incrementAndGet();
    }
    return accepted;
}
@Override
public boolean offer(T e) {
    // Track the number of produced elements. NOTE(review): the index is bumped
    // before — and regardless of — super.offer's result; presumably the delegate
    // queue is unbounded so offer always succeeds, otherwise the index would
    // drift from the queue contents. Confirm against the superclass.
    producerIndex.getAndIncrement();
    return super.offer(e);
}
/**
 * Runs the callable on a brand-new thread inside its own uniquely named thread group,
 * returning a future for the result. The group is retained in {@code threadGroups}
 * so the caller can track or tear it down later.
 *
 * @param tsk       work to execute
 * @param groupName logical name used as the thread-group name prefix
 * @return future completed when the spawned thread finishes the task
 */
@NotNull
private FutureTask<TryResult> executeTaskInNewThread(Callable<TryResult> tsk, GroupName groupName) {
    // Unique per invocation: groupName + prefix + monotonically increasing counter.
    String tgName = groupName + "_" + prefix + "TG" + count.getAndIncrement();
    ThreadGroup threadGroup = new ThreadGroup(tgName);
    threadGroups.add(threadGroup);

    FutureTask<TryResult> future = new FutureTask<>(tsk);
    new Thread(threadGroup, future).start();
    return future;
}
/**
 * Creates a queue bounded by total byte size rather than element count.
 *
 * @param capacity maximum total bytes the queue may hold
 */
public BytesBoundedLinkedQueue(long capacity) {
    this.capacity = capacity;
    this.delegate = new ConcurrentLinkedQueue<>();
}
/**
 * Empties the buffer-reuse pool, decrementing the approximate pool counter once
 * per discarded buffer so the bookkeeping stays in step.
 */
private void drainBuffersToReuse() {
    for (Object drained = buffersToReuse.poll(); drained != null; drained = buffersToReuse.poll()) {
        approximateBuffersToReuseCount.decrementAndGet();
    }
}
@Override
public void onFailure(RuntimeException e) {
    // One fewer async commit in flight, regardless of outcome.
    pendingAsyncCommits.decrementAndGet();
    // Hand the failure to the user callback, wrapped as retriable so the caller may retry the commit.
    completedOffsetCommits.add(new OffsetCommitCompletion(callback, offsets, new RetriableCommitFailedException(e)));
}
// NOTE(review): the closers below end an enclosing anonymous class/call not visible in this chunk.
});
@Override
public void onComplete(ClientResponse response) {
    // Stash the response, then park this handler on the pending-completion queue
    // for the owning thread to process.
    this.response = response;
    pendingCompletion.add(this);
}
// NOTE(review): closing brace of an enclosing class not visible in this chunk.
}
/**
 * Takes the head element, tracking how many elements have been consumed.
 *
 * @return the dequeued element, or null if the queue was empty
 */
@Nullable
@Override
public T poll() {
    T head = super.poll();
    if (head == null) {
        return null;
    }
    // NOTE(review): plain (non-atomic) increment — presumably single-consumer; confirm.
    consumerIndex++;
    return head;
}