Refine search
/**
 * Adds {@code val} to the counter registered under {@code key}, creating the
 * counter at zero on first use.
 *
 * @param key counter name
 * @param val amount to add (may be negative)
 * @return the counter's value after the addition
 */
public int increment(String key, int val) {
    AtomicInteger counter = intCounters.computeIfAbsent(key, unused -> new AtomicInteger());
    return counter.addAndGet(val);
}
/**
 * Buffers a region result for later consumption, subject to a total-size cap.
 *
 * <p>Fix: the original added {@code result.length} to {@code totalResultSize}
 * unconditionally, so rejected results stayed counted forever and once the cap
 * was reached every later result was rejected as well. The reservation is now
 * rolled back when the result is not cached.
 *
 * @param result serialized region result; dropped (with an info log) when
 *               caching it would push the accounted size over
 *               {@code maxSegmentCacheSize}
 */
public void putRegionResult(byte[] result) {
    // Reserve the bytes first so concurrent callers see a consistent total,
    // then roll the reservation back if the cap would be exceeded.
    long newTotal = totalResultSize.addAndGet(result.length);
    if (newTotal > maxSegmentCacheSize) {
        totalResultSize.addAndGet(-result.length);
        logger.info("stop put result to cache, since the result size:{} is larger than configured size:{}",
                newTotal, maxSegmentCacheSize);
        return;
    }
    queue.offer(result);
}
/**
 * Evicts unreferenced entries from the listing cache until roughly
 * {@code mLowWaterMark} total weight remains, using a CLOCK-like second-chance
 * sweep: a referenced entry gets its flag cleared and is spared once; it is
 * only removed if seen again with the flag still clear.
 */
private void evict() {
    long startTime = System.currentTimeMillis();
    // How much weight must be removed to get back down to the low water mark.
    long evictTarget = mWeight.get() - mLowWaterMark;
    AtomicInteger evicted = new AtomicInteger(0);
    while (evicted.get() < evictTarget) {
        // The sweep cursor wraps around to the start of the map when exhausted.
        if (!mEvictionHead.hasNext()) {
            mEvictionHead = mMap.entrySet().iterator();
        }
        if (!mEvictionHead.hasNext()) {
            break; // cache is empty.
        }
        Entry<Long, ListingCacheEntry> candidate = mEvictionHead.next();
        // Second chance: clear the referenced flag and skip this entry once.
        if (candidate.getValue().mReferenced) {
            candidate.getValue().mReferenced = false;
            continue;
        }
        // compute() removes the entry atomically; the entry may have been
        // removed or replaced since the iterator produced it, hence the
        // null/mChildren re-check inside the remapping function.
        mMap.compute(candidate.getKey(), (key, entry) -> {
            if (entry != null && entry.mChildren != null) {
                mWeight.addAndGet(-weight(entry));
                evicted.addAndGet(weight(entry));
                return null;
            }
            return entry;
        });
    }
    LOG.debug("Evicted weight={} from listing cache down to weight={} in {}ms", evicted.get(), mWeight.get(), System.currentTimeMillis() - startTime);
}
CompletableFuture<Void> done = new CompletableFuture<>(); AtomicBoolean paused = new AtomicBoolean(); AtomicInteger numPause = new AtomicInteger(); server.requestHandler(req -> { Context ctx = vertx.getOrCreateContext(); fail(); }); AtomicInteger sent = new AtomicInteger(); AtomicInteger count = new AtomicInteger(); AtomicInteger drained = new AtomicInteger(); vertx.setPeriodic(1, timerID -> { if (req.writeQueueFull()) { assertTrue(paused.get()); assertEquals(1, numPause.get()); req.drainHandler(v -> { assertOnIOContext(ctx); assertEquals(0, drained.getAndIncrement()); assertEquals(1, numPause.get()); assertFalse(paused.get()); req.end(); expected.appendString(chunk); req.write(chunk); sent.addAndGet(chunk.length());
for (InternalHiveSplit internalSplit : internalSplits) { long maxSplitBytes = maxSplitSize.toBytes(); if (remainingInitialSplits.get() > 0) { if (remainingInitialSplits.getAndDecrement() > 0) { maxSplitBytes = maxInitialSplitSize.toBytes(); bufferedInternalSplitCount.addAndGet(splitsToInsert.size() - result.size());
/**
 * Appends {@code data} at the current write position if it fits within the
 * mapped file's capacity.
 *
 * <p>Fix: the original advanced {@code wrotePosition} and returned {@code true}
 * even when the channel write threw, leaving a hole of unwritten bytes that
 * later appends would silently skip past (the sibling {@code appendData} on
 * this page already returns {@code false} on failure). The position is now
 * only advanced after a successful write.
 *
 * @param data bytes to append
 * @return true if the bytes were written; false if the file is full or the
 *         write failed
 */
public boolean appendMessage(final byte[] data) {
    int currentPos = this.wrotePosition.get();
    if ((currentPos + data.length) > this.fileSize) {
        return false;
    }
    try {
        this.fileChannel.position(currentPos);
        this.fileChannel.write(ByteBuffer.wrap(data));
    } catch (Throwable e) {
        log.error("Error occurred when append message to mappedFile.", e);
        return false;
    }
    this.wrotePosition.addAndGet(data.length);
    return true;
}
private static StringBuilder formatText(String text, int indentTo, int blockWidth) { final StringBuilder row = new StringBuilder(); final AtomicInteger col = new AtomicInteger(indentTo); final int extraSpace = col.get() > indentTo ? 1 : 0; if (col.get() + word.length() + extraSpace > blockWidth) { row.append(nl()).append(repeat(" ", indentTo)); col.set(indentTo); if (col.get() > indentTo) { row.append(" "); col.incrementAndGet(); col.addAndGet(word.length());
private void addReceiveCount(String from, int amount) { //This is possibly lossy in the case where a value is deleted // because it has received no messages over the metrics collection // period and new messages are starting to come in. This is // because I don't want the overhead of a synchronize just to have // the metric be absolutely perfect. AtomicInteger i = messagesEnqueued.get(from); if (i == null) { i = new AtomicInteger(amount); AtomicInteger prev = messagesEnqueued.putIfAbsent(from, i); if (prev != null) { prev.addAndGet(amount); } } else { i.addAndGet(amount); } }
final AtomicInteger releaseCount = new AtomicInteger(); pendingMessages.forEach(op -> { releaseCount.addAndGet(op.numMessagesInBatch); try { op.recycle(); }); semaphore.release(releaseCount.get()); pendingMessages.clear(); pendingCallbacks.clear();
private void addCounter(String name, int count) { AtomicInteger ai = m_metricCounters.get(name); if (ai == null) { ai = new AtomicInteger(); AtomicInteger mapValue = m_metricCounters.putIfAbsent(name, ai); ai = (mapValue != null ? mapValue : ai); //This handles the case if one snuck in on another thread. } ai.addAndGet(count); }
/**
 * Appends the buffer's contents to the log segment at the current write
 * position, advancing the position only on success.
 *
 * <p>NOTE(review): the size is taken from {@code data.limit()}, but
 * {@code fileChannel.write} writes only the buffer's remaining bytes
 * (position..limit). If callers ever pass a buffer with a non-zero position,
 * {@code wrotePosition} would advance further than was written — presumably
 * callers always pass freshly wrapped/flipped buffers; confirm before reuse.
 *
 * @param data buffer to append
 * @return true if the write succeeded; false if the segment is full or the
 *         write threw
 */
public boolean appendData(final ByteBuffer data) {
    final int currentPos = wrotePosition.get();
    final int size = data.limit();
    // Reject writes that would run past the fixed segment capacity.
    if (currentPos + size > fileSize) {
        return false;
    }
    try {
        // Positional write: does not disturb the channel's own position.
        fileChannel.write(data, currentPos);
        this.wrotePosition.addAndGet(size);
    } catch (Throwable e) {
        LOG.error("Append data to log segment failed.", e);
        return false;
    }
    return true;
}
/**
 * Verifies that {@code collectList} delivers the whole stream as a single
 * batch: one {@code onNext} carrying all five parsed integers.
 *
 * <p>Fix: the first assertion's failure message read "batchCount is 3" while
 * the asserted value is {@code is(1)}; the message now matches the assertion.
 */
@Test
public void testStreamBatchesResults() {
    Flux<String> stream = Flux.just("1", "2", "3", "4", "5");
    Mono<List<Integer>> s = stream.map(STRING_2_INTEGER)
            .collectList();

    final AtomicInteger batchCount = new AtomicInteger();
    final AtomicInteger count = new AtomicInteger();
    s.subscribe(is -> {
        // collectList emits exactly one list, so this runs once.
        batchCount.incrementAndGet();
        for (int i : is) {
            count.addAndGet(i);
        }
    });

    assertThat("batchCount is 1", batchCount.get(), is(1));
    assertThat("count is 15", count.get(), is(15));
}
/**
 * Records that {@code numberOfBytes} were encoded for {@code address},
 * creating the per-address byte counter on first use.
 */
@Override
public void messageWritten(String address, int numberOfBytes) {
    AtomicInteger fresh = new AtomicInteger();
    AtomicInteger current = encoded.putIfAbsent(address, fresh);
    AtomicInteger counter = (current == null) ? fresh : current;
    counter.addAndGet(numberOfBytes);
}
@Override public CollectionDeleteStatus deleteCollection( final CollectionDeleteRequestBuilder collectionDeleteRequestBuilder) { final AtomicInteger count = new AtomicInteger(); logger.info("Sending batch of {} to be deleted.", edgeScopes.size()); indexService.deleteBatch(edgeScopes, endTimestamp, AsyncEventQueueType.DELETE); count.addAndGet(edgeScopes.size() ); if( edgeScopes.size() > 0 ) { writeCursorState(jobId, edgeScopes.get(edgeScopes.size() - 1)); writeStateMeta( jobId, Status.INPROGRESS, count.get(), System.currentTimeMillis() ); }) .doOnCompleted(() -> writeStateMeta( jobId, Status.COMPLETE, count.get(), System.currentTimeMillis() )) .subscribeOn( Schedulers.io() ).subscribe();
/**
 * Records that {@code numberOfBytes} were decoded for {@code address},
 * creating the per-address byte counter on first use.
 */
@Override
public void messageRead(String address, int numberOfBytes) {
    AtomicInteger fresh = new AtomicInteger();
    AtomicInteger current = decoded.putIfAbsent(address, fresh);
    AtomicInteger counter = (current == null) ? fresh : current;
    counter.addAndGet(numberOfBytes);
}
/**
 * Deploys a verticle with 3 instances and verifies the instance counts each
 * instance reports over the event bus, then undeploys and waits for cleanup.
 *
 * <p>Each of the 3 instances publishes one message to "instanceCount";
 * the reported values sum to 9 — presumably each instance reports 3 (its
 * deployment's instance count); confirm against TestVerticle3.
 */
@Test
public void testGetInstanceCountMultipleVerticles() throws Exception {
    AtomicInteger messageCount = new AtomicInteger(0);
    AtomicInteger totalReportedInstances = new AtomicInteger(0);
    vertx.eventBus().consumer("instanceCount", event -> {
        messageCount.incrementAndGet();
        totalReportedInstances.addAndGet((int)event.body());
        // Once all 3 instances have reported, check the sum and finish.
        if(messageCount.intValue() == 3) {
            assertEquals(9, totalReportedInstances.get());
            testComplete();
        }
    });
    vertx.deployVerticle(TestVerticle3.class.getCanonicalName(), new DeploymentOptions().setInstances(3), ar -> {
        assertTrue(ar.succeeded());
    });
    // Blocks until testComplete() is called by the consumer above.
    await();
    Deployment deployment = ((VertxInternal) vertx).getDeployment(vertx.deploymentIDs().iterator().next());
    CountDownLatch latch = new CountDownLatch(1);
    vertx.undeploy(deployment.deploymentID(), ar -> latch.countDown());
    awaitLatch(latch);
}