Refine search
// NOTE(review): fragment of an anonymous callback — the enclosing call that supplies
// this listener starts before this snippet; the trailing "});" closes it.
// On failure: count the background-refresh error and mark the refresh as no longer running.
@Override public void onFailure(Throwable t) { backgroundRefreshException.incrementAndGet(); backgroundRefreshRunning.decrementAndGet(); } });
// NOTE(review): fragment of an anonymous callback; the trailing orphan "@Override"
// belongs to a following member that is outside this snippet.
// On success: count the completed background refresh and mark it as no longer running.
@Override public void onSuccess(List<String> result) { backgroundRefreshSuccess.incrementAndGet(); backgroundRefreshRunning.decrementAndGet(); } @Override
/**
 * Records one successfully processed item: accumulates its processing time,
 * retires the in-flight slot, and bumps the success tally.
 *
 * @param processingTimeInMs how long the item took to process, in milliseconds.
 * @return the updated success count.
 */
@Override
public long addSuccess(long processingTimeInMs) {
    addProcessingTime(processingTimeInMs);
    inProgress.decrementAndGet();
    return success.incrementAndGet();
}
/**
 * Records one failed item: accumulates its processing time, retires the
 * in-flight slot, and bumps the error tally.
 *
 * @param processingTimeInMs how long the item took to process, in milliseconds.
 * @return the updated error count.
 */
@Override
public long addError(long processingTimeInMs) {
    addProcessingTime(processingTimeInMs);
    inProgress.decrementAndGet();
    return error.incrementAndGet();
}
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
/**
 * Enqueues a broadcast event at the tail of the queue, tracking the number of
 * queued events in {@code counter}. A failed put is logged and the counter
 * reservation is rolled back; no exception escapes.
 *
 * @param event the event to broadcast; silently dropped when broadcasting is
 *              disabled (null queue).
 */
public void announce(BroadcastEvent event) {
    if (broadcastEvents == null) {
        return;
    }
    // Reserve the counter slot first; incrementAndGet cannot throw.
    counter.incrementAndGet();
    try {
        broadcastEvents.putLast(event);
    } catch (Exception e) {
        // The event never made it into the queue — release the reservation.
        counter.decrementAndGet();
        logger.error("error putting BroadcastEvent", e);
    }
}
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
// NOTE(review): fragment of an anonymous Callable — the enclosing submit/schedule call
// starts before this snippet; the trailing "});" closes it.
// Transitions the background refresh from "queued" to "running", then loads fresh values
// for the key and returns them.
@Override public List<String> call() throws Exception { backgroundRefreshQueued.decrementAndGet(); backgroundRefreshRunning.incrementAndGet(); List<String> results = load(key); return results; } });
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
/** Anything that will add N docs to the index should reserve first to * make sure it's allowed. */ private void reserveOneDoc() { if (pendingNumDocs.incrementAndGet() > IndexWriter.getActualMaxDocs()) { // Reserve failed: put the one doc back and throw exc: pendingNumDocs.decrementAndGet(); throw new IllegalArgumentException("number of documents in the index cannot exceed " + IndexWriter.getActualMaxDocs()); } }
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
/**
 * Insert the given batch of updates into the index defined by the given
 * {@link IndexPopulation}, applying it asynchronously on the executor while
 * tracking the outstanding work in {@code activeTasks}.
 *
 * @param population the index population.
 */
@Override
protected void flush( IndexPopulation population )
{
    // Reserve the task slot before handing work to the executor.
    activeTasks.incrementAndGet();
    Collection<IndexEntryUpdate<?>> updates = population.takeCurrentBatch();
    executor.execute( () ->
    {
        try
        {
            population.populator.add( updates );
        }
        catch ( Throwable t )
        {
            fail( population, t );
        }
        finally
        {
            // Release the slot whether the add succeeded or failed.
            activeTasks.decrementAndGet();
        }
    } );
}
@Override public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { if (task == null) { throw new NullPointerException("task"); } if (unit == null) { throw new NullPointerException("unit"); } long pendingTimeoutsCount = pendingTimeouts.incrementAndGet(); if (maxPendingTimeouts > 0 && pendingTimeoutsCount > maxPendingTimeouts) { pendingTimeouts.decrementAndGet(); throw new RejectedExecutionException("Number of pending timeouts (" + pendingTimeoutsCount + ") is greater than or equal to maximum allowed pending " + "timeouts (" + maxPendingTimeouts + ")"); } start(); // Add the timeout to the timeout queue which will be processed on the next tick. // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket. long deadline = System.nanoTime() + unit.toNanos(delay) - startTime; // Guard against overflow. if (delay > 0 && deadline < 0) { deadline = Long.MAX_VALUE; } HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline); timeouts.add(timeout); return timeout; }
// Worker used to exercise an executor under test: counts starts, tracks the peak number of
// concurrently active tasks, busy-waits (via sleep) for runTimeSecs, and records success or
// interruption. blockInterrupt makes the sleep swallow interrupts so the task keeps running.
// NOTE(review): on interruption, testTaskCounter is never decremented — presumably intentional
// (it then counts tasks that did not complete normally); confirm against the harness assertions.
// NOTE(review): the final extra brace closes the enclosing anonymous class, whose header starts
// outside this snippet.
public void run() { testTaskStartCounter.incrementAndGet(); try { testTaskCounter.incrementAndGet(); synchronized (maxConcurrentTestTasks) { int activeCount = executor.getActiveCount(); if (maxConcurrentTestTasks.get() < activeCount) { maxConcurrentTestTasks.set(activeCount); } } long endTime = System.currentTimeMillis() + runTimeSecs * 1000; while (endTime >= System.currentTimeMillis()) { try { Thread.sleep(runTimeSecs * 1000); } catch (InterruptedException e) { if (!blockInterrupt) { throw e; } } } testTaskCounter.decrementAndGet(); testTaskSuccessfulCounter.incrementAndGet(); } catch (InterruptedException e) { testTaskInterruptedCounter.incrementAndGet(); } } }
Flux<T> fluxToUse = connectableFlux.doOnSubscribe(subscription -> { if (subscribers.incrementAndGet() == 1) { disposable = connectableFlux.connect(); if (subscribers.decrementAndGet() == 0) {
Hop hop = Hop.INFERRED; addOutlink(curi, target, lc, hop); numberOfLinksExtracted.incrementAndGet(); " has been removed from " + curi.getURI() + " outlinks list."); numberOfLinksExtracted.decrementAndGet(); } else { LOGGER.log(Level.FINE, "Failed to remove " +
/**
 * Post-invocation hook: releases the active-handler slot, then either records
 * the invocation duration (successful call with full stats enabled) or bumps
 * the error count (failed call).
 *
 * @param context the per-invocation metrics context created by beforeHandle.
 * @param success whether the handler completed without error.
 */
@Override
public void afterHandle(MetricsContext context, boolean success) {
    this.activeCount.decrementAndGet();
    if (!success) {
        this.errorCount.incrementAndGet();
    } else if (isFullStatsEnabled()) {
        this.duration.append(System.nanoTime() - ((DefaultHandlerMetricsContext) context).start);
    }
}
/**
 * Post-invocation hook for the aggregating variant: releases the active-handler
 * slot, bumps the error count on failure, and on success samples the duration
 * of every {@code sampleSize}-th invocation when full stats are enabled.
 *
 * @param context the per-invocation metrics context created by beforeHandle.
 * @param success whether the handler completed without error.
 */
@Override
public void afterHandle(MetricsContext context, boolean success) {
    this.activeCount.decrementAndGet();
    AggregatingHandlerMetricsContext ctx = (AggregatingHandlerMetricsContext) context;
    if (!success) {
        this.errorCount.incrementAndGet();
        return;
    }
    // Only every sampleSize-th successful call contributes a duration sample.
    if (isFullStatsEnabled() && ctx.newCount % this.sampleSize == 0) {
        this.duration.append(System.nanoTime() - ctx.start);
    }
}