/**
 * Returns the total elapsed nanoseconds: the accumulated time plus, when the
 * stopwatch is running, the still-open interval since {@code startTick}.
 */
private long elapsedNanos() {
    if (isRunning) {
        return elapsedNanos + (ticker.read() - startTick);
    }
    return elapsedNanos;
}
/** Records the current ticker reading as the start of a new measurement interval. */
public void start() {
    long now = ticker.read();
    previousTimestamp = now;
}
/** Returns the ticker's current reading, in nanoseconds. */
private long tickerNanos() {
    long nanos = ticker.read();
    return nanos;
}
/** Marks the beginning of a request with the current ticker time; synchronized for visibility. */
public synchronized void startRequest() {
    long requestStart = ticker.read();
    lastRequestStart = requestStart;
}
/** Current time in nanoseconds as reported by the injected ticker. */
private long now() {
    return this.ticker.read();
}
/** Publishes the current ticker time as the most recent "ready" instant. */
public void setReady() {
    long readyAt = ticker.read();
    lastReady.set(readyAt);
}
public synchronized long getBackoffDelayNanos() { int failureCount = (int) min(backoffDelayIntervalsNanos.length, this.failureCount); if (failureCount == 0) { return 0; } // expected amount of time to delay from the last failure time long currentDelay = backoffDelayIntervalsNanos[failureCount - 1]; // calculate expected delay from now long nanosSinceLastFailure = ticker.read() - lastFailureTime; return max(0, currentDelay - nanosSinceLastFailure); } }
/**
 * Starts the stopwatch.
 *
 * @return this {@code Stopwatch} instance
 * @throws IllegalStateException if the stopwatch is already running.
 */
@CanIgnoreReturnValue
public Stopwatch start() {
    checkState(!isRunning, "This stopwatch is already running.");
    // Capture the starting tick, then flip the running flag; the two writes are independent.
    startTick = ticker.read();
    isRunning = true;
    return this;
}
/**
 * Stops the stopwatch. Future reads will return the fixed duration that had elapsed up to this
 * point.
 *
 * @return this {@code Stopwatch} instance
 * @throws IllegalStateException if the stopwatch is already stopped.
 */
@CanIgnoreReturnValue
public Stopwatch stop() {
    // Read the ticker first so the stop time excludes the checkState overhead.
    long tick = ticker.read();
    checkState(isRunning, "This stopwatch is already stopped.");
    isRunning = false;
    long interval = tick - startTick;
    elapsedNanos = elapsedNanos + interval;
    return this;
}
/** Returns the ticker's notion of "now", in nanoseconds. */
private long now() {
    long nanos = ticker.read();
    return nanos;
}
/** Returns a human-readable summary of this split's identity and timing statistics. */
public String getInfo() {
    // All times are tracked in nanoseconds; convert to milliseconds for display.
    double startMillis = start.get() / 1.0e6;
    int wallMillis = (int) ((ticker.read() - start.get()) / 1.0e6);
    int cpuMillis = (int) (cpuTimeNanos.get() / 1.0e6);
    int waitMillis = (int) (waitNanos.get() / 1.0e6);
    return String.format(
            "Split %-15s-%d %s (start = %s, wall = %s ms, cpu = %s ms, wait = %s ms, calls = %s)",
            taskHandle.getTaskId(),
            splitId,
            split.getInfo(),
            startMillis,
            wallMillis,
            cpuMillis,
            waitMillis,
            processCalls.get());
}
/** Runs the locked cleanup pass (stamped with the cache ticker's current time) and then the unlocked pass. */
void cleanUp() {
    long currentTime = map.ticker.read();
    runLockedCleanup(currentTime);
    runUnlockedCleanup();
}
/**
 * Returns how long this target has been failing, measured from the first recorded
 * failure to now; a zero duration is returned when no failure has been recorded.
 */
public synchronized Duration getFailureDuration() {
    if (firstFailureTime == 0) {
        return new Duration(0, MILLISECONDS);
    }
    long elapsed = ticker.read() - firstFailureTime;
    return new Duration(elapsed, NANOSECONDS);
}
/**
 * Ends a measurement interval opened by {@code start()} and folds the elapsed time into
 * the running totals; once the average cost per sample drops below the threshold the
 * expression is no longer considered expensive.
 *
 * @param batchSize number of samples processed during the interval; must be positive
 */
public void stop(int batchSize) {
    verify(previousTimestamp != NOT_INITALIZED, "start() is not called");
    verify(batchSize > 0, "batchSize must be positive");
    long elapsed = ticker.read() - previousTimestamp;
    totalExecutionTimeNanos += elapsed;
    samples += batchSize;
    // average nanoseconds per sample, compared against the configured threshold
    if ((totalExecutionTimeNanos / samples) < expensiveExpressionThresholdNanos) {
        isExpressionExpensive = false;
    }
    previousTimestamp = NOT_INITALIZED;
}
/**
 * Blocks until the in-flight load for {@code key} completes on another thread, then
 * returns the loaded value.
 *
 * @param e the entry whose value is currently loading
 * @param key the key being loaded
 * @param valueReference the loading value reference to wait on; must report isLoading()
 * @return the loaded value (never null)
 * @throws ExecutionException if the load fails
 * @throws InvalidCacheLoadException if the loader produced null for the key
 */
V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference) throws ExecutionException {
    if (!valueReference.isLoading()) {
        throw new AssertionError();
    }
    // If this thread holds the entry's lock, it is the one performing the load —
    // waiting here would never complete (recursive load of the same key).
    checkState(!Thread.holdsLock(e), "Recursive load of: %s", key);
    // don't consider expiration as we're concurrent with loading
    try {
        V value = valueReference.waitForValue();
        if (value == null) {
            throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
        }
        // re-read ticker now that loading has completed
        long now = map.ticker.read();
        recordRead(e, now);
        return value;
    } finally {
        // waiting on another thread's load still counts as a miss for this caller
        statsCounter.recordMisses(1);
    }
}
/** * This method is a convenience for testing. Code should call {@link LocalCache#containsValue} * directly. */ @VisibleForTesting boolean containsValue(Object value) { try { if (count != 0) { // read-volatile long now = map.ticker.read(); AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; int length = table.length(); for (int i = 0; i < length; ++i) { for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { V entryValue = getLiveValue(e, now); if (entryValue == null) { continue; } if (map.valueEquivalence.equivalent(value, entryValue)) { return true; } } } } return false; } finally { postReadCleanup(); } }
boolean containsKey(Object key, int hash) { try { if (count != 0) { // read-volatile long now = map.ticker.read(); ReferenceEntry<K, V> e = getLiveEntry(key, hash, now); if (e == null) { return false; } return e.getValueReference().get() != null; } return false; } finally { postReadCleanup(); } }
/** Counts the running splits whose elapsed time exceeds the long-split warning threshold. */
@Managed
public long getRunAwaySplitCount() {
    int runaways = 0;
    for (RunningSplitInfo splitInfo : runningSplitInfos) {
        // re-read the ticker per split, matching each split against "now" at inspection time
        long elapsedNanos = ticker.read() - splitInfo.getStartTime();
        if (Duration.succinctNanos(elapsedNanos).compareTo(LONG_SPLIT_WARNING_THRESHOLD) > 0) {
            runaways++;
        }
    }
    return runaways;
}
abstract class AbstractCacheSet<T> extends AbstractSet<T> { @Weak final ConcurrentMap<?, ?> map; AbstractCacheSet(ConcurrentMap<?, ?> map) { this.map = map; } @Override public int size() { return map.size(); } @Override public boolean isEmpty() { return map.isEmpty(); } @Override public void clear() { map.clear(); } // super.toArray() may misbehave if size() is inaccurate, at least on old versions of Android. // https://code.google.com/p/android/issues/detail?id=36519 / http://r.android.com/47508 @Override public Object[] toArray() { return toArrayList(this).toArray(); } @Override public <E> E[] toArray(E[] a) { return toArrayList(this).toArray(a); } }
/**
 * Returns the live value for {@code key} at {@code hash}, or {@code null} if there is no
 * live entry or its value has been collected/expired. A successful read is recorded for
 * eviction bookkeeping and may trigger a refresh via {@code scheduleRefresh}.
 *
 * @param key the key to look up
 * @param hash the key's precomputed hash
 * @return the live value, possibly refreshed, or {@code null}
 */
@Nullable
V get(Object key, int hash) {
    try {
        if (count != 0) { // read-volatile
            long now = map.ticker.read();
            ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
            if (e == null) {
                return null;
            }
            V value = e.getValueReference().get();
            if (value != null) {
                recordRead(e, now);
                return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader);
            }
            // value reference was cleared under us — help drain the reference queues
            tryDrainReferenceQueues();
        }
        return null;
    } finally {
        postReadCleanup();
    }
}