@Override
public int getEffectiveCount() {
    // Number of buckets (fixed array slots plus overflow-map entries) that
    // have recorded at least one sample.
    int effective = 0;
    final int len = timingArr.length();
    for (int idx = 0; idx < len; idx++) {
        if (timingArr.get(idx) > 0) {
            effective++;
        }
    }
    for (AtomicInteger counter : timingMap.values()) {
        if (counter.get() > 0) {
            effective++;
        }
    }
    return effective;
}
public RoughRecorder(int methodTag, int mostTimeThreshold) {
    super(methodTag);
    // One bucket per millisecond up to the threshold, plus two spare slots.
    timingArr = new AtomicIntegerArray(mostTimeThreshold + 2);
    outThresholdCounter = new AtomicInteger(0);
}
@Override
public synchronized void resetRecord() {
    // Zero every fixed bucket.
    for (int idx = 0, len = timingArr.length(); idx < len; idx++) {
        timingArr.set(idx, 0);
    }
    // Drop overflow entries that lie far beyond the array's range or never
    // recorded anything; zero the counters we keep.
    timingMap.entrySet().removeIf(
            e -> (e.getKey() > 1.5 * timingArr.length()) || e.getValue().get() <= 0);
    for (AtomicInteger counter : timingMap.values()) {
        counter.set(0);
    }
    hasRecord = false;
}
public int getAndPresetFirstEmptyIndex() {
    // Scan for the first UNSET slot and atomically claim it as PRE_SET.
    // Returns the claimed index, or -1 when every slot is taken.
    for (int slot = 0; slot < maxPendingUploadMetrics; slot++) {
        boolean looksFree = metricStat.get(slot) == UNSET;
        if (looksFree && metricStat.compareAndSet(slot, UNSET, PRE_SET)) {
            return slot;
        }
    }
    return -1;
}
/**
 * Copies an {@link AtomicIntegerArray} into a plain int[].
 *
 * @param atomicIntegerArray the {@link AtomicIntegerArray} to copy; may be null.
 * @return a new int[] holding a snapshot of the values, or null when the
 *         input is null.
 */
public static int[] array(AtomicIntegerArray atomicIntegerArray) {
    if (atomicIntegerArray == null) {
        return null;
    }
    final int length = atomicIntegerArray.length();
    int[] snapshot = new int[length];
    for (int i = 0; i < length; i++) {
        snapshot[i] = atomicIntegerArray.get(i);
    }
    return snapshot;
}
private boolean clearIntDirty(final int idx) {
    // The weak CAS may fail spuriously even when the slot holds the expected
    // value, so fall back to the strong CAS before reporting failure.
    return this.intDirty.weakCompareAndSet(idx, 1 /* expected */, 0 /* update */)
            || this.intDirty.compareAndSet(idx, 1 /* expected */, 0 /* update */);
}
@Override
public void recordTime(long startNanoTime, long endNanoTime) {
    // Ignore samples whose timestamps are out of order.
    if (endNanoTime < startNanoTime) {
        return;
    }
    hasRecord = true;
    // Elapsed time in whole milliseconds.
    int elapsedMs = (int) ((endNanoTime - startNanoTime) / 1000000);
    if (elapsedMs < timingArr.length()) {
        // Common case: bucket the sample in the fixed-size array.
        timingArr.incrementAndGet(elapsedMs);
        return;
    }
    // Slow samples overflow into the map; create the counter on first use,
    // tolerating a racing creator via putIfAbsent.
    AtomicInteger counter = timingMap.get(elapsedMs);
    if (counter == null) {
        counter = timingMap.putIfAbsent(elapsedMs, new AtomicInteger(1));
        if (counter == null) {
            return; // our fresh counter was installed already holding 1
        }
    }
    counter.incrementAndGet();
}
// NOTE(review): anonymous Callable handed to a multi-threaded test runner —
// the trailing `}, restartThreadsNum, "restart-thread");` belongs to the
// enclosing call, which starts before this block's visible range.
// Each iteration: claim a random grid slot by CAS-ing `locks` from 0 to -1,
// stop that node, sleep up to nodeLifeTime ms, restart it, sleep again,
// release the slot (back to 0), bump restartCnt and log every logFreq
// restarts. Loops until restartsDone becomes true.
@SuppressWarnings({"BusyWait"}) @Override public Object call() throws Exception { GridRandom rnd = new GridRandom(); while (!restartsDone.get()) { int g; do { g = rnd.nextInt(locks.length()); } while (!locks.compareAndSet(g, 0, -1)); log.info("Stop node: " + g); stopGrid(g); Thread.sleep(rnd.nextInt(nodeLifeTime)); log.info("Start node: " + g); startGrid(g); Thread.sleep(rnd.nextInt(nodeLifeTime)); locks.set(g, 0); int c = restartCnt.incrementAndGet(); if (c % logFreq == 0) info("Node restarts: " + c); } return true; } }, restartThreadsNum, "restart-thread");
@SuppressWarnings({"unchecked", "rawtypes"})
private void addAtomicClasses() {
    // Prefab red / black / redCopy value triples for the plain atomic types.
    addValues(AtomicBoolean.class,
            new AtomicBoolean(true), new AtomicBoolean(false), new AtomicBoolean(true));
    addValues(AtomicInteger.class,
            new AtomicInteger(1), new AtomicInteger(2), new AtomicInteger(1));
    addValues(AtomicIntegerArray.class,
            new AtomicIntegerArray(new int[] { 1 }),
            new AtomicIntegerArray(new int[] { 2 }),
            new AtomicIntegerArray(new int[] { 1 }));
    addValues(AtomicLong.class,
            new AtomicLong(1L), new AtomicLong(2L), new AtomicLong(1L));
    addValues(AtomicLongArray.class,
            new AtomicLongArray(new long[] { 1L }),
            new AtomicLongArray(new long[] { 2L }),
            new AtomicLongArray(new long[] { 1L }));
    // Reference-holding atomics wrap a recursively generated inner value.
    addFactory(AtomicMarkableReference.class,
            simple(ref -> new AtomicMarkableReference(ref, true), null));
    addFactory(AtomicReference.class, simple(AtomicReference::new, null));
    addFactory(AtomicStampedReference.class,
            simple(ref -> new AtomicStampedReference(ref, 0), null));
    addFactory(AtomicReferenceArray.class, (tag, pv, stack) -> {
        TypeTag elementTag = tag.getGenericTypes().get(0);
        Object[] redArr = new Object[] { pv.giveRed(elementTag) };
        Object[] blackArr = new Object[] { pv.giveBlack(elementTag) };
        Object[] redCopyArr = new Object[] { pv.giveRedCopy(elementTag) };
        return Tuple.of(new AtomicReferenceArray(redArr),
                new AtomicReferenceArray(blackArr),
                new AtomicReferenceArray(redCopyArr));
    });
}
public void addValue(int... latency) { for (int l : latency) { /* We just wrap around the beginning and over-write if we go past 'dataLength' as that will effectively cause us to "sample" the most recent data */ list.set(index.getAndIncrement() % length, l); // TODO Alternative to AtomicInteger? The getAndIncrement may be a source of contention on high throughput circuits on large multi-core systems. // LongAdder isn't suited to this as it is not consistent. Perhaps a different data structure that doesn't need indexed adds? // A threadlocal data storage that only aggregates when fetched would be ideal. Similar to LongAdder except for accumulating lists of data. } }
// NOTE(review): anonymous Runnable handed to a multi-threaded test runner —
// the trailing `}, keysCnt, "tx-lock-thread");` belongs to the enclosing
// call, which starts before this block's visible range. Each thread takes a
// unique key from idGen, then repeatedly: reserves a random node slot by
// CAS-ing reservedIdx from 0 to 1 (busy-waiting otherwise), waits on b
// (presumably a barrier — U.awaitQuiet), opens a PESSIMISTIC /
// REPEATABLE_READ transaction, releases the slot, and issues two gets in an
// order designed to deadlock with sibling threads. A Throwable is the
// expected outcome; fail() fires only if no deadlock happens.
@Override public void run() { int key = idGen.getAndIncrement(); List<Integer> keys = new ArrayList<>(); for (int k = 0; k < keysCnt; k++) keys.add(k); int cntr = 0; for (int i = 0; i < ITERATIONS; i++) { cntr++; int nodeId; while(!reservedIdx.compareAndSet((nodeId = r.nextInt(TOTAL_CNT)), 0, 1)) doSleep(10); U.awaitQuiet(b); final IgniteEx grid = grid(nodeId); try (final Transaction tx = grid.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 0, 0)) { reservedIdx.set(nodeId, 0); // Construct deadlock grid.cache(CACHE_NAME).get(keys.get(key)); // Should block. grid.cache(CACHE_NAME).get(keys.get((key + 1) % keysCnt)); fail("Deadlock expected"); } catch (Throwable t) { // Expected. } if (key == 0) log.info("Rolled back: " + cntr); } } }, keysCnt, "tx-lock-thread");
// NOTE(review): this block appears garbled/truncated by extraction — the
// method body is missing its braces and the tail of the resize logic (the
// actual entry transfer into resizeContainer) is cut off. It will not
// compile as-is; recover the original from the upstream source.
// Intent, as far as the visible code shows: read the old table's last slot,
// plant RESIZE_SENTINEL there when a resize is warranted, lazily allocate
// the striped partitionedSize counters (SIZE_BUCKETS * 16 ints) once
// newSize crosses PARTITIONED_SIZE_THRESHOLD, and build a ResizeContainer
// around the new AtomicReferenceArray.
@SuppressWarnings("JLM_JSR166_UTILCONCURRENT_MONITORENTER") private void resize(AtomicReferenceArray oldTable, int newSize) int oldCapacity = oldTable.length(); int end = oldCapacity - 1; Object last = oldTable.get(end); if (this.size() < end && last == RESIZE_SENTINEL) if (oldTable.get(end) == null) oldTable.set(end, RESIZE_SENTINEL); if (this.partitionedSize == null && newSize >= PARTITIONED_SIZE_THRESHOLD) this.partitionedSize = new AtomicIntegerArray(SIZE_BUCKETS * 16); resizeContainer = new ResizeContainer(new AtomicReferenceArray(newSize), oldTable.length() - 1);
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) {
    // JSON null maps to Java null; consume the token before returning.
    if (parser.lexer.token() == JSONToken.NULL) {
        parser.lexer.nextToken(JSONToken.COMMA);
        return null;
    }
    JSONArray values = new JSONArray();
    parser.parseArray(values);
    final int size = values.size();
    if (clazz == AtomicIntegerArray.class) {
        AtomicIntegerArray ints = new AtomicIntegerArray(size);
        for (int i = 0; i < size; ++i) {
            ints.set(i, values.getInteger(i));
        }
        return (T) ints;
    }
    // Any other requested type is treated as AtomicLongArray.
    AtomicLongArray longs = new AtomicLongArray(size);
    for (int i = 0; i < size; ++i) {
        longs.set(i, values.getLong(i));
    }
    return (T) longs;
}
public ConcurrentHashMap(int initialCapacity) { if (initialCapacity < 0) { throw new IllegalArgumentException("Illegal Initial Capacity: " + initialCapacity); } if (initialCapacity > MAXIMUM_CAPACITY) { initialCapacity = MAXIMUM_CAPACITY; } int threshold = initialCapacity; threshold += threshold >> 1; // threshold = length * 0.75 int capacity = 1; while (capacity < threshold) { capacity <<= 1; } if (capacity >= PARTITIONED_SIZE_THRESHOLD) { this.partitionedSize = new AtomicIntegerArray(SIZE_BUCKETS * 16); // we want 7 extra slots and 64 bytes for each slot. int is 4 bytes, so 64 bytes is 16 ints. } this.table = new AtomicReferenceArray(capacity + 1); }
public boolean store(int index, E item) {
    // Publish the item only if the slot is still empty; on success bump the
    // counter at tailOffset.
    final boolean stored = elements.compareAndSet(index, null, item);
    if (stored) {
        indexes.incrementAndGet(tailOffset);
    }
    return stored;
}
// Records one hit at the given timestamp (ms) into a circular bucket array.
@VisibleForTesting void hit(long timestamp) {
    // numTimeUnits identifies the absolute time window; index is its slot
    // in the ring of BUCKET_COUNT buckets.
    long numTimeUnits = timestamp / _timeBucketWidthMs;
    int index = (int) (numTimeUnits % BUCKET_COUNT);
    if (_bucketStartTime.get(index) == numTimeUnits) {
        // Fast path: the slot already belongs to the current window.
        _bucketHitCount.incrementAndGet(index);
    } else {
        // Slot holds a stale window: double-checked reset under the lock so
        // exactly one thread reinitializes while concurrent hitters either
        // wait or increment the freshly reset counter.
        synchronized (_bucketStartTime) {
            if (_bucketStartTime.get(index) != numTimeUnits) {
                // Count this hit as the first of the new window; set the
                // count before the start time so the fast path above cannot
                // observe the new window with a stale count.
                _bucketHitCount.set(index, 1);
                _bucketStartTime.set(index, numTimeUnits);
            } else {
                // Another thread reset the slot while we waited for the lock.
                _bucketHitCount.incrementAndGet(index);
            }
        }
    }
}
/**
 * Tracks hits over a trailing window of {@code timeRangeInSeconds}, split
 * into BUCKET_COUNT equal time buckets.
 *
 * @param timeRangeInSeconds width of the sliding window, in seconds; must be
 *        large enough that each bucket spans at least one millisecond.
 * @throws IllegalArgumentException when the computed bucket width is not
 *         positive (a zero width would divide by zero in hit()).
 */
public HitCounter(int timeRangeInSeconds) {
    // BUG FIX: compute in long so `timeRangeInSeconds * 1000` cannot
    // overflow int before the division (it overflowed above ~24.8 days).
    int bucketWidthMs = (int) (timeRangeInSeconds * 1000L / BUCKET_COUNT);
    if (bucketWidthMs <= 0) {
        throw new IllegalArgumentException(
            "timeRangeInSeconds too small for " + BUCKET_COUNT + " buckets: " + timeRangeInSeconds);
    }
    _timeBucketWidthMs = bucketWidthMs;
    _bucketStartTime = new AtomicLongArray(BUCKET_COUNT);
    _bucketHitCount = new AtomicIntegerArray(BUCKET_COUNT);
}
/**
 * @param operations Returned invocations
 * @param invocations Number of operations per interval
 * @param intervals Size of slot, in milliseconds
 * @throws IllegalArgumentException if the three arrays differ in length
 */
public SchedulingSelector(T[] operations, int[] invocations, long[] intervals) {
    // BUG FIX: intervals.length was never validated; a shorter array would
    // only fail later with an ArrayIndexOutOfBoundsException during use.
    if (operations.length != invocations.length || operations.length != intervals.length) {
        throw new IllegalArgumentException(
            "operations, invocations and intervals must have equal lengths: "
                + operations.length + ", " + invocations.length + ", " + intervals.length);
    }
    this.operations = operations;
    this.invocations = invocations;
    this.intervals = intervals;
    lastIntervals = new AtomicLongArray(operations.length);
    todoInvocations = new AtomicIntegerArray(operations.length);
    // Long.MIN_VALUE marks every slot as "never yet invoked".
    for (int i = 0; i < operations.length; ++i) {
        lastIntervals.set(i, Long.MIN_VALUE);
    }
}