public PercentileBucketData(int dataLength) { this.length = dataLength; this.list = new AtomicIntegerArray(dataLength); }
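// The PercentileBucketData snippet above is the lock-free histogram pattern: each slot of the
// AtomicIntegerArray counts observations falling into one bucket, and a percentile is read back by
// walking the cumulative counts. A minimal sketch of that idea follows; the record/percentile
// methods and the value-to-bucket mapping are assumptions for illustration, not the original class.
import java.util.concurrent.atomic.AtomicIntegerArray;

class SimplePercentileHistogram {
    private final AtomicIntegerArray buckets;

    SimplePercentileHistogram(int maxValue) {
        // One bucket per value from 0..maxValue; the last bucket also absorbs overflow.
        this.buckets = new AtomicIntegerArray(maxValue + 1);
    }

    void record(int value) {
        int idx = Math.min(Math.max(value, 0), buckets.length() - 1);
        buckets.incrementAndGet(idx); // lock-free per-bucket increment
    }

    int percentile(double p) {
        long total = 0;
        for (int i = 0; i < buckets.length(); i++) total += buckets.get(i);
        long target = (long) Math.ceil(total * p);
        long seen = 0;
        for (int i = 0; i < buckets.length(); i++) {
            seen += buckets.get(i);
            if (seen >= target) return i;
        }
        return buckets.length() - 1;
    }
}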
public RoughRecorder(int methodTag, int mostTimeThreshold) { super(methodTag); this.timingArr = new AtomicIntegerArray(mostTimeThreshold + 2); this.outThresholdCounter = new AtomicInteger(0); }
public IntegerStatsDeltaAggregator(List<String> keys) { this.keys = keys; prevCounters = new AtomicIntegerArray(keys.size()); currCounters = new AtomicIntegerArray(keys.size()); initializeArray(currCounters); }
public HitCounter(int timeRangeInSeconds) { _timeBucketWidthMs = timeRangeInSeconds * 1000 / BUCKET_COUNT; _bucketStartTime = new AtomicLongArray(BUCKET_COUNT); _bucketHitCount = new AtomicIntegerArray(BUCKET_COUNT); }
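// HitCounter above splits a time range into BUCKET_COUNT buckets, pairing an AtomicLongArray of
// bucket start times with an AtomicIntegerArray of hit counts. Below is a sketch of how such a
// sliding window is typically driven; the hit()/totalHits() logic is an assumption about the
// pattern, not the original class, and the bucket reset is deliberately best-effort.
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.AtomicLongArray;

class SlidingWindowHitCounter {
    private static final int BUCKET_COUNT = 10;
    private final long bucketWidthMs;
    private final AtomicLongArray bucketStartTime = new AtomicLongArray(BUCKET_COUNT);
    private final AtomicIntegerArray bucketHitCount = new AtomicIntegerArray(BUCKET_COUNT);

    SlidingWindowHitCounter(int timeRangeInSeconds) {
        this.bucketWidthMs = timeRangeInSeconds * 1000L / BUCKET_COUNT;
    }

    void hit() {
        long now = System.currentTimeMillis();
        long windowStart = now - (now % bucketWidthMs);
        int idx = (int) ((now / bucketWidthMs) % BUCKET_COUNT);
        long recorded = bucketStartTime.get(idx);
        if (recorded != windowStart && bucketStartTime.compareAndSet(idx, recorded, windowStart)) {
            bucketHitCount.set(idx, 0); // bucket was stale: claim it and start counting again
        }
        bucketHitCount.incrementAndGet(idx);
    }

    int totalHits() {
        long now = System.currentTimeMillis();
        int sum = 0;
        for (int i = 0; i < BUCKET_COUNT; i++) {
            if (now - bucketStartTime.get(i) < bucketWidthMs * BUCKET_COUNT) {
                sum += bucketHitCount.get(i); // only buckets still inside the window
            }
        }
        return sum;
    }
}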
public ConcurrentHashMap(int initialCapacity) { if (initialCapacity < 0) { throw new IllegalArgumentException("Illegal Initial Capacity: " + initialCapacity); } if (initialCapacity > MAXIMUM_CAPACITY) { initialCapacity = MAXIMUM_CAPACITY; } int threshold = initialCapacity; threshold += threshold >> 1; // threshold = length * 0.75 int capacity = 1; while (capacity < threshold) { capacity <<= 1; } if (capacity >= PARTITIONED_SIZE_THRESHOLD) { this.partitionedSize = new AtomicIntegerArray(SIZE_BUCKETS * 16); // we want 7 extra slots and 64 bytes for each slot. int is 4 bytes, so 64 bytes is 16 ints. } this.table = new AtomicReferenceArray(capacity + 1); }
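// The partitionedSize allocation above is a striped counter padded against false sharing: as the
// in-line comment says, each logical counter gets 16 ints (64 bytes), so neighbouring counters
// land on different cache lines. A minimal sketch of that striping idea; the stripe count of 7 and
// the thread-id hash used to pick a stripe are assumptions, not the map's actual code.
import java.util.concurrent.atomic.AtomicIntegerArray;

class StripedCounter {
    private static final int STRIPES = 7;               // assumed stripe count
    private static final int INTS_PER_CACHE_LINE = 16;  // 16 * 4 bytes = 64 bytes
    private final AtomicIntegerArray counts = new AtomicIntegerArray(STRIPES * INTS_PER_CACHE_LINE);

    void increment() {
        // Spread threads over stripes so concurrent updates rarely touch the same slot.
        int stripe = (int) (Thread.currentThread().getId() % STRIPES);
        counts.incrementAndGet(stripe * INTS_PER_CACHE_LINE);
    }

    int sum() {
        int total = 0;
        for (int i = 0; i < STRIPES; i++) {
            total += counts.get(i * INTS_PER_CACHE_LINE);
        }
        return total;
    }
}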
private AccurateRecorder(int methodTagId, int mostTimeThreshold, int outThresholdCount) { super(methodTagId); this.timingArr = new AtomicIntegerArray(mostTimeThreshold + 1); this.timingMap = new ConcurrentHashMap<>(MapUtils.getFitCapacity(outThresholdCount)); }
public ThreadStorage(int intSize, int longSize) { this.owner = Thread.currentThread(); if (intSize > 0) { this.intStore = new AtomicIntegerArray(intSize); } else { this.intStore = null; } if (longSize > 0) { this.longStore = new AtomicLongArray(longSize); } else { this.longStore = null; } }
this.intStorage = new AtomicIntegerArray(intCount); this.intDirty = new AtomicIntegerArray(intCount); this.intReadPrepLock = new Object[intCount]; for (int i = 0; i < intCount; i++) { this.intReadPrepLock[i] = new Object(); } this.longStorage = new AtomicLongArray(longCount); this.longDirty = new AtomicIntegerArray(longCount); this.longReadPrepLock = new Object[longCount]; for (int i = 0; i < longCount; i++) { this.longReadPrepLock[i] = new Object(); } // loop bodies and the longStorage line were missing from the excerpt; per-slot lock-object initialization is assumed
public MapReduce(Layer[] ls, double epochs, Key job) { _ls = ls; _epochs = epochs; _job = job; _key = Key.make((byte) 1, Key.DFJ_INTERNAL_USER, H2O.SELF); _instances.put(_key, this); DKV.put(_key, new Value(_key, new byte[0])); Vec[] vecs = ((VecsInput) ls[0]).vecs; assert ls[0]._a.length == VecsInput.expand(vecs); //assert vecs[0].nChunks() >= NeuralNet.cores() : "Not enough chunks, c.f. NeuralNet.reChunk"; _counts = new AtomicIntegerArray(vecs[0].nChunks()); }
if (this.partitionedSize == null && newSize >= PARTITIONED_SIZE_THRESHOLD) this.partitionedSize = new AtomicIntegerArray(SIZE_BUCKETS * 16);
public class AtomicBitSet { private final AtomicIntegerArray array; public AtomicBitSet(int length) { int intLength = (length + 31) / 32; array = new AtomicIntegerArray(intLength); } public void set(long n) { int bit = 1 << n; int idx = (int) (n >>> 5); while (true) { int num = array.get(idx); int num2 = num | bit; if (num == num2 || array.compareAndSet(idx, num, num2)) return; } } public boolean get(long n) { int bit = 1 << n; int idx = (int) (n >>> 5); int num = array.get(idx); return (num & bit) != 0; } }
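// Usage of the AtomicBitSet above is the familiar bit-set API, just safe to share between threads;
// a quick illustration (the 1024-bit size is arbitrary):
AtomicBitSet seen = new AtomicBitSet(1024); // backed by 1024 / 32 = 32 ints
seen.set(42);
seen.set(1023);
boolean hit = seen.get(42);   // true
boolean miss = seen.get(43);  // false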
public ClusterMetricsContext(final NimbusData nimbusData) { LOG.info("create cluster metrics context..."); this.nimbusData = nimbusData; this.metricCache = nimbusData.getMetricCache(); this.stormClusterState = nimbusData.getStormClusterState(); this.isShutdown = nimbusData.getIsShutdown(); clusterName = ConfigExtension.getClusterName(nimbusData.getConf()); if (clusterName == null) { throw new RuntimeException("cluster.name property must be set in storm.yaml!"); } this.maxPendingUploadMetrics = ConfigExtension.getMaxPendingMetricNum(nimbusData.getConf()); this.metricStat = new AtomicIntegerArray(this.maxPendingUploadMetrics); int cnt = 0; for (int i = 0; i < maxPendingUploadMetrics; i++) { TopologyMetricDataInfo obj = getMetricDataInfoFromCache(i); if (obj != null) { this.metricStat.set(i, SET); cnt++; } } LOG.info("pending upload metrics: {}", cnt); // track nimbus JVM heap JStormMetrics.registerWorkerGauge(JStormMetrics.NIMBUS_METRIC_KEY, MetricDef.MEMORY_USED, new AsmGauge(new Gauge<Double>() { @Override public Double getValue() { return JStormUtils.getJVMHeapMemory(); } })); }
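// In ClusterMetricsContext above, metricStat is a slot-status array: one int per pending-upload
// slot, set to SET when the cache already holds data for that index. A typical companion operation
// is claiming a free slot atomically; the sketch below assumes UNSET = 0 and SET = 1, which is an
// assumption for illustration rather than JStorm's exact constants.
import java.util.concurrent.atomic.AtomicIntegerArray;

class SlotTable {
    static final int UNSET = 0; // assumed free marker
    static final int SET = 1;   // assumed occupied marker
    private final AtomicIntegerArray status;

    SlotTable(int slots) {
        this.status = new AtomicIntegerArray(slots);
    }

    // Returns the index of a slot that was atomically moved UNSET -> SET, or -1 if all are taken.
    int claimSlot() {
        for (int i = 0; i < status.length(); i++) {
            if (status.get(i) == UNSET && status.compareAndSet(i, UNSET, SET)) {
                return i;
            }
        }
        return -1;
    }

    void release(int idx) {
        status.set(idx, UNSET);
    }
}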
@SuppressWarnings("unchecked") public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) { if (parser.lexer.token() == JSONToken.NULL) { parser.lexer.nextToken(JSONToken.COMMA); return null; } JSONArray array = new JSONArray(); parser.parseArray(array); if (clazz == AtomicIntegerArray.class) { AtomicIntegerArray atomicArray = new AtomicIntegerArray(array.size()); for (int i = 0; i < array.size(); ++i) { atomicArray.set(i, array.getInteger(i)); } return (T) atomicArray; } AtomicLongArray atomicArray = new AtomicLongArray(array.size()); for (int i = 0; i < array.size(); ++i) { atomicArray.set(i, array.getLong(i)); } return (T) atomicArray; }
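// The method above is the deserializer side; callers reach it through fastjson's ordinary parse
// entry points. A brief, hedged illustration of reading an AtomicIntegerArray and an
// AtomicLongArray from a JSON array literal (fastjson must be on the classpath):
import com.alibaba.fastjson.JSON;
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.AtomicLongArray;

class FastjsonAtomicArrayDemo {
    public static void main(String[] args) {
        AtomicIntegerArray ints = JSON.parseObject("[1, 2, 3]", AtomicIntegerArray.class);
        AtomicLongArray longs = JSON.parseObject("[1, 2, 3]", AtomicLongArray.class);
        System.out.println(ints.get(2) + " " + longs.get(0)); // prints "3 1"
    }
}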
AtomicIntegerArray reference = new AtomicIntegerArray( length );
AtomicIntegerArray reservedIdx = new AtomicIntegerArray(TOTAL_CNT);