/**
 * Creates a submitter with a per-instance PRNG seed. Seeds are spaced by
 * SEED_INCREMENT; a zero seed is remapped to 1 because the generator
 * must never start at zero.
 */
Submitter() {
    int raw = nextSubmitterSeed.getAndAdd(SEED_INCREMENT);
    seed = (raw != 0) ? raw : 1; // generator requires a non-zero seed
}
}
/**
 * Returns the reading at the current cursor position and advances the cursor.
 *
 * @return the next reading
 * @throws NoSuchElementException if the cursor has moved past the last reading
 */
@Override
public AnalyzedToken next() {
    // getAndIncrement() is the idiomatic form of getAndAdd(1); the cursor
    // advances even when the access below fails, matching iterator semantics here.
    final int cursor = i.getAndIncrement();
    try {
        return anTokReadings[cursor];
    } catch (ArrayIndexOutOfBoundsException e) {
        throw new NoSuchElementException("No such element: " + i + ", element count: " + anTokReadings.length);
    }
}
@Override
/**
 * Tracing-safe getAndAdd: delegates to the counter only when it is a
 * BTraceAtomicInteger, so instrumented code cannot mutate arbitrary counters.
 *
 * @param ai the counter to update; must be a BTraceAtomicInteger
 * @param i  the delta to add
 * @return the value of {@code ai} before the addition
 * @throws IllegalArgumentException if {@code ai} is not a BTraceAtomicInteger
 */
static int getAndAdd(AtomicInteger ai, int i) {
    if (ai instanceof BTraceAtomicInteger) {
        return ai.getAndAdd(i);
    } else {
        // Fail with context instead of a bare, message-less exception.
        throw new IllegalArgumentException(
            "Expected a BTraceAtomicInteger, got: " + (ai == null ? "null" : ai.getClass().getName()));
    }
}
/**
 * Clear out all the in-flight requests for the given node and return them.
 *
 * @param node The node
 * @return All the in-flight requests for that node that have been removed
 */
public Iterable<NetworkClient.InFlightRequest> clearAll(String node) {
    // A single remove() both fetches and deletes the deque, avoiding the
    // original's redundant get()-then-remove() double lookup.
    final Deque<NetworkClient.InFlightRequest> clearedRequests = requests.remove(node);
    if (clearedRequests == null) {
        return Collections.emptyList();
    }
    inFlightRequestCount.getAndAdd(-clearedRequests.size());
    // Preserve the original iteration order: newest request first.
    return clearedRequests::descendingIterator;
}
// Seed from the wall clock plus a per-instance counter stride so generators
// created within the same nanosecond still receive distinct seeds.
// NOTE(review): casting nanoTime() to int discards the high bits — presumably
// acceptable for a non-cryptographic PRNG seed; confirm.
public MarsagliaRandom() { this((int) System.nanoTime() + seq.getAndAdd(129)); }
// Seed from the wall clock plus a per-instance counter stride so generators
// created within the same nanosecond still receive distinct seeds.
// NOTE(review): casting nanoTime() to int discards the high bits — presumably
// acceptable for a non-cryptographic PRNG seed; confirm.
public XorShift32Random() { this((int) System.nanoTime() + seq.getAndAdd(129)); }
private void dropMessages(Iterator<TaskMessage> msgs) { // We consume the iterator by traversing and thus "emptying" it. int msgCount = iteratorSize(msgs); messagesLost.getAndAdd(msgCount); LOG.info("Dropping {} messages", msgCount); }
/**
 * Records {@code threads} additional in-progress requests, preferring the
 * configured stats sink and falling back to the local atomic counter when
 * no sink is present.
 *
 * @param threads number of requests entering the in-progress state
 */
public void incRequestInProgress(int threads) {
    if (this._stats == null) {
        requestsInProgress.getAndAdd(threads);
    } else {
        this._stats.incInt(_REQUESTS_IN_PROGRESS, threads);
    }
}
/**
 * Invoked when an async batch append succeeds: bumps the success counter and
 * then notifies the wrapped callback.
 *
 * @param result the append result (unused here; success is implied by the call)
 */
@Override public void onSuccess(AppendBatchAsyncResultPojo result) {
    // presumably `size` (captured from the enclosing scope) is the number of
    // log events in the batch — confirm against the caller.
    flumeSuccessLogCount.getAndAdd(size); callback.onSuccess(); }
public static BiMap<Integer, String> commonStrings(int version) throws IOException { // load default strings from resource files if required if (!commonStringMap.containsKey(version)) { AtomicInteger index = new AtomicInteger(1 << 31); String resourcePath = "/resources/strings/" + version + ".x.txt"; try (BufferedReader br = resourceReader(resourcePath)) { commonStringMap.put(version, br.lines().collect(Collectors.toMap( value -> index.getAndAdd(value.length() + 1), value -> value ))); } catch (NullPointerException ex) { throw new RuntimeException("No common strings file found for version " + version); } } return HashBiMap.create(commonStringMap.get(version)); }
/**
 * Channel-write completion hook: settles the pending-message count for this
 * batch, then records it as sent or lost and, on failure, recycles the channel.
 *
 * @param future the completed write future for this batch
 */
@Override
public void operationComplete(ChannelFuture future) throws Exception {
    // The batch is no longer pending regardless of the outcome.
    pendingMessages.addAndGet(-numMessages); // clearer than `0 - numMessages`
    if (future.isSuccess()) {
        LOG.debug("sent {} messages to {}", numMessages, dstAddressPrefixedName);
        // NOTE(review): success credits batch.size() while failure debits
        // numMessages — confirm these are always equal, otherwise the
        // sent/lost counters can drift apart.
        messagesSent.getAndAdd(batch.size());
    } else {
        LOG.error("failed to send {} messages to {}: {}", numMessages, dstAddressPrefixedName, future.cause());
        closeChannelAndReconnect(future.channel());
        messagesLost.getAndAdd(numMessages);
    }
}
/**
 * Append a set of records to the file. This method is not thread-safe and must be
 * protected with a lock.
 *
 * @param records The records to append
 * @return the number of bytes written to the underlying file
 * @throws IOException if the underlying write fails
 * @throws IllegalArgumentException if the append would overflow the int file position
 */
public int append(MemoryRecords records) throws IOException {
    final int appendSize = records.sizeInBytes();
    // Reject appends that would push the tracked int file position past Integer.MAX_VALUE.
    if (appendSize > Integer.MAX_VALUE - size.get()) {
        throw new IllegalArgumentException("Append of size " + appendSize +
            " bytes is too large for segment with current file position at " + size.get());
    }
    final int bytesWritten = records.writeFullyTo(channel);
    size.getAndAdd(bytesWritten);
    return bytesWritten;
}
/**
 * Tallies, per rack, how many executors of the given topology are already
 * scheduled, based on the cluster's current assignment.
 *
 * @param topologyDetails the topology whose placement is being counted
 * @return map from rack id to scheduled-executor count (empty if unassigned)
 */
private Map<String, AtomicInteger> getScheduledCount(TopologyDetails topologyDetails) {
    final Map<String, AtomicInteger> countsPerRack = new HashMap<>();
    SchedulerAssignment assignment = cluster.getAssignmentById(topologyDetails.getId());
    if (assignment != null) {
        for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> slotEntry
                : assignment.getSlotToExecutors().entrySet()) {
            String supervisorId = slotEntry.getKey().getNodeId();
            // Map the supervisor hosting this slot back to its rack.
            String rackId = superIdToRack.get(supervisorId);
            countsPerRack
                .computeIfAbsent(rackId, ignored -> new AtomicInteger(0))
                .getAndAdd(slotEntry.getValue().size());
        }
    }
    return countsPerRack;
}
/**
 * Prepares lazy node sorting for scheduling {@code exec} of topology {@code td}:
 * wires favored/unfavored/skipped node sets, pre-counts executors already
 * scheduled per node, and sorts the racks.
 *
 * NOTE(review): removeAll()/addAll() below mutate the caller-supplied
 * unFavoredNodeIds list (it is stored by reference, not copied) — confirm
 * callers never reuse that list afterwards.
 *
 * @param td               topology being scheduled
 * @param exec             executor to place
 * @param favoredNodeIds   nodes preferred for placement
 * @param unFavoredNodeIds nodes to avoid; entries also in favoredNodeIds are dropped
 */
public LazyNodeSorting(TopologyDetails td, ExecutorDetails exec,
                       List<String> favoredNodeIds, List<String> unFavoredNodeIds) {
    this.favoredNodeIds = favoredNodeIds;
    this.unFavoredNodeIds = unFavoredNodeIds;
    // Favored wins when a node appears in both lists.
    this.unFavoredNodeIds.removeAll(favoredNodeIds);
    // Both favored and unfavored nodes are excluded from the default traversal.
    skippedNodeIds.addAll(favoredNodeIds);
    skippedNodeIds.addAll(unFavoredNodeIds);
    this.td = td;
    this.exec = exec;
    String topoId = td.getId();
    SchedulerAssignment assignment = cluster.getAssignmentById(topoId);
    if (assignment != null) {
        // Pre-compute how many executors are already placed on each node.
        for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry
                : assignment.getSlotToExecutors().entrySet()) {
            String superId = entry.getKey().getNodeId();
            perNodeScheduledCount.computeIfAbsent(superId, (sid) -> new AtomicInteger(0))
                .getAndAdd(entry.getValue().size());
        }
    }
    sortedRacks = sortRacks(exec, td);
}
/**
 * Raises the count of callbacks that still need to be notified.
 *
 * May only be called after this job is done (enforced by the precondition).
 *
 * @param count number of additional pending callbacks; the transition from
 *              0 to a positive count acquires a reference on the resource so
 *              it stays alive until the callbacks have run
 */
@SuppressWarnings("WeakerAccess") @Synthetic synchronized void incrementPendingCallbacks(int count) {
    Preconditions.checkArgument(isDone(), "Not yet complete!");
    // 0 -> n means the resource is going back into use: take a reference once.
    if (pendingCallbacks.getAndAdd(count) == 0 && engineResource != null) {
        engineResource.acquire();
    }
}
/**
 * Invoked when an async batch append fails: bumps the failure counter, logs
 * the error, and routes it to the matching callback channel by exception type
 * (IOException -> connection error, EventDeliveryException -> remote error,
 * anything else -> internal error).
 *
 * @param throwable the failure cause reported by the async append
 */
@Override public void onFailure(Throwable throwable) {
    // presumably `size` (captured from the enclosing scope) is the number of
    // events in the failed batch — confirm against the caller.
    flumeFailureLogCount.getAndAdd(size);
    LOG.warn("Failed to store record", throwable);
    if (throwable instanceof IOException) {
        callback.onConnectionError();
    } else if (throwable instanceof EventDeliveryException) {
        callback.onRemoteError();
    } else {
        callback.onInternalError();
    }
}
}
/**
 * Adds {@code increment} references to this buffer.
 *
 * @param increment number of references to add; must be positive
 * @return this buffer
 * @throws IllegalArgumentException if {@code increment} is not positive or the
 *         buffer's reference count had already dropped to zero
 */
@Override
public ByteBuf retain(int increment) {
    Preconditions.checkArgument(increment > 0, "retain(%d) argument is not positive", increment);
    if (isEmpty) {
        // The shared empty buffer is never reference-counted.
        return this;
    }
    if (BaseAllocator.DEBUG) {
        historicalLog.recordEvent("retain(%d)", increment);
    }
    // Retaining an already-released buffer is a bug: the prior count must be live.
    final int previousCount = refCnt.getAndAdd(increment);
    Preconditions.checkArgument(previousCount > 0);
    return this;
}
/**
 * Writes {@code httpRequest} on the underlying channel and registers
 * {@code callback} to receive the response for the assigned stream id.
 *
 * @param httpRequest the request to send
 * @param callback    handler notified with the eventual response
 * @return the stream id assigned to this request
 */
protected int sendHttpRequest(FullHttpRequest httpRequest, AbstractHttpClientHandler callback) {
    // Advance by 2 so client-initiated stream ids keep the same parity.
    final int requestId = streamId.getAndAdd(2);
    final Channel ch = this.channel.channel();
    responseChannelHandler.put(requestId, ch.write(httpRequest), callback);
    ch.flush();
    return requestId;
}
/**
 * Sends an HTTP request over this client's channel, pairing the write with
 * {@code callback} under a freshly allocated stream id.
 *
 * @param httpRequest the request to send
 * @param callback    handler notified with the eventual response
 * @return the stream id assigned to this request
 */
protected int sendHttpRequest(FullHttpRequest httpRequest, AbstractHttpClientHandler callback) {
    // Stream ids advance in steps of 2 to preserve client-side parity.
    final int assignedId = streamId.getAndAdd(2);
    final Channel outbound = this.channel.channel();
    responseChannelHandler.put(assignedId, outbound.write(httpRequest), callback);
    outbound.flush();
    return assignedId;
}
@Override public void run() {
    // Give the cluster a moment to settle before submitting the closure.
    doSleep(1000);
    //spi1.failSend = false;
    // Run TestClosure via c1, targeted at c2's local node, and fold the
    // returned value into the shared counter.
    cnt.getAndAdd(c1.compute(c1.cluster().forNodeId(c2.cluster().localNode().id())).call(new TestClosure()));
}
}, 1, "hang-thread");