/**
 * Creates a clone of this control for the jME cloning system.  The task
 * queue is re-created so the clone does not share a queue instance with
 * the original control.
 */
@Override public Object jmeClone() {
    UpdateControl clone = (UpdateControl)super.jmeClone();
    // Give the clone its own queue; sharing the original's queue would let
    // the two controls consume each other's tasks.
    clone.taskQueue = new ConcurrentLinkedQueue<>();
    // This is kind of questionable since the tasks aren't cloned and have
    // no reference to the new spatial or anything. They'll get run again
    // but it's not clear to me why that would be desired. I'm doing it
    // because the old cloneForSpatial() code does. FIXME? -pspeed
    clone.taskQueue.addAll(taskQueue);
    return clone;
} }
/**
 * Appends every mutation in the given map to the pending-mutation queue
 * associated with {@code indexName}.
 *
 * @param indexName name of the index whose queue receives the mutations
 * @param mutations row mutations keyed by row; only the values are queued
 */
private void appendMutations(String indexName, Map<ByteBuffer, RowMutation> mutations) {
    final Pair<AtomicInteger, ConcurrentLinkedQueue<RowMutation>> queueEntry = getMutationQueue(indexName);
    for (final RowMutation mutation : mutations.values()) {
        queueEntry.right.add(mutation);
    }
}
/**
 * Appends the given row mutations to the pending-mutation queue associated
 * with {@code indexName}.
 *
 * @param indexName name of the index whose queue receives the mutations
 * @param mutations mutations to enqueue, in the order given
 */
public void appendMutations(String indexName, RowMutation... mutations) {
    final Pair<AtomicInteger, ConcurrentLinkedQueue<RowMutation>> queueEntry = getMutationQueue(indexName);
    for (final RowMutation mutation : mutations) {
        queueEntry.right.add(mutation);
    }
}
/**
 * Queues the given mutations into the asynchronous write buffer and
 * triggers a (possibly no-op) flush.
 *
 * NOTE(review): the order of the atomic updates below appears deliberate —
 * buffer-size accounting happens before the mutations become visible in
 * writeAsyncBuffer — so do not reorder without checking the flush path.
 */
@Override public void mutate(List<? extends Mutation> ms) throws InterruptedIOException, RetriesExhaustedWithDetailsException {
    // Fail fast if this mutator has already been closed.
    checkClose();
    long toAddSize = 0;
    int toAddCount = 0;
    for (Mutation m : ms) {
        // Puts are validated up front so an oversized cell rejects the whole
        // batch before anything is buffered.
        if (m instanceof Put) {
            HTable.validatePut((Put) m, maxKeyValueSize);
        }
        toAddSize += m.heapSize();
        ++toAddCount;
    }
    // An empty buffer means this batch starts a new buffering window; record
    // its start time for age-based flushing.
    if (currentWriteBufferSize.get() == 0) {
        firstRecordInBufferTimestamp.set(System.currentTimeMillis());
    }
    currentWriteBufferSize.addAndGet(toAddSize);
    writeAsyncBuffer.addAll(ms);
    undealtMutationCount.addAndGet(toAddCount);
    // Non-forced flush: only writes out if buffer thresholds are exceeded.
    doFlush(false);
}
// NOTE(review): fragment — enclosing method not visible. Appends the
// collected rows to the per-index mutation queue; presumably part of a
// batching/flush path. Verify against the full file.
mutationQ.right.addAll(rows);
/**
 * Removes the given dependency instance and, if its topic is no longer
 * referenced by any remaining dependency, publishes the updated topic list
 * so the consumer can rebalance its subscriptions.
 *
 * @param context the dependency instance to remove
 */
public void remove(final KafkaDependencyInstanceContext context) {
    this.depInstances.remove(context);
    final boolean topicStillNeeded = this.depInstances.hasTopic(context.getTopicName());
    if (!topicStillNeeded) {
        this.subscribedTopics.addAll(this.depInstances.getTopicList());
    }
}
// NOTE(review): fragment — enclosing method not visible. Folds this batch's
// processing exceptions into the merged collection; presumably so they are
// reported together rather than individually. Verify against the full file.
mergedProcessingExceptions.addAll(processingExceptionsToMerge);
// NOTE(review): fragment — enclosing method not visible. Re-queues the
// conflated events while dropping DescriptionEvents; presumably those are
// not worth replaying. Confirm intent against the full method.
queue.addAll(conflated.stream() .filter(ev -> !(ev instanceof DescriptionEvent)) .collect(Collectors.toList()));
/**
 * Registers the given dependency instance.  When its topic was not already
 * tracked, the updated topic list is published so the consumer can
 * rebalance its subscriptions to include the new topic.
 *
 * @param context the dependency instance to register
 */
public void add(final KafkaDependencyInstanceContext context) {
    // Capture topic novelty BEFORE the add, which may introduce the topic.
    final boolean topicIsNew = !this.depInstances.hasTopic(context.getTopicName());
    this.depInstances.add(context);
    if (topicIsNew) {
        this.subscribedTopics.addAll(this.depInstances.getTopicList());
    }
}
/**
 * If the matcher returns true, remove the dependency from collection.
 *
 * For each matched event, fires the success callback of every dependency
 * registered for this record's topic and that event, then removes them.
 */
private void triggerDependencies(final Set<String> matchedList, final ConsumerRecord<String, String> record) {
    // NOTE(review): deleteList accumulates across iterations of the outer
    // loop (it is never cleared), so later removeList calls receive deps
    // from earlier events too — confirm this is intentional.
    final List<KafkaDependencyInstanceContext> deleteList = new LinkedList<>();
    for (final String it : matchedList) {
        final List<KafkaDependencyInstanceContext> possibleAvailableDeps = this.depInstances.getDepsByTopicAndEvent(record.topic(), it);
        for (final KafkaDependencyInstanceContext dep : possibleAvailableDeps) {
            dep.getCallback().onSuccess(dep);
            deleteList.add(dep);
        }
        //If dependencies that need to be removed could lead to unsubscribing topics, do the topics rebalance
        if (!this.depInstances.removeList(record.topic(), it, deleteList)) {
            this.subscribedTopics.addAll(this.depInstances.getTopicList());
        }
    }
} }
// NOTE(review): fragment — enclosing method not visible. Queues every block
// from the array for testing.
blocksToTest.addAll(Arrays.asList(blocks));
// NOTE(review): fragment — enclosing method not visible. Snapshots the
// working mutations into 'rows', then appends them to the per-index
// mutation queue. Verify against the full file.
rows.addAll(workingMutations.values());
mutationQ.right.addAll(rows);
// NOTE(review): fragment — enclosing method not visible. Collects the
// expected records, presumably for later comparison against actual output.
expectedOutput.addAll(Arrays.asList(expectedRecords));
/**
 * Buffers the supplied mutations for asynchronous writing, updating the
 * size/count accounting, and requests a threshold-based flush.
 *
 * NOTE(review): size accounting is updated before the mutations are added
 * to writeAsyncBuffer; keep that ordering when touching this method.
 */
@Override public void mutate(List<? extends Mutation> ms) throws InterruptedIOException, RetriesExhaustedWithDetailsException {
    // Reject use after close.
    checkClose();
    long toAddSize = 0;
    int toAddCount = 0;
    for (Mutation m : ms) {
        // Validate Puts first so a bad cell fails the batch before buffering.
        if (m instanceof Put) {
            HTable.validatePut((Put) m, maxKeyValueSize);
        }
        toAddSize += m.heapSize();
        ++toAddCount;
    }
    // Starting a fresh buffering window: remember when the first record landed.
    if (currentWriteBufferSize.get() == 0) {
        firstRecordInBufferTimestamp.set(System.currentTimeMillis());
    }
    currentWriteBufferSize.addAndGet(toAddSize);
    writeAsyncBuffer.addAll(ms);
    undealtMutationCount.addAndGet(toAddCount);
    // false = only flush if buffer thresholds say so.
    doFlush(false);
}
/**
 * Move the tasks from the given queue to this one.
 *
 * NOTE(review): addAll followed by clear is not atomic — a task enqueued on
 * the source queue between the two calls is silently dropped. Confirm
 * callers only invoke this while the source queue is quiescent.
 *
 * @param queue the queue whose pending tasks are transferred into this one
 */
public void enqueueAll(final GameTaskQueue queue) {
    _queue.addAll(queue._queue);
    queue._queue.clear();
}
/**
 * Restores the static movable-registry state from the stream.  The read
 * order (id counter, movable collection, id map) must match the order used
 * by the corresponding write method.
 *
 * NOTE(review): allMovables is cleared before repopulating, but movablesByID
 * is only putAll'd — stale entries could survive unless the map is known to
 * be empty here. Confirm against the caller.
 */
@SuppressWarnings("unchecked")
public static void readStaticState(ObjectInputStream ois) throws IOException, ClassNotFoundException {
    nextID = ois.readInt();
    allMovables.clear();
    allMovables.addAll((Collection<? extends ILogicMovable>) ois.readObject());
    movablesByID.putAll((Map<? extends Integer, ? extends ILogicMovable>) ois.readObject());
}
private void reScheduleRefresh (int delayMillis, Set<File> filesToRefresh) { // refresh all at once Mercurial.STATUS_LOG.log(Level.FINE, "reScheduleRefresh: adding {0}", filesToRefresh); this.filesToRefresh.addAll(filesToRefresh); refreshTask.schedule(delayMillis); }
/**
 * Per-frame update: drains the commands scheduled since the last frame and
 * executes them.  Commands scheduled while executing land in the pending
 * queue and run on the next update.
 *
 * @param tpf time per frame, forwarded to the superclass update
 */
@Override
public void update(float tpf) {
    super.update(tpf);
    // Move pending work into the running set first so execution iterates a
    // stable collection.
    runningCommands.addAll(pendingCommands);
    pendingCommands.clear();
    for (final PHATCommand command : runningCommands) {
        command.run(app);
    }
    runningCommands.clear();
}
/**
 * Wraps every slot of the magazine in a Handle and publishes the handles in
 * one batch to the appropriate queue (free queue when the magazine is full,
 * partial queue otherwise).
 *
 * @param m the magazine whose slots are drained and wrapped
 */
private void useMagazine(Magazine m) {
    final MagazineHolder holder = new MagazineHolder(this, m);
    // Decide the destination BEFORE draining: fullness is tested on the
    // undrained magazine.
    final ConcurrentLinkedQueue<Handle> target = m.isFull() ? freeQueue : partialQueue;
    // Batch locally first so the concurrent queue is touched only once.
    final ArrayList<Handle> batch = new ArrayList<Handle>();
    for (Magazine.Slot slot = m.get(); slot != null; slot = m.get()) {
        batch.add(new Handle(slot, holder));
    }
    target.addAll(batch);
}
/**
 * Drains the given magazine, wrapping each slot in a Handle owned by a new
 * MagazineHolder, and appends the whole batch to either the free queue (if
 * the magazine was full) or the partial queue.
 *
 * @param m the magazine to drain
 */
private void useMagazine(Magazine m) {
    final MagazineHolder owner = new MagazineHolder(this, m);
    // Fullness must be checked before any slot is taken out.
    final ConcurrentLinkedQueue<Handle> destination = m.isFull() ? freeQueue : partialQueue;
    final ArrayList<Handle> drained = new ArrayList<Handle>();
    while (true) {
        final Magazine.Slot slot = m.get();
        if (slot == null) {
            break;
        }
        drained.add(new Handle(slot, owner));
    }
    // Single bulk append keeps contention on the shared queue low.
    destination.addAll(drained);
}