/**
 * Clears all accumulated per-test state so the next test run starts from
 * a clean slate: empties the collected tests and config issues and zeroes
 * the failure counter.
 */
private void resetAll() {
    m_configIssues = new ConcurrentLinkedDeque<>();
    m_allTests = new ConcurrentLinkedDeque<>();
    m_numFailed = 0;
}
/**
 * Creates the best-performing concurrent deque for non-blocking use cases.
 *
 * @param <E> element type
 * @return a new, empty, thread-safe {@link Deque} whose operations never block
 */
public static <E> Deque<E> newConcurrentNonBlockingDeque() {
    return new java.util.concurrent.ConcurrentLinkedDeque<E>();
}
/**
 * Creates the best-performing concurrent deque for non-blocking use cases.
 *
 * @param <E> element type
 * @return a new, empty, thread-safe {@link Deque} whose operations never block
 */
public static <E> Deque<E> newConcurrentNonBlockingDeque() {
    return new java.util.concurrent.ConcurrentLinkedDeque<E>();
}
protected Deque<PooledConnection> getPoolForEventLoop(EventLoop eventLoop) { // We don't want to block under any circumstances, so can't use CHM.computeIfAbsent(). // Instead we accept the slight inefficiency of an unnecessary instantiation of a ConcurrentLinkedDeque. Deque<PooledConnection> pool = connectionsPerEventLoop.get(eventLoop); if (pool == null) { pool = new ConcurrentLinkedDeque<>(); connectionsPerEventLoop.putIfAbsent(eventLoop, pool); } return pool; }
// Collection-testsuite generator hook: builds the Queue implementation under
// test (a ConcurrentLinkedDeque) pre-populated with the given sample elements.
// NOTE(review): the trailing "})" closes an anonymous class and an enclosing
// call that begin before this chunk — left untouched.
@Override public Queue<String> create(String[] elements) { return new ConcurrentLinkedDeque<>(MinimalCollection.of(elements)); } })
/**
 * Creates an LRU cache sized to hold at most {@code maxEntries} entries.
 *
 * @param maxEntries maximum number of entries retained before eviction
 */
public LruCache(final int maxEntries) {
    this.maxEntries = maxEntries;
    this.map = new ConcurrentHashMap<>(maxEntries);
    // Deliberately a ConcurrentLinkedDeque and NOT a ConcurrentLinkedQueue:
    // the queue variant triggers a JDK bug leading to OutOfMemoryError and
    // high CPU usage: https://bugs.openjdk.java.net/browse/JDK-8054446
    this.queue = new ConcurrentLinkedDeque<>();
}
/**
 * Returns a new concurrent {@link Deque}, choosing the best implementation
 * available on the running JVM.
 *
 * @param <C> element type
 * @return a {@link ConcurrentLinkedDeque} on Java 7+, otherwise a
 *         {@link LinkedBlockingDeque} fallback for older runtimes
 */
public static <C> Deque<C> newConcurrentDeque() {
    return javaVersion() < 7 ? new LinkedBlockingDeque<C>() : new ConcurrentLinkedDeque<C>();
}
/**
 * Creates a new, optionally bounded, {@link Queue} that does not require
 * external synchronization.
 *
 * @param maxSize queue size. If {@link Integer#MAX_VALUE}, then creates an
 *        {@link ConcurrentLinkedDeque unbounded queue}.
 * @return a new, empty {@link Queue}.
 */
public static <T> Queue<T> newConcurrentQueue(int maxSize) {
    if (maxSize == Integer.MAX_VALUE) {
        // No bound requested: use the lock-free unbounded deque.
        return new ConcurrentLinkedDeque<>();
    }
    if (maxSize > ARRAY_QUEUE_THRESHOLD) {
        // Large bound: linked nodes avoid one huge up-front array allocation.
        return new LinkedBlockingQueue<>(maxSize);
    }
    return new ArrayBlockingQueue<>(maxSize);
}
private Deque<TaskStatusEvent> getDeque(final JobId key) { synchronized (items) { final Deque<TaskStatusEvent> deque = items.get(key); if (deque == null) { // try more assertively to get a deque final ConcurrentLinkedDeque<TaskStatusEvent> newDeque = new ConcurrentLinkedDeque<>(); items.put(key, newDeque); return newDeque; } return deque; } }
/**
 * Returns a new concurrent {@link Deque}, choosing the best implementation
 * available on the running JVM.
 *
 * @param <C> element type
 * @return a {@link ConcurrentLinkedDeque} on Java 7+, otherwise a
 *         {@link LinkedBlockingDeque} fallback for older runtimes
 */
public static <C> Deque<C> newConcurrentDeque() {
    return javaVersion() < 7 ? new LinkedBlockingDeque<C>() : new ConcurrentLinkedDeque<C>();
}
/**
 * Returns a new concurrent {@link Deque}, choosing the best implementation
 * available on the running JVM.
 *
 * @param <C> element type
 * @return a {@link ConcurrentLinkedDeque} on Java 7+, otherwise a
 *         {@link LinkedBlockingDeque} fallback for older runtimes
 */
public static <C> Deque<C> newConcurrentDeque() {
    return javaVersion() < 7 ? new LinkedBlockingDeque<C>() : new ConcurrentLinkedDeque<C>();
}
/**
 * Builds a receiver that buffers access-log messages and writes them to files
 * under {@code outputDirectory} on the supplied executor.
 *
 * @param logWriteExecutor executor used for the actual file writes
 * @param outputDirectory  directory the log files live in
 * @param logBaseName      file-name prefix of the log files
 * @param logNameSuffix    file-name suffix; falls back to DEFAULT_LOG_SUFFIX when null
 * @param rotate           whether log rotation is enabled
 * @param fileHeader       generator for the header written to fresh log files
 */
private DefaultAccessLogReceiver(final Executor logWriteExecutor, final Path outputDirectory, final String logBaseName, final String logNameSuffix, boolean rotate, LogFileHeaderGenerator fileHeader) {
    this.logWriteExecutor = logWriteExecutor;
    this.outputDirectory = outputDirectory;
    this.logBaseName = logBaseName;
    this.logNameSuffix = (logNameSuffix == null) ? DEFAULT_LOG_SUFFIX : logNameSuffix;
    this.rotate = rotate;
    this.fileHeaderGenerator = fileHeader;
    this.pendingMessages = new ConcurrentLinkedDeque<>();
    // Must come after logNameSuffix is resolved — the file name depends on it.
    this.defaultLogFile = outputDirectory.resolve(logBaseName + this.logNameSuffix);
    calculateChangeOverPoint();
}
// Appends the row to the deque bucketed by its timestamp, lazily creating the
// bucket via Map.putIfAbsent followed by a re-read (lock-free; the re-read
// picks up whichever deque won a concurrent creation race). The ordering of
// setRowIndex() before rows.add() is load-bearing for reader visibility — do
// not reorder. Always returns EMPTY_ROW_INDEX because a new row is always added.
@Override public int putIfAbsent(IncrementalIndexRow key, int rowIndex) { Long time = key.getTimestamp(); Deque<IncrementalIndexRow> rows = facts.get(time); if (rows == null) { facts.putIfAbsent(time, new ConcurrentLinkedDeque<>()); // in race condition, rows may be put by other thread, so always get latest status from facts rows = facts.get(time); } // setRowIndex() must be called before rows.add() for visibility of rowIndex from concurrent readers. key.setRowIndex(rowIndex); rows.add(key); // always return EMPTY_ROW_INDEX to indicate that we always add new row return IncrementalIndexRow.EMPTY_ROW_INDEX; }
/**
 * Offers an idle channel to the front of its partition's deque, lazily
 * creating the partition on first use.
 *
 * @param channel      the channel going idle
 * @param partitionKey key identifying the partition pool
 * @param now          timestamp the channel became idle
 * @return true if the channel was accepted by the deque
 */
private boolean offer0(Channel channel, Object partitionKey, long now) {
    // Fast path: a plain read avoids computeIfAbsent's bookkeeping when the
    // partition already exists.
    ConcurrentLinkedDeque<IdleChannel> pool = partitions.get(partitionKey);
    if (pool == null) {
        // Slow path: atomically create the partition on first use.
        pool = partitions.computeIfAbsent(partitionKey, pk -> new ConcurrentLinkedDeque<>());
    }
    return pool.offerFirst(new IdleChannel(channel, now));
}
/**
 * Creates a pool of transient store buffers; pool and buffer sizes are read
 * from the supplied store configuration.
 *
 * @param storeConfig message-store configuration providing pool size and
 *                    commit-log mapped-file size
 */
public TransientStorePool(final MessageStoreConfig storeConfig) {
    this.storeConfig = storeConfig;
    this.availableBuffers = new ConcurrentLinkedDeque<>();
    this.poolSize = storeConfig.getTransientStorePoolSize();
    this.fileSize = storeConfig.getMapedFileSizeCommitLog();
}
protected Deque<PooledConnection> getPoolForEventLoop(EventLoop eventLoop) { // We don't want to block under any circumstances, so can't use CHM.computeIfAbsent(). // Instead we accept the slight inefficiency of an unnecessary instantiation of a ConcurrentLinkedDeque. Deque<PooledConnection> pool = connectionsPerEventLoop.get(eventLoop); if (pool == null) { pool = new ConcurrentLinkedDeque<>(); connectionsPerEventLoop.putIfAbsent(eventLoop, pool); } return pool; }
// Stress test: one background thread continuously records into every sensor
// while this thread concurrently adds/removes sensors and reads every metric,
// asserting no null values surface. The ConcurrentLinkedDeque allows the
// recorder thread to iterate safely while this thread mutates either end.
// alive is flipped false at the end to stop the background recorder.
/** * Verifies that concurrent sensor add, remove, updates and read don't result * in errors or deadlock. */ @Test public void testConcurrentReadUpdate() throws Exception { final Random random = new Random(); final Deque<Sensor> sensors = new ConcurrentLinkedDeque<>(); metrics = new Metrics(new MockTime(10)); SensorCreator sensorCreator = new SensorCreator(metrics); final AtomicBoolean alive = new AtomicBoolean(true); executorService = Executors.newSingleThreadExecutor(); executorService.submit(new ConcurrentMetricOperation(alive, "record", () -> sensors.forEach(sensor -> sensor.record(random.nextInt(10000))))); for (int i = 0; i < 10000; i++) { if (sensors.size() > 5) { Sensor sensor = random.nextBoolean() ? sensors.removeFirst() : sensors.removeLast(); metrics.removeSensor(sensor.name()); } StatType statType = StatType.forId(random.nextInt(StatType.values().length)); sensors.add(sensorCreator.createSensor(statType, i)); for (Sensor sensor : sensors) { for (KafkaMetric metric : sensor.metrics()) { assertNotNull("Invalid metric value", metric.metricValue()); } } } alive.set(false); }
@Test
public void severalInARowExecutedInReverseOrder() {
    // Chained doFinally hooks fire innermost-last: the hook attached last
    // runs first, so the recorded order is the reverse of attachment order.
    Queue<String> order = new ConcurrentLinkedDeque<>();

    Flux.just("b")
        .hide()
        .doFinally(s -> order.offer("FIRST"))
        .doFinally(s -> order.offer("SECOND"))
        .blockLast();

    Assertions.assertThat(order)
        .containsExactly("SECOND", "FIRST");
}
@Test
public void severalInARowExecutedInReverseOrder() {
    // Chained doFinally hooks fire innermost-last: the hook attached last
    // runs first, so the recorded order is the reverse of attachment order.
    Queue<String> order = new ConcurrentLinkedDeque<>();

    Flux.just("b")
        .hide()
        .doFinally(s -> order.offer("FIRST"))
        .doFinally(s -> order.offer("SECOND"))
        .blockLast();

    Assertions.assertThat(order)
        .containsExactly("SECOND", "FIRST");
}
// Clustered event-bus test: node 0 sends 1000 integers to node 1 outside of a
// Vert.x context; the consumer collects them and, once the counts match,
// asserts arrival order equals send order. The latch ensures the consumer is
// fully registered before any send. Concurrent deques are used because the
// consumer callback and the sending loop run on different threads.
@Test public void sendNoContext() throws Exception { int size = 1000; ConcurrentLinkedDeque<Integer> expected = new ConcurrentLinkedDeque<>(); ConcurrentLinkedDeque<Integer> obtained = new ConcurrentLinkedDeque<>(); startNodes(2); CountDownLatch latch = new CountDownLatch(1); vertices[1].eventBus().<Integer>consumer(ADDRESS1, msg -> { obtained.add(msg.body()); if (obtained.size() == expected.size()) { assertEquals(new ArrayList<>(expected), new ArrayList<>(obtained)); testComplete(); } }).completionHandler(ar -> { assertTrue(ar.succeeded()); latch.countDown(); }); latch.await(); EventBus bus = vertices[0].eventBus(); for (int i = 0;i < size;i++) { expected.add(i); bus.send(ADDRESS1, i); } await(); }