/** * Creates a new BlockingClusterStatePublishResponseHandler * @param publishingToNodes the set of nodes to which the cluster state will be published and should respond */ public BlockingClusterStatePublishResponseHandler(Set<DiscoveryNode> publishingToNodes) { this.pendingNodes = ConcurrentCollections.newConcurrentSet(); this.pendingNodes.addAll(publishingToNodes); this.latch = new CountDownLatch(pendingNodes.size()); this.failedNodes = ConcurrentCollections.newConcurrentSet(); }
/**
 * Returns a new thread-safe {@link Set} implemented as a view over a concurrent map.
 */
public static <V> Set<V> newConcurrentSet() {
    final ConcurrentMap<V, Boolean> backing = ConcurrentCollections.<V, Boolean>newConcurrentMap();
    return Collections.newSetFromMap(backing);
}
/**
 * Builds a fixed-size thread pool executor.
 *
 * @param name          the executor's name (used for its threads)
 * @param size          the fixed number of threads
 * @param queueCapacity the task queue bound; a negative value selects an unbounded queue
 * @param threadFactory factory used to create the pool's threads
 * @param contextHolder the thread context to propagate onto pool threads
 */
public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory, ThreadContext contextHolder) {
    // Negative capacity means "unbounded"; otherwise wrap in a size-enforcing queue.
    final BlockingQueue<Runnable> queue = queueCapacity < 0
            ? ConcurrentCollections.<Runnable>newBlockingQueue()
            : new SizeBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
    // core == max and zero keep-alive: the pool stays at exactly `size` threads.
    return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy(), contextHolder);
}
/**
 * Creates a new CHM with an aggressive concurrency level, aimed at high concurrent update rate long living maps.
 */
public static <K, V> ConcurrentMap<K, V> newConcurrentMapWithAggressiveConcurrency() {
    // 16 is the default sizing hint passed to the parameterized overload
    // (presumably the initial capacity — confirm against that overload).
    final int defaultSizeHint = 16;
    return newConcurrentMapWithAggressiveConcurrency(defaultSizeHint);
}
/**
 * Creates a recycler backed by a concurrent deque.
 *
 * @param c       the factory/callback used by the parent recycler
 * @param maxSize the maximum number of pooled objects
 */
public ConcurrentDequeRecycler(C<T> c, int maxSize) {
    super(c, ConcurrentCollections.<T>newDeque(), maxSize);
    // Tracks the current pool size explicitly (deque size() may be O(n)).
    this.size = new AtomicInteger(0);
}
/**
 * Returns an int array to the size-keyed pool after overwriting its contents.
 *
 * @param ints     the array to recycle
 * @param sentinal the value every slot is reset to before pooling
 */
public static void pushIntArray(int[] ints, int sentinal) {
    final SoftWrapper intArray = getArrayQueueForSize(ints.length);
    Queue<int[]> queue = intArray.get();
    if (queue == null) {
        // Lazily (re)create the queue if the soft reference was collected.
        queue = ConcurrentCollections.newQueue();
        intArray.set(queue);
    }
    // Scrub stale data so a recycled array never leaks previous contents.
    Arrays.fill(ints, sentinal);
    queue.add(ints);
}
/**
 * Creates a new CHM with an aggressive concurrency level, aimed at highly updateable long living maps.
 */
public static <V> ConcurrentMapLong<V> newConcurrentMapLongWithAggressiveConcurrency() {
    final ConcurrentMap<Long, V> delegate = ConcurrentCollections.<Long, V>newConcurrentMapWithAggressiveConcurrency();
    return new ConcurrentHashMapLong<>(delegate);
}
/**
 * Builds a deque-backed recycler bounded at {@code maxSize} pooled objects.
 */
public ConcurrentDequeRecycler(C<T> c, int maxSize) {
    super(c, ConcurrentCollections.<T>newDeque(), maxSize);
    // Dedicated counter for the pool size; starts empty.
    this.size = new AtomicInteger(0);
}
/**
 * Returns an entry to the cache, unless it is oversized or the cache is full.
 *
 * @param entry the entry to recycle; it is reset before being pooled
 */
public static void pushEntry(Entry entry) {
    entry.reset();
    // Refuse to pool oversized buffers so the cache's memory stays bounded.
    if (entry.bytes().bytes().length() > BYTES_LIMIT) {
        return;
    }
    Queue<Entry> queue = cache.get();
    if (queue == null) {
        // Queue was collected (soft/thread-local); start a fresh one and reset its counter.
        queue = ConcurrentCollections.newQueue();
        counter.set(0);
        cache.set(queue);
    }
    if (counter.incrementAndGet() <= COUNT_LIMIT) {
        queue.add(entry);
    } else {
        // Over the limit: undo the optimistic increment and drop the entry.
        counter.decrementAndGet();
    }
}
/**
 * Returns a new concurrent long-keyed map backed by a standard concurrent map.
 */
public static <V> ConcurrentMapLong<V> newConcurrentMapLong() {
    final ConcurrentMap<Long, V> delegate = ConcurrentCollections.<Long, V>newConcurrentMap();
    return new ConcurrentHashMapLong<>(delegate);
}
/**
 * Creates a new BlockingClusterStatePublishResponseHandler
 * @param publishingToNodes the set of nodes to which the cluster state will be published and should respond
 */
public BlockingClusterStatePublishResponseHandler(Set<DiscoveryNode> publishingToNodes) {
    this.pendingNodes = ConcurrentCollections.newConcurrentSet();
    this.failedNodes = ConcurrentCollections.newConcurrentSet();
    this.pendingNodes.addAll(publishingToNodes);
    // Sized from pendingNodes (not the argument) so the latch matches the tracked set exactly.
    this.latch = new CountDownLatch(this.pendingNodes.size());
}
/**
 * Creates an empty Maps instance: a fresh, aggressively-concurrent version lookup
 * for current entries, an empty old-entries lookup, and safe-access inheritance off.
 */
Maps() { this(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()), VersionLookup.EMPTY, false); }
/**
 * Return a new executor that will automatically adjust the queue size based on queue throughput.
 *
 * @param size number of fixed threads to use for executing tasks
 * @param initialQueueCapacity initial size of the executor queue
 * @param minQueueSize minimum queue size that the queue can be adjusted to
 * @param maxQueueSize maximum queue size that the queue can be adjusted to
 * @param frameSize number of tasks during which stats are collected before adjusting queue size
 */
public static EsThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize, int maxQueueSize, int frameSize, TimeValue targetedResponseTime, ThreadFactory threadFactory, ThreadContext contextHolder) {
    // Reject non-positive capacities up front — the resizable queue needs a real starting size.
    if (initialQueueCapacity <= 0) {
        final String message = "initial queue capacity for [" + name + "] executor must be positive, got: " + initialQueueCapacity;
        throw new IllegalArgumentException(message);
    }
    final ResizableBlockingQueue<Runnable> workQueue =
            new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), initialQueueCapacity);
    // core == max with zero keep-alive: a fixed-size pool whose queue resizes between
    // minQueueSize and maxQueueSize based on timing stats gathered every frameSize tasks.
    return new QueueResizingEsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
            workQueue, minQueueSize, maxQueueSize, TimedRunnable::new, frameSize, targetedResponseTime,
            threadFactory, new EsAbortPolicy(), contextHolder);
}
/**
 * Constructs a recycler whose free objects live in a concurrent deque,
 * capped at {@code maxSize} entries.
 */
public ConcurrentDequeRecycler(C<T> c, int maxSize) {
    super(c, ConcurrentCollections.<T>newDeque(), maxSize);
    this.size = new AtomicInteger(0); // pool starts empty
}
// Shards that failed on this node, keyed by shard id — concurrent since it is
// read/written from multiple threads (presumably cluster-state appliers; confirm callers).
final ConcurrentMap<ShardId, ShardRouting> failedShardsCache = ConcurrentCollections.newConcurrentMap();
private final RepositoriesService repositoriesService;
/**
 * Creates a new BlockingClusterStatePublishResponseHandler
 * @param publishingToNodes the set of nodes to which the cluster state will be published and should respond
 */
public BlockingClusterStatePublishResponseHandler(Set<DiscoveryNode> publishingToNodes) {
    this.pendingNodes = ConcurrentCollections.newConcurrentSet();
    // Register each publish target individually as still awaiting a response.
    for (DiscoveryNode node : publishingToNodes) {
        this.pendingNodes.add(node);
    }
    this.latch = new CountDownLatch(this.pendingNodes.size());
    this.failedNodes = ConcurrentCollections.newConcurrentSet();
}
/**
 * Builds a new map for the refresh transition this should be called in beforeRefresh()
 */
Maps buildTransitionMap() {
    // Pre-size the new lookup from the current map so it doesn't rehash while filling.
    final int sizeHint = current.size();
    final VersionLookup nextLookup =
            new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(sizeHint));
    return new Maps(nextLookup, current, shouldInheritSafeAccess());
}
/**
 * Creates a thread pool with a fixed thread count.
 *
 * @param name          executor name, also used for thread naming
 * @param size          number of threads (both core and max)
 * @param queueCapacity bound on queued tasks; negative selects an unbounded queue
 * @param threadFactory factory for the pool's threads
 * @param contextHolder thread context propagated to pool threads
 */
public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory, ThreadContext contextHolder) {
    final BlockingQueue<Runnable> workQueue;
    if (queueCapacity < 0) {
        // Unbounded task queue.
        workQueue = ConcurrentCollections.newBlockingQueue();
    } else {
        // Enforce the capacity with a size-limiting wrapper.
        workQueue = new SizeBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
    }
    return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, workQueue, threadFactory, new EsAbortPolicy(), contextHolder);
}
/**
 * Creates a new recycler over a fresh concurrent deque with the given size bound.
 */
public ConcurrentDequeRecycler(C<T> c, int maxSize) {
    super(c, ConcurrentCollections.<T>newDeque(), maxSize);
    // Explicit zero-initialized counter for the number of pooled objects.
    this.size = new AtomicInteger(0);
}
// Collects each node's cluster-state response, keyed by the responding node;
// concurrent because responses arrive from multiple transport threads.
// NOTE(review): the try/for below is truncated in this view — the loop body and
// the try's catch/finally are not visible here.
final ConcurrentMap<DiscoveryNode, ClusterStateResponse> clusterStateResponses = ConcurrentCollections.newConcurrentMap(); try { for (final DiscoveryNode nodeToPing : nodesToPing) {