/**
 * Merges {@code value} into the backing {@code storageMap} under {@code key},
 * combining with any existing mapping via {@code remappingFunction}.
 * Declared {@code synchronized} to serialize with this class's other map operations.
 */
@Override
public synchronized Object merge(
        Object key,
        Object value,
        BiFunction<? super Object, ? super Object, ? extends Object> remappingFunction) {
    return storageMap.merge(key, value, remappingFunction);
}
// Forwarding-map style: merge() simply delegates to the map returned by delegate().
// NOTE(review): the trailing "keySet() {" header is truncated here — its body lies
// outside this chunk, so this line is left untouched.
@Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { return delegate().merge(key, value, remappingFunction); } @Override public Set<K> keySet() {
@Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { checkType(key); checkType(value); return map.merge(key, value, (k, v) -> { // No need to check the key, already check above. V output = remappingFunction.apply(k, v); if (output != null) { checkType(output); } return output; }); }
/**
 * merge removes the mapping when the given key is present and the
 * remapping function returns null.
 */
public void testMerge3() {
    ConcurrentMap map = map5();
    map.merge(one, "Y", (existing, given) -> null);
    assertFalse(map.containsKey(one));
}
/**
 * merge replaces the existing value when the given key is present,
 * returning the function's result.
 */
public void testMerge2() {
    ConcurrentMap map = map5();
    Object merged = map.merge(one, "Y", (existing, given) -> "Z");
    assertEquals("Z", merged);
}
/**
 * merge inserts the given value when the key is absent; the remapping
 * function is not invoked and the inserted value is returned.
 */
public void testMerge1() {
    ConcurrentMap map = map5();
    Object merged = map.merge(six, "Y", (existing, given) -> "Z");
    assertEquals("Y", merged);
}
// Verifies that merging onto an expired entry behaves like an insert: after the
// ticker advances past the expiry window, the remapping function must NOT be
// called (the old entry is treated as absent — hence the AssertionError guard),
// and the collection weigher reports the new value's weight (3 elements -> 3L).
@Test(dataProvider = "caches") @CacheSpec(implementation = Implementation.Caffeine, population = Population.EMPTY, maximumSize = Maximum.FULL, weigher = CacheWeigher.COLLECTION, expiryTime = Expire.ONE_MINUTE, mustExpireWithAnyOf = { AFTER_ACCESS, AFTER_WRITE, VARIABLE }, expiry = { CacheExpiry.DISABLED, CacheExpiry.CREATE, CacheExpiry.WRITE, CacheExpiry.ACCESS }, expireAfterAccess = {Expire.DISABLED, Expire.ONE_MINUTE}, expireAfterWrite = {Expire.DISABLED, Expire.ONE_MINUTE}) public void merge_weighted(Cache<Integer, List<Integer>> cache, CacheContext context) { cache.put(1, ImmutableList.of(1)); context.ticker().advance(1, TimeUnit.MINUTES); cache.asMap().merge(1, ImmutableList.of(1, 2, 3), (oldValue, v) -> { throw new AssertionError("Should never be called"); }); assertThat(cache.policy().eviction().get().weightedSize().getAsLong(), is(3L)); }
// Verifies merge() weight accounting after references are GC-collected in a
// weak/soft-keyed cache. After awaitFullGc(), a weak-keyed entry is treated as
// absent, so with weak keys (and non-strong values) the remapping function must
// not run; otherwise it replaces the value. Either way the collection weigher
// must report the merged value's weight (3 elements -> 3L).
@Test(dataProvider = "caches") @CacheSpec(implementation = Implementation.Caffeine, requiresWeakOrSoft = true, expireAfterAccess = Expire.DISABLED, expireAfterWrite = Expire.DISABLED, maximumSize = Maximum.UNREACHABLE, weigher = CacheWeigher.COLLECTION, population = Population.EMPTY, stats = Stats.ENABLED, removalListener = Listener.DEFAULT, writer = Writer.DISABLED) public void merge_weighted(Cache<Integer, List<Integer>> cache, CacheContext context) { Integer key = context.absentKey(); cache.put(key, ImmutableList.of(1)); GcFinalization.awaitFullGc(); cache.asMap().merge(key, ImmutableList.of(1, 2, 3), (oldValue, v) -> { if (context.isWeakKeys() && !context.isStrongValues()) { throw new AssertionError("Should never be called"); } return v; }); assertThat(cache.policy().eviction().get().weightedSize().getAsLong(), is(3L)); }
// Registers a handler for an event-bus address. The merge() call makes the
// "first registration for this address?" decision atomic: the candidate
// single-element sequence `handlers` is inserted when the address is new;
// otherwise the lambda appends our holder (`prev.first()`) to the existing
// sequence `old`. Newness is then detected by IDENTITY comparison
// (handlers == actualHandlers) — true only when merge installed our candidate.
// NOTE(review): the lambda's parameters are (oldValue, newValue); the name
// `prev` actually refers to the newly-offered sequence — confirm intent.
// The close hook is only added when a caller context already existed.
private <T> LocalRegistrationResult<T> addLocalRegistration(String address, HandlerRegistration<T> registration, boolean replyHandler, boolean localOnly) { Objects.requireNonNull(address, "address"); Context context = Vertx.currentContext(); boolean hasContext = context != null; if (!hasContext) { // Embedded context = vertx.getOrCreateContext(); } registration.setHandlerContext(context); HandlerHolder<T> holder = new HandlerHolder<>(metrics, registration, replyHandler, localOnly, context); ConcurrentCyclicSequence<HandlerHolder> handlers = new ConcurrentCyclicSequence<HandlerHolder>().add(holder); ConcurrentCyclicSequence<HandlerHolder> actualHandlers = handlerMap.merge( address, handlers, (old, prev) -> old.add(prev.first())); if (hasContext) { HandlerEntry entry = new HandlerEntry<>(address, registration); context.addCloseHook(entry); } boolean newAddress = handlers == actualHandlers; return new LocalRegistrationResult<>(holder, newAddress); }
@Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { checkType(key); checkType(value); return map.merge(key, value, (k, v) -> { // No need to check the key, already check above. V output = remappingFunction.apply(k, v); if (output != null) { checkType(output); } return output; }); }
// Duplicate of the registration routine above in this corpus. merge() atomically
// either installs the fresh one-element sequence (address previously unknown)
// or appends this holder to the existing sequence. The identity check
// `handlers == actualHandlers` is the newness signal: merge returns our exact
// candidate object only on insert, never on remap.
// NOTE(review): lambda params are (oldValue, newValue); `prev` names the NEW
// value — the code is correct but the name is misleading.
private <T> LocalRegistrationResult<T> addLocalRegistration(String address, HandlerRegistration<T> registration, boolean replyHandler, boolean localOnly) { Objects.requireNonNull(address, "address"); Context context = Vertx.currentContext(); boolean hasContext = context != null; if (!hasContext) { // Embedded context = vertx.getOrCreateContext(); } registration.setHandlerContext(context); HandlerHolder<T> holder = new HandlerHolder<>(metrics, registration, replyHandler, localOnly, context); ConcurrentCyclicSequence<HandlerHolder> handlers = new ConcurrentCyclicSequence<HandlerHolder>().add(holder); ConcurrentCyclicSequence<HandlerHolder> actualHandlers = handlerMap.merge( address, handlers, (old, prev) -> old.add(prev.first())); if (hasContext) { HandlerEntry entry = new HandlerEntry<>(address, registration); context.addCloseHook(entry); } boolean newAddress = handlers == actualHandlers; return new LocalRegistrationResult<>(holder, newAddress); }
// Folds mapAdded into mapGlobal. The parallel forEach with a side effect is
// safe ONLY because ConcurrentHashMap.merge is atomic per key; the lambda
// unions the two lists through a TreeSet, so the merged list is sorted and
// de-duplicated. NOTE(review): the lambda may be retried under contention, so
// it must stay free of external side effects — it is, here.
ConcurrentMap<String, List<String>> mapGlobal = new ConcurrentHashMap<>(); // ... mapAdded.entrySet().parallelStream().forEach(e -> mapGlobal.merge(e.getKey(), e.getValue(), (v1, v2) -> { Set<String> set = new TreeSet<>(v1); set.addAll(v2); return new ArrayList<>(set); }));
// Records whether the service is optional (logical AND with any previous
// registration: once required, always required) and lazily creates a tracker.
// NOTE(review): the get()/putIfAbsent pair is check-then-act — two concurrent
// callers may both construct a tracker and one instance is silently discarded.
// Presumably harmless if the tracker holds no resources until opened — verify.
public <T> void trackSingle(final Class<T> service, boolean optional) { this.optional.merge(service, optional, Boolean::logicalAnd); if (singleTrackers.get(service) == null) { SingleServiceTracker<T> tracker = new SingleServiceTracker<T>(bundleContext, service) { @Override public void updateState(T oldSvc, T newSvc) { updateStateSingle(service, newSvc); } }; singleTrackers.putIfAbsent(service, tracker); } }
// Per-key long counters; merge() makes each update atomic.
ConcurrentMap<String, Long> map = new ConcurrentHashMap<String, Long>();

/**
 * Atomically adds {@code value} to the counter stored under {@code key},
 * creating the entry when absent.
 *
 * @return the updated total for {@code key}
 */
public long addTo(String key, long value) {
    return map.merge(key, value, (total, delta) -> total + delta);
}
// Duplicate of the tracker-registration routine above in this corpus.
// `optional` is merged with Boolean::logicalAnd so a single non-optional
// request makes the service permanently required.
// NOTE(review): get() followed by putIfAbsent() is racy — a losing thread's
// freshly built tracker is dropped; confirm the constructor is side-effect free.
public <T> void trackSingle(final Class<T> service, boolean optional) { this.optional.merge(service, optional, Boolean::logicalAnd); if (singleTrackers.get(service) == null) { SingleServiceTracker<T> tracker = new SingleServiceTracker<T>(bundleContext, service) { @Override public void updateState(T oldSvc, T newSvc) { updateStateSingle(service, newSvc); } }; singleTrackers.putIfAbsent(service, tracker); } }
// Pops the head future from the execution's queue; when the queue becomes
// empty the remapping function returns null, which removes the mapping.
// NOTE(review): if executionId is ABSENT, merge() inserts the empty
// LinkedList as a new mapping instead of doing nothing — looks unintended;
// compute() or computeIfPresent() would avoid it. Confirm against callers.
@Override public void endExecution(Long executionId) { mapOfRunningTasks.merge(executionId, new LinkedList<>(), (queue, newValue) -> { queue.poll(); return !queue.isEmpty() ? queue : null; } ); }
// Forwarding-map style: merge() delegates unchanged to delegate().
// NOTE(review): the trailing "keySet() {" header is cut off at this chunk
// boundary; its body is not visible here, so the line is preserved as-is.
@Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { return delegate().merge(key, value, remappingFunction); } @Override public Set<K> keySet() {
// Submits the task and enqueues its Future under executionId: merge() inserts
// a fresh single-element queue when the id is new, otherwise addLists
// (project helper — presumably concatenates the two queues; verify) combines
// the existing queue with the new one. The pre-existing comment explains why
// an entry may already be present (race with the previous step's cleanup).
// NOTE(review): raw Future — consider Future<?> for type safety.
public void addExecution(Long executionId, Runnable runnable) { //It is possible that in linear flow we will have step 2 that is already running, but step 1 that still did not clean itself from the table (race condition) Future future = executorService.submit(runnable); mapOfRunningTasks.merge(executionId, newQueue(future), this::addLists); }
/**
 * Caches the given aggregate root in memory: inserts a new cache entry when
 * the id is unknown, otherwise refreshes the existing entry in place.
 *
 * @throws NullPointerException if {@code aggregateRoot} is null
 */
private void setInternal(IAggregateRoot aggregateRoot) {
    if (aggregateRoot == null) {
        throw new NullPointerException("aggregateRoot");
    }
    _aggregateRootInfoDict.merge(
            aggregateRoot.uniqueId(),
            new AggregateCacheInfo(aggregateRoot),
            (cached, fresh) -> {
                // Already cached: update the existing entry rather than replacing it.
                cached.setAggregateRoot(aggregateRoot);
                cached.setLastUpdateTimeMillis(System.currentTimeMillis());
                _logger.debug("In memory aggregate updated, type: {}, id: {}, version: {}", aggregateRoot.getClass().getName(), aggregateRoot.uniqueId(), aggregateRoot.version());
                return cached;
            });
}
// Audit test: under strict reactive auditing, calling the potentially
// CPU-bound ConcurrentHashMap.merge must raise CPUReactiveAuditException
// (the null arguments never matter — the audit fires before they are used).
// NOTE(review): the trailing @Test annotation belongs to the NEXT test method,
// which lies outside this chunk; it is preserved untouched.
@Test(expected = CPUReactiveAuditException.class) public void merge() { TestTools.strict.commit(); ConcurrentMap<Object,Object> q = new ConcurrentHashMap<>(); q.merge(null, null, null); } @Test(expected = CPUReactiveAuditException.class)