@Override
public T get() {
    // Fast path: a value computed earlier can be returned without a CAS.
    T existing = value.get();
    if (existing != null) {
        return existing;
    }
    // Slow path: atomically install the delegate's value unless another
    // thread won the race, in which case return that thread's value.
    return value.updateAndGet(v -> v != null ? v : delegate.get());
}
// Lazily creates the temporary key store on first use; later callers reuse
// the path installed by whichever thread won the race.
private static String keyStorePath() {
    final Path path = keyStorePath.updateAndGet(existing ->
            existing != null
                    ? existing
                    : KeyStoreUtil.createTemporaryStore("server-key-store", BASE64_ENCODED_STORE));
    return path.toAbsolutePath().toString();
}
}
/**
 * Returns the absolute path to the temporary trust store, creating it lazily
 * on first use.
 *
 * @return the path to the temporary trust store.
 */
public static String trustStorePath() {
    final Path path = trustStorePath.updateAndGet(existing ->
            existing != null
                    ? existing
                    : KeyStoreUtil.createTemporaryStore("client-trust-store", BASE64_ENCODED_STORE));
    return path.toAbsolutePath().toString();
}
}
/**
 * Stages a {@link TaskSource} update for this driver's source operator and then
 * attempts to apply it.
 *
 * <p>Must NOT be called while holding the driver lock: the staged update is only
 * processed by the thread that successfully acquires the lock afterwards.
 *
 * @param sourceUpdate update targeting this driver's source plan node
 */
public void updateSource(TaskSource sourceUpdate) {
    checkLockNotHeld("Can not update sources while holding the driver lock");
    // The update must be addressed to this driver's (single) source operator.
    checkArgument(
        sourceOperator.isPresent() && sourceOperator.get().getSourceId().equals(sourceUpdate.getPlanNodeId()),
        "sourceUpdate is for a plan node that is different from this Driver's source node");

    // stage the new updates
    // (merge with any not-yet-processed update so no splits are dropped)
    pendingTaskSourceUpdates.updateAndGet(current -> current == null ? sourceUpdate : current.update(sourceUpdate));

    // attempt to get the lock and process the updates we staged above
    // updates will be processed in close if and only if we got the lock
    tryWithLock(() -> TRUE);
}
/** * Checks for existing exception. In case of exception will close consumer and rethrow as IOException * @throws IOException abort if possible, close consumer then rethrow exception. */ private void checkExceptions() throws IOException { if (sendExceptionRef.get() != null && sendExceptionRef.get() instanceof KafkaException && sendExceptionRef.get() .getCause() instanceof ProducerFencedException) { // producer.send() may throw a KafkaException which wraps a FencedException re throw its wrapped inner cause. sendExceptionRef.updateAndGet(e -> (KafkaException) e.getCause()); } if (sendExceptionRef.get() != null) { final Exception exception = sendExceptionRef.get(); logHints(exception); if (tryToAbortTx(exception)) { LOG.error("Aborting Transaction [{}] cause by ERROR [{}]", writerIdTopicId, exception.getMessage()); producer.abortTransaction(); } LOG.error("Closing writer [{}] caused by ERROR [{}]", writerIdTopicId, exception.getMessage()); producer.close(0, TimeUnit.MILLISECONDS); throw new IOException(exception); } }
/**
 * {@inheritDoc}
 */
@Override
public void changeTimeoutDuration(final Duration timeoutDuration) {
    // Build the new config from the state observed INSIDE the update function.
    // The previous version read state.get().config up front and then CAS'd
    // separately, which could silently discard a concurrent config change made
    // between the read and the CAS. The builder is pure, so re-applying the
    // function on CAS retry is safe.
    state.updateAndGet(currentState -> {
        RateLimiterConfig newConfig = RateLimiterConfig.from(currentState.config)
            .timeoutDuration(timeoutDuration)
            .build();
        return new State(
            newConfig,
            currentState.activeCycle,
            currentState.activePermissions,
            currentState.nanosToWait
        );
    });
}
/**
 * {@inheritDoc}
 */
@Override
public void changeLimitForPeriod(final int limitForPeriod) {
    // Build the new config from the state observed INSIDE the update function.
    // The previous version read state.get().config up front and then CAS'd
    // separately, which could silently discard a concurrent config change made
    // between the read and the CAS. The builder is pure, so re-applying the
    // function on CAS retry is safe.
    state.updateAndGet(currentState -> {
        RateLimiterConfig newConfig = RateLimiterConfig.from(currentState.config)
            .limitForPeriod(limitForPeriod)
            .build();
        return new State(
            newConfig,
            currentState.activeCycle,
            currentState.activePermissions,
            currentState.nanosToWait
        );
    });
}
@Override public void onSample(long startTime, long rtt, int inflight, boolean didDrop) { long endTime = startTime + rtt; if (rtt < minRttThreshold) { return; } if (didDrop) { sample.updateAndGet(current -> current.addDroppedSample(inflight)); } else { sample.updateAndGet(window -> window.addSample(rtt, inflight)); } if (startTime + rtt > nextUpdateTime) { synchronized (lock) { // Double check under the lock if (endTime > nextUpdateTime) { ImmutableSampleWindow current = sample.get(); if (isWindowReady(current)) { sample.set(new ImmutableSampleWindow()); nextUpdateTime = endTime + Math.min(Math.max(current.getCandidateRttNanos() * 2, minWindowTime), maxWindowTime); delegate.onSample(startTime, current.getAverateRttNanos(), current.getMaxInFlight(), current.didDrop()); } } } } }
@Override
public Future<Void> shutdown() {
    // Install the shutdown callback on the first call; subsequent calls return
    // the callback already in place. Note updateAndGet may re-run the function
    // under contention, so a losing thread's freshly created callback is dropped.
    return _shutdown.updateAndGet(existing -> existing == null ? newShutdownCallback() : existing);
}
/**
 * Resets this session's idle-timeout: cancels any pending kill task and
 * schedules a new one {@code configuredSessionTimeout} ms out. If the kill
 * task has already fired, the session is considered closed and nothing is
 * rescheduled.
 *
 * NOTE(review): this lambda has side effects (cancel/schedule) and
 * updateAndGet may re-apply it under contention, which could schedule more
 * than one kill task — confirm callers are single-threaded or tolerant of
 * duplicate schedules.
 */
public void touch() {
    // if the task of killing is cancelled successfully then reset the session monitor. otherwise this session
    // has already been killed and there's nothing left to do with this session.
    kill.updateAndGet(future -> {
        if (null == future || !future.isDone()) {
            // Pending (or absent) kill task: cancel it and push the deadline out.
            if (future != null) future.cancel(false);
            return this.scheduledExecutorService.schedule(() -> {
                logger.info("Session {} has been idle for more than {} milliseconds - preparing to close",
                        this.session, this.configuredSessionTimeout);
                kill(false);
            }, this.configuredSessionTimeout, TimeUnit.MILLISECONDS);
        }
        // Kill task already completed: keep it; the session is already closing.
        return future;
    });
}
// Returns a thread-safe supplier that computes the wrapped supplier's value at
// most "effectively once" and caches it (the wrapped supplier may still run more
// than once under contention; only one result is published). The computed value
// must be non-null.
static <T> Supplier<T> memoize(Supplier<T> supplier) {
    AtomicReference<T> cache = new AtomicReference<>();
    return () -> {
        // Fast path: already computed.
        T cached = cache.get();
        if (cached != null) {
            return cached;
        }
        // Compute and publish atomically; a racing thread's value wins if present.
        return cache.updateAndGet(existing ->
                existing == null ? requireNonNull(supplier.get()) : existing);
    };
}
}
@Test
public void raceError() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // amb with an already-failed future must propagate the error and emit no values.
    Spouts.amb(Future.ofError(new RuntimeException()), Future.<Integer>future())
          .forEach(z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    assertThat("Values " + data, data.get().size(), equalTo(0));
    assertThat(complete.get(), equalTo(true));
    assertThat(error.get(), instanceOf(RuntimeException.class));
}
@Test
public void mergeLatestEmpty() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // Merging an empty stream of publishers must complete immediately, with no values.
    Spouts.<Integer>mergeLatest(Spouts.<ReactiveSeq<Integer>>of())
          .forEach(z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    Assert.assertThat(complete.get(), equalTo(true));
}
@Test
public void mergeEmpty() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // Merging an empty stream of publishers must complete immediately, with no values.
    Spouts.<Integer>merge(Spouts.<ReactiveSeq<Integer>>of())
          .forEach(z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    Assert.assertThat(complete.get(), equalTo(true));
}

@Test
@Test
public void race() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // amb picks whichever publisher emits first and drops the other.
    Spouts.amb(Spouts.range(1, 10), Spouts.range(11, 20))
          .forEach(z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    assertThat("Values " + data, data.get(), hasItems(11, 12, 13, 14, 15, 16, 17, 18, 19));
    Assert.assertThat(complete.get(), IsEqual.equalTo(true));
    assertNull(error.get());
}

@Test
@Test
public void raceBackpressure() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // Request only 3 elements: the stream must not complete and only 3 arrive.
    Spouts.amb(Spouts.range(1, 10), Spouts.range(11, 20))
          .forEach(3, z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    assertThat("Values " + data, data.get(), hasItems(11, 12, 13));
    Assert.assertThat(complete.get(), IsEqual.equalTo(false));
    assertNull(error.get());
}

@Test
@Test
public void raceFuture() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // amb against a never-completing future must take the ready range.
    Spouts.amb(Spouts.range(1, 10), Future.future())
          .forEach(z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    assertThat("Values " + data, data.get(), hasItems(1, 2, 3, 4, 5, 6, 7, 8, 9));
    Assert.assertThat(complete.get(), IsEqual.equalTo(true));
    assertNull(error.get());
}

@Test
@Test
public void mergePublisherPublisher() {
    // Diamond operator instead of raw AtomicReference (the raw construction
    // compiled with an unchecked warning).
    AtomicReference<Vector<Integer>> data = new AtomicReference<>(Vector.empty());
    AtomicBoolean complete = new AtomicBoolean(false);
    AtomicReference<Throwable> error = new AtomicReference<>(null);

    // mergeLatest over a publisher of publishers must surface every inner value.
    Spouts.mergeLatest(Flux.just(Flux.just(1, 2), Flux.just(3, 4)))
          .forEach(z -> {
              assertFalse(complete.get());
              data.updateAndGet(s -> s.plus(z));
          }, e -> error.set(e), () -> complete.set(true));

    assertThat("Values " + data, data.get(), hasItems(1, 2, 3, 4));
    Assert.assertThat(complete.get(), IsEqual.equalTo(true));
    assertNull(error.get());
}
@Test
public void peek() {
    // Collects every value passed to peek. (The original also declared an
    // unused "key" accumulator, removed here.)
    AtomicReference<Vector<Integer>> value = new AtomicReference<>(Vector.empty());
    ImmutableMap<Integer, Integer> map = empty();
    HashSet<Integer> values = HashSet.empty();
    for (int i = 0; i < 80; i++) {
        map = map.put(i, i * 2);
        values = values.add(i * 2);
    }

    // peek must not change the map's contents.
    ImmutableMap<Integer, Integer> map2 = map.peek(v -> value.updateAndGet(vec -> vec.append(v)));
    assertThat(map2.values().toHashSet(), equalTo(values));
}
@Test
public void bipeek() {
    // Accumulators recording every key and value the bipeek callbacks observe.
    AtomicReference<Vector<Integer>> key = new AtomicReference<>(Vector.empty());
    AtomicReference<Vector<Integer>> value = new AtomicReference<>(Vector.empty());

    ImmutableMap<Integer, Integer> map = empty();
    HashSet<Integer> keys = HashSet.empty();
    HashSet<Integer> values = HashSet.empty();
    for (int i = 0; i < 80; i++) {
        map = map.put(i, i * 2);
        keys = keys.add(i);
        values = values.add(i * 2);
    }

    // bipeek must invoke both callbacks while leaving the map contents untouched.
    ImmutableMap<Integer, Integer> map2 = map.bipeek(
            k -> key.updateAndGet(ks -> ks.append(k)),
            v -> value.updateAndGet(vs -> vs.append(v)));

    assertThat(map2.keys().toHashSet(), equalTo(keys));
    assertThat(map2.values().toHashSet(), equalTo(values));
}

@Test