/**
 * Creates an exponential-backoff retry schedule with no upper bound on the delay.
 * Convenience overload that forwards to the four-argument variant with an
 * effectively unlimited maximum delay.
 *
 * @param initialMillis the delay before the first retry, in milliseconds.
 * @param multiplier    the factor by which the delay grows after each attempt.
 * @param attempts      the maximum number of attempts.
 * @return a {@link RetryWithBackoff} describing the schedule.
 */
public static RetryWithBackoff withExpBackoff(long initialMillis, int multiplier, int attempts) {
    // No cap requested: use the largest representable delay as the bound.
    final long noMaxDelay = Long.MAX_VALUE;
    return withExpBackoff(initialMillis, multiplier, attempts, noMaxDelay);
}
/**
 * Runs the supplied async task, retrying forever on failure with exponential backoff
 * (100ms initial delay, doubling per attempt, capped at 10s). Each failure is handed
 * to {@code exceptionConsumer} before the next attempt is scheduled.
 *
 * @param futureSupplier    produces a fresh future for each attempt.
 * @param exceptionConsumer invoked with the failure cause of every failed attempt.
 * @param executor          executor on which retries are scheduled.
 * @return a future that completes with the first successful result.
 */
public static <U> CompletableFuture<U> withIndefiniteRetriesAsync(Supplier<CompletableFuture<U>> futureSupplier,
                                                                  Consumer<Throwable> exceptionConsumer,
                                                                  ScheduledExecutorService executor) {
    return Retry.indefinitelyWithExpBackoff(100, 2, 10000, exceptionConsumer)
                .runAsync(futureSupplier, executor);
}
/**
 * Records the event and processes it under a bounded retry policy (100ms initial delay,
 * multiplier 1 — i.e. constant backoff — 5 attempts, 100ms cap), retrying only failures
 * that {@code RetryableException.isRetryable} accepts.
 * <p>
 * If processing ultimately fails with {@link OperationDisallowedException}, the event is
 * written back into the request stream (retried indefinitely) BEFORE the returned future
 * is completed exceptionally — so callers observe the failure only after the requeue has
 * succeeded. Any other failure completes the result exceptionally immediately.
 *
 * @param event the event to process; also added to {@code receivedForProcessing} for test inspection.
 * @return a future that completes when processing (and, if needed, the requeue) is done.
 */
@Override
public CompletableFuture<Void> processEvent(TestBase event) {
    receivedForProcessing.add(event);
    CompletableFuture<Void> result = new CompletableFuture<>();
    Retry.withExpBackoff(100, 1, 5, 100)
         .retryWhen(RetryableException::isRetryable)
         .runAsync(() -> event.process(null), executor)
         .whenCompleteAsync((r, e) -> {
             if (e != null) {
                 // Unwrap CompletionException/ExecutionException wrappers to get the real cause.
                 Throwable cause = Exceptions.unwrap(e);
                 if (cause instanceof OperationDisallowedException) {
                     // Requeue the event (retried forever), then surface the original failure.
                     Retry.indefinitelyWithExpBackoff("Error writing event back into requeststream")
                          .runAsync(() -> writer.write(event), executor)
                          .thenAccept(v -> result.completeExceptionally(cause));
                 } else {
                     result.completeExceptionally(cause);
                 }
             } else {
                 result.complete(r);
             }
         }, executor);
    return result;
}
}
/**
 * Builds the write retry policy: exponential backoff (multiplier 2) with the initial
 * delay and cap derived from the configured write timeout and attempt count, retrying
 * on every exception.
 *
 * @param maxWriteAttempts maximum number of write attempts.
 * @param writeTimeout     overall write timeout, in milliseconds.
 * @return a retry policy that retries every exception up to {@code maxWriteAttempts} times.
 */
private Retry.RetryAndThrowBase<? extends Exception> createRetryPolicy(int maxWriteAttempts, int writeTimeout) {
    // NOTE(review): integer division — if writeTimeout < maxWriteAttempts this yields a 0ms
    // initial delay; confirm Retry.withExpBackoff accepts 0.
    int initialDelay = writeTimeout / maxWriteAttempts;
    // NOTE(review): writeTimeout * maxWriteAttempts can overflow int for large inputs —
    // presumably both values are small; verify against callers.
    int maxDelay = writeTimeout * maxWriteAttempts;
    return Retry.withExpBackoff(initialDelay, 2, maxWriteAttempts, maxDelay)
                .retryWhen(ex -> true); // Retry for every exception.
}
/**
 * Creates the event processor scope on the controller, retrying indefinitely with
 * exponential backoff (DELAY/MULTIPLIER/MAX_DELAY); every failure is logged as a warning.
 *
 * @param scopeName the scope to create.
 * @return a future that completes once the scope has been created.
 */
private CompletableFuture<Void> createScope(final String scopeName) {
    CompletableFuture<Void> scopeCreated = Retry
            .indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY,
                    e -> log.warn("Error creating event processor scope " + scopeName, e))
            .runAsync(() -> controller.createScope(scopeName)
                                      .thenAccept(x -> log.info("Created controller scope {}", scopeName)),
                      executor);
    return Futures.toVoid(scopeCreated);
}
/**
 * Runs the supplier synchronously, retrying with exponential backoff (100ms initial,
 * doubling per attempt, capped at 1s) whenever the predicate accepts the thrown exception.
 *
 * @param supplier   produces the value; re-invoked on each retry.
 * @param predicate  decides whether a given failure should be retried.
 * @param numOfTries maximum number of attempts.
 * @return the first successfully produced value.
 */
public static <U> U withRetries(Supplier<U> supplier, Predicate<Throwable> predicate, int numOfTries) {
    return Retry.withExpBackoff(100, 2, numOfTries, 1000)
                .retryWhen(predicate)
                .run(() -> supplier.get());
}
/**
 * Writes an event back into the stream, retrying indefinitely until it succeeds, and only
 * THEN completes {@code toComplete}: exceptionally with {@code e} if a prior failure is
 * being propagated, normally otherwise. The ordering guarantees callers never observe
 * completion before the requeue has landed.
 *
 * @param futureSupplier produces the write attempt; re-invoked on each retry.
 * @param toComplete     the future to complete once the write has succeeded.
 * @param e              the original failure to propagate, or null for normal completion.
 * @return a future that completes after {@code toComplete} has been completed.
 */
private CompletableFuture<Void> retryIndefinitelyThenComplete(Supplier<CompletableFuture<Void>> futureSupplier,
                                                              CompletableFuture<Void> toComplete,
                                                              Throwable e) {
    String failureMessage = String.format("Error writing event back into stream from processor %s",
            getProcessorName());
    return Retry.indefinitelyWithExpBackoff(failureMessage)
                .runAsync(futureSupplier, executor)
                .thenRun(() -> {
                    if (e != null) {
                        toComplete.completeExceptionally(e);
                    } else {
                        toComplete.complete(null);
                    }
                });
}
}
/**
 * Runs the supplied async task, retrying with exponential backoff (100ms initial delay,
 * doubling per attempt, capped at 10s) whenever the predicate accepts the failure, up to
 * {@code numOfTries} attempts.
 *
 * @param futureSupplier produces a fresh future for each attempt.
 * @param predicate      decides whether a given failure should be retried.
 * @param numOfTries     maximum number of attempts.
 * @param executor       executor on which retries are scheduled.
 * @return a future that completes with the first successful result.
 */
public static <U> CompletableFuture<U> withRetriesAsync(Supplier<CompletableFuture<U>> futureSupplier,
                                                        Predicate<Throwable> predicate,
                                                        int numOfTries,
                                                        ScheduledExecutorService executor) {
    return Retry.withExpBackoff(100, 2, numOfTries, 10000)
                .retryWhen(predicate)
                .runAsync(() -> futureSupplier.get(), executor);
}
/**
 * Creates the event processor stream on the controller, retrying indefinitely with
 * exponential backoff (DELAY/MULTIPLIER/MAX_DELAY); every failure is logged as a warning.
 *
 * @param scope        the scope the stream lives in.
 * @param streamName   the stream to create.
 * @param streamConfig the configuration for the new stream.
 * @return a future that completes once the stream has been created.
 */
private CompletableFuture<Void> createStream(String scope, String streamName, final StreamConfiguration streamConfig) {
    CompletableFuture<Void> streamCreated = Retry
            .indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY,
                    e -> log.warn("Error creating event processor stream " + streamName, e))
            .runAsync(() -> controller.createStream(scope, streamName, streamConfig)
                                      .thenAccept(x -> log.info("Created stream {}/{}", scope, streamName)),
                      executor);
    return Futures.toVoid(streamCreated);
}
/**
 * Translates the writer configuration's backoff settings into a retry schedule.
 *
 * @param config the event writer configuration to read the backoff parameters from.
 * @return a {@link RetryWithBackoff} built from the configured initial delay, multiplier,
 *         attempt count and maximum delay.
 */
private RetryWithBackoff getRetryFromConfig(EventWriterConfig config) {
    final long initialMillis = config.getInitalBackoffMillis();
    final int multiplier = config.getBackoffMultiple();
    final int attempts = config.getRetryAttempts();
    final long maxDelayMillis = config.getMaxBackoffMillis();
    return Retry.withExpBackoff(initialMillis, multiplier, attempts, maxDelayMillis);
}
}
/**
 * Verifies that the indefinite-retry policy keeps retrying until the task stops throwing:
 * the task fails until the exception callback has been invoked 10 times, after which it
 * succeeds, so exactly 10 failures must have been observed.
 * <p>
 * Fix: the original allocated a single-thread ScheduledExecutorService inline and never
 * shut it down, leaking a non-daemon thread per test run; the executor is now shut down
 * in a finally block.
 */
@Test
public void retryIndefiniteTest() throws ExecutionException, InterruptedException {
    AtomicInteger i = new AtomicInteger(0);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        Retry.indefinitelyWithExpBackoff(10, 10, 10, e -> i.getAndIncrement())
             .runAsync(() -> CompletableFuture.runAsync(() -> {
                 // Keep failing until the failure callback has fired 10 times.
                 if (i.get() < 10) {
                     throw new RuntimeException("test");
                 }
             }), executor)
             .get();
        assert i.get() == 10;
    } finally {
        // Release the executor's worker thread so the test leaves no threads behind.
        executor.shutdownNow();
    }
}
/**
 * Builds the retry schedule for this writer from its configuration.
 *
 * @param config the event writer configuration supplying the backoff parameters.
 * @return a {@link RetryWithBackoff} with the configured initial delay, multiplier,
 *         attempt count and maximum delay.
 */
private RetryWithBackoff getRetryFromConfig(EventWriterConfig config) {
    return Retry.withExpBackoff(
            config.getInitalBackoffMillis(),
            config.getBackoffMultiple(),
            config.getRetryAttempts(),
            config.getMaxBackoffMillis());
}
/**
 * Invokes the resendToSuccessors callback for this segment, at most once: the CAS on
 * {@code state.needSuccessors} guarantees only the first caller schedules it; later calls
 * are no-ops. The callback is run on the connection factory's internal executor and
 * retried indefinitely with the configured backoff schedule; once it has run, the
 * in-flight latch is released so waiting writers can proceed.
 *
 * @param wireCommand the command that triggered the resend (logged for diagnostics only).
 */
private void invokeResendCallBack(WireCommand wireCommand) {
    // compareAndSet(false, true): only the first invocation wins; the flag is never reset here.
    if (state.needSuccessors.compareAndSet(false, true)) {
        Retry.indefinitelyWithExpBackoff(retrySchedule.getInitialMillis(), retrySchedule.getMultiplier(),
                retrySchedule.getMaxDelay(),
                t -> log.error(writerId + " to invoke resendToSuccessors callback: ", t))
             .runInExecutor(() -> {
                 log.debug("Invoking resendToSuccessors call back for {} on writer {}", wireCommand, writerId);
                 resendToSuccessorsCallback.accept(Segment.fromScopedName(getSegmentName()));
             }, connectionFactory.getInternalExecutor())
             .thenRun(() -> {
                 // Unblock anyone waiting on in-flight events now that successors will handle them.
                 log.trace("Release inflight latch for writer {}", writerId);
                 state.waitingInflight.release();
             });
    }
}
/**
 * Verifies that {@code retryWhen} stops retrying as soon as the predicate returns false:
 * the predicate allows exactly one retry (it returns false on its second evaluation), so
 * the task runs twice and the second failure is rethrown to the caller.
 * <p>
 * Fix: in the original, both assertions lived inside the catch block — if {@code run}
 * unexpectedly returned without throwing, the test passed vacuously. A thrown-flag
 * assertion now makes that case fail.
 */
@Test
public void retryPredicateTest() {
    AtomicInteger i = new AtomicInteger(0);
    boolean thrown = false;
    try {
        Retry.withExpBackoff(10, 10, 10)
             .retryWhen(e -> i.getAndIncrement() != 1)
             .run(() -> {
                 throw new Exception("test");
             });
    } catch (Exception e) {
        thrown = true;
    }
    // The retry loop must rethrow once the predicate rejects the failure...
    assert thrown;
    // ...and the predicate must have been consulted exactly twice (one retry, then stop).
    assert i.get() == 2;
}
sealedSegmentQueue.add(segment); retransmitPool.execute(() -> { Retry.indefinitelyWithExpBackoff(config.getInitalBackoffMillis(), config.getBackoffMultiple(), config.getMaxBackoffMillis(), t -> log.error("Encountered excemption when handeling a sealed segment: ", t))
/**
 * Creates a new instance of the DurableLog class.
 *
 * @param config              Durable Log Configuration.
 * @param metadata            The StreamSegment Container Metadata for the container which this Durable Log is part of.
 * @param dataFrameLogFactory A DurableDataLogFactory which can be used to create instances of DataFrameLogs.
 * @param readIndex           A ReadIndex which can be used to store newly processed appends.
 * @param executor            The Executor to use for async operations.
 * @throws NullPointerException If any of the arguments are null.
 */
public DurableLog(DurableLogConfig config, UpdateableContainerMetadata metadata, DurableDataLogFactory dataFrameLogFactory,
                  ReadIndex readIndex, ScheduledExecutorService executor) {
    Preconditions.checkNotNull(config, "config");
    this.metadata = Preconditions.checkNotNull(metadata, "metadata");
    Preconditions.checkNotNull(dataFrameLogFactory, "dataFrameLogFactory");
    Preconditions.checkNotNull(readIndex, "readIndex");
    this.executor = Preconditions.checkNotNull(executor, "executor");
    this.durableDataLog = dataFrameLogFactory.createDurableDataLog(metadata.getContainerId());
    assert this.durableDataLog != null : "dataFrameLogFactory created null durableDataLog.";
    this.traceObjectId = String.format("DurableLog[%s]", metadata.getContainerId());
    this.inMemoryOperationLog = createInMemoryLog();
    // The updater pushes processed operations into the in-memory log/read index and
    // triggers tail reads for listeners waiting on new operations.
    this.memoryStateUpdater = new MemoryStateUpdater(this.inMemoryOperationLog, readIndex, this::triggerTailReads);
    MetadataCheckpointPolicy checkpointPolicy = new MetadataCheckpointPolicy(config, this::queueMetadataCheckpoint, this.executor);
    this.operationProcessor = new OperationProcessor(this.metadata, this.memoryStateUpdater, this.durableDataLog, checkpointPolicy, executor);
    // React to the processor stopping or failing on its own.
    Services.onStop(this.operationProcessor, this::queueStoppedHandler, this::queueFailedHandler, this.executor);
    this.tailReads = new HashSet<>();
    this.closed = new AtomicBoolean();
    this.delayedStart = new CompletableFuture<>();
    // Effectively unbounded retries (Integer.MAX_VALUE attempts) at a constant delay
    // (multiplier 1), but ONLY while the underlying data log reports itself disabled;
    // any other start-up failure is not retried by this policy.
    this.delayedStartRetry =
            Retry.withExpBackoff(config.getStartRetryDelay().toMillis(), 1, Integer.MAX_VALUE)
                 .retryWhen(ex -> Exceptions.unwrap(ex) instanceof DataLogDisabledException);
}
Retry.indefinitelyWithExpBackoff(retrySchedule.getInitialMillis(), retrySchedule.getMultiplier(), retrySchedule.getMaxDelay(), t -> log.warn(writerId + " Failed to connect: ", t))
/**
 * Updates the reader group data at the specified path by applying the updater method on
 * the existing data, using an optimistic-concurrency loop: read with version, transform,
 * conditionally write with that version, and retry on version conflicts with exponential
 * backoff (100ms initial, multiplier 2, up to 10 attempts, 2s cap).
 *
 * @param path    Reader group node path.
 * @param updater Function to obtain the new data value from the existing data value.
 * @throws Exception Throws exception thrown from Curator, or from application of the updater method.
 */
private void updateReaderGroupData(String path, Function<ReaderGroupData, ReaderGroupData> updater) throws Exception {
    final long backoffMillis = 100L;
    final int backoffMultiplier = 2;
    final int maxAttempts = 10;
    final long maxBackoffMillis = 2000;
    Stat nodeStat = new Stat();
    Retry.withExpBackoff(backoffMillis, backoffMultiplier, maxAttempts, maxBackoffMillis)
         .retryingOn(KeeperException.BadVersionException.class)
         .throwingOn(Exception.class)
         .run(() -> {
             // Read current value, capturing the node version for the conditional write below.
             byte[] existing = client.getData().storingStatIn(nodeStat).forPath(path);
             ReaderGroupData transformed = updater.apply(groupDataSerializer.deserialize(ByteBuffer.wrap(existing)));
             // Conditional write: fails with BadVersionException (and retries) if anyone
             // modified the node since we read it.
             client.setData()
                   .withVersion(nodeStat.getVersion())
                   .forPath(path, groupDataSerializer.serialize(transformed).array());
             return null;
         });
}
/**
 * Tears down the resources a test created, in dependency order: seal the stream (retried
 * indefinitely, since sealing can fail transiently), delete the stream, delete the reader
 * group, and finally delete the now-empty scope. Each controller call is awaited and its
 * Boolean status asserted before moving on.
 *
 * @param path parameters identify the scope, stream and reader group to remove.
 * @throws InterruptedException if waiting on a controller future is interrupted.
 * @throws ExecutionException   if any controller operation fails.
 */
void cleanUp(String scope, String stream, ReaderGroupManager readerGroupManager, String readerGroupName)
        throws InterruptedException, ExecutionException {
    // Seal first: a stream must be sealed before it can be deleted.
    CompletableFuture<Boolean> sealStreamStatus =
            Retry.indefinitelyWithExpBackoff("Failed to seal stream. retrying ...")
                 .runAsync(() -> controller.sealStream(scope, stream), executorService);
    log.info("Sealing stream {}", stream);
    assertTrue(sealStreamStatus.get());
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, stream);
    log.info("Deleting stream {}", stream);
    assertTrue(deleteStreamStatus.get());
    log.info("Deleting readergroup {}", readerGroupName);
    readerGroupManager.deleteReaderGroup(readerGroupName);
    // The scope can only be deleted once its streams are gone.
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
}
/**
 * Kicks off {@code futureComputation} under an exponential-backoff retry policy built
 * from the given parameters: {@code RetryableException} failures are retried, while
 * {@code NonretryableException} failures are rethrown to the caller.
 *
 * @param delay           initial backoff delay, in milliseconds.
 * @param multiplier      backoff growth factor per attempt.
 * @param attempts        maximum number of attempts.
 * @param maxDelay        cap on the backoff delay, in milliseconds.
 * @param success         whether the underlying computation should eventually succeed.
 * @param executorService executor on which attempts and retries are scheduled.
 * @return the future produced by the retry-wrapped computation.
 */
private CompletableFuture<Integer> retryFuture(final long delay, final int multiplier, final int attempts,
                                               final long maxDelay, final boolean success,
                                               final ScheduledExecutorService executorService) {
    // Reset the shared counters up front so the test can inspect them after the run.
    accumulator.set(0);
    loopCounter.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .runAsync(() -> futureComputation(success, executorService), executorService);
}