/**
 * Creates a new instance of the DurableLog class.
 *
 * @param config              Durable Log Configuration.
 * @param metadata            The StreamSegment Container Metadata for the container which this Durable Log is part of.
 * @param dataFrameLogFactory A DurableDataLogFactory which can be used to create instances of DataFrameLogs.
 * @param readIndex           A ReadIndex which can be used to store newly processed appends.
 * @param executor            The Executor to use for async operations.
 * @throws NullPointerException If any of the arguments are null.
 */
public DurableLog(DurableLogConfig config, UpdateableContainerMetadata metadata, DurableDataLogFactory dataFrameLogFactory,
                  ReadIndex readIndex, ScheduledExecutorService executor) {
    Preconditions.checkNotNull(config, "config");
    this.metadata = Preconditions.checkNotNull(metadata, "metadata");
    Preconditions.checkNotNull(dataFrameLogFactory, "dataFrameLogFactory");
    Preconditions.checkNotNull(readIndex, "readIndex");
    this.executor = Preconditions.checkNotNull(executor, "executor");
    this.durableDataLog = dataFrameLogFactory.createDurableDataLog(metadata.getContainerId());
    assert this.durableDataLog != null : "dataFrameLogFactory created null durableDataLog.";

    this.traceObjectId = String.format("DurableLog[%s]", metadata.getContainerId());
    this.inMemoryOperationLog = createInMemoryLog();
    this.memoryStateUpdater = new MemoryStateUpdater(this.inMemoryOperationLog, readIndex, this::triggerTailReads);
    MetadataCheckpointPolicy checkpointPolicy = new MetadataCheckpointPolicy(config, this::queueMetadataCheckpoint, this.executor);
    this.operationProcessor = new OperationProcessor(this.metadata, this.memoryStateUpdater, this.durableDataLog, checkpointPolicy, executor);
    Services.onStop(this.operationProcessor, this::queueStoppedHandler, this::queueFailedHandler, this.executor);
    this.tailReads = new HashSet<>();
    this.closed = new AtomicBoolean();
    this.delayedStart = new CompletableFuture<>();
    this.delayedStartRetry = Retry.withExpBackoff(config.getStartRetryDelay().toMillis(), 1, Integer.MAX_VALUE)
                                  .retryWhen(ex -> Exceptions.unwrap(ex) instanceof DataLogDisabledException);
}
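Note how the policy above uses a multiplier of 1 and Integer.MAX_VALUE attempts: the "exponential" backoff degenerates to a fixed delay, retried effectively without bound for as long as the data log remains disabled. A minimal sketch of how such a policy might be driven (not the DurableLog implementation; attemptStart is a hypothetical Supplier<CompletableFuture<Void>>):

// Sketch only: re-run a start attempt at a fixed interval while the log is disabled.
// `attemptStart` is an assumed name, not part of the snippet above.
CompletableFuture<Void> started = Retry
        .withExpBackoff(config.getStartRetryDelay().toMillis(), 1, Integer.MAX_VALUE)
        .retryWhen(ex -> Exceptions.unwrap(ex) instanceof DataLogDisabledException)
        .runAsync(attemptStart, this.executor);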
@Override
public CompletableFuture<SegmentRead> read(long offset, int length) {
    Exceptions.checkNotClosed(closed.get(), this);
    WireCommands.ReadSegment request = new WireCommands.ReadSegment(segmentId.getScopedName(), offset, length, this.delegationToken);
    return backoffSchedule.retryWhen(t -> {
        Throwable ex = Exceptions.unwrap(t);
        if (closed.get()) {
            log.debug("Exception while reading from Segment : {}", segmentId, ex);
        } else {
            log.warn("Exception while reading from Segment : {}", segmentId, ex);
        }
        return ex instanceof Exception && !(ex instanceof ConnectionClosedException) && !(ex instanceof SegmentTruncatedException);
    }).runAsync(() -> {
        return getConnection()
                .whenComplete((connection, ex) -> {
                    if (ex != null) {
                        log.warn("Exception while establishing connection with Pravega node", ex);
                        closeConnection(new ConnectionFailedException(ex));
                    }
                }).thenCompose(c -> sendRequestOverConnection(request, c));
    }, connectionFactory.getInternalExecutor());
}
@Test
public void retryPredicateTest() {
    AtomicInteger i = new AtomicInteger(0);
    try {
        Retry.withExpBackoff(10, 10, 10)
             .retryWhen(e -> i.getAndIncrement() != 1) // Retry on the first failure only.
             .run(() -> {
                 throw new Exception("test");
             });
        Assert.fail("Expected the exception to propagate once the predicate returned false.");
    } catch (Exception e) {
        // The predicate was evaluated twice: once returning true (retry), once false (rethrow).
        Assert.assertEquals(2, i.get());
    }
}
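This test pins down the predicate contract that the other snippets rely on: retryWhen retries while the predicate returns true and rethrows once it returns false. Here the first evaluation (i == 0) requests a retry and the second (i == 1) does not, so the original exception propagates and the counter ends at 2.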
        .build()));

Retry.withExpBackoff(500, 2, 10)
     .retryWhen(ex -> true)
     .run(() -> this.streamManager.get().createScope(SCOPE));
val finished = new CompletableFuture<Void>();
val retry = Retry.withExpBackoff(1, 2, expectedCount)
                 .retryWhen(t -> {
                     if (count.get() >= expectedCount) {
                         finished.complete(null);
val waitOn = new CompletableFuture<Void>();
val retry = Retry.withExpBackoff(1, 2, 3)
                 .retryWhen(t -> true);
val error = new AtomicReference<Throwable>();
val p = new SequentialAsyncProcessor(
private void checkReads(HashMap<String, ByteArrayOutputStream> segmentContents, StreamSegmentStore store) {
    for (Map.Entry<String, ByteArrayOutputStream> e : segmentContents.entrySet()) {
        String segmentName = e.getKey();
        byte[] expectedData = e.getValue().toByteArray();
        long segmentLength = store.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength();
        Assert.assertEquals("Unexpected Read Index length for segment " + segmentName, expectedData.length, segmentLength);
        AtomicLong expectedCurrentOffset = new AtomicLong(0);

        // We retry a number of times on StreamSegmentNotExists. It is possible that waitForSegmentsInStorage may have
        // returned successfully because it detected the Segment was complete there, but the internal callback to the
        // ReadIndex (completeMerge) may not yet have been executed. The ReadIndex has a mechanism to cope with this,
        // but it only retries once, after a fixed time interval, which is more than generous on any system.
        // However, on very slow systems, that callback may take a significant amount of time to even begin executing,
        // so trying to read data that was merged from a Transaction may result in a spurious
        // StreamSegmentNotExistsException.
        // This is gracefully handled by retries in AppendProcessor and/or Client, but in this case, we simply have to
        // do the retries ourselves, hoping that the callback eventually executes.
        Retry.withExpBackoff(100, 2, 10, TIMEOUT.toMillis() / 5)
             .retryWhen(ex -> Exceptions.unwrap(ex) instanceof StreamSegmentNotExistsException)
             .run(() -> {
                 checkSegmentReads(segmentName, expectedCurrentOffset, segmentLength, store, expectedData);
                 return null;
             });
    }
}
@Override
public CompletableFuture<Void> processEvent(TestBase event) {
    receivedForProcessing.add(event);
    CompletableFuture<Void> result = new CompletableFuture<>();
    Retry.withExpBackoff(100, 1, 5, 100)
         .retryWhen(RetryableException::isRetryable)
         .runAsync(() -> event.process(null), executor)
         .whenCompleteAsync((r, e) -> {
             if (e != null) {
                 Throwable cause = Exceptions.unwrap(e);
                 if (cause instanceof OperationDisallowedException) {
                     Retry.indefinitelyWithExpBackoff("Error writing event back into requeststream")
                          .runAsync(() -> writer.write(event), executor)
                          .thenAccept(v -> result.completeExceptionally(cause));
                 } else {
                     result.completeExceptionally(cause);
                 }
             } else {
                 result.complete(r);
             }
         }, executor);
    return result;
}
/**
 * Executes the given request on the given FencingTestContext. We retry all expected exceptions, and when we do, we
 * make sure to execute them on the current (active) Segment Store instance (since the previous one may be unusable).
 */
private CompletableFuture<Void> executeWithFencing(StoreRequest request, int index, FencingTestContext context) {
    log.debug("Initiating Operation #{} on iteration {}.", index, context.getIteration());
    AtomicReference<StreamSegmentStore> requestStore = new AtomicReference<>(context.getActiveStore());
    return Retry.withExpBackoff(50, 2, 10, TIMEOUT.toMillis() / 10)
                .retryWhen(ex -> {
                    requestStore.getAndSet(context.getActiveStore());
                    ex = Exceptions.unwrap(ex);
                    log.info("Operation #{} (Iteration = {}) failed due to {}.", index, context.getIteration(), ex.toString());
                    return isExpectedFencingException(ex);
                })
                .runAsync(() -> request.apply(requestStore.get()), executorService());
}
public static <U> CompletableFuture<U> withRetriesAsync(Supplier<CompletableFuture<U>> futureSupplier, Predicate<Throwable> predicate,
                                                        int numOfTries, ScheduledExecutorService executor) {
    return Retry
            .withExpBackoff(100, 2, numOfTries, 10000)
            .retryWhen(predicate)
            .runAsync(futureSupplier, executor);
}
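For illustration, a hypothetical call site for this helper (fetchState and executor are assumed names, not part of the snippet above); it retries the async operation up to numOfTries times, backing off from 100 ms toward the 10 s cap, whenever the predicate matches:

// Sketch only: retry a flaky async lookup on retryable failures.
CompletableFuture<String> state = withRetriesAsync(
        () -> fetchState("scope/stream"),                          // Supplier<CompletableFuture<String>>
        e -> Exceptions.unwrap(e) instanceof RetryableException,   // retry condition
        5,                                                         // numOfTries
        executor);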
private Retry.RetryAndThrowBase<? extends Exception> createRetryPolicy(int maxWriteAttempts, int writeTimeout) {
    int initialDelay = writeTimeout / maxWriteAttempts;
    int maxDelay = writeTimeout * maxWriteAttempts;
    return Retry.withExpBackoff(initialDelay, 2, maxWriteAttempts, maxDelay)
                .retryWhen(ex -> true); // Retry for every exception.
}
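As a worked example of the arithmetic above: with maxWriteAttempts = 5 and writeTimeout = 1000 (ms), this yields initialDelay = 200 ms and maxDelay = 5000 ms, so the delays between successive attempts double from 200 ms (200, 400, 800, 1600 ms) without ever reaching the cap.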
public static <U> U withRetries(Supplier<U> supplier, Predicate<Throwable> predicate, int numOfTries) {
    return Retry.withExpBackoff(100, 2, numOfTries, 1000)
                .retryWhen(predicate)
                .run(supplier::get);
}
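A hypothetical call site for the synchronous variant (readConfigValue is an assumed name); unlike withRetriesAsync it blocks the calling thread between attempts:

// Sketch only: retry a synchronous read up to 3 times on IOException,
// backing off from 100 ms with a 1000 ms cap.
String value = withRetries(
        () -> readConfigValue("retention.policy"),
        e -> Exceptions.unwrap(e) instanceof IOException,
        3);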