/**
 * Reads from the given StreamSegment directly out of Storage, retrying transient failures
 * via {@code READ_RETRY}. The segment's metadata is fetched first (within the remaining
 * timeout budget) and then handed to {@link StreamSegmentStorageReader} to produce the result.
 *
 * @param streamSegmentName Name of the StreamSegment to read from.
 * @param offset            Offset within the segment to start reading at.
 * @param maxLength         Maximum number of bytes to read.
 * @param timeout           Overall timeout for the operation.
 * @return A future completing with the {@link ReadResult}.
 */
@Override
public CompletableFuture<ReadResult> read(String streamSegmentName, long offset, int maxLength, Duration timeout) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    final TimeoutTimer remaining = new TimeoutTimer(timeout);
    return READ_RETRY.run(() ->
            getStreamSegmentInfo(streamSegmentName, remaining.getRemaining())
                    .thenApply(segmentInfo ->
                            StreamSegmentStorageReader.read(segmentInfo, offset, maxLength, MAX_READ_AT_ONCE_BYTES, this.storage)));
}
/**
 * Fetches the properties of the given StreamSegment from Storage, retrying transient
 * failures via {@code READ_RETRY}.
 *
 * @param streamSegmentName Name of the StreamSegment to query.
 * @param timeout           Timeout for the operation.
 * @return A future completing with the segment's {@link SegmentProperties}.
 */
@Override
public CompletableFuture<SegmentProperties> getStreamSegmentInfo(String streamSegmentName, Duration timeout) {
    Exceptions.checkNotClosed(closed.get(), this);
    return READ_RETRY.run(() -> storage.getStreamSegmentInfo(streamSegmentName, timeout));
}
/**
 * Applies {@code updater} to the reader group data stored at {@code path} and writes the
 * result back using a conditional (versioned) update. The read-modify-write cycle is retried
 * with exponential backoff (100ms initial, x2 multiplier, 2000ms cap) for up to 10 attempts
 * whenever a concurrent writer wins the race (BadVersionException).
 *
 * @param path    Reader group node path.
 * @param updater Function to obtain the new data value from the existing data value.
 * @throws Exception Propagated from Curator, or from the application of the updater method.
 */
private void updateReaderGroupData(String path, Function<ReaderGroupData, ReaderGroupData> updater) throws Exception {
    final long initialMillis = 100L;
    final int multiplier = 2;
    final int attempts = 10;
    final long maxDelay = 2000;
    final Stat stat = new Stat();
    Retry.withExpBackoff(initialMillis, multiplier, attempts, maxDelay)
         .retryingOn(KeeperException.BadVersionException.class)
         .throwingOn(Exception.class)
         .run(() -> {
             // Read the current value, remembering its version in 'stat'.
             byte[] current = client.getData().storingStatIn(stat).forPath(path);
             ReaderGroupData updated = updater.apply(groupDataSerializer.deserialize(ByteBuffer.wrap(current)));
             // Conditional write: fails (and retries) with BadVersionException if the node
             // was modified since we read it.
             client.setData()
                   .withVersion(stat.getVersion())
                   .forPath(path, groupDataSerializer.serialize(updated).array());
             return null;
         });
}
.retryingOn(StoreException.DataNotFoundException.class) .throwingOn(IllegalStateException.class) .run(() -> { Futures.getAndHandleExceptions(streamStore.getConfiguration(SCOPE, stream1, null, executor), CompletionException::new); .retryingOn(IllegalStateException.class) .throwingOn(RuntimeException.class) .run(() -> { Futures.getAndHandleExceptions( streamStore.getConfiguration(SCOPE, stream1, null, executor)
/**
 * Conditionally appends the given data at the expected offset, retrying on connection failures.
 * A single append sequence number is drawn up front so that retries of the same logical append
 * reuse it, which lets the server-side setup reply deduplicate already-acked appends.
 *
 * NOTE(review): whether true on the early-return path is the correct result (vs. the actual
 * conditional-append outcome) depends on server-side dedup semantics not visible here — confirm.
 */
@Override
public boolean write(ByteBuffer data, long expectedOffset) throws SegmentSealedException {
    synchronized (lock) {
        // Used to preserve order.
        long appendSequence = requestIdGenerator.get();
        return retrySchedule.retryingOn(ConnectionFailedException.class)
                            .throwingOn(SegmentSealedException.class)
                            .run(() -> {
                                // (Re-)establish the connection and perform append setup if needed.
                                if (client == null || client.isClosed()) {
                                    client = new RawClient(controller, connectionFactory, segmentId);
                                    long requestId = requestIdGenerator.get();
                                    log.debug("Setting up append on segment: {}", segmentId);
                                    SetupAppend setup = new SetupAppend(requestId, writerId, segmentId.getScopedName(), delegationToken);
                                    val reply = client.sendRequest(requestId, setup);
                                    AppendSetup appendSetup = transformAppendSetup(reply.join());
                                    // If the server has already seen this sequence number, the
                                    // append from a previous (failed) attempt went through.
                                    if (appendSetup.getLastEventNumber() >= appendSequence) {
                                        return true;
                                    }
                                }
                                val request = new ConditionalAppend(writerId, appendSequence, expectedOffset, new Event(Unpooled.wrappedBuffer(data)));
                                val reply = client.sendRequest(appendSequence, request);
                                return transformDataAppended(reply.join());
                            });
    }
}
/**
 * Drives a counting workload through Retry.withExpBackoff and returns the accumulated total.
 * Each iteration adds the current loop counter to the accumulator; on every 10th iteration it
 * either returns the accumulated value ({@code success == true}) or throws a
 * NonretryableException, and on all other iterations it throws a RetryableException to force
 * another retry attempt.
 */
private int retry(long delay, int multiplier, int attempts, long maxDelay, boolean success) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .run(() -> {
                    accumulator.getAndAdd(loopCounter.getAndIncrement());
                    int iteration = loopCounter.get();
                    log.debug("Loop counter = " + iteration);
                    // Keep retrying until we hit a multiple of 10.
                    if (iteration % 10 != 0) {
                        throw new RetryableException();
                    }
                    if (!success) {
                        throw new NonretryableException();
                    }
                    return accumulator.get();
                });
}
/**
 * Reads up to {@code length} bytes from the given segment at {@code offset} into
 * {@code buffer} starting at {@code bufferOffset}, retrying transient failures via
 * {@code HDFS_RETRY} and reporting read latency/byte metrics on success.
 *
 * @return The number of bytes actually read.
 * @throws ArrayIndexOutOfBoundsException If the offsets/length do not describe a valid
 *                                        slice of {@code buffer}.
 * @throws StreamSegmentException         If the underlying HDFS operation fails (converted
 *                                        from IOException, including exhausted retries).
 */
@Override
public int read(SegmentHandle handle, long offset, byte[] buffer, int bufferOffset, int length) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "read", handle, offset, length);
    // Validate the buffer slice. Note: written as 'length > buffer.length - bufferOffset'
    // (rather than 'buffer.length < bufferOffset + length') so the check cannot overflow int
    // and silently pass for very large bufferOffset + length values; by this point all three
    // operands are known non-negative, so the subtraction is safe.
    if (offset < 0 || bufferOffset < 0 || length < 0 || length > buffer.length - bufferOffset) {
        throw new ArrayIndexOutOfBoundsException(String.format(
                "Offset (%s) must be non-negative, and bufferOffset (%s) and length (%s) must be valid indices into buffer of size %s.",
                offset, bufferOffset, length, buffer.length));
    }
    Timer timer = new Timer();
    try {
        return HDFS_RETRY.run(() -> {
            int totalBytesRead = readInternal(handle, buffer, offset, bufferOffset, length);
            HDFSMetrics.READ_LATENCY.reportSuccessEvent(timer.getElapsed());
            HDFSMetrics.READ_BYTES.add(totalBytesRead);
            LoggerHelpers.traceLeave(log, "read", traceId, handle, offset, totalBytesRead);
            return totalBytesRead;
        });
    } catch (IOException e) {
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), e);
    } catch (RetriesExhaustedException e) {
        // Unwrap the retry wrapper so callers see the root HDFS failure.
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), e.getCause());
    }
}
/**
 * Gets the properties (name, length, sealed status) of the given segment from HDFS,
 * retrying transient failures via {@code HDFS_RETRY}. IOExceptions — including the root
 * cause of exhausted retries — are converted into StreamSegmentExceptions.
 *
 * @param streamSegmentName Name of the StreamSegment to query.
 * @return The segment's {@link SegmentProperties}.
 * @throws StreamSegmentException If the underlying HDFS operation fails.
 */
@Override
public SegmentProperties getStreamSegmentInfo(String streamSegmentName) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "getStreamSegmentInfo", streamSegmentName);
    try {
        return HDFS_RETRY.run(() -> {
            FileStatus status = findStatusForSegment(streamSegmentName, true);
            boolean sealed = isSealed(status.getPath());
            StreamSegmentInformation info = StreamSegmentInformation.builder()
                    .name(streamSegmentName)
                    .length(status.getLen())
                    .sealed(sealed)
                    .build();
            LoggerHelpers.traceLeave(log, "getStreamSegmentInfo", traceId, streamSegmentName, info);
            return info;
        });
    } catch (IOException e) {
        throw HDFSExceptionHelpers.convertException(streamSegmentName, e);
    } catch (RetriesExhaustedException e) {
        throw HDFSExceptionHelpers.convertException(streamSegmentName, e.getCause());
    }
}