/**
 * Shields the caller from any failure of the given future: the exception is logged with the
 * supplied message and the future completes normally with the fallback value instead.
 *
 * @param future            The future whose failures should be suppressed.
 * @param returnOnException The value to complete with when {@code future} fails.
 * @param message           The message to log alongside the exception.
 * @return A future that completes with {@code future}'s result, or {@code returnOnException} on failure.
 */
private <R> CompletableFuture<R> suppressException(CompletableFuture<R> future, R returnOnException, String message) {
    return Futures.exceptionallyExpecting(
            future,
            throwable -> {
                // Log as a side effect; returning true accepts (suppresses) every exception type.
                log.warn(message, throwable);
                return true;
            },
            returnOnException);
}
/**
 * Retrieves the data for all active transactions in the given epoch. Transactions that disappear
 * between listing and reading (concurrent completion) are silently omitted from the result.
 *
 * @param epoch The epoch whose transactions should be fetched.
 * @return A future containing a map of transaction id to its data.
 */
@Override
public CompletableFuture<Map<String, Data>> getTxnInEpoch(int epoch) {
    // A missing epoch node simply means there are no transactions in this epoch.
    return Futures.exceptionallyExpecting(
            store.getChildren(getEpochPath(epoch)),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException,
            Collections.emptyList())
                  .thenCompose(ids -> {
                      // Read every transaction's data concurrently. A transaction removed after the
                      // listing resolves to the EMPTY_DATA sentinel, which is filtered out below.
                      Map<String, CompletableFuture<Data>> dataFutures = ids.stream().collect(Collectors.toMap(
                              txId -> txId,
                              txId -> Futures.exceptionallyExpecting(
                                      store.getData(getActiveTxPath(epoch, txId)),
                                      e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException,
                                      EMPTY_DATA)));
                      return Futures.allOfWithResults(dataFutures)
                                    .thenApply(resolved -> resolved.entrySet().stream()
                                                                   .filter(entry -> !entry.getValue().equals(EMPTY_DATA))
                                                                   .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
                  });
}
/**
 * Seals this segment in Storage.
 *
 * @param timeout Timeout for the operation.
 * @return A future that completes when the seal is done (or the segment was already sealed).
 */
@Override
public CompletableFuture<Void> seal(@NonNull Duration timeout) {
    ensureInitialized();
    CompletableFuture<Void> sealFuture = this.storage
            .seal(this.handle.get(), timeout)
            .thenRun(() -> log.info("{}: Sealed.", this.traceObjectId));
    // An already-sealed segment is an acceptable outcome: swallow StreamSegmentSealedException.
    return Futures.exceptionallyExpecting(sealFuture, ex -> ex instanceof StreamSegmentSealedException, null);
}
/**
 * List the streams in scope.
 *
 * @param scopeName Name of scope
 * @return A map of streams in scope to their configs.
 */
@Override
public CompletableFuture<Map<String, StreamConfiguration>> listStreamsInScope(final String scopeName) {
    return getScope(scopeName).listStreamsInScope().thenCompose(streams -> {
        HashMap<String, CompletableFuture<Optional<StreamConfiguration>>> configFutures = new HashMap<>();
        for (String streamName : streams) {
            Stream stream = getStream(scopeName, streamName, null);
            // A stream whose configuration has vanished (e.g. concurrent delete) maps to an empty
            // Optional and is dropped from the result below.
            // NOTE(review): this predicate tests the raw exception without Exceptions.unwrap() —
            // confirm that exceptionallyExpecting hands the predicate the unwrapped cause.
            CompletableFuture<Optional<StreamConfiguration>> configFuture = Futures.exceptionallyExpecting(
                    stream.getConfiguration(),
                    e -> e instanceof StoreException.DataNotFoundException, null)
                    .thenApply(Optional::ofNullable);
            configFutures.put(stream.getName(), configFuture);
        }
        return Futures.allOfWithResults(configFutures)
                      .thenApply(resolved -> resolved.entrySet().stream()
                                                     .filter(entry -> entry.getValue().isPresent())
                                                     .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().get())));
    });
}
/**
 * Ensures the Storage segment backing this object exists before executing the given action.
 * If no handle has been acquired yet, the segment is created first (using the rollover size
 * from the segment's attributes, if set); otherwise the action runs immediately.
 *
 * @param toRun   Supplier of the action to execute once the segment is known to exist.
 * @param timeout Timeout for the create operation.
 * @return A future that completes when the action finishes.
 */
private CompletableFuture<Void> createSegmentIfNecessary(Supplier<CompletableFuture<Void>> toRun, Duration timeout) {
    if (this.handle.get() == null) {
        // No handle so, the segment must not exist yet. Attempt to create it, then run what we wanted to.
        assert this.metadata.getStorageLength() == 0 : "no handle yet but metadata indicates Storage Segment not empty";
        // A negative (unset) rollover size disables rolling; otherwise build a policy from it.
        long rolloverSize = this.metadata.getAttributes().getOrDefault(Attributes.ROLLOVER_SIZE, -1L);
        SegmentRollingPolicy rollingPolicy = rolloverSize < 0 ? SegmentRollingPolicy.NO_ROLLING : new SegmentRollingPolicy(rolloverSize);
        // NOTE(review): if create() loses a race and fails with StreamSegmentExistsException, the
        // expected-exception fallback yields a null handle, which is then stored — confirm that
        // downstream code tolerates (or re-acquires) a null handle in that case.
        // NOTE(review): thenComposeAsync without an explicit executor runs on the common pool,
        // unlike sibling methods that pass this.executor — confirm this is intentional.
        return Futures
                .exceptionallyExpecting(
                        this.storage.create(this.metadata.getName(), rollingPolicy, timeout),
                        ex -> ex instanceof StreamSegmentExistsException, null)
                .thenComposeAsync(handle -> {
                    this.handle.set(handle);
                    return toRun.get();
                });
    } else {
        // Segment already exists. Execute what we were supposed to.
        return toRun.get();
    }
}
/**
 * Handles expected Attribute-related exceptions. Since the attribute index is a separate segment from the main one,
 * it is highly likely that it may get temporarily out of sync with the main one, thus causing spurious StreamSegmentSealedExceptions
 * or StreamSegmentNotExistsExceptions. If we get either of those, and they are consistent with our current state, the
 * we can safely ignore them; otherwise we should be rethrowing them.
 */
private CompletableFuture<Void> handleAttributeException(CompletableFuture<Void> future) {
    return Futures.exceptionallyExpecting(
            future,
            ex -> {
                // Sealed is expected only if our own metadata already says "sealed in storage".
                boolean expectedSeal = ex instanceof StreamSegmentSealedException
                        && this.metadata.isSealedInStorage();
                // Not-exists/merged is expected only if we know the segment is merged or deleted.
                boolean expectedGone = (ex instanceof StreamSegmentNotExistsException
                        || ex instanceof StreamSegmentMergedException)
                        && (this.metadata.isMerged() || this.metadata.isDeleted());
                return expectedSeal || expectedGone;
            },
            null);
}
/**
 * List the streams in scope.
 *
 * @param scopeName Name of scope
 * @return A map of streams in scope to their configurations.
 */
@Override
@Synchronized
public CompletableFuture<Map<String, StreamConfiguration>> listStreamsInScope(final String scopeName) {
    InMemoryScope inMemoryScope = scopes.get(scopeName);
    if (inMemoryScope == null) {
        return Futures.failedFuture(StoreException.create(StoreException.Type.DATA_NOT_FOUND, scopeName));
    }
    // Fetch every stream's configuration via future composition instead of calling join() inside
    // the callback (the original blocked the callback thread on each configuration in turn).
    // Streams whose configuration is missing (e.g. deleted concurrently) are skipped.
    return inMemoryScope.listStreamsInScope()
            .thenCompose(streams -> {
                HashMap<String, CompletableFuture<Optional<StreamConfiguration>>> configFutures = new HashMap<>();
                for (String stream : streams) {
                    configFutures.put(stream, Futures.exceptionallyExpecting(
                            getConfiguration(scopeName, stream, null, executor),
                            e -> e instanceof StoreException.DataNotFoundException, null)
                            .thenApply(Optional::ofNullable));
                }
                return Futures.allOfWithResults(configFutures)
                              .thenApply(resolved -> resolved.entrySet().stream()
                                                             .filter(entry -> entry.getValue().isPresent())
                                                             .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().get())));
            });
}
/**
 * Deletes this Segment from Storage, then deletes any source Segments with pending merges into it
 * (to avoid orphaning them), and finally resets all in-memory pending-operation state.
 *
 * @param timer Timer tracking the time remaining for the whole operation.
 * @return A future containing an empty WriterFlushResult once deletion and cleanup complete.
 */
private CompletableFuture<WriterFlushResult> deleteSegment(TimeoutTimer timer) {
    // Delete the Segment from Storage, but also delete any source Segments that had pending mergers. If we do not,
    // we will be left with orphaned Segments in Storage.
    CompletableFuture<Void> deleteFuture;
    if (this.handle.get() == null) {
        // Segment does not exist in Storage.
        deleteFuture = CompletableFuture.completedFuture(null);
    } else {
        // An already-deleted Segment is acceptable: treat StreamSegmentNotExistsException as success.
        deleteFuture = Futures
                .exceptionallyExpecting(
                        this.storage.delete(this.handle.get(), timer.getRemaining()),
                        ex -> ex instanceof StreamSegmentNotExistsException, null);
    }

    return deleteFuture
            .thenComposeAsync(v -> deleteUnmergedSourceSegments(timer), this.executor)
            .thenApplyAsync(v -> {
                updateMetadataPostDeletion(this.metadata);
                // Once the Segment is gone there is nothing left to seal, delete, truncate or merge,
                // so clear every pending flag/counter and drop all queued operations.
                this.hasSealPending.set(false);
                this.hasDeletePending.set(false);
                this.truncateCount.set(0);
                this.mergeTransactionCount.set(0);
                this.operations.clear(); // No point in executing any other operation now.
                return new WriterFlushResult();
            }, this.executor);
}
/** * Attempts to seal a Segment that may already be sealed. * * @param metadata The SegmentMetadata for the Segment to Seal. * @param timeout Timeout for the operation. * @return A CompletableFuture that will indicate when the operation completes. If the given segment is already sealed, * this future will already be completed, otherwise it will complete once the seal is performed. */ private CompletableFuture<Void> trySealStreamSegment(SegmentMetadata metadata, Duration timeout) { if (metadata.isSealed()) { return CompletableFuture.completedFuture(null); } else { // It is OK to ignore StreamSegmentSealedException as the segment may have already been sealed by a concurrent // call to this or via some other operation. return Futures.exceptionallyExpecting( this.durableLog.add(new StreamSegmentSealOperation(metadata.getId()), timeout), ex -> ex instanceof StreamSegmentSealedException, null); } }
/**
 * Deletes all the Attribute data associated with the given Segment.
 *
 * @param segmentName The name of the Segment whose attribute data should be deleted.
 * @param storage     A Storage Adapter to execute the deletion on.
 * @param timeout     Timeout for the operation.
 * @return A CompletableFuture that, when completed, will indicate that the operation finished successfully.
 */
static CompletableFuture<Void> delete(String segmentName, Storage storage, Duration timeout) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    String attributeSegmentName = StreamSegmentNameUtils.getAttributeSegmentName(segmentName);
    CompletableFuture<Void> deletion = storage
            .openWrite(attributeSegmentName)
            .thenCompose(handle -> storage.delete(handle, timer.getRemaining()));
    // An attribute segment that was never created (or is already gone) counts as success.
    return Futures.exceptionallyExpecting(deletion, ex -> ex instanceof StreamSegmentNotExistsException, null);
}
/**
 * Deletes from Storage the source Segments of all pending MergeSegmentOperations queued ahead of
 * the delete operation, updating each source's metadata once its deletion completes.
 *
 * @param timer Timer tracking the time remaining for the whole operation.
 * @return A future that completes when all source-segment deletions have finished.
 */
private CompletableFuture<Void> deleteUnmergedSourceSegments(TimeoutTimer timer) {
    if (this.mergeTransactionCount.get() == 0) {
        // No pending merges; nothing to clean up.
        return CompletableFuture.completedFuture(null);
    }

    // Identify all MergeSegmentOperations, pick up their names and delete them.
    // NOTE(review): the loop only advances (removeFirst) when the head operation is a
    // MergeSegmentOperation — presumably an invariant guarantees that only merges precede the
    // delete here; otherwise a non-merge, non-delete head op would make this loop spin. Confirm.
    ArrayList<CompletableFuture<Void>> toDelete = new ArrayList<>();
    StorageOperation op;
    while ((op = this.operations.getFirst()) != null && !(isDeleteOperation(op))) {
        if (op instanceof MergeSegmentOperation) {
            // Found such a merge; get the source Segment's name and attempt to delete it. It's OK if it has already
            // been deleted.
            UpdateableSegmentMetadata m = this.dataSource.getStreamSegmentMetadata(((MergeSegmentOperation) op).getSourceSegmentId());
            toDelete.add(Futures
                    .exceptionallyExpecting(
                            this.storage.openWrite(m.getName())
                                        .thenCompose(handle -> this.storage.delete(handle, timer.getRemaining()))
                                        .thenAcceptAsync(v -> updateMetadataPostDeletion(m), this.executor),
                            ex -> ex instanceof StreamSegmentNotExistsException, null));
            this.operations.removeFirst();
            this.mergeTransactionCount.decrementAndGet();
            assert this.mergeTransactionCount.get() >= 0;
        }
    }

    return Futures.allOf(toDelete);
}
/**
 * Best-effort removal of both the main segment and its companion attribute segment from Storage.
 * Segments that are already gone are ignored; blocks until both deletions complete.
 */
private void cleanup() {
    // Main segment first; it may already have been deleted, which is fine.
    String mainSegmentName = containerMetadata.getStreamSegmentMetadata(segmentId).getName();
    Futures.exceptionallyExpecting(
            this.storage.openWrite(mainSegmentName)
                        .thenCompose(handle -> this.storage.delete(handle, TIMEOUT)),
            ex -> ex instanceof StreamSegmentNotExistsException, null)
           .join();
    // Then the attribute segment, with the same tolerance for its absence.
    Futures.exceptionallyExpecting(
            this.storage.openWrite(attributeSegmentName)
                        .thenCompose(handle -> this.storage.delete(handle, TIMEOUT)),
            ex -> ex instanceof StreamSegmentNotExistsException, null)
           .join();
}
.exceptionallyExpecting(deleteSegment, ex -> ex instanceof StreamSegmentNotExistsException, null) .thenComposeAsync(ignored -> clearSegmentInfo(segmentName, timer.getRemaining()), this.executor); if (log.isTraceEnabled()) {
/**
 * Polls Storage (via the given read-only store) until the Segment's state there catches up with
 * the expected properties: sealed status if the expected Segment is sealed, otherwise its length.
 * Fails with a TimeoutException if the condition is not met within TIMEOUT.
 *
 * @param sp            The expected (in-memory) Segment properties to wait for.
 * @param readOnlyStore Store through which Storage state is observed.
 * @return A future that completes when Storage has caught up, or fails on timeout.
 */
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties sp, StreamSegmentStore readOnlyStore) {
    if (sp.getLength() == 0) {
        // Empty segments may or may not exist in Storage, so don't bother complicating ourselves with this.
        return CompletableFuture.completedFuture(null);
    }

    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    // tryAgain drives the loop: set to false once Storage matches the expected state.
    AtomicBoolean tryAgain = new AtomicBoolean(true);
    return Futures.loop(
            tryAgain::get,
            () -> Futures
                    // A not-yet-created segment is reported as an empty placeholder so the
                    // comparison below simply fails and we retry.
                    .exceptionallyExpecting(readOnlyStore.getStreamSegmentInfo(sp.getName(), TIMEOUT),
                            ex -> ex instanceof StreamSegmentNotExistsException,
                            StreamSegmentInformation.builder().name(sp.getName()).build())
                    .thenCompose(storageProps -> {
                        if (sp.isSealed()) {
                            // Sealed segments are done once Storage also reports them sealed.
                            tryAgain.set(!storageProps.isSealed());
                        } else {
                            // Otherwise wait for the lengths to match.
                            tryAgain.set(sp.getLength() != storageProps.getLength());
                        }

                        if (tryAgain.get() && !timer.hasRemaining()) {
                            return Futures.<Void>failedFuture(new TimeoutException(
                                    String.format("Segment %s did not complete in Storage in the allotted time.", sp.getName())));
                        } else {
                            // Back off briefly before the next poll.
                            return Futures.delayedFuture(Duration.ofMillis(100), executorService());
                        }
                    }),
            executorService());
}
/**
 * Polls Storage until the Segment there meets all expected conditions: same sealed status, at
 * least the expected length, and a truncation offset at or beyond the expected start offset.
 * Fails with a TimeoutException if the conditions are not met within TIMEOUT.
 *
 * @param metadataProps The expected (metadata) Segment properties to wait for.
 * @param context       Test context providing the Storage adapter and truncation offsets.
 * @return A future that completes when Storage has caught up, or fails on timeout.
 */
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties metadataProps, TestContext context) {
    if (metadataProps.getLength() == 0) {
        // Empty segments may or may not exist in Storage, so don't bother complicating ourselves with this.
        return CompletableFuture.completedFuture(null);
    }

    // All three conditions must hold for Storage to be considered caught up.
    Function<SegmentProperties, Boolean> meetsConditions = storageProps ->
            storageProps.isSealed() == metadataProps.isSealed()
                    && storageProps.getLength() >= metadataProps.getLength()
                    && context.storageFactory.truncationOffsets.getOrDefault(metadataProps.getName(), 0L) >= metadataProps.getStartOffset();

    // canContinue drives the loop: cleared once the conditions are met.
    AtomicBoolean canContinue = new AtomicBoolean(true);
    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    return Futures.loop(
            canContinue::get,
            () -> Futures.exceptionallyExpecting(
                    // A not-yet-created segment is reported as an empty placeholder so the
                    // conditions simply evaluate to false and we retry.
                    context.storage.getStreamSegmentInfo(metadataProps.getName(), TIMEOUT),
                    ex -> ex instanceof StreamSegmentNotExistsException,
                    StreamSegmentInformation.builder().name(metadataProps.getName()).build())
                         .thenCompose(storageProps -> {
                             if (meetsConditions.apply(storageProps)) {
                                 canContinue.set(false);
                                 return CompletableFuture.completedFuture(null);
                             } else if (!timer.hasRemaining()) {
                                 return Futures.failedFuture(new TimeoutException());
                             } else {
                                 // Back off briefly before the next poll.
                                 return Futures.delayedFuture(Duration.ofMillis(10), executorService());
                             }
                         }).thenRun(Runnables.doNothing()),
            executorService());
}
Futures.exceptionallyExpecting( context.storage.openWrite(a.getMetadata().getName()) .thenCompose(handle -> context.storage.delete(handle, TIMEOUT)),