/**
 * Convenience for {@link #writeLoggerP(DistributedFunction)} that uses
 * {@code toString()} as {@code toStringFn}.
 */
@Nonnull
public static ProcessorMetaSupplier writeLoggerP() {
    // Format each item with its own toString() representation.
    return writeLoggerP(item -> item.toString());
}
/**
 * Creates and returns the file {@link Sink} with the supplied components.
 */
public Sink<T> build() {
    // The sink name embeds the target directory for easier DAG diagnostics.
    String sinkName = "filesSink(" + directoryName + ')';
    return Sinks.fromProcessor(sinkName,
            writeFileP(directoryName, toStringFn, charset, append));
}
}
/**
 * Convenience for {@link #map(String, Predicate, Projection)}
 * which uses a {@link DistributedFunction} as the projection function.
 */
@Nonnull
public static <T, K, V> BatchSource<T> map(
        @Nonnull String mapName,
        @Nonnull Predicate<? super K, ? super V> predicate,
        @Nonnull DistributedFunction<? super Map.Entry<K, V>, ? extends T> projectionFn
) {
    // The source name embeds the map name for easier DAG diagnostics.
    String sourceName = "mapSource(" + mapName + ')';
    return batchFromProcessor(sourceName, readMapP(mapName, predicate, projectionFn));
}
/**
 * Creates a {@code ProcessorTransform} named {@code mapUsingContext} that
 * wraps {@code mapUsingContextP(contextFactory, mapFn)} and attaches it
 * downstream of {@code upstream}.
 */
public static <C, T, R> ProcessorTransform mapUsingContextTransform(
        @Nonnull Transform upstream,
        @Nonnull ContextFactory<C> contextFactory,
        @Nonnull DistributedBiFunction<? super C, ? super T, ? extends R> mapFn
) {
    String name = "mapUsingContext";
    return new ProcessorTransform(name, upstream, mapUsingContextP(contextFactory, mapFn));
}
/**
 * Creates a {@code ProcessorTransform} named {@code flatMapUsingContext}
 * that wraps {@code flatMapUsingContextP(contextFactory, flatMapFn)} and
 * attaches it downstream of {@code upstream}.
 */
public static <C, T, R> ProcessorTransform flatMapUsingContextTransform(
        @Nonnull Transform upstream,
        @Nonnull ContextFactory<C> contextFactory,
        @Nonnull DistributedBiFunction<? super C, ? super T, ? extends Traverser<? extends R>> flatMapFn
) {
    String name = "flatMapUsingContext";
    return new ProcessorTransform(name, upstream, flatMapUsingContextP(contextFactory, flatMapFn));
}
@Override
public void addToDag(Planner p) {
    // Register a map vertex under a unique name, then wire the incoming edges.
    String vertexName = p.uniqueVertexName(name());
    PlannerVertex pv = p.addVertex(this, vertexName, localParallelism(), mapP(mapFn()));
    p.addEdges(this, pv.v);
}
}
@Override
public void addToDag(Planner p) {
    // Register a flatMap vertex under a unique name, then wire the incoming edges.
    String vertexName = p.uniqueVertexName(name());
    PlannerVertex pv = p.addVertex(this, vertexName, localParallelism(), flatMapP(flatMapFn()));
    p.addEdges(this, pv.v);
}
}
/**
 * Convenience for {@link #peekSnapshotP(DistributedFunction,
 * DistributedPredicate, ProcessorMetaSupplier) peekSnapshot(toStringFn,
 * shouldLogFn, metaSupplier)} with a pass-through filter and {@code
 * Object#toString} as the formatting function. This variant accepts a
 * {@code ProcessorSupplier} instead of a meta-supplier.
 */
@Nonnull
public static ProcessorSupplier peekSnapshotP(@Nonnull ProcessorSupplier wrapped) {
    return peekSnapshotP(Object::toString, alwaysTrue(), wrapped);
}
}
public static <C, T, R> ProcessorTransform flatMapUsingContextAsyncTransform( @Nonnull Transform upstream, @Nonnull String operationName, @Nonnull ContextFactory<C> contextFactory, @Nonnull DistributedBiFunction<? super C, ? super T, CompletableFuture<Traverser<R>>> flatMapAsyncFn ) { // TODO use better key so that snapshots are local. Currently they will // be sent to a random member. We keep it this way for simplicity: // the number of in-flight items is limited (maxAsyncOps) return new ProcessorTransform(operationName + "UsingContextAsync", upstream, flatMapUsingContextAsyncP(contextFactory, Object::hashCode, flatMapAsyncFn)); }
/**
 * Convenience for {@link #peekOutputP(DistributedFunction,
 * DistributedPredicate, ProcessorMetaSupplier) peekOutput(toStringFn,
 * shouldLogFn, metaSupplier)} with a pass-through filter and {@code
 * Object#toString} as the formatting function.
 */
@Nonnull
public static ProcessorMetaSupplier peekOutputP(@Nonnull ProcessorMetaSupplier wrapped) {
    return peekOutputP(Object::toString, alwaysTrue(), wrapped);
}
/**
 * Convenience for {@link #peekInputP(DistributedFunction,
 * DistributedPredicate, ProcessorMetaSupplier) peekInput(toStringFn,
 * shouldLogFn, metaSupplier)} with a pass-through filter and {@code
 * Object#toString} as the formatting function. This variant accepts a
 * {@code ProcessorSupplier} instead of a meta-supplier.
 */
@Nonnull
public static ProcessorSupplier peekInputP(@Nonnull ProcessorSupplier wrapped) {
    // Log every item, rendered with its own toString() representation.
    return peekInputP(item -> item.toString(), alwaysTrue(), wrapped);
}
/**
 * Returns a supplier of processors for
 * {@link Sources#map(String, Predicate, DistributedFunction)}.
 * Adapts {@code projectionFn} via {@code toProjection} before delegating
 * to {@code ReadWithPartitionIteratorP.readMapSupplier}.
 */
@Nonnull
public static <T, K, V> ProcessorMetaSupplier readMapP(
        @Nonnull String mapName,
        @Nonnull Predicate<? super K, ? super V> predicate,
        @Nonnull DistributedFunction<? super Entry<K, V>, ? extends T> projectionFn
) {
    return ReadWithPartitionIteratorP
            .readMapSupplier(mapName, predicate, toProjection(projectionFn));
}
/**
 * Creates a {@code ProcessorTransform} named {@code filterUsingContext}
 * that wraps {@code filterUsingContextP(contextFactory, filterFn)} and
 * attaches it downstream of {@code upstream}.
 */
public static <C, T> ProcessorTransform filterUsingContextTransform(
        @Nonnull Transform upstream,
        @Nonnull ContextFactory<C> contextFactory,
        @Nonnull DistributedBiPredicate<? super C, ? super T> filterFn
) {
    String name = "filterUsingContext";
    return new ProcessorTransform(name, upstream, filterUsingContextP(contextFactory, filterFn));
}
/**
 * Returns a supplier of processors for
 * {@link Sources#cacheJournal(String, JournalInitialPosition)}.
 * Delegates to the full {@code streamCacheP} overload with the default
 * put-event filter and event-to-entry projection.
 */
@Nonnull
public static <K, V> ProcessorMetaSupplier streamCacheP(
        @Nonnull String cacheName,
        @Nonnull JournalInitialPosition initialPos,
        @Nonnull EventTimePolicy<? super Entry<K, V>> eventTimePolicy
) {
    return streamCacheP(
            cacheName, cachePutEvents(), cacheEventToEntry(), initialPos, eventTimePolicy);
}
@Override
public void addToDag(Planner p) {
    // Register a watermark-inserting vertex under a unique name,
    // then wire the incoming edges.
    String vertexName = p.uniqueVertexName(name());
    @SuppressWarnings("unchecked")
    PlannerVertex pv = p.addVertex(
            this, vertexName, localParallelism(), insertWatermarksP(eventTimePolicy));
    p.addEdges(this, pv.v);
}
@Override
public void addToDag(Planner p) {
    // Register a filter vertex under a unique name, then wire the incoming edges.
    String vertexName = p.uniqueVertexName(name());
    PlannerVertex pv = p.addVertex(this, vertexName, localParallelism(), filterP(filterFn()));
    p.addEdges(this, pv.v);
}
}
/**
 * Creates a {@code PartitionedProcessorTransform} named {@code
 * mapUsingPartitionedContext} that wraps {@code
 * mapUsingContextP(contextFactory, mapFn)}, partitioned by
 * {@code partitionKeyFn}, downstream of {@code upstream}.
 */
public static <C, T, K, R> PartitionedProcessorTransform<T, K> mapUsingContextPartitionedTransform(
        @Nonnull Transform upstream,
        @Nonnull ContextFactory<C> contextFactory,
        @Nonnull DistributedBiFunction<? super C, ? super T, ? extends R> mapFn,
        @Nonnull DistributedFunction<? super T, ? extends K> partitionKeyFn
) {
    String name = "mapUsingPartitionedContext";
    return new PartitionedProcessorTransform<>(name, upstream,
            mapUsingContextP(contextFactory, mapFn), partitionKeyFn);
}
/**
 * Creates a {@code PartitionedProcessorTransform} named {@code
 * flatMapUsingPartitionedContext} that wraps {@code
 * flatMapUsingContextP(contextFactory, flatMapFn)}, partitioned by
 * {@code partitionKeyFn}, downstream of {@code upstream}.
 */
public static <C, T, K, R> PartitionedProcessorTransform<T, K> flatMapUsingPartitionedContextTransform(
        @Nonnull Transform upstream,
        @Nonnull ContextFactory<C> contextFactory,
        @Nonnull DistributedBiFunction<? super C, ? super T, ? extends Traverser<? extends R>> flatMapFn,
        @Nonnull DistributedFunction<? super T, ? extends K> partitionKeyFn
) {
    String name = "flatMapUsingPartitionedContext";
    return new PartitionedProcessorTransform<>(name, upstream,
            flatMapUsingContextP(contextFactory, flatMapFn), partitionKeyFn);
}
/**
 * Convenience for {@link #peekSnapshotP(DistributedFunction,
 * DistributedPredicate, ProcessorMetaSupplier) peekSnapshot(toStringFn,
 * shouldLogFn, metaSupplier)} with a pass-through filter and {@code
 * Object#toString} as the formatting function.
 */
@Nonnull
public static ProcessorMetaSupplier peekSnapshotP(@Nonnull ProcessorMetaSupplier wrapped) {
    return peekSnapshotP(Object::toString, alwaysTrue(), wrapped);
}
/**
 * Creates a {@code PartitionedProcessorTransform} named {@code
 * <operationName>UsingPartitionedContextAsync} that wraps {@code
 * flatMapUsingContextAsyncP}, using {@code partitionKeyFn} both as the
 * processor's key function and as the partition key extractor.
 */
public static <C, T, K, R> PartitionedProcessorTransform<T, K> flatMapUsingPartitionedContextAsyncTransform(
        @Nonnull Transform upstream,
        @Nonnull String operationName,
        @Nonnull ContextFactory<C> contextFactory,
        @Nonnull DistributedBiFunction<? super C, ? super T, CompletableFuture<Traverser<R>>> flatMapAsyncFn,
        @Nonnull DistributedFunction<? super T, ? extends K> partitionKeyFn
) {
    String name = operationName + "UsingPartitionedContextAsync";
    return new PartitionedProcessorTransform<>(name, upstream,
            flatMapUsingContextAsyncP(contextFactory, partitionKeyFn, flatMapAsyncFn),
            partitionKeyFn);
}