/**
 * Returns a sink that writes the {@code Map.Entry}s it receives into the
 * Hazelcast {@code IMap} with the given name.
 * <p>
 * The sink is idempotent: an entry with an already-present key overwrites
 * the existing value instead of being appended, so replaying items after a
 * restart from snapshot leaves the target map unchanged. This yields the
 * exactly-once guarantee.
 * <p>
 * The default local parallelism for this sink is 1.
 */
@Nonnull
public static <K, V> Sink<Entry<K, V>> map(@Nonnull String mapName) {
    // Sink name embeds the target map for readable DAG/job diagnostics
    String sinkName = "mapSink(" + mapName + ')';
    return fromProcessor(sinkName, writeMapP(mapName));
}
/**
 * Creates and returns the file {@link Sink} assembled from the components
 * previously supplied to this builder.
 */
public Sink<T> build() {
    // Name the sink after its target directory for readable diagnostics
    String sinkName = "filesSink(" + directoryName + ')';
    return Sinks.fromProcessor(
            sinkName,
            writeFileP(directoryName, toStringFn, charset, append));
}
}
/**
 * Convenience for {@link #mapWithMerging(String, DistributedFunction, DistributedFunction,
 * DistributedBinaryOperator)} that accepts {@link Entry} items directly,
 * using the entry's key and value as-is.
 */
@Nonnull
public static <K, V> Sink<Entry<K, V>> mapWithMerging(
        @Nonnull String mapName,
        @Nonnull DistributedBinaryOperator<? super V> mergeFn
) {
    String sinkName = "mapWithMergingSink(" + mapName + ')';
    return fromProcessor(sinkName,
            mergeMapP(mapName, Entry::getKey, entryValue(), mergeFn));
}
/**
 * Shortcut for {@link #writeBufferedP(DistributedFunction,
 * DistributedBiConsumer, DistributedConsumer, DistributedConsumer)} that
 * supplies a no-op {@code destroyFn}, for buffers that need no teardown.
 */
@Nonnull
public static <W, T> DistributedSupplier<Processor> writeBufferedP(
        @Nonnull DistributedFunction<? super Context, ? extends W> createFn,
        @Nonnull DistributedBiConsumer<? super W, ? super T> onReceiveFn,
        @Nonnull DistributedConsumer<? super W> flushFn
) {
    // Delegate to the full overload with a destroy function that does nothing
    return writeBufferedP(createFn, onReceiveFn, flushFn, DistributedConsumer.noop());
}
/** * Convenience for {@link #mapWithUpdating(String, DistributedFunction, * DistributedBiFunction)} with {@link Entry} as the input item. */ @Nonnull public static <K, V, E extends Entry<K, V>> Sink<E> mapWithUpdating( @Nonnull String mapName, @Nonnull DistributedBiFunction<? super V, ? super E, ? extends V> updateFn ) { //noinspection Convert2MethodRef (provokes a javac 9 bug) return fromProcessor("mapWithUpdatingSink(" + mapName + ')', updateMapP(mapName, (Entry<K, V> e) -> e.getKey(), updateFn)); }
/**
 * Returns a sink equivalent to {@link #mapWithEntryProcessor}, except that
 * it targets a map in a remote Hazelcast cluster, identified by the given
 * {@code ClientConfig}.
 */
@Nonnull
public static <E, K, V> Sink<E> remoteMapWithEntryProcessor(
        @Nonnull String mapName,
        @Nonnull ClientConfig clientConfig,
        @Nonnull DistributedFunction<? super E, ? extends K> toKeyFn,
        @Nonnull DistributedFunction<? super E, ? extends EntryProcessor<K, V>> toEntryProcessorFn
) {
    String sinkName = "remoteMapWithEntryProcessorSink(" + mapName + ')';
    return fromProcessor(sinkName,
            updateRemoteMapP(mapName, clientConfig, toKeyFn, toEntryProcessorFn));
}
/**
 * Convenience for {@link #remoteMapWithMerging} that accepts {@link Entry}
 * items directly, using the entry's key and value as-is.
 */
@Nonnull
public static <K, V> Sink<Entry<K, V>> remoteMapWithMerging(
        @Nonnull String mapName,
        @Nonnull ClientConfig clientConfig,
        @Nonnull DistributedBinaryOperator<V> mergeFn
) {
    String sinkName = "remoteMapWithMergingSink(" + mapName + ')';
    return fromProcessor(sinkName,
            mergeRemoteMapP(mapName, clientConfig, Entry::getKey, entryValue(), mergeFn));
}
/**
 * Returns a sink that puts {@code Map.Entry}s it receives into a Hazelcast
 * {@code ICache} with the specified name.
 * <p>
 * This sink provides the exactly-once guarantee thanks to <i>idempotent
 * updates</i>. It means that the value with the same key is not appended,
 * but overwritten. After the job is restarted from snapshot, duplicate
 * items will not change the state in the target cache.
 * <p>
 * The default local parallelism for this sink is 1.
 */
@Nonnull
public static <T extends Entry> Sink<T> cache(@Nonnull String cacheName) {
    return fromProcessor("cacheSink(" + cacheName + ')', writeCacheP(cacheName));
}
) { return Sinks.fromProcessor("jdbcSink", SinkProcessors.writeJdbcP(updateQuery, connectionSupplier, bindFn));
/**
 * Returns a supplier of processors for {@link Sinks#socket(String, int)}.
 * <p>
 * Each processor opens its own TCP connection to {@code host:port}, renders
 * every received item with {@code toStringFn}, and writes it as one
 * newline-terminated line in the given charset.
 */
public static <T> ProcessorMetaSupplier writeSocketP(
        @Nonnull String host,
        int port,
        @Nonnull DistributedFunction<? super T, ? extends String> toStringFn,
        @Nonnull Charset charset
) {
    checkSerializable(toStringFn, "toStringFn");
    // Capture the charset by name: Charset itself need not be serializable
    String encoding = charset.name();
    return preferLocalParallelismOne(writeBufferedP(
            ctx -> new BufferedWriter(new OutputStreamWriter(
                    new Socket(host, port).getOutputStream(), encoding)),
            (writer, item) -> {
                @SuppressWarnings("unchecked")
                T typedItem = (T) item;
                writer.write(toStringFn.apply(typedItem));
                writer.write('\n');
            },
            BufferedWriter::flush,
            BufferedWriter::close
    ));
}
updateMapP(mapName, toKeyFn, toEntryProcessorFn));
/** * Convenience for {@link #remoteMapWithUpdating} with {@link Entry} as * input item. */ @Nonnull public static <K, V, E extends Entry<K, V>> Sink<E> remoteMapWithUpdating( @Nonnull String mapName, @Nonnull ClientConfig clientConfig, @Nonnull DistributedBiFunction<? super V, ? super E, ? extends V> updateFn ) { //noinspection Convert2MethodRef (provokes a javac 9 bug) return fromProcessor("remoteMapWithUpdatingSink(" + mapName + ')', updateRemoteMapP(mapName, clientConfig, (Entry<K, V> e) -> e.getKey(), updateFn)); }
/**
 * Returns a sink equivalent to
 * {@link #mapWithMerging(String, DistributedBinaryOperator)}, except that it
 * targets a map in a remote Hazelcast cluster, identified by the given
 * {@code ClientConfig}.
 * <p>
 * Due to the used API, the remote cluster must be at least 3.11.
 */
@Nonnull
public static <T, K, V> Sink<T> remoteMapWithMerging(
        @Nonnull String mapName,
        @Nonnull ClientConfig clientConfig,
        @Nonnull DistributedFunction<? super T, ? extends K> toKeyFn,
        @Nonnull DistributedFunction<? super T, ? extends V> toValueFn,
        @Nonnull DistributedBinaryOperator<V> mergeFn
) {
    String sinkName = "remoteMapWithMergingSink(" + mapName + ')';
    return fromProcessor(sinkName,
            mergeRemoteMapP(mapName, clientConfig, toKeyFn, toValueFn, mergeFn));
}
/**
 * Submits a Jet job that copies every entry of {@code sourceMap} into
 * {@code targetMap}, using the given edge queue size, and returns the job's
 * completion future.
 */
public static CompletableFuture<Void> copyMapUsingJob(JetInstance instance, int queueSize,
                                                      String sourceMap, String targetMap) {
    DAG dag = new DAG();
    Vertex reader = dag.newVertex("readMap(" + sourceMap + ')', readMapP(sourceMap));
    Vertex writer = dag.newVertex("writeMap(" + targetMap + ')', writeMapP(targetMap));
    // Bounded queue between reader and writer throttles in-flight items
    EdgeConfig edgeConfig = new EdgeConfig().setQueueSize(queueSize);
    dag.edge(between(reader, writer).setConfig(edgeConfig));
    JobConfig jobConfig = new JobConfig()
            .setName("copy-" + sourceMap + "-to-" + targetMap);
    return instance.newJob(dag, jobConfig).getFuture();
}
}
)); Vertex sink = dag.newVertex("sink", SinkProcessors.writeFileP(OUTPUT_DIR_NAME, Object::toString, StandardCharsets.UTF_8, false));
/**
 * Creates and returns the {@link Sink} assembled from the components
 * supplied to this builder.
 *
 * @throws NullPointerException if no {@code receiveFn} was supplied
 */
@Nonnull
public Sink<T> build() {
    Preconditions.checkNotNull(receiveFn, "receiveFn must be set");
    return new SinkImpl<>(name, ProcessorMetaSupplier.of(
            writeBufferedP(createFn, receiveFn, flushFn, destroyFn),
            preferredLocalParallelism));
}
}
@Nonnull DistributedBiFunction<? super V, ? super T, ? extends V> updateFn ) { return fromProcessor("mapWithUpdatingSink(" + mapName + ')', updateMapP(mapName, toKeyFn, updateFn));
/**
 * Returns a sink equivalent to {@link #mapWithUpdating}, except that it
 * targets a map in a remote Hazelcast cluster, identified by the given
 * {@code ClientConfig}.
 * <p>
 * Due to the used API, the remote cluster must be at least 3.11.
 */
@Nonnull
public static <T, K, V> Sink<T> remoteMapWithUpdating(
        @Nonnull String mapName,
        @Nonnull ClientConfig clientConfig,
        @Nonnull DistributedFunction<? super T, ? extends K> toKeyFn,
        @Nonnull DistributedBiFunction<? super V, ? super T, ? extends V> updateFn
) {
    String sinkName = "remoteMapWithUpdatingSink(" + mapName + ')';
    return fromProcessor(sinkName,
            updateRemoteMapP(mapName, clientConfig, toKeyFn, updateFn));
}
) { return fromProcessor("mapWithMergingSink(" + mapName + ')', mergeMapP(mapName, toKeyFn, toValueFn, mergeFn));
Vertex sink = dag.newVertex("sink", writeMapP("counts"));