@Override
public DataStreamSink<Row> addSink(SinkFunction<Row> sinkFunction) {
    this.sinkFunction = sinkFunction;
    return super.addSink(sinkFunction);
}
/**
 * A thin wrapper layer over {@link DataStream#addSink(SinkFunction)}.
 *
 * @param sink_func The object containing the sink's invoke function.
 */
@PublicEvolving
public void add_sink(SinkFunction<PyObject> sink_func) throws IOException {
    stream.addSink(new PythonSinkFunction(sink_func));
}
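// A minimal usage sketch of the wrapper above. Assumptions: a wrapper
// instance named pyStream exposing add_sink, and Jython PyObject elements;
// the anonymous sink below is hypothetical, not part of the Flink API.
pyStream.add_sink(new SinkFunction<PyObject>() {
    @Override
    public void invoke(PyObject value) throws Exception {
        System.out.println(value); // side effect only: echo each element
    }
});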
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    dataStream
        .addSink(new JDBCSinkFunction(outputFormat))
        .name(TableConnectorUtil.generateRuntimeName(this.getClass(), fieldNames));
}
@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> stream) {
    // add sink
    stream
        .addSink(new CollectSink<>(targetAddress, targetPort, serializer))
        .name("SQL Client Stream Collect Sink")
        .setParallelism(1);
}
@Override
public <T> DataStreamSink<T> produceIntoKafka(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props,
        FlinkKafkaPartitioner<T> partitioner) {
    return stream.addSink(new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    final SinkFunction<Row> kafkaProducer = createKafkaProducer(
        topic,
        properties,
        serializationSchema,
        partitioner);
    dataStream
        .addSink(kafkaProducer)
        .name(TableConnectorUtil.generateRuntimeName(this.getClass(), getFieldNames()));
}
@Override
public <T> DataStreamSink<T> produceIntoKafka(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props,
        FlinkKafkaPartitioner<T> partitioner) {
    return stream.addSink(new FlinkKafkaProducer011<>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
/**
 * Writes a DataStream to the standard output stream (stdout).
 *
 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 *
 * <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
 * worker.
 *
 * @return The closed DataStream.
 */
@PublicEvolving
public DataStreamSink<T> print() {
    PrintSinkFunction<T> printFunction = new PrintSinkFunction<>();
    return addSink(printFunction).name("Print to Std. Out");
}
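// A minimal, self-contained sketch of print() in use (class name, job name,
// and element values are placeholders). As the Javadoc notes, the output
// lands on the worker's stdout, not the client's.
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class PrintExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).print(); // each element's toString() to stdout
        env.execute("print example");
    }
}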
@Override
public CassandraSink<IN> createSink() throws Exception {
    final CassandraScalaProductSink<IN> sink = new CassandraScalaProductSink<>(
        query,
        builder,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
@Override
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) {
    DataStreamSink<T> result = super.addSink(sinkFunction);
    result.getTransformation().setStateKeySelector(keySelector);
    result.getTransformation().setStateKeyType(keyType);
    return result;
}
@Override
public CassandraSink<IN> createSink() throws Exception {
    final CassandraTupleSink<IN> sink = new CassandraTupleSink<>(
        query,
        builder,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
@Override
protected CassandraSink<Row> createSink() throws Exception {
    final CassandraRowSink sink = new CassandraRowSink(
        typeInfo.getArity(),
        query,
        builder,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
@Override
public CassandraSink<IN> createSink() throws Exception {
    final CassandraPojoSink<IN> sink = new CassandraPojoSink<>(
        typeInfo.getTypeClass(),
        builder,
        mapperOptions,
        keyspace,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
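// A minimal usage sketch of the public builder that routes to the
// createSink() variants above (host, keyspace, table, and tupleStream are
// placeholders). CassandraSink.addSink(...) selects the tuple, Row, POJO,
// or Scala-product sink based on the stream's type information.
CassandraSink.addSink(tupleStream) // e.g. a DataStream<Tuple2<String, Integer>>
    .setHost("127.0.0.1")
    .setQuery("INSERT INTO example.wordcount (word, count) VALUES (?, ?);")
    .build();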
@Override
public <T> DataStreamSink<T> produceIntoKafka(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props,
        FlinkKafkaPartitioner<T> partitioner) {
    FlinkKafkaProducer09<T> prod = new FlinkKafkaProducer09<>(topic, serSchema, props, partitioner);
    prod.setFlushOnCheckpoint(true);
    return stream.addSink(prod);
}
@Override
public <T> DataStreamSink<T> produceIntoKafka(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props,
        FlinkKafkaPartitioner<T> partitioner) {
    FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
    prod.setFlushOnCheckpoint(true);
    return stream.addSink(prod);
}
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props) {
    FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props);
    prod.setFlushOnCheckpoint(true);
    prod.setWriteTimestampToKafka(true);
    return stream.addSink(prod);
}
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props) {
    FlinkKafkaProducer<T> prod = new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.of(new FlinkFixedPartitioner<>()),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
    prod.setWriteTimestampToKafka(true);
    return stream.addSink(prod);
}
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
        DataStream<T> stream,
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props) {
    FlinkKafkaProducer011<T> prod = new FlinkKafkaProducer011<>(
        topic,
        serSchema,
        props,
        Optional.of(new FlinkFixedPartitioner<>()),
        producerSemantic,
        FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
    prod.setWriteTimestampToKafka(true);
    return stream.addSink(prod);
}
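// A minimal usage sketch of the 0.11 producer that the helpers above wrap
// (broker address, topic, stream contents, and env are placeholders).
// The simple (topic, SerializationSchema, Properties) constructor shown
// here is part of the connector's public API.
Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
DataStream<String> lines = env.fromElements("a", "b", "c");
lines.addSink(new FlinkKafkaProducer011<>("my-topic", new SimpleStringSchema(), props));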
public static JobGraph stoppableJob(final StopJobSignal stopJobSignal) {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.addSource(new InfiniteSourceFunction(stopJobSignal))
        .setParallelism(2)
        .shuffle()
        .addSink(new DiscardingSink<>())
        .setParallelism(2);
    return env.getStreamGraph().getJobGraph();
}
@Test
public void testOutputTypeConfigurationWithTwoInputTransformation() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Integer> source1 = env.fromElements(1, 10);
    DataStream<Integer> source2 = env.fromElements(2, 11);

    ConnectedStreams<Integer, Integer> connectedSource = source1.connect(source2);

    OutputTypeConfigurableOperationWithTwoInputs outputTypeConfigurableOperation =
        new OutputTypeConfigurableOperationWithTwoInputs();

    DataStream<Integer> result = connectedSource.transform(
        "Two input and output type configurable operation",
        BasicTypeInfo.INT_TYPE_INFO,
        outputTypeConfigurableOperation);

    result.addSink(new DiscardingSink<>());

    env.getStreamGraph();

    assertEquals(BasicTypeInfo.INT_TYPE_INFO, outputTypeConfigurableOperation.getTypeInformation());
}