DataStream.addSink

How to use the addSink method in org.apache.flink.streaming.api.datastream.DataStream

Best Java code snippets using org.apache.flink.streaming.api.datastream.DataStream.addSink (showing the top 20 results out of 459).

origin: apache/flink

  @Override
  public DataStreamSink<Row> addSink(SinkFunction<Row> sinkFunction) {
    this.sinkFunction = sinkFunction;
    return super.addSink(sinkFunction);
  }
origin: apache/flink

/**
 * A thin wrapper layer over {@link DataStream#addSink(SinkFunction)}.
 *
 * @param sink_func The object containing the sink's invoke function.
 */
@PublicEvolving
public void add_sink(SinkFunction<PyObject> sink_func) throws IOException {
  stream.addSink(new PythonSinkFunction(sink_func));
}
origin: apache/flink

@Override
public void emitDataStream(DataStream<Row> dataStream) {
  dataStream
      .addSink(new JDBCSinkFunction(outputFormat))
      .name(TableConnectorUtil.generateRuntimeName(this.getClass(), fieldNames));
}
origin: apache/flink

@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> stream) {
  // add sink
  stream
    .addSink(new CollectSink<>(targetAddress, targetPort, serializer))
    .name("SQL Client Stream Collect Sink")
    .setParallelism(1);
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
  return stream.addSink(new FlinkKafkaProducer<T>(
    topic,
    serSchema,
    props,
    Optional.ofNullable(partitioner),
    producerSemantic,
    FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
origin: apache/flink

@Override
public void emitDataStream(DataStream<Row> dataStream) {
  final SinkFunction<Row> kafkaProducer = createKafkaProducer(
    topic,
    properties,
    serializationSchema,
    partitioner);
  dataStream.addSink(kafkaProducer).name(TableConnectorUtil.generateRuntimeName(this.getClass(), getFieldNames()));
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
  return stream.addSink(new FlinkKafkaProducer011<>(
    topic,
    serSchema,
    props,
    Optional.ofNullable(partitioner),
    producerSemantic,
    FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
origin: apache/flink

/**
 * Writes a DataStream to the standard output stream (stdout).
 *
 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 *
 * <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
 * worker.
 *
 * @return The closed DataStream.
 */
@PublicEvolving
public DataStreamSink<T> print() {
  PrintSinkFunction<T> printFunction = new PrintSinkFunction<>();
  return addSink(printFunction).name("Print to Std. Out");
}
origin: apache/flink

@Override
public CassandraSink<IN> createSink() throws Exception {
  final CassandraScalaProductSink<IN> sink = new CassandraScalaProductSink<>(
    query,
    builder,
    configBuilder.build(),
    failureHandler);
  return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
origin: apache/flink

@Override
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) {
  DataStreamSink<T> result = super.addSink(sinkFunction);
  result.getTransformation().setStateKeySelector(keySelector);
  result.getTransformation().setStateKeyType(keyType);
  return result;
}
origin: apache/flink

@Override
public CassandraSink<IN> createSink() throws Exception {
  final CassandraTupleSink<IN> sink = new CassandraTupleSink<>(
    query,
    builder,
    configBuilder.build(),
    failureHandler);
  return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
origin: apache/flink

@Override
protected CassandraSink<Row> createSink() throws Exception {
  final CassandraRowSink sink = new CassandraRowSink(
    typeInfo.getArity(),
    query,
    builder,
    configBuilder.build(),
    failureHandler);
  return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
origin: apache/flink

@Override
public CassandraSink<IN> createSink() throws Exception {
  final CassandraPojoSink<IN> sink = new CassandraPojoSink<>(
    typeInfo.getTypeClass(),
    builder,
    mapperOptions,
    keyspace,
    configBuilder.build(),
    failureHandler);
  return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
  FlinkKafkaProducer09<T> prod = new FlinkKafkaProducer09<>(topic, serSchema, props, partitioner);
  prod.setFlushOnCheckpoint(true);
  return stream.addSink(prod);
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
  FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
  prod.setFlushOnCheckpoint(true);
  return stream.addSink(prod);
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
  FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props);
  prod.setFlushOnCheckpoint(true);
  prod.setWriteTimestampToKafka(true);
  return stream.addSink(prod);
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
  FlinkKafkaProducer<T> prod = new FlinkKafkaProducer<T>(
    topic,
    serSchema,
    props,
    Optional.of(new FlinkFixedPartitioner<>()),
    producerSemantic,
    FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
  prod.setWriteTimestampToKafka(true);
  return stream.addSink(prod);
}
origin: apache/flink

@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
  FlinkKafkaProducer011<T> prod = new FlinkKafkaProducer011<>(
    topic, serSchema, props, Optional.of(new FlinkFixedPartitioner<>()), producerSemantic, FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
  prod.setWriteTimestampToKafka(true);
  return stream.addSink(prod);
}
origin: apache/flink

public static JobGraph stoppableJob(final StopJobSignal stopJobSignal) {
  final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.addSource(new InfiniteSourceFunction(stopJobSignal))
    .setParallelism(2)
    .shuffle()
    .addSink(new DiscardingSink<>())
    .setParallelism(2);
  return env.getStreamGraph().getJobGraph();
}
origin: apache/flink

@Test
public void testOutputTypeConfigurationWithTwoInputTransformation() throws Exception {
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  DataStream<Integer> source1 = env.fromElements(1, 10);
  DataStream<Integer> source2 = env.fromElements(2, 11);
  ConnectedStreams<Integer, Integer> connectedSource = source1.connect(source2);
  OutputTypeConfigurableOperationWithTwoInputs outputTypeConfigurableOperation = new OutputTypeConfigurableOperationWithTwoInputs();
  DataStream<Integer> result = connectedSource.transform(
      "Two input and output type configurable operation",
      BasicTypeInfo.INT_TYPE_INFO,
      outputTypeConfigurableOperation);
  result.addSink(new DiscardingSink<>());
  env.getStreamGraph();
  assertEquals(BasicTypeInfo.INT_TYPE_INFO, outputTypeConfigurableOperation.getTypeInformation());
}
org.apache.flink.streaming.api.datastream.DataStream.addSink

Javadoc

Adds the given sink to this DataStream. Only streams with sinks added will be executed once the StreamExecutionEnvironment#execute() method is called.
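For orientation, here is a minimal, self-contained sketch of how addSink fits into a complete job. The class name and the inline SinkFunction body are illustrative stand-ins, not taken from the snippets above:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;

public class AddSinkExample {
  public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // fromElements is just a convenient test source.
    env.fromElements("a", "b", "c")
      // addSink terminates the pipeline; without at least one sink,
      // nothing runs when execute() is called.
      .addSink(new SinkFunction<String>() {
        @Override
        public void invoke(String value, Context context) {
          System.out.println(value); // stand-in for a real sink
        }
      });

    // Only streams with sinks attached are executed by this call.
    env.execute("addSink example");
  }
}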

Popular methods of DataStream

  • keyBy
    Partitions the operator state of a DataStream using field expressions.
  • map
    Applies a Map transformation on a DataStream.
  • flatMap
    Applies a FlatMap transformation on a DataStream.
  • getType
    Gets the type of the stream.
  • union
    Creates a new DataStream by merging DataStream outputs of the same type with each other.
  • print
    Writes a DataStream to the standard output stream (stdout).
  • transform
    Method for passing user defined operators along with the type information that will transform the DataStream.
  • getExecutionEnvironment
    Returns the StreamExecutionEnvironment that was used to create this DataStream.
  • getTransformation
    Returns the StreamTransformation that represents the operation that logically creates this DataStream.
  • rebalance
    Sets the partitioning of the DataStream so that the output elements are distributed evenly to instances of the next operation.
  • writeAsText
    Writes a DataStream to the file specified by path in text format.
  • broadcast
    Sets the partitioning of the DataStream so that the output elements are broadcasted to every parallel instance of the next operation.
  • Other methods: connect, assignTimestampsAndWatermarks, getExecutionConfig, writeAsCsv, getParallelism, <init>, coGroup
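
As a hedged illustration of how a few of these methods compose into one pipeline (the element values and job name are made up for the example; keyBy and map use anonymous classes so the types are explicit):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class DataStreamMethodsSketch {
  public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    env.fromElements(Tuple2.of("a", 1), Tuple2.of("b", 2), Tuple2.of("a", 3))
      // keyBy partitions the stream by a key extracted from each element
      .keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
        @Override
        public String getKey(Tuple2<String, Integer> value) {
          return value.f0;
        }
      })
      // map applies a one-to-one transformation per element
      .map(new MapFunction<Tuple2<String, Integer>, String>() {
        @Override
        public String map(Tuple2<String, Integer> value) {
          return value.f0 + "=" + value.f1;
        }
      })
      // print() attaches a stdout sink via addSink under the hood
      .print();

    env.execute("DataStream methods sketch");
  }
}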
