/**
 * If set to true, the Flink producer will wait for all outstanding messages in the Kafka buffers
 * to be acknowledged by the Kafka producer on a checkpoint.
 * This way, the producer can guarantee that messages in the Kafka buffers are part of the checkpoint.
 *
 * <p>Simply delegates the flag to the wrapped {@code producer} instance.
 *
 * @param flush Flag indicating the flushing mode (true = flush on checkpoint)
 */
public void setFlushOnCheckpoint(boolean flush) {
	producer.setFlushOnCheckpoint(flush);
}
/**
 * Builds a {@link StreamSink} operator around a {@link FlinkKafkaProducer010} for the given
 * topic, with flush-on-checkpoint enabled so buffered records are covered by each checkpoint.
 *
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return a sink operator wrapping the configured producer
 */
@Override
public <T> StreamSink<T> getProducerSink(
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> kafkaProducer =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	kafkaProducer.setFlushOnCheckpoint(true);
	return new StreamSink<>(kafkaProducer);
}
/**
 * Attaches a {@link FlinkKafkaProducer010} sink to the given stream, writing to the given
 * topic with flush-on-checkpoint enabled.
 *
 * @param stream      stream whose elements are written to Kafka
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> sinkFunction =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	sinkFunction.setFlushOnCheckpoint(true);
	return stream.addSink(sinkFunction);
}
/**
 * Attaches a {@link FlinkKafkaProducer010} sink to the given stream that also writes record
 * timestamps to Kafka, with flush-on-checkpoint enabled.
 *
 * @param stream    stream whose elements are written to Kafka
 * @param topic     target Kafka topic
 * @param serSchema serialization schema applied to each record
 * @param props     Kafka producer configuration
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props) {
	FlinkKafkaProducer010<T> producer010 = new FlinkKafkaProducer010<>(topic, serSchema, props);
	// Setter order is irrelevant; both flags are applied before the sink is attached.
	producer010.setWriteTimestampToKafka(true);
	producer010.setFlushOnCheckpoint(true);
	return stream.addSink(producer010);
}
/**
 * If set to true, the Flink producer will wait for all outstanding messages in the Kafka buffers
 * to be acknowledged by the Kafka producer on a checkpoint.
 * This way, the producer can guarantee that messages in the Kafka buffers are part of the checkpoint.
 *
 * <p>Simply delegates the flag to the wrapped {@code producer} instance.
 *
 * @param flush Flag indicating the flushing mode (true = flush on checkpoint)
 */
public void setFlushOnCheckpoint(boolean flush) {
	producer.setFlushOnCheckpoint(flush);
}
/**
 * If set to true, the Flink producer will wait for all outstanding messages in the Kafka buffers
 * to be acknowledged by the Kafka producer on a checkpoint.
 * This way, the producer can guarantee that messages in the Kafka buffers are part of the checkpoint.
 *
 * <p>Simply delegates the flag to the wrapped {@code producer} instance.
 *
 * @param flush Flag indicating the flushing mode (true = flush on checkpoint)
 */
public void setFlushOnCheckpoint(boolean flush) {
	producer.setFlushOnCheckpoint(flush);
}
/**
 * Wraps a flush-on-checkpoint {@link FlinkKafkaProducer010} in a {@link StreamSink} operator
 * for the given topic, so buffered records are acknowledged as part of each checkpoint.
 *
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return a sink operator wrapping the configured producer
 */
@Override
public <T> StreamSink<T> getProducerSink(
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> flushingProducer =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	flushingProducer.setFlushOnCheckpoint(true);
	return new StreamSink<>(flushingProducer);
}
/**
 * Creates a {@link StreamSink} around a {@link FlinkKafkaProducer010} that writes to the given
 * topic and flushes its buffers on every checkpoint.
 *
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return a sink operator wrapping the configured producer
 */
@Override
public <T> StreamSink<T> getProducerSink(
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> sinkProducer =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	sinkProducer.setFlushOnCheckpoint(true);
	return new StreamSink<>(sinkProducer);
}
/**
 * Returns a {@link StreamSink} operator backed by a {@link FlinkKafkaProducer010} for the
 * given topic, configured to flush outstanding records on each checkpoint.
 *
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return a sink operator wrapping the configured producer
 */
@Override
public <T> StreamSink<T> getProducerSink(
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> checkpointedProducer =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	checkpointedProducer.setFlushOnCheckpoint(true);
	return new StreamSink<>(checkpointedProducer);
}
/**
 * Adds a {@link FlinkKafkaProducer010} sink (flush-on-checkpoint enabled) to the given stream,
 * writing its elements to the given Kafka topic.
 *
 * @param stream      stream whose elements are written to Kafka
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> kafkaSink =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	kafkaSink.setFlushOnCheckpoint(true);
	return stream.addSink(kafkaSink);
}
/**
 * Writes the given stream into Kafka via a {@link FlinkKafkaProducer010}, with
 * flush-on-checkpoint enabled so buffered records are covered by each checkpoint.
 *
 * @param stream      stream whose elements are written to Kafka
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> flushingSink =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	flushingSink.setFlushOnCheckpoint(true);
	return stream.addSink(flushingSink);
}
/**
 * Attaches a checkpoint-flushing {@link FlinkKafkaProducer010} sink to the stream so its
 * elements are produced into the given Kafka topic.
 *
 * @param stream      stream whose elements are written to Kafka
 * @param topic       target Kafka topic
 * @param serSchema   serialization schema applied to each record
 * @param props       Kafka producer configuration
 * @param partitioner partitioner assigning records to Kafka partitions
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> producerSink =
			new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	producerSink.setFlushOnCheckpoint(true);
	return stream.addSink(producerSink);
}
/**
 * Adds a {@link FlinkKafkaProducer010} sink to the stream that writes record timestamps to
 * Kafka and flushes buffered records on every checkpoint.
 *
 * @param stream    stream whose elements are written to Kafka
 * @param topic     target Kafka topic
 * @param serSchema serialization schema applied to each record
 * @param props     Kafka producer configuration
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props) {
	FlinkKafkaProducer010<T> timestampedProducer =
			new FlinkKafkaProducer010<>(topic, serSchema, props);
	timestampedProducer.setWriteTimestampToKafka(true);
	timestampedProducer.setFlushOnCheckpoint(true);
	return stream.addSink(timestampedProducer);
}
/**
 * Writes the given stream into Kafka including element timestamps, using a
 * {@link FlinkKafkaProducer010} with flush-on-checkpoint enabled.
 *
 * @param stream    stream whose elements are written to Kafka
 * @param topic     target Kafka topic
 * @param serSchema serialization schema applied to each record
 * @param props     Kafka producer configuration
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props) {
	FlinkKafkaProducer010<T> tsProducer = new FlinkKafkaProducer010<>(topic, serSchema, props);
	tsProducer.setWriteTimestampToKafka(true);
	tsProducer.setFlushOnCheckpoint(true);
	return stream.addSink(tsProducer);
}
/**
 * Produces the stream's elements into the given Kafka topic with their timestamps, via a
 * {@link FlinkKafkaProducer010} configured to flush its buffers on each checkpoint.
 *
 * @param stream    stream whose elements are written to Kafka
 * @param topic     target Kafka topic
 * @param serSchema serialization schema applied to each record
 * @param props     Kafka producer configuration
 * @return the sink added to the stream
 */
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(
		DataStream<T> stream,
		String topic,
		KeyedSerializationSchema<T> serSchema,
		Properties props) {
	FlinkKafkaProducer010<T> timestampSink = new FlinkKafkaProducer010<>(topic, serSchema, props);
	timestampSink.setWriteTimestampToKafka(true);
	timestampSink.setFlushOnCheckpoint(true);
	return stream.addSink(timestampSink);
}