/**
 * Creates a transaction state for an already-initialized transactional producer,
 * capturing the producer id and epoch assigned by the transaction coordinator.
 *
 * @param transactionalId the transactional id this producer was initialized with
 * @param producer the producer whose id and epoch are recorded
 */
KafkaTransactionState(String transactionalId, FlinkKafkaProducer<byte[], byte[]> producer) {
    this(transactionalId, producer.getProducerId(), producer.getEpoch(), producer);
}
/**
 * Invokes {@code methodName} on {@code object} reflectively, inferring the
 * parameter types from the runtime classes of {@code args}.
 *
 * <p>Note: type inference via {@link Object#getClass()} only works when every
 * argument is non-null and its runtime class exactly matches the declared
 * parameter type (e.g. boxed primitives will not match primitive parameters).
 * For those cases, use the overload that takes explicit argument types.
 *
 * @param object the target instance
 * @param methodName the name of the method to invoke
 * @param args the non-null arguments to pass to the method
 * @return whatever the invoked method returns
 */
private static Object invoke(Object object, String methodName, Object... args) {
    Class<?>[] argTypes = new Class[args.length];
    for (int i = 0; i < args.length; i++) {
        // Fail fast with a descriptive message: calling getClass() on a null
        // argument would otherwise surface as a bare NullPointerException.
        if (args[i] == null) {
            throw new IllegalArgumentException(
                "Cannot infer the parameter type of a null argument (index " + i
                    + ") for method '" + methodName
                    + "'; use the overload with explicit argument types instead.");
        }
        argTypes[i] = args[i].getClass();
    }
    return invoke(object, methodName, argTypes, args);
}
/**
 * Flushes all buffered records to Kafka. For transactional producers, also
 * flushes any partitions that were newly added to the current transaction.
 */
@Override
public void flush() {
    kafkaProducer.flush();
    // A non-null transactional id means this producer runs in transactional
    // mode and may have pending partition registrations to push out.
    if (transactionalId == null) {
        return;
    }
    flushNewPartitions();
}
private void abortTransactions(Set<String> transactionalIds) { for (String transactionalId : transactionalIds) { try (FlinkKafkaProducer<byte[], byte[]> kafkaProducer = initTransactionalProducer(transactionalId, false)) { // it suffice to call initTransactions - this will abort any lingering transactions kafkaProducer.initTransactions(); } } }
/**
 * Creates the Kafka-0.10-specific partition discoverer for this subtask.
 *
 * @param topicsDescriptor the topics (fixed list or pattern) to discover
 * @param indexOfThisSubtask index of this parallel subtask
 * @param numParallelSubtasks total parallelism of the source
 * @return a partition discoverer bound to this subtask
 */
@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
        KafkaTopicsDescriptor topicsDescriptor,
        int indexOfThisSubtask,
        int numParallelSubtasks) {
    return new Kafka010PartitionDiscoverer(
            topicsDescriptor, indexOfThisSubtask, numParallelSubtasks, properties);
}
protected void emitRecord( T record, KafkaTopicPartitionState<TopicPartition> partition, long offset, @SuppressWarnings("UnusedParameters") ConsumerRecord<?, ?> consumerRecord) throws Exception { // the 0.9 Fetcher does not try to extract a timestamp emitRecord(record, partition, offset); }
/**
 * Creates the Kafka-0.9-specific partition discoverer for this subtask.
 *
 * @param topicsDescriptor the topics (fixed list or pattern) to discover
 * @param indexOfThisSubtask index of this parallel subtask
 * @param numParallelSubtasks total parallelism of the source
 * @return a partition discoverer bound to this subtask
 */
@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
        KafkaTopicsDescriptor topicsDescriptor,
        int indexOfThisSubtask,
        int numParallelSubtasks) {
    return new Kafka09PartitionDiscoverer(
            topicsDescriptor, indexOfThisSubtask, numParallelSubtasks, properties);
}
/**
 * Returns the 0.10-specific consumer call bridge.
 *
 * <p>Overridden because Kafka broke binary compatibility between the 0.9 and
 * 0.10 clients (method signatures changed), so version-specific call bridges
 * are required.
 *
 * @return a call bridge targeting the Kafka 0.10 client API
 */
@Override
protected KafkaConsumerCallBridge010 createCallBridge() {
    return new KafkaConsumerCallBridge010();
}
/**
 * Creates a transaction state for an already-initialized transactional producer,
 * capturing the producer id and epoch assigned by the transaction coordinator.
 *
 * @param transactionalId the transactional id this producer was initialized with
 * @param producer the producer whose id and epoch are recorded
 */
KafkaTransactionState(String transactionalId, FlinkKafkaInternalProducer<byte[], byte[]> producer) {
    this(transactionalId, producer.getProducerId(), producer.getEpoch(), producer);
}
/**
 * Creates the partition discoverer for this subtask.
 *
 * @param topicsDescriptor the topics (fixed list or pattern) to discover
 * @param indexOfThisSubtask index of this parallel subtask
 * @param numParallelSubtasks total parallelism of the source
 * @return a partition discoverer bound to this subtask
 */
@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
        KafkaTopicsDescriptor topicsDescriptor,
        int indexOfThisSubtask,
        int numParallelSubtasks) {
    return new KafkaPartitionDiscoverer(
            topicsDescriptor, indexOfThisSubtask, numParallelSubtasks, properties);
}
/**
 * Returns the 0.9-specific consumer call bridge used to talk to the Kafka
 * client API.
 *
 * @return a call bridge targeting the Kafka 0.9 client API
 */
protected KafkaConsumerCallBridge09 createCallBridge() {
    return new KafkaConsumerCallBridge09();
}
/**
 * Creates a new internal Kafka producer configured with this sink's
 * producer properties.
 *
 * @return a freshly constructed {@code FlinkKafkaInternalProducer}
 */
protected FlinkKafkaInternalProducer<byte[], byte[]> createProducer() {
    return new FlinkKafkaInternalProducer<>(producerConfig);
}
private void abortTransactions(Set<String> transactionalIds) { for (String transactionalId : transactionalIds) { try (FlinkKafkaInternalProducer<byte[], byte[]> kafkaProducer = initTransactionalProducer(transactionalId, false)) { // it suffice to call initTransactions - this will abort any lingering transactions kafkaProducer.initTransactions(); } } }
/**
 * Emits a deserialized record together with the timestamp carried by the
 * raw Kafka record.
 *
 * @param record the deserialized record to emit
 * @param partition the partition state to update with the new offset
 * @param offset the record's offset in its partition
 * @param consumerRecord the raw Kafka record supplying the timestamp
 * @throws Exception if forwarding the record fails
 */
protected void emitRecord(
        T record,
        KafkaTopicPartitionState<TopicPartition> partition,
        long offset,
        ConsumerRecord<?, ?> consumerRecord) throws Exception {
    long timestamp = consumerRecord.timestamp();
    emitRecordWithTimestamp(record, partition, offset, timestamp);
}