private void abortTransactions(Set<String> transactionalIds) {
	for (String transactionalId : transactionalIds) {
		try (FlinkKafkaProducer<byte[], byte[]> kafkaProducer =
				initTransactionalProducer(transactionalId, false)) {
			// It suffices to call initTransactions() - this will abort any lingering transactions.
			kafkaProducer.initTransactions();
		}
	}
}
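// For context, a minimal sketch (plain Kafka client, not part of this class) of why calling
// initTransactions() alone is enough to abort a lingering transaction: constructing a new
// producer with the same transactional.id and calling initTransactions() bumps the producer
// epoch, fencing the previous instance and aborting its unfinished transaction. The bootstrap
// address and method name are assumptions for illustration.
// Requires: org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig},
// org.apache.kafka.common.serialization.ByteArraySerializer, java.util.Properties.
private static void abortLingeringTransaction(String transactionalId) {
	Properties props = new Properties();
	props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
	props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId);
	props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
		producer.initTransactions(); // aborts any in-flight transaction for this id
	}
}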
@Override
protected void recoverAndAbort(KafkaTransactionState transaction) {
	if (transaction.isTransactional()) {
		// Re-initializing a producer with the recovered transactional id fences the previous
		// instance and aborts its unfinished transaction.
		try (FlinkKafkaProducer<byte[], byte[]> producer =
				initTransactionalProducer(transaction.transactionalId, false)) {
			producer.initTransactions();
		}
	}
}
/**
 * For each checkpoint we create a new {@link FlinkKafkaProducer} so that new transactions will not clash
 * with transactions created during previous checkpoints ({@code producer.initTransactions()} assures that we
 * obtain new producerId and epoch counters).
 */
private FlinkKafkaProducer<byte[], byte[]> createTransactionalProducer() throws FlinkKafka011Exception {
	String transactionalId = availableTransactionalIds.poll();
	if (transactionalId == null) {
		throw new FlinkKafka011Exception(
			FlinkKafka011ErrorCode.PRODUCERS_POOL_EMPTY,
			"Too many ongoing snapshots. Increase the Kafka producers pool size or decrease the number of concurrent checkpoints.");
	}
	FlinkKafkaProducer<byte[], byte[]> producer = initTransactionalProducer(transactionalId, true);
	producer.initTransactions();
	return producer;
}
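// Hedged sketch of the complementary recycle step: once a transaction completes, its id
// returns to the pool so createTransactionalProducer() can reuse it for a later checkpoint.
// The method name and the getTransactionalId() accessor are assumptions for illustration,
// not confirmed API of this class.
private void recycleTransactionalProducer(FlinkKafkaProducer<byte[], byte[]> producer) {
	availableTransactionalIds.add(producer.getTransactionalId()); // assumed accessor
	producer.close();
}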
@Override
protected void recoverAndAbort(KafkaTransactionState transaction) {
	switch (semantic) {
		case EXACTLY_ONCE:
			try (FlinkKafkaProducer<byte[], byte[]> producer =
					initTransactionalProducer(transaction.transactionalId, false)) {
				producer.initTransactions();
			}
			break;
		case AT_LEAST_ONCE:
		case NONE:
			break;
		default:
			throw new UnsupportedOperationException("Not implemented semantic");
	}
}