/** Failure path: surface the async error to the Kafka-style callback, wrapped as an IOException. */
@Override
public void onFailure(Throwable cause) {
    callback.onCompletion(null, new IOException(cause));
}
/**
 * Completes the pending produce result and notifies the per-record callback.
 * A null {@code e} means success (report the real offset); otherwise -1 is reported.
 */
public void complete(RuntimeException e) {
    final long reportedOffset = (e == null) ? offset : -1L;
    result.set(reportedOffset, RecordBatch.NO_TIMESTAMP, e);
    if (callback != null) {
        // Success delivers (metadata, null); failure delivers (null, e) — when e is
        // null the second argument below is itself null, so one call covers both.
        callback.onCompletion(e == null ? metadata : null, e);
    }
    result.done();
}
}
/** Success path: acknowledge the DL write as a Kafka RecordMetadata on partition 0. */
@Override
public void onSuccess(DLSN value) {
    // Offsets are not meaningful here, so -1 placeholders are reported.
    final TopicPartition partition = new TopicPartition(topic, 0);
    callback.onCompletion(new RecordMetadata(partition, -1L, -1L), null);
}
});
/**
 * Interceptor-aware completion hook: guarantees interceptors and the user callback
 * always observe a non-null RecordMetadata, synthesizing a placeholder on failure.
 */
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (metadata == null) {
        // Failed sends carry no broker metadata; build a -1-valued placeholder.
        metadata = new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1);
    }
    this.interceptors.onAcknowledgement(metadata, exception);
    if (this.userCallback != null) {
        this.userCallback.onCompletion(metadata, exception);
    }
}
}
private void completeFutureAndFireCallbacks(long baseOffset, long logAppendTime, RuntimeException exception) { // Set the future before invoking the callbacks as we rely on its state for the `onCompletion` call produceFuture.set(baseOffset, logAppendTime, exception); // execute callbacks for (Thunk thunk : thunks) { try { if (exception == null) { RecordMetadata metadata = thunk.future.value(); if (thunk.callback != null) thunk.callback.onCompletion(metadata, null); } else { if (thunk.callback != null) thunk.callback.onCompletion(null, exception); } } catch (Exception e) { log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, e); } } produceFuture.done(); }
/** Mock send: completes the callback immediately with fixed metadata and returns a resolved future. */
@Override
public Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
    final TopicPartition partition = new TopicPartition(record.topic(), 0);
    final RecordMetadata metadata = new RecordMetadata(partition, -1L, -1L, 1L, 2L, 3, 4);
    if (callback != null) {
        callback.onCompletion(metadata, null);
    }
    return Futures.immediateFuture(metadata);
}
/** Runs the delegate callback inside the traced span's scope, then always lets the superclass finish. */
@Override
public void onCompletion(RecordMetadata metadata, @Nullable Exception exception) {
    try (Scope scope = current.maybeScope(span.context())) {
        delegate.onCompletion(metadata, exception);
    } finally {
        // Executed even if the delegate throws, so the span is always completed.
        super.onCompletion(metadata, exception);
    }
}
}
/**
 * Sends the record unless the error manager dictates an injected failure for this value,
 * in which case the callback is completed exceptionally and a null future is returned.
 *
 * Fix: {@code errorManager.nextError(...)} was previously invoked twice per send — once
 * into an unused local and again in the condition. If the error manager is stateful
 * (e.g. a scripted error sequence) that consumed two entries per record; it is now
 * queried exactly once.
 */
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, final Callback callback) {
    final boolean error = errorManager.nextError(record.value());
    if (error) {
        final Exception e = new Exception();
        callback.onCompletion(null, e);
        return nullFuture;
    }
    return super.send(record, callback);
}
// Complete the three in-flight sends out of band: the 1st and 3rd succeed, while
// the 2nd reports an async failure that the producer is expected to surface later.
producer.getPendingCallbacks().get(0).onCompletion(null, null); producer.getPendingCallbacks().get(1).onCompletion(null, new Exception("artificial async failure for 2nd message")); producer.getPendingCallbacks().get(2).onCompletion(null, null);
// Synchronous send failure: log it, notify the per-record callback, bump the
// error sensor, and let interceptors observe the failed record.
log.debug("Exception occurred during message send:", e); if (callback != null) callback.onCompletion(null, e); this.errors.record(); this.interceptors.onSendError(record, tp, e);
/** * Test ensuring that if a snapshot call happens right after an async exception is caught, it should be rethrown. */ @Test public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null); OneInputStreamOperatorTestHarness<String, Object> testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer)); testHarness.open(); testHarness.processElement(new StreamRecord<>("msg-1")); // let the message request return an async exception producer.getPendingCallbacks().get(0).onCompletion(null, new Exception("artificial async exception")); try { testHarness.snapshot(123L, 123L); } catch (Exception e) { // the next invoke should rethrow the async exception Assert.assertTrue(e.getCause().getMessage().contains("artificial async exception")); // test succeeded return; } Assert.fail(); }
/** * Test ensuring that if an invoke call happens right after an async exception is caught, it should be rethrown. */ @Test public void testAsyncErrorRethrownOnInvoke() throws Throwable { final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null); OneInputStreamOperatorTestHarness<String, Object> testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer)); testHarness.open(); testHarness.processElement(new StreamRecord<>("msg-1")); // let the message request return an async exception producer.getPendingCallbacks().get(0).onCompletion(null, new Exception("artificial async exception")); try { testHarness.processElement(new StreamRecord<>("msg-2")); } catch (Exception e) { // the next invoke should rethrow the async exception Assert.assertTrue(e.getCause().getMessage().contains("artificial async exception")); // test succeeded return; } Assert.fail(); }
// Drain the three pending records one at a time, checking after each completion
// that the snapshot thread stays blocked until every callback has fired and that
// the pending-record count decreases accordingly.
producer.getPendingCallbacks().get(0).onCompletion(null, null); Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive()); Assert.assertEquals(2, producer.getPendingSize()); producer.getPendingCallbacks().get(1).onCompletion(null, null); Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive()); Assert.assertEquals(1, producer.getPendingSize()); producer.getPendingCallbacks().get(2).onCompletion(null, null); Assert.assertEquals(0, producer.getPendingSize());
/**
 * Verifies that publishEvents reports failure when the producer's send callback
 * completes with a (timeout) exception.
 */
@Test
public void sendFailsReturnsFalse() {
    KafkaProducer producer = mock(KafkaProducer.class);
    publisher.realProducer = producer;
    RecordMetadata metadata = new RecordMetadata(null, 0, 0, 0, Long.valueOf(0), 0, 0);
    ArgumentCaptor<Callback> captor = ArgumentCaptor.forClass(Callback.class);
    when(producer.send(any(), captor.capture())).then(
        invocation -> {
            // Simulate the broker timing out the request.
            captor.getValue().onCompletion(metadata, new TimeoutException("error"));
            // Fix: parameterize the previously raw CompletableFuture.
            return new CompletableFuture<RecordMetadata>();
        });
    String[] events = { "test" };
    assertThat(publisher.publishEvents(false, null, events)).isFalse();
}
/**
 * Completes the pending result for this partition and notifies the per-record
 * callback. A null {@code e} means success (real offset reported); otherwise -1.
 */
public void complete(RuntimeException e) {
    final long reportedOffset = (e == null) ? offset : -1L;
    result.done(topicPartition, reportedOffset, e);
    if (callback != null) {
        // Success delivers (metadata, null); failure delivers (null, e) — when e is
        // null the second argument below is itself null, so one call covers both.
        callback.onCompletion(e == null ? metadata : null, e);
    }
}
}
/** Failure-path task: deliver the configured exception to the callback, then return the record's metadata. */
@Override
public RecordMetadata call() throws Exception {
    callback.onCompletion(null, exception);
    return getRecordMetadata(record);
}
});
@Override public void onCompletion(RecordMetadata recordMetadata, Exception e) { // The callback will only be fired once. _acksReceived++; // Set exception to be the first exception if (e != null && _exception == null) { _exception = e; } if (e == null) { _segmentsSent++; } // Invoke user callback when receive the last callback of the large message. if (_acksReceived == _numSegments) { if (_exception == null) { _userCallback.onCompletion(recordMetadata, null); } else { _userCallback.onCompletion( null, new LargeMessageSendException(String.format("Error when sending large message. Sent %d of %d segments.", _segmentsSent, _numSegments), _exception) ); } } } }
/** Success-path task: count the send, acknowledge the callback with metadata, and return it. */
@Override
public RecordMetadata call() throws Exception {
    msgsSent.incrementAndGet();
    final RecordMetadata metadata = getRecordMetadata(record);
    callback.onCompletion(metadata, null);
    return metadata;
}
});
/**
 * Completes the per-record callback carried as correlation metadata, counts down
 * the latch, and returns the record's metadata.
 */
private RecordMetadata processResult(CountDownLatch latch, SenderResult<Callback> result) {
    final RecordMetadata recordMetadata = result.recordMetadata();
    result.correlationMetadata().onCompletion(recordMetadata, null);
    latch.countDown();
    return recordMetadata;
}
@Override public void onCompletion(RecordMetadata recordMetadata, Exception e) { if (e != null) { LOG.error(String.format("Unable to send event %s with message id %s to kafka topic %s", _auditToken == null ? "[No Custom Info]" : _auditToken, (_messageId != null) ? _messageId.toString().replaceAll("-", "") : "[none]", _topic), e); // Audit the failure. _auditor.record(_auditToken, _topic, _timestamp, 1L, _serializedSize.longValue(), AuditType.FAILURE); } else { // Audit the success. _auditor.record(_auditToken, _topic, _timestamp, 1L, _serializedSize.longValue(), AuditType.SUCCESS); } if (_userCallback != null) { _userCallback.onCompletion(recordMetadata, e); } } }