/**
 * Forces the next commit attempt to fail by registering a pending offset
 * against a partition that does not exist on the broker.
 */
public void injectCommitError() {
    fluxOffsetMap()
        .put(NON_EXISTENT_PARTITION, 1L);
}
// NOTE(review): this line is a truncated fragment — the doOnNext lambda and the
// `if (failureExpected)` branch are never closed here; the remainder presumably
// lives outside this view. Left byte-identical.
// NOTE(review): `testReceiver.waitForClose()` is invoked twice in a row — looks
// like an accidental duplicate; confirm against the original test before removing.
TestableReceiver testReceiver = new TestableReceiver(receiver); Semaphore onNextSemaphore = new Semaphore(0); Flux<ReceiverRecord<Integer, String>> flux = receiver.receive() .doOnSubscribe(s -> { if (retriable) testReceiver.injectCommitEventForRetriableException(); }) .doOnNext(record -> { int receiveCount = count(receivedMessages); if (receiveCount == errorInjectIndex) testReceiver.injectCommitError(); if (receiveCount >= errorClearIndex) testReceiver.clearCommitError(); record.receiverOffset().acknowledge(); onNextSemaphore.release(); assertEquals(failureExpected, failed.get()); if (failureExpected) { testReceiver.waitForClose(); testReceiver.waitForClose(); clearReceivedMessages();
/**
 * Builds a receive pipeline that injects a commit failure on every record and
 * retries the manual commit until {@code failureCount} failures have occurred.
 *
 * @param retriable        when true, also injects a retriable commit-event exception
 * @param failureCount     number of commit failures to tolerate before clearing the error
 * @param receiveSemaphore released once per record received
 * @param successSemaphore released when a commit finally succeeds
 * @param failureSemaphore released on each failed commit attempt
 * @return the instrumented record flux
 */
public Flux<ReceiverRecord<Integer, String>> receiveWithManualCommitFailures(boolean retriable, int failureCount, Semaphore receiveSemaphore, Semaphore successSemaphore, Semaphore failureSemaphore) {
    AtomicInteger attempts = new AtomicInteger();
    if (retriable) {
        injectCommitEventForRetriableException();
    }
    return kafkaReceiver.receive()
        .doOnSubscribe(subscription -> {
            // Re-inject on subscribe so the error survives a consumer restart.
            if (retriable) {
                injectCommitEventForRetriableException();
            }
        })
        .doOnNext(rec -> {
            try {
                receiveSemaphore.release();
                // Break commits before attempting one, so the retry path is exercised.
                injectCommitError();
                Predicate<Throwable> shouldRetry = cause -> {
                    // After the expected number of failures, let commits succeed again.
                    if (attempts.incrementAndGet() == failureCount) {
                        clearCommitError();
                    }
                    return attempts.get() <= failureCount + 1;
                };
                rec.receiverOffset().commit()
                    .doOnError(cause -> failureSemaphore.release())
                    .doOnSuccess(v -> successSemaphore.release())
                    .retry(shouldRetry)
                    .subscribe();
            } catch (Exception e) {
                fail("Unexpected exception: " + e);
            }
        })
        .doOnError(e -> log.error("KafkaFlux exception", e));
}
/**
 * Verifies that a manual commit eventually succeeds after the injected number
 * of commit failures, counting both the failures and the final success.
 *
 * @param retriableException whether the injected failure is a retriable one
 */
private void testManualCommitRetry(boolean retriableException) throws Exception {
    int count = 1;
    int failureCount = 2;
    Semaphore receiveSemaphore = new Semaphore(1 - count);
    Semaphore commitSuccessSemaphore = new Semaphore(0);
    Semaphore commitFailureSemaphore = new Semaphore(0);
    // Disable interval/batch auto-commit so only the explicit commits run.
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);

    KafkaReceiver<Integer, String> receiver = createReceiver();
    TestableReceiver testableReceiver = new TestableReceiver(receiver);
    Flux<? extends ConsumerRecord<Integer, String>> flux =
        testableReceiver.receiveWithManualCommitFailures(
            retriableException, failureCount, receiveSemaphore, commitSuccessSemaphore, commitFailureSemaphore);

    subscribe(flux, new CountDownLatch(count));
    sendMessages(1, count);

    assertTrue("Did not receive messages", receiveSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertTrue("Commit did not succeed after retry", commitSuccessSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertEquals(failureCount, commitFailureSemaphore.availablePermits());
}
private void restartAndCheck(KafkaReceiver<Integer, String> receiver, int sendStartIndex, int sendCount, int maxRedelivered) throws Exception { Thread.sleep(500); // Give a little time for commits to complete before terminating abruptly new TestableReceiver(receiver).terminate(); cancelSubscriptions(true); clearReceivedMessages(); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAtmostOnce(); sendReceiveWithRedelivery(kafkaFlux2, sendStartIndex, sendCount, 0, maxRedelivered); clearReceivedMessages(); cancelSubscriptions(false); }
/**
 * Creates a fresh receiver, wraps its record flux, and returns the pair as a
 * {@link TestableReceiver} for fault-injection in tests.
 */
public TestableReceiver createTestFlux() {
    KafkaReceiver<Integer, String> freshReceiver = createReceiver();
    return new TestableReceiver(freshReceiver, freshReceiver.receive());
}
/**
 * Verifies that a manual commit against a non-existent partition reports an
 * error to the commit Mono and never signals success.
 *
 * <p>Fix: the local {@code offset} was declared and used only for
 * {@code setNonExistentPartition}; the subsequent acknowledge/commit calls
 * re-fetched {@code record.receiverOffset()} twice. All three operations now
 * act on the single declared local for consistency.
 */
@Test
public void manualCommitFailure() throws Exception {
    int count = 1;
    AtomicBoolean commitSuccess = new AtomicBoolean();
    Semaphore commitErrorSemaphore = new Semaphore(0);
    // Disable interval/batch auto-commit so only the explicit commit runs.
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
        .doOnNext(record -> {
            ReceiverOffset offset = record.receiverOffset();
            // Point the offset at a partition that does not exist so commit must fail.
            TestableReceiver.setNonExistentPartition(offset);
            offset.acknowledge();
            offset.commit()
                .doOnError(e -> commitErrorSemaphore.release())
                .doOnSuccess(i -> commitSuccess.set(true))
                .subscribe();
        })
        .doOnError(e -> log.error("KafkaFlux exception", e));

    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(1, count);
    assertTrue("Commit error callback not invoked", commitErrorSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertFalse("Commit of non existent topic succeeded", commitSuccess.get());
}
/**
 * Undoes {@link #injectCommitError()} by removing the pending offset for the
 * non-existent partition, allowing subsequent commits to succeed.
 */
public void clearCommitError() {
    fluxOffsetMap()
        .remove(NON_EXISTENT_PARTITION);
}