/**
 * Builds the {@link EmbeddedEventStore} configured through this Builder instance.
 *
 * @return the fully initialized {@link EmbeddedEventStore}
 */
public EmbeddedEventStore build() {
    EmbeddedEventStore eventStore = new EmbeddedEventStore(this);
    return eventStore;
}
/**
 * Registers an {@link EmbeddedEventStore} backed by the given {@link EventStorageEngine}.
 * <p>
 * The bean is deliberately named {@code eventBus} (it replaces any missing {@link EventBus})
 * while also being injectable under the {@code eventStore} qualifier.
 *
 * @param storageEngine the storage engine backing the event store
 * @param configuration the Axon configuration supplying the message monitor
 * @return the configured {@link EmbeddedEventStore}
 */
@Qualifier("eventStore")
@Bean(name = "eventBus")
@ConditionalOnMissingBean(EventBus.class)
@ConditionalOnBean(EventStorageEngine.class)
public EmbeddedEventStore eventStore(EventStorageEngine storageEngine, AxonConfiguration configuration) {
    MessageMonitor<Message<?>> monitor = configuration.messageMonitor(EventStore.class, "eventStore");
    return EmbeddedEventStore.builder()
                             .storageEngine(storageEngine)
                             .messageMonitor(monitor)
                             .build();
}
/**
 * Opens a stream of events starting after the given tracking token.
 * <p>
 * When the requested position is still present in the shared event cache and optimized
 * consumption is enabled, the consumer tails the cache together with other consumers;
 * otherwise it reads privately from the backing storage engine.
 */
@Override
public TrackingEventStream openStream(TrackingToken trackingToken) {
    Node node = findNode(trackingToken);
    if (node == null || !optimizeEventConsumption) {
        // Position not cached (or optimization disabled): fetch directly from storage.
        return new EventConsumer(trackingToken);
    }
    EventConsumer consumer = new EventConsumer(node);
    tailingConsumers.add(consumer);
    return consumer;
}
/**
 * Verifies that events stored concurrently by multiple writer threads (some of which roll
 * back) can all be read back through a blocking tracking stream, and that exactly the
 * committed number of events is observed.
 */
@Test(timeout = 10000)
public void testInsertConcurrentlyAndReadUsingBlockingStreams() throws Exception {
    int threadCount = 10, eventsPerThread = 100, inverseRollbackRate = 2,
            rollbacksPerThread = (eventsPerThread + inverseRollbackRate - 1) / inverseRollbackRate;
    int expectedEventCount = threadCount * eventsPerThread - rollbacksPerThread * threadCount;
    Thread[] writerThreads = storeEvents(threadCount, eventsPerThread, inverseRollbackRate);
    EmbeddedEventStore embeddedEventStore = EmbeddedEventStore.builder().storageEngine(testSubject).build();
    TrackingEventStream readEvents = embeddedEventStore.openStream(null);
    int counter = 0;
    while (counter < expectedEventCount) {
        // Consume the event. The original loop only polled hasNextAvailable() without ever
        // consuming, so a single available event was counted repeatedly (and the loop
        // busy-spun); nextAvailable() blocks until a fresh event is published.
        readEvents.nextAvailable();
        counter++;
    }
    for (Thread thread : writerThreads) {
        thread.join();
    }
    assertEquals("The actually read list of events is shorter than the expected value",
                 expectedEventCount, counter);
}
TimeUnit timeUnit) throws InterruptedException { if (privateIterator == null) { privateStream = storageEngine().readEvents(lastToken, false); privateIterator = privateStream.iterator();
/**
 * Publishes the given events to the event store under test as a single batch.
 *
 * @param events the events to publish
 */
protected void storeEvents(EventMessage<?>... events) {
    eventStore.publish(events);
}
/**
 * Verifies that the {@link TrackingEventProcessor} retries opening its event stream after the
 * first attempt fails: the first {@code openStream} call throws, the second delegates to the
 * real event bus, and all 5 previously published events are eventually handled.
 */
@Test
@DirtiesContext
public void testProcessorGoesToRetryModeWhenOpenStreamFails() throws Exception {
    // Spy the bus so openStream can be stubbed while publish still works for real.
    eventBus = spy(eventBus);

    tokenStore = new InMemoryTokenStore();
    eventBus.publish(createEvents(5));
    // First openStream call fails, forcing the processor into retry mode; the retry succeeds.
    when(eventBus.openStream(any())).thenThrow(new MockException()).thenCallRealMethod();

    List<EventMessage<?>> ackedEvents = new ArrayList<>();
    CountDownLatch countDownLatch = new CountDownLatch(5);
    doAnswer(invocation -> {
        ackedEvents.add((EventMessage<?>) invocation.getArguments()[0]);
        countDownLatch.countDown();
        return null;
    }).when(mockHandler).handle(any());
    testSubject = TrackingEventProcessor.builder()
                                        .name("test")
                                        .eventHandlerInvoker(eventHandlerInvoker)
                                        .messageSource(eventBus)
                                        .tokenStore(tokenStore)
                                        .transactionManager(NoTransactionManager.INSTANCE)
                                        .build();
    testSubject.start();
    // give it a bit of time to start
    // NOTE(review): likely redundant given the 10s latch await below — confirm before removing.
    Thread.sleep(200);
    assertTrue("Expected 5 invocations on Event Handler by now",
               countDownLatch.await(10, TimeUnit.SECONDS));
    assertEquals(5, ackedEvents.size());
    // Two openStream calls prove the failed attempt was retried.
    verify(eventBus, times(2)).openStream(any());
}
@SuppressWarnings("unchecked") public void testEventsWithTheSameTokenAreProcessedInTheSameBatch() throws Exception { eventBus.shutDown(); List<TrackedEventMessage<?>> events = createEvents(2).stream().map(event -> asTrackedEventMessage(event, trackingToken)).collect(toList()); when(eventBus.openStream(null)).thenReturn(trackingEventStreamOf(events.iterator())); testSubject = TrackingEventProcessor.builder() .name("test")
/**
 * Releases benchmark resources: stops the worker pool, then the event processor,
 * then the event store.
 */
protected void cleanUpAfterBenchmark() {
    executorService.shutdown();
    eventProcessor.shutDown();
    eventStore.shutDown();
}
/**
 * Initializes an {@link EmbeddedEventStore} with given {@code storageEngine} and {@code monitor} and custom
 * settings. The event-consumption optimization flag is taken from a system property (see
 * {@code fetchEventConsumptionSystemPropertyOrDefault()}).
 *
 * @param storageEngine the storage engine to use
 * @param monitor       the metrics monitor that tracks how many events are ingested by the event store
 * @param cachedEvents  the maximum number of events in the cache that is shared between the streams of tracking
 *                      event processors
 * @param fetchDelay    the time to wait before fetching new events from the backing storage engine while tracking
 *                      after a previous stream was fetched and read. Note that this only applies to situations in
 *                      which no events from the current application have meanwhile been committed. If the current
 *                      application commits events then those events are fetched without delay.
 * @param cleanupDelay  the delay between two clean ups of lagging event processors. An event processor is lagging
 *                      behind and removed from the set of processors that track cached events if the oldest event
 *                      in the cache is newer than the last processed event of the event processor. Once removed the
 *                      processor will be independently fetching directly from the event storage engine until it has
 *                      caught up again. Event processors will not notice this change during tracking (i.e. the
 *                      stream is not closed when an event processor falls behind and is removed).
 * @param timeUnit      time unit for fetch and clean up delay
 * @param threadFactory the factory to create threads with
 */
public EmbeddedEventStore(EventStorageEngine storageEngine,
                          MessageMonitor<? super EventMessage<?>> monitor,
                          int cachedEvents,
                          long fetchDelay,
                          long cleanupDelay,
                          TimeUnit timeUnit,
                          ThreadFactory threadFactory) {
    // Delegate to the full constructor, resolving the consumption-optimization flag
    // from the system property default.
    this(
            storageEngine, monitor, cachedEvents, fetchDelay, cleanupDelay, timeUnit, threadFactory,
            fetchEventConsumptionSystemPropertyOrDefault()
    );
}
int expectedEventCount = threadCount * eventsPerThread - rollbacksPerThread * threadCount; Thread[] writerThreads = storeEvents(threadCount, eventsPerThread, inverseRollbackRate); EmbeddedEventStore embeddedEventStore = EmbeddedEventStore.builder() .storageEngine(testSubject) .cachedEvents(20) .cleanupDelay(1000) .build(); TrackingEventStream readEvents = embeddedEventStore.openStream(null); int counter = 0; while (counter < expectedEventCount) {
/**
 * Producer-side fetch: reads new events from the storage engine and appends them as
 * {@code Node}s to the shared cache consumed by tailing consumers.
 * <p>
 * Exceptions are logged rather than rethrown so the producer keeps running and can retry
 * on the next fetch cycle.
 *
 * @return {@code true} if at least one new event was appended to the cache
 */
private boolean fetchData() {
    // Remember the tail so we can detect whether this fetch appended anything.
    Node currentNewest = newest;
    // Only fetch while someone is actually tailing the cache.
    if (!tailingConsumers.isEmpty()) {
        try {
            eventStream = storageEngine().readEvents(lastToken(), true);
            eventStream.forEach(event -> {
                // Link the event into the singly linked cache list.
                Node node = new Node(nextIndex(), lastToken(), event);
                if (newest != null) {
                    newest.next = node;
                }
                newest = node;
                if (oldest == null) {
                    oldest = node;
                }
                // Wake consumers and bound the cache after EVERY event, not per batch,
                // so consumers see events as soon as possible.
                notifyConsumers();
                trimCache();
            });
        } catch (Exception e) {
            // Best-effort: log and keep the producer alive; the next cycle retries.
            logger.error("Failed to read events from the underlying event storage", e);
        }
    }
    return !Objects.equals(newest, currentNewest);
}
@Test public void testPublishedEventsGetPassedToHandler() throws Exception { CountDownLatch countDownLatch = new CountDownLatch(2); doAnswer(invocation -> { countDownLatch.countDown(); return null; }).when(mockHandler).handle(any()); testSubject.start(); // give it a bit of time to start Thread.sleep(200); eventBus.publish(createEvents(2)); assertTrue("Expected Handler to have received 2 published events", countDownLatch.await(5, TimeUnit.SECONDS)); }
/**
 * Multi-threaded variant of the retry-mode test: the first {@code openStream} call throws,
 * the retry delegates to the real bus, and all 5 events are handled across worker threads.
 */
@Test
public void testMultiThreadProcessorGoesToRetryModeWhenOpenStreamFails() throws Exception {
    // Spy the bus so openStream can be stubbed while publish still works for real.
    eventBus = spy(eventBus);

    tokenStore = new InMemoryTokenStore();
    eventBus.publish(createEvents(5));
    // First openStream call fails, forcing retry mode; the second call succeeds.
    when(eventBus.openStream(any())).thenThrow(new MockException()).thenCallRealMethod();

    // Records which worker thread handled each event, so we can assert the total.
    final AcknowledgeByThread acknowledgeByThread = new AcknowledgeByThread();

    CountDownLatch countDownLatch = new CountDownLatch(5);
    doAnswer(invocation -> {
        acknowledgeByThread.addMessage(Thread.currentThread(), (EventMessage<?>) invocation.getArguments()[0]);
        countDownLatch.countDown();
        return null;
    }).when(mockHandler).handle(any());

    testSubject = TrackingEventProcessor.builder()
                                        .name("test")
                                        .eventHandlerInvoker(eventHandlerInvoker)
                                        .messageSource(eventBus)
                                        .tokenStore(tokenStore)
                                        .transactionManager(NoTransactionManager.INSTANCE)
                                        .build();
    testSubject.start();

    assertTrue("Expected 5 invocations on Event Handler by now", countDownLatch.await(10, SECONDS));
    acknowledgeByThread.assertEventsAddUpTo(5);
    // Two openStream calls prove the failed attempt was retried.
    verify(eventBus, times(2)).openStream(any());
}
/**
 * Shuts down the processor before the event bus, so no consumer is left reading
 * from an already-closed stream.
 */
@After
public void tearDown() {
    testSubject.shutDown();
    eventBus.shutDown();
}
/**
 * Configures an {@link EmbeddedEventStore} as the event store, using the storage engine
 * produced by the given builder function and the configuration's message-monitor factory.
 * The store is shut down together with the configuration.
 *
 * @param storageEngineBuilder builds the {@link EventStorageEngine} from the configuration
 * @return this Configurer, for chaining
 */
@Override
public Configurer configureEmbeddedEventStore(Function<Configuration, EventStorageEngine> storageEngineBuilder) {
    return configureEventStore(c -> {
        EmbeddedEventStore eventStore =
                EmbeddedEventStore.builder()
                                  .storageEngine(storageEngineBuilder.apply(c))
                                  .messageMonitor(messageMonitorFactoryComponent.get()
                                                                                .apply(EmbeddedEventStore.class,
                                                                                       "eventStore"))
                                  .build();
        // Ensure the store's producer threads stop when the configuration shuts down.
        c.onShutdown(eventStore::shutDown);
        return eventStore;
    });
}
/**
 * Initializes a {@link EmbeddedEventStore} as specified through this Builder.
 *
 * @return a {@link EmbeddedEventStore} as specified through this Builder
 */
public EmbeddedEventStore build() {
    return new EmbeddedEventStore(this);
}
/**
 * Peeks the next event from this consumer's private storage-engine stream.
 * <p>
 * Three outcomes:
 * <ol>
 *     <li>the private stream has an event ready — return it and advance the token;</li>
 *     <li>it is exhausted and switching is allowed — close the private stream, join the
 *         tailing consumers on the shared cache, and re-peek with the remaining timeout;</li>
 *     <li>otherwise wait (bounded) for new events to become available.</li>
 * </ol>
 *
 * @param allowSwitchToTailingConsumer whether this consumer may switch to tailing the shared cache
 * @param timeout                      maximum time to wait for an event
 * @param timeUnit                     unit of {@code timeout}
 * @return the next tracked event, or {@code null} if none became available in time
 * @throws InterruptedException if interrupted while waiting
 */
private TrackedEventMessage<?> peekPrivateStream(boolean allowSwitchToTailingConsumer, int timeout,
                                                 TimeUnit timeUnit) throws InterruptedException {
    // Lazily open the private stream from the last consumed token.
    if (privateIterator == null) {
        privateStream = storageEngine().readEvents(lastToken, false);
        privateIterator = privateStream.iterator();
    }
    if (privateIterator.hasNext()) {
        TrackedEventMessage<?> nextEvent = privateIterator.next();
        lastToken = nextEvent.trackingToken();
        return nextEvent;
    } else if (allowSwitchToTailingConsumer) {
        // Caught up with storage: switch to tailing the shared cache from our position.
        closePrivateStream();
        lastNode = findNode(lastToken);
        tailingConsumers.add(this);
        ensureProducerStarted();
        return timeout > 0 ? peek(timeout, timeUnit) : null;
    } else {
        consumerLock.lock();
        try {
            // Wait for a signal that events may be available, then re-check the iterator;
            // both conditions must hold before consuming.
            if (consumableEventsCondition.await(timeout, timeUnit) && privateIterator.hasNext()) {
                TrackedEventMessage<?> nextEvent = privateIterator.next();
                lastToken = nextEvent.trackingToken();
                return nextEvent;
            }
            return null;
        } finally {
            consumerLock.unlock();
        }
    }
}
@Test public void testHandlerIsInvokedInTransactionScope() throws Exception { CountDownLatch countDownLatch = new CountDownLatch(1); AtomicInteger counter = new AtomicInteger(); AtomicInteger counterAtHandle = new AtomicInteger(); when(mockTransactionManager.startTransaction()).thenAnswer(i -> { counter.incrementAndGet(); return mockTransaction; }); doAnswer(i -> counter.decrementAndGet()).when(mockTransaction).rollback(); doAnswer(i -> counter.decrementAndGet()).when(mockTransaction).commit(); doAnswer(invocation -> { counterAtHandle.set(counter.get()); countDownLatch.countDown(); return null; }).when(mockHandler).handle(any()); testSubject.start(); // give it a bit of time to start Thread.sleep(200); eventBus.publish(createEvents(2)); assertTrue("Expected Handler to have received 2 published events", countDownLatch.await(5, TimeUnit.SECONDS)); assertEquals(1, counterAtHandle.get()); }
/**
 * Verifies that a parallel (2-segment) processor resumes from a previously stored token:
 * with 10 events published and the first event's token already stored, exactly the remaining
 * 9 events are handled, by more than one thread.
 */
@Test
public void testMultiThreadContinueFromPreviousToken() throws Exception {
    tokenStore = spy(new InMemoryTokenStore());
    eventBus.publish(createEvents(10));
    // Simulate prior progress: store the token of the first event for segment 0.
    TrackedEventMessage<?> firstEvent = eventBus.openStream(null).nextAvailable();
    tokenStore.storeToken(firstEvent.trackingToken(), testSubject.getName(), 0);
    assertEquals(firstEvent.trackingToken(), tokenStore.fetchToken(testSubject.getName(), 0));

    // Records which worker thread handled each event.
    final AcknowledgeByThread acknowledgeByThread = new AcknowledgeByThread();
    // 9 = 10 published minus the 1 already covered by the stored token.
    CountDownLatch countDownLatch = new CountDownLatch(9);
    doAnswer(invocation -> {
        acknowledgeByThread.addMessage(Thread.currentThread(), (EventMessage<?>) invocation.getArguments()[0]);
        countDownLatch.countDown();
        return null;
    }).when(mockHandler).handle(any());

    configureProcessor(TrackingEventProcessorConfiguration.forParallelProcessing(2));
    testSubject.start();

    assertTrue("Expected 9 invocations on Event Handler by now, missing " + countDownLatch.getCount(),
               countDownLatch.await(60, SECONDS));
    acknowledgeByThread.assertEventsAckedByMultipleThreads();
    acknowledgeByThread.assertEventsAddUpTo(9);
}