@Override
public void publish(List<? extends EventMessage<?>> events) {
    // Materialize the monitor callbacks eagerly. Keeping this as a lazy Stream is a defect:
    // (a) messageMonitor.onMessageIngested(...) would not run at publication time but only when
    //     a terminal operation is applied, and
    // (b) a Stream is single-use, so if more than one of the consumers registered below runs
    //     (or the success path runs after a callback already consumed it), the second terminal
    //     operation throws IllegalStateException instead of reporting to the monitor.
    List<MessageMonitor.MonitorCallback> ingested =
            events.stream()
                  .map(messageMonitor::onMessageIngested)
                  .collect(java.util.stream.Collectors.toList());
    if (CurrentUnitOfWork.isStarted()) {
        UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
        // Publication is only allowed while neither this Unit of Work nor its root has
        // progressed beyond the PREPARE_COMMIT phase.
        Assert.state(!unitOfWork.phase().isAfter(PREPARE_COMMIT),
                     () -> "It is not allowed to publish events when the current Unit of Work has already been " +
                             "committed. Please start a new Unit of Work before publishing events.");
        Assert.state(!unitOfWork.root().phase().isAfter(PREPARE_COMMIT),
                     () -> "It is not allowed to publish events when the root Unit of Work has already been " +
                             "committed.");
        // Report the outcome to the message monitor based on how the Unit of Work ends.
        unitOfWork.afterCommit(u -> ingested.forEach(MessageMonitor.MonitorCallback::reportSuccess));
        unitOfWork.onRollback(uow -> ingested.forEach(
                message -> message.reportFailure(uow.getExecutionResult().getExceptionResult())
        ));
        // Stage the events on the Unit of Work; they are processed during its commit phases.
        eventsQueue(unitOfWork).addAll(events);
    } else {
        // No active Unit of Work: run the publication phases immediately and report the
        // outcome to the monitor directly.
        try {
            prepareCommit(intercept(events));
            commit(events);
            afterCommit(events);
            ingested.forEach(MessageMonitor.MonitorCallback::reportSuccess);
        } catch (Exception e) {
            ingested.forEach(m -> m.reportFailure(e));
            throw e;
        }
    }
}
// NOTE(review): this method appears truncated/garbled by extraction — brace and parenthesis
// counts do not match and several closing tokens are missing (e.g. the bodies registered via
// onCommit/afterCommit are never closed, and `processedItems` is computed but never used in the
// visible code). Restore this block from version control before making any change here.
// Intent, as far as the visible code shows: lazily create a per-Unit-of-Work event queue
// resource under `eventsKey`, and on prepare-commit either hand the queued events to the parent
// Unit of Work's queue (when a parent exists and has not passed PREPARE_COMMIT) or publish them
// through the commit/after-commit phases of the root Unit of Work, removing the resource on
// clean-up.
private List<EventMessage<?>> eventsQueue(UnitOfWork<?> unitOfWork) { return unitOfWork.getOrComputeResource(eventsKey, r -> { unitOfWork.onPrepareCommit(u -> { if (u.parent().isPresent() && !u.parent().get().phase().isAfter(PREPARE_COMMIT)) { eventsQueue(u.parent().get()).addAll(eventQueue); } else { int processedItems = eventQueue.size(); unitOfWork.onCommit(u -> { if (u.parent().isPresent() && !u.root().phase().isAfter(COMMIT)) { u.root().onCommit(w -> doWithEvents(this::commit, eventQueue)); } else { doWithEvents(this::commit, eventQueue); unitOfWork.afterCommit(u -> { if (u.parent().isPresent() && !u.root().phase().isAfter(AFTER_COMMIT)) { u.root().afterCommit(w -> doWithEvents(this::afterCommit, eventQueue)); } else { doWithEvents(this::afterCommit, eventQueue); unitOfWork.onCleanup(u -> u.resources().remove(eventsKey)); return eventQueue; });
@Override
public Object proceed() throws Exception {
    // Hand over to the next interceptor while any remain; once the chain is exhausted,
    // invoke the actual handler with the message of the current Unit of Work.
    return chain.hasNext()
            ? chain.next().handle(unitOfWork, this)
            : handler.handle(unitOfWork.getMessage());
}
}
/**
 * Returns the map of aggregates currently managed by this repository under the given unit of work. Note that the
 * repository keeps the managed aggregates in the root unit of work, to guarantee each Unit of Work works with the
 * state left by the parent unit of work.
 * <p>
 * The returned map is mutable and reflects any changes made during processing.
 *
 * @param uow The unit of work to find the managed aggregates for
 * @return a map with the aggregates managed by this repository in the given unit of work
 */
protected Map<String, A> managedAggregates(UnitOfWork<?> uow) { return uow.root().getOrComputeResource(aggregatesKey, s -> new HashMap<>()); }
private void delegateAfterCommitToParent(UnitOfWork<?> uow) {
    // Bubble the notification up the Unit of Work hierarchy: while a parent exists, defer to
    // its after-commit phase; only at the outermost Unit of Work is the AFTER_COMMIT phase
    // actually entered.
    Optional<UnitOfWork<?>> parentUnitOfWork = uow.parent();
    if (!parentUnitOfWork.isPresent()) {
        changePhase(Phase.AFTER_COMMIT);
        return;
    }
    parentUnitOfWork.get().afterCommit(this::delegateAfterCommitToParent);
}
@Override
public void handle(List<? extends EventMessage<?>> events,
                   Consumer<List<? extends EventMessage<?>>> processor) {
    // Without an active Unit of Work the events can be scheduled straight away.
    if (!CurrentUnitOfWork.isStarted()) {
        schedule(events, processor);
        return;
    }
    // Collect events in a resource on the root Unit of Work, so everything published during its
    // lifecycle is scheduled as one batch once the root Unit of Work has been committed.
    UnitOfWork<?> rootUnitOfWork = CurrentUnitOfWork.get().root();
    List<EventMessage<?>> pendingEvents = rootUnitOfWork.getOrComputeResource(scheduledEventsKey, key -> {
        List<EventMessage<?>> collected = new ArrayList<>();
        rootUnitOfWork.afterCommit(uow -> schedule(collected, processor));
        return collected;
    });
    pendingEvents.addAll(events);
}
// NOTE(review): this method appears truncated/garbled by extraction — the early-return branch
// is never closed, `uow` is used without a visible declaration, the wrapping call that should
// assign `connection` (presumably a ConnectionWrapperFactory.wrap(...) around the
// UoWAttachedConnectionImpl) is missing, and the onCommit/onCleanup/onRollback bodies are all
// cut off mid-statement. Restore this block from version control before making any change here.
// Intent, as far as the visible code shows: reuse a single JDBC Connection per root Unit of
// Work (stored under CONNECTION_RESOURCE_NAME), creating and registering it on first use, and
// committing/closing/rolling it back in step with the Unit of Work lifecycle; bypass the Unit
// of Work entirely when none is active or it has already passed PREPARE_COMMIT.
@Override public Connection getConnection() throws SQLException { if (!CurrentUnitOfWork.isStarted() || CurrentUnitOfWork.get().phase().isAfter(UnitOfWork.Phase.PREPARE_COMMIT)) { return delegate.getConnection(); Connection connection = uow.root().getResource(CONNECTION_RESOURCE_NAME); if (connection == null || connection.isClosed()) { final Connection delegateConnection = delegate.getConnection(); new UoWAttachedConnectionImpl(delegateConnection), new ConnectionWrapperFactory.NoOpCloseHandler()); uow.root().resources().put(CONNECTION_RESOURCE_NAME, connection); uow.onCommit(u -> { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); try { if (cx instanceof UoWAttachedConnection) { uow.onCleanup(u -> { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); JdbcUtils.closeQuietly(cx); if (cx instanceof UoWAttachedConnection) { uow.onRollback(u -> { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); try { if (!cx.isClosed() && !cx.getAutoCommit()) {
@Override
protected void appendEvents(List<? extends EventMessage<?>> events, Serializer serializer) {
    // When a Unit of Work is active, share one append-transaction per root Unit of Work and tie
    // its commit/rollback to the Unit of Work lifecycle; otherwise create a one-shot transaction
    // that is committed immediately after the events have been appended.
    AppendEventTransaction transaction;
    if (CurrentUnitOfWork.isStarted()) {
        transaction = CurrentUnitOfWork.get().root().getOrComputeResource(APPEND_EVENT_TRANSACTION, key -> {
            AppendEventTransaction newTransaction = eventStoreClient.createAppendEventConnection();
            CurrentUnitOfWork.get().root().onRollback(
                    uow -> newTransaction.rollback(uow.getExecutionResult().getExceptionResult())
            );
            CurrentUnitOfWork.get().root().onCommit(uow -> commit(newTransaction));
            return newTransaction;
        });
    } else {
        transaction = eventStoreClient.createAppendEventConnection();
    }
    events.forEach(eventMessage -> transaction.append(map(eventMessage, serializer)));
    if (!CurrentUnitOfWork.isStarted()) {
        commit(transaction);
    }
}
/**
 * Binds a transaction from the given {@code transactionManager} to the lifecycle of this Unit of
 * Work: the transaction is committed when this Unit of Work commits and rolled back when it is
 * rolled back. Any failure while starting the transaction (or registering the lifecycle
 * callbacks) causes this Unit of Work to be rolled back, after which the failure is rethrown.
 *
 * @param transactionManager The Transaction Manager to create, commit and/or rollback the transaction
 */
default void attachTransaction(TransactionManager transactionManager) {
    try {
        final Transaction transaction = transactionManager.startTransaction();
        onCommit(uow -> transaction.commit());
        onRollback(uow -> transaction.rollback());
    } catch (Throwable t) {
        // Failure to set up the transaction must fail the whole Unit of Work.
        rollback(t);
        throw t;
    }
}
/**
 * Creates a new aggregate instance through the delegate while tracking the rollback state of
 * the current Unit of Work in the {@code rolledBack} field.
 *
 * @param factoryMethod the factory producing the aggregate root instance
 * @return the aggregate created by the delegate
 * @throws Exception any exception thrown by the factory method or the delegate
 */
@Override
public Aggregate<T> newInstance(Callable<T> factoryMethod) throws Exception {
    // Register the rollback marker before creating the instance, so a failure during creation
    // still flags this wrapper as rolled back. NOTE(review): requires an active Unit of Work —
    // CurrentUnitOfWork.get() fails otherwise; confirm callers guarantee this.
    CurrentUnitOfWork.get().onRollback(u -> this.rolledBack = true);
    aggregate = delegate.newInstance(factoryMethod);
    return aggregate;
}
@Override
public Object handle(UnitOfWork<? extends T> unitOfWork, InterceptorChain interceptorChain) throws Exception {
    // Open a transaction before the message is handled and bind its outcome to the Unit of
    // Work: commit together with the Unit of Work, roll back when it is rolled back.
    final Transaction transaction = transactionManager.startTransaction();
    unitOfWork.onCommit(phase -> transaction.commit());
    unitOfWork.onRollback(phase -> transaction.rollback());
    return interceptorChain.proceed();
}
}
/**
 * Creates and registers a new aggregate in the current Unit of Work, ensuring the aggregate is
 * persisted before its events are published and is deregistered again on rollback.
 *
 * @param factoryMethod the factory producing the new aggregate root instance
 * @return the newly created, managed aggregate
 * @throws Exception any exception thrown while creating the aggregate
 */
@Override
public A newInstance(Callable<T> factoryMethod) throws Exception {
    UnitOfWork<?> uow = CurrentUnitOfWork.get();
    AtomicReference<A> aggregateReference = new AtomicReference<>();
    // The prepare-commit hook is registered BEFORE the aggregate is created:
    // a constructor may apply events, and the persistence of an aggregate must take precedence over publishing its events.
    uow.onPrepareCommit(x -> {
        A aggregate = aggregateReference.get();
        // aggregate construction may have failed with an exception. In that case, no action is required on commit
        if (aggregate != null) {
            prepareForCommit(aggregate);
        }
    });
    A aggregate = doCreateNew(factoryMethod);
    aggregateReference.set(aggregate);
    Assert.isTrue(aggregateModel.entityClass().isAssignableFrom(aggregate.rootType()),
                  () -> "Unsuitable aggregate for this repository: wrong type");
    // Register under the identifier in the (root) Unit of Work; duplicate identifiers within
    // one Unit of Work are rejected.
    Map<String, A> aggregates = managedAggregates(uow);
    Assert.isTrue(aggregates.putIfAbsent(aggregate.identifierAsString(), aggregate) == null,
                  () -> "The Unit of Work already has an Aggregate with the same identifier");
    // Undo the registration if the Unit of Work is rolled back.
    uow.onRollback(u -> aggregates.remove(aggregate.identifierAsString()));
    return aggregate;
}
@Test public void testFirstTokenIsStoredWhenUnitOfWorkIsRolledBackOnSecondEvent() throws Exception { List<? extends EventMessage<?>> events = createEvents(2); CountDownLatch countDownLatch = new CountDownLatch(2); testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> { unitOfWork.onCommit(uow -> { if (uow.getMessage().equals(events.get(1))) { throw new MockException(); } }); return interceptorChain.proceed(); })); testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> { unitOfWork.onCleanup(uow -> countDownLatch.countDown()); return interceptorChain.proceed(); })); testSubject.start(); // give it a bit of time to start Thread.sleep(200); eventBus.publish(events); assertTrue("Expected Unit of Work to have reached clean up phase", countDownLatch.await(5, TimeUnit.SECONDS)); verify(tokenStore, atLeastOnce()).storeToken(any(), any(), anyInt()); assertNotNull(tokenStore.fetchToken(testSubject.getName(), 0)); }
/**
 * Binds the pending Kafka sends to the lifecycle of the current Unit of Work: completes them
 * after a successful commit and aborts them when the Unit of Work is rolled back.
 */
private void handleActiveUnitOfWork(Producer<K, V> producer,
                                    Map<Future<RecordMetadata>, ? super EventMessage<?>> futures,
                                    Map<? super EventMessage<?>, MonitorCallback> monitorCallbacks,
                                    ConfirmationMode confirmationMode) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    unitOfWork.afterCommit(uow -> completeKafkaWork(monitorCallbacks, producer, confirmationMode, futures));
    unitOfWork.onRollback(uow -> rollbackKafkaWork(producer, confirmationMode));
}
@Override
public AnnotatedSaga<T> doLoad(String sagaIdentifier) {
    UnitOfWork<?> currentUnitOfWork = CurrentUnitOfWork.get();
    UnitOfWork<?> rootUnitOfWork = currentUnitOfWork.root();
    // Cache the saga for the duration of the root Unit of Work; evict it again on clean-up.
    AnnotatedSaga<T> saga = managedSagas.computeIfAbsent(sagaIdentifier, id -> {
        AnnotatedSaga<T> loaded = doLoadSaga(id);
        if (loaded != null) {
            rootUnitOfWork.onCleanup(uow -> managedSagas.remove(id));
        }
        return loaded;
    });
    // Register a single commit hook per root Unit of Work — the unsaved-saga set guards against
    // duplicate registrations — so the saga is committed together with the Unit of Work.
    if (saga != null && unsavedSagaResource(rootUnitOfWork).add(sagaIdentifier)) {
        currentUnitOfWork.onPrepareCommit(uow -> {
            unsavedSagaResource(rootUnitOfWork).remove(sagaIdentifier);
            commit(saga);
        });
    }
    return saga;
}
/**
 * Either runs the provided {@link Runnable} immediately, or — when
 * {@link SimpleQueryUpdateEmitter#inStartedPhaseOfUnitOfWork} returns {@code true} — collects it
 * in a {@link List} resource on the current {@link UnitOfWork}, to be executed in the
 * {@link UnitOfWork.Phase#AFTER_COMMIT} phase. This ensures emitter calls made from a message
 * handling function only take effect after a successful commit.
 * <p>
 * Deferral only happens while the current UnitOfWork's phase is {@link UnitOfWork.Phase#STARTED}.
 * Users can deliberately bypass the AFTER_COMMIT deferral by retrieving the current UnitOfWork
 * and performing any of the {@link QueryUpdateEmitter} calls in a different phase.
 *
 * @param queryUpdateTask a {@link Runnable} to be ran immediately or as a resource if {@link
 *                        SimpleQueryUpdateEmitter#inStartedPhaseOfUnitOfWork} returns {@code true}
 */
private void runOnAfterCommitOrNow(Runnable queryUpdateTask) {
    if (!inStartedPhaseOfUnitOfWork()) {
        queryUpdateTask.run();
        return;
    }
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    List<Runnable> deferredTasks = unitOfWork.getOrComputeResource(
            this.toString() + QUERY_UPDATE_TASKS_RESOURCE_KEY,
            resourceKey -> {
                List<Runnable> tasks = new ArrayList<>();
                unitOfWork.afterCommit(uow -> tasks.forEach(Runnable::run));
                return tasks;
            }
    );
    deferredTasks.add(queryUpdateTask);
}
/**
 * Returns the set of saga identifiers that may have changed in the context of the given
 * {@code unitOfWork} but have not been saved yet. The set is lazily created as a resource on
 * the Unit of Work and is mutable.
 *
 * @param unitOfWork the unit of work to inspect for unsaved sagas
 * @return set of saga identifiers of unsaved sagas
 */
protected Set<String> unsavedSagaResource(UnitOfWork<?> unitOfWork) {
    return unitOfWork.getOrComputeResource(unsavedSagasResourceKey, resourceKey -> new HashSet<>());
}
/**
 * Runs the given {@code deadlineCall} immediately when no {@link UnitOfWork} is active;
 * otherwise defers it to the active Unit of Work's 'prepare commit' phase. Deferral keeps the
 * DeadlineManager's scheduling in order with the other messages being handled.
 *
 * @param deadlineCall a {@link Runnable} to be executed now or on prepare commit if a {@link UnitOfWork} is active
 */
protected void runOnPrepareCommitOrNow(Runnable deadlineCall) {
    if (!CurrentUnitOfWork.isStarted()) {
        deadlineCall.run();
        return;
    }
    CurrentUnitOfWork.get().onPrepareCommit(uow -> deadlineCall.run());
}
@Test public void testTokenIsNotStoredWhenUnitOfWorkIsRolledBack() throws Exception { CountDownLatch countDownLatch = new CountDownLatch(1); testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> { unitOfWork.onCommit(uow -> { throw new MockException(); }); return interceptorChain.proceed(); })); testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> { unitOfWork.onCleanup(uow -> countDownLatch.countDown()); return interceptorChain.proceed(); })); testSubject.start(); // give it a bit of time to start Thread.sleep(200); eventBus.publish(createEvent()); assertTrue("Expected Unit of Work to have reached clean up phase", countDownLatch.await(5, TimeUnit.SECONDS)); assertNull(tokenStore.fetchToken(testSubject.getName(), 0)); }