/**
 * Returns the map of aggregates currently managed by this repository under the given unit of work. Note that the
 * repository keeps the managed aggregates in the root unit of work, to guarantee each Unit of Work works with the
 * state left by the parent unit of work.
 * <p>
 * The returned map is mutable and reflects any changes made during processing.
 *
 * @param uow The unit of work to find the managed aggregates for
 * @return a map with the aggregates managed by this repository in the given unit of work
 */
protected Map<String, A> managedAggregates(UnitOfWork<?> uow) {
    // Stored as a resource on the root Unit of Work, so nested units share one view of the aggregates.
    return uow.root().getOrComputeResource(aggregatesKey, s -> new HashMap<>());
}
/**
 * Handles the given events, scheduling them for processing. When a Unit of Work is active, all
 * events published during it are buffered on the root Unit of Work and scheduled as one batch
 * after commit; otherwise the events are scheduled immediately.
 *
 * @param events    the events to handle
 * @param processor the consumer that will process the scheduled events
 */
@Override
public void handle(List<? extends EventMessage<?>> events, Consumer<List<? extends EventMessage<?>>> processor) {
    // Without an active Unit of Work there is nothing to defer to: schedule right away.
    if (!CurrentUnitOfWork.isStarted()) {
        schedule(events, processor);
        return;
    }
    // Buffer on the root Unit of Work; the afterCommit hook is registered only on first use,
    // so the whole batch is scheduled exactly once.
    UnitOfWork<?> root = CurrentUnitOfWork.get().root();
    List<EventMessage<?>> buffer = root.getOrComputeResource(scheduledEventsKey, resourceKey -> {
        List<EventMessage<?>> pending = new ArrayList<>();
        root.afterCommit(committedUnit -> schedule(pending, processor));
        return pending;
    });
    buffer.addAll(events);
}
/**
 * Loads the saga with the given identifier, caching it in the managed-sagas map for the duration of
 * the root Unit of Work so repeated loads return the same instance, and registering a prepare-commit
 * handler (once per root Unit of Work) that commits the saga.
 *
 * @param sagaIdentifier the identifier of the saga to load
 * @return the managed saga instance, or {@code null} if no saga with that identifier was found
 */
@Override
public AnnotatedSaga<T> doLoad(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    // Callbacks and the unsaved-saga set live on the root, so nested units share the same state.
    UnitOfWork<?> processRoot = unitOfWork.root();
    AnnotatedSaga<T> loadedSaga = managedSagas.computeIfAbsent(sagaIdentifier, id -> {
        AnnotatedSaga<T> result = doLoadSaga(sagaIdentifier);
        if (result != null) {
            // Evict from the cache once the root Unit of Work completes (commit or rollback).
            processRoot.onCleanup(u -> managedSagas.remove(id));
        }
        return result;
    });
    // The add() returning true means no commit handler was registered yet for this saga in this
    // root Unit of Work — register exactly one.
    if (loadedSaga != null && unsavedSagaResource(processRoot).add(sagaIdentifier)) {
        unitOfWork.onPrepareCommit(u -> {
            unsavedSagaResource(processRoot).remove(sagaIdentifier);
            commit(loadedSaga);
        });
    }
    return loadedSaga;
}
/**
 * Obtains the lock guarding access to the saga with the given identifier and arranges for it to be
 * released when the root Unit of Work is cleaned up.
 *
 * @param sagaIdentifier the identifier of the saga to lock access to
 */
private void lockSagaAccess(String sagaIdentifier) {
    UnitOfWork<?> activeUnitOfWork = CurrentUnitOfWork.get();
    Lock sagaLock = lockFactory.obtainLock(sagaIdentifier);
    // Hold the lock for the entire root Unit of Work; cleanup runs after commit or rollback.
    activeUnitOfWork.root().onCleanup(finishedUnit -> sagaLock.release());
}
/**
 * Creates a new saga instance with the given identifier using the given factory. The new saga root
 * has resources injected, the saga is tracked as unsaved and cached in the managed-sagas map, and a
 * prepare-commit handler stores it if it is still active at commit time.
 *
 * @param sagaIdentifier the identifier for the new saga
 * @param sagaFactory    the factory producing the saga's root object
 * @return the newly created saga, wrapped as an {@code AnnotatedSaga}
 * @throws SagaCreationException when creating or preparing the new instance fails
 */
@Override
public AnnotatedSaga<T> doCreateInstance(String sagaIdentifier, Supplier<T> sagaFactory) {
    try {
        UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get(), processRoot = unitOfWork.root();
        T sagaRoot = sagaFactory.get();
        resourceInjector.injectResources(sagaRoot);
        AnnotatedSaga<T> saga = new AnnotatedSaga<>(sagaIdentifier, Collections.emptySet(), sagaRoot, sagaModel);
        // Track as unsaved until the prepare-commit handler below stores it.
        unsavedSagaResource(processRoot).add(sagaIdentifier);
        unitOfWork.onPrepareCommit(u -> {
            // Only persist sagas that are still active; an ended saga is not stored.
            if (saga.isActive()) {
                storeSaga(saga);
                saga.getAssociationValues().commit();
                unsavedSagaResource(processRoot).remove(sagaIdentifier);
            }
        });
        managedSagas.put(sagaIdentifier, saga);
        // Evict from the managed-sagas cache when the root Unit of Work is cleaned up.
        processRoot.onCleanup(u -> managedSagas.remove(sagaIdentifier));
        return saga;
    } catch (Exception e) {
        throw new SagaCreationException("An error occurred while attempting to create a new managed instance", e);
    }
}
Connection connection = uow.root().getResource(CONNECTION_RESOURCE_NAME); if (connection == null || connection.isClosed()) { final Connection delegateConnection = delegate.getConnection(); new UoWAttachedConnectionImpl(delegateConnection), new ConnectionWrapperFactory.NoOpCloseHandler()); uow.root().resources().put(CONNECTION_RESOURCE_NAME, connection); uow.onCommit(u -> { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); try { if (cx instanceof UoWAttachedConnection) { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); JdbcUtils.closeQuietly(cx); if (cx instanceof UoWAttachedConnection) { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); try { if (!cx.isClosed() && !cx.getAutoCommit()) {
if (u.parent().isPresent() && !u.root().phase().isAfter(COMMIT)) { u.root().onCommit(w -> doWithEvents(this::commit, eventQueue)); } else { doWithEvents(this::commit, eventQueue); if (u.parent().isPresent() && !u.root().phase().isAfter(AFTER_COMMIT)) { u.root().afterCommit(w -> doWithEvents(this::afterCommit, eventQueue)); } else { doWithEvents(this::afterCommit, eventQueue);
/**
 * Publishes the given events on this event bus. When a Unit of Work is active, the events are staged
 * in its event queue and processed on commit; otherwise they are intercepted, committed and processed
 * immediately. Message-monitor callbacks are notified of success or failure either way.
 *
 * @param events the events to publish
 * @throws IllegalStateException when the current or root Unit of Work has already been committed
 */
@Override
public void publish(List<? extends EventMessage<?>> events) {
    // Materialize the monitor callbacks eagerly. Keeping this as a Stream was a bug: a Stream is
    // lazy (the onMessageIngested side effect would not fire until the stream is consumed at
    // commit/rollback time) and single-use (consuming it in more than one callback throws
    // IllegalStateException).
    List<MessageMonitor.MonitorCallback> ingested =
            events.stream()
                  .map(messageMonitor::onMessageIngested)
                  .collect(java.util.stream.Collectors.toList());
    if (CurrentUnitOfWork.isStarted()) {
        UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
        Assert.state(!unitOfWork.phase().isAfter(PREPARE_COMMIT),
                     () -> "It is not allowed to publish events when the current Unit of Work has already been " +
                             "committed. Please start a new Unit of Work before publishing events.");
        Assert.state(!unitOfWork.root().phase().isAfter(PREPARE_COMMIT),
                     () -> "It is not allowed to publish events when the root Unit of Work has already been " +
                             "committed.");
        // Exactly one of these callbacks fires, reporting the Unit of Work's outcome.
        unitOfWork.afterCommit(u -> ingested.forEach(MessageMonitor.MonitorCallback::reportSuccess));
        unitOfWork.onRollback(uow -> ingested.forEach(
                message -> message.reportFailure(uow.getExecutionResult().getExceptionResult())
        ));
        eventsQueue(unitOfWork).addAll(events);
    } else {
        // No Unit of Work: run the full publication lifecycle synchronously.
        try {
            prepareCommit(intercept(events));
            commit(events);
            afterCommit(events);
            ingested.forEach(MessageMonitor.MonitorCallback::reportSuccess);
        } catch (Exception e) {
            ingested.forEach(m -> m.reportFailure(e));
            throw e;
        }
    }
}
/**
 * Appends the given events to the event store. When a Unit of Work is active, one
 * {@code AppendEventTransaction} is shared per root Unit of Work (stored as a resource) and is
 * committed or rolled back with that Unit of Work; otherwise a fresh transaction is created and
 * committed immediately.
 *
 * @param events     the events to append
 * @param serializer the serializer used to map each event for transport
 */
@Override
protected void appendEvents(List<? extends EventMessage<?>> events, Serializer serializer) {
    AppendEventTransaction sender;
    if (CurrentUnitOfWork.isStarted()) {
        // Reuse (or lazily create) the single append transaction for this root Unit of Work.
        sender = CurrentUnitOfWork.get().root().getOrComputeResource(APPEND_EVENT_TRANSACTION, k -> {
            AppendEventTransaction appendEventTransaction = eventStoreClient.createAppendEventConnection();
            // Tie the transaction's outcome to the root Unit of Work's outcome.
            CurrentUnitOfWork.get().root().onRollback(
                    u -> appendEventTransaction.rollback(u.getExecutionResult().getExceptionResult())
            );
            CurrentUnitOfWork.get().root().onCommit(u -> commit(appendEventTransaction));
            return appendEventTransaction;
        });
    } else {
        sender = eventStoreClient.createAppendEventConnection();
    }
    for (EventMessage<?> eventMessage : events) {
        sender.append(map(eventMessage, serializer));
    }
    // Without a Unit of Work there is no commit callback, so commit synchronously here.
    if (!CurrentUnitOfWork.isStarted()) {
        commit(sender);
    }
}
/**
 * Returns the map of aggregates currently managed by this repository under the given unit of work. Note that the
 * repository keeps the managed aggregates in the root unit of work, to guarantee each Unit of Work works with the
 * state left by the parent unit of work.
 * <p>
 * The returned map is mutable and reflects any changes made during processing.
 *
 * @param uow The unit of work to find the managed aggregates for
 * @return a map with the aggregates managed by this repository in the given unit of work
 */
protected Map<String, A> managedAggregates(UnitOfWork<?> uow) {
    // Stored as a resource on the root Unit of Work, so nested units share one view of the aggregates.
    return uow.root().getOrComputeResource(aggregatesKey, s -> new HashMap<>());
}
/**
 * Returns the map of aggregates currently managed by this repository under the given unit of work. Note that the
 * repository keeps the managed aggregates in the root unit of work, to guarantee each Unit of Work works with the
 * state left by the parent unit of work.
 * <p>
 * The returned map is mutable and reflects any changes made during processing.
 *
 * @param uow The unit of work to find the managed aggregates for
 * @return a map with the aggregates managed by this repository in the given unit of work
 */
protected Map<String, A> managedAggregates(UnitOfWork<?> uow) {
    // Stored as a resource on the root Unit of Work, so nested units share one view of the aggregates.
    return uow.root().getOrComputeResource(aggregatesKey, s -> new HashMap<>());
}
/**
 * Obtains the lock guarding access to the saga with the given identifier and arranges for it to be
 * released when the root Unit of Work is cleaned up.
 *
 * @param sagaIdentifier the identifier of the saga to lock access to
 */
private void lockSagaAccess(String sagaIdentifier) {
    UnitOfWork<?> activeUnitOfWork = CurrentUnitOfWork.get();
    Lock sagaLock = lockFactory.obtainLock(sagaIdentifier);
    // Hold the lock for the entire root Unit of Work; cleanup runs after commit or rollback.
    activeUnitOfWork.root().onCleanup(finishedUnit -> sagaLock.release());
}
/**
 * Handles the given events, scheduling them for processing. When a Unit of Work is active, all
 * events published during it are buffered on the root Unit of Work and scheduled as one batch
 * after commit; otherwise the events are scheduled immediately.
 *
 * @param events    the events to handle
 * @param processor the consumer that will process the scheduled events
 */
@Override
public void handle(List<? extends EventMessage<?>> events, Consumer<List<? extends EventMessage<?>>> processor) {
    // Without an active Unit of Work there is nothing to defer to: schedule right away.
    if (!CurrentUnitOfWork.isStarted()) {
        schedule(events, processor);
        return;
    }
    // Buffer on the root Unit of Work; the afterCommit hook is registered only on first use,
    // so the whole batch is scheduled exactly once.
    UnitOfWork<?> root = CurrentUnitOfWork.get().root();
    List<EventMessage<?>> buffer = root.getOrComputeResource(scheduledEventsKey, resourceKey -> {
        List<EventMessage<?>> pending = new ArrayList<>();
        root.afterCommit(committedUnit -> schedule(pending, processor));
        return pending;
    });
    buffer.addAll(events);
}
/**
 * Handles the given events, scheduling them for processing. When a Unit of Work is active, all
 * events published during it are buffered on the root Unit of Work and scheduled as one batch
 * after commit; otherwise the events are scheduled immediately.
 *
 * @param events    the events to handle
 * @param processor the consumer that will process the scheduled events
 */
@Override
public void handle(List<? extends EventMessage<?>> events, Consumer<List<? extends EventMessage<?>>> processor) {
    // Without an active Unit of Work there is nothing to defer to: schedule right away.
    if (!CurrentUnitOfWork.isStarted()) {
        schedule(events, processor);
        return;
    }
    // Buffer on the root Unit of Work; the afterCommit hook is registered only on first use,
    // so the whole batch is scheduled exactly once.
    UnitOfWork<?> root = CurrentUnitOfWork.get().root();
    List<EventMessage<?>> buffer = root.getOrComputeResource(scheduledEventsKey, resourceKey -> {
        List<EventMessage<?>> pending = new ArrayList<>();
        root.afterCommit(committedUnit -> schedule(pending, processor));
        return pending;
    });
    buffer.addAll(events);
}
/**
 * Loads the saga with the given identifier, caching it in the managed-sagas map for the duration of
 * the root Unit of Work so repeated loads return the same instance, and registering a prepare-commit
 * handler (once per root Unit of Work) that commits the saga.
 *
 * @param sagaIdentifier the identifier of the saga to load
 * @return the managed saga instance, or {@code null} if no saga with that identifier was found
 */
@Override
public AnnotatedSaga<T> doLoad(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    // Callbacks and the unsaved-saga set live on the root, so nested units share the same state.
    UnitOfWork<?> processRoot = unitOfWork.root();
    AnnotatedSaga<T> loadedSaga = managedSagas.computeIfAbsent(sagaIdentifier, id -> {
        AnnotatedSaga<T> result = doLoadSaga(sagaIdentifier);
        if (result != null) {
            // Evict from the cache once the root Unit of Work completes (commit or rollback).
            processRoot.onCleanup(u -> managedSagas.remove(id));
        }
        return result;
    });
    // The add() returning true means no commit handler was registered yet for this saga in this
    // root Unit of Work — register exactly one.
    if (loadedSaga != null && unsavedSagaResource(processRoot).add(sagaIdentifier)) {
        unitOfWork.onPrepareCommit(u -> {
            unsavedSagaResource(processRoot).remove(sagaIdentifier);
            commit(loadedSaga);
        });
    }
    return loadedSaga;
}
/**
 * Loads the saga with the given identifier, caching it in the managed-sagas map for the duration of
 * the root Unit of Work so repeated loads return the same instance, and registering a prepare-commit
 * handler (once per root Unit of Work) that commits the saga.
 *
 * @param sagaIdentifier the identifier of the saga to load
 * @return the managed saga instance, or {@code null} if no saga with that identifier was found
 */
@Override
public AnnotatedSaga<T> doLoad(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    // Callbacks and the unsaved-saga set live on the root, so nested units share the same state.
    UnitOfWork<?> processRoot = unitOfWork.root();
    AnnotatedSaga<T> loadedSaga = managedSagas.computeIfAbsent(sagaIdentifier, id -> {
        AnnotatedSaga<T> result = doLoadSaga(sagaIdentifier);
        if (result != null) {
            // Evict from the cache once the root Unit of Work completes (commit or rollback).
            processRoot.onCleanup(u -> managedSagas.remove(id));
        }
        return result;
    });
    // The add() returning true means no commit handler was registered yet for this saga in this
    // root Unit of Work — register exactly one.
    if (loadedSaga != null && unsavedSagaResource(processRoot).add(sagaIdentifier)) {
        unitOfWork.onPrepareCommit(u -> {
            unsavedSagaResource(processRoot).remove(sagaIdentifier);
            commit(loadedSaga);
        });
    }
    return loadedSaga;
}
/**
 * Obtains the lock guarding access to the saga with the given identifier and arranges for it to be
 * released when the root Unit of Work is cleaned up.
 *
 * @param sagaIdentifier the identifier of the saga to lock access to
 */
private void lockSagaAccess(String sagaIdentifier) {
    UnitOfWork<?> activeUnitOfWork = CurrentUnitOfWork.get();
    Lock sagaLock = lockFactory.obtainLock(sagaIdentifier);
    // Hold the lock for the entire root Unit of Work; cleanup runs after commit or rollback.
    activeUnitOfWork.root().onCleanup(finishedUnit -> sagaLock.release());
}
/**
 * Creates a new saga instance with the given identifier using the given factory. The new saga root
 * has resources injected, the saga is tracked as unsaved and cached in the managed-sagas map, and a
 * prepare-commit handler stores it if it is still active at commit time.
 *
 * @param sagaIdentifier the identifier for the new saga
 * @param sagaFactory    the factory producing the saga's root object
 * @return the newly created saga, wrapped as an {@code AnnotatedSaga}
 * @throws SagaCreationException when creating or preparing the new instance fails
 */
@Override
public AnnotatedSaga<T> doCreateInstance(String sagaIdentifier, Supplier<T> sagaFactory) {
    try {
        UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get(), processRoot = unitOfWork.root();
        T sagaRoot = sagaFactory.get();
        resourceInjector.injectResources(sagaRoot);
        AnnotatedSaga<T> saga = new AnnotatedSaga<>(sagaIdentifier, Collections.emptySet(), sagaRoot, sagaModel);
        // Track as unsaved until the prepare-commit handler below stores it.
        unsavedSagaResource(processRoot).add(sagaIdentifier);
        unitOfWork.onPrepareCommit(u -> {
            // Only persist sagas that are still active; an ended saga is not stored.
            if (saga.isActive()) {
                storeSaga(saga);
                saga.getAssociationValues().commit();
                unsavedSagaResource(processRoot).remove(sagaIdentifier);
            }
        });
        managedSagas.put(sagaIdentifier, saga);
        // Evict from the managed-sagas cache when the root Unit of Work is cleaned up.
        processRoot.onCleanup(u -> managedSagas.remove(sagaIdentifier));
        return saga;
    } catch (Exception e) {
        throw new SagaCreationException("An error occurred while attempting to create a new managed instance", e);
    }
}
/**
 * Creates a new saga instance with the given identifier using the given factory. The new saga root
 * has resources injected, the saga is tracked as unsaved and cached in the managed-sagas map, and a
 * prepare-commit handler stores it if it is still active at commit time.
 *
 * @param sagaIdentifier the identifier for the new saga
 * @param sagaFactory    the factory producing the saga's root object
 * @return the newly created saga, wrapped as an {@code AnnotatedSaga}
 * @throws SagaCreationException when creating or preparing the new instance fails
 */
@Override
public AnnotatedSaga<T> doCreateInstance(String sagaIdentifier, Supplier<T> sagaFactory) {
    try {
        UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get(), processRoot = unitOfWork.root();
        T sagaRoot = sagaFactory.get();
        // NOTE(review): aligned with the sibling implementations of this method, which use the
        // `resourceInjector` field and the four-argument AnnotatedSaga constructor; this copy
        // previously used `injector` and passed an extra explicit null argument. Verify the field
        // name and constructor arity against this class before merging.
        resourceInjector.injectResources(sagaRoot);
        AnnotatedSaga<T> saga = new AnnotatedSaga<>(sagaIdentifier, Collections.emptySet(), sagaRoot, sagaModel);
        // Track as unsaved until the prepare-commit handler below stores it.
        unsavedSagaResource(processRoot).add(sagaIdentifier);
        unitOfWork.onPrepareCommit(u -> {
            // Only persist sagas that are still active; an ended saga is not stored.
            if (saga.isActive()) {
                storeSaga(saga);
                saga.getAssociationValues().commit();
                unsavedSagaResource(processRoot).remove(sagaIdentifier);
            }
        });
        managedSagas.put(sagaIdentifier, saga);
        // Evict from the managed-sagas cache when the root Unit of Work is cleaned up.
        processRoot.onCleanup(u -> managedSagas.remove(sagaIdentifier));
        return saga;
    } catch (Exception e) {
        throw new SagaCreationException("An error occurred while attempting to create a new managed instance", e);
    }
}
/**
 * Appends the given events to the event store. When a Unit of Work is active, one
 * {@code AppendEventTransaction} is shared per root Unit of Work (stored as a resource) and is
 * committed or rolled back with that Unit of Work; otherwise a fresh transaction is created and
 * committed immediately.
 *
 * @param events     the events to append
 * @param serializer the serializer used to map each event for transport
 */
@Override
protected void appendEvents(List<? extends EventMessage<?>> events, Serializer serializer) {
    AppendEventTransaction sender;
    if (CurrentUnitOfWork.isStarted()) {
        // Reuse (or lazily create) the single append transaction for this root Unit of Work.
        sender = CurrentUnitOfWork.get().root().getOrComputeResource(APPEND_EVENT_TRANSACTION, k -> {
            AppendEventTransaction appendEventTransaction = eventStoreClient.createAppendEventConnection();
            // Tie the transaction's outcome to the root Unit of Work's outcome.
            CurrentUnitOfWork.get().root().onRollback(
                    u -> appendEventTransaction.rollback(u.getExecutionResult().getExceptionResult())
            );
            CurrentUnitOfWork.get().root().onCommit(u -> commit(appendEventTransaction));
            return appendEventTransaction;
        });
    } else {
        sender = eventStoreClient.createAppendEventConnection();
    }
    for (EventMessage<?> eventMessage : events) {
        sender.append(map(eventMessage, serializer));
    }
    // Without a Unit of Work there is no commit callback, so commit synchronously here.
    if (!CurrentUnitOfWork.isStarted()) {
        commit(sender);
    }
}