/**
 * Loads the saga with the given identifier, caching it for the remainder of the root
 * Unit of Work and scheduling a commit of its state before the current Unit of Work commits.
 *
 * @param sagaIdentifier the identifier of the saga to load
 * @return the managed saga instance, or {@code null} if no saga with that identifier exists
 */
@Override
public AnnotatedSaga<T> doLoad(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    UnitOfWork<?> processRoot = unitOfWork.root();
    // Cache the saga per identifier for the lifetime of the root Unit of Work. The
    // eviction hook is registered inside computeIfAbsent so it is only installed when
    // the load actually succeeded (result != null) and only once per cache entry.
    AnnotatedSaga<T> loadedSaga = managedSagas.computeIfAbsent(sagaIdentifier, id -> {
        AnnotatedSaga<T> result = doLoadSaga(sagaIdentifier);
        if (result != null) {
            processRoot.onCleanup(u -> managedSagas.remove(id));
        }
        return result;
    });
    // The "unsaved" set on the root Unit of Work guards against registering the
    // prepare-commit hook more than once when the same saga is loaded repeatedly
    // within nested Units of Work (Set.add returns false on a duplicate).
    if (loadedSaga != null && unsavedSagaResource(processRoot).add(sagaIdentifier)) {
        unitOfWork.onPrepareCommit(u -> {
            // Remove from the unsaved set first, then persist the saga's state.
            unsavedSagaResource(processRoot).remove(sagaIdentifier);
            commit(loadedSaga);
        });
    }
    return loadedSaga;
}
/**
 * Obtains an exclusive lock guarding access to the saga with the given identifier and
 * arranges for its release during the cleanup phase of the root Unit of Work.
 *
 * @param sagaIdentifier the identifier of the saga to lock access to
 */
private void lockSagaAccess(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    Lock lock = lockFactory.obtainLock(sagaIdentifier);
    try {
        unitOfWork.root().onCleanup(u -> lock.release());
    } catch (Throwable ex) {
        // If registering the cleanup handler fails (e.g. the Unit of Work is past the
        // phase where handlers may be added), release the lock immediately — otherwise
        // it would never be released. Mirrors the guard used when loading aggregates.
        lock.release();
        throw ex;
    }
}
/**
 * Creates a new saga instance with the given identifier, injects its resources, registers it
 * with the current Unit of Work (persisting it on prepare-commit while still active), and
 * caches it until the root Unit of Work is cleaned up.
 *
 * @param sagaIdentifier the identifier to assign to the new saga
 * @param sagaFactory    factory producing the raw saga root object
 * @return the managed saga wrapping the newly created instance
 * @throws SagaCreationException when instantiation, injection or registration fails
 */
@Override
public AnnotatedSaga<T> doCreateInstance(String sagaIdentifier, Supplier<T> sagaFactory) {
    try {
        UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get(), processRoot = unitOfWork.root();
        T sagaRoot = sagaFactory.get();
        resourceInjector.injectResources(sagaRoot);
        AnnotatedSaga<T> saga = new AnnotatedSaga<>(sagaIdentifier, Collections.emptySet(), sagaRoot, sagaModel);
        // Mark the saga as unsaved until the prepare-commit hook below has stored it.
        unsavedSagaResource(processRoot).add(sagaIdentifier);
        unitOfWork.onPrepareCommit(u -> {
            // Only persist sagas that are still active at commit time; a saga that
            // ended during event handling is not stored at all.
            if (saga.isActive()) {
                storeSaga(saga);
                saga.getAssociationValues().commit();
                unsavedSagaResource(processRoot).remove(sagaIdentifier);
            }
        });
        // Cache the new saga for the duration of the root Unit of Work.
        managedSagas.put(sagaIdentifier, saga);
        processRoot.onCleanup(u -> managedSagas.remove(sagaIdentifier));
        return saga;
    } catch (Exception e) {
        throw new SagaCreationException("An error occurred while attempting to create a new managed instance", e);
    }
}
/**
 * Performs the actual loading of an aggregate, guarded by a lock on its identifier.
 * The lock is handed to the returned wrapper and scheduled for release when the
 * current Unit of Work is cleaned up; on any loading failure it is released here.
 *
 * @param aggregateIdentifier the identifier of the aggregate to load
 * @param expectedVersion The expected version of the aggregate
 * @return the fully initialized aggregate
 * @throws AggregateNotFoundException if aggregate with given id cannot be found
 */
@Override
protected LockAwareAggregate<T, A> doLoad(String aggregateIdentifier, Long expectedVersion) {
    Lock aggregateLock = lockFactory.obtainLock(aggregateIdentifier);
    try {
        A loadedAggregate = doLoadWithLock(aggregateIdentifier, expectedVersion);
        // Hold the lock until the Unit of Work's cleanup phase runs.
        CurrentUnitOfWork.get().onCleanup(u -> aggregateLock.release());
        return new LockAwareAggregate<>(loadedAggregate, aggregateLock);
    } catch (Throwable throwable) {
        // Loading (or cleanup registration) failed: release the lock now, as the
        // Unit of Work will never do it for us.
        logger.debug("Exception occurred while trying to load an aggregate. Releasing lock.", throwable);
        aggregateLock.release();
        throw throwable;
    }
}
/**
 * Starts this Unit of Work: transitions it to the STARTED phase, registers it as the
 * current Unit of Work, and — when another Unit of Work is already active — nests
 * under it as a child.
 */
@Override
public void start() {
    if (logger.isDebugEnabled()) {
        logger.debug("Starting Unit Of Work");
    }
    Assert.state(Phase.NOT_STARTED.equals(phase()), () -> "UnitOfWork is already started");
    rolledBack = false;
    // Flip the flag as soon as a rollback occurs, so isRolledBack() reflects it.
    onRollback(u -> rolledBack = true);
    CurrentUnitOfWork.ifStarted(parent -> {
        // we're nesting.
        this.parentUnitOfWork = parent;
        // A nested Unit of Work is only closed when the ROOT reaches its cleanup phase.
        root().onCleanup(r -> changePhase(Phase.CLEANUP, Phase.CLOSED));
    });
    changePhase(Phase.STARTED);
    // Must happen after the phase change: register as the (new) current Unit of Work.
    CurrentUnitOfWork.set(this);
}
/**
 * Creates a new aggregate via the given factory and wraps it together with a lock
 * obtained for its identifier. The lock is released during the Unit of Work's cleanup
 * phase, or immediately if registering that cleanup handler fails.
 *
 * @param factoryMethod factory producing the new aggregate instance
 * @return the new aggregate paired with the lock guarding it
 * @throws Exception when the factory method fails
 */
@Override
protected LockAwareAggregate<T, A> doCreateNew(Callable<T> factoryMethod) throws Exception {
    A aggregate = doCreateNewForLock(factoryMethod);
    final String aggregateIdentifier = aggregate.identifierAsString();
    Lock lock = lockFactory.obtainLock(aggregateIdentifier);
    try {
        CurrentUnitOfWork.get().onCleanup(u -> lock.release());
    } catch (Throwable ex) {
        // NOTE: lock cannot be null here — obtainLock either returned a lock or threw
        // before this try block was entered — so the former null check was dead code.
        logger.debug("Exception occurred while trying to add an aggregate. Releasing lock.", ex);
        lock.release();
        throw ex;
    }
    return new LockAwareAggregate<>(aggregate, lock);
}
/**
 * Verifies that, with multiple processing threads, a tracking token is stored for each
 * segment once the published events have been handled.
 */
@Test
public void testMultiThreadTokenIsStoredWhenEventIsRead() throws Exception {
    // One count per event: released when each Unit of Work reaches its cleanup phase.
    CountDownLatch countDownLatch = new CountDownLatch(2);
    testSubject.registerHandlerInterceptor((unitOfWork, interceptorChain) -> {
        unitOfWork.onCleanup(uow -> countDownLatch.countDown());
        return interceptorChain.proceed();
    });
    testSubject.start();
    eventBus.publish(createEvents(2));
    assertTrue("Expected Unit of Work to have reached clean up phase",
               countDownLatch.await(5, SECONDS));
    verify(tokenStore, atLeastOnce()).storeToken(any(), any(), anyInt());
    // Use assertNotNull for consistency with the sibling token-store tests,
    // instead of the Hamcrest assertThat(..., notNullValue()) form.
    assertNotNull(tokenStore.fetchToken(testSubject.getName(), 0));
    assertNotNull(tokenStore.fetchToken(testSubject.getName(), 1));
}
@Test public void testTokenIsStoredWhenEventIsRead() throws Exception { CountDownLatch countDownLatch = new CountDownLatch(1); testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> { unitOfWork.onCleanup(uow -> countDownLatch.countDown()); return interceptorChain.proceed(); })); testSubject.start(); // give it a bit of time to start Thread.sleep(200); eventBus.publish(createEvent()); assertTrue("Expected Unit of Work to have reached clean up phase", countDownLatch.await(5, TimeUnit.SECONDS)); verify(tokenStore).extendClaim(eq(testSubject.getName()), anyInt()); verify(tokenStore).storeToken(any(), any(), anyInt()); assertNotNull(tokenStore.fetchToken(testSubject.getName(), 0)); }
unitOfWork.onCleanup(u -> u.resources().remove(eventsKey)); return eventQueue; });
/**
 * Verifies that when processing of the SECOND event fails (rolling back its Unit of Work),
 * the token advanced by the successfully handled FIRST event has still been stored.
 */
@Test
public void testFirstTokenIsStoredWhenUnitOfWorkIsRolledBackOnSecondEvent() throws Exception {
    List<? extends EventMessage<?>> events = createEvents(2);
    // Both Units of Work (committed and rolled back) still reach the cleanup phase.
    CountDownLatch countDownLatch = new CountDownLatch(2);
    // First interceptor: force a commit failure for the second event only.
    testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> {
        unitOfWork.onCommit(uow -> {
            if (uow.getMessage().equals(events.get(1))) {
                throw new MockException();
            }
        });
        return interceptorChain.proceed();
    }));
    // Second interceptor: count down once each Unit of Work is cleaned up.
    testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> {
        unitOfWork.onCleanup(uow -> countDownLatch.countDown());
        return interceptorChain.proceed();
    }));
    testSubject.start();
    // give it a bit of time to start
    Thread.sleep(200);
    eventBus.publish(events);
    assertTrue("Expected Unit of Work to have reached clean up phase", countDownLatch.await(5, TimeUnit.SECONDS));
    verify(tokenStore, atLeastOnce()).storeToken(any(), any(), anyInt());
    assertNotNull(tokenStore.fetchToken(testSubject.getName(), 0));
}
@Test public void testTokenIsStoredOncePerEventBatch() throws Exception { testSubject = TrackingEventProcessor.builder() .name("test") .eventHandlerInvoker(eventHandlerInvoker) .messageSource(eventBus) .tokenStore(tokenStore) .transactionManager(NoTransactionManager.INSTANCE) .build(); CountDownLatch countDownLatch = new CountDownLatch(2); testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> { unitOfWork.onCleanup(uow -> countDownLatch.countDown()); return interceptorChain.proceed(); })); testSubject.start(); // give it a bit of time to start Thread.sleep(200); eventBus.publish(createEvents(2)); assertTrue("Expected Unit of Work to have reached clean up phase for 2 messages", countDownLatch.await(5, TimeUnit.SECONDS)); InOrder inOrder = inOrder(tokenStore); inOrder.verify(tokenStore, times(1)).extendClaim(eq(testSubject.getName()), anyInt()); inOrder.verify(tokenStore, times(1)).storeToken(any(), any(), anyInt()); assertNotNull(tokenStore.fetchToken(testSubject.getName(), 0)); }
uow.onCleanup(u -> { Connection cx = u.root().getResource(CONNECTION_RESOURCE_NAME); JdbcUtils.closeQuietly(cx);
unitOfWork.onCleanup(uow -> countDownLatch.countDown()); return interceptorChain.proceed(); }));
/**
 * Verifies that no tracking token is stored when the event's Unit of Work is rolled back
 * due to a commit failure.
 */
@Test
public void testTokenIsNotStoredWhenUnitOfWorkIsRolledBack() throws Exception {
    CountDownLatch countDownLatch = new CountDownLatch(1);
    // First interceptor: make every commit fail, forcing a rollback.
    testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> {
        unitOfWork.onCommit(uow -> {
            throw new MockException();
        });
        return interceptorChain.proceed();
    }));
    // Second interceptor: count down when the Unit of Work reaches the cleanup phase
    // (cleanup runs regardless of commit or rollback).
    testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> {
        unitOfWork.onCleanup(uow -> countDownLatch.countDown());
        return interceptorChain.proceed();
    }));
    testSubject.start();
    // give it a bit of time to start
    Thread.sleep(200);
    eventBus.publish(createEvent());
    assertTrue("Expected Unit of Work to have reached clean up phase", countDownLatch.await(5, TimeUnit.SECONDS));
    // Rollback means the token for segment 0 must not have been stored.
    assertNull(tokenStore.fetchToken(testSubject.getName(), 0));
}
/**
 * Verifies that, with multiple processing threads, tokens for both segments are stored
 * even though the Unit of Work of the second event is rolled back by a commit failure.
 */
@Test
public void testMultiThreadTokensAreStoredWhenUnitOfWorkIsRolledBackOnSecondEvent() throws Exception {
    List<? extends EventMessage<?>> events = createEvents(2);
    // Both Units of Work (committed and rolled back) still reach the cleanup phase.
    CountDownLatch countDownLatch = new CountDownLatch(2);
    //noinspection Duplicates
    // First interceptor: force a commit failure for the second event only.
    testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> {
        unitOfWork.onCommit(uow -> {
            if (uow.getMessage().equals(events.get(1))) {
                throw new MockException();
            }
        });
        return interceptorChain.proceed();
    }));
    // Second interceptor: count down once each Unit of Work is cleaned up.
    testSubject.registerHandlerInterceptor(((unitOfWork, interceptorChain) -> {
        unitOfWork.onCleanup(uow -> countDownLatch.countDown());
        return interceptorChain.proceed();
    }));
    testSubject.start();
    eventBus.publish(events);
    assertTrue("Expected Unit of Work to have reached clean up phase", countDownLatch.await(5, SECONDS));
    // Both segments must have a token despite the rollback of the second event.
    assertNotNull(tokenStore.fetchToken(testSubject.getName(), 0));
    assertNotNull(tokenStore.fetchToken(testSubject.getName(), 1));
}
/**
 * Obtains an exclusive lock guarding access to the saga with the given identifier and
 * arranges for its release during the cleanup phase of the root Unit of Work.
 *
 * @param sagaIdentifier the identifier of the saga to lock access to
 */
private void lockSagaAccess(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    Lock lock = lockFactory.obtainLock(sagaIdentifier);
    try {
        unitOfWork.root().onCleanup(u -> lock.release());
    } catch (Throwable ex) {
        // If registering the cleanup handler fails (e.g. the Unit of Work is past the
        // phase where handlers may be added), release the lock immediately — otherwise
        // it would never be released. Mirrors the guard used when loading aggregates.
        lock.release();
        throw ex;
    }
}
/**
 * Obtains an exclusive lock guarding access to the saga with the given identifier and
 * arranges for its release during the cleanup phase of the root Unit of Work.
 *
 * @param sagaIdentifier the identifier of the saga to lock access to
 */
private void lockSagaAccess(String sagaIdentifier) {
    UnitOfWork<?> unitOfWork = CurrentUnitOfWork.get();
    Lock lock = lockFactory.obtainLock(sagaIdentifier);
    try {
        unitOfWork.root().onCleanup(u -> lock.release());
    } catch (Throwable ex) {
        // If registering the cleanup handler fails (e.g. the Unit of Work is past the
        // phase where handlers may be added), release the lock immediately — otherwise
        // it would never be released. Mirrors the guard used when loading aggregates.
        lock.release();
        throw ex;
    }
}
/**
 * Starts this Unit of Work: transitions it to the STARTED phase, registers it as the
 * current Unit of Work, and — when another Unit of Work is already active — nests
 * under it as a child.
 */
@Override
public void start() {
    if (logger.isDebugEnabled()) {
        logger.debug("Starting Unit Of Work");
    }
    Assert.state(Phase.NOT_STARTED.equals(phase()), () -> "UnitOfWork is already started");
    rolledBack = false;
    // Flip the flag as soon as a rollback occurs, so isRolledBack() reflects it.
    onRollback(u -> rolledBack = true);
    CurrentUnitOfWork.ifStarted(parent -> {
        // we're nesting.
        this.parentUnitOfWork = parent;
        // A nested Unit of Work is only closed when the ROOT reaches its cleanup phase.
        root().onCleanup(r -> changePhase(Phase.CLEANUP, Phase.CLOSED));
    });
    changePhase(Phase.STARTED);
    // Must happen after the phase change: register as the (new) current Unit of Work.
    CurrentUnitOfWork.set(this);
}
/**
 * Starts this Unit of Work: transitions it to the STARTED phase, registers it as the
 * current Unit of Work, and — when another Unit of Work is already active — nests
 * under it as a child.
 */
@Override
public void start() {
    if (logger.isDebugEnabled()) {
        logger.debug("Starting Unit Of Work");
    }
    Assert.state(Phase.NOT_STARTED.equals(phase()), () -> "UnitOfWork is already started");
    rolledBack = false;
    // Flip the flag as soon as a rollback occurs, so isRolledBack() reflects it.
    onRollback(u -> rolledBack = true);
    CurrentUnitOfWork.ifStarted(parent -> {
        // we're nesting.
        this.parentUnitOfWork = parent;
        // A nested Unit of Work is only closed when the ROOT reaches its cleanup phase.
        root().onCleanup(r -> changePhase(Phase.CLEANUP, Phase.CLOSED));
    });
    changePhase(Phase.STARTED);
    // Must happen after the phase change: register as the (new) current Unit of Work.
    CurrentUnitOfWork.set(this);
}
/**
 * Creates a new aggregate via the given factory and wraps it together with a lock
 * obtained for its identifier. The lock is released during the Unit of Work's cleanup
 * phase, or immediately if registering that cleanup handler fails.
 *
 * @param factoryMethod factory producing the new aggregate instance
 * @return the new aggregate paired with the lock guarding it
 * @throws Exception when the factory method fails
 */
@Override
protected LockAwareAggregate<T, A> doCreateNew(Callable<T> factoryMethod) throws Exception {
    A aggregate = doCreateNewForLock(factoryMethod);
    final String aggregateIdentifier = aggregate.identifierAsString();
    Lock lock = lockFactory.obtainLock(aggregateIdentifier);
    try {
        CurrentUnitOfWork.get().onCleanup(u -> lock.release());
    } catch (Throwable ex) {
        // NOTE: lock cannot be null here — obtainLock either returned a lock or threw
        // before this try block was entered — so the former null check was dead code.
        logger.debug("Exception occurred while trying to add an aggregate. Releasing lock.", ex);
        lock.release();
        throw ex;
    }
    return new LockAwareAggregate<>(aggregate, lock);
}