/**
 * Aggregates the input executions into the result {@link StepExecution},
 * delegating to the delegate aggregator once each input has been refreshed
 * from the {@link JobExplorer}.
 *
 * @param result the parent execution to aggregate into; must not be null
 * @param executions the executions to aggregate; may be null, in which case
 * this method is a no-op
 *
 * @see StepExecutionAggregator#aggregate(StepExecution, Collection)
 */
@Override
public void aggregate(StepExecution result, Collection<StepExecution> executions) {
    Assert.notNull(result, "To aggregate into a result it must be non-null.");
    if (executions == null) {
        return;
    }
    Collection<StepExecution> updates = new ArrayList<>();
    for (StepExecution stepExecution : executions) {
        // An unsaved execution has no id and cannot be reloaded from the repository
        Long id = stepExecution.getId();
        Assert.state(id != null, "StepExecution has null id. It must be saved first: " + stepExecution);
        // Reload so aggregation sees the latest persisted state of each execution
        StepExecution update = jobExplorer.getStepExecution(stepExecution.getJobExecutionId(), id);
        Assert.state(update != null, "Could not reload StepExecution from JobRepository: " + stepExecution);
        updates.add(update);
    }
    delegate.aggregate(result, updates);
}
/**
 * Aggregating an empty set of executions completes without error.
 */
@Test
public void testAggregateEmpty() {
    Collection<StepExecution> nothing = Collections.emptySet();
    aggregator.aggregate(result, nothing);
}
/**
 * A null executions collection is tolerated: aggregation is simply skipped.
 */
@Test
public void testAggregateNull() {
    Collection<StepExecution> nothing = null;
    aggregator.aggregate(result, nothing);
}
/**
 * Delegate execution to the {@link PartitionHandler} provided. The
 * {@link StepExecution} passed in here becomes the parent or master
 * execution for the partition, summarising the status on exit of the
 * logical grouping of work carried out by the {@link PartitionHandler}. The
 * individual step executions and their input parameters (through
 * {@link ExecutionContext}) for the partition elements are provided by the
 * {@link StepExecutionSplitter}.
 *
 * @param stepExecution the master step execution for the partition
 * @throws JobExecutionException if any of the partitioned executions finished unsuccessfully
 *
 * @see Step#execute(StepExecution)
 */
@Override
protected void doExecute(StepExecution stepExecution) throws Exception {
    // Record the concrete step type in the execution context
    // (NOTE(review): presumably consumed on restart/inspection — confirm against readers of STEP_TYPE_KEY)
    stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName());

    // Wait for task completion and then aggregate the results
    Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution);
    stepExecution.upgradeStatus(BatchStatus.COMPLETED);
    stepExecutionAggregator.aggregate(stepExecution, executions);

    // If any partition failed or had a problem, fail the whole step
    if (stepExecution.getStatus().isUnsuccessful()) {
        throw new JobExecutionException("Partition handler returned an unsuccessful step");
    }
}
/**
 * Aggregating two COMPLETED executions leaves the result in its initial
 * STARTING state.
 */
@Test
public void testAggregateStatusSunnyDay() {
    stepExecution2.setStatus(BatchStatus.COMPLETED);
    stepExecution1.setStatus(BatchStatus.COMPLETED);
    aggregator.aggregate(result, Arrays.asList(stepExecution1, stepExecution2));
    assertNotNull(result);
    assertEquals(BatchStatus.STARTING, result.getStatus());
}
/**
 * One FAILED execution among the inputs makes the aggregated result FAILED.
 */
@Test
public void testAggregateStatusIncomplete() {
    stepExecution2.setStatus(BatchStatus.FAILED);
    stepExecution1.setStatus(BatchStatus.COMPLETED);
    aggregator.aggregate(result, Arrays.asList(stepExecution1, stepExecution2));
    assertNotNull(result);
    assertEquals(BatchStatus.FAILED, result.getStatus());
}
/**
 * A result that is already FAILED stays FAILED even when every input
 * execution completed successfully.
 */
@Test
public void testAggregateStatusFromFailure() {
    stepExecution2.setStatus(BatchStatus.COMPLETED);
    stepExecution1.setStatus(BatchStatus.COMPLETED);
    result.setStatus(BatchStatus.FAILED);
    aggregator.aggregate(result, Arrays.asList(stepExecution1, stepExecution2));
    assertNotNull(result);
    assertEquals(BatchStatus.FAILED, result.getStatus());
}
/**
 * The aggregated exit status is the combination (via {@code and}) of the
 * input exit statuses.
 */
@Test
public void testAggregateExitStatusSunnyDay() {
    stepExecution2.setExitStatus(ExitStatus.FAILED);
    stepExecution1.setExitStatus(ExitStatus.EXECUTING);
    aggregator.aggregate(result, Arrays.asList(stepExecution1, stepExecution2));
    assertNotNull(result);
    ExitStatus expected = ExitStatus.FAILED.and(ExitStatus.EXECUTING);
    assertEquals(expected, result.getExitStatus());
}
// Merge the individual partition results into the parent step execution
stepExecutionAggregator.aggregate(stepExecution, stepExecutions);
/**
 * Every numeric counter on the result is the sum of the corresponding
 * counters of the aggregated executions.
 */
@Test
public void testAggregateCountsSunnyDay() {
    // Counters grouped per field: first execution, then second
    stepExecution1.setCommitCount(1);
    stepExecution2.setCommitCount(11);
    stepExecution1.setFilterCount(2);
    stepExecution2.setFilterCount(12);
    stepExecution1.setProcessSkipCount(3);
    stepExecution2.setProcessSkipCount(13);
    stepExecution1.setReadCount(4);
    stepExecution2.setReadCount(14);
    stepExecution1.setReadSkipCount(5);
    stepExecution2.setReadSkipCount(15);
    stepExecution1.setRollbackCount(6);
    stepExecution2.setRollbackCount(16);
    stepExecution1.setWriteCount(7);
    stepExecution2.setWriteCount(17);
    stepExecution1.setWriteSkipCount(8);
    stepExecution2.setWriteSkipCount(18);

    aggregator.aggregate(result, Arrays.asList(stepExecution1, stepExecution2));

    // Each expected value is the sum of the two inputs for that counter
    assertEquals(12, result.getCommitCount());
    assertEquals(14, result.getFilterCount());
    assertEquals(16, result.getProcessSkipCount());
    assertEquals(18, result.getReadCount());
    assertEquals(20, result.getReadSkipCount());
    assertEquals(22, result.getRollbackCount());
    assertEquals(24, result.getWriteCount());
    assertEquals(26, result.getWriteSkipCount());
}

}
/**
 * Aggregates the input executions into the result {@link StepExecution},
 * delegating to the delegate aggregator once each input has been refreshed
 * from the {@link JobExplorer}.
 *
 * @param result the parent execution to aggregate into; must not be null
 * @param executions the executions to aggregate; may be null, in which case
 * this method is a no-op
 *
 * @see StepExecutionAggregator#aggregate(StepExecution, Collection)
 */
@Override
public void aggregate(StepExecution result, Collection<StepExecution> executions) {
    Assert.notNull(result, "To aggregate into a result it must be non-null.");
    if (executions == null) {
        return;
    }
    // Diamond operator: the element type is inferred from the declaration
    Collection<StepExecution> updates = new ArrayList<>();
    for (StepExecution stepExecution : executions) {
        // An unsaved execution has no id and cannot be reloaded from the repository
        Long id = stepExecution.getId();
        Assert.state(id != null, "StepExecution has null id. It must be saved first: " + stepExecution);
        // Reload so aggregation sees the latest persisted state of each execution
        StepExecution update = jobExplorer.getStepExecution(stepExecution.getJobExecutionId(), id);
        Assert.state(update != null, "Could not reload StepExecution from JobRepository: " + stepExecution);
        updates.add(update);
    }
    delegate.aggregate(result, updates);
}
/**
 * Refreshes each input execution from the {@link JobExplorer} and then hands
 * the refreshed collection to the delegate aggregator to merge into the
 * result {@link StepExecution}.
 *
 * @param result the execution to aggregate into; must not be null
 * @param executions the executions to refresh and aggregate; a null
 * collection is silently ignored
 *
 * @see StepExecutionAggregator#aggregate(StepExecution, Collection)
 */
@Override
public void aggregate(StepExecution result, Collection<StepExecution> executions) {
    Assert.notNull(result, "To aggregate into a result it must be non-null.");
    if (executions == null) {
        return;
    }
    Collection<StepExecution> reloaded = new ArrayList<StepExecution>();
    for (StepExecution execution : executions) {
        Long executionId = execution.getId();
        // A null id means the execution was never persisted, so it cannot be reloaded
        Assert.state(executionId != null, "StepExecution has null id. It must be saved first: " + execution);
        StepExecution refreshed = jobExplorer.getStepExecution(execution.getJobExecutionId(), executionId);
        Assert.state(refreshed != null, "Could not reload StepExecution from JobRepository: " + execution);
        reloaded.add(refreshed);
    }
    delegate.aggregate(result, reloaded);
}
/**
 * Aggregates the input executions into the result {@link StepExecution},
 * delegating to the delegate aggregator once each input has been reloaded
 * from the {@link JobExplorer}.
 *
 * @param result the execution to aggregate into; must not be null
 * @param executions the executions to aggregate; if null, nothing happens
 *
 * @see StepExecutionAggregator#aggregate(StepExecution, Collection)
 */
@Override
public void aggregate(StepExecution result, Collection<StepExecution> executions) {
    Assert.notNull(result, "To aggregate into a result it must be non-null.");
    if (executions != null) {
        Collection<StepExecution> freshCopies = new ArrayList<StepExecution>();
        for (StepExecution execution : executions) {
            freshCopies.add(reloadStepExecution(execution));
        }
        delegate.aggregate(result, freshCopies);
    }
}

/**
 * Reloads a single persisted execution from the {@link JobExplorer}.
 *
 * @param stepExecution a saved execution (non-null id required)
 * @return the freshly loaded copy, never null
 */
private StepExecution reloadStepExecution(StepExecution stepExecution) {
    Long id = stepExecution.getId();
    Assert.state(id != null, "StepExecution has null id. It must be saved first: " + stepExecution);
    StepExecution update = jobExplorer.getStepExecution(stepExecution.getJobExecutionId(), id);
    Assert.state(update != null, "Could not reload StepExecution from JobRepository: " + stepExecution);
    return update;
}
/**
 * Delegate execution to the {@link PartitionHandler} provided. The
 * {@link StepExecution} passed in here becomes the parent or master
 * execution for the partition, summarising the status on exit of the
 * logical grouping of work carried out by the {@link PartitionHandler}. The
 * individual step executions and their input parameters (through
 * {@link ExecutionContext}) for the partition elements are provided by the
 * {@link StepExecutionSplitter}.
 *
 * @param stepExecution the master step execution for the partition
 * @throws JobExecutionException if the aggregated status is unsuccessful
 *
 * @see Step#execute(StepExecution)
 */
@Override
protected void doExecute(StepExecution stepExecution) throws Exception {
    // Store the concrete step class name in the execution context
    // (NOTE(review): consumers of STEP_TYPE_KEY are not visible here — confirm intended use)
    stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName());

    // Wait for task completion and then aggregate the results
    Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution);
    stepExecution.upgradeStatus(BatchStatus.COMPLETED);
    stepExecutionAggregator.aggregate(stepExecution, executions);

    // Propagate any partition failure by failing the step as a whole
    if (stepExecution.getStatus().isUnsuccessful()) {
        throw new JobExecutionException("Partition handler returned an unsuccessful step");
    }
}
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName()); // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
/**
 * Delegate execution to the {@link PartitionHandler} provided. The
 * {@link StepExecution} passed in here becomes the parent or master
 * execution for the partition, summarising the status on exit of the
 * logical grouping of work carried out by the {@link PartitionHandler}. The
 * individual step executions and their input parameters (through
 * {@link ExecutionContext}) for the partition elements are provided by the
 * {@link StepExecutionSplitter}.
 *
 * @param stepExecution the master step execution for the partition
 * @throws JobExecutionException if the aggregated status is unsuccessful
 *
 * @see Step#execute(StepExecution)
 */
@Override
protected void doExecute(StepExecution stepExecution) throws Exception {
    // Wait for task completion and then aggregate the results
    Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution);
    stepExecution.upgradeStatus(BatchStatus.COMPLETED);
    stepExecutionAggregator.aggregate(stepExecution, executions);

    // Propagate any partition failure by failing the step as a whole
    if (stepExecution.getStatus().isUnsuccessful()) {
        throw new JobExecutionException("Partition handler returned an unsuccessful step");
    }
}
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
// Aggregate the collected step executions into the parent execution
stepExecutionAggregator.aggregate(stepExecution, stepExecutions);
// Fold the worker step executions into the master execution's summary
stepExecutionAggregator.aggregate(stepExecution, stepExecutions);
// Combine the partition results into the parent step execution
stepExecutionAggregator.aggregate(stepExecution, stepExecutions);