// NOTE(review): fragment of an anonymous class — the declaration opens before this
// view and the trailing "});" closes it. Accumulates the chunk size into an
// enclosing "count" field (presumably for verification in a test — confirm
// against the enclosing definition, which is not visible here).
public void process(StepContribution contribution, Chunk<Object> chunk) throws Exception { count += chunk.size(); } });
/**
 * Extension point allowing subclasses to record details of the incoming
 * items, in case they are needed for accounting later. The default stashes
 * the original input size in the chunk's user data. If this is overridden,
 * some or all of {@link #isComplete(Chunk)},
 * {@link #getFilterCount(Chunk, Chunk)} and
 * {@link #getAdjustedOutputs(Chunk, Chunk)} may also need to be, so that the
 * user data is handled consistently.
 *
 * @param inputs the inputs for the process
 */
protected void initializeUserData(Chunk<I> inputs) {
	// Capture the pre-processing item count before any filtering happens.
	final int originalSize = inputs.size();
	inputs.setUserData(originalSize);
}
/**
 * Extension point for subclasses to compute the filter count: the number of
 * items dropped between input and output. The default relies on the original
 * input size stored by {@link #initializeUserData(Chunk)}.
 *
 * @param inputs the inputs after transformation
 * @param outputs the outputs after transformation
 * @return the difference in sizes
 * @see #initializeUserData(Chunk)
 */
protected int getFilterCount(Chunk<I> inputs, Chunk<O> outputs) {
	// User data was seeded with the pre-filter input size.
	int originalCount = (Integer) inputs.getUserData();
	return originalCount - outputs.size();
}
// NOTE(review): fragment — the closing brace of this guard lies outside this
// view. Skips the persist step entirely when the transformed chunk is empty,
// so no write (and no write-count increment) occurs for an all-filtered chunk.
if(output.size() > 0) { persist(contribution, output);
// NOTE(review): body of an anonymous RetryCallback — the declaration opens
// before this view and the trailing "};" closes it. On the happy path it
// writes the whole output chunk and increments the write count; any exception
// that classifies for rollback is rethrown as-is, while a no-rollback
// exception is wrapped in ForceRollbackForWriteSkipException so the skipped
// item can still be located (honouring the skip-listener contract). When the
// processor is already scanning, it delegates to scan(...) instead.
@Override public Object doWithRetry(RetryContext context) throws Exception { contextHolder.set(context); if (!data.scanning()) { chunkMonitor.setChunkSize(inputs.size()); try { doWrite(outputs.getItems()); } catch (Exception e) { if (rollbackClassifier.classify(e)) { throw e; } /* * If the exception is marked as no-rollback, we need to * override that, otherwise there's no way to write the * rest of the chunk or to honour the skip listener * contract. */ throw new ForceRollbackForWriteSkipException( "Force rollback on skippable exception so that skipped item can be located.", e); } contribution.incrementWriteCount(outputs.size()); } else { scan(contribution, inputs, outputs, chunkMonitor, false); } return null; } };
/**
 * Simple implementation that delegates to {@link #doWrite(List)} and then
 * increments the write count in the contribution. Subclasses can handle more
 * complicated scenarios, e.g. with fault tolerance. If output items are
 * skipped they should be removed from the inputs as well.
 *
 * @param contribution the current step contribution
 * @param inputs the inputs that gave rise to the outputs
 * @param outputs the outputs to write
 * @throws Exception if there is a problem
 */
protected void write(StepContribution contribution, Chunk<I> inputs, Chunk<O> outputs) throws Exception {
	try {
		doWrite(outputs.getItems());
	}
	catch (Exception e) {
		// A simple (non fault-tolerant) processor is finished at this point:
		// clear the inputs so no further processing of this chunk occurs,
		// then propagate the failure unchanged.
		inputs.clear();
		throw e;
	}
	contribution.incrementWriteCount(outputs.size());
}
// NOTE(review): body of an anonymous RetryCallback — the declaration opens
// before this view and the trailing "};" closes it. Persists the chunk; on
// failure it consults the skip policy: a skippable exception notifies the
// SkipListener with all items in the chunk (per section 9.2.7 of JSR-352),
// otherwise the retry-write listener is notified and rollback-classified
// exceptions are rethrown as-is. Either way a skippable failure is wrapped in
// ForceRollbackForWriteSkipException so the skipped item can be located.
@Override @SuppressWarnings({ "unchecked", "rawtypes" }) public Object doWithRetry(RetryContext context) throws Exception { chunkMonitor.setChunkSize(chunk.size()); try { doPersist(contribution, chunk); } catch (Exception e) { if (shouldSkip(skipPolicy, e, contribution.getStepSkipCount())) { // Per section 9.2.7 of JSR-352, the SkipListener receives all the items within the chunk ((MulticasterBatchListener) getListener()).onSkipInWrite(chunk.getItems(), e); } else { getListener().onRetryWriteException((List<Object>) chunk.getItems(), e); if (rollbackClassifier.classify(e)) { throw e; } } /* * If the exception is marked as no-rollback, we need to * override that, otherwise there's no way to write the * rest of the chunk or to honour the skip listener * contract. */ throw new ForceRollbackForWriteSkipException( "Force rollback on skippable exception so that skipped item can be located.", e); } contribution.incrementWriteCount(chunk.size()); return null; } };
/**
 * Recovery path for a failed write: walks the input and output chunks in
 * lock-step, applying the skip policy to each pair. A no-rollback exception
 * is illegal in this state (recovery relies on the transaction having been
 * rolled back), so it is reported as a {@link RetryException}.
 */
@Override
public Object recover(RetryContext context) throws Exception {
	Throwable cause = context.getLastThrowable();
	// With more than one output item a no-rollback exception leaves the
	// retry state ambiguous, so fail fast.
	if (outputs.size() > 1 && !rollbackClassifier.classify(cause)) {
		throw new RetryException("Invalid retry state during write caused by "
				+ "exception that does not classify for rollback: ", cause);
	}
	Chunk<I>.ChunkIterator inputIterator = inputs.iterator();
	Chunk<O>.ChunkIterator outputIterator = outputs.iterator();
	while (outputIterator.hasNext()) {
		inputIterator.next();
		outputIterator.next();
		checkSkipPolicy(inputIterator, outputIterator, cause, contribution, true);
		if (!rollbackClassifier.classify(cause)) {
			throw new RetryException(
					"Invalid retry state during recovery caused by exception that does not classify for rollback: ",
					cause);
		}
	}
	return null;
}
/**
 * Extension point allowing subclasses to memorise details of the inputs for
 * later accounting. By default the original input size is recorded in the
 * chunk's user data. Overriders should also review
 * {@link #isComplete(Chunk)}, {@link #getFilterCount(Chunk, Chunk)} and
 * {@link #getAdjustedOutputs(Chunk, Chunk)} so the user data stays
 * consistent.
 *
 * @param inputs the inputs for the process
 */
protected void initializeUserData(Chunk<I> inputs) {
	// Record how many items arrived before any filtering took place.
	final int inputCount = inputs.size();
	inputs.setUserData(inputCount);
}
/**
 * Extension point for subclasses that need to remember the contents of the
 * inputs for accounting purposes later on. The default implementation stores
 * the original input size as the chunk's user data. If overridden, consider
 * also overriding {@link #isComplete(Chunk)},
 * {@link #getFilterCount(Chunk, Chunk)} and
 * {@link #getAdjustedOutputs(Chunk, Chunk)} so the user data is handled
 * consistently.
 *
 * @param inputs the inputs for the process
 */
protected void initializeUserData(Chunk<I> inputs) {
	// Stash the pre-processing size for later filter-count accounting.
	inputs.setUserData(Integer.valueOf(inputs.size()));
}
/**
 * Extension point for calculating the filter count. Defaults to the
 * difference between the original input size (stored by
 * {@link #initializeUserData(Chunk)}) and the output size.
 *
 * @param inputs the inputs after transformation
 * @param outputs the outputs after transformation
 * @return the difference in sizes
 * @see #initializeUserData(Chunk)
 */
protected int getFilterCount(Chunk<I> inputs, Chunk<O> outputs) {
	// The user data holds the original (pre-filter) input size.
	final int before = (Integer) inputs.getUserData();
	final int after = outputs.size();
	return before - after;
}
/**
 * Extension point so subclasses can memorise the contents of the inputs in
 * case they are needed for accounting later. The default remembers the
 * original input size via the chunk's user data. When overriding, some or
 * all of {@link #isComplete(Chunk)}, {@link #getFilterCount(Chunk, Chunk)}
 * and {@link #getAdjustedOutputs(Chunk, Chunk)} might also need overriding
 * to keep the user data consistent.
 *
 * @param inputs the inputs for the process
 */
protected void initializeUserData(Chunk<I> inputs) {
	// Seed the user data with the original item count.
	final int itemCount = inputs.size();
	inputs.setUserData(itemCount);
}
/**
 * Extension point for subclasses to compute how many items were filtered
 * out. The default subtracts the output size from the original input size
 * remembered by {@link #initializeUserData(Chunk)}.
 *
 * @param inputs the inputs after transformation
 * @param outputs the outputs after transformation
 * @return the difference in sizes
 * @see #initializeUserData(Chunk)
 */
protected int getFilterCount(Chunk<I> inputs, Chunk<O> outputs) {
	// initializeUserData stored the original input size here.
	final Integer originalSize = (Integer) inputs.getUserData();
	return originalSize - outputs.size();
}
/**
 * Extension point for the filter count calculation. By default this is the
 * difference between the original input size (recorded in the chunk's user
 * data by {@link #initializeUserData(Chunk)}) and the number of outputs.
 *
 * @param inputs the inputs after transformation
 * @param outputs the outputs after transformation
 * @return the difference in sizes
 * @see #initializeUserData(Chunk)
 */
protected int getFilterCount(Chunk<I> inputs, Chunk<O> outputs) {
	// Recover the pre-filter size that was stashed in the user data.
	int initialSize = (Integer) inputs.getUserData();
	int finalSize = outputs.size();
	return initialSize - finalSize;
}
/**
 * Extension point letting subclasses memorise the inputs for later
 * accounting. The default implementation records the original input size in
 * the chunk's user data. If this is overridden then some or all of
 * {@link #isComplete(Chunk)}, {@link #getFilterCount(Chunk, Chunk)} and
 * {@link #getAdjustedOutputs(Chunk, Chunk)} might also need to be, to keep
 * the user data consistent.
 *
 * @param inputs the inputs for the process
 */
protected void initializeUserData(Chunk<I> inputs) {
	// Remember the original size so the filter count can be derived later.
	final Integer originalSize = inputs.size();
	inputs.setUserData(originalSize);
}
/**
 * Extension point for subclasses to work out the filter count. Defaults to
 * the original input size (as recorded by
 * {@link #initializeUserData(Chunk)}) minus the output size.
 *
 * @param inputs the inputs after transformation
 * @param outputs the outputs after transformation
 * @return the difference in sizes
 * @see #initializeUserData(Chunk)
 */
protected int getFilterCount(Chunk<I> inputs, Chunk<O> outputs) {
	// The chunk's user data carries the original input size.
	final int sizeBeforeFiltering = (Integer) inputs.getUserData();
	return sizeBeforeFiltering - outputs.size();
}
/**
 * Simple implementation that delegates to {@link #doWrite(List)} and then
 * increments the write count in the contribution. Subclasses can handle more
 * complicated scenarios, e.g. with fault tolerance. If output items are
 * skipped they should be removed from the inputs as well.
 *
 * @param contribution the current step contribution
 * @param inputs the inputs that gave rise to the outputs
 * @param outputs the outputs to write
 * @throws Exception if there is a problem
 */
protected void write(StepContribution contribution, Chunk<I> inputs, Chunk<O> outputs) throws Exception {
	// Write first; the count is only incremented if the write succeeds.
	doWrite(outputs.getItems());
	contribution.incrementWriteCount(outputs.size());
}
/**
 * Recovery callback for a failed write. Iterates the input and output
 * chunks in parallel and applies the skip policy to each pair; an exception
 * that does not classify for rollback invalidates the retry state and is
 * reported as a {@link RetryException}.
 */
@Override
public Object recover(RetryContext context) throws Exception {
	Throwable failure = context.getLastThrowable();
	// Multiple outputs plus a no-rollback exception is an unrecoverable
	// state: there is no way to know which item failed.
	if (outputs.size() > 1 && !rollbackClassifier.classify(failure)) {
		throw new RetryException("Invalid retry state during write caused by "
				+ "exception that does not classify for rollback: ", failure);
	}
	Chunk<I>.ChunkIterator itemIterator = inputs.iterator();
	Chunk<O>.ChunkIterator resultIterator = outputs.iterator();
	while (resultIterator.hasNext()) {
		itemIterator.next();
		resultIterator.next();
		// May remove the offending pair and register the skip.
		checkSkipPolicy(itemIterator, resultIterator, failure, contribution, true);
		if (!rollbackClassifier.classify(failure)) {
			throw new RetryException(
					"Invalid retry state during recovery caused by exception that does not classify for rollback: ",
					failure);
		}
	}
	return null;
}
/**
 * Handles recovery after a failed write by scanning the chunk item by item.
 * The skip policy is consulted for each input/output pair; exceptions that
 * do not classify for rollback make the retry state invalid and are
 * surfaced as {@link RetryException}s.
 */
@Override
public Object recover(RetryContext context) throws Exception {
	Throwable lastThrowable = context.getLastThrowable();
	// Guard: recovery of a multi-item chunk requires the failure to have
	// rolled the transaction back.
	if (outputs.size() > 1 && !rollbackClassifier.classify(lastThrowable)) {
		throw new RetryException("Invalid retry state during write caused by "
				+ "exception that does not classify for rollback: ", lastThrowable);
	}
	Chunk<I>.ChunkIterator inputCursor = inputs.iterator();
	Chunk<O>.ChunkIterator outputCursor = outputs.iterator();
	while (outputCursor.hasNext()) {
		inputCursor.next();
		outputCursor.next();
		checkSkipPolicy(inputCursor, outputCursor, lastThrowable, contribution, true);
		if (!rollbackClassifier.classify(lastThrowable)) {
			throw new RetryException(
					"Invalid retry state during recovery caused by exception that does not classify for rollback: ",
					lastThrowable);
		}
	}
	return null;
}
/**
 * Recovery path for a failed write: steps through the input and output
 * chunks together, applying the skip policy to each pair. An exception that
 * does not classify for rollback leaves the retry state invalid, so it is
 * reported as a {@link RetryException}.
 */
public Object recover(RetryContext context) throws Exception {
	Throwable cause = context.getLastThrowable();
	// A no-rollback failure with several pending outputs cannot be
	// attributed to a single item — fail fast.
	if (outputs.size() > 1 && !rollbackClassifier.classify(cause)) {
		throw new RetryException("Invalid retry state during write caused by "
				+ "exception that does not classify for rollback: ", cause);
	}
	Chunk<I>.ChunkIterator inputCursor = inputs.iterator();
	Chunk<O>.ChunkIterator outputCursor = outputs.iterator();
	while (outputCursor.hasNext()) {
		inputCursor.next();
		outputCursor.next();
		checkSkipPolicy(inputCursor, outputCursor, cause, contribution);
		if (!rollbackClassifier.classify(cause)) {
			throw new RetryException(
					"Invalid retry state during recovery caused by exception that does not classify for rollback: ",
					cause);
		}
	}
	return null;
}