/**
 * Wraps one batch of write operations for execution (typically on the thread pool).
 *
 * @param writeSet the batch to write; must contain at least one document
 * @throws IllegalStateException if the batch is empty
 */
public BatchWriter(BatchWriteSet writeSet) {
  // An empty batch indicates an upstream bookkeeping error, so fail fast.
  if ( writeSet.getWriteSet().isEmpty() ) {
    throw new IllegalStateException("Attempt to write an empty batch");
  }
  this.writeSet = writeSet;
}
/**
 * Updates the running items-written counter, then notifies every registered
 * success listener about this batch. A failing listener is logged and does
 * not prevent the remaining listeners from running.
 */
private void sendSuccessToListeners(BatchWriteSet batchWriteSet) {
  int batchItemCount = batchWriteSet.getWriteSet().size();
  batchWriteSet.setItemsSoFar(itemsSoFar.addAndGet(batchItemCount));
  WriteBatch eventBatch = batchWriteSet.getBatchOfWriteEvents();
  for ( WriteBatchListener listener : successListeners ) {
    try {
      listener.processEvent(eventBatch);
    } catch (Throwable t) {
      // Isolate misbehaving listeners so the others still get the event.
      logger.error("Exception thrown by an onBatchSuccess listener", t);
    }
  }
}
/**
 * Closes every Closeable content or metadata handle in this batch's write set.
 * Closing continues through failures so all handles get a close() attempt;
 * the most recent failure, if any, is rethrown after the loop completes.
 *
 * @throws Throwable the last close() failure encountered, if any
 */
private void closeAllHandles() throws Throwable {
  Throwable mostRecentFailure = null;
  for ( DocumentWriteOperation doc : writeSet.getWriteSet() ) {
    try {
      Object content = doc.getContent();
      if ( content instanceof Closeable ) {
        ((Closeable) content).close();
      }
      Object metadata = doc.getMetadata();
      if ( metadata instanceof Closeable ) {
        ((Closeable) metadata).close();
      }
    } catch (Throwable t) {
      // Remember the failure but keep closing the remaining handles.
      logger.error("error calling close()", t);
      mostRecentFailure = t;
    }
  }
  if ( mostRecentFailure != null ) throw mostRecentFailure;
}
}
@Override public WriteBatcher add(DocumentWriteOperation writeOperation) { if ( writeOperation.getUri() == null ) throw new IllegalArgumentException("uri must not be null"); if ( writeOperation.getContent() == null ) throw new IllegalArgumentException("contentHandle must not be null"); initialize(); requireNotStopped(); queue.add(writeOperation); logger.trace("add uri={}", writeOperation.getUri()); // if we have queued batchSize, it's time to flush a batch long recordNum = batchCounter.incrementAndGet(); boolean timeToWriteBatch = (recordNum % getBatchSize()) == 0; if ( timeToWriteBatch ) { BatchWriteSet writeSet = newBatchWriteSet(false); int i=0; for ( ; i < getBatchSize(); i++ ) { DocumentWriteOperation doc = queue.poll(); if ( doc != null ) { writeSet.getWriteSet().add(doc); } else { // strange, there should have been a full batch of docs in the queue... break; } } if ( writeSet.getWriteSet().size() > 0 ) { threadPool.submit( new BatchWriter(writeSet) ); } } return this; }
// NOTE(review): fragment — the enclosing method's start and end are outside this view.
// Copies up to getBatchSize() operations from the iterator into this batch's write set.
for ( ; j < getBatchSize() && iter.hasNext(); j++ ) { DocumentWriteOperation doc = iter.next(); writeSet.getWriteSet().add(doc);
// NOTE(review): fragment — the `});` closes a lambda/anonymous block defined outside
// this view. Re-adds each of writerTask's write events (uri, metadata, content) into
// a new write set — presumably to rewrite the same documents; confirm in full method.
}); for ( WriteEvent doc : writerTask.writeSet.getBatchOfWriteEvents().getItems() ) { writeSet.getWriteSet().add(doc.getTargetUri(), doc.getMetadata(), doc.getContent());
// NOTE(review): fragment — the closing braces of these branches lie outside this view.
// Non-temporal batches are written via a fresh document manager; temporal batches
// reuse docMgr, forcing Format.UNKNOWN and passing the temporal collection through.
if ( writeSet.getTemporalCollection() == null ) { writeSet.getClient().newDocumentManager().write( writeSet.getWriteSet(), writeSet.getTransform(), transaction ); } else { docMgr.setContentFormat(Format.UNKNOWN); docMgr.write( writeSet.getWriteSet(), writeSet.getTransform(), transaction, writeSet.getTemporalCollection() );
/**
 * Retries a previously failed batch synchronously on the calling thread,
 * forcing a new transaction for the rewrite.
 *
 * @param batch the failed batch to write again; must not be null
 * @param callFailListeners when false, a retry failure is rethrown to the
 *        caller instead of being dispatched to the onBatchFailure listeners
 * @throws IllegalArgumentException if batch is null
 */
private void retry(WriteBatch batch, boolean callFailListeners) {
  if ( isStopped() ) {
    logger.warn("Job is now stopped, aborting the retry");
    return;
  }
  if ( batch == null ) throw new IllegalArgumentException("batch must not be null");
  boolean forceNewTransaction = true;
  BatchWriteSet writeSet = newBatchWriteSet(forceNewTransaction, batch.getJobBatchNumber());
  if ( !callFailListeners ) {
    // Suppress listener dispatch: surface any failure directly to the caller.
    writeSet.onFailure(throwable -> {
      if ( throwable instanceof RuntimeException ) throw (RuntimeException) throwable;
      else throw new DataMovementException("Failed to retry batch", throwable);
    });
  }
  // Rebuild the write set from the failed batch's recorded events.
  for (WriteEvent doc : batch.getItems()) {
    writeSet.getWriteSet().add(doc.getTargetUri(), doc.getMetadata(), doc.getContent());
  }
  // Run synchronously rather than submitting to the thread pool.
  BatchWriter runnable = new BatchWriter(writeSet);
  runnable.run();
}

@Override
/**
 * Builds a WriteBatch snapshot of this write set, translating every queued
 * write operation into a WriteEvent and attaching job-level bookkeeping
 * (batch number, writes so far, job ticket).
 */
public WriteBatch getBatchOfWriteEvents() {
  WriteBatchImpl batch = new WriteBatchImpl()
    .withBatcher(batcher)
    .withClient(client)
    .withJobBatchNumber(batchNumber)
    .withJobWritesSoFar(itemsSoFar)
    .withJobTicket(batcher.getJobTicket());
  // Translate operations to events one-for-one, preserving iteration order.
  WriteEventImpl[] writeEvents = new WriteEventImpl[getWriteSet().size()];
  int index = 0;
  for ( DocumentWriteOperation writeOperation : getWriteSet() ) {
    writeEvents[index++] = new WriteEventImpl()
      .withTargetUri(writeOperation.getUri())
      .withContent(writeOperation.getContent())
      .withMetadata(writeOperation.getMetadata());
  }
  batch.withItems(writeEvents);
  return batch;
}
}