/**
 * This implements the WriteFailureListener interface.
 *
 * If the failure was caused by an unavailable host (as determined by
 * processException), the batch is re-submitted through the batcher so it can
 * be retried against the remaining healthy hosts.
 *
 * @param batch the batch of WriteEvents
 * @param throwable the exception
 */
public void processFailure(WriteBatch batch, Throwable throwable) {
  boolean isHostUnavailableException =
    processException(batch.getBatcher(), throwable, batch.getClient().getHost());
  // fixed: dropped the redundant "== true" comparison
  if ( isHostUnavailableException ) {
    try {
      logger.warn("Retrying failed batch: {}, results so far: {}, uris: {}",
        batch.getJobBatchNumber(), batch.getJobWritesSoFar(),
        Stream.of(batch.getItems()).map(event->event.getTargetUri()).collect(Collectors.toList()));
      batch.getBatcher().retryWithFailureListeners(batch);
    } catch (RuntimeException e) {
      logger.error("Exception during retry", e);
      // Recurse so a failure during the retry itself gets the same
      // host-unavailable handling; recursion ends once processException
      // stops classifying the failure as host-unavailable.
      processFailure(batch, e);
    }
  }
}
/**
 * Re-writes the documents from a failed batch using a fresh BatchWriteSet,
 * forcing a new transaction so the retry is independent of the failed attempt.
 *
 * @param batch the batch whose documents should be written again; must not be null
 * @param callFailListeners when false, a failure during the retry is rethrown to
 *        the caller (as a RuntimeException or DataMovementException) instead of
 *        being dispatched to the registered failure listeners
 * @throws IllegalArgumentException if batch is null
 */
private void retry(WriteBatch batch, boolean callFailListeners) {
  // A stopped job must not issue new writes.
  if ( isStopped() ) {  // fixed: dropped the redundant "== true" comparison
    logger.warn("Job is now stopped, aborting the retry");
    return;
  }
  if ( batch == null ) throw new IllegalArgumentException("batch must not be null");
  boolean forceNewTransaction = true;
  BatchWriteSet writeSet = newBatchWriteSet(forceNewTransaction, batch.getJobBatchNumber());
  if ( !callFailListeners ) {
    // Surface the failure to the caller rather than the listeners.
    writeSet.onFailure(throwable -> {
      if ( throwable instanceof RuntimeException ) throw (RuntimeException) throwable;
      else throw new DataMovementException("Failed to retry batch", throwable);
    });
  }
  // Copy every document from the failed batch into the new write set.
  for (WriteEvent doc : batch.getItems()) {
    writeSet.getWriteSet().add(doc.getTargetUri(), doc.getMetadata(), doc.getContent());
  }
  // Run synchronously on the current thread.
  BatchWriter runnable = new BatchWriter(writeSet);
  runnable.run();
}
@Override
batch -> { logger.debug("[testWrites_{}] batch: {}, items: {}", testName, batch.getJobBatchNumber(), batch.getItems().length); successfulBatchCount.incrementAndGet(); for ( WriteEvent event : batch.getItems() ) { successfulCount.incrementAndGet(); if ( expectedBatchSize != batch.getItems().length) { if ( batch.getJobBatchNumber() != expectedBatches ) { failures.append("ERROR: There should be " + expectedBatchSize + " items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length); batchTicket.set(batch.getJobTicket()); batchTimestamp.set(batch.getTimestamp()); (batch, throwable) -> { failureBatchCount.incrementAndGet(); failureCount.addAndGet(batch.getItems().length); throwable.printStackTrace(); for ( WriteEvent event : batch.getItems() ) { logger.debug("failure event.getTargetUri()=[{}]", event.getTargetUri()); if ( expectedBatchSize != batch.getItems().length) { if ( batch.getJobBatchNumber() != expectedBatches ) { failures.append("ERROR: There should be " + expectedBatchSize + " items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length);
@Test
public void testWriteBatcher() {
  // Precondition: neither sample document exists yet.
  assertEquals(null, client.newDocumentManager().exists("doc1.txt"));
  assertEquals(null, client.newDocumentManager().exists("doc2.txt"));

  // NOTE: the code between the copy markers is kept verbatim in sync with the
  // "Using WriteBatcher" example in the package-info.java javadoc — do not
  // restyle it here without updating the javadoc too.
  // begin copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
  WriteBatcher whb = dataMovementManager.newWriteBatcher()
    .withBatchSize(100)
    .withThreadCount(20)
    .onBatchSuccess(batch -> {
        logger.debug("batch # {}, so far: {}", batch.getJobBatchNumber(), batch.getJobWritesSoFar());
    })
    .onBatchFailure((batch,throwable) -> throwable.printStackTrace() );
  JobTicket ticket = dataMovementManager.startJob(whb);
  // the add or addAs methods could be called in separate threads on the
  // single whb instance
  whb.add  ("doc1.txt", new StringHandle("doc1 contents"));
  whb.addAs("doc2.txt", "doc2 contents");

  whb.flushAndWait(); // send the two docs even though they're not a full batch
  dataMovementManager.stopJob(ticket);
  // end copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java

  // Postcondition: both documents were written by the batcher.
  assertTrue(null != client.newDocumentManager().exists("doc1.txt"));
  assertTrue(null != client.newDocumentManager().exists("doc2.txt"));
}
}
@Override public void processFailure(WriteBatch batch, Throwable failure) { // Increment the number of batches that failed failureBatchesCount.incrementAndGet(); // Get the number of written documents that failed failureEventsCount.addAndGet(batch.getItems().length); }
.onBatchSuccess( batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue48] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){ .onBatchFailure( (batch, throwable) -> { throwable.printStackTrace(); System.out.println("Failure Batch size "+batch.getItems().length); for(WriteEvent w:batch.getItems()){ System.out.println("Failure "+w.getTargetUri());
@Override public void processEvent(WriteBatch batch) { // Increment the number of batches that succeeded successBatchesCount.incrementAndGet(); // Get the number of written documents that succeeded successEventsCount.addAndGet(batch.getItems().length); } }
boolean stopped = isStopped.getAndSet(true); if ( stopped == false ) { moveMgr.stopJob(batch.getBatcher()); logger.debug("[testStopBeforeFlush_Issue595] Job stopped"); count.addAndGet(batch.getItems().length); logger.debug("[testStopBeforeFlush_Issue595] batch: " + batch.getJobBatchNumber() + ", items: " + batch.getItems().length + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost()); }) .onBatchFailure( (batch, throwable) -> { if ( cause instanceof InterruptedIOException ) { logger.debug("An expected InterruptedIOException occurred because the job was stopped prematurely" + ", batch: " + batch.getJobBatchNumber() + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost() + ", uris: " + Stream.of(batch.getItems()).map(event->event.getTargetUri()).collect(Collectors.toList())); return; logger.debug("[testStopBeforeFlush_Issue595] Failed Batch: batch: " + batch.getJobBatchNumber() + ", batch: " + batch.getJobBatchNumber() + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost() + ", uris: " + Stream.of(batch.getItems()).map(event->event.getTargetUri()).collect(Collectors.toList())); });
batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue61] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){ (batch, throwable) -> { throwable.printStackTrace(); for(WriteEvent w:batch.getItems()){ System.out.println("Failure "+w.getTargetUri());
else throw new DataMovementException("Failed to retry batch after failover", throwable); }); for ( WriteEvent doc : writerTask.writeSet.getBatchOfWriteEvents().getItems() ) { writeSet.getWriteSet().add(doc.getTargetUri(), doc.getMetadata(), doc.getContent());
batch -> { logger.debug("[testSimple] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w: batch.getItems()){ successBatch.append(w.getTargetUri()+":"); .onBatchFailure( (batch, throwable) -> { for(WriteEvent w: batch.getItems()){ failureBatch.append(w.getTargetUri()+":");
public OurJbossESBPlugin(DatabaseClient client) {
  // Build a write batcher that logs a summary on success and the full
  // list of affected URIs on failure, then start the job immediately.
  moveMgr = client.newDataMovementManager();
  batcher = moveMgr.newWriteBatcher()
    .withJobName("OurJbossESBPlugin")
    .withBatchSize(BATCH_SIZE)
    // every time a batch is full, write it to the database via mlcp
    // this is the default, only included here to make it obvious
    //.onBatchFull( new MlcpBatchFullListener() )
    // log a summary report after each successful batch
    .onBatchSuccess( batch -> logger.info(getSummaryReport()) )
    .onBatchFailure( (batch, throwable) -> {
      // Collect the target URI of every document in the failed batch
      // so the warning identifies exactly what was not written.
      List<String> failedUris = new ArrayList<>();
      for ( WriteEvent item : batch.getItems() ) {
        failedUris.add(item.getTargetUri());
      }
      logger.warn("FAILURE on batch:" + failedUris + "\n", throwable);
    });
  ticket = moveMgr.startJob(batcher);
}
batch -> { logger.debug("[testMultipleFlushAnStop_Issue109] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){ (batch, throwable) -> { for(WriteEvent w:batch.getItems()){
.withForestConfig(ffg) .onBatchSuccess( batch -> { for ( WriteEvent event : batch.getItems() ) { sentUris.add(event.getTargetUri());
batch -> { logger.debug("[testUndeclaredFormat_Issue60] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){ successfulCount.incrementAndGet();
batch -> { logger.debug("[testWrites] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); successListenerWasRun.append("true"); if ( 2 != batch.getItems().length) { failures.append("ERROR: There should be 2 items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length); (batch, throwable) -> { failListenerWasRun.append("true"); if ( 2 != batch.getItems().length) { failures.append("ERROR: There should be 2 items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length);