/**
 * Exercises WriteBatcher listener exception handling. Registers listeners that
 * throw from both the success and failure callbacks — first an Error subtype
 * (InternalError), then a RuntimeException — and delegates each batcher to the
 * counting overload to verify the expected success/failure batch counts.
 * Cleans up the written docs afterwards.
 */
public void testExceptions(DocumentWriteSet docs, int expectedSuccesses, int expectedFailures) {
  // Listeners throwing an Error (InternalError) must not derail the job.
  WriteBatcher errorThrowingBatcher = moveMgr.newWriteBatcher()
    .onBatchSuccess( batch -> { throw new InternalError(errorMessage); } )
    .onBatchFailure( (batch, throwable) -> { throw new InternalError(errorMessage); } );
  testExceptions(errorThrowingBatcher, docs, expectedSuccesses, expectedFailures);

  // Same verification with RuntimeException-throwing listeners.
  WriteBatcher runtimeThrowingBatcher = moveMgr.newWriteBatcher()
    .onBatchSuccess( batch -> { throw new RuntimeException(errorMessage); } )
    .onBatchFailure( (batch, throwable) -> { throw new RuntimeException(errorMessage); } );
  testExceptions(runtimeThrowingBatcher, docs, expectedSuccesses, expectedFailures);

  cleanupDocs(docs);
}
public void testExceptions(WriteBatcher writeBatcher, DocumentWriteSet docs, int expectedSuccesses, int expectedFailures) { final AtomicInteger successfulBatchCount = new AtomicInteger(0); final AtomicInteger failureBatchCount = new AtomicInteger(0); writeBatcher .withBatchSize(1) .onBatchSuccess( batch -> successfulBatchCount.incrementAndGet() ) .onBatchFailure( (batch, throwable) -> failureBatchCount.incrementAndGet() ); moveMgr.startJob(writeBatcher); for ( DocumentWriteOperation doc : docs ) { writeBatcher.add(doc.getUri(), doc.getContent()); } // while batchSize=1 means all batches are queued, we still need to wait for them to finish writeBatcher.flushAndWait(); moveMgr.stopJob(writeBatcher); assertEquals(expectedSuccesses, successfulBatchCount.get()); assertEquals(expectedFailures, failureBatchCount.get()); }
/**
 * Builds the plugin's WriteBatcher: logs a summary report after every
 * successful batch, logs the failed URIs (with the cause) on batch failure,
 * and starts the job, retaining the ticket for later control.
 */
public OurJbossESBPlugin(DatabaseClient client) {
  moveMgr = client.newDataMovementManager();
  batcher = moveMgr.newWriteBatcher()
    .withJobName("OurJbossESBPlugin")
    .withBatchSize(BATCH_SIZE)
    // writing each full batch via mlcp is the default behavior; the listener
    // registration is shown commented out only to make that explicit
    //.onBatchFull( new MlcpBatchFullListener() )
    // log a summary report after each successful batch
    .onBatchSuccess( batch -> logger.info(getSummaryReport()) )
    .onBatchFailure( (batch, throwable) -> {
      List<String> failedUris = new ArrayList<>();
      for ( WriteEvent writeEvent : batch.getItems() ) {
        failedUris.add(writeEvent.getTargetUri());
      }
      logger.warn("FAILURE on batch:" + failedUris + "\n", throwable);
    });
  ticket = moveMgr.startJob(batcher);
}
.onBatchSuccess(new CloseBatchListener()) .onBatchFailure(new CloseFailureListener());
batcher.withBatchSize(120); batcher .onBatchSuccess( batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue48] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length);
batcher.onBatchSuccess(batch -> { }).onBatchFailure((batch, throwable) -> { throw new RuntimeException(throwable);
WriteBatcher batcher = moveMgr.newWriteBatcher(); batcher.withBatchSize(100); batcher.onBatchSuccess( batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue61] batch: {}, items: {}",
ihbMT.withBatchSize(7).withThreadCount(60); ihbMT.onBatchSuccess( batch -> { if (count.get() > 6 ) { boolean stopped = isStopped.getAndSet(true);
WriteBatcher ihbMT = moveMgr.newWriteBatcher(); ihbMT.withBatchSize(11); ihbMT.onBatchSuccess( batch -> { logger.debug("[testMultipleFlushAnStop_Issue109] batch: {}, items: {}",
WriteBatcher ihb1 = moveMgr.newWriteBatcher() .withBatchSize(1) .onBatchSuccess( batch -> { logger.debug("[testSimple] batch: {}, items: {}",
.withBatchSize(10) .withForestConfig(ffg) .onBatchSuccess( batch -> { for ( WriteEvent event : batch.getItems() ) { sentUris.add(event.getTargetUri());
.withBatchSize(batchSize) .withThreadCount(batcherThreadCount) .onBatchSuccess( batch -> { logger.debug("[testWrites_{}] batch: {}, items: {}", testName,
batcher.withBatchSize(1); final AtomicInteger successfulCount = new AtomicInteger(0); batcher.onBatchSuccess( batch -> { logger.debug("[testUndeclaredFormat_Issue60] batch: {}, items: {}",
.addParameter("newValue", "test1a") .onBatchSuccess( batch -> { logger.debug("[testWrites] batch: {}, items: {}",
.withBatchSize(2000) .withThreadCount(8) .onBatchSuccess(batch -> installDocsFinished = true) .onBatchFailure((batch, failure) -> { failure.printStackTrace();
.withBatchSize(100) .withThreadCount(4) .onBatchSuccess(batch -> installDocsFinished = true) .onBatchFailure((batch, failure) -> { failure.printStackTrace();
.withBatchSize(100) .withThreadCount(4) .onBatchSuccess(batch -> installDocsFinished = true) .onBatchFailure((batch, failure) -> { failure.printStackTrace();
@Test public void testWriteBatcher() { assertEquals(null, client.newDocumentManager().exists("doc1.txt")); assertEquals(null, client.newDocumentManager().exists("doc2.txt")); // begin copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java WriteBatcher whb = dataMovementManager.newWriteBatcher() .withBatchSize(100) .withThreadCount(20) .onBatchSuccess(batch -> { logger.debug("batch # {}, so far: {}", batch.getJobBatchNumber(), batch.getJobWritesSoFar()); }) .onBatchFailure((batch,throwable) -> throwable.printStackTrace() ); JobTicket ticket = dataMovementManager.startJob(whb); // the add or addAs methods could be called in separate threads on the // single whb instance whb.add ("doc1.txt", new StringHandle("doc1 contents")); whb.addAs("doc2.txt", "doc2 contents"); whb.flushAndWait(); // send the two docs even though they're not a full batch dataMovementManager.stopJob(ticket); // end copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java assertTrue(null != client.newDocumentManager().exists("doc1.txt")); assertTrue(null != client.newDocumentManager().exists("doc2.txt")); } }
batcher.onBatchSuccess(batch -> { }).onBatchFailure((batch, throwable) -> { throwable.printStackTrace();
assertEquals(1, successListeners.length); batcher.onBatchSuccess(successListener); successListeners = batcher.getBatchSuccessListeners(); assertEquals(2, successListeners.length);