/**
 * Flushes any queued writes and stops the write job.
 *
 * No-op when {@code writeBatcher} is null (job was never started).
 */
@Override
public void waitForCompletion() {
    if (writeBatcher != null) {
        // Block until every queued batch has been written.
        writeBatcher.flushAndWait();
        // The original code branched on (jobTicket != null) but both branches
        // called stopJob(writeBatcher) — the dead conditional is collapsed here;
        // behavior is unchanged.
        dataMovementManager.stopJob(writeBatcher);
    }
}
// NOTE(review): this fragment looks duplicated/garbled — a stray ");" separates two
// identical flushAndWait/stopJob/release sequences. Likely a paste or merge artifact;
// confirm against the original file before relying on it.
writer.flushAndWait(); dmm.stopJob(ticket); dmm.release(); ); writer.flushAndWait(); dmm.stopJob(ticket); dmm.release();
public void testExceptions(WriteBatcher writeBatcher, DocumentWriteSet docs, int expectedSuccesses, int expectedFailures) { final AtomicInteger successfulBatchCount = new AtomicInteger(0); final AtomicInteger failureBatchCount = new AtomicInteger(0); writeBatcher .withBatchSize(1) .onBatchSuccess( batch -> successfulBatchCount.incrementAndGet() ) .onBatchFailure( (batch, throwable) -> failureBatchCount.incrementAndGet() ); moveMgr.startJob(writeBatcher); for ( DocumentWriteOperation doc : docs ) { writeBatcher.add(doc.getUri(), doc.getContent()); } // while batchSize=1 means all batches are queued, we still need to wait for them to finish writeBatcher.flushAndWait(); moveMgr.stopJob(writeBatcher); assertEquals(expectedSuccesses, successfulBatchCount.get()); assertEquals(expectedFailures, failureBatchCount.get()); }
/**
 * Repro for issue 646: writes 21 small string docs, each with a
 * "docMeta-1" property, through a batch-size-10 batcher.
 *
 * NOTE(review): the batcher is never passed to moveMgr.startJob() and no
 * stopJob() follows — presumably part of the repro (test is @Ignore'd);
 * confirm before re-enabling.
 */
@Ignore
public void testIssue646() throws Exception {
    WriteBatcher batcher = moveMgr.newWriteBatcher()
        .withBatchSize(10);
    batcher.onBatchFailure((batch, throwable) -> throwable.printStackTrace());
    final int docCount = 21;
    for (int i = 0; i < docCount; i++) {
        DocumentMetadataHandle metadata =
            new DocumentMetadataHandle().withProperty("docMeta-1", "true");
        batcher.addAs("/local/string-" + i, metadata, "test");
    }
    batcher.flushAndWait();
}
@Test public void testCloseHandles() throws Exception { DocumentMetadataHandle meta = new DocumentMetadataHandle() .withCollections(whbTestCollection); final AtomicInteger failCount = new AtomicInteger(0); WriteBatcher batcher = moveMgr.newWriteBatcher() .onBatchFailure( (batch, throwable) -> { logger.error("Error in testCloseHandles", throwable); failCount.incrementAndGet(); } ); JobTicket ticket = moveMgr.startJob(batcher); AtomicBoolean closed = new AtomicBoolean(false); FileInputStream fileStream = new FileInputStream("src/test/resources/test.xml") { public void close() throws IOException { super.close(); closed.set(true); } }; batcher.add("test.xml", meta, new InputStreamHandle(fileStream)); // when we call flushAndWait, the WriteBatcher should write the batch the close all the handles batcher.flushAndWait(); assertEquals(true, closed.get()); moveMgr.stopJob(ticket); assertEquals(0, failCount.get()); }
// Fragment cut from a larger method: the throw presumably ends a catch block
// (rethrowing a checked exception as unchecked, cause preserved), and the
// flushAndWait() belongs to code after that try/catch — TODO confirm against
// the full file.
throw new RuntimeException(e);
batcher.flushAndWait();
// Record the URI as sent, then drain all queued writes and stop the job.
sentUris.add(uri);
writeBatcher.flushAndWait();
moveMgr.stopJob(writeBatcher);
// Queue the doc (URI doubles as content — presumably intentional in this test),
// flush the remaining partial batch, and stop the job.
wb.addAs(uri, meta, uri);
wb.flushAndWait();
moveMgr.stopJob(wb);
// Block until every queued batch has been written, then stop the job via its ticket.
batcher.flushAndWait();
moveMgr.stopJob(ticket);
// Write a fourth doc tagged with both collections; flushing guarantees the
// success/failure listeners have run before their flags are checked.
// (successListenerWasRun/failListenerWasRun are presumably mutable string
// buffers the listeners append to — confirm against the full test.)
meta = new DocumentMetadataHandle().withCollections(collection, whbTestCollection);
batcher.add(uri4, meta, new JacksonHandle(doc4));
batcher.flushAndWait();
assertEquals("The success listener should have run", "true", successListenerWasRun.toString());
assertEquals("The failure listener should have run", "true", failListenerWasRun.toString());
// Queue the doc, then wait for all outstanding batches to finish.
batcher.addAs(uris[i], meta, docContents);
batcher.flushAndWait();
// Wait for the writes, then verify via listener-set flags.
// (JUnit 4 assert style: message first — note sibling fragments use the
// JUnit 5 message-last order.)
writeBatcher.flushAndWait();
assertTrue("Doc install not finished", installDocsFinished);
assertFalse("Doc install failed: " + installDocError, installDocsFailed);
// Queue the last JSON test doc, flush, and stop the job via its ticket.
batcher1.addAs(collection + "/test_doc_" + i + ".json", meta, "{ \"testProperty\": \"test3\" }");
batcher1.flushAndWait();
moveMgr.stopJob(ticket1);
// Wait for the writes, then verify via listener-set flags
// (JUnit 5 assert style: message last).
writeBatcher.flushAndWait();
assertTrue(installDocsFinished, "Doc install not finished");
assertFalse(installDocsFailed, "Doc install failed: " + installDocError);
// Wait for the writes, then verify via listener-set flags
// (JUnit 5 assert style: message last).
writeBatcher.flushAndWait();
assertTrue(installDocsFinished, "Doc install not finished");
assertFalse(installDocsFailed, "Doc install failed: " + installDocError);
/**
 * Smoke test for WriteBatcher: writes two small docs and verifies they exist.
 * The span between the "begin copy"/"end copy" markers is a byte-level copy of
 * the example in package-info.java — keep the two in sync if either changes,
 * which is why it is left untouched here.
 */
@Test
public void testWriteBatcher() {
    // Precondition: neither doc exists yet.
    assertEquals(null, client.newDocumentManager().exists("doc1.txt"));
    assertEquals(null, client.newDocumentManager().exists("doc2.txt"));
    // begin copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
    WriteBatcher whb = dataMovementManager.newWriteBatcher()
        .withBatchSize(100)
        .withThreadCount(20)
        .onBatchSuccess(batch -> {
            logger.debug("batch # {}, so far: {}", batch.getJobBatchNumber(), batch.getJobWritesSoFar());
        })
        .onBatchFailure((batch,throwable) -> throwable.printStackTrace() );
    JobTicket ticket = dataMovementManager.startJob(whb);
    // the add or addAs methods could be called in separate threads on the
    // single whb instance
    whb.add  ("doc1.txt", new StringHandle("doc1 contents"));
    whb.addAs("doc2.txt", "doc2 contents");
    whb.flushAndWait(); // send the two docs even though they're not a full batch
    dataMovementManager.stopJob(ticket);
    // end copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
    // Postcondition: both docs are now present.
    assertTrue(null != client.newDocumentManager().exists("doc1.txt"));
    assertTrue(null != client.newDocumentManager().exists("doc2.txt"));
}
// NOTE(review): this brace closes the enclosing class, whose header is outside this view.
}
// Start the job, queue one doc named from the run's suffix and data format,
// and block until it is written.
flowRunnerDataMovementManager.startJob(batcher);
batcher.add("/input" + fileSuffix + "." + dataFormat.toString(), handle);
batcher.flushAndWait();
// Queue the doc, then block until every outstanding batch is written.
writeBatcher.addAs(uris[i], meta, docContents);
writeBatcher.flushAndWait();