public void testExceptions(WriteBatcher writeBatcher, DocumentWriteSet docs, int expectedSuccesses, int expectedFailures) { final AtomicInteger successfulBatchCount = new AtomicInteger(0); final AtomicInteger failureBatchCount = new AtomicInteger(0); writeBatcher .withBatchSize(1) .onBatchSuccess( batch -> successfulBatchCount.incrementAndGet() ) .onBatchFailure( (batch, throwable) -> failureBatchCount.incrementAndGet() ); moveMgr.startJob(writeBatcher); for ( DocumentWriteOperation doc : docs ) { writeBatcher.add(doc.getUri(), doc.getContent()); } // while batchSize=1 means all batches are queued, we still need to wait for them to finish writeBatcher.flushAndWait(); moveMgr.stopJob(writeBatcher); assertEquals(expectedSuccesses, successfulBatchCount.get()); assertEquals(expectedFailures, failureBatchCount.get()); }
/**
 * Creates the write batcher, applies the configured batch size and thread count,
 * and starts the job, retaining the ticket for later shutdown.
 */
@Override
public void initialize() {
  writeBatcher = dataMovementManager.newWriteBatcher();
  writeBatcher.withBatchSize(batchSize);
  writeBatcher.withThreadCount(threadCount);
  jobTicket = dataMovementManager.startJob(writeBatcher);
}
// Constructor fragment (truncated — closing braces are outside this view).
// Scans the batcher's success listeners for a WriteJobReportListener, then scans the
// failure listeners the same way, and snapshots job counters/timestamps into this report.
// NOTE(review): writeJobSuccessListener is initialized to null and, in the visible
// portion, is never assigned before being dereferenced via getSuccessEventsCount() /
// getFailureEventsCount() — that would throw a NullPointerException. Presumably the
// instanceof checks were meant to capture the matched listeners into
// writeJobSuccessListener / writeJobFailureListener; confirm against the full source.
public JobReportImpl(WriteBatcher batcher) { WriteJobReportListener writeJobSuccessListener = null; WriteJobReportListener writeJobFailureListener = null; WriteBatchListener[] batchListeners = batcher.getBatchSuccessListeners(); for(WriteBatchListener batchListener : batchListeners) { if(batchListener instanceof WriteJobReportListener) { WriteFailureListener[] failureListeners = batcher.getBatchFailureListeners(); for(WriteFailureListener failureListener : failureListeners) { if(failureListener instanceof WriteJobReportListener) { successEventsCount = writeJobSuccessListener.getSuccessEventsCount(); failureEventsCount = writeJobSuccessListener.getFailureEventsCount(); isJobComplete = batcher.isStopped(); reportTimestamp = Calendar.getInstance(); jobStartTime = batcher.getJobStartTime(); jobEndTime = batcher.getJobEndTime();
/**
 * Registers and starts a write-batcher job, assigning a generated job id when the
 * batcher does not already carry one.
 *
 * @param batcher    the batcher to start (must be a WriteBatcherImpl)
 * @param activeJobs registry of currently active jobs, keyed by job id
 * @return the ticket representing the started job
 * @throws DataMovementException if a not-yet-started batcher uses a job id that is
 *         already present in {@code activeJobs}
 */
public JobTicket startJob(WriteBatcher batcher, ConcurrentHashMap<String, JobTicket> activeJobs) {
  String id = (batcher.getJobId() == null) ? generateJobId() : batcher.getJobId();
  if (batcher.getJobId() == null && !batcher.isStarted()) {
    batcher.withJobId(id);
  }
  // NOTE(review): check-then-act on the concurrent map — two threads starting with the
  // same id could both pass this check; confirm callers serialize startJob if that matters.
  if (!batcher.isStarted() && activeJobs.containsKey(id)) {
    throw new DataMovementException(
      "Cannot start the batcher because the given job Id already exists in the active jobs", null);
  }
  JobTicket ticket = new JobTicketImpl(id, JobTicket.JobType.WRITE_BATCHER)
    .withWriteBatcher((WriteBatcherImpl) batcher);
  ((WriteBatcherImpl) batcher).start(ticket);
  activeJobs.put(id, ticket);
  return ticket;
}
// Test fragment (truncated — both lambda bodies are cut off mid-statement, and the
// uri/loop context for the final add() is not visible). Configures a batcher with
// explicit batch size, thread count, and job id, asserts the started/not-started state
// transitions around startJob, and checks the configured values via the getters.
// NOTE(review): Math.ceil(totalDocCount / expectedBatchSize) truncates before ceil if
// both operands are integers — verify expectedBatchSize is a floating-point type, or
// this under-counts expectedBatches.
final int expectedBatches = (int) Math.ceil(totalDocCount / expectedBatchSize); WriteBatcher batcher = moveMgr.newWriteBatcher() .withBatchSize(batchSize) .withThreadCount(batcherThreadCount) .onBatchSuccess( batch -> { logger.debug("[testWrites_{}] batch: {}, items: {}", testName, .onBatchFailure( (batch, throwable) -> { failureBatchCount.incrementAndGet(); .withJobId(writeBatcherJobId); long batchMinTime = new Date().getTime(); assertFalse("Job should not be started yet", batcher.isStarted()); moveMgr.startJob(batcher); assertTrue("Job should be started now", batcher.isStarted()); JobTicket ticket = moveMgr.getActiveJob(writeBatcherJobId); assertEquals(batchSize, batcher.getBatchSize()); assertEquals(writeBatcherJobId, batcher.getJobId()); assertEquals(batcherThreadCount, batcher.getThreadCount()); batcher.flushAndWait(); int leftover = (totalDocCount % docsPerExternalThread); DocumentMetadataHandle meta = new DocumentMetadataHandle() .withCollections(whbTestCollection, collection); batcher.add(uri, meta, new StringHandle("test").withFormat(Format.TEXT));
// Fragment (truncated — the onBatchFailure lambda is not closed in the visible text).
// Configures a batcher with a server transform (runFlow), records any failure message
// into the errorMessage/runFlowFailed fields, starts the job, queues two employee XML
// documents from test resources, and waits for the writes to complete.
WriteBatcher batcher = dataMovementManager.newWriteBatcher(); batcher .withBatchSize(10) .withThreadCount(4) .withTransform(runFlow) .onBatchFailure((batch, failure) -> { errorMessage = failure.getMessage(); runFlowFailed = true; dataMovementManager.startJob(batcher); batcher.add("/employee1.xml", new StringHandle(getResource("flow-manager-test/input/employee1.xml")).withFormat(Format.XML)); batcher.add("/employee2.xml", new StringHandle(getResource("flow-manager-test/input/employee2.xml")).withFormat(Format.XML)); batcher.flushAndWait();
// Fragment (truncated — the writer.add(...) calls are cut off before their content
// arguments, and the surrounding loop/stream context is not visible). Runs two write
// jobs: "Load jobs" (batch size 50) and then "Load traces", each flushed and stopped.
// NOTE(review): dmm.release() is called after the first job, and then dmm is reused to
// create the second batcher — using a released DataMovementManager looks suspicious;
// verify against the full source whether the first release() should be removed.
WriteBatcher writer = dmm .newWriteBatcher() .withJobName("Load jobs") .withBatchSize(50); JobTicket ticket = dmm.startJob(writer); String entryText = s.hasNext() ? s.next() : ""; writer.add( entry.getName(), jobMetadata, writer.flushAndWait(); dmm.stopJob(ticket); dmm.release(); writer = dmm .newWriteBatcher() .withJobName("Load traces"); ticket = dmm.startJob(writer); writer.add( entry.getName(), traceMetadata, writer.flushAndWait(); dmm.stopJob(ticket); dmm.release();
// Fragment (the receiver of this chained configuration is outside the visible text).
// Configures some batcher with single-document batches and attaches listener objects
// (rather than lambdas) for both the success and failure callbacks.
.withBatchSize(1) .onBatchSuccess(new CloseBatchListener()) .onBatchFailure(new CloseFailureListener());
/**
 * Regression test for issue #646: writes 21 string documents, each carrying a
 * "docMeta-1" property, via addAs with a batch size of 10.
 *
 * @throws Exception if the batcher fails unexpectedly
 */
@Ignore
public void testIssue646() throws Exception {
  WriteBatcher batcher = moveMgr.newWriteBatcher().withBatchSize(10);
  batcher.onBatchFailure((batch, throwable) -> throwable.printStackTrace());
  for (int i = 0; i < 21; i++) {
    DocumentMetadataHandle props = new DocumentMetadataHandle().withProperty("docMeta-1", "true");
    batcher.addAs("/local/string-" + i, props, "test");
  }
  batcher.flushAndWait();
}
// Fragment (truncated — the batcher receiver is missing, the onBatchSuccess lambda and
// the for-loop are not closed in the visible text). Collects the target URIs of each
// successful batch into sentUris, writes ten "/testAgainstRealHosts/<j>" documents,
// then flushes and stops the job.
.withBatchSize(10) .withForestConfig(ffg) .onBatchSuccess( batch -> { for ( WriteEvent event : batch.getItems() ) { sentUris.add(event.getTargetUri()); .onBatchFailure( (batch, throwable) -> throwable.printStackTrace() ); for (int j =0 ;j < 10; j++){ String uri ="/testAgainstRealHosts/"+ j; writeBatcher.addAs(uri, meta6, "test"); sentUris.add(uri); writeBatcher.flushAndWait(); moveMgr.stopJob(writeBatcher);
public static void setup() throws Exception { StringBuffer failures = new StringBuffer(); WriteBatcher writeBatcher = moveMgr.newWriteBatcher() .withBatchSize(10) .onBatchFailure((event, throwable) -> { throwable.printStackTrace(); failures.append("ERORR:[" + throwable.toString() + "]"); }); moveMgr.startJob(writeBatcher); // a collection so we're only looking at docs related to this test run DocumentMetadataHandle meta = new DocumentMetadataHandle().withCollections(collection); for ( int i=1; i <= numDocs; i++ ) { writeBatcher.addAs(collection + "/doc_" + i + ".txt", meta, "test contents"); } writeBatcher.flushAsync(); writeBatcher.awaitCompletion(); if ( failures.length() > 0 ) fail(failures.toString()); logger.info("Successfully wrote {} docs to collection {}", numDocs, collection); }
/**
 * Wires up the plugin's write batcher: one job named "OurJbossESBPlugin" that logs a
 * summary report on each successful batch and the failed URIs on each failed batch.
 *
 * @param client the database client used to create the data-movement manager
 */
public OurJbossESBPlugin(DatabaseClient client) {
  moveMgr = client.newDataMovementManager();
  batcher = moveMgr.newWriteBatcher()
    .withJobName("OurJbossESBPlugin")
    .withBatchSize(BATCH_SIZE)
    // every time a batch is full, write it to the database via mlcp
    // this is the default, only included here to make it obvious
    //.onBatchFull( new MlcpBatchFullListener() )
    // log a summary report after each successful batch
    .onBatchSuccess(batch -> logger.info(getSummaryReport()))
    .onBatchFailure((batch, throwable) -> {
      List<String> failedUris = new ArrayList<>();
      for (WriteEvent event : batch.getItems()) {
        failedUris.add(event.getTargetUri());
      }
      logger.warn("FAILURE on batch:" + failedUris + "\n", throwable);
    });
  ticket = moveMgr.startJob(batcher);
}
// Test fragment (truncated — starts mid-method and the final assertion sequence is cut
// off). Exercises listener registration: onBatchSuccess/onBatchFailure append to the
// respective listener arrays, setBatchSuccessListeners() with no args clears them,
// with one arg replaces them, and with an explicit null array is also accepted.
// The failure-listener assertions show the batcher starts with HostAvailabilityListener
// at index 0 and NoResponseListener at index 2 pre-registered.
WriteBatchListener[] successListeners = batcher.getBatchSuccessListeners(); assertEquals(1, successListeners.length); batcher.onBatchSuccess(successListener); successListeners = batcher.getBatchSuccessListeners(); assertEquals(2, successListeners.length); assertEquals(successListener, successListeners[1]); WriteFailureListener[] failureListeners = batcher.getBatchFailureListeners(); assertEquals(3, failureListeners.length); assertEquals(HostAvailabilityListener.class, failureListeners[0].getClass()); assertEquals(NoResponseListener.class, failureListeners[2].getClass()); batcher.onBatchFailure(failureListener); failureListeners = batcher.getBatchFailureListeners(); assertEquals(4, failureListeners.length); assertEquals(failureListener, failureListeners[3]); batcher.setBatchSuccessListeners(); successListeners = batcher.getBatchSuccessListeners(); assertEquals(0, successListeners.length); batcher.setBatchSuccessListeners(successListener); successListeners = batcher.getBatchSuccessListeners(); assertEquals(1, successListeners.length); assertEquals(successListener, successListeners[0]); batcher.setBatchSuccessListeners((WriteBatchListener[]) null); successListeners = batcher.getBatchSuccessListeners();
@Test public void testCloseHandles() throws Exception { DocumentMetadataHandle meta = new DocumentMetadataHandle() .withCollections(whbTestCollection); final AtomicInteger failCount = new AtomicInteger(0); WriteBatcher batcher = moveMgr.newWriteBatcher() .onBatchFailure( (batch, throwable) -> { logger.error("Error in testCloseHandles", throwable); failCount.incrementAndGet(); } ); JobTicket ticket = moveMgr.startJob(batcher); AtomicBoolean closed = new AtomicBoolean(false); FileInputStream fileStream = new FileInputStream("src/test/resources/test.xml") { public void close() throws IOException { super.close(); closed.set(true); } }; batcher.add("test.xml", meta, new InputStreamHandle(fileStream)); // when we call flushAndWait, the WriteBatcher should write the batch the close all the handles batcher.flushAndWait(); assertEquals(true, closed.get()); moveMgr.stopJob(ticket); assertEquals(0, failCount.get()); }
// Fragment (truncated — the for-loop's closing brace is not visible). Starts the job,
// queues one document per URI (the URI doubles as the content), flushes, and stops.
// NOTE(review): as written here, flushAndWait and stopJob fall inside the loop body,
// which would stop the job after the first URI — presumably the missing brace closes
// the loop before flushAndWait; confirm against the full source.
moveMgr.startJob(wb); for ( String uri : uris ) { wb.addAs(uri, meta, uri); wb.flushAndWait(); moveMgr.stopJob(wb);
/**
 * Queues each document write operation (URI, metadata, content) on the underlying
 * write batcher.
 *
 * @param items the write operations to queue
 */
@Override
public void write(List<? extends DocumentWriteOperation> items) {
  items.forEach(op -> writeBatcher.add(op.getUri(), op.getMetadata(), op.getContent()));
}
/**
 * Flushes any queued documents and stops the underlying write-batcher job.
 * Does nothing if the batcher was never initialized.
 */
@Override
public void waitForCompletion() {
  if (writeBatcher != null) {
    writeBatcher.flushAndWait();
    // NOTE(review): the original had an if/else on jobTicket whose two branches were
    // byte-identical (both called stopJob(writeBatcher)), so the conditional was dead
    // code and has been collapsed. If the intent was stopJob(jobTicket) when a ticket
    // exists, that would be a behavior change and should be made deliberately.
    dataMovementManager.stopJob(writeBatcher);
  }
}
/**
 * Runs the exception-handling checks twice: once with listeners that throw an
 * InternalError and once with listeners that throw a RuntimeException, then removes
 * the documents written during the run.
 *
 * @param docs              the documents to write in each pass
 * @param expectedSuccesses expected count of successful batches per pass
 * @param expectedFailures  expected count of failed batches per pass
 */
public void testExceptions(DocumentWriteSet docs, int expectedSuccesses, int expectedFailures) {
  // listeners that blow up with an Error subtype
  WriteBatcher errorBatcher = moveMgr.newWriteBatcher()
    .onBatchSuccess(batch -> { throw new InternalError(errorMessage); })
    .onBatchFailure((batch, throwable) -> { throw new InternalError(errorMessage); });
  testExceptions(errorBatcher, docs, expectedSuccesses, expectedFailures);
  // listeners that blow up with an unchecked exception
  WriteBatcher runtimeBatcher = moveMgr.newWriteBatcher()
    .onBatchSuccess(batch -> { throw new RuntimeException(errorMessage); })
    .onBatchFailure((batch, throwable) -> { throw new RuntimeException(errorMessage); });
  testExceptions(runtimeBatcher, docs, expectedSuccesses, expectedFailures);
  cleanupDocs(docs);
}
// Fragment (truncated — the if-body and loop are not closed in the visible text).
// Casts the batcher to WriteBatcher and scans its registered failure listeners for a
// NoResponseListener instance; what is done with the match is outside this view.
WriteFailureListener[] writeFailureListeners = ((WriteBatcher) batcher).getBatchFailureListeners(); for (WriteFailureListener writeFailureListener : writeFailureListeners) { if ( writeFailureListener instanceof NoResponseListener ) {
// Test fragment (truncated — both lambda bodies are cut off mid-statement). Builds an
// XQuery that counts documents in the test collection, then configures a batcher with
// batch size 120, debug-logging each successful batch and printing the size of each
// failed batch.
String query1 = "fn:count(fn:collection('" + collection + "'))"; WriteBatcher batcher = moveMgr.newWriteBatcher(); batcher.withBatchSize(120); batcher .onBatchSuccess( batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue48] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); .onBatchFailure( (batch, throwable) -> { throwable.printStackTrace(); System.out.println("Failure Batch size "+batch.getItems().length);