@Override public void processEvent(QueryBatch batch) { // Increment the number of batches that succeeded successBatchesCount.incrementAndGet(); // Get the number of documents that have been read successfully successEventsCount.addAndGet(batch.getItems().length); }
Arrays.asList(batch.getItems()));
@Override public void processEvent(QueryBatch queryBatch) { String[] uris = queryBatch.getItems();
@Override public void processEvent(QueryBatch queryBatch) { String[] uris = queryBatch.getItems(); StringBuilder sb = new StringBuilder();
try { synchronized(writer) { for ( String uri : batch.getItems() ) { try { if (prefix != null) writer.write(prefix);
/**
 * Reads the documents for the URIs in the given batch, applying any configured
 * search view, metadata categories, non-document format, and server transform.
 *
 * @param batch the query batch whose URIs should be fetched
 * @return a page of documents matching the batch's URIs
 */
protected DocumentPage getDocs(QueryBatch batch) {
  GenericDocumentManager docMgr = batch.getClient().newDocumentManager();
  if ( view != null )              docMgr.setSearchView(view);
  if ( categories != null )        docMgr.setMetadataCategories(categories);
  if ( nonDocumentFormat != null ) docMgr.setNonDocumentFormat(nonDocumentFormat);
  // Fixed idiom: compare booleans directly rather than "== true".
  if ( consistentSnapshot ) {
    // Read at the batch's server timestamp so the results reflect the same
    // point-in-time snapshot the query ran against.
    return ((GenericDocumentImpl) docMgr).read( batch.getServerTimestamp(), transform, batch.getItems() );
  } else {
    return docMgr.read( transform, batch.getItems() );
  }
}
/**
 * Handles a failed batch. If the failure is classified as a host-unavailable
 * condition by {@code processException}, the batch is retried through the
 * wrapped {@code queryBatchListener}; any RuntimeException thrown during that
 * retry is logged and fed back into this method so it gets the same
 * host-unavailable handling.
 *
 * @param batch     the batch that failed
 * @param throwable the cause of the failure
 */
@Override
public void processFailure(QueryBatch batch, Throwable throwable) {
  boolean isHostUnavailableException =
    processException(batch.getBatcher(), throwable, batch.getClient().getHost());
  // Fixed idiom: compare booleans directly rather than "== true".
  if ( isHostUnavailableException ) {
    try {
      logger.warn("Retrying failed listener batch: {}, results so far: {}, uris: {}",
        batch.getJobBatchNumber(), batch.getJobResultsSoFar(), Arrays.toString(batch.getItems()));
      batch.getBatcher().retryListener(batch, queryBatchListener);
    } catch (RuntimeException e) {
      logger.error("Exception during listener retry", e);
      // Recurse so a failure during retry is itself evaluated for retry.
      processFailure(batch, e);
    }
  }
}
}
/**
 * The standard BatchListener action called by QueryBatcher.
 * Deletes the batch's documents; on any failure, fans the error out to every
 * registered failure listener, isolating each so one misbehaving listener
 * cannot suppress the others.
 */
@Override
public void processEvent(QueryBatch batch) {
  try {
    batch.getClient().newDocumentManager().delete( batch.getItems() );
  } catch (Throwable deleteError) {
    for ( BatchFailureListener<Batch<String>> batchListener : failureListeners ) {
      try {
        batchListener.processFailure(batch, deleteError);
      } catch (Throwable listenerError) {
        logger.error("Exception thrown by an onBatchFailure listener", listenerError);
      }
    }
    for ( BatchFailureListener<QueryBatch> queryListener : queryBatchFailureListeners ) {
      try {
        queryListener.processFailure(batch, deleteError);
      } catch (Throwable listenerError) {
        logger.error("Exception thrown by an onFailure listener", listenerError);
      }
    }
  }
}
public List<String> testQueryExceptions(QueryDefinition query, int expectedSuccesses, int expectedFailures) { QueryBatcher queryBatcher = newQueryBatcher(query) .onUrisReady( batch -> { throw new InternalError(errorMessage); } ) .onQueryFailure( queryThrowable -> { throw new InternalError(errorMessage); } ); testExceptions(queryBatcher, expectedSuccesses, expectedFailures); // collect the uris this time List<String> matchingUris = Collections.synchronizedList(new ArrayList<>()); queryBatcher = newQueryBatcher(query) .onUrisReady( batch -> matchingUris.addAll(Arrays.asList(batch.getItems())) ) .onUrisReady( batch -> { throw new RuntimeException(errorMessage); } ) .onQueryFailure( queryThrowable -> { throw new RuntimeException(errorMessage); } ); testExceptions(queryBatcher, expectedSuccesses, expectedFailures); return matchingUris; }
private void testListenerException(QueryBatchListener listener) { final AtomicInteger failureBatchCount = new AtomicInteger(); Iterator<String> iterator = Arrays.asList(new String[] {uri1}).iterator(); QueryBatcher queryBatcher = moveMgr.newQueryBatcher(iterator) .onUrisReady( batch -> logger.debug("uri={}", batch.getItems()[0]) ) .onUrisReady(listener) .onQueryFailure( queryThrowable -> failureBatchCount.incrementAndGet() ); moveMgr.startJob(queryBatcher); queryBatcher.awaitCompletion(); moveMgr.stopJob(queryBatcher); // there should be no failure sent to the QueryBatcher onQueryFailure listeners assertEquals(0, failureBatchCount.get()); }
uris.addAll(Arrays.asList(batch.getItems())); batchCount.incrementAndGet(); if(moveMgr.getJobReport(queryTicket.get()).getSuccessEventsCount() > 40){
.withBatchSize(100) .onUrisReady( new UrisToWriterListener(writer) ) .onUrisReady(batch -> successDocs1.addAndGet(batch.getItems().length)) .onQueryFailure( throwable -> { throwable.printStackTrace(); .withThreadCount(6) .withBatchSize(19) .onUrisReady(batch -> successDocs2.addAndGet(batch.getItems().length)) .onQueryFailure( throwable -> { throwable.printStackTrace();
.withThreadCount(6) .withBatchSize(5000) .onUrisReady( batch -> uris.addAll(Arrays.asList(batch.getItems())) ) .onQueryFailure( throwable -> { throwable.printStackTrace(); .withBatchSize(99) .onUrisReady(new DeleteListener()) .onUrisReady(batch -> successDocs.addAndGet(batch.getItems().length)) .onUrisReady(batch -> uris2.addAll(Arrays.asList(batch.getItems()))) .onQueryFailure( throwable -> { throwable.printStackTrace();
successDocs.addAndGet(batch.getItems().length); logger.info("Retrieved {} docs, sleeping for 1 second", batch.getItems().length); .withConsistentSnapshot() .onUrisReady(new DeleteListener()) .onUrisReady(batch -> logger.info("Deleting {} docs", batch.getItems().length) ) .onQueryFailure(failListener); moveMgr.startJob(deleteBatcher);
.withBatchSize(2) .withThreadCount(5) .onUrisReady(batch -> retrievedUris.addAll(Arrays.asList(batch.getItems())) ) .onQueryFailure(exception -> exception.printStackTrace() ); moveMgr.startJob(getUris);
.onUrisReady(batch -> deletedCount.addAndGet(batch.getItems().length)) .onQueryFailure(exception -> exception.printStackTrace()); moveMgr.startJob(qb);
.withTransform(transform) .withApplyResult(ApplyResult.REPLACE) .onSuccess(batch -> count2.addAndGet(batch.getItems().length)) .onBatchFailure((batch, throwable) -> throwable.printStackTrace()); QueryBatcher batcher = moveMgr.newQueryBatcher(query2) QueryBatcher batcher3 = moveMgr.newQueryBatcher(query3) .withBatchSize(100) .onUrisReady(batch -> count3.addAndGet(batch.getItems().length)) .onQueryFailure((throwable) -> throwable.printStackTrace()); JobTicket ticket3 = moveMgr.startJob( batcher3 );
@Override public void retryListener(QueryBatch batch, QueryBatchListener queryBatchListener) { // We get the batch and modify the client alone in order to make use // of the new forest client in case if the original host is unavailable. DatabaseClient client = null; Forest[] forests = batch.getBatcher().getForestConfig().listForests(); for(Forest forest : forests) { if(forest.equals(batch.getForest())) client = getMoveMgr().getForestClient(forest); } QueryBatchImpl retryBatch = new QueryBatchImpl() .withClient( client ) .withBatcher( batch.getBatcher() ) .withTimestamp( batch.getTimestamp() ) .withServerTimestamp( batch.getServerTimestamp() ) .withItems( batch.getItems() ) .withJobTicket( batch.getJobTicket() ) .withJobBatchNumber( batch.getJobBatchNumber() ) .withJobResultsSoFar( batch.getJobResultsSoFar() ) .withForestBatchNumber( batch.getForestBatchNumber() ) .withForestResultsSoFar( batch.getForestResultsSoFar() ) .withForest( batch.getForest() ) .withJobTicket( batch.getJobTicket() ); queryBatchListener.processEvent(retryBatch); }
@Test
public void testQueryBatcher() {
  // Seed three documents (json, xml, txt) into the test collection.
  client.newDocumentManager().writeAs(collection + "/test1.json", meta, "[true]");
  client.newDocumentManager().writeAs(collection + "/test1.xml", meta, "<xml/>");
  client.newDocumentManager().writeAs(collection + "/test1.txt", meta, "text");
  // Sanity check: all three documents are visible to the collection query.
  assertEquals(3, client.newQueryManager().search(collectionQuery, new SearchHandle()).getTotalResults());
  StructuredQueryDefinition query = collectionQuery;
  // The span below intentionally mirrors the package-info documentation
  // verbatim — keep it byte-identical to the doc it is copied from.
  // begin copy from "Using QueryBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
  QueryBatcher qhb = dataMovementManager.newQueryBatcher(query)
    .withBatchSize(1000)
    .withThreadCount(20)
    .withConsistentSnapshot()
    .onUrisReady(batch -> {
      // Delete only the .txt document; the json and xml docs must survive.
      for ( String uri : batch.getItems() ) {
        if ( uri.endsWith(".txt") ) {
          client.newDocumentManager().delete(uri);
        }
      }
    })
    .onQueryFailure(queryBatchException -> queryBatchException.printStackTrace());
  JobTicket ticket = dataMovementManager.startJob(qhb);
  qhb.awaitCompletion();
  dataMovementManager.stopJob(ticket);
  // end copy from "Using QueryBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
  // After the job, exactly the two non-txt documents should remain.
  SearchHandle results = client.newQueryManager().search(collectionQuery, new SearchHandle());
  assertEquals(2, results.getTotalResults());
  for ( MatchDocumentSummary match : results.getMatchResults() ) {
    assertTrue(match.getUri().matches(".*/test1.(json|xml)"));
  }
}