/**
 * Implements the QueryFailureListener interface: when the failure was caused by a
 * host becoming unavailable, retries the failed batch through the batcher so the
 * work is re-attempted (against a healthy host).
 *
 * @param queryBatch the exception with information about the failed query attempt
 */
public void processFailure(QueryBatchException queryBatch) {
  boolean isHostUnavailableException =
    processException(queryBatch.getBatcher(), queryBatch, queryBatch.getClient().getHost());
  // Fixed non-idiomatic "== true" boolean comparison.
  if ( isHostUnavailableException ) {
    try {
      logger.warn("Retrying failed batch: {}, results so far: {}, forest: {}, forestBatch: {}, forest results so far: {}",
        queryBatch.getJobBatchNumber(), queryBatch.getJobResultsSoFar(),
        queryBatch.getForest().getForestName(), queryBatch.getForestBatchNumber(),
        queryBatch.getForestResultsSoFar());
      queryBatch.getBatcher().retryWithFailureListeners(queryBatch);
    } catch (RuntimeException e) {
      // The retry itself failed; preserve the cause and re-dispatch through this
      // listener so a still-unavailable host can trigger another retry cycle.
      // NOTE(review): this recursion terminates only while processException()
      // keeps classifying the failure as host-unavailable — confirm it gives up
      // after a bounded number of attempts.
      logger.error("Exception during retry", e);
      processFailure(new QueryBatchException(queryBatch, e));
    }
  }
}
// Minimal failure listener: dumps the failure to stderr.
// NOTE(review): printStackTrace() bypasses the logging framework — presumably
// acceptable in sample/test code; replace with a logger call for production use.
batcher.onQueryFailure(throwable -> throwable.printStackTrace());
StringBuilder failures = new StringBuilder(); QueryFailureListener failListener = throwable -> { throwable.printStackTrace(); logger.error("ERORR:[{}]", throwable); failures.append("ERORR:[" + throwable.toString() + "]"); }; QueryBatcher exportBatcher = moveMgr.newQueryBatcher(query)
for (QueryFailureListener listener : failureListeners) { try { listener.processFailure(new QueryBatchException(batch, t)); } catch (Throwable e) { logger.error("Exception thrown by an onQueryFailure listener", e);
.onUrisReady(batch -> successDocs1.addAndGet(batch.getItems().length)) .onQueryFailure( throwable -> { throwable.printStackTrace(); failures.append("ERROR:[" + throwable + "]\n"); }); .onUrisReady(batch -> successDocs2.addAndGet(batch.getItems().length)) .onQueryFailure( throwable -> { throwable.printStackTrace(); failures2.append("ERROR:[" + throwable + "]\n"); });
for (QueryFailureListener listener : failureListeners) { try { listener.processFailure(new QueryBatchException(batch, t)); } catch (Throwable e) { logger.error("Exception thrown by an onQueryFailure listener", e); listener.processFailure(new QueryBatchException(batch, t)); } catch (Throwable e) { logger.error("Exception thrown by an onQueryFailure listener", e);
.onUrisReady( batch -> uris.addAll(Arrays.asList(batch.getItems())) ) .onQueryFailure( throwable -> { throwable.printStackTrace(); failures.append("ERROR:[" + throwable + "]\n"); }); .onUrisReady(batch -> uris2.addAll(Arrays.asList(batch.getItems()))) .onQueryFailure( throwable -> { throwable.printStackTrace(); failures2.append("ERROR:[" + throwable + "]\n"); });
for ( QueryFailureListener listener : failureListeners ) { try { listener.processFailure(new QueryBatchException(batch, t)); } catch (Throwable e2) { logger.error("Exception thrown by an onQueryFailure listener", e2);
.onBatchFailure((batch, throwable) -> throwable.printStackTrace()) .onQueryFailure(throwable -> throwable.printStackTrace()); JobTicket ticket = moveMgr.startJob( batcher ); batcher.awaitCompletion();
throwable -> { failureBatchCount.incrementAndGet(); throwable.printStackTrace(); failures.append("ERROR:[" + throwable + "]\n");
.withThreadCount(5) .onUrisReady(batch -> retrievedUris.addAll(Arrays.asList(batch.getItems())) ) .onQueryFailure(exception -> exception.printStackTrace() ); moveMgr.startJob(getUris); getUris.awaitCompletion();
.withBatchSize(5000) .onUrisReady( new UrisToWriterListener(writer) ) .onQueryFailure(exception -> exception.printStackTrace()); JobTicket getUrisTicket = dataMovementManager.startJob(getUris); getUris.awaitCompletion(); dataMovementManager.newQueryBatcher(reader.lines().iterator()) .onUrisReady(new DeleteListener()) .onQueryFailure(exception-> exception.printStackTrace()); JobTicket ticket = dataMovementManager.startJob(performDelete); performDelete.awaitCompletion();
.onQueryFailure(exception -> exception.printStackTrace()); moveMgr.startJob(qb); qb.awaitCompletion();
.withBatchSize(10) .onUrisReady(exportListener) .onQueryFailure( throwable -> throwable.printStackTrace() ); moveMgr.startJob( queryJob );
.withBatchSize(100) .onUrisReady(batch -> count3.addAndGet(batch.getItems().length)) .onQueryFailure((throwable) -> throwable.printStackTrace()); JobTicket ticket3 = moveMgr.startJob( batcher3 ); batcher3.awaitCompletion();
throwable.printStackTrace(); }); moveMgr.startJob(batcher);
/**
 * Verifies the "Using QueryBatcher" example from the package docs end-to-end:
 * writes three documents (.json, .xml, .txt) into the collection, runs a
 * QueryBatcher that deletes every URI ending in ".txt", then asserts that only
 * the .json and .xml documents remain.
 *
 * NOTE(review): the section between the "begin copy"/"end copy" markers mirrors
 * src/main/java/com/marklogic/datamovement/package-info.java — keep the two in
 * sync rather than editing this copy independently.
 */
@Test
public void testQueryBatcher() {
  // Seed three documents; the batcher below should delete only the .txt one.
  client.newDocumentManager().writeAs(collection + "/test1.json", meta, "[true]");
  client.newDocumentManager().writeAs(collection + "/test1.xml", meta, "<xml/>");
  client.newDocumentManager().writeAs(collection + "/test1.txt", meta, "text");
  assertEquals(3, client.newQueryManager().search(collectionQuery, new SearchHandle()).getTotalResults());
  StructuredQueryDefinition query = collectionQuery;
  // begin copy from "Using QueryBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
  QueryBatcher qhb = dataMovementManager.newQueryBatcher(query)
    .withBatchSize(1000)
    .withThreadCount(20)
    .withConsistentSnapshot()
    .onUrisReady(batch -> {
      for ( String uri : batch.getItems() ) {
        if ( uri.endsWith(".txt") ) {
          client.newDocumentManager().delete(uri);
        }
      }
    })
    .onQueryFailure(queryBatchException -> queryBatchException.printStackTrace());
  JobTicket ticket = dataMovementManager.startJob(qhb);
  qhb.awaitCompletion();
  dataMovementManager.stopJob(ticket);
  // end copy from "Using QueryBatcher" in src/main/java/com/marklogic/datamovement/package-info.java
  // Only the .json and .xml documents should survive the delete pass.
  SearchHandle results = client.newQueryManager().search(collectionQuery, new SearchHandle());
  assertEquals(2, results.getTotalResults());
  for ( MatchDocumentSummary match : results.getMatchResults() ) {
    assertTrue(match.getUri().matches(".*/test1.(json|xml)"));
  }
}