protected final void sendResponse(SearchPhaseController.ReducedQueryPhase queryPhase,
                                  final AtomicArray<? extends SearchPhaseResult> fetchResults) {
    try {
        final InternalSearchResponse internalResponse = searchPhaseController.merge(true, queryPhase,
            fetchResults.asList(), fetchResults::get);
        // the scroll ID never changes; we always return the same ID. This ID contains all the shards and their
        // context ids such that we can talk to them again in the next roundtrip.
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length,
            successfulOps.get(), 0, buildTookInMillis(), buildShardFailures(), SearchResponse.Clusters.EMPTY));
    } catch (Exception e) {
        listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures()));
    }
}
private void finishHim() {
    threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
        @Override
        public void doRun() throws IOException {
            final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList,
                queryResults, fetchResults, request);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            }
            listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
                successfulOps.get(), buildTookInMillis(), buildShardFailures()));
            releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
        }

        @Override
        public void onFailure(Throwable t) {
            try {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(failure);
            } finally {
                releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
            }
        }
    });
}
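For illustration, here is a minimal, self-contained sketch of the discipline the runnable above follows: hand the merge off to a dedicated executor, and release the per-shard contexts on the success and failure paths alike. This is not Elasticsearch code; the class, the `Listener` interface, `releaseContexts`, and the string-join "merge" are hypothetical stand-ins for the real `searchPhaseController.merge(...)` and `releaseIrrelevantSearchContexts(...)`.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch only: run the merge off the calling thread and make sure the
// per-shard contexts are released whether the merge succeeds or fails.
public class MergeAndReleaseSketch {
    interface Listener {
        void onResponse(String response);
        void onFailure(Throwable t);
    }

    private final ExecutorService searchExecutor = Executors.newSingleThreadExecutor();

    void finish(List<String> queryResults, Listener listener) {
        searchExecutor.execute(() -> {
            try {
                // hypothetical merge step standing in for searchPhaseController.merge(...)
                String merged = String.join(",", queryResults);
                listener.onResponse(merged);
            } catch (Throwable t) {
                listener.onFailure(t);
            } finally {
                // mirrors releaseIrrelevantSearchContexts: runs on success and failure alike
                releaseContexts(queryResults);
            }
        });
    }

    private void releaseContexts(List<String> contexts) {
        System.out.println("releasing " + contexts.size() + " search contexts");
    }
}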
private void finishHim() {
    try {
        innerFinishHim();
    } catch (Throwable e) {
        listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
    }
}
void executeFetchPhase() {
    try {
        innerExecuteFetchPhase();
    } catch (Throwable e) {
        listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
    }
}
@Override
public void onFailure(Throwable t) {
    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
    if (logger.isDebugEnabled()) {
        logger.debug("failed to reduce search", failure);
    }
    // propagate the wrapped failure, not the raw throwable, matching the other phases;
    // the original passed t here, which discarded the ReduceSearchPhaseException wrapper
    super.onFailure(failure);
}
});
@Override
public void onFailure(Throwable t) {
    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
    if (logger.isDebugEnabled()) {
        logger.debug("failed to reduce search", failure);
    }
    super.onFailure(failure);
}
});
private void finishHim() {
    try {
        innerFinishHim();
    } catch (Throwable e) {
        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures());
        if (logger.isDebugEnabled()) {
            logger.debug("failed to reduce search", failure);
        }
        listener.onFailure(failure);
    }
}
@Override
public void onFailure(Throwable t) {
    try {
        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
        if (logger.isDebugEnabled()) {
            logger.debug("failed to reduce search", failure);
        }
        super.onFailure(failure);
    } finally {
        releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
    }
}
});
@Override
public void onFailure(Throwable t) {
    try {
        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
        if (logger.isDebugEnabled()) {
            logger.debug("failed to reduce search", failure);
        }
        super.onFailure(failure);
    } finally {
        releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
    }
}
});
protected final void sendResponse(SearchPhaseController.ReducedQueryPhase queryPhase,
                                  final AtomicArray<? extends SearchPhaseResult> fetchResults) {
    try {
        final InternalSearchResponse internalResponse = searchPhaseController.merge(true, queryPhase,
            fetchResults.asList(), fetchResults::get);
        // the scroll ID never changes; we always return the same ID. This ID contains all the shards and their
        // context ids such that we can talk to them again in the next roundtrip.
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length,
            successfulOps.get(), buildTookInMillis(), buildShardFailures()));
    } catch (Exception e) {
        listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures()));
    }
}
void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
    result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
    processFirstPhaseResult(shardIndex, result);
    // we need to increment successful ops before we compare against the exit condition; otherwise, if we
    // are fast, we could concurrently update totalOps but then preempt one of the threads, which can
    // cause the successor to read a stale value from successfulOps if the second phase is very fast, i.e. count etc.
    successfulOps.incrementAndGet();
    // increment all the "future" shards to update the total ops, since some may work and some may not...
    // and when that happens, we break on total ops, so we must maintain them
    final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
    if (xTotalOps == expectedTotalOps) {
        try {
            innerMoveToSecondPhase();
        } catch (Throwable e) {
            if (logger.isDebugEnabled()) {
                logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
            }
            raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
        }
    } else if (xTotalOps > expectedTotalOps) {
        raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps
            + "] compared to expected [" + expectedTotalOps + "]"));
    }
}
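The ordering constraint spelled out in the comment above (bump successfulOps before the exit-condition check) is easy to get wrong, so here is a minimal standalone sketch of the same counting pattern. It is not Elasticsearch code: `OpsCounterSketch`, `onShardResult`, and the fixed `EXPECTED_TOTAL_OPS` are hypothetical names, and it counts one op per shard result, whereas the real code also folds in unvisited replicas via `shardIt.remaining() + 1`. The point it demonstrates is that exactly one thread observes the count reaching the expected total and moves to the second phase.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch only: every shard response, success or failure, contributes to totalOps,
// and whichever thread makes the count reach the expected total moves to the
// second phase exactly once.
public class OpsCounterSketch {
    private static final int EXPECTED_TOTAL_OPS = 5; // hypothetical shard count
    private final AtomicInteger successfulOps = new AtomicInteger();
    private final AtomicInteger totalOps = new AtomicInteger();

    void onShardResult(boolean success) {
        if (success) {
            // record the success before the exit-condition check so a fast second
            // phase never observes a stale successfulOps value
            successfulOps.incrementAndGet();
        }
        int xTotalOps = totalOps.incrementAndGet();
        if (xTotalOps == EXPECTED_TOTAL_OPS) {
            System.out.println("moving to second phase with " + successfulOps.get() + " successful ops");
        } else if (xTotalOps > EXPECTED_TOTAL_OPS) {
            throw new IllegalStateException("unexpected higher total ops [" + xTotalOps + "]");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        OpsCounterSketch sketch = new OpsCounterSketch();
        CountDownLatch done = new CountDownLatch(EXPECTED_TOTAL_OPS);
        for (int i = 0; i < EXPECTED_TOTAL_OPS; i++) {
            final boolean success = i % 2 == 0; // alternate successes and failures
            new Thread(() -> {
                sketch.onShardResult(success);
                done.countDown();
            }).start();
        }
        done.await();
    }
}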
try {
    innerMoveToSecondPhase();
} catch (Throwable e) {
    raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}