@Override
public void onThrowable(Throwable t) {
    RequestLog.restore(logCtx);
    RequestLog.stopTiming(timerName);
    // Last outstanding druid call: the response workflow starts now
    if (outstanding.decrementAndGet() == 0) {
        RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
    }
    exceptionMeter.mark();
    LOG.error("druid {} request failed:", serviceConfig.getNameAndUrl(), t);
    failure.invoke(t);
}
});
@Override
public void run() {
    try {
        Thread.sleep(1000);
        // release waiting requests
        synchronized (responses) {
            for (Pair<AsyncResponse, RequestLog> response : responses) {
                RequestLog.restore(response.second);
                response.first.resume("OK");
            }
            responses.clear();
        }
    } catch (InterruptedException ignore) {
        // Do nothing
    }
}
@Override
public Future<JsonNode> executeQuery(
        DruidQuery<?> druidQuery,
        SuccessCallback successCallback,
        FailureCallback failureCallback
) {
    RequestLog logCtx = RequestLog.dump();
    // TODO: eventually stop/start RequestLog phases
    return CompletableFuture.supplyAsync(() -> {
        try {
            JsonNode jsonNode = executeAndProcessQuery((DruidAggregationQuery) druidQuery);
            if (successCallback != null) {
                successCallback.invoke(jsonNode);
            }
            return jsonNode;
        } catch (Throwable t) {
            LOG.warn("Failed while querying ", t);
            if (failureCallback != null) {
                failureCallback.dispatch(t);
            }
        } finally {
            RequestLog.restore(logCtx);
        }
        return null;
    });
}
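The dump/restore pair above carries the per-request logging context across the thread hop introduced by supplyAsync: dump() captures the request thread's context, and the lambda, running on a pool thread, rebinds it. A minimal, self-contained sketch of that handoff under the assumption that the context is thread-local; ContextHandoffSketch and CONTEXT are illustrative names, not part of the codebase:

import java.util.concurrent.CompletableFuture;

public final class ContextHandoffSketch {
    // Hypothetical stand-in for RequestLog's per-thread state
    private static final ThreadLocal<String> CONTEXT = new ThreadLocal<>();

    public static void main(String[] args) {
        CONTEXT.set("request-1234");       // bound on the request thread
        String dumped = CONTEXT.get();     // "dump": capture the value to hand off

        CompletableFuture.runAsync(() -> {
            CONTEXT.set(dumped);           // "restore": rebind on the worker thread
            System.out.println("worker sees: " + CONTEXT.get());
        }).join();
    }
}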
@Override
public void processResponse(JsonNode json, DruidAggregationQuery<?> druidQuery, LoggingContext metadata) {
    if (failed.get()) {
        return;
    }

    // Look up the slot reserved for this query's interval
    AtomicInteger sharedIndex;
    Interval interval = druidQuery.getIntervals().get(0);
    if (interval == null || (sharedIndex = expectedIntervals.get(interval)) == null) {
        fail(UNEXPECTED_INTERVAL_FORMAT, druidQuery, interval);
        return;
    }

    // Claim the slot atomically; a second response for the same interval sees -1
    int index;
    if ((index = sharedIndex.getAndSet(-1)) < 0 || completedIntervals.get(index) != null) {
        fail(EXTRA_RETURN_FORMAT, druidQuery, interval);
        return;
    }

    completedIntervals.set(index, new Pair<>(json, metadata));
    // Once every split has reported, merge and hand the result downstream
    if (completed.decrementAndGet() == 0) {
        Pair<JsonNode, LoggingContext> mergedResponse = mergeResponses(completedIntervals);
        RequestLog.restore(mergedResponse.getValue().getRequestLog());
        next.processResponse(mergedResponse.getKey(), queryBeforeSplit, mergedResponse.getValue());
    }
}
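The getAndSet(-1) above is what makes a duplicate response for the same interval detectable: only the first caller to claim the slot observes a non-negative index, while every later caller reads -1 and falls into the EXTRA_RETURN failure path. A stripped-down sketch of that claim-once guard; the class and variable names are illustrative only:

import java.util.concurrent.atomic.AtomicInteger;

public final class ClaimOnceSketch {
    public static void main(String[] args) {
        AtomicInteger slot = new AtomicInteger(3); // slot index reserved for one interval

        // The first response claims the slot atomically...
        int first = slot.getAndSet(-1);
        System.out.println(first >= 0 ? "claimed slot " + first : "duplicate");

        // ...any later response for the same interval sees -1 and is rejected.
        int second = slot.getAndSet(-1);
        System.out.println(second >= 0 ? "claimed slot " + second : "duplicate");
    }
}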
RequestLog.restore(logCtx);
next.handleRequest(context, request, q, mergingResponse);
        Status status
) {
    RequestLog.restore(logCtx);
    RequestLog.stopTiming(timerName);
    if (outstanding.decrementAndGet() == 0) {
@Override
public void processResponse(JsonNode json, DruidAggregationQuery<?> druidQuery, LoggingContext metadata) {
    try {
        RequestLog.restore(metadata.getRequestLog());
        ResultSet resultSet = buildResultSet(json, druidQuery, apiRequest.getTimeZone());
        resultSet = mapResultSet(resultSet);
RequestLog.restore(logCtx);
RequestLog.stopTiming(timerName);
if (outstanding.decrementAndGet() == 0) {
/**
 * Take a list of Jackson ArrayNodes and merge their contents, preserving order.
 *
 * @param responses A list of pairs that encompass JSON nodes and response metadata
 *
 * @return A new pair holding the merged JSON and the aggregate request log context
 */
private Pair<JsonNode, LoggingContext> mergeResponses(List<Pair<JsonNode, LoggingContext>> responses) {
    JsonNodeFactory factory = new JsonNodeFactory(true);
    ArrayNode result = factory.arrayNode();
    RequestLog.restore(logCtx);
    for (Pair<JsonNode, LoggingContext> entry : responses) {
        for (JsonNode jsonNode : entry.getKey()) {
            result.add(jsonNode);
        }
        // Fold each split's request log into the current thread's context
        RequestLog.accumulate(entry.getValue().getRequestLog());
    }
    RequestLog updatedCtx = RequestLog.dump();
    return new Pair<>(result, new LoggingContext(updatedCtx));
}
}
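The JSON half of this merge is plain Jackson: each response body is an array of result rows, and the inner loop relies on JsonNode being Iterable over its elements to append rows in list order. A self-contained sketch of just that part, with hypothetical sample data; the true passed to JsonNodeFactory requests exact BigDecimal handling for decimal values:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import java.util.List;

public final class MergeArraysSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        List<JsonNode> responses = List.of(
                mapper.readTree("[{\"t\":1},{\"t\":2}]"),
                mapper.readTree("[{\"t\":3}]")
        );

        ArrayNode merged = new JsonNodeFactory(true).arrayNode();
        for (JsonNode response : responses) {
            for (JsonNode row : response) {   // iterates the elements of each array
                merged.add(row);
            }
        }
        System.out.println(merged);           // [{"t":1},{"t":2},{"t":3}]
    }
}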