@Override public Future<JsonNode> executeQuery( DruidQuery<?> druidQuery, SuccessCallback successCallback, FailureCallback failureCallback ) { RequestLog logCtx = RequestLog.dump(); //todo eventually stop/start RequestLog phases return CompletableFuture.supplyAsync(() -> { try { JsonNode jsonNode = executeAndProcessQuery((DruidAggregationQuery) druidQuery); if (successCallback != null) { successCallback.invoke(jsonNode); } return jsonNode; } catch (Throwable t) { LOG.warn("Failed while querying ", t); if (failureCallback != null) { failureCallback.dispatch(t); } } finally { RequestLog.restore(logCtx); } return null; } ); }
final RequestLog logCtx = RequestLog.dump();
RequestLog logCtx = RequestLog.dump(); nextResponse.processResponse( mapper.readTree(jsonResult),
/** * Collect async responses in list then respond to all 1 second later. This keeps Grizzly happy. * * @param uriInfo Information about the URL for the request * @param asyncResponse The response object to send the final response to */ @GET @Timed(name = "testTimed") @Metered(name = "testMetered") @ExceptionMetered(name = "testExc") @Produces(MediaType.APPLICATION_JSON) @Path("/data") public void getData(@Context UriInfo uriInfo, @Suspended AsyncResponse asyncResponse) { synchronized (responses) { if (responses.size() == 0) { // start release thread new Thread(this).start(); } responses.add(new Pair<>(asyncResponse, RequestLog.dump())); } }
/** * Collect async responses in list then respond to all 1 second later. This keeps Grizzly happy. * * @param uriInfo Information about the URL for the request * @param asyncResponse The response object to send the final response to */ @GET @Timed(name = "testTimed") @Metered(name = "testMetered") @ExceptionMetered(name = "testExc") @Produces(MediaType.APPLICATION_JSON) @Path("/data") public void getData(@Context UriInfo uriInfo, @Suspended AsyncResponse asyncResponse) { synchronized (responses) { if (responses.size() == 0) { // start release thread new Thread(this).start(); } responses.add(new Pair<>(asyncResponse, RequestLog.dump())); } }
RequestLog logCtx = RequestLog.dump(); nextResponse.processResponse( mapper.readTree(cacheEntry.getValue()),
) { RequestLog.startTiming(timerName); final RequestLog logCtx = RequestLog.dump(); try { return requestBuilder.execute(
/**
 * Take a list of Jackson ArrayNodes and merge their contents, preserving order.
 * <p>
 * Also folds every response's logging context into the current thread's request log
 * via {@code RequestLog.accumulate}, then snapshots the merged context for the result.
 *
 * @param responses  A list of pairs that encompass JSON nodes and response metadata
 *
 * @return A new pair holding the merged json and the aggregate request log context
 */
private Pair<JsonNode, LoggingContext> mergeResponses(List<Pair<JsonNode, LoggingContext>> responses) {
    JsonNodeFactory factory = new JsonNodeFactory(true);
    ArrayNode result = factory.arrayNode();
    // NOTE(review): removed stray `RequestLog.restore(logCtx);` — `logCtx` was never declared
    // in this scope, so that statement could not compile and served no purpose here.
    for (Pair<JsonNode, LoggingContext> entry : responses) {
        // Flatten each response's array elements into the merged result, preserving order.
        for (JsonNode jsonNode : entry.getKey()) {
            result.add(jsonNode);
        }
        // Merge this response's log context into the current thread's request log.
        RequestLog.accumulate(entry.getValue().getRequestLog());
    }
    RequestLog updatedCtx = RequestLog.dump();
    return new Pair<>(result, new LoggingContext(updatedCtx));
}
}