// NOTE(review): fragment — the enclosing method's signature is outside this excerpt;
// `Status status` is the tail of its parameter list. TODO confirm enclosing context.
Status status ) {
    // Re-attach the captured request log to this (callback) thread, then close the
    // per-call timer opened when the outbound request was dispatched.
    RequestLog.restore(logCtx);
    RequestLog.stopTiming(timerName);
    // When the last outstanding druid call completes, begin timing the response
    // workflow and record the druid response under its query id.
    if (outstanding.decrementAndGet() == 0) {
        RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
        RequestLog.record(new DruidResponse(druidQueryId));
@Override
public void invoke(JsonNode rootNode) {
    // Forward the parsed druid result to the response processor, handing it a
    // snapshot of this thread's RequestLog so timing data survives the thread hop.
    response.processResponse(rootNode, druidQuery, new LoggingContext(RequestLog.copy()));
}
};
@Override
public void filter(ClientRequestContext request, ClientResponseContext response) {
    // Close the "TestLogWrapper" timer opened on the outbound request, then flush
    // the accumulated RequestLog record.
    RequestLog.stopTiming("TestLogWrapper");
    RequestLog.log();
}
}
/**
 * Stop the most recent stopwatch and start this one.
 * Time is accumulated if the stopwatch is already registered.
 *
 * @param nextPhase the name of the stopwatch to be started
 *
 * @deprecated This method is too dependent on context that can be too easily changed by internal method calls.
 * Each timer should be explicitly started by {@link RequestLog#startTiming(String)} and stopped by
 * {@link RequestLog#stopTiming(String)} instead
 */
@Deprecated
public static void switchTiming(String nextPhase) {
    // Order matters: the currently-running timer must be stopped before the next
    // phase's timer starts, so phases never overlap.
    stopMostRecentTimer();
    startTiming(nextPhase);
}
/**
 * Exports a snapshot of the request log of the current thread and also resets the request log for that thread.
 *
 * @return the log context of the current thread
 */
public static RequestLog dump() {
    RequestLog current = RLOG.get();
    try {
        // Snapshot first; the finally block then wipes this thread's log state so
        // the caller receives the only live copy.
        return new RequestLog(current);
    } finally {
        current.clear();
        RLOG.remove();
    }
}
/**
 * Stop the request timer, start the response timer, and then invoke the failure callback code.
 *
 * @param error The error that caused the failure
 */
default void dispatch(Throwable error) {
    // Transition the log from the request-workflow phase to the response-workflow
    // phase before delegating to the concrete failure handler.
    RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
    RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
    invoke(error);
}
}
Supplier<Response> responseSender;
try {
    // Time this handler and log that the feature-flag endpoint was hit.
    RequestLog.startTiming(this);
    RequestLog.record(new FeatureFlagRequest(flagName));
    // NOTE(review): fragment — only the 400 outcome is visible in this excerpt;
    // the success path presumably lives outside it. TODO confirm.
    responseSender = () -> Response.status(Response.Status.BAD_REQUEST).entity(msg).build();
} finally {
    RequestLog.stopTiming(this);
final AtomicLong outstanding
) {
    // Start timing this outbound call, then detach the RequestLog from the current
    // thread so the async completion handler can re-attach it on whatever thread
    // the response arrives on.
    RequestLog.startTiming(timerName);
    final RequestLog logCtx = RequestLog.dump();
    try {
        return requestBuilder.execute(
            // NOTE(review): fragment — the handler object wrapping the statements
            // below is elided from this excerpt; they run on response completion.
            RequestLog.restore(logCtx);
            RequestLog.stopTiming(timerName);
            if (outstanding.decrementAndGet() == 0) {
                RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
@Override
public void onThrowable(Throwable t) {
    // Re-attach the request's log context to this callback thread and close the
    // outbound-call timer before accounting for the failure.
    RequestLog.restore(logCtx);
    RequestLog.stopTiming(timerName);
    // Last outstanding druid call: move the log into the response workflow phase.
    if (outstanding.decrementAndGet() == 0) {
        RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
    }
    exceptionMeter.mark();
    LOG.error("druid {} request failed:", serviceConfig.getNameAndUrl(), t);
    failure.invoke(t);
}
});
/**
 * Start a stopwatch.
 * Time is accumulated if the stopwatch is already registered
 *
 * @param caller the caller to name this stopwatch with its class's simple name
 *
 * @return The stopwatch
 */
public static TimedPhase startTiming(Object caller) {
    // Delegate to the String overload, keyed by the caller's simple class name.
    String phaseName = caller.getClass().getSimpleName();
    return startTiming(phaseName);
}
try {
    // Last outgoing druid call for this request: flip the log from the request
    // workflow phase to the response workflow phase.
    if (context.getNumberOfOutgoing().decrementAndGet() == 0) {
        RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
        RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
    // NOTE(review): fragment — braces/arguments between here and the LOG.debug
    // below are elided; the debug/miss accounting presumably belongs to a catch
    // branch for an unreadable cache entry. TODO confirm against the full source.
    RequestLog logCtx = RequestLog.dump();
    nextResponse.processResponse(
            mapper.readTree(cacheEntry.getValue()),
    LOG.debug("Cache entry present but invalid for query with id: {}", RequestLog.getId());
    CACHE_POTENTIAL_HITS.mark(1);
    CACHE_MISSES.mark(1);
@Override
public Future<JsonNode> executeQuery(
        DruidQuery<?> druidQuery,
        SuccessCallback successCallback,
        FailureCallback failureCallback
) {
    // Snapshot the caller thread's RequestLog so the async worker can restore it
    // on whatever pool thread runs the supplier.
    RequestLog logCtx = RequestLog.dump();
    //todo eventually stop/start RequestLog phases
    return CompletableFuture.supplyAsync(() -> {
            try {
                // NOTE(review): raw/unchecked cast to DruidAggregationQuery —
                // presumably all queries reaching here are aggregation queries;
                // a non-aggregation query would fail the cast at runtime. Verify.
                JsonNode jsonNode = executeAndProcessQuery((DruidAggregationQuery) druidQuery);
                if (successCallback != null) {
                    successCallback.invoke(jsonNode);
                }
                return jsonNode;
            } catch (Throwable t) {
                LOG.warn("Failed while querying ", t);
                if (failureCallback != null) {
                    failureCallback.dispatch(t);
                }
            } finally {
                // Re-attach the snapshot so the worker thread's log state is restored.
                RequestLog.restore(logCtx);
            }
            // NOTE(review): on failure the future completes normally with null rather
            // than exceptionally — callers must null-check. TODO confirm intended.
            return null;
        }
    );
}
/**
 * Take a list of Jackson ArrayNodes and merge their contents, preserving order.
 *
 * @param responses A list of pairs that encompass JSON nodes and response metadata
 *
 * @return A new pair holding the merged json and the aggregate request log context
 */
private Pair<JsonNode, LoggingContext> mergeResponses(List<Pair<JsonNode, LoggingContext>> responses) {
    JsonNodeFactory factory = new JsonNodeFactory(true);
    ArrayNode result = factory.arrayNode();
    // NOTE(review): `logCtx` is not declared in this method — presumably a field or
    // captured variable holding the pre-fan-out log snapshot. Confirm it is the
    // correct context to restore here.
    RequestLog.restore(logCtx);
    for (Pair<JsonNode, LoggingContext> entry : responses) {
        // Flatten each response's array elements into the merged result, in order.
        for (JsonNode jsonNode : entry.getKey()) {
            result.add(jsonNode);
        }
        // Fold each per-response log into the restored context.
        RequestLog.accumulate(entry.getValue().getRequestLog());
    }
    // Detach the aggregated log and pair it with the merged JSON for the caller.
    RequestLog updatedCtx = RequestLog.dump();
    return new Pair<>(result, new LoggingContext(updatedCtx));
}
}
try {
    // Last outstanding druid call: transition the log into the response phase.
    if (context.getNumberOfOutgoing().decrementAndGet() == 0) {
        RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
        RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
    // NOTE(review): fragment — the remaining arguments to processResponse (and the
    // closing braces) fall outside this excerpt.
    RequestLog logCtx = RequestLog.dump();
    nextResponse.processResponse(
            mapper.readTree(jsonResult),
// NOTE(review): fragment — the method signature is outside this excerpt; this reads
// as the body of a container response filter. TODO confirm enclosing declaration.
throws IOException {
    // Propagate the caller-supplied request id (if any), then time this filter.
    appendRequestId(request.getHeaders().getFirst(X_REQUEST_ID_HEADER));
    RequestLog.startTiming(this);
    StringBuilder debugMsgBuilder = new StringBuilder();
    // Record the epilogue and push the response length to its observer.
    RequestLog.record(new Epilogue(msg, status, responseLengthObserver));
    Observable.just((long) response.getLength()).subscribe(responseLengthObserver);
    LOG.debug(debugMsgBuilder.toString());
    // Close this filter's timer and the whole-request timer, then flush the log.
    RequestLog.stopTiming(this);
    RequestLog.stopTiming(TOTAL_TIMER);
    RequestLog.log();
/**
 * Publish final response to user.
 *
 * @param response The Response to send back to the user
 */
private void publishResponse(Response response) {
    // Close out the response-workflow timer if it is still live, then resume the
    // suspended async response.
    boolean timerStillRunning = RequestLog.isRunning(RESPONSE_WORKFLOW_TIMER);
    if (timerStillRunning) {
        RequestLog.stopTiming(RESPONSE_WORKFLOW_TIMER);
    }
    asyncResponse.resume(response);
}
}
@Override public void run() { try { Thread.sleep(1000); // release waiting requests synchronized (responses) { for (Pair<AsyncResponse, RequestLog> response : responses) { RequestLog.restore(response.second); response.first.resume("OK"); } responses.clear(); } } catch (InterruptedException ignore) { // Do nothing } }
/**
 * Intercept the Container request to add length of request and a start timestamp.
 *
 * @param request Request to intercept
 *
 * @throws IOException if there's a problem processing the request
 */
@Override
public void filter(ContainerRequestContext request) throws IOException {
    // Propagate the caller-supplied request id (if any) into the log context.
    appendRequestId(request.getHeaders().getFirst(X_REQUEST_ID_HEADER));
    // TOTAL_TIMER spans the entire request; presumably stopped when the response
    // is written — TODO confirm against the response-side filter.
    RequestLog.startTiming(TOTAL_TIMER);
    // Time just this filter's work; the try-with-resources stops it on exit.
    try (TimedPhase timer = RequestLog.startTiming(this)) {
        RequestLog.record(new Preface(request));
        // sets PROPERTY_REQ_LEN if content-length not defined
        lengthOfRequestEntity(request);
        // store start time to later calculate elapsed time
        request.setProperty(PROPERTY_NANOS, System.nanoTime());
    }
}
/** * Collect async responses in list then respond to all 1 second later. This keeps Grizzly happy. * * @param uriInfo Information about the URL for the request * @param asyncResponse The response object to send the final response to */ @GET @Timed(name = "testTimed") @Metered(name = "testMetered") @ExceptionMetered(name = "testExc") @Produces(MediaType.APPLICATION_JSON) @Path("/data") public void getData(@Context UriInfo uriInfo, @Suspended AsyncResponse asyncResponse) { synchronized (responses) { if (responses.size() == 0) { // start release thread new Thread(this).start(); } responses.add(new Pair<>(asyncResponse, RequestLog.dump())); } }
/**
 * Pause a stopwatch.
 *
 * @param caller the caller to name this stopwatch with its class's simple name
 */
public static void stopTiming(Object caller) {
    // Delegate to the String overload, keyed by the caller's simple class name.
    String phaseName = caller.getClass().getSimpleName();
    stopTiming(phaseName);
}