/**
 * Adds the given value to the set of values on which the average is based.
 * <p>
 * If the sum of all values is greater than <tt>Double.MAX_VALUE / 2</tt> or the count of all values is
 * greater than <tt>Long.MAX_VALUE / 2</tt>, the average is reset.
 *
 * @param value the value to add to the average
 */
public void addValue(long value) { addValue((double) value); }
/**
 * Adds the given duration to the underlying average and marks this metric as changed.
 * <p>
 * NOTE(review): <tt>durationInNanos / 1000</tt> converts nanoseconds to MICROseconds, while
 * other call sites in this codebase feed <tt>elapsedMillis()</tt> into averages — confirm the
 * intended unit of <tt>avg</tt> (micros vs. millis).
 *
 * @param durationInNanos the measured duration in nanoseconds
 */
protected void addNanos(long durationInNanos) { avg.addValue(durationInNanos / 1000); changedSinceLastCheck = true; } }
@Override
public void iterate(Function<Row, Boolean> handler, @Nullable Limit limit) throws SQLException {
    // Measure how long preparing and executing the statement takes.
    Watch watch = Watch.start();
    try (ResultSet resultSet = query.prepareStmt().executeQuery()) {
        // Note: "avarage" is the (misspelled) field name declared on the query - kept as-is.
        query.avarage.addValue(watch.elapsedMillis());
        processResultSet(handler, limit, resultSet, TaskContext.get());
    }
}
/**
 * Records call duration, micro-timing and tracing data for a completed FIND ALL.
 *
 * @param collection the collection which was queried
 * @param watch      the watch started before the query was executed
 */
private void handleTracingAndReporting(String collection, Watch watch) {
    mongo.callDuration.addValue(watch.elapsedMillis());
    if (Microtiming.isEnabled()) {
        watch.submitMicroTiming(KEY_MONGO, "FIND ALL - " + collection + ": " + filterObject);
    }
    traceIfRequired(collection, watch);
}
/**
 * Executes one or more Redis commands and returns a value of the given type.
 *
 * @param description a description of the actions performed used for debugging and tracing
 * @param task        the actual task to perform using redis
 * @param <T>         the generic type of the result
 * @return a result computed by <tt>task</tt>
 */
public <T> T query(Supplier<String> description, Function<Jedis, T> task) {
    Watch watch = Watch.start();
    try (Operation operation = new Operation(description, Duration.ofSeconds(10)); Jedis jedis = getConnection()) {
        return task.apply(jedis);
    } catch (Exception e) {
        throw Exceptions.handle(Redis.LOG, e);
    } finally {
        // Record the call duration even if the task failed.
        redisInstance.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("redis", description.get());
        }
    }
}
mongo.callDuration.addValue(w.elapsedMillis()); if (Microtiming.isEnabled()) { w.submitMicroTiming(KEY_MONGO,
/**
 * Counts the number of documents in the result of the given query.
 * <p>
 * Note that limits are ignored for this query.
 *
 * @param collection the collection to search in
 * @return the number of documents found
 */
public long countIn(String collection) {
    Watch watch = Watch.start();
    try {
        return mongo.db().getCollection(collection).count(filterObject);
    } finally {
        // Record timing data regardless of whether the count succeeded.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming(KEY_MONGO, "COUNT - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
/**
 * Executes the insert statement into the given collection.
 *
 * @param collection the collection to insert the document into
 * @return the inserted document
 */
public Doc into(String collection) {
    if (Mongo.LOG.isFINE()) {
        Mongo.LOG.FINE("INSERT: %s\nObject: %s", collection, obj);
    }
    Watch w = Watch.start();
    try {
        mongo.db().getCollection(collection).insertOne(obj);
    } finally {
        // Record call duration and micro-timing even if the insert fails,
        // mirroring the try/finally pattern used by the other Mongo operations.
        mongo.callDuration.addValue(w.elapsedMillis());
        if (Microtiming.isEnabled()) {
            w.submitMicroTiming("mongo", "INSERT - " + collection + ": " + obj);
        }
    }
    return new Doc(obj);
}
}
/**
 * Executes the queued batch while recording metrics and logging slow batches.
 *
 * @return the per-statement update counts reported by the delegate
 * @throws SQLException if the underlying batch execution fails
 */
@Override
public int[] executeBatch() throws SQLException {
    Watch watch = Watch.start();
    try (Operation operation = new Operation(() -> "executeBatch: " + preparedSQL, Duration.ofSeconds(30))) {
        int[] rowCounts = delegate.executeBatch();
        watch.submitMicroTiming("BATCH-SQL", preparedSQL);
        Databases.numQueries.inc();
        Databases.queryDuration.addValue(watch.elapsedMillis());
        if (watch.elapsedMillis() > Databases.getLogQueryThresholdMillis()) {
            Databases.numSlowQueries.inc();
            DB.SLOW_DB_LOG.INFO("A slow JDBC batch query was executed (%s): %s (%s rows)\n%s",
                                watch.duration(),
                                preparedSQL,
                                rowCounts.length,
                                ExecutionPoint.snapshot().toString());
        }
        return rowCounts;
    }
}
/**
 * Executes the delete statement on the given collection.
 *
 * @param collection the name of the collection to delete documents from
 * @return the result of the delete operation
 */
public DeleteResult singleFrom(String collection) {
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("DELETE: %s\nFilter: %s", collection, filterObject);
        }
        return mongo.db().getCollection(collection).deleteOne(filterObject);
    } finally {
        // Timing and tracing run in finally so they fire even on a failed delete.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "DELETE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
mongo.callDuration.addValue(w.elapsedMillis()); if (Microtiming.isEnabled()) { w.submitMicroTiming(KEY_MONGO, "FIND ONE - " + collection + ": " + filterObject);
/**
 * Executes the delete statement on the given collection.
 *
 * @param collection the name of the collection to delete documents from
 * @return the result of the delete operation
 */
public DeleteResult manyFrom(String collection) {
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("DELETE: %s\nFilter: %s", collection, filterObject);
        }
        return mongo.db().getCollection(collection).deleteMany(filterObject);
    } finally {
        // Timing and tracing run in finally so they fire even on a failed delete.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "DELETE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
}
/**
 * Executes the wrapped runnable within a properly initialized call context and
 * reports the outcome (and duration) to the associated promise and metrics.
 */
@Override
public void run() {
    try {
        Watch watch = Watch.start();
        try {
            // Either inherit the caller's context or set up a fresh one for this thread.
            if (ctx == null) {
                CallContext.initialize();
            } else {
                CallContext.setCurrent(ctx);
            }
            TaskContext.get().setSystem(SYSTEM_ASYNC).setSubSystem(category).setJob(String.valueOf(jobNumber));
            runnable.run();
            promise.success(null);
        } finally {
            // Always detach the context and record the duration, even if the task failed.
            CallContext.detach();
            durationAverage.addValue(watch.elapsedMillis());
        }
    } catch (Exception e) {
        Exceptions.handle(Tasks.LOG, e);
        promise.fail(e);
    }
}
/**
 * Records metrics and slow-query logging for an executed SQL statement.
 *
 * @param sql   the statement which was executed
 * @param watch the watch which was started before the statement was executed
 */
protected void updateStatistics(String sql, Watch watch) {
    watch.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    Databases.queryDuration.addValue(watch.elapsedMillis());
    if (watch.elapsedMillis() > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            watch.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Updates query metrics after a statement completed and logs it if it was slow.
 *
 * @param sql the statement which was executed
 * @param w   the watch which was started before the statement was executed
 */
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    Databases.queryDuration.addValue(w.elapsedMillis());
    // Guard clause: bail out early for queries below the slow-query threshold.
    if (w.elapsedMillis() <= Databases.getLogQueryThresholdMillis()) {
        return;
    }
    Databases.numSlowQueries.inc();
    DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                        w.duration(),
                        sql,
                        ExecutionPoint.snapshot().toString());
}
/**
 * Executes the prepared index request and writes the resulting id and version back into the entity.
 *
 * @param entity        the entity being saved
 * @param descriptor    the descriptor of the entity (used for logging)
 * @param requestBuilder the prepared index request to execute
 * @param runSaveChecks whether the entity's afterSave hook should be invoked
 * @return the updated entity
 */
private <E extends Entity> E executeUpdate(E entity,
                                           EntityDescriptor descriptor,
                                           IndexRequestBuilder requestBuilder,
                                           final boolean runSaveChecks) {
    Watch watch = Watch.start();
    IndexResponse response = requestBuilder.execute().actionGet();
    if (LOG.isFINE()) {
        LOG.FINE("SAVE: %s.%s: %s (%d) SUCCEEDED",
                 schema.getIndex(entity),
                 descriptor.getType(),
                 response.getId(),
                 response.getVersion());
    }
    // Propagate the server-assigned id and version back into the entity.
    entity.id = response.getId();
    entity.version = response.getVersion();
    if (runSaveChecks) {
        entity.afterSave();
    }
    queryDuration.addValue(watch.elapsedMillis());
    watch.submitMicroTiming("ES", "UPDATE " + entity.getClass().getName());
    traceChange(entity);
    return entity;
}
/**
 * Executes the update on the given collection.
 *
 * @param collection the collection to update
 * @return the result of the update
 */
public UpdateResult executeFor(String collection) {
    Document updateObject = prepareUpdate(collection);
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("UPDATE: %s\nFilter: %s\n Update:%s", collection, filterObject, updateObject);
        }
        UpdateOptions options = new UpdateOptions().upsert(this.upsert);
        if (many) {
            return mongo.db().getCollection(collection).updateMany(filterObject, updateObject, options);
        }
        return mongo.db().getCollection(collection).updateOne(filterObject, updateObject, options);
    } finally {
        // Timing and tracing run in finally so they fire even on a failed update.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "UPDATE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
.handle(); } finally { elastic.callDuration.addValue(w.elapsedMillis()); if (Microtiming.isEnabled()) { w.submitMicroTiming("elastic", method + ": " + uri);
/**
 * Dispatches an incoming pub/sub message to the given subscriber on the redis-pubsub executor.
 *
 * @param channel    the channel the message was received on (used for micro-timing)
 * @param message    the received message payload
 * @param subscriber the subscriber to notify
 */
protected void handlePubSubMessage(String channel, String message, Subscriber subscriber) {
    tasks.executor("redis-pubsub").start(() -> {
        Watch watch = Watch.start();
        try {
            subscriber.onMessage(message);
        } catch (Exception e) {
            // The trailing %s (%s) placeholders are filled in by Exceptions with the error details.
            Exceptions.handle()
                      .to(LOG)
                      .error(e)
                      .withSystemErrorMessage("Failed to process a message '%s' for topic '%s': %s (%s)",
                                              message,
                                              subscriber.getTopic())
                      .handle();
        }
        watch.submitMicroTiming("redis", channel);
        messageDuration.addValue(watch.elapsedMillis());
    });
}
/**
 * Executes the prepared bulk request and writes ids and versions back into the given entities.
 *
 * @param entities the entities being saved, in the same order as the bulk items
 * @param brb      the prepared bulk request to execute
 * @return the (updated) list of entities
 */
private <E extends Entity> List<E> executeBulkUpdate(List<E> entities, BulkRequestBuilder brb) {
    Watch watch = Watch.start();
    BulkResponse response = brb.execute().actionGet();
    if (response.hasFailures()) {
        Exceptions.handle().withSystemErrorMessage(response.buildFailureMessage()).handle();
    } else if (LOG.isFINE()) {
        LOG.FINE("BULK-SAVE SUCCEEDED");
    }
    // Bulk items are positionally aligned with the entities list.
    for (int index = 0; index < response.getItems().length; index++) {
        E entity = entities.get(index);
        entity.id = response.getItems()[index].getId();
        entity.version = response.getItems()[index].getVersion();
        if (!response.getItems()[index].isFailed()) {
            entity.afterSave();
        }
        traceChange(entity);
    }
    queryDuration.addValue(watch.elapsedMillis());
    watch.submitMicroTiming("ES", "BULK-UPDATE");
    return entities;
}