/**
 * Creates a new wrapped connection for the given database and records the use
 * in the global connection-usage counter.
 *
 * @param c        the underlying JDBC connection to wrap
 * @param database the database this connection was obtained from
 */
WrappedConnection(Connection c, Database database) {
    super(c);
    Databases.numUses.inc();
    this.database = database;
}
/**
 * Manually blocks the current thread for n seconds, to make e.g. a bulk write visible in ES.
 * <p>
 * Consider using {@link #callAfterUpdate(Runnable)} which does not block system resources. Only use this method
 * if absolutely necessary.
 *
 * @param seconds the number of seconds to block
 */
public void blockThreadForUpdate(int seconds) {
    // Count every explicit block so excessive use of this method shows up in the metrics.
    blocks.inc();
    Wait.seconds(seconds);
}
/**
 * Records the given log message in the buffer of most recent messages.
 * <p>
 * Messages which no receiver would log are skipped entirely. The newest message
 * is kept at index 0 and the buffer is trimmed so it never exceeds maxMsg entries.
 *
 * @param msg the log message to record
 */
@Override
public void handleLogMessage(LogMessage msg) {
    if (!msg.isReceiverWouldLog()) {
        return;
    }
    synchronized (messages) {
        messages.add(0, msg);
        numLogMessages.inc();
        while (messages.size() > maxMsg) {
            messages.remove(messages.size() - 1);
        }
    }
}
@Override
public Connection createConnection() throws SQLException {
    // Track every physical connect attempt in the global metric before delegating
    // to the actual factory.
    Databases.numConnects.inc();
    return actualFactory.createConnection();
}
// NOTE(review): closes an enclosing anonymous class whose declaration is not visible here.
};
/**
 * Records the given incident in the buffer of most recent incidents.
 * <p>
 * Only one incident per location is kept: an older incident from the same
 * location is replaced by the new one. The newest incident sits at index 0 and
 * the buffer is trimmed to at most maxErrors entries.
 *
 * @param incident the incident to record
 * @throws Exception if recording the incident fails
 */
@Override
public void handle(Incident incident) throws Exception {
    synchronized (incidents) {
        // Remove any previously stored incident from the same location; if one
        // existed, this incident is not counted as unique.
        boolean duplicate =
                incidents.removeIf(other -> Strings.areEqual(other.getLocation(), incident.getLocation()));
        incidents.add(0, incident);
        numIncidents.inc();
        if (!duplicate) {
            numUniqueIncidents.inc();
        }
        while (incidents.size() > maxErrors) {
            incidents.remove(incidents.size() - 1);
        }
    }
}
/**
 * Adds an action to the delay line, which ensures that it is at least delayed for one second.
 * <p>
 * If the delay line is already full (100 pending actions), the current thread is
 * blocked instead and the command is executed inline.
 *
 * @param cmd the command to be delayed
 */
public void callAfterUpdate(final Runnable cmd) {
    boolean queued = false;
    synchronized (oneSecondDelayLine) {
        if (oneSecondDelayLine.size() < 100) {
            delays.inc();
            oneSecondDelayLine.add(new WaitingBlock(cmd));
            queued = true;
        }
    }
    if (!queued) {
        // Delay line is full - fall back to blocking this thread and running inline.
        blockThreadForUpdate();
        cmd.run();
    }
}
/**
 * Immediately submits the given task to the executor responsible for its category.
 *
 * @param wrapper the prepared task to execute
 */
private void executeNow(ExecutionBuilder.TaskWrapper wrapper) {
    wrapper.prepare();
    AsyncExecutor exec = findExecutor(wrapper.category);
    // Assign a sequential job number by bumping the executor's execution counter.
    wrapper.jobNumber = exec.executed.inc();
    // Shares the executor's duration metric with the task - presumably so the task
    // can record its own runtime into it; TODO confirm against AsyncExecutor.
    wrapper.durationAverage = exec.duration;
    if (wrapper.synchronizer != null) {
        // Remember when a task for this synchronizer was last scheduled.
        scheduleTable.put(wrapper.synchronizer, System.currentTimeMillis());
    }
    exec.execute(wrapper);
}
/**
 * Creates a fresh CallContext tagged with the given external flow id and counts
 * the interaction.
 *
 * @param install        whether the new context should be installed as the
 *                       current context of this thread
 * @param externalFlowId the flow id to store in the MDC under MDC_FLOW
 * @return the newly created context
 */
private static CallContext initialize(boolean install, String externalFlowId) {
    CallContext ctx = new CallContext();
    ctx.addToMDC(MDC_FLOW, externalFlowId);
    interactionCounter.inc();
    if (!install) {
        return ctx;
    }
    setCurrent(ctx);
    return ctx;
}
// NOTE(review): fragment - the enclosing loop/method is not visible here.
StoredObject object = new StoredObject(file);
if (useLimit && (!usePrefix || name.startsWith(prefix))) {
    // objectCount presumably tracks how many matching objects were emitted so far - TODO confirm.
    long numObjects = objectCount.inc();
    if (numObjects <= limit) {
        output.beginObject("Contents");
/**
 * Records timing statistics for the given SQL statement.
 * <p>
 * Submits a micro timing, updates the global query counters and, if the query
 * exceeded the slow-query threshold, also logs it along with an execution
 * point snapshot.
 *
 * @param sql the SQL statement which was executed
 * @param w   the watch started right before executing the statement
 */
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    // Read the elapsed time once: the watch keeps running, so repeated calls can
    // yield different values and make the recorded duration disagree with the
    // slow-query check.
    long elapsedMillis = w.elapsedMillis();
    Databases.queryDuration.addValue(elapsedMillis);
    if (elapsedMillis > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            w.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Records timing statistics for the given SQL statement.
 * <p>
 * Submits a micro timing, updates the global query counters and, if the query
 * exceeded the slow-query threshold, also logs it along with an execution
 * point snapshot.
 *
 * @param sql the SQL statement which was executed
 * @param w   the watch started right before executing the statement
 */
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    // Read the elapsed time once: the watch keeps running, so repeated calls can
    // yield different values and make the recorded duration disagree with the
    // slow-query check.
    long elapsedMillis = w.elapsedMillis();
    Databases.queryDuration.addValue(elapsedMillis);
    if (elapsedMillis > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            w.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Handles POST /bucket/id?uploads
 *
 * @param ctx    the context describing the current request
 * @param bucket the bucket containing the object to upload
 * @param id     name of the object to upload
 */
private void startMultipartUpload(WebContext ctx, Bucket bucket, String id) {
    Response response = ctx.respondWith();
    Map<String, String> properties = Maps.newTreeMap();
    // Copy the relevant metadata headers onto the upload and echo them back.
    for (String name : ctx.getRequest().headers().names()) {
        if (isStoredHeader(name)) {
            properties.put(name, ctx.getHeader(name));
            response.addHeader(name, ctx.getHeader(name));
        }
    }
    response.setHeader(HTTP_HEADER_NAME_CONTENT_TYPE, CONTENT_TYPE_XML);
    String uploadId = String.valueOf(uploadIdCounter.inc());
    multipartUploads.add(uploadId);
    getUploadDir(uploadId).mkdirs();
    XMLStructuredOutput out = response.xml();
    out.beginOutput("InitiateMultipartUploadResult");
    out.property(RESPONSE_BUCKET, bucket.getName());
    out.property("Key", id);
    out.property("UploadId", uploadId);
    out.endOutput();
}

/**
 * Determines if the given request header should be stored as a property of the
 * multipart upload (metadata, checksum, content type and ACL headers).
 */
private boolean isStoredHeader(String name) {
    String nameLower = name.toLowerCase();
    return nameLower.startsWith("x-amz-meta-")
           || "content-md5".equals(nameLower)
           || "content-type".equals(nameLower)
           || "x-amz-acl".equals(nameLower);
}
/**
 * Executes the delegate's batch while recording timing statistics.
 * <p>
 * The call is guarded by an {@link Operation} with a 30 second limit so hanging
 * batches become visible. Query counters are updated and, if the batch exceeded
 * the slow-query threshold, it is logged with an execution point snapshot.
 *
 * @return the update counts reported by the delegate statement
 * @throws SQLException if the underlying batch execution fails
 */
@Override
public int[] executeBatch() throws SQLException {
    Watch w = Watch.start();
    try (Operation op = new Operation(() -> "executeBatch: " + preparedSQL, Duration.ofSeconds(30))) {
        int[] result = delegate.executeBatch();
        w.submitMicroTiming("BATCH-SQL", preparedSQL);
        Databases.numQueries.inc();
        // Read the elapsed time once: the watch keeps running, so repeated calls can
        // yield different values and make the recorded duration disagree with the
        // slow-query check.
        long elapsedMillis = w.elapsedMillis();
        Databases.queryDuration.addValue(elapsedMillis);
        if (elapsedMillis > Databases.getLogQueryThresholdMillis()) {
            Databases.numSlowQueries.inc();
            DB.SLOW_DB_LOG.INFO("A slow JDBC batch query was executed (%s): %s (%s rows)\n%s",
                                w.duration(),
                                preparedSQL,
                                result.length,
                                ExecutionPoint.snapshot().toString());
        }
        return result;
    }
}
/**
 * Records trace and slow-query information for a completed MongoDB query.
 *
 * @param collection the collection the query ran against
 * @param w          the watch started right before executing the query
 */
protected void traceIfRequired(String collection, Watch w) {
    // Tracing threshold uses >=, the slow-query threshold below uses > - both
    // checks are independent and may fire for the same query.
    if (mongo.tracing && w.elapsedMillis() >= mongo.traceLimit) {
        String location = determineLocation();
        // Re-runs the query as an explain to capture the plan. NOTE(review): this
        // adds extra load per traced query - presumably acceptable while tracing.
        Doc explanation = explain(collection);
        mongo.traceData.put(location,
                            Tuple.create(collection + ": " + filterObject.toString() + " [" + w.duration() + "]",
                                         explanation.toString()));
    }
    if (w.elapsedMillis() > mongo.getLogQueryThresholdMillis()) {
        mongo.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow MongoDB query was executed (%s): %s\n%s\n%s",
                            w.duration(),
                            collection,
                            filterObject,
                            ExecutionPoint.snapshot().toString());
    }
}
// NOTE(review): fragment - the surrounding method and the remaining log arguments
// are not visible here.
// Count the slow query before logging it with the duration and a stack snapshot.
elastic.numSlowQueries.inc();
DB.SLOW_DB_LOG.INFO("A slow Elasticsearch query was executed (%s): %s\n%s\n%s", w.duration(),
// NOTE(review): fragment - the enclosing try/catch is not fully visible here.
// Count the conflict, then surface it as an OptimisticLockException while
// preserving the original exception as the cause.
optimisticLockErrors.inc();
throw new OptimisticLockException(e, entity);
} catch (Exception e) {
// NOTE(review): fragment - the enclosing try/catch is not fully visible here.
LOG.FINE("Version conflict on updating: %s", entity);
// Count the conflict, then surface it as an OptimisticLockException while
// preserving the original exception as the cause.
optimisticLockErrors.inc();
throw new OptimisticLockException(e, entity);
} catch (Exception e) {