/** Write a whole batch to EventHub. */
public Future<WriteResponse> write(Batch<String> batch, WriteCallback callback) {
  Timer.Context context = writeTimer.time();
  int returnCode = 0;
  LOG.info("Dispatching batch " + batch.getId());
  recordsAttempted.mark(batch.getRecords().size());
  try {
    String encoded = encodeBatch(batch);
    returnCode = request(encoded);
    WriteResponse<Integer> response = WRITE_RESPONSE_WRAPPER.wrap(returnCode);
    callback.onSuccess(response);
    bytesWritten.mark(encoded.length());
    recordsSuccess.mark(batch.getRecords().size());
  } catch (Exception e) {
    LOG.error("Dispatching batch " + batch.getId() + " failed: " + e.toString());
    callback.onFailure(e);
    recordsFailed.mark(batch.getRecords().size());
  }
  context.close();
  Future<Integer> future = Futures.immediateFuture(returnCode);
  return new WriteResponseFuture<>(future, WRITE_RESPONSE_WRAPPER);
}
@Override
public void handle(final Request req, final Response rsp) throws Throwable {
  MetricRegistry registry = req.require(MetricRegistry.class);
  Counter counter = registry.counter("request.actives");
  Timer.Context timer = registry.timer("request").time();
  counter.inc();
  rsp.complete((ereq, ersp, x) -> {
    timer.stop();
    counter.dec();
    Meter meter = registry.meter("responses." + rsp.status().orElse(Status.OK).value());
    meter.mark();
  });
}
private <T> T runWithMetrics(String opName, Function<Void, T> impl) {
  Preconditions.checkNotNull(opName);
  Preconditions.checkNotNull(impl);
  final MetricManager mgr = MetricManager.INSTANCE;
  mgr.getCounter(metricsPrefix, opName, M_CALLS).inc();
  final Timer.Context tc = mgr.getTimer(metricsPrefix, opName, M_TIME).time();
  try {
    return impl.apply(null);
  } catch (RuntimeException e) {
    mgr.getCounter(metricsPrefix, opName, M_EXCEPTIONS).inc();
    throw e;
  } finally {
    tc.stop();
  }
}
@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
  String timerName = invocation.getMethod().getAnnotation(Timed.class).value();
  if (timerName.isEmpty()) {
    timerName = MetricRegistry.name(invocation.getThis().getClass().getSuperclass(),
        invocation.getMethod().getName());
  }
  Timer.Context timerContext = metricsServiceProvider.get()
      .getMetricRegistry()
      .timer(timerName)
      .time();
  try {
    return invocation.proceed();
  } finally {
    timerContext.stop();
  }
}
Object secondInitInstance = HelixKafkaMirrorMakerMetricsReporter.get();
Assert.assertTrue(firstInitInstance == secondInitInstance);
Counter testCounter0 = new Counter();
Meter testMeter0 = new Meter();
Timer testTimer0 = new Timer();
HelixKafkaMirrorMakerMetricsReporter.get().registerMetric("testCounter0", testCounter0);
HelixKafkaMirrorMakerMetricsReporter.get().registerMetric("testMeter0", testMeter0);
HelixKafkaMirrorMakerMetricsReporter.get().registerMetric("testTimer0", testTimer0);
testCounter0.inc();
testMeter0.mark();
Context context = testTimer0.time();
context.stop();
Assert.assertEquals(
    HelixKafkaMirrorMakerMetricsReporter.get().getRegistry().getCounters().get("testCounter0")
        .getCount(), 1);
Assert.assertEquals(
    HelixKafkaMirrorMakerMetricsReporter.get().getRegistry().getMeters().get("testMeter0")
        .getCount(), 1);
Assert.assertEquals(
    HelixKafkaMirrorMakerMetricsReporter.get().getRegistry().getTimers().get("testTimer0")
        .getCount(), 1);
@Override
public Messages process(Messages messages) {
  for (final MessageFilter filter : filterRegistry) {
    for (Message msg : messages) {
      final String timerName = name(filter.getClass(), "executionTime");
      final Timer timer = metricRegistry.timer(timerName);
      final Timer.Context timerContext = timer.time();
      try {
        LOG.debug("Applying filter [{}] on message <{}>.", filter.getName(), msg.getId());
        if (filter.filter(msg)) {
          LOG.debug("Filter [{}] marked message <{}> to be discarded. Dropping message.",
              filter.getName(), msg.getId());
          msg.setFilterOut(true);
          filteredOutMessages.mark();
          journal.markJournalOffsetCommitted(msg.getJournalOffset());
        }
      } catch (Exception e) {
        LOG.error("Could not apply filter [" + filter.getName() + "] on message <" + msg.getId() + ">: ", e);
      } finally {
        final long elapsedNanos = timerContext.stop();
        msg.recordTiming(serverStatus, timerName, elapsedNanos);
      }
    }
  }
  return messages;
}
public synchronized void requestJenkinsSlave(Mesos.SlaveRequest request, Mesos.SlaveResult result) {
  Metrics.metricRegistry().meter("mesos.scheduler.slave.requests").mark();
  LOGGER.fine("Enqueuing jenkins slave request");
  requests.add(new Request(request, result));
  if (driver != null) {
    // Ask Mesos to send all offers, even those we declined earlier.
    // See comment in resourceOffers() for further details.
    Timer.Context ctx = Metrics.metricRegistry().timer("mesos.scheduler.revives").time();
    driver.reviveOffers();
    ctx.stop();
  }
}
public synchronized void rebalanceCurrentCluster(List<LiveInstance> liveInstances) {
  Context context = _rebalanceTimer.time();
  LOGGER.info("AutoRebalanceLiveInstanceChangeListener.onLiveInstanceChange() wakes up!");
  try {
    _numLiveInstances.inc(liveInstances.size() - _numLiveInstances.getCount());
    if (!_helixManager.isLeader()) {
      LOGGER.info("Not leader, do nothing!");
      return;
    }
    // ... rebalancing logic elided in the original excerpt ...
    _rebalanceRate.mark();
  } finally {
    context.close();
  }
}
@Override
public void onSuccess(Void param) {
  // Release semaphore first in case there are other problems with communicating with Cassandra.
  if (logger.isDebugEnabled()) {
    logger.debug("Job succeeded with the job id {}", execution.getJobId());
  }
  capacitySemaphore.release();
  timer.stop();
  runCounter.dec();
  successCounter.inc();

  // TODO: refactor into the execution itself for checking if done
  if (execution.getStatus() == Status.IN_PROGRESS) {
    logger.debug("Successful completion of bulkJob {}", execution);
    execution.completed();
  }

  jobAccessor.save(execution);

  if (currentListener != null) {
    currentListener.onSuccess(execution);
  }
}
@Override
public Integer call() throws Exception {
  loggerForCleaner.debug("Beginning log cleanup");
  int total = 0;
  final Timer.Context ctx = new Timer().time();
  for (final Log kafkaLog : JavaConversions.asJavaIterable(logManager.allLogs())) {
    if (kafkaLog.config().compact()) continue;
    loggerForCleaner.debug("Garbage collecting {}", kafkaLog.name());
    total += cleanupExpiredSegments(kafkaLog)
        + cleanupSegmentsToMaintainSize(kafkaLog)
        + cleanupSegmentsToRemoveCommitted(kafkaLog);
  }
  loggerForCleaner.debug("Log cleanup completed. {} files deleted in {} seconds",
      total, NANOSECONDS.toSeconds(ctx.stop()));
  return total;
}
@Override
public void response(Response<ByteString> response) {
  requestRateCounter.ifPresent(consumer -> consumer.accept(response));
  responseSizeHistogram.ifPresent(histogram -> response.payload()
      .ifPresent(payload -> histogram.update(payload.size())));
  sentReplies.mark();
  timerContext.ifPresent(timer -> {
    final long duration = timer.stop();
    final long durationMs = TimeUnit.NANOSECONDS.toMillis(duration);
    durationThresholdTracker.ifPresent(counter -> {
      counter.markDurationThresholds(durationMs);
    });
  });
  StatusType.Family family = response.status().family();
  if (family != INFORMATIONAL && family != SUCCESSFUL) {
    sentErrors.mark();
  }
  if (family == CLIENT_ERROR) {
    sentErrors4xx.mark();
  }
  if (family == SERVER_ERROR) {
    sentErrors5xx.mark();
  }
}
private void endFunction(String function, MetaStoreEndFunctionContext context) {
  com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
  if (timerContext != null) {
    timerContext.close();
  }
  Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
  if (counter != null) {
    counter.dec();
  }
  for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
    listener.onEndFunction(function, context);
  }
}
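All of the snippets above follow the same underlying Dropwizard Metrics pattern: obtain a Timer (usually from a MetricRegistry), open a Timer.Context immediately before the measured work, and stop or close it when the work finishes, typically in a finally block so the duration is recorded even if the work throws. A minimal, self-contained sketch of that pattern follows; the class, method, and metric names are illustrative and not taken from any snippet above.

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class TimedOperationSketch {
  private static final MetricRegistry REGISTRY = new MetricRegistry();
  // "example.operation.time" is an illustrative metric name, not one used by the snippets above.
  private static final Timer TIMER = REGISTRY.timer("example.operation.time");

  static void runTimed(Runnable work) {
    final Timer.Context context = TIMER.time();
    try {
      work.run();
    } finally {
      // stop() records and returns the elapsed time in nanoseconds;
      // Timer.Context also implements Closeable, so close() has the same effect.
      context.stop();
    }
  }
}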