/**
 * Receives a single event synchronously and wraps it with its message id.
 *
 * <p>Updates the receive-latency and call-count metrics on every attempt,
 * and the message-count metric only when an event was actually returned.
 *
 * @return the wrapped event, or {@code null} when nothing was received or
 *         the receive call failed
 */
@Override
public EventDataWrap receive() {
    long start = System.currentTimeMillis();
    Iterable<EventData> receivedEvents;
    /* Get one message at a time for backward compatibility behaviour */
    try {
        receivedEvents = receiver.receiveSync(1);
    } catch (ServiceBusException e) {
        // Pass the throwable itself so the full stack trace is logged,
        // instead of concatenating e.toString() into the message.
        logger.error("Exception occurred during receive", e);
        return null;
    }
    long millis = System.currentTimeMillis() - start;
    receiveApiLatencyMean.update(millis);
    receiveApiCallCount.incr();
    // getExactSizeIfKnown() returns -1 for sources of unknown size, which the
    // old "== 0" check would treat as non-empty and then hit iterator().next()
    // on an empty result; hasNext() answers emptiness reliably.
    if (receivedEvents == null || !receivedEvents.iterator().hasNext()) {
        return null;
    }
    receiveMessageCount.incr();
    EventData receivedEvent = receivedEvents.iterator().next();
    MessageId messageId = new MessageId(partitionId,
        receivedEvent.getSystemProperties().getOffset(),
        receivedEvent.getSystemProperties().getSequenceNumber());
    return EventDataWrap.create(receivedEvent, messageId);
}
/**
 * Fetches messages from the given Kafka partition at the given offset,
 * recording the fetch latency (ms) into the mean and max latency metrics.
 *
 * @param consumer  consumer used to perform the fetch
 * @param partition partition to read from
 * @param offset    offset to start fetching at
 * @return the fetched message set (whatever KafkaUtils.fetchMessages returns)
 */
private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
    long start = System.nanoTime();
    // Assign directly: the previous "= null" initialization was dead,
    // since the variable was unconditionally overwritten on the next line.
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
    long millis = (System.nanoTime() - start) / 1000000;
    _kafkaMeanFetchLatencyMetric.update(millis);
    _kafkaMaxFetchLatencyMetric.update(millis);
    return msgs;
}
// Records end-to-end tuple latency (now minus the tuple's embedded timestamp)
// into the averageLatency metric when metrics collection is enabled.
// NOTE(review): assumes the value at RECORD_TIMESTAMP_POSITION is an
// epoch-milliseconds Long placed there by the emitter — confirm upstream.
private void updateLatency(Tuple tuple) {
    if (metricsEnabled && tuple.size() > 1) {
        // Could use named fields instead
        Long timestamp = (Long) tuple.getValue(TopologyConstants.RECORD_TIMESTAMP_POSITION);
        averageLatency.update(System.currentTimeMillis() - timestamp);
    }
}
} // closes the enclosing class (its declaration is outside this chunk)
// Records end-to-end tuple latency (now minus the tuple's embedded timestamp)
// into the averageLatency metric when metrics collection is enabled.
// NOTE(review): assumes the value at RECORD_TIMESTAMP_POSITION is an
// epoch-milliseconds Long placed there by the emitter — confirm upstream.
private void updateLatency(Tuple tuple) {
    if (metricsEnabled && tuple.size() > 1) {
        // Could use named fields instead
        Long timestamp = (Long) tuple.getValue(TopologyConstants.RECORD_TIMESTAMP_POSITION);
        averageLatency.update(System.currentTimeMillis() - timestamp);
    }
}
} // closes the enclosing class (its declaration is outside this chunk)
/**
 * Storm failure callback for the tuple identified by {@code msgId}.
 *
 * <p>Aborts the topology (by throwing) once the total number of recorded
 * failures reaches the configured maximum. Otherwise, if the failed tuple is
 * still pending acknowledgement, records its time-in-topology, remembers the
 * failure timestamp, and updates the failure-count metric.
 *
 * @param msgId the sequence number (boxed Long) of the failed tuple
 * @throws RuntimeException when the configured failure limit is reached
 */
@Override
public void fail(Object msgId) {
    LOGGER.info("Failing For... {} Current TimeInMillis since epoch {}", msgId, System.currentTimeMillis());
    if (this.failureMessages.size() >= this.spoutConfig.getFailureConfig().getNumMaxTotalFailAllowed()) {
        throw new RuntimeException("Failure count greater than configured allowed failures...Stopping");
    }
    long sequenceNumber = (Long) msgId;
    RetryTransactionEvent pendingEvent = this.pendingMessagesToBeAcked.get(sequenceNumber);
    if (pendingEvent == null) {
        LOGGER.warn("Failures happening for seemingly non-existent tuples...");
        return;
    }
    this.txEventFailTimeInTopology.update(System.currentTimeMillis() - pendingEvent.getTimeWhenEmitted());
    this.failureMessages.put(sequenceNumber, System.currentTimeMillis());
    this.failureMetric.update(this.failureMessages.size());
}
/** * Internal helper to record the value of a timer. * @param key String representation of the key to record the timer under * @param elapsedTimeMs How long the timer ran for, in milliseconds. */ private void recordTimer(final String key, final long elapsedTimeMs) { // Update averaged timer key timers.scope(key).update(elapsedTimeMs); // Increment total time counter, this keeps a running count of total time spent in this timer counters.scope(key + "_totalTimeMs").incrBy(elapsedTimeMs); }
// Record the elapsed wall-clock duration (end - start) into the averaged
// metric. NOTE(review): fragment of a larger method not visible here — the
// units (ms vs ns) depend on how start/end were captured upstream.
averagedMetrics.update(end - start);
@Override public void execute(Tuple input) { tupleCount++; updateCount(countMetric, 1L); if (metricsEnabled) { // (1 + 2 + 3 + 4 + ...) / (1 + 1 + 1 + 1 + ...) averagingMetric.update(tupleCount); } if (input != null) { onMeta(input); } }
// Record how long the transaction event took to process, then serialize it.
// NOTE(review): this subtracts a nanos-derived start (getStartTimeInNanos()
// / 1000000) from System.currentTimeMillis(); if the start came from
// System.nanoTime() the two clocks have different origins and the delta is
// meaningless — confirm the start time is epoch-based.
this.txEventProcessTime.update(System.currentTimeMillis() - ( txEvent.getStartTimeInNanos() / 1000000 ));
String txJson = MAPPER.writeValueAsString(txEvent);
// Fragment continues past this chunk: constructor arguments are truncated.
BinLogPosition binLogPosition = new BinLogPosition(txEvent.getBinLogPosition(),
// Convert the elapsed nanos to milliseconds and record fetch-API latency
// (max and mean) plus one call against the call counter.
long millis = (end - start) / 1000000;
_fetchAPILatencyMax.update(millis);
_fetchAPILatencyMean.update(millis);
_fetchAPICallCount.incr();
// Fragment of a larger method: the body of this null-guard is outside this chunk.
if (msgs != null) {
// Per-fetch instrumentation: averaged wait/fetch durations and byte counts,
// cumulative fetch/byte counters, and per-second rate metrics.
// NOTE(review): fragment — timeWaiting/timeFetching/byteLength are computed
// in surrounding code not visible here.
averagedMetrics.scope("wait_time").update(timeWaiting);
averagedMetrics.scope("fetch_time").update(timeFetching);
averagedMetrics.scope("bytes_fetched").update(byteLength);
eventCounter.scope("fetched").incrBy(1);
eventCounter.scope("bytes_fetched").incrBy(byteLength);
perSecMetrics.scope("bytes_fetched_perSec").update(byteLength);
perSecMetrics.scope("fetched_perSec").update(1);
// Per-fetch instrumentation: averaged fetch duration, queue dwell time, and
// byte count; per-second rates; and cumulative fetch/byte counters.
// NOTE(review): fragment — timeFetching/timeInQueues/byteLength are computed
// in surrounding code not visible here.
averagedMetrics.scope("fetch_time").update(timeFetching);
averagedMetrics.scope("time_in_queues").update(timeInQueues);
averagedMetrics.scope("bytes_fetched").update(byteLength);
perSecMetrics.scope("bytes_fetched_perSec").update(byteLength);
perSecMetrics.scope("fetched_perSec").update(1);
eventCounter.scope("fetched").incrBy(1);
eventCounter.scope("bytes_fetched").incrBy(byteLength);
perSecMetrics.scope("Indexed").update(1);