// Fragment (incomplete — the opening `if` and loop are not closed in this
// view): emits one metric event per entry of a stats map, tagged with
// network-interface dimensions taken from `netconf`.
if (stats != null) {
  // NOTE(review): builder() presumably pre-populates common service
  // dimensions in the enclosing monitor class — confirm against that class.
  final ServiceMetricEvent.Builder builder = builder()
      .setDimension("netName", netconf.getName())
      .setDimension("netAddress", netconf.getAddress())
      .setDimension("netHwaddr", netconf.getHwaddr());
  // Merge caller-supplied extra dimensions into every emitted event.
  MonitorUtils.addDimensionsToBuilder(builder, dimensions);
  // One event per stat key/value pair.
  for (Map.Entry<String, Long> entry : stats.entrySet()) {
    emitter.emit(builder.build(entry.getKey(), entry.getValue()));
/**
 * Emits the per-period change of query success / failure / interruption
 * counters. The cumulative values read from {@code statsProvider} are turned
 * into deltas by {@code keyedDiff}, which returns null on the first call
 * (no prior snapshot) — in that case nothing is emitted.
 *
 * @param emitter sink that receives one event per counter delta
 * @return always {@code true}, signalling the monitor should keep running
 */
@Override
public boolean doMonitor(ServiceEmitter emitter)
{
  final ServiceMetricEvent.Builder eventBuilder = new ServiceMetricEvent.Builder();
  final Map<String, Long> counterDeltas = keyedDiff.to(
      "queryCountStats",
      ImmutableMap.of(
          "query/success/count", statsProvider.getSuccessfulQueryCount(),
          "query/failed/count", statsProvider.getFailedQueryCount(),
          "query/interrupted/count", statsProvider.getInterruptedQueryCount()
      )
  );
  // First invocation yields no diff; skip emission until a baseline exists.
  if (counterDeltas == null) {
    return true;
  }
  for (Map.Entry<String, Long> delta : counterDeltas.entrySet()) {
    emitter.emit(eventBuilder.build(delta.getKey(), delta.getValue()));
  }
  return true;
}
// NOTE(review): this span is a stitched extraction of several emit sites from
// a coordinator metrics monitor; most calls below are syntactically
// incomplete (closing parentheses fall outside this view). Documented as-is.

// Cluster-wide count of segments fully overshadowed by newer versions.
new ServiceMetricEvent.Builder().build(
    "segment/overShadowed/count", stats.getGlobalStat("overShadowedCount")
// Per-server load-queue gauges — one event per server's LoadQueuePeon.
.forEach((final String serverName, final LoadQueuePeon queuePeon) -> {
  emitter.emit(
      new ServiceMetricEvent.Builder()
          .setDimension(DruidMetrics.SERVER, serverName).build(
              "segment/loadQueue/size", queuePeon.getLoadQueueSize()
  // getAndReset: failed-assign count is a delta since the last emission.
  new ServiceMetricEvent.Builder()
      .setDimension(DruidMetrics.SERVER, serverName).build(
          "segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()
  new ServiceMetricEvent.Builder()
      .setDimension(DruidMetrics.SERVER, serverName).build(
          "segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()
  new ServiceMetricEvent.Builder()
      .setDimension(DruidMetrics.SERVER, serverName).build(
          "segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()
// Per-datasource unavailable-segment count; `entry`/`dataSource` come from an
// enclosing loop outside this view.
final long count = entry.getLongValue();
emitter.emit(
    new ServiceMetricEvent.Builder()
        .setDimension(DruidMetrics.DATASOURCE, dataSource).build(
            "segment/unavailable/count", count
// Dangling builder — its dimension chain continues outside this view.
new ServiceMetricEvent.Builder()
// Fragment (incomplete — closing parentheses lie outside this view): emits
// the cluster-wide count of segment merges performed by the coordinator.
new ServiceMetricEvent.Builder().build(
    "coordinator/merge/count", stats.getGlobalStat("mergedCount")
// Fragment (incomplete — the `if` and loop are not closed in this view):
// emits one event per filesystem stat, tagged with device / mount-point /
// type / options dimensions from `fs`.
if (stats != null) {
  final ServiceMetricEvent.Builder builder = builder()
      .setDimension("fsDevName", fs.getDevName())
      .setDimension("fsDirName", fs.getDirName())
      .setDimension("fsTypeName", fs.getTypeName())
      .setDimension("fsSysTypeName", fs.getSysTypeName())
      // Comma-separated mount options become a multi-valued dimension.
      .setDimension("fsOptions", fs.getOptions().split(","));
  MonitorUtils.addDimensionsToBuilder(builder, dimensions);
  for (Map.Entry<String, Long> entry : stats.entrySet()) {
    emitter.emit(builder.build(entry.getKey(), entry.getValue()));
// Fragment (the builder chain starts before this view): tags task-shutdown
// metrics with task id, datasource, and the graceful/error outcome flags,
// then emits an interrupt counter plus the elapsed shutdown time.
    .setDimension("task", task.getId())
    .setDimension("dataSource", task.getDataSource())
    .setDimension("graceful", String.valueOf(graceful))
    .setDimension("error", String.valueOf(error));
emitter.emit(metricBuilder.build("task/interrupt/count", 1L));
// NOTE(review): `elapsed` is computed outside this view — presumably
// milliseconds; confirm against the enclosing method.
emitter.emit(metricBuilder.build("task/interrupt/elapsed", elapsed));
// Fragment: the leading `.build()` terminates an expression begun outside
// this view. The remainder emits one event per filesystem stat, tagged with
// device / mount-point / type / options dimensions from `fs`.
    .build();
final ServiceMetricEvent.Builder builder = builder()
    .setDimension("fsDevName", fs.getDevName())
    .setDimension("fsDirName", fs.getDirName())
    .setDimension("fsTypeName", fs.getTypeName())
    .setDimension("fsSysTypeName", fs.getSysTypeName())
    // Comma-separated mount options become a multi-valued dimension.
    .setDimension("fsOptions", fs.getOptions().split(","));
MonitorUtils.addDimensionsToBuilder(builder, dimensions);
for (Map.Entry<String, Long> entry : stats.entrySet()) {
  emitter.emit(builder.build(entry.getKey(), entry.getValue()));
// Shared event builder for task-scoped metrics: every event built from it
// carries the task's datasource and task type as dimensions.
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
    .setDimension("dataSource", task.getDataSource())
    .setDimension("taskType", task.getType());
// Stitched fragments from a server segment monitor. Several statements
// reference loop variables (`entry`, `dataSource`, `used`) declared outside
// this view, and `builder` is used before the declaration visible below —
// the pieces come from different enclosing scopes.

// Configured maximum segment capacity of this server, in bytes.
emitter.emit(new ServiceMetricEvent.Builder().build("segment/max", serverConfig.getMaxSize()));
// Per-datasource bytes awaiting deletion, tagged with tier and priority.
final long pendingDeleteSize = entry.getLongValue();
emitter.emit(
    new ServiceMetricEvent.Builder()
        .setDimension(DruidMetrics.DATASOURCE, dataSource)
        .setDimension("tier", serverConfig.getTier())
        .setDimension("priority", String.valueOf(serverConfig.getPriority()))
        .build("segment/pendingDelete", pendingDeleteSize)
);
// NOTE(review): this builder expression is detached — its assignment target
// lies outside this view.
new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource)
    .setDimension("tier", serverConfig.getTier())
    .setDimension("priority", String.valueOf(serverConfig.getPriority()));
emitter.emit(builder.build("segment/used", used));
// Guard against division by zero when the server has no configured capacity.
final double usedPercent = serverConfig.getMaxSize() == 0 ? 0 : used / (double) serverConfig.getMaxSize();
emitter.emit(builder.build("segment/usedPercent", usedPercent));
// Per-datasource segment count with the same tier/priority dimensions.
long count = entry.getValue();
final ServiceMetricEvent.Builder builder =
    new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource)
        .setDimension("tier", serverConfig.getTier())
        .setDimension(
            "priority",
            String.valueOf(serverConfig.getPriority())
        );
emitter.emit(builder.build("segment/count", count));
// Stitched fragments from a segment-merge task: count/timing/size metrics for
// a merge-and-upload. `startTime`, `uploadStart`, `uploadedSegment`,
// `segments`, `myLock`, and `toolbox` are declared outside this view.
final ServiceMetricEvent.Builder builder = new ServiceMetricEvent.Builder();
final DataSegment mergedSegment = computeMergedSegment(getDataSource(), myLock.getVersion(), segments);
final File mergeDir = toolbox.getMergeDir();
// Number of source segments merged into the single output segment.
emitter.emit(builder.build("merger/numMerged", segments.size()));
// Wall-clock durations, in milliseconds.
emitter.emit(builder.build("merger/mergeTime", System.currentTimeMillis() - startTime));
emitter.emit(builder.build("merger/uploadTime", System.currentTimeMillis() - uploadStart));
// Byte size of the uploaded merged segment.
emitter.emit(builder.build("merger/mergeSize", uploadedSegment.getSize()));
// Fragment (incomplete — the enclosing per-CPU loop over index `i` and the
// `builderUsr` declaration start outside this view): emits per-CPU user and
// system time deltas for a cgroup since the prior snapshot.
    .setDimension("cpuName", Integer.toString(i))
    .setDimension("cpuTime", "usr");
final ServiceMetricEvent.Builder builderSys = builder()
    .setDimension("cpuName", Integer.toString(i))
    .setDimension("cpuTime", "sys");
MonitorUtils.addDimensionsToBuilder(builderUsr, dimensions);
MonitorUtils.addDimensionsToBuilder(builderSys, dimensions);
// Delta of user-mode CPU time (nanoseconds, per the metric name).
emitter.emit(builderUsr.build(
    dateTime,
    "cgroup/cpu_time_delta_ns",
    snapshot.usrTime(i) - priorSnapshotHolder.metric.usrTime(i)
));
// NOTE(review): the sys-time argument list is cut off after this call opens.
emitter.emit(builderSys.build(
    dateTime,
    "cgroup/cpu_time_delta_ns",
// Elapsed wall time (ns) covered by the deltas above.
emitter.emit(builder().build(dateTime, "cgroup/cpu_time_delta_ns_elapsed", elapsedNs));
// Emits the byte size of a segment moved by this task, tagged with
// datasource, task type, and the segment's time interval.
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
    .setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
    .setDimension(DruidMetrics.TASK_TYPE, task.getType());
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/moved/bytes", segment.getSize()));
// Emits the byte size of a segment deleted ("nuked") by this task, tagged
// with datasource, task type, and the segment's time interval.
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
    .setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
    .setDimension(DruidMetrics.TASK_TYPE, task.getType());
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/nuked/bytes", segment.getSize()));
// Fragment (incomplete — the `if` body and one emit call are cut off):
// emits per-period deltas of realtime ingestion metrics for one
// FireDepartment, keyed by its datasource. `metrics` is the current
// cumulative snapshot and `previous` the one stored on the last pass.
final ServiceMetricEvent.Builder builder = new ServiceMetricEvent.Builder()
    .setDimension(DruidMetrics.DATASOURCE, fireDepartment.getDataSchema().getDataSource());
MonitorUtils.addDimensionsToBuilder(builder, dimensions);
emitter.emit(builder.build("ingest/events/thrownAway", thrownAway));
// Deltas are cumulative-now minus the previously recorded snapshot.
final long unparseable = metrics.unparseable() - previous.unparseable();
if (unparseable > 0) {
  log.error("[%,d] Unparseable events! Turn on debug logging to see exception stack trace.", unparseable);
emitter.emit(builder.build("ingest/events/unparseable", unparseable));
emitter.emit(builder.build("ingest/events/processed", metrics.processed() - previous.processed()));
emitter.emit(builder.build("ingest/rows/output", metrics.rowOutput() - previous.rowOutput()));
emitter.emit(builder.build("ingest/persists/count", metrics.numPersists() - previous.numPersists()));
emitter.emit(builder.build("ingest/persists/time", metrics.persistTimeMillis() - previous.persistTimeMillis()));
emitter.emit(builder.build("ingest/persists/cpu", metrics.persistCpuTime() - previous.persistCpuTime()));
// NOTE(review): this emit's argument list is cut off in this view.
emitter.emit(
    builder.build(
        "ingest/persists/backPressure",
        metrics.persistBackPressureMillis() - previous.persistBackPressureMillis()
emitter.emit(builder.build("ingest/persists/failed", metrics.failedPersists() - previous.failedPersists()));
emitter.emit(builder.build("ingest/handoff/failed", metrics.failedHandoffs() - previous.failedHandoffs()));
emitter.emit(builder.build("ingest/merge/time", metrics.mergeTimeMillis() - previous.mergeTimeMillis()));
emitter.emit(builder.build("ingest/merge/cpu", metrics.mergeCpuTime() - previous.mergeCpuTime()));
emitter.emit(builder.build("ingest/handoff/count", metrics.handOffCount() - previous.handOffCount()));
// Gauges emitted as absolute values, not deltas.
emitter.emit(builder.build("ingest/sink/count", metrics.sinkCount()));
emitter.emit(builder.build("ingest/events/messageGap", metrics.messageGap()));
// Record the current snapshot so the next pass can compute fresh deltas.
previousValues.put(fireDepartment, metrics);
// Fragment (stitched — the branch condition and closing braces lie outside
// this view): emits a transactional-publish success/failure counter, then
// the byte size of each added segment tagged with its interval.
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
    .setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
    .setDimension(DruidMetrics.TASK_TYPE, task.getType());
toolbox.getEmitter().emit(metricBuilder.build("segment/txn/success", 1));
} else {
  toolbox.getEmitter().emit(metricBuilder.build("segment/txn/failure", 1));
// NOTE(review): `segment` comes from an enclosing loop outside this view.
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize()));
// Fragment (loop body not closed in this view): emits each CPU-time category
// as a percentage of the total. `name` (CPU identifier) and `total` (sum of
// all category values) are declared outside this view.
for (Map.Entry<String, Long> entry : stats.entrySet()) {
  final ServiceMetricEvent.Builder builder = builder()
      .setDimension("cpuName", name)
      .setDimension("cpuTime", entry.getKey());
  MonitorUtils.addDimensionsToBuilder(builder, dimensions);
  // Integer percentage of total CPU time spent in this category.
  emitter.emit(builder.build("sys/cpu", entry.getValue() * 100 / total)); // [0,100]
@Override public void emit(ServiceEmitter emitter) { Swap swap = null; try { swap = sigar.getSwap(); } catch (SigarException e) { log.error(e, "Failed to get Swap"); } if (swap != null) { long currPageIn = swap.getPageIn(); long currPageOut = swap.getPageOut(); final Map<String, Long> stats = ImmutableMap.of( "sys/swap/pageIn", (currPageIn - prevPageIn), "sys/swap/pageOut", (currPageOut - prevPageOut), "sys/swap/max", swap.getTotal(), "sys/swap/free", swap.getFree() ); final ServiceMetricEvent.Builder builder = builder(); MonitorUtils.addDimensionsToBuilder(builder, dimensions); for (Map.Entry<String, Long> entry : stats.entrySet()) { emitter.emit(builder.build(entry.getKey(), entry.getValue())); } this.prevPageIn = currPageIn; this.prevPageOut = currPageOut; } } }
/**
 * Builds the periodic task that publishes the total Kafka consumer lag for
 * this task's datasource.
 *
 * <p>The runnable compares the task's highest consumed offsets against the
 * latest offsets fetched from Kafka, warns if the partition sets diverge,
 * and emits the sum of per-partition lags (each clamped at zero). Any
 * failure is logged at WARN and swallowed so the scheduler keeps running.
 *
 * @return the lag-emitting runnable
 */
private Runnable emitLag()
{
  return () -> {
    try {
      final Map<Integer, Long> currentOffsets = getHighestCurrentOffsets();
      if (latestOffsetsFromKafka == null) {
        throw new ISE("Latest offsets from Kafka have not been fetched");
      }
      if (!latestOffsetsFromKafka.keySet().equals(currentOffsets.keySet())) {
        log.warn(
            "Lag metric: Kafka partitions %s do not match task partitions %s",
            latestOffsetsFromKafka.keySet(),
            currentOffsets.keySet()
        );
      }
      // Sum per-partition lag; transiently negative values count as zero.
      long totalLag = 0;
      for (Long partitionLag : getLagPerPartition(currentOffsets).values()) {
        totalLag += Math.max(partitionLag, 0);
      }
      emitter.emit(
          ServiceMetricEvent.builder()
                            .setDimension("dataSource", dataSource)
                            .build("ingest/kafka/lag", totalLag)
      );
    }
    catch (Exception e) {
      log.warn(e, "Unable to compute Kafka lag");
    }
  };
}