/**
 * Copies every entry of {@code dimensions} onto {@code builder} as a dimension.
 *
 * @param builder    metric event builder to mutate
 * @param dimensions map of dimension name to dimension values
 */
public static void addDimensionsToBuilder(ServiceMetricEvent.Builder builder, Map<String, String[]> dimensions)
{
  // setDimension(String, String[]) matches the BiConsumer shape expected by forEach.
  dimensions.forEach(builder::setDimension);
}
}
@Override
public void interval(QueryType query)
{
  checkModifiedFromOwnerThread();
  // Render each query interval to its string form and set them all as one multi-value dimension.
  final List<Interval> queryIntervals = query.getIntervals();
  final String[] intervalStrings = new String[queryIntervals.size()];
  for (int i = 0; i < intervalStrings.length; i++) {
    intervalStrings[i] = queryIntervals.get(i).toString();
  }
  builder.setDimension(DruidMetrics.INTERVAL, intervalStrings);
}
@Override
public boolean doMonitor(ServiceEmitter emitter)
{
  // Snapshot the previously reported values so deltas are computed against a stable map.
  final Map<String, Long> previous = this.priorValues.get();
  final Map<String, Long> current = getCurrentValues();
  final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();

  for (Map.Entry<String, Long> metric : current.entrySet()) {
    final String metricName = metric.getKey();
    final Long metricValue = metric.getValue();

    // Absolute (cumulative) value of this memcached metric.
    emitter.emit(
        builder.setDimension("memcached metric", metricName)
               .build("query/cache/memcached/total", metricValue)
    );

    // Delta since the last report, emitted only for metrics we have seen before.
    final Long previousValue = previous.get(metricName);
    if (previousValue != null) {
      emitter.emit(
          builder.setDimension("memcached metric", metricName)
                 .build("query/cache/memcached/delta", metricValue - previousValue)
      );
    }
  }

  // Best-effort swap of the baseline; if another thread raced us, overwrite unconditionally.
  if (!this.priorValues.compareAndSet(previous, current)) {
    log.error("Prior value changed while I was reporting! updating anyways");
    this.priorValues.set(current);
  }
  return true;
}
// Tag the metric with the task's datasource and type, then the segment's interval,
// and emit the size of the segment that was moved.
.setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
    .setDimension(DruidMetrics.TASK_TYPE, task.getType());
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/moved/bytes", segment.getSize()));
// Tag the metric with the task's datasource and type, then the segment's interval,
// and emit the size of the segment that was deleted ("nuked").
.setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
    .setDimension(DruidMetrics.TASK_TYPE, task.getType());
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/nuked/bytes", segment.getSize()));
// Tag with the ingestion datasource, then apply the monitor-wide extra dimensions.
.setDimension(DruidMetrics.DATASOURCE, fireDepartment.getDataSchema().getDataSource());
MonitorUtils.addDimensionsToBuilder(builder, dimensions);
.setDimension("cpuName", Integer.toString(i))
    .setDimension("cpuTime", "usr");
// Per-CPU builder for "sys" time, mirroring the "usr" builder above; both then get the
// monitor-wide extra dimensions applied.
final ServiceMetricEvent.Builder builderSys = builder()
    .setDimension("cpuName", Integer.toString(i))
    .setDimension("cpuTime", "sys");
MonitorUtils.addDimensionsToBuilder(builderUsr, dimensions);
MonitorUtils.addDimensionsToBuilder(builderSys, dimensions);
// Tag the metric with the task's datasource and type, then the segment's interval,
// and emit the size of the segment that was added.
.setDimension(DruidMetrics.DATASOURCE, task.getDataSource())
    .setDimension(DruidMetrics.TASK_TYPE, task.getType());
metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize()));
/**
 * Builds the periodic task that computes total consumer lag across all Kafka partitions
 * and emits it as the "ingest/kafka/lag" metric. Any failure is logged, never thrown.
 */
private Runnable emitLag()
{
  return () -> {
    try {
      final Map<Integer, Long> currentOffsets = getHighestCurrentOffsets();
      if (latestOffsetsFromKafka == null) {
        throw new ISE("Latest offsets from Kafka have not been fetched");
      }
      // Partition sets can diverge transiently (e.g. during repartitioning); warn but continue.
      if (!latestOffsetsFromKafka.keySet().equals(currentOffsets.keySet())) {
        log.warn(
            "Lag metric: Kafka partitions %s do not match task partitions %s",
            latestOffsetsFromKafka.keySet(),
            currentOffsets.keySet()
        );
      }
      // Clamp negative per-partition lags to zero before summing.
      long totalLag = 0;
      for (long partitionLag : getLagPerPartition(currentOffsets).values()) {
        totalLag += Math.max(partitionLag, 0);
      }
      emitter.emit(
          ServiceMetricEvent.builder().setDimension("dataSource", dataSource).build("ingest/kafka/lag", totalLag)
      );
    }
    catch (Exception e) {
      log.warn(e, "Unable to compute Kafka lag");
    }
  };
}
// NOTE(review): this span appears to interleave two separate loop bodies — one keyed by
// memory kind ("memKind") and one per memory pool ("poolKind"/"poolName"); `usage` and
// `builder` are each declared twice, so the enclosing loops must live outside this view.
final String kind = entry.getKey();
final MemoryUsage usage = entry.getValue();
final ServiceMetricEvent.Builder builder = builder().setDimension("memKind", kind);
MonitorUtils.addDimensionsToBuilder(builder, dimensions);
// Second fragment: per-pool usage, tagged with both the pool kind and the pool name.
final MemoryUsage usage = pool.getUsage();
final ServiceMetricEvent.Builder builder = builder()
    .setDimension("poolKind", kind)
    .setDimension("poolName", pool.getName());
MonitorUtils.addDimensionsToBuilder(builder, dimensions);
// Emit one "sys/cpu" event per CPU-time category, normalized to a percentage of total time.
for (Map.Entry<String, Long> entry : stats.entrySet()) {
  final ServiceMetricEvent.Builder builder = builder()
      .setDimension("cpuName", name)
      .setDimension("cpuTime", entry.getKey());
  MonitorUtils.addDimensionsToBuilder(builder, dimensions);
  emitter.emit(builder.build("sys/cpu", entry.getValue() * 100 / total)); // [0,100]
/**
 * Reacts to a task status change: bails out if this runner is shutting down, persists the
 * status via {@code notifyStatus}, and — once the task reaches a terminal state — emits the
 * "task/run/time" metric and a log line. Any failure is converted into an alert rather than
 * propagated to the caller.
 */
private void handleStatus(final TaskStatus status)
{
  try {
    // If we're not supposed to be running anymore, don't do anything. Somewhat racey if the flag gets set
    // after we check and before we commit the database transaction, but better than nothing.
    if (!active) {
      log.info("Abandoning task due to shutdown: %s", task.getId());
      return;
    }

    notifyStatus(task, status);

    // Emit event and log, if the task is done
    if (status.isComplete()) {
      metricBuilder.setDimension(DruidMetrics.TASK_STATUS, status.getStatusCode().toString());
      emitter.emit(metricBuilder.build("task/run/time", status.getDuration()));

      // NOTE(review): the format reads "Task %s: %s" but the first argument is the status
      // code and the second is the task — confirm the intended argument order.
      log.info(
          "Task %s: %s (%d run duration)",
          status.getStatusCode(),
          task,
          status.getDuration()
      );
    }
  }
  catch (Exception e) {
    // Failures here must not kill the runner thread; surface them as an alert instead.
    log.makeAlert(e, "Failed to handle task status")
       .addData("task", task.getId())
       .addData("statusCode", status.getStatusCode())
       .emit();
  }
}
}
/**
 * Records an audit entry: emits a "config/audit" count metric tagged with the entry's key,
 * type, and author, then inserts the full serialized entry into the audit table.
 *
 * @throws IOException if the entry cannot be serialized to JSON
 */
@Override
public void doAudit(AuditEntry auditEntry, Handle handle) throws IOException
{
  // Count the audit event before persisting it.
  final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder();
  metricBuilder.setDimension("key", auditEntry.getKey());
  metricBuilder.setDimension("type", auditEntry.getType());
  metricBuilder.setDimension("author", auditEntry.getAuditInfo().getAuthor());
  emitter.emit(metricBuilder.build("config/audit", 1));

  // Persist the entry; the payload column stores the serialized entry itself.
  final String insertSql = StringUtils.format(
      "INSERT INTO %s ( audit_key, type, author, comment, created_date, payload) VALUES (:audit_key, :type, :author, :comment, :created_date, :payload)",
      getAuditTable()
  );
  handle.createStatement(insertSql)
        .bind("audit_key", auditEntry.getKey())
        .bind("type", auditEntry.getType())
        .bind("author", auditEntry.getAuditInfo().getAuthor())
        .bind("comment", auditEntry.getAuditInfo().getComment())
        .bind("created_date", auditEntry.getAuditTime().toString())
        .bind("payload", jsonMapper.writeValueAsBytes(auditEntry))
        .execute();
}
@Override
public boolean doMonitor(ServiceEmitter emitter)
{
  for (Map.Entry<String, EventReceiverFirehoseMetric> entry : register.getMetrics()) {
    final String serviceName = entry.getKey();
    final EventReceiverFirehoseMetric metric = entry.getValue();

    // Gauge: current buffer occupancy, tagged with the buffer's total capacity.
    emitter.emit(
        createEventBuilder(serviceName)
            .setDimension("bufferCapacity", String.valueOf(metric.getCapacity()))
            .build("ingest/events/buffered", metric.getCurrentBufferSize())
    );

    // Counter: emit only the delta of bytes received since the previous report
    // (keyedDiff returns null on the first observation of a service).
    final Map<String, Long> deltas = keyedDiff.to(
        serviceName,
        ImmutableMap.of("ingest/bytes/received", metric.getBytesReceived())
    );
    if (deltas != null) {
      final ServiceMetricEvent.Builder deltaBuilder = createEventBuilder(serviceName);
      for (Map.Entry<String, Long> delta : deltas.entrySet()) {
        emitter.emit(deltaBuilder.build(delta.getKey(), delta.getValue()));
      }
    }
  }
  return true;
}
@Override public void emit(ServiceEmitter emitter) { for (String dir : dirList) { DirUsage du = null; try { du = sigar.getDirUsage(dir); } catch (SigarException e) { log.error("Failed to get DiskUsage for [%s] due to [%s]", dir, e.getMessage()); } if (du != null) { final Map<String, Long> stats = ImmutableMap.of( "sys/storage/used", du.getDiskUsage() ); final ServiceMetricEvent.Builder builder = builder() .setDimension("fsDirName", dir); // fsDirName because FsStats uses fsDirName MonitorUtils.addDimensionsToBuilder(builder, dimensions); for (Map.Entry<String, Long> entry : stats.entrySet()) { emitter.emit(builder.build(entry.getKey(), entry.getValue())); } } } } }
/**
 * Emits a single tier-scoped metric value.
 *
 * @param emitter    destination for the event
 * @param metricName name of the metric to emit
 * @param tier       tier the value applies to (set as the tier dimension)
 * @param value      metric value
 */
private void emitTieredStat(
    final ServiceMetricEvent emitter,
    final String metricName,
    final String tier,
    final double value
)
{
  final ServiceMetricEvent.Builder tieredBuilder = new ServiceMetricEvent.Builder();
  tieredBuilder.setDimension(DruidMetrics.TIER, tier);
  emitter.emit(tieredBuilder.build(metricName, value));
}
/**
 * Builds a metric event builder pre-populated with the given service name and the
 * monitor-wide extra dimensions.
 */
private ServiceMetricEvent.Builder createEventBuilder(String serviceName)
{
  final ServiceMetricEvent.Builder eventBuilder =
      ServiceMetricEvent.builder().setDimension("serviceName", serviceName);
  MonitorUtils.addDimensionsToBuilder(eventBuilder, dimensions);
  return eventBuilder;
}
}
/**
 * Emits the max/capacity/used/init gauges for this GC generation space. All four events
 * share the same dimension set: the caller-supplied dimensions plus this space's name.
 */
void emit(ServiceEmitter emitter, Map<String, String[]> dimensions)
{
  final ServiceMetricEvent.Builder eventBuilder = builder();
  MonitorUtils.addDimensionsToBuilder(eventBuilder, dimensions);
  eventBuilder.setDimension("gcGenSpaceName", name);

  // One event per counter.
  emitter.emit(eventBuilder.build("jvm/gc/mem/max", maxCounter.getLong()));
  emitter.emit(eventBuilder.build("jvm/gc/mem/capacity", capacityCounter.getLong()));
  emitter.emit(eventBuilder.build("jvm/gc/mem/used", usedCounter.getLong()));
  emitter.emit(eventBuilder.build("jvm/gc/mem/init", initCounter.getLong()));
}
}
/**
 * Emits capacity/used/count gauges for every JVM buffer pool (e.g. "direct", "mapped"),
 * tagged with the pool name plus the monitor-wide extra dimensions.
 */
private void emitDirectMemMetrics(ServiceEmitter emitter)
{
  for (BufferPoolMXBean pool : ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class)) {
    final ServiceMetricEvent.Builder eventBuilder = builder().setDimension("bufferpoolName", pool.getName());
    MonitorUtils.addDimensionsToBuilder(eventBuilder, dimensions);
    emitter.emit(eventBuilder.build("jvm/bufferpool/capacity", pool.getTotalCapacity()));
    emitter.emit(eventBuilder.build("jvm/bufferpool/used", pool.getMemoryUsed()));
    emitter.emit(eventBuilder.build("jvm/bufferpool/count", pool.getCount()));
  }
}
/**
 * Sets a single dimension on the underlying event builder.
 * Enforces the single-owner-thread contract via {@code checkModifiedFromOwnerThread()}
 * before mutating the builder.
 */
protected void setDimension(String dimension, String value)
{
  checkModifiedFromOwnerThread();
  builder.setDimension(dimension, value);
}