@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    super.prepare(conf, context, collector);
    _collector = collector;
    try {
        connection = SolrConnection.getConnection(conf, BOLT_TYPE);
    } catch (Exception e) {
        // Pass the throwable as the last argument with no "{}" placeholder so
        // SLF4J logs the full stack trace; the old "{}: {}" form only printed
        // e.toString() and discarded the trace.
        LOG.error("Can't connect to Solr", e);
        throw new RuntimeException(e);
    }
    // 10-second reporting bucket for this bolt's event counters.
    this.eventCounter = context.registerMetric("SolrIndexerBolt", new MultiCountMetric(), 10);
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    // Count every bulk request handed to the backend.
    final String scopeName = "BulkRequest";
    eventCounter.scope(scopeName).incrBy(1);
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    // Trace the bulk's size before it is sent, then count it.
    final int actionCount = request.numberOfActions();
    LOG.debug("beforeBulk {} with {} actions", executionId, actionCount);
    eventCounter.scope("bulks_received").incrBy(1);
}
@Override
public void ack(Object msgId) {
    // Message completed downstream: count the ack and drop it from the
    // in-flight set.
    eventCounter.scope("acked").incrBy(1);
    beingProcessed.remove(msgId);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    // Standard bolt wiring first.
    super.prepare(conf, context, collector);
    _collector = collector;
    // Keep a handle on the config and resolve the target table name.
    this.conf = conf;
    this.tableName = ConfUtils.getString(conf, SQL_INDEX_TABLE_PARAM_NAME);
    // 10-second reporting bucket for the indexer's counters.
    this.eventCounter = context.registerMetric("SQLIndexer", new MultiCountMetric(), 10);
}
@Override
public void fail(Object msgId) {
    // Message failed downstream: count the failure and drop it from the
    // in-flight set.
    eventCounter.scope("failed").incrBy(1);
    beingProcessed.remove(msgId);
}
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    super.prepare(conf, context, collector);
    bucketName = ConfUtils.getString(conf, BUCKET);
    // Fail fast at startup when the target bucket is missing rather than
    // erroring on every tuple later.
    if (!client.doesBucketExist(bucketName)) {
        throw new RuntimeException("Bucket " + bucketName + " does not exist");
    }
    // 10-second reporting bucket for the cache counters.
    this.eventCounter = context.registerMetric("s3cache_counter", new MultiCountMetric(), 10);
}
@Override
public void countBy(final MetricDefinition metric, final long incrementBy, final Object... metricParameters) {
    // Resolve the fully-qualified metric key, then bump its counter.
    counters.scope(generateKey(metric, metricParameters)).incrBy(incrementBy);
}
// Fragment of a prepare()-style method (enclosing definition not visible here):
// hands the topology config to the partitioner, then registers an event counter
// with a 10-second reporting bucket.
partitioner.configure(stormConf); this.eventCounter = context.registerMetric("counter", new MultiCountMetric(), 10);
@Override public void afterBulk(long executionId, BulkRequest request, Throwable throwable) { eventCounter.scope("bulks_received").incrBy(1); LOG.error("Exception with bulk {} - failing the whole lot ", executionId, throwable); synchronized (waitAck) { // WHOLE BULK FAILED // mark all the docs as fail Iterator<DocWriteRequest<?>> itreq = request.requests().iterator(); while (itreq.hasNext()) { DocWriteRequest bir = itreq.next(); String id = bir.id(); List<Tuple> xx = waitAck.getIfPresent(id); if (xx != null) { LOG.debug("Failed {} tuple(s) for ID {}", xx.size(), id); for (Tuple x : xx) { // fail it _collector.fail(x); } waitAck.invalidate(id); } else { LOG.warn("Could not find unacked tuple for {}", id); } } } }
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    super.prepare(conf, context, collector);
    bucketName = ConfUtils.getString(conf, BUCKET);
    // Abort startup early if the configured bucket cannot be found.
    if (!client.doesBucketExist(bucketName)) {
        throw new RuntimeException("Bucket " + bucketName + " does not exist");
    }
    // Counter name is namespaced by the subclass-provided metric prefix;
    // 10-second reporting bucket.
    this.eventCounter = context.registerMetric(getMetricPrefix() + "s3cache_counter", new MultiCountMetric(), 10);
}
private void handleException(String url, Throwable e, Metadata metadata, Tuple tuple, String errorSource, String errorMessage) { LOG.error(errorMessage); // send to status stream in case another component wants to update // its status metadata.setValue(Constants.STATUS_ERROR_SOURCE, errorSource); metadata.setValue(Constants.STATUS_ERROR_MESSAGE, errorMessage); collector.emit(StatusStreamName, tuple, new Values(url, metadata, Status.ERROR)); collector.ack(tuple); // Increment metric that is context specific String s = "error_" + errorSource.replaceAll(" ", "_") + "_"; eventCounter.scope(s + e.getClass().getSimpleName()).incrBy(1); // Increment general metric eventCounter.scope("parse exception").incrBy(1); }
// Continuation of a registerMetric(...) call whose start is outside this view:
// registers a MultiCountMetric keyed by a class's simple name with a
// 10-second reporting interval.
.getSimpleName(), new MultiCountMetric(), 10);
@Override public void open(final Map<String, Object> spoutConfig, final TopologyContext topologyContext) { // Load configuration items. // Determine our time bucket window, in seconds, defaulted to 60. int timeBucketSeconds = 60; if (spoutConfig.containsKey(SpoutConfig.METRICS_RECORDER_TIME_BUCKET)) { final Object timeBucketCfgValue = spoutConfig.get(SpoutConfig.METRICS_RECORDER_TIME_BUCKET); if (timeBucketCfgValue instanceof Number) { timeBucketSeconds = ((Number) timeBucketCfgValue).intValue(); } } // Conditionally enable prefixing with taskId if (spoutConfig.containsKey(SpoutConfig.METRICS_RECORDER_ENABLE_TASK_ID_PREFIX)) { final Object taskIdCfgValue = spoutConfig.get(SpoutConfig.METRICS_RECORDER_ENABLE_TASK_ID_PREFIX); if (taskIdCfgValue instanceof Boolean && (Boolean) taskIdCfgValue) { this.metricPrefix = "task-" + topologyContext.getThisTaskIndex(); } } this.keyBuilder = new KeyBuilder(this.metricPrefix); // Log how we got configured. logger.info("Configured with time window of {} seconds and using taskId prefixes?: {}", timeBucketSeconds, Boolean.toString(metricPrefix.isEmpty())); // Register the top level metrics. assignedValues = topologyContext.registerMetric("GAUGES", new MultiAssignableMetric(), timeBucketSeconds); timers = topologyContext.registerMetric("TIMERS", new MultiReducedMetric(new MeanReducer()), timeBucketSeconds); counters = topologyContext.registerMetric("COUNTERS", new MultiCountMetric(), timeBucketSeconds); }
/** * Internal helper to record the value of a timer. * @param key String representation of the key to record the timer under * @param elapsedTimeMs How long the timer ran for, in milliseconds. */ private void recordTimer(final String key, final long elapsedTimeMs) { // Update averaged timer key timers.scope(key).update(elapsedTimeMs); // Increment total time counter, this keeps a running count of total time spent in this timer counters.scope(key + "_totalTimeMs").incrBy(elapsedTimeMs); }