/**
 * Reports dataset write metrics for the given number of operations and the number of bytes written.
 */
private void reportWrite(int numOps, int dataSize) {
  if (metricsCollector != null) {
    metricsCollector.increment(Constants.Metrics.Name.Dataset.WRITE_COUNT, numOps);
    metricsCollector.increment(Constants.Metrics.Name.Dataset.WRITE_BYTES, dataSize);
    metricsCollector.increment(Constants.Metrics.Name.Dataset.OP_COUNT, numOps);
  }
}
/**
 * Persists all {@link PendingStoreRequest}s currently in the queue with the given writer.
 */
void persist(StoreRequestWriter<?> writer) {
  // Capture all current events.
  // The reason for capturing instead of using a live iterator is to avoid a potentially unbounded write
  // time: e.g. while generating the entry to write to the storage table, a new store request could get
  // enqueued. The number of requests in the queue is bounded by the number of threads that call this
  // method. Since this method is expected to be called (indirectly) from an HTTP handler thread, that
  // number is in turn bounded by the thread pool size used by the HTTP service.
  inflightRequests.clear();
  PendingStoreRequest request = writeQueue.poll();
  while (request != null) {
    inflightRequests.add(request);
    request = writeQueue.poll();
  }

  metricsCollector.gauge("persist.queue.size", inflightRequests.size());

  try {
    writer.write(inflightRequests.iterator());
    completeAll(null);
  } catch (Throwable t) {
    completeAll(t);
  }
}
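For context, here is a minimal, self-contained sketch of the capture-then-write pattern used above. DrainingPersister and its Consumer-based writer are illustrative stand-ins, not the actual writeQueue/StoreRequestWriter types:

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Consumer;

// Sketch only: drains the queue into a local snapshot before writing.
final class DrainingPersister<T> {
  private final Queue<T> writeQueue = new ConcurrentLinkedQueue<>();
  private final List<T> inflight = new ArrayList<>();

  void enqueue(T request) {
    writeQueue.add(request);
  }

  void persist(Consumer<List<T>> writer) {
    // Drain only what is queued right now; requests enqueued while the
    // writer runs are left for a later persist() call, so a single write
    // cannot be extended indefinitely by concurrent producers.
    inflight.clear();
    T request = writeQueue.poll();
    while (request != null) {
      inflight.add(request);
      request = writeQueue.poll();
    }
    writer.accept(inflight);
  }
}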
private void incrementMetric(String metricName, long value) {
  if (metrics != null) {
    metrics.increment(metricName, value);
  }
}
pendingStoreQueue.enqueue(pendingStoreRequest);
metricsCollector.increment("persist.requested", 1L);

if (pendingStoreRequest.isSuccess()) {
  metricsCollector.increment("persist.success", 1L);
  // Non-transactional writes carry no rollback information.
  if (!pendingStoreRequest.isTransactional()) {
    return null;
  }
  // For transactional writes, return the rollback detail covering the range that was written.
  return new SimpleRollbackDetail(pendingStoreRequest.getTransactionWritePointer(),
                                  pendingStoreRequest.getStartTimestamp(),
                                  pendingStoreRequest.getStartSequenceId(),
                                  pendingStoreRequest.getEndTimestamp(),
                                  pendingStoreRequest.getEndSequenceId());
} else {
  metricsCollector.increment("persist.failure", 1L);
  Throwables.propagateIfInstanceOf(pendingStoreRequest.getFailureCause(), IOException.class);
  throw new IOException("Unable to write message to " + storeRequest.getTopicId(),
                        pendingStoreRequest.getFailureCause());
}
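A minimal sketch of how such a pending request could hand its outcome from the persisting thread (the completeAll() path above) back to the enqueuing thread, assuming CountDownLatch-based completion. CompletableRequest and its members are illustrative assumptions, not the actual PendingStoreRequest API:

import java.util.concurrent.CountDownLatch;

// Sketch only: a request whose outcome is published by one thread
// and observed by another.
final class CompletableRequest {
  private final CountDownLatch done = new CountDownLatch(1);
  private volatile Throwable failureCause;

  // Called by the persisting thread once the write has finished;
  // a null failure means success.
  void complete(Throwable failure) {
    this.failureCause = failure;
    done.countDown();
  }

  // Called by the enqueuing thread to block until the outcome is known.
  boolean isSuccess() throws InterruptedException {
    done.await();
    return failureCause == null;
  }

  Throwable getFailureCause() {
    return failureCause;
  }
}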
public void add(List<Fact> facts) {
  // Simply collecting all rows/cols/values that need to be put to the underlying table.
  NavigableMap<byte[], NavigableMap<byte[], byte[]>> gaugesTable = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  NavigableMap<byte[], NavigableMap<byte[], byte[]>> incrementsTable = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  for (Fact fact : facts) {
    for (Measurement measurement : fact.getMeasurements()) {
      byte[] rowKey = codec.createRowKey(fact.getDimensionValues(), measurement.getName(), fact.getTimestamp());
      byte[] column = codec.createColumn(fact.getTimestamp());

      if (MeasureType.COUNTER == measurement.getType()) {
        inc(incrementsTable, rowKey, column, measurement.getValue());
      } else {
        set(gaugesTable, rowKey, column, Bytes.toBytes(measurement.getValue()));
      }
    }
  }

  NavigableMap<byte[], NavigableMap<byte[], Long>> convertedIncrementsTable =
    Maps.transformValues(incrementsTable, TRANSFORM_MAP_BYTE_ARRAY_TO_LONG);
  NavigableMap<byte[], NavigableMap<byte[], Long>> convertedGaugesTable =
    Maps.transformValues(gaugesTable, TRANSFORM_MAP_BYTE_ARRAY_TO_LONG);
  // todo: replace with single call, to be able to optimize rpcs in underlying table
  timeSeriesTable.put(convertedGaugesTable);
  timeSeriesTable.increment(convertedIncrementsTable);

  if (metrics != null) {
    metrics.increment(putCountMetric, convertedGaugesTable.size());
    metrics.increment(incrementCountMetric, convertedIncrementsTable.size());
  }
}
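A minimal sketch of the counter-vs-gauge split that inc() and set() implement above: counter values for the same row/column must be merged additively, while a later gauge value simply replaces the earlier one. MetricBuffer and its String keys are illustrative stand-ins for the byte[] row/column keys used by the real tables:

import java.util.Map;
import java.util.TreeMap;

// Sketch only: buffers increments and gauges separately before flushing.
final class MetricBuffer {
  private final Map<String, Long> increments = new TreeMap<>();
  private final Map<String, Long> gauges = new TreeMap<>();

  void counter(String key, long delta) {
    // Two COUNTER facts for the same key add up...
    increments.merge(key, delta, Long::sum);
  }

  void gauge(String key, long value) {
    // ...while a later GAUGE fact overwrites the earlier value.
    gauges.put(key, value);
  }
}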