/**
 * Records a successfully completed write by folding its duration into the
 * total-write latency statistics.
 *
 * @param elapsed time taken by the completed write.
 */
void writeCompleted(Duration elapsed) {
    this.totalWriteLatency.reportSuccessEvent(elapsed);
}
/**
 * Reports a snapshot of the current processing state: how many operations are
 * queued and how many are currently in flight.
 *
 * @param queueSize     current size of the operation queue.
 * @param inFlightCount number of operations currently in flight.
 */
public void currentState(int queueSize, int inFlightCount) {
    this.operationQueueSize.reportSuccessValue(queueSize);
    this.operationsInFlight.reportSuccessValue(inFlightCount);
}
/**
 * Closes this metrics reporter by releasing the underlying generation-spread
 * stats logger.
 */
@Override
public void close() {
    this.generationSpread.close();
}
}
/**
 * Records the outcome of a memory commit: the number of metadata update
 * transactions it contained and how long the commit took.
 *
 * @param metadataUpdateTxnCount number of metadata update transactions committed.
 * @param elapsed                time taken by the memory commit.
 */
public void memoryCommit(int metadataUpdateTxnCount, Duration elapsed) {
    this.metadataCommitTxnCount.reportSuccessValue(metadataUpdateTxnCount);
    this.memoryCommitLatency.reportSuccessEvent(elapsed);
}
/**
 * Verifies that events and values reported through an OpStatsLogger are
 * aggregated correctly into the success/failure counts of OpStatsData.
 */
@Test
public void testOpStatsData() {
    Timer startTime = new Timer();
    OpStatsLogger opStatsLogger = statsLogger.createStats("testOpStatsLogger");
    // Report a mix of events and values: 4 successes (1 event + 3 values)
    // and 3 failures (1 event + 2 values).
    opStatsLogger.reportSuccessEvent(startTime.getElapsed());
    opStatsLogger.reportFailEvent(startTime.getElapsed());
    opStatsLogger.reportSuccessValue(startTime.getElapsedMillis());
    opStatsLogger.reportFailValue(startTime.getElapsedMillis());
    opStatsLogger.reportSuccessValue(1);
    opStatsLogger.reportFailValue(1);
    opStatsLogger.reportSuccessValue(1);
    OpStatsData statsData = opStatsLogger.toOpStatsData();
    // 4 successes = 1 success event + 3 success values.
    assertEquals(4, statsData.getNumSuccessfulEvents());
    // 3 failures = 1 fail event + 2 fail values.
    assertEquals(3, statsData.getNumFailedEvents());
}
/**
 * If there isn't already an append outstanding against the store, write a new one.
 * Appends are opportunistically batched here. i.e. If many are waiting they are combined into a single append and
 * that is written.
 */
private void performNextWrite() {
    Append append = getNextAppend();
    if (append == null) {
        // Nothing to write (per the contract above, either nothing is queued or
        // an append is already outstanding).
        return;
    }

    long traceId = LoggerHelpers.traceEnter(log, "storeAppend", append);
    Timer timer = new Timer();
    storeAppend(append)
            .whenComplete((v, e) -> {
                handleAppendResult(append, e);
                LoggerHelpers.traceLeave(log, "storeAppend", traceId, v, e);
                // Record the write latency as a success or a failure depending
                // on whether the store append completed exceptionally.
                if (e == null) {
                    WRITE_STREAM_SEGMENT.reportSuccessEvent(timer.getElapsed());
                } else {
                    WRITE_STREAM_SEGMENT.reportFailEvent(timer.getElapsed());
                }
            })
            // Release the append's buffer unconditionally, on success or failure.
            .whenComplete((v, e) -> append.getData().release());
}
/**
 * Returns the aggregated statistics snapshot from the underlying logger instance.
 */
@Override
public OpStatsData toOpStatsData() {
    return getInstance().toOpStatsData();
}
/**
 * Records the elapsed time of every failed operation in the given batch against
 * both the per-instance and the global operation-latency statistics.
 *
 * @param operations the operations that failed.
 */
public void operationsFailed(Collection<CompletableOperation> operations) {
    for (CompletableOperation op : operations) {
        long elapsedMillis = op.getTimer().getElapsedMillis();
        this.operationLatency.reportFailValue(elapsedMillis);
        GLOBAL_OPERATION_LATENCY.reportFailValue(elapsedMillis);
    }
}
}
/**
 * Forwards a failed-event report (with its duration) to the underlying logger instance.
 */
@Override
public void reportFailEvent(Duration duration) {
    getInstance().reportFailEvent(duration);
}
/**
 * Handles a CreateSegment wire command: verifies the delegation token, creates the
 * segment in the store with its scale-policy/creation-time attributes, replies with
 * SegmentCreated on success, and records success/failure latency metrics.
 */
@Override
public void createSegment(CreateSegment createStreamSegment) {
    Timer timer = new Timer();
    final String operation = "createSegment";

    // Initial attributes for the new segment: scale policy type/rate and creation time.
    Collection<AttributeUpdate> attributes = Arrays.asList(
            new AttributeUpdate(SCALE_POLICY_TYPE, AttributeUpdateType.Replace, ((Byte) createStreamSegment.getScaleType()).longValue()),
            new AttributeUpdate(SCALE_POLICY_RATE, AttributeUpdateType.Replace, ((Integer) createStreamSegment.getTargetRate()).longValue()),
            new AttributeUpdate(CREATION_TIME, AttributeUpdateType.None, System.currentTimeMillis())
    );

    // Reject the request outright if the delegation token does not check out
    // (verifyToken is responsible for any error reply to the client).
    if (!verifyToken(createStreamSegment.getSegment(), createStreamSegment.getRequestId(), createStreamSegment.getDelegationToken(), operation)) {
        return;
    }

    log.info(createStreamSegment.getRequestId(), "Creating stream segment {}.", createStreamSegment);
    segmentStore.createStreamSegment(createStreamSegment.getSegment(), attributes, TIMEOUT)
            // Success path: record latency and acknowledge to the client.
            .thenAccept(v -> {
                this.createStreamSegment.reportSuccessEvent(timer.getElapsed());
                connection.send(new SegmentCreated(createStreamSegment.getRequestId(), createStreamSegment.getSegment()));
            })
            .whenComplete((res, e) -> {
                if (e == null) {
                    // statsRecorder may be absent (e.g. stats disabled) - guard before use.
                    if (statsRecorder != null) {
                        statsRecorder.createSegment(createStreamSegment.getSegment(), createStreamSegment.getScaleType(), createStreamSegment.getTargetRate());
                    }
                } else {
                    // Failure path: record latency as a failure and translate the
                    // exception into an error reply for the client.
                    this.createStreamSegment.reportFailEvent(timer.getElapsed());
                    handleException(createStreamSegment.getRequestId(), createStreamSegment.getSegment(), operation, e);
                }
            });
}
// NOTE(review): this fragment appears garbled/truncated by extraction - the bare
// "expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());" lines are the tails
// of Assert.assertEquals calls whose opening line (message + opening paren) is
// missing, and the "should not increase" assertion pair is duplicated. Restore the
// missing tokens from the original test source before relying on this code.
long expectedMetricsSuccesses = FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents();
expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
expectedMetricsSuccesses += 1;
Assert.assertEquals("WRITE_LATENCY should increase the count of successful events in case of successful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
Assert.assertEquals("WRITE_BYTES should increase by the size of successful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
/**
 * Forwards a failed-value report to the underlying logger instance.
 */
@Override
public void reportFailValue(long value) {
    getInstance().reportFailValue(value);
}
/**
 * Records a processed operation batch: the batch size and the time spent
 * processing it, both as successful values.
 *
 * @param batchSize number of operations in the processed batch.
 * @param millis    processing time for the batch, in milliseconds.
 */
public void processOperations(int batchSize, long millis) {
    this.processOperationsBatchSize.reportSuccessValue(batchSize);
    this.processOperationsLatency.reportSuccessValue(millis);
}
/**
 * Records a completed BookKeeper write: its latency into the write-latency stats
 * and its payload size into the global BK_WRITE_BYTES counter.
 *
 * @param length  number of bytes written.
 * @param elapsed time taken by the write.
 */
void bookKeeperWriteCompleted(int length, Duration elapsed) {
    this.writeLatency.reportSuccessEvent(elapsed);
    DYNAMIC_LOGGER.incCounterValue(MetricsNames.BK_WRITE_BYTES, length);
}
}
/**
 * Verifies that createSegment against an in-memory store succeeds, notifies the
 * client connection in the expected order, and records only successful events in
 * the create-segment statistics.
 */
@Test(timeout = 20000)
public void testCreateSegment() throws Exception {
    // Set up a PravegaRequestProcessor instance to execute requests against.
    String streamSegmentName = "testCreateSegment";
    @Cleanup
    ServiceBuilder serviceBuilder = newInlineExecutionInMemoryBuilder(getBuilderConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    ServerConnection connection = mock(ServerConnection.class);
    InOrder order = inOrder(connection);
    PravegaRequestProcessor processor = new PravegaRequestProcessor(store, mock(TableStore.class), connection);

    // Execute and verify that the createSegment/getStreamSegmentInfo calling stack
    // is executed as designed.
    processor.createSegment(new WireCommands.CreateSegment(1, streamSegmentName, WireCommands.CreateSegment.NO_SCALE, 0, ""));
    assertTrue(append(streamSegmentName, 1, store));
    processor.getStreamSegmentInfo(new WireCommands.GetStreamSegmentInfo(1, streamSegmentName, ""));
    assertTrue(append(streamSegmentName, 2, store));
    order.verify(connection).send(new WireCommands.SegmentCreated(1, streamSegmentName));
    order.verify(connection).send(Mockito.any(WireCommands.StreamSegmentInfo.class));

    // TestCreateSealDelete may have executed before this test case, so
    // createSegmentStats may record 1 or 2 createSegment operations here;
    // assert only that there is at least one success and no failures.
    OpStatsData createSegmentStats = processor.getCreateStreamSegment().toOpStatsData();
    assertNotEquals(0, createSegmentStats.getNumSuccessfulEvents());
    assertEquals(0, createSegmentStats.getNumFailedEvents());
}
/**
 * Records the observed operation-processor delay as a successful value.
 *
 * @param millis measured processing delay, in milliseconds.
 */
public void processingDelay(int millis) {
    this.operationProcessorDelay.reportSuccessValue(millis);
}
/**
 * Forwards a successful-event report (with its duration) to the underlying logger instance.
 */
@Override
public void reportSuccessEvent(Duration duration) {
    getInstance().reportSuccessEvent(duration);
}
/**
 * Records how long an operation waited in the queue before being processed.
 *
 * @param queueWaitTimeMillis queue wait time, in milliseconds.
 */
public void operationQueueWaitTime(long queueWaitTimeMillis) {
    this.operationQueueWaitTime.reportSuccessValue(queueWaitTimeMillis);
}