/**
 * Records a successfully completed write by reporting its end-to-end latency
 * against the total-write-latency statistic.
 *
 * @param elapsed Time taken by the write operation.
 */
void writeCompleted(Duration elapsed) {
    this.totalWriteLatency.reportSuccessEvent(elapsed);
}
/**
 * Records a completed BookKeeper write: reports its latency against the write-latency
 * statistic and increments the BookKeeper write-bytes counter by the bytes written.
 *
 * @param length  Number of bytes written.
 * @param elapsed Time taken by the write operation.
 */
void bookKeeperWriteCompleted(int length, Duration elapsed) {
    this.writeLatency.reportSuccessEvent(elapsed);
    DYNAMIC_LOGGER.incCounterValue(MetricsNames.BK_WRITE_BYTES, length);
}
} // NOTE(review): closes the enclosing class, whose declaration is outside this view.
/**
 * Forwards the successful-event report to the underlying stats-logger instance.
 *
 * @param duration Elapsed time of the successful event.
 */
@Override
public void reportSuccessEvent(Duration duration) {
    getInstance().reportSuccessEvent(duration);
}
/**
 * Records the outcome of a single in-memory commit: the latency of the commit itself
 * and the number of metadata update transactions it contained.
 *
 * @param metadataUpdateTxnCount Number of metadata update transactions committed.
 * @param elapsed                Time taken by the in-memory commit.
 */
public void memoryCommit(int metadataUpdateTxnCount, Duration elapsed) {
    // The two reports target independent statistics; order is not significant.
    this.memoryCommitLatency.reportSuccessEvent(elapsed);
    this.metadataCommitTxnCount.reportSuccessValue(metadataUpdateTxnCount);
}
/**
 * Records a batch of completed operations: their commit latency and the increase in
 * the operation-log size counter.
 *
 * @param operationCount Number of operations that completed.
 * @param commitElapsed  Time taken to commit the batch.
 */
public void operationsCompleted(int operationCount, Duration commitElapsed) {
    // The two reports target independent metrics; order is not significant.
    this.operationCommitLatency.reportSuccessEvent(commitElapsed);
    DYNAMIC_LOGGER.incCounterValue(this.operationLogSize, operationCount);
}
/**
 * Reads a range of bytes from the file backing the given segment into the supplied buffer.
 *
 * @param handle       Handle identifying the segment to read from.
 * @param offset       Offset within the segment at which to start reading.
 * @param buffer       Destination buffer.
 * @param bufferOffset Offset within {@code buffer} at which to start writing.
 * @param length       Number of bytes to read.
 * @return The total number of bytes read (equals {@code length} on success).
 * @throws IOException              If the file cannot be read, or it ends before
 *                                  {@code length} bytes are available.
 * @throws IllegalArgumentException If {@code offset} is beyond the current segment size.
 */
private int doRead(SegmentHandle handle, long offset, byte[] buffer, int bufferOffset, int length) throws IOException {
    long traceId = LoggerHelpers.traceEnter(log, "read", handle.getSegmentName(), offset, bufferOffset, length);
    Timer timer = new Timer();
    Path path = Paths.get(config.getRoot(), handle.getSegmentName());
    long fileSize = Files.size(path);
    if (fileSize < offset) {
        throw new IllegalArgumentException(String.format("Reading at offset (%d) which is beyond the " +
                "current size of segment (%d).", offset, fileSize));
    }

    try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) {
        int totalBytesRead = 0;
        while (length > 0) {
            ByteBuffer readBuffer = ByteBuffer.wrap(buffer, bufferOffset, length);
            // BUG FIX: read at the advancing file position (offset + totalBytesRead).
            // The previous code always read at the original 'offset', so any partial read
            // re-read the same file bytes into later positions of the destination buffer.
            int bytesRead = channel.read(readBuffer, offset + totalBytesRead);
            if (bytesRead < 0) {
                // BUG FIX: a -1 EOF return used to be folded into the counters, which both
                // corrupted them and made the loop spin forever ('length -= -1' grows length).
                throw new IOException(String.format(
                        "Reached end of file for segment '%s' at offset %d before reading %d remaining bytes.",
                        handle.getSegmentName(), offset + totalBytesRead, length));
            }

            bufferOffset += bytesRead;
            totalBytesRead += bytesRead;
            length -= bytesRead;
        }

        FileSystemMetrics.READ_LATENCY.reportSuccessEvent(timer.getElapsed());
        FileSystemMetrics.READ_BYTES.add(totalBytesRead);
        LoggerHelpers.traceLeave(log, "read", traceId, totalBytesRead);
        return totalBytesRead;
    }
}
/**
 * If there isn't already an append outstanding against the store, write a new one.
 * Appends are opportunistically batched here. i.e. If many are waiting they are combined into a single append and
 * that is written.
 */
private void performNextWrite() {
    Append append = getNextAppend();
    if (append == null) {
        // Nothing eligible to write right now (no queued appends, or one is already outstanding).
        return;
    }

    long traceId = LoggerHelpers.traceEnter(log, "storeAppend", append);
    Timer timer = new Timer();
    storeAppend(append)
            .whenComplete((v, e) -> {
                handleAppendResult(append, e);
                LoggerHelpers.traceLeave(log, "storeAppend", traceId, v, e);
                // Report latency under the success or failure statistic depending on outcome.
                if (e == null) {
                    WRITE_STREAM_SEGMENT.reportSuccessEvent(timer.getElapsed());
                } else {
                    WRITE_STREAM_SEGMENT.reportFailEvent(timer.getElapsed());
                }
            })
            // Release the append's data buffer regardless of outcome. Chained as a second
            // whenComplete so the data is only released after the result handler above ran.
            .whenComplete((v, e) -> append.getData().release());
}
// NOTE(review): fragment — tail end of a write method whose beginning is outside this view.
// Reports the write's latency and byte count to the file-system metrics, then closes the trace.
FileSystemMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
FileSystemMetrics.WRITE_BYTES.add(totalBytesWritten);
LoggerHelpers.traceLeave(log, "write", traceId);
/**
 * Handles a ReadSegment request: verifies the delegation token, issues an async read
 * against the segment store, and forwards the result to the client.
 *
 * @param readSegment The incoming read request.
 */
@Override
public void readSegment(ReadSegment readSegment) {
    Timer timer = new Timer();
    final String segment = readSegment.getSegment();
    final String operation = "readSegment";

    // Bail out if the delegation token does not authorize this read; verifyToken is
    // responsible for responding to the client in that case.
    if (!verifyToken(segment, readSegment.getOffset(), readSegment.getDelegationToken(), operation)) {
        return;
    }

    // Clamp the client's suggested length into [TYPE_PLUS_LENGTH_SIZE, MAX_READ_SIZE].
    final int readSize = min(MAX_READ_SIZE, max(TYPE_PLUS_LENGTH_SIZE, readSegment.getSuggestedLength()));
    long trace = LoggerHelpers.traceEnter(log, operation, readSegment);
    segmentStore.read(segment, readSegment.getOffset(), readSize, TIMEOUT)
            .thenAccept(readResult -> {
                LoggerHelpers.traceLeave(log, operation, trace, readResult);
                handleReadResult(readSegment, readResult);
                readStreamSegment.reportSuccessEvent(timer.getElapsed());
            })
            // NOTE(review): failures are not reported to the readStreamSegment statistic here
            // (unlike createSegment, which calls reportFailEvent) — confirm this is intended.
            .exceptionally(ex -> handleException(readSegment.getOffset(), segment, operation, wrapCancellationException(ex)));
}
/**
 * Reads up to {@code length} bytes from the given segment into {@code buffer}, retrying
 * transient failures per the HDFS_RETRY policy.
 *
 * @param handle       Handle identifying the segment to read from.
 * @param offset       Offset within the segment at which to start reading.
 * @param buffer       Destination buffer.
 * @param bufferOffset Offset within {@code buffer} at which to start writing.
 * @param length       Number of bytes to read.
 * @return The total number of bytes read.
 * @throws StreamSegmentException        If the underlying read fails (converted from the HDFS exception).
 * @throws ArrayIndexOutOfBoundsException If any of the offsets/length are negative or do not fit in the buffer.
 */
@Override
public int read(SegmentHandle handle, long offset, byte[] buffer, int bufferOffset, int length) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "read", handle, offset, length);
    // Validate buffer bounds up front so failures surface before any I/O or retries.
    if (offset < 0 || bufferOffset < 0 || length < 0 || buffer.length < bufferOffset + length) {
        throw new ArrayIndexOutOfBoundsException(String.format(
                "Offset (%s) must be non-negative, and bufferOffset (%s) and length (%s) must be valid indices into buffer of size %s.",
                offset, bufferOffset, length, buffer.length));
    }

    Timer timer = new Timer();
    try {
        // The retry policy re-runs the whole read on transient failures; metrics and trace
        // are only reported on the attempt that succeeds. Note the latency reported covers
        // all attempts, since the Timer is started before the retry loop.
        return HDFS_RETRY.run(() -> {
            int totalBytesRead = readInternal(handle, buffer, offset, bufferOffset, length);
            HDFSMetrics.READ_LATENCY.reportSuccessEvent(timer.getElapsed());
            HDFSMetrics.READ_BYTES.add(totalBytesRead);
            LoggerHelpers.traceLeave(log, "read", traceId, handle, offset, totalBytesRead);
            return totalBytesRead;
        });
    } catch (IOException e) {
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), e);
    } catch (RetriesExhaustedException e) {
        // Retries exhausted: surface the last underlying failure, not the wrapper.
        throw HDFSExceptionHelpers.convertException(handle.getSegmentName(), e.getCause());
    }
}
// NOTE(review): fragment — tail end of a write method whose beginning is outside this view.
// Reports the write's latency and byte count to the HDFS metrics, then closes the trace.
HDFSMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
HDFSMetrics.WRITE_BYTES.add(length);
LoggerHelpers.traceLeave(log, "write", traceId, handle, offset, length);
/**
 * Handles a CreateSegment request: verifies the delegation token, creates the segment with
 * its scale-policy attributes, replies to the client, and records success/failure metrics.
 *
 * @param createStreamSegment The incoming create request.
 */
@Override
public void createSegment(CreateSegment createStreamSegment) {
    Timer timer = new Timer();
    final String operation = "createSegment";

    // Initial attributes: scale policy type/rate (replaceable) and a one-time creation timestamp.
    Collection<AttributeUpdate> attributes = Arrays.asList(
            new AttributeUpdate(SCALE_POLICY_TYPE, AttributeUpdateType.Replace, ((Byte) createStreamSegment.getScaleType()).longValue()),
            new AttributeUpdate(SCALE_POLICY_RATE, AttributeUpdateType.Replace, ((Integer) createStreamSegment.getTargetRate()).longValue()),
            new AttributeUpdate(CREATION_TIME, AttributeUpdateType.None, System.currentTimeMillis())
    );

    // Bail out if the delegation token does not authorize this creation; verifyToken is
    // responsible for responding to the client in that case.
    if (!verifyToken(createStreamSegment.getSegment(), createStreamSegment.getRequestId(), createStreamSegment.getDelegationToken(), operation)) {
        return;
    }

    log.info(createStreamSegment.getRequestId(), "Creating stream segment {}.", createStreamSegment);
    segmentStore.createStreamSegment(createStreamSegment.getSegment(), attributes, TIMEOUT)
            .thenAccept(v -> {
                // Success path: record latency and acknowledge to the client.
                this.createStreamSegment.reportSuccessEvent(timer.getElapsed());
                connection.send(new SegmentCreated(createStreamSegment.getRequestId(), createStreamSegment.getSegment()));
            })
            .whenComplete((res, e) -> {
                if (e == null) {
                    // Notify the stats recorder (if configured) after the segment exists.
                    if (statsRecorder != null) {
                        statsRecorder.createSegment(createStreamSegment.getSegment(),
                                createStreamSegment.getScaleType(), createStreamSegment.getTargetRate());
                    }
                } else {
                    // Failure path: record latency under the fail statistic and report the error.
                    this.createStreamSegment.reportFailEvent(timer.getElapsed());
                    handleException(createStreamSegment.getRequestId(), createStreamSegment.getSegment(), operation, e);
                }
            });
}
/**
 * Test Event and Value registered and worked well with OpStats.
 */
@Test
public void testOpStatsData() {
    Timer startTime = new Timer();
    OpStatsLogger opStatsLogger = statsLogger.createStats("testOpStatsLogger");
    // Report 1 success event and 1 fail event...
    opStatsLogger.reportSuccessEvent(startTime.getElapsed());
    opStatsLogger.reportFailEvent(startTime.getElapsed());
    // ...then 3 success values and 2 fail values.
    opStatsLogger.reportSuccessValue(startTime.getElapsedMillis());
    opStatsLogger.reportFailValue(startTime.getElapsedMillis());
    opStatsLogger.reportSuccessValue(1);
    opStatsLogger.reportFailValue(1);
    opStatsLogger.reportSuccessValue(1);

    OpStatsData statsData = opStatsLogger.toOpStatsData();
    // 4 successes = 1 success event + 3 success values; 3 failures = 1 fail event + 2 fail values.
    assertEquals(4, statsData.getNumSuccessfulEvents());
    assertEquals(3, statsData.getNumFailedEvents());
}