/**
 * Report the original write timestamp of a SEP operation that was received. Assuming that SEP
 * operations are delivered in the same order as they are originally written in HBase (which
 * will always be the case except for when a region split or move takes place), this metric will
 * always hold the write timestamp of the most recent operation in HBase that has been handled
 * by the SEP system.
 *
 * @param writeTimestamp The write timestamp of the last SEP operation
 */
public void reportSepTimestamp(long writeTimestamp) {
    lastTimestampInputProcessed.set(writeTimestamp);
}
/**
 * Report the original write timestamp of a SEP operation that was received. Assuming that SEP
 * operations are delivered in the same order as they are originally written in HBase (which
 * will always be the case except for when a region split or move takes place), this metric will
 * always hold the write timestamp of the most recent operation in HBase that has been handled
 * by the SEP system.
 *
 * @param writeTimestamp The write timestamp of the last SEP operation
 */
public void reportSepTimestamp(long writeTimestamp) {
    lastTimestampInputProcessed.set(writeTimestamp);
}
/**
 * Record the age (current time minus write time) of the last edit that was applied.
 *
 * @param timestamp write time of the edit
 */
public void setAgeOfLastAppliedOp(long timestamp) {
    long age = System.currentTimeMillis() - timestamp;
    ageOfLastAppliedOp.set(age);
}

@Override
/**
 * Record the age (current time minus write time) of the last edit that was shipped.
 *
 * @param timestamp write time of the edit
 */
public void setAgeOfLastShippedOp(long timestamp) {
    // Remember the raw write time so the age can be recomputed later.
    lastTimestampForAge = timestamp;
    long age = System.currentTimeMillis() - lastTimestampForAge;
    ageOfLastShippedOp.set(age);
}
/**
 * Record a completed action: bump the action's rate metric by the given duration
 * and, for any non-read action, note the time of the last mutation event.
 *
 * @param action   the action that was performed
 * @param duration how long the action took
 */
void report(Action action, long duration) {
    rates.get(action).inc(duration);
    boolean isMutation = (action != Action.READ);
    if (isMutation) {
        lastMutationEventTimestamp.set(System.currentTimeMillis());
    }
}
/**
 * Set a long-valued metric, lazily creating and registering it on first use.
 * Creating a new metric marks the metadata message as needing to be resent.
 *
 * @param name the metric name
 * @param amt  the value to record
 */
public synchronized void setNumericMetric(String name, long amt) {
    MetricsLongValue metric = (MetricsLongValue) registry.get(name);
    if (metric == null) {
        // First sighting of this metric: the constructor registers it in the
        // registry, and the stale flag ensures the new metric gets announced.
        metric = new MetricsLongValue(name, this.registry);
        this.needsUpdateMessage = true;
    }
    metric.set(amt);
}
// Copy the latest cache-statistics snapshot into the exported metric gauges.
// NOTE(review): fragment of a larger method — `stats` is defined outside this view.
this.numAddCache.set(stats.numAdd);
this.numAddNewCache.set(stats.numAddNew);
this.numAddExistingCache.set(stats.numAddExisting);
this.numRenameCache.set(stats.numRename);
this.numRemoveCache.set(stats.numRemove);
this.numEvictCache.set(stats.numEvict);
this.numGetAttempts.set(stats.numGetAttempts);
this.numGetHits.set(stats.numGetHits);
/**
 * Drain queued chunks into {@code events}, blocking until at least one chunk is
 * available, and stopping once roughly {@code maxSize} bytes have been collected.
 *
 * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List, int)
 */
public void collect(List<Chunk> events, int maxSize) throws InterruptedException {
    synchronized (this) {
        // we can't just say queue.take() here, since we're holding a lock.
        while (queue.isEmpty()) {
            this.wait();
        }
        int size = 0;
        // Drain until the queue empties or the collected payload reaches maxSize bytes.
        while (!queue.isEmpty() && (size < maxSize)) {
            Chunk e = this.queue.remove();
            metrics.removedChunk.inc();
            int chunkSize = e.getData().length;
            size += chunkSize;
            dataSize -= chunkSize;
            metrics.dataSize.set(dataSize);
            events.add(e);
        }
        metrics.queueSize.set(queue.size());
        // Wake any producers blocked waiting for queue space to free up.
        this.notifyAll();
    }
    // Logging happens outside the lock to keep the critical section short.
    if (log.isDebugEnabled()) {
        log.debug("WaitingQueue.inQueueCount:" + queue.size() + "\tWaitingQueue.collectCount:" + events.size());
    }
}
/**
 * Drain queued chunks into {@code events}, blocking until at least one chunk is
 * available, and stopping once roughly {@code maxSize} bytes have been collected.
 *
 * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List, int)
 */
public void collect(List<Chunk> events, int maxSize) throws InterruptedException {
    synchronized (this) {
        // we can't just say queue.take() here, since we're holding a lock.
        while (queue.isEmpty()) {
            this.wait();
        }
        int size = 0;
        // Drain until the queue empties or the collected payload reaches maxSize bytes.
        while (!queue.isEmpty() && (size < maxSize)) {
            Chunk e = this.queue.remove();
            metrics.removedChunk.inc();
            int chunkSize = e.getData().length;
            size += chunkSize;
            dataSize -= chunkSize;
            metrics.dataSize.set(dataSize);
            events.add(e);
        }
        metrics.queueSize.set(queue.size());
        // Wake any producers blocked waiting for queue space to free up.
        this.notifyAll();
    }
    // Logging happens outside the lock to keep the critical section short.
    if (log.isDebugEnabled()) {
        log.debug("WaitingQueue.inQueueCount:" + queue.size() + "\tWaitingQueue.collectCount:" + events.size());
    }
}
/**
 * Called from the ReplicationMonitor to process over-replicated blocks.
 * Drains up to 1000 blocks from the backlog under the write lock, then
 * processes them after releasing it.
 */
private void processOverReplicatedBlocksAsync() {
    List<Block> blocksToProcess = new LinkedList<Block>();
    writeLock();
    try {
        // Publish the current backlog size, then take a bounded batch.
        int batchSize = Math.min(overReplicatedBlocks.size(), 1000);
        NameNode.getNameNodeMetrics().numOverReplicatedBlocks.set(overReplicatedBlocks.size());
        blocksToProcess = overReplicatedBlocks.pollN(batchSize);
    } finally {
        writeUnlock();
    }
    // Handle the drained batch outside the lock.
    for (Block block : blocksToProcess) {
        if (NameNode.stateChangeLog.isDebugEnabled()) {
            NameNode.stateChangeLog.debug("BLOCK* NameSystem.processOverReplicatedBlocksAsync: " + block);
        }
        processOverReplicatedBlock(block, (short) -1, null, null);
    }
}
/** * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk) */ public void add(Chunk chunk) throws InterruptedException { assert chunk != null : "can't enqueue null chunks"; synchronized (this) { while (chunk.getData().length + dataSize > MAX_MEM_USAGE) { try { if(dataSize == 0) { //queue is empty, but data is still too big log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType() + " and source =" +chunk.getStreamName()); return; //return without sending; otherwise we'd deadlock. //this error should probably be fatal; there's no way to recover. } metrics.fullQueue.set(1); this.wait(); log.info("MemLimitQueue is full [" + dataSize + "]"); } catch (InterruptedException e) { } } metrics.fullQueue.set(0); dataSize += chunk.getData().length; queue.add(chunk); metrics.addedChunk.inc(); metrics.queueSize.set(queue.size()); metrics.dataSize.set(dataSize); this.notifyAll(); } }
/** * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk) */ public void add(Chunk chunk) throws InterruptedException { assert chunk != null : "can't enqueue null chunks"; int chunkSize = chunk.getData().length; synchronized (this) { if (chunkSize + dataSize > MAX_MEM_USAGE) { if (dataSize == 0) { // queue is empty, but data is still too big log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType() + " and source =" + chunk.getStreamName()); return; // return without sending; otherwise we'd deadlock. // this error should probably be fatal; there's no way to // recover. } else { metrics.fullQueue.set(1); log.warn("Discarding chunk due to NonBlockingMemLimitQueue full [" + dataSize + "]"); return; } } metrics.fullQueue.set(0); dataSize += chunk.getData().length; queue.add(chunk); metrics.addedChunk.inc(); metrics.queueSize.set(queue.size()); metrics.dataSize.set(dataSize); this.notifyAll(); } }
// Export region-server gauges, converting byte counts to MB/KB as the metric
// names indicate.
// NOTE(review): fragment of a larger method — memstoreSize, blockCache, etc.
// are defined outside this view.
this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
this.metrics.mbInMemoryWithoutWAL.set((int) (dataInMemoryWithoutWAL / (1024 * 1024)));
this.metrics.numPutsWithoutWAL.set(numPutsWithoutWAL);
this.metrics.storefileIndexSizeMB.set((int) (storefileIndexSize / (1024 * 1024)));
this.metrics.totalStaticBloomSizeKB.set((int) (totalStaticBloomSize / 1024));
this.metrics.readRequestsCount.set(readRequestsCount);
this.metrics.writeRequestsCount.set(writeRequestsCount);
this.metrics.compactionQueueSize.set(compactSplitThread.getCompactionQueueSize());
// Block-cache occupancy and hit/miss counters.
this.metrics.blockCacheCount.set(blockCache.size());
this.metrics.blockCacheFree.set(blockCache.getFreeSize());
this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
CacheStats cacheStats = blockCache.getStats();
this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
// Hit ratio reported as an integer percentage.
double ratio = blockCache.getStats().getHitRatio();
int percent = (int) (ratio * 100);
// Export NameNode/filesystem gauges, rounding byte capacities to GB.
// NOTE(review): fragment of a larger method — fsNameSystem and the gauges are
// defined outside this view.
blocksTotal.set((int) fsNameSystem.getBlocksTotal());
diskSpaceTotalGB.set(roundBytesToGBytes(fsNameSystem.getDiskSpaceTotal()));
capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
blockCapacity.set(fsNameSystem.getBlockCapacity());
numLeases.set(fsNameSystem.leaseManager.countLease());
numUnderConstructionFiles.set(fsNameSystem.leaseManager.countPath());
upgradeTime.set(fsNameSystem.getUpgradeTime());