/**
 * Builds a human-readable summary of the replication state collected so far:
 * deleted queues, dead regionservers, per-peer WAL queue sizes, the total WAL
 * footprint on HDFS, and the number of WALs that could not be found.
 */
public String dumpReplicationSummary() {
  StringBuilder summary = new StringBuilder();
  if (!deletedQueues.isEmpty()) {
    summary.append("Found ").append(deletedQueues.size()).append(" deleted queues")
      .append(", run hbck -fixReplication in order to remove the deleted replication queues\n");
    for (String queue : deletedQueues) {
      summary.append("    ").append(queue).append("\n");
    }
  }
  if (!deadRegionServers.isEmpty()) {
    summary.append("Found ").append(deadRegionServers.size()).append(" dead regionservers")
      .append(", restart one regionserver to transfer the queues of dead regionservers\n");
    for (String server : deadRegionServers) {
      summary.append("    ").append(server).append("\n");
    }
  }
  if (!peersQueueSize.isEmpty()) {
    summary.append("Dumping all peers's number of WALs in replication queue\n");
    for (Map.Entry<String, Long> peerEntry : peersQueueSize.asMap().entrySet()) {
      summary.append("    PeerId: ").append(peerEntry.getKey())
        .append(" , sizeOfLogQueue: ").append(peerEntry.getValue()).append("\n");
    }
  }
  summary.append("  Total size of WALs on HDFS: ")
    .append(StringUtils.humanSize(totalSizeOfWALs)).append("\n");
  if (numWalsNotFound > 0) {
    summary.append("  ERROR: There are ").append(numWalsNotFound).append(" WALs not found!!!\n");
  }
  return summary.toString();
}
/**
 * Writes the buffered slots {@code slots[offset .. offset+count)} to the
 * stream, syncs it, and signals any post-sync waiters.
 * @return the total number of bytes written to the stream
 */
protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots,
    final int offset, final int count) throws IOException {
  long written = 0;
  for (int idx = offset; idx < offset + count; ++idx) {
    final ByteSlot slot = slots[idx];
    slot.writeTo(stream);
    written += slot.size();
  }
  // Durability point: flush before telling waiters the sync completed.
  syncStream(stream);
  sendPostSyncSignal();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" +
      StringUtils.humanSize(written));
  }
  return written;
}
/**
 * Renders one replication queue's details — peer id, recovery state, dead
 * regionservers, WAL count and per-WAL replication positions — as a printable
 * string, also folding the queue's WAL count into {@code peersQueueSize}.
 */
private String formatQueue(ServerName regionserver, ReplicationQueueStorage queueStorage,
    ReplicationQueueInfo queueInfo, String queueId, List<String> wals, boolean isDeleted,
    boolean hdfs) throws Exception {
  StringBuilder out = new StringBuilder();
  out.append("Dumping replication queue info for RegionServer: [").append(regionserver)
    .append("]").append("\n");
  out.append("    Queue znode: ").append(queueId).append("\n");
  out.append("    PeerID: ").append(queueInfo.getPeerId()).append("\n");
  out.append("    Recovered: ").append(queueInfo.isQueueRecovered()).append("\n");
  List<ServerName> deadServers = queueInfo.getDeadRegionServers();
  if (deadServers.isEmpty()) {
    out.append("    No dead RegionServers found in this queue.").append("\n");
  } else {
    out.append("    Dead RegionServers: ").append(deadServers).append("\n");
  }
  out.append("    Was deleted: ").append(isDeleted).append("\n");
  out.append("    Number of WALs in replication queue: ").append(wals.size()).append("\n");
  // Accumulate this queue's WAL count under its peer for the global summary.
  peersQueueSize.addAndGet(queueInfo.getPeerId(), wals.size());
  for (String wal : wals) {
    long position = queueStorage.getWALPosition(regionserver, queueInfo.getPeerId(), wal);
    out.append("    Replication position for ").append(wal).append(": ")
      .append(position > 0 ? position : "0" + " (not started or nothing to replicate)")
      .append("\n");
  }
  if (hdfs) {
    FileSystem fs = FileSystem.get(getConf());
    out.append("    Total size of WALs on HDFS for this queue: ")
      .append(StringUtils.humanSize(getTotalWALSize(fs, wals, regionserver))).append("\n");
  }
  return out.toString();
}
// Throughput tracing fragment (the enclosing method boundaries are outside this view).
// Seconds elapsed since the last WAL roll. NOTE(review): if called within the same
// millisecond as the roll this is 0 and the division below yields Infinity — confirm
// callers guarantee a non-zero interval.
float rollTsSec = getMillisFromLastRoll() / 1000.0f;
// NOTE(review): humanSize() formats byte counts, so the "/sec" figures render as
// human-readable bytes-per-second style strings.
LOG.trace(String.format("Waiting for data. flushed=%s (%s/sec)",
  StringUtils.humanSize(totalSynced.get()),
  StringUtils.humanSize(totalSynced.get() / rollTsSec)));
LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)",
  StringUtils.humanTimeDiff(syncWaitMs), slotIndex,
  StringUtils.humanSize(totalSyncedToStore),
  StringUtils.humanSize(syncedPerSec)));
@Override protected boolean shouldSplit() { boolean force = region.shouldForceSplit(); boolean foundABigStore = false; // Get count of regions that have the same common table as this.region int tableRegionsCount = getCountOfCommonTableRegions(); // Get size to check long sizeToCheck = getSizeToCheck(tableRegionsCount); for (HStore store : region.getStores()) { // If any of the stores is unable to split (eg they contain reference files) // then don't split if (!store.canSplit()) { return false; } // Mark if any store is big enough long size = store.getSize(); if (size > sizeToCheck) { LOG.debug("ShouldSplit because " + store.getColumnFamilyName() + " size=" + StringUtils.humanSize(size) + ", sizeToCheck=" + StringUtils.humanSize(sizeToCheck) + ", regionsWithCommonTable=" + tableRegionsCount); foundABigStore = true; } } return foundABigStore || force; }
// Jamon-generated template output (do not edit by hand): writes the replication
// source's WAL file size and current read position as HTML-escaped, human-readable
// values into the rendered page.
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(
    StringUtils.humanSize(entry.getValue().getFileSize())), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(
    StringUtils.humanSize(entry.getValue().getCurrentPosition())), jamonWriter);
/**
 * Micro-benchmark: creates one million region state nodes on a single thread
 * and logs the elapsed time and creation rate.
 */
@Test
public void testPerfSingleThread() {
  final TableName TABLE_NAME = TableName.valueOf("testPerf");
  final int NRUNS = 1 * 1000000; // 1M
  final RegionStates stateMap = new RegionStates();
  long st = System.currentTimeMillis();
  for (int i = 0; i < NRUNS; ++i) {
    stateMap.createRegionStateNode(createRegionInfo(TABLE_NAME, i));
  }
  long et = System.currentTimeMillis();
  // NOTE(review): humanSize() formats byte counts; applying it to an ops/sec
  // figure gives a size-style string (e.g. "1.2M") — confirm this is intended.
  LOG.info(String.format("PERF SingleThread: %s %s/sec",
    StringUtils.humanTimeDiff(et - st),
    StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f))));
}
}
// NOTE(review): fragment — the opening of this parameterized call is outside this
// view; these are its trailing arguments (the flush-size lower-bound config key,
// the table name, and the human-readable lower-bound value closing the message
// with ")").
, HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
, region.getTableDescriptor().getTableName()
, StringUtils.humanSize(flushSizeLowerBound) + ")");
} else {
@Ignore @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; for (int i = 1; i <= NITEMS; ++i) { final byte[] data = new byte[256 << i]; LOG.info(String.format("Writing %s", StringUtils.humanSize(data.length))); TestProcedure proc = new TestProcedure(i, 0, data); procStore.insert(proc, null); } // check that we are able to read the big proc-blobs ProcedureTestingUtility.storeRestartAndAssert(procStore, NITEMS, NITEMS, 0, 0); } }
// Benchmark report: wall-clock time and throughput for the addBack and poll phases.
// NOTE(review): humanSize() formats byte counts; here it renders ops/sec figures
// as size-style strings — confirm intended.
System.out.println("******************************************");
System.out.println("Time - addBack : " + StringUtils.humanTimeDiff(addBackTime));
System.out.println("Ops/sec - addBack : " + StringUtils.humanSize(numOps / addBackTimeSec));
System.out.println("Time - poll : " + StringUtils.humanTimeDiff(pollTime));
System.out.println("Ops/sec - poll : " + StringUtils.humanSize(numOps / pollTimeSec));
System.out.println("Num Operations : " + numOps);
System.out.println();
void checkResources() throws RegionTooBusyException { // If catalog region, do not impose resource constraints or block updates. if (this.getRegionInfo().isMetaRegion()) return; MemStoreSize mss = this.memStoreSizing.getMemStoreSize(); if (mss.getHeapSize() + mss.getOffHeapSize() > this.blockingMemStoreSize) { blockedRequestsCount.increment(); requestFlush(); // Don't print current limit because it will vary too much. The message is used as a key // over in RetriesExhaustedWithDetailsException processing. throw new RegionTooBusyException("Over memstore limit=" + org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(this.blockingMemStoreSize) + ", regionName=" + (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getEncodedName()) + ", server=" + (this.getRegionServerServices() == null? "unknown": this.getRegionServerServices().getServerName())); } }
/**
 * Writes the buffered slots {@code slots[offset .. offset+count)} to
 * {@code stream}, syncs the stream and signals post-sync waiters.
 * @return total number of bytes written to the stream
 */
protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots,
    final int offset, final int count) throws IOException {
  long totalSynced = 0;
  for (int i = 0; i < count; ++i) {
    final ByteSlot data = slots[offset + i];
    data.writeTo(stream);
    totalSynced += data.size();
  }
  // Durability point: flush buffered bytes before signalling completion.
  syncStream(stream);
  sendPostSyncSignal();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" +
      StringUtils.humanSize(totalSynced));
  }
  return totalSynced;
}
/**
 * Writes the buffered slots {@code slots[offset .. offset+count)} to
 * {@code stream}, syncs the stream and signals post-sync waiters.
 * @return total number of bytes written to the stream
 */
protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots,
    final int offset, final int count) throws IOException {
  long totalSynced = 0;
  for (int i = 0; i < count; ++i) {
    final ByteSlot data = slots[offset + i];
    data.writeTo(stream);
    totalSynced += data.size();
  }
  // Durability point: flush buffered bytes before signalling completion.
  syncStream(stream);
  sendPostSyncSignal();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" +
      StringUtils.humanSize(totalSynced));
  }
  return totalSynced;
}
/**
 * Micro-benchmark: creates one million region state nodes on a single thread
 * and logs the elapsed time and creation rate.
 */
@Test
public void testPerfSingleThread() {
  final TableName TABLE_NAME = TableName.valueOf("testPerf");
  final int NRUNS = 1 * 1000000; // 1M
  final RegionStates stateMap = new RegionStates();
  long st = System.currentTimeMillis();
  for (int i = 0; i < NRUNS; ++i) {
    stateMap.createRegionStateNode(createRegionInfo(TABLE_NAME, i));
  }
  long et = System.currentTimeMillis();
  // NOTE(review): humanSize() formats byte counts; applying it to an ops/sec
  // figure gives a size-style string (e.g. "1.2M") — confirm this is intended.
  LOG.info(String.format("PERF SingleThread: %s %s/sec",
    StringUtils.humanTimeDiff(et - st),
    StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f))));
}
}
@Ignore @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; for (int i = 1; i <= NITEMS; ++i) { final byte[] data = new byte[256 << i]; LOG.info(String.format("Writing %s", StringUtils.humanSize(data.length))); TestProcedure proc = new TestProcedure(i, 0, data); procStore.insert(proc, null); } // check that we are able to read the big proc-blobs ProcedureTestingUtility.storeRestartAndAssert(procStore, NITEMS, NITEMS, 0, 0); } }
@Ignore @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; for (int i = 1; i <= NITEMS; ++i) { final byte[] data = new byte[256 << i]; LOG.info(String.format("Writing %s", StringUtils.humanSize(data.length))); TestProcedure proc = new TestProcedure(i, 0, data); procStore.insert(proc, null); } // check that we are able to read the big proc-blobs ProcedureTestingUtility.storeRestartAndAssert(procStore, NITEMS, NITEMS, 0, 0); } }
// Benchmark report: wall-clock time and throughput for the addBack and poll phases.
// NOTE(review): humanSize() formats byte counts; here it renders ops/sec figures
// as size-style strings — confirm intended.
System.out.println("******************************************");
System.out.println("Time - addBack : " + StringUtils.humanTimeDiff(addBackTime));
System.out.println("Ops/sec - addBack : " + StringUtils.humanSize(numOps / addBackTimeSec));
System.out.println("Time - poll : " + StringUtils.humanTimeDiff(pollTime));
System.out.println("Ops/sec - poll : " + StringUtils.humanSize(numOps / pollTimeSec));
System.out.println("Num Operations : " + numOps);
System.out.println();
/**
 * Writes the buffered slots {@code slots[offset .. offset+count)} to
 * {@code stream} and syncs it using either hsync or hflush depending on the
 * configured {@code useHsync} policy, then signals post-sync waiters.
 * <p>
 * Parameters are declared {@code final} for consistency with the sibling
 * {@code syncSlots} implementations in this code base.
 * @param stream output stream backing the log
 * @param slots  buffered entries to flush
 * @param offset index of the first slot to write
 * @param count  number of slots to write
 * @return total number of bytes written to the stream
 * @throws IOException if the write or sync fails
 */
protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots,
    final int offset, final int count) throws IOException {
  long totalSynced = 0;
  for (int i = 0; i < count; ++i) {
    final ByteSlot data = slots[offset + i];
    data.writeTo(stream);
    totalSynced += data.size();
  }
  // hsync persists to disk; hflush only guarantees delivery to datanode memory.
  if (useHsync) {
    stream.hsync();
  } else {
    stream.hflush();
  }
  sendPostSyncSignal();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Sync slots=" + count + '/' + slots.length + ", flushed=" +
      StringUtils.humanSize(totalSynced));
  }
  return totalSynced;
}