@Override public Object call() throws Exception { SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName); if (isMajor) { assertEquals(0, stats.getMajorCompaction().getCount()); } else { assertEquals(0, stats.getMinorCompaction().getCount()); } return null; } };
@Override
public Object call() throws Exception {
  Region r = getCache().getRegion(regionName);
  LocalRegion lr = (LocalRegion) r;
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + regionName);
  long readsFromHDFS = stats.getRead().getCount();
  assertEquals(0, readsFromHDFS);

  if (isPutAll) {
    // map with only one entry
    Map m = new HashMap();
    m.put("key0", "value0");
    DistributedPutAllOperation ev = lr.newPutAllOperation(m);
    lr.basicPutAll(m, ev, null);

    // map with multiple entries
    m.clear();
    for (int i = 1; i < 100; i++) {
      m.put("key" + i, "value" + i);
    }
    ev = lr.newPutAllOperation(m);
    lr.basicPutAll(m, ev, null);
  } else {
    for (int i = 0; i < 100; i++) {
      r.put("key" + i, "value" + i);
    }
  }
  return null;
}
});
/**
 * Validates that two buckets belonging to the same region update the same
 * stats instance.
 */
public void testRegionBucketShareStats() throws Exception {
  HoplogOrganizer bucket1 = regionManager.create(0);
  HoplogOrganizer bucket2 = regionManager.create(1);

  // validate flush stats
  assertEquals(0, stats.getFlush().getCount());
  assertEquals(0, stats.getActiveFileCount());

  ArrayList<TestEvent> items = new ArrayList<TestEvent>();
  for (int i = 0; i < 100; i++) {
    items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
  }
  bucket1.flush(items.iterator(), 100);
  assertEquals(1, stats.getFlush().getCount());
  assertEquals(1, stats.getActiveFileCount());

  items.clear();
  for (int i = 0; i < 100; i++) {
    items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
  }
  bucket2.flush(items.iterator(), 100);
  assertEquals(2, stats.getFlush().getCount());
  assertEquals(2, stats.getActiveFileCount());
}
public void test030RemoveOperationalData() throws Exception {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());

  int expectedReadsFromHDFS = 0;
  for (int i = 0; i < 100; i++) {
    r.put(i, "value" + i);
    // every put checks HDFS for an existing value
    expectedReadsFromHDFS++;
    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
  }

  sleep(r.getFullPath());

  PartitionedRegion lr = (PartitionedRegion) r;
  // ... intermediate reads and removes elided in this snippet ...
  expectedReadsFromHDFS = 250; // initial 100 + 200 for reads + 50 for
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());

  // gets that are not allowed to read from HDFS must return null for the
  // removed keys and must not increment the read count
  for (int i = 0; i < 50; i++) {
    assertNull(lr.get(i, null, true, false, false, null, lr.discoverJTA(),
        null, null, false, false /*allowReadFromHDFS*/));
    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
  }
}
public void test020GetOperationalData() throws Exception {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());

  int expectedReadsFromHDFS = 0;
  for (int i = 0; i < 100; i++) {
    r.put(i, "value" + i);
    // every put checks HDFS for an existing value
    expectedReadsFromHDFS++;
    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
  }

  sleep(r.getFullPath());
  clearBackingCHM(r);
  LocalRegion lr = (LocalRegion) r;

  // ... 200 gets that are allowed to read through to HDFS ...
  expectedReadsFromHDFS = 300; // initial 100 + 200 for reads
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());

  // gets that skip HDFS return null and leave the read count unchanged
  for (int i = 0; i < 200; i++) {
    assertNull(lr.get(i, null, true, false, false, null, lr.discoverJTA(),
        null, null, false, false /*allowReadFromHDFS*/));
    assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
  }
}
public void test010PUTDMLSupport() {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());

  for (int i = 0; i < 100; i++) {
    r.put(i, "value" + i);
  }
  assertEquals(100, stats.getRead().getCount());

  sleep(r.getFullPath());
  clearBackingCHM(r);
  LocalRegion lr = (LocalRegion) r;
  for (int i = 0; i < 200; i++) {
    EntryEventImpl ev = lr.newPutEntryEvent(i, "value" + i, null);
    lr.validatedPut(ev, System.currentTimeMillis());
  }
  // verify that the read count on HDFS does not change
  assertEquals(100, stats.getRead().getCount());

  sleep(r.getFullPath());
  clearBackingCHM(r);
  for (int i = 0; i < 200; i++) {
    assertEquals("value" + i, r.get(i));
  }
  if (getBatchTimeInterval() > 1000) {
    // reads are served from the async queue
    assertEquals(100, stats.getRead().getCount());
  } else {
    assertEquals(300, stats.getRead().getCount());
  }
}
// before any flush
assertEquals(0, stats.getFlush().getCount());
assertEquals(0, stats.getFlush().getBytes());
assertEquals(0, stats.getActiveFileCount());

// after the (j + 1)th flush
assertEquals(j + 1, stats.getFlush().getCount());
assertTrue(stats.getFlush().getBytes() > bytesSent);
assertEquals(j + 1, stats.getActiveFileCount());

// no compaction has run yet
assertEquals(0, stats.getMinorCompaction().getCount());
assertEquals(0, stats.getMinorCompaction().getBytes());
assertEquals(0, stats.getInactiveFileCount());

// a minor compaction merges the active files into a single file and
// cleans up the inactive ones
bucket.getCompactor().compact(false, false);
assertEquals(1, stats.getMinorCompaction().getCount());
assertEquals(1, stats.getActiveFileCount());
assertEquals(0, stats.getInactiveFileCount());
public void testReadStats() throws Exception {
  HoplogOrganizer<SortedHoplogPersistedEvent> bucket = regionManager.create(0);

  ArrayList<TestEvent> items = new ArrayList<TestEvent>();
  for (int i = 0; i < 100; i++) {
    items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
  }
  bucket.flush(items.iterator(), 100);

  // validate read stats
  assertEquals(0, stats.getRead().getCount());
  assertEquals(0, stats.getRead().getBytes());

  // number of bytes read must be greater than size of key and value and
  // must be increasing
  int bytesRead = "key-1".getBytes().length + "value=1233232".getBytes().length;
  for (int i = 0; i < 5; i++) {
    long previousRead = stats.getRead().getBytes();
    PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + i));
    assertNotNull(e);
    assertEquals(i + 1, stats.getRead().getCount());
    assertTrue((bytesRead + previousRead) < stats.getRead().getBytes());
  }

  // make sure the block cache stats are being updated
  assertTrue(storeStats.getBlockCache().getMisses() > 0);
  assertTrue(storeStats.getBlockCache().getBytesCached() > 0);
  assertTrue(storeStats.getBlockCache().getCached() > 0);

  // do a duplicate read to make sure we get a hit in the cache
  PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + 0));
  assertTrue(storeStats.getBlockCache().getHits() > 0);
}
@Override public Object call() throws Exception { Region r = getCache().getRegion(regionName); for (int i=0; i<500; i++) { r.put("key"+i, "value"+i); if (i%100 == 0) { // wait for flush pause(3000); } } pause(3000); PartitionedRegion pr = (PartitionedRegion) r; long lastCompactionTS = pr.lastMajorHDFSCompaction(); assertEquals(0, lastCompactionTS); long beforeCompact = System.currentTimeMillis(); pr.forceHDFSCompaction(true, isSynchronous ? 0 : 1); if (isSynchronous) { final SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName); assertTrue(stats.getMajorCompaction().getCount() > 0); assertTrue(pr.lastMajorHDFSCompaction() >= beforeCompact); } return null; } });
@Override
public Object call() throws Exception {
  Region r = getCache().getRegion(regionName);
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + regionName);
  long readsFromHDFS = stats.getRead().getCount();
  assertEquals(0, readsFromHDFS);
  for (int i = 0; i < 100; i++) {
    r.getCache().getLogger().info("SWAP:DOING PUT:key" + i);
    r.put("key" + i, "value" + i);
  }
  return null;
}
});
@Override
public Object call() throws Exception {
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + regionName);
  return stats.getRead().getCount();
}
};
@Override
public Object call() throws Exception {
  final SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + regionName);
  assertTrue(stats.getMajorCompaction().getCount() > 0);
  return null;
}
};
@Override
public Object call() throws Exception {
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + regionName);
  return stats.getRead().getCount();
}
};
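// Usage sketch (an assumption, not part of the original file): callables like
// the one above are typically wrapped in the DUnit framework's
// SerializableCallable and shipped to a remote member with VM.invoke(). The
// vm0 variable and the getReadsFromHDFS name below are hypothetical, chosen
// only for illustration.
//
//   SerializableCallable getReadsFromHDFS = new SerializableCallable() {
//     @Override
//     public Object call() throws Exception {
//       SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
//           .getHdfsRegionStats("/" + regionName);
//       return stats.getRead().getCount();
//     }
//   };
//   long readsOnVM0 = (Long) vm0.invoke(getReadsFromHDFS);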