public void beforeSwitchingOplog() { DiskRegion dr = ((LocalRegion)region).getDiskRegion(); if (switchedOplog[0] == null) { switchedOplog[0] = dr.testHook_getChild(); } } });
/**
 * Asserts that, after recovery, the region's current oplog holds only the
 * fixed-size header records (two disk-store records, two gemfire-version
 * records, and the empty RVV record) — i.e. no entry data survived.
 */
public void verifyOplogSizeZeroAfterRecovery(Region region) {
  long expectedHeaderBytes = Oplog.OPLOG_DISK_STORE_REC_SIZE * 2
      + EMPTY_RVV_SIZE
      + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE * 2;
  Oplog currentOplog = ((LocalRegion) region).getDiskRegion().testHook_getChild();
  assertEquals(expectedHeaderBytes, currentOplog.getOplogSize());
}
File oplogFile = null; try { oplogFile = ((LocalRegion)region).getDiskRegion().testHook_getChild() .getOplogFile();
.testHook_getChild().getFileChannel(); oplogFileChannel.close(); try {
/**
 * Verifies that {@code DiskRegion.testHook_getChild()} tracks whichever Oplog
 * is currently installed as the child: after installing a replacement via
 * {@code setChild} the hook returns the replacement, and after restoring the
 * original child the hook returns the original again.
 */
public void testGetChild() {
  deleteFiles();
  region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, diskProps);
  DiskRegion diskRegion = ((LocalRegion) region).getDiskRegion();
  Oplog originalChild = diskRegion.testHook_getChild();
  long oplogId = originalChild.getOplogId();
  // The distributed system doubles as the StatisticsFactory for DirectoryHolder.
  StatisticsFactory statsFactory = region.getCache().getDistributedSystem();
  Oplog replacement = new Oplog(oplogId, diskRegion.getOplogSet(),
      new DirectoryHolder(statsFactory, dirs[0], 1000000, 0));
  // Swap in the replacement and confirm the test hook observes it.
  diskRegion.getDiskStore().persistentOplogs.setChild(replacement);
  assertEquals(replacement, diskRegion.testHook_getChild());
  // Restore the original child and confirm the hook follows the change back.
  diskRegion.setChild(originalChild);
  assertEquals(originalChild, diskRegion.testHook_getChild());
  replacement.close();
  replacement = null;
  closeDown();
}
.testHook_getChild().getFileChannel(); oplogFileChannel.close(); try {
.testHook_getChild().getFileChannel(); oplogFileChannel.close(); try {
/**
 * Verifies that once a disk failure destroys the region, the cache no longer
 * reports any running bridge servers.
 *
 * The disk failure is injected by closing the current oplog's FileChannel out
 * from under the region and then attempting a put; the put is expected to hit
 * a DiskAccessException (or a wrapped IOException), after which the region
 * must be destroyed and {@code gemfirecache.getBridgeServers()} must be empty.
 *
 * NOTE(review): reads and nulls out the class-level {@code region} and
 * {@code gemfirecache} fields — assumes they were initialized by the caller.
 *
 * @throws Exception if region setup fails
 */
private static void validateRuningBridgeServerList() throws Exception{
  /*Region region = gemfirecache.getRegion(Region.SEPARATOR + REGION_NAME); assertNotNull(region);*/
  try {
    region.create("key1", new byte[16]);
    region.create("key2", new byte[16]);
    // Get the oplog handle & hence the underlying file & close it
    FileChannel oplogFileChannel = ((LocalRegion)region).getDiskRegion()
        .testHook_getChild().getFileChannel();
    try {
      oplogFileChannel.close();
      // This put should fail because the oplog's channel is closed.
      region.put("key2", new byte[16]);
    }catch(DiskAccessException dae) {
      //OK expected
    }catch (IOException e) {
      fail("test failed due to ", e);
    }
    // The DiskAccessException is expected to have destroyed the region.
    assertTrue(region.isDestroyed());
    // Null the field so the finally block does not try to destroy it again.
    region = null;
    List bsRunning = gemfirecache.getBridgeServers();
    assertTrue(bsRunning.isEmpty());
  } finally {
    // Cleanup path for the case where the region survived unexpectedly.
    if (region != null) {
      region.destroyRegion();
    }
  }
}
Oplog oplog = dr.testHook_getChild();
Oplog old = dr.testHook_getChild(); ByteBuffer oldWriteBuf = old.getWriteBuf(); Oplog switched = dr.testHook_getChild(); assertTrue(old != switched); assertEquals(dr.getDiskStore().persistentOplogs.getChild(2), switched);
/** * If IOException occurs while destroying an entry in a persist only synch mode, * DiskAccessException should occur & region should be destroyed * * @throws Exception */ private void entryDestructionInSynchPersistTypeForIOExceptionCase(Region region) throws Exception { try { region.create("key1", "value1"); // Get the oplog handle & hence the underlying file & close it ((LocalRegion)region).getDiskRegion().testHook_getChild().testClose(); try { region.destroy("key1"); fail("Should have encountered DiskAccessException"); } catch (DiskAccessException dae) { // OK } assertTrue(region.isDestroyed()); region = null; } finally { if (region != null) { region.destroyRegion(); } } }
.testHook_getChild().getFileChannel();
.testHook_getChild().getFileChannel(); oplogFileChannel.close(); try {
Thread.yield(); DiskRegion dr = ((LocalRegion)region).getDiskRegion(); dr.testHook_getChild().forceRolling(dr, false);
final byte[] payload = new byte[100]; region.put("key0", payload); assertEquals(dirs[0], ((LocalRegion)region).getDiskRegion().testHook_getChild().getDirectoryHolder().getDir()); region.close(); ((LocalRegion)region).getDiskStore().close(); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL); region.put("key1", payload); assertEquals(dirs[1], ((LocalRegion)region).getDiskRegion().testHook_getChild().getDirectoryHolder().getDir()); region.close(); ((LocalRegion)region).getDiskStore().close(); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL); region.put("key2", payload); assertEquals(dirs[2], ((LocalRegion)region).getDiskRegion().testHook_getChild().getDirectoryHolder().getDir()); region.close(); ((LocalRegion)region).getDiskStore().close(); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL); region.put("key3", payload); assertEquals(dirs[3], ((LocalRegion)region).getDiskRegion().testHook_getChild().getDirectoryHolder().getDir()); region.close(); ((LocalRegion)region).getDiskStore().close(); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL); region.put("key4", payload); assertEquals(dirs[0], ((LocalRegion)region).getDiskRegion().testHook_getChild().getDirectoryHolder().getDir());
.testHook_getChild().getFileChannel(); oplogFileChannel.close();
diskProps); DiskRegion dr = ((LocalRegion)region).getDiskRegion(); Oplog oplog = dr.testHook_getChild(); long id = oplog.getOplogId(); oplog.close(); diskProps, Scope.LOCAL); DiskRegion dr = ((LocalRegion)region).getDiskRegion(); Oplog oplog = dr.testHook_getChild(); long id = oplog.getOplogId(); oplog.close();
/** * Confirm that forceCompaction waits for the compaction to finish */ public void testForceCompactionIsSync() { DiskRegionProperties props = new DiskRegionProperties(); props.setRegionName("testForceCompactionDoesRoll"); props.setRolling(false); props.setDiskDirs(dirs); props.setAllowForceCompaction(true); props.setPersistBackup(true); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL); DiskRegion dr = ((LocalRegion)region).getDiskRegion(); logWriter.info("putting key1"); region.put("key1", "value1"); logWriter.info("putting key2"); region.put("key2", "value2"); logWriter.info("removing key1"); region.remove("key1"); logWriter.info("removing key2"); region.remove("key2"); // now that it is compactable the following forceCompaction should // go ahead and do a roll and compact it. Oplog oplog = dr.testHook_getChild(); boolean compacted = ((LocalRegion)region).getDiskStore().forceCompaction(); assertEquals(true, oplog.testConfirmCompacted()); assertEquals(true, compacted); }