/**
 * Have DatanodeManager check decommission state.
 * <p>
 * Runs one pass of the DecommissionManager's monitor synchronously, so tests
 * can deterministically advance decommission progress instead of waiting for
 * the periodic background check.
 *
 * @param dm the DatanodeManager to manipulate
 * @throws ExecutionException if the monitor pass fails
 * @throws InterruptedException if interrupted while the monitor runs
 */
public static void recheckDecommissionState(DatanodeManager dm)
    throws ExecutionException, InterruptedException {
  dm.getDecomManager().runMonitor();
}
}
@Deprecated @Test(timeout=120000) public void testNodesPerInterval() throws Exception { Configuration newConf = new Configuration(conf); org.apache.log4j.Logger.getLogger(DecommissionManager.class) .setLevel(Level.TRACE); // Set the deprecated configuration key which limits the # of nodes per // interval newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1); // Disable the normal monitor runs newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, Integer.MAX_VALUE); startCluster(1, 3, newConf); final FileSystem fs = cluster.getFileSystem(); final DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager().getDatanodeManager(); final DecommissionManager decomManager = datanodeManager.getDecomManager(); // Write a 3 block file, so each node has one block. Should scan 1 node // each time. DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA); for (int i=0; i<3; i++) { doDecomCheck(datanodeManager, decomManager, 1); } }
@Test(timeout=120000) public void testBlocksPerInterval() throws Exception { Configuration newConf = new Configuration(conf); org.apache.log4j.Logger.getLogger(DecommissionManager.class) .setLevel(Level.TRACE); // Turn the blocks per interval way down newConf.setInt( DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, 3); // Disable the normal monitor runs newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, Integer.MAX_VALUE); startCluster(1, 3, newConf); final FileSystem fs = cluster.getFileSystem(); final DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager().getDatanodeManager(); final DecommissionManager decomManager = datanodeManager.getDecomManager(); // Write a 3 block file, so each node has one block. Should scan 3 nodes. DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA); doDecomCheck(datanodeManager, decomManager, 3); // Write another file, should only scan two DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short)3, 0xBAD1DEA); doDecomCheck(datanodeManager, decomManager, 2); // One more file, should only scan 1 DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short)3, 0xBAD1DEA); doDecomCheck(datanodeManager, decomManager, 1); // blocks on each DN now exceeds limit, still scan at least one node DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short)3, 0xBAD1DEA); doDecomCheck(datanodeManager, decomManager, 1); }
// Look up the decommission machinery of the running mini-cluster.
// NOTE(review): fragment — the enclosing method header is outside this view.
final DatanodeManager datanodeManager =
    cluster.getNamesystem().getBlockManager().getDatanodeManager();
final DecommissionManager decomManager = datanodeManager.getDecomManager();
// Expect exactly 2 nodes actively tracked by the DecommissionManager and
// none queued as pending. NOTE(review): fragment — assertTrackedAndPending
// and `dm` are declared outside this view.
assertTrackedAndPending(dm.getDecomManager(), 2, 0);
// NOTE(review): fragment — the statement assigning `dn` begins before this
// view; `.getBlocks()[0].getDatanode(0)` is its tail (first replica of the
// first block).
    .getBlocks()[0].getDatanode(0);
// Begin decommissioning the datanode that hosts that replica.
cluster.getNameNode().getNamesystem().getBlockManager()
    .getDatanodeManager().getDecomManager().startDecommission(dn);
// Keep the node's transfer address for later lookups.
String dnName = dn.getXferAddr();
// Put the node into decommissioning, then immediately force it to the
// decommissioned state — bypassing the monitor's normal progression.
// NOTE(review): fragment — `dnManager` and `d` are declared outside this view.
dnManager.getDecomManager().startDecommission(d);
d.setDecommissioned();
// NOTE(review): fragment — the enclosing method and several declarations
// (namesystem, dnm, dnd, datanodes, i, expectedInServiceNodes,
// expectedInServiceLoad) are outside this view.
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
// Starting decommission should remove the node from the in-service tallies.
dnm.getDecomManager().startDecommission(dnd);
expectedInServiceNodes--;
assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
// NOTE(review): the expected count is NOT restored after stopDecommission —
// presumably the node only rejoins the in-service set on a later heartbeat;
// confirm against the enclosing test's intent.
dnm.getDecomManager().stopDecommission(dnd);
assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
// NOTE(review): return value is discarded as written — the assignment target
// (likely `dnd =`) may begin before this view.
dnm.getDatanode(datanodes.get(i).getDatanodeId());
// Subtract this node's xceiver load from the expected in-service total.
expectedInServiceLoad -= dnd.getXceiverCount();
dnm.getDecomManager().startDecommission(dnd);
// Push a heartbeat so the namenode observes the change, then give the
// asynchronous handling a moment to settle.
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
Thread.sleep(100);