/**
 * Restarts the namenode at the given index, waiting for it to become
 * active again before returning.
 *
 * @param nnIndex index of the namenode to restart
 * @throws IOException if the restart fails
 */
public synchronized void restartNameNode(int nnIndex) throws IOException {
  // Delegate to the two-argument overload; the flag requests the
  // active-wait behavior (see restartNameNode(int, boolean)).
  restartNameNode(nnIndex, true);
}
/**
 * Restarts the single namenode of this cluster, optionally waiting for
 * the cluster to become active again.
 *
 * @param waitActive whether to block until the cluster is active
 * @throws IOException if the restart fails
 */
public synchronized void restartNameNode(boolean waitActive) throws IOException {
  // Only meaningful on a non-federated (single-NN) cluster.
  checkSingleNameNode();
  restartNameNode(0, waitActive);
}
/**
 * Restarts every namenode in the cluster, then waits once for the whole
 * cluster to come back up.
 *
 * @throws IOException if any restart fails
 */
public synchronized void restartNameNodes() throws IOException {
  // Restart each namenode without waiting; a single waitActive() at the
  // end avoids paying the startup wait once per namenode.
  for (int nn = 0; nn < nameNodes.length; nn++) {
    restartNameNode(nn, false);
  }
  waitActive();
}
/**
 * Restarts the single namenode of this cluster with the given startup
 * arguments, waiting for the cluster to become active again.
 *
 * @param args extra arguments to pass to the namenode on startup
 * @throws IOException if the restart fails
 */
public synchronized void restartNameNode(String... args) throws IOException {
  // Only meaningful on a non-federated (single-NN) cluster.
  checkSingleNameNode();
  restartNameNode(0, true, args);
}
// Restart namenode 0, then transition namenode 1 to active. The trailing
// "};" closes the enclosing anonymous class, whose declaration starts
// outside this view.
@Override
void run(MiniDFSCluster cluster) throws IOException {
  cluster.restartNameNode(0);
  cluster.transitionToActive(1);
}
};
@Test public void testParentDirWithUCFileDeleteWithSnapShot() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); // delete parent directory fs.delete(new Path("/test/test"), true); cluster.restartNameNode(); }
/**
 * Restarts the NameNode and replaces the cached FileSystem with a fresh
 * one obtained from the restarted cluster.
 *
 * @throws IOException if there is an I/O error
 */
private void restartNameNode() throws IOException {
  // Close the old handle first: it is bound to the pre-restart namenode.
  IOUtils.cleanup(null, fs);
  cluster.restartNameNode();
  fs = cluster.getFileSystem();
}
@Test public void testUCFileDeleteWithSnapShot() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); // delete files separately fs.delete(new Path("/test/test/test2"), true); fs.delete(new Path("/test/test/test3"), true); cluster.restartNameNode(); }
private void restartActive() throws IOException { cluster.shutdownNameNode(0); // Set the safemode extension to be lengthy, so that the tests // can check the safemode message after the safemode conditions // have been achieved, without being racy. cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000); cluster.restartNameNode(0); nn0 = cluster.getNameNode(0); }
/**
 * Asserts that restarting the namenode of the given cluster fails and
 * leaves the cluster down.
 *
 * @param cluster the cluster whose restart is expected to fail
 */
private void ensureClusterRestartFails(MiniDFSCluster cluster) {
  try {
    cluster.restartNameNode();
    fail("Cluster should not have successfully started");
  } catch (Exception expected) {
    // The restart is supposed to throw; just record what we got.
    LOG.info("Expected exception thrown " + expected);
  }
  assertFalse(cluster.isClusterUp());
}
/**
 * Asserts that restarting the namenode of the given cluster succeeds and
 * brings the cluster back up.
 *
 * @param cluster the cluster whose restart is expected to succeed
 * @throws IOException if the restart fails unexpectedly
 */
private void ensureClusterRestartSucceeds(MiniDFSCluster cluster)
    throws IOException {
  cluster.restartNameNode();
  cluster.waitActive();
  assertTrue(cluster.isClusterUp());
}
@Override
public void run() {
  try {
    // Sleep first so the main thread observes the unhealthy cluster,
    // then restart NN0 (no per-call wait) and wait for the cluster to
    // become active again.
    TimeUnit.SECONDS.sleep(30);
    assertFalse(HdfsUtils.isHealthy(uri));
    cluster.restartNameNode(0, false);
    cluster.waitActive();
    assertTrue(HdfsUtils.isHealthy(uri));
  } catch (Exception e) {
    // Surface any failure to the main test thread via the shared list.
    exceptions.add(e);
  }
}
// The trailing ")...start();" closes and starts the anonymous thread whose
// declaration begins outside this view.
}).start();
@Test public void testPolicyPersistenceInEditLog() throws IOException { startUpCluster(false, -1); final String METHOD_NAME = GenericTestUtils.getMethodName(); Path path = new Path("/" + METHOD_NAME + ".dat"); makeTestFile(path, 0, true); cluster.restartNameNode(true); // Stat the file and check that the lazyPersist flag is returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID)); }
private void restartStandby() throws IOException { cluster.shutdownNameNode(1); // Set the safemode extension to be lengthy, so that the tests // can check the safemode message after the safemode conditions // have been achieved, without being racy. cluster.getConfiguration(1).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000); cluster.getConfiguration(1).setInt( DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); cluster.restartNameNode(1); nn1 = cluster.getNameNode(1); assertEquals(nn1.getNamesystem().getTransactionsSinceLastLogRoll(), 0L); }
private void rollbackRollingUpgrade() throws Exception { // Shutdown datanodes and namenodes // Restart the namenode with rolling upgrade rollback LOG.info("Starting rollback of the rolling upgrade"); MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0); dnprop.setDnArgs("-rollback"); cluster.shutdownNameNodes(); cluster.restartNameNode("-rollingupgrade", "rollback"); cluster.restartDataNode(dnprop); cluster.waitActive(); nn = cluster.getNameNode(0); dn0 = cluster.getDataNodes().get(0); triggerHeartBeats(); LOG.info("The cluster is active after rollback"); }
/** * Test that, if there are no blocks in the filesystem, * the NameNode doesn't enter the "safemode extension" period. */ @Test(timeout=45000) public void testNoExtensionIfNoBlocks() throws IOException { cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 60000); cluster.restartNameNode(); // Even though we have safemode extension set high, we should immediately // exit safemode on startup because there are no blocks in the namespace. String status = cluster.getNameNode().getNamesystem().getSafemode(); assertEquals("", status); }
/** * Test that getContentSummary on Standby should should throw standby * exception. */ @Test(expected = StandbyException.class) public void testgetContentSummaryOnStandby() throws Exception { Configuration nn1conf =cluster.getConfiguration(1); // just reset the standby reads to default i.e False on standby. HAUtil.setAllowStandbyReads(nn1conf, false); cluster.restartNameNode(1); cluster.getNameNodeRpc(1).getContentSummary("/"); } }
/**
 * Rolls back a rolling upgrade on the given cluster and verifies the
 * namespace reflects the pre-upgrade state: {@code foo} exists,
 * {@code bar} does not, and {@code file} still holds the original data.
 *
 * @throws IOException if the rollback or verification fails
 */
private static void rollbackRollingUpgrade(Path foo, Path bar, Path file,
    byte[] data, MiniDFSCluster cluster) throws IOException {
  final DataNodeProperties dnprop = cluster.stopDataNode(0);
  cluster.restartNameNode("-rollingUpgrade", "rollback");
  cluster.restartDataNode(dnprop, true);

  final DistributedFileSystem dfs = cluster.getFileSystem();
  Assert.assertTrue(dfs.exists(foo));
  Assert.assertFalse(dfs.exists(bar));
  AppendTestUtil.checkFullFile(dfs, file, data.length, data);
}
@Test public void testLongLivedReadClientAfterRestart() throws IOException { FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster(); assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); assertEquals(checksum, fs.getFileChecksum(TEST_PATH)); // Restart the NN and DN, after which the client's encryption key will no // longer be valid. cluster.restartNameNode(); assertTrue(cluster.restartDataNode(0)); assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); assertEquals(checksum, fs.getFileChecksum(TEST_PATH)); }
@Test public void testLongLivedWriteClientAfterRestart() throws IOException { setEncryptionConfigKeys(); cluster = new MiniDFSCluster.Builder(conf).build(); fs = getFileSystem(conf); writeTestDataToFile(fs); assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); // Restart the NN and DN, after which the client's encryption key will no // longer be valid. cluster.restartNameNode(); assertTrue(cluster.restartDataNodes()); cluster.waitActive(); writeTestDataToFile(fs); assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); }