util.getDFSCluster().shutdownDataNodes();
/**
 * Shut down the servers that are up.
 */
public void shutdown() {
  System.out.println("Shutting down the Mini HDFS Cluster");
  shutdownDataNodes();
  if (nameNode != null) {
    nameNode.stop();
    nameNode.join();
    nameNode = null;
  }
}
@After
public void shutDownCluster() throws IOException {
  if (fs != null) fs.close();
  if (cluster != null) {
    cluster.shutdownDataNodes();
    cluster.shutdown();
  }
}
@After
public void shutDownCluster() throws IOException {
  if (dfs != null) {
    dfs.close();
  }
  if (cluster != null) {
    cluster.shutdownDataNodes();
    cluster.shutdown();
  }
}
@After
public void shutDownCluster() throws IOException {
  fs.close();
  cluster.shutdownDataNodes();
  cluster.shutdown();
}
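The teardown variants above follow the same pattern: close client handles first, then stop the datanodes and the rest of the cluster. A minimal self-contained sketch of the full lifecycle, assuming JUnit 4 and the hadoop-hdfs test artifact on the classpath (the class and field names here are illustrative, not from the snippets):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;

public class TestWithMiniCluster {
  private MiniDFSCluster cluster;
  private FileSystem fs;

  @Before
  public void startCluster() throws IOException {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }

  @After
  public void shutDownCluster() throws IOException {
    // Close the client handle before stopping the daemons it talks to.
    if (fs != null) {
      fs.close();
    }
    // shutdown() itself stops the datanodes before the namenode (see the
    // implementations above), so the explicit shutdownDataNodes() call in
    // some variants is belt-and-braces.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}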
/**
 * Restart the datanodes with a new volume tolerated value.
 * @param volTolerated number of dfs data dir failures to tolerate
 * @param manageDfsDirs whether the mini cluster should manage data dirs
 * @throws IOException
 */
private void restartDatanodes(int volTolerated, boolean manageDfsDirs)
    throws IOException {
  // Make sure no datanode is running
  cluster.shutdownDataNodes();
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      volTolerated);
  cluster.startDataNodes(conf, 1, manageDfsDirs, null, null);
  cluster.waitActive();
}
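A hedged sketch of how a test might drive this helper; the @Test method name is made up, and it assumes the cluster field above plus org.junit.Assert.assertTrue:

// Hypothetical test: with one tolerated volume failure and no actual
// failures, the restarted datanode should come back up.
@Test
public void testVolumeFailureTolerated() throws IOException {
  restartDatanodes(1, true);
  assertTrue(cluster.isDataNodeUp());
}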
+ (dfs.getFileSystem()).getUri().getPort();
dfs.shutdownDataNodes();
@After
public void shutDownCluster() throws IOException {
  if (fs != null) {
    fs.close();
  }
  if (cluster != null) {
    cluster.shutdownDataNodes();
    cluster.shutdown();
  }
}
/**
 * Shutdown all the nodes in the cluster.
 */
public void shutdown(boolean deleteDfsDir) {
  LOG.info("Shutting down the Mini HDFS Cluster");
  if (checkExitOnShutdown) {
    if (ExitUtil.terminateCalled()) {
      LOG.fatal("Test resulted in an unexpected exit",
          ExitUtil.getFirstExitException());
      ExitUtil.resetFirstExitException();
      throw new AssertionError("Test resulted in an unexpected exit");
    }
  }
  shutdownDataNodes();
  for (NameNodeInfo nnInfo : nameNodes) {
    if (nnInfo == null) continue;
    NameNode nameNode = nnInfo.nameNode;
    if (nameNode != null) {
      nameNode.stop();
      nameNode.join();
      nameNode = null;
    }
  }
  if (deleteDfsDir) {
    base_dir.delete();
  } else {
    base_dir.deleteOnExit();
  }
}
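The deleteDfsDir flag controls whether the cluster's base directory is removed eagerly or left for deleteOnExit at JVM shutdown. A minimal sketch of exercising it, assuming the standard MiniDFSCluster builder API (the surrounding class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ShutdownModes {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build();
    cluster.waitActive();
    try {
      // ... exercise the cluster ...
    } finally {
      // Pass true to delete the dfs base directory immediately; passing
      // false leaves it in place until JVM exit (base_dir.deleteOnExit()).
      cluster.shutdown(true);
    }
  }
}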
@After
public void shutDownCluster() throws Exception {
  // Dump all RamDisk JMX metrics before shutting down the cluster
  printRamDiskJMXMetrics();
  if (fs != null) {
    fs.close();
    fs = null;
    client = null;
  }
  if (cluster != null) {
    cluster.shutdownDataNodes();
    cluster.shutdown();
    cluster = null;
  }
  if (jmx != null) {
    jmx = null;
  }
  IOUtils.closeQuietly(sockDir);
  sockDir = null;
}
@After
public void shutDownCluster() throws IOException {
  client.close();
  fs.close();
  cluster.shutdownDataNodes();
  cluster.shutdown();
}
} finally {
  if (cluster != null) {
    cluster.shutdownDataNodes();
cluster.shutdownDataNodes();
Thread.sleep(30000L);
assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1));
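A fixed 30-second sleep like the one above only works if the namenode's dead-node timeout has been tuned down. A hedged sketch of the configuration such a test presumably sets before starting the cluster; the values are assumptions, while the expiry formula (2 * recheck interval + 10 * heartbeat interval) is how the namenode computes heartbeat expiry:

// Assumed tuning so a stopped datanode is declared dead within the
// 30 s sleep: dead-node timeout = 2 * 5 s + 10 * 1 s = 20 s.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 5000);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);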
assertFalse(isReady);
cluster.shutdownDataNodes();
cluster.setDataNodesDead();
try {
cluster.shutdownDataNodes();
cluster.shutdownNameNode(0);
cluster.shutdownDataNodes();
cluster.restartNameNode(false);
assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
    cluster.isDataNodeUp());
cluster.shutdownDataNodes();