/**
 * Set the MiniDFSCluster.
 *
 * @param cluster cluster to use
 * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp)
 *     before it is set
 * @throws IllegalStateException if the passed cluster is up when it is required to be down
 * @throws IOException if the FileSystem could not be set from the passed dfs cluster
 */
public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
    throws IllegalStateException, IOException {
  // Only reject when the caller demanded a down cluster and the current one is live.
  boolean rejectRunning = requireDown && dfsCluster != null && dfsCluster.isClusterUp();
  if (rejectRunning) {
    throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
  }
  this.dfsCluster = cluster;
  // Re-derive the FileSystem handle from the newly installed cluster.
  this.setFs();
}
} finally { while (cluster.isClusterUp()){ LOG.error("Waiting for cluster to go down"); Thread.sleep(1000); assertFalse(cluster.isClusterUp()); cluster = null; for (int i = 0; i < 100; i++) {
/**
 * Replace the MiniDFSCluster in use.
 *
 * @param cluster cluster to use
 * @throws IOException if a previously-set cluster is still running
 */
public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
  MiniDFSCluster current = this.dfsCluster;
  // Refuse to overwrite a cluster that is still serving.
  if (current != null && current.isClusterUp()) {
    throw new IOException("DFSCluster is already running! Shut it down first.");
  }
  this.dfsCluster = cluster;
}
/**
 * Wait for the cluster to come up and get out of safemode.
 *
 * <p>Polls {@link #isClusterUp()} once per second. If the calling thread is interrupted,
 * the wait is abandoned: the interrupt status is restored and the method returns. (The
 * previous implementation swallowed the InterruptedException, losing the interrupt and
 * spinning forever.)
 */
public void waitClusterUp() {
  if (numDataNodes > 0) {
    while (!isClusterUp()) {
      try {
        LOG.warn("Waiting for the Mini HDFS Cluster to start...");
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Preserve the interrupt for the caller and stop waiting rather than
        // dropping the signal and looping indefinitely.
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
/**
 * Clean up: shut down the cluster if it is still up and delete its data directory.
 *
 * @throws IOException if the hdfs data directory exists but could not be deleted
 */
public void tearDown() throws Exception {
  if (cluster == null) {
    // setUp presumably failed before the cluster was created; nothing to clean up.
    return;
  }
  if (cluster.isClusterUp()) {
    cluster.shutdown();
  }
  File data_dir = new File(cluster.getDataDirectory());
  if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
    throw new IOException("Could not delete hdfs directory in tearDown '" + data_dir + "'");
  }
}
private void sleepForever() { while (true) { try { Thread.sleep(SLEEP_INTERVAL_MS); if (!dfs.isClusterUp()) { LOG.info("Cluster is no longer up, exiting"); return; } } catch (InterruptedException _) { // nothing } } }
/**
 * Wait for the cluster to come up and get out of safemode, giving up after roughly
 * ten seconds of polling.
 *
 * @throws IOException if the cluster does not come up within the timeout, or if the
 *     wait is interrupted
 */
public void waitClusterUp() throws IOException {
  int i = 0;
  if (numDataNodes > 0) {
    while (!isClusterUp()) {
      try {
        LOG.warn("Waiting for the Mini HDFS Cluster to start...");
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it, and surface the
        // abandoned wait to the caller with the original cause attached.
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while waiting for Mini HDFS Cluster to start", e);
      }
      if (++i > 10) {
        final String msg = "Timed out waiting for Mini HDFS Cluster to start";
        LOG.error(msg);
        throw new IOException(msg);
      }
    }
  }
}
/**
 * Set the MiniDFSCluster.
 *
 * @param cluster cluster to use
 * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp)
 *     before it is set
 * @throws IllegalStateException if the passed cluster is up when it is required to be down
 * @throws IOException if the FileSystem could not be set from the passed dfs cluster
 */
public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown) throws IllegalStateException, IOException {
  // Only reject when a down cluster was explicitly required and the current one is live.
  if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
    throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
  }
  this.dfsCluster = cluster;
  // Re-derive the FileSystem handle from the newly installed cluster.
  this.setFs();
}
/**
 * JUnit cleanup: shut down the cluster if it is still up and delete its data directory.
 *
 * @throws IOException if the hdfs data directory exists but could not be deleted
 */
@After
public void tearDown() throws Exception {
  if (cluster == null) {
    // setUp presumably failed before the cluster was created; nothing to clean up.
    return;
  }
  if (cluster.isClusterUp()) {
    cluster.shutdown();
  }
  File data_dir = new File(cluster.getDataDirectory());
  if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
    throw new IOException("Could not delete hdfs directory in tearDown '" + data_dir + "'");
  }
}
/**
 * Assert that restarting the given cluster's NameNode fails, and that the cluster then
 * reports itself as down.
 */
private void ensureClusterRestartFails(MiniDFSCluster cluster) {
  boolean restarted = false;
  try {
    cluster.restartNameNode();
    restarted = true;
  } catch (Exception expected) {
    // This is the success path for this helper: the restart is supposed to blow up.
    LOG.info("Expected exception thrown " + expected);
  }
  if (restarted) {
    fail("Cluster should not have successfully started");
  }
  assertFalse(cluster.isClusterUp());
}
/**
 * Restart the given cluster's NameNode and verify the cluster comes back up.
 *
 * @throws IOException if the NameNode restart fails
 */
private void ensureClusterRestartSucceeds(MiniDFSCluster cluster) throws IOException {
  cluster.restartNameNode();
  // Block until the cluster's daemons report active before asserting liveness.
  cluster.waitActive();
  assertTrue(cluster.isClusterUp());
}
/**
 * Verify that MiniDFSCluster#isClusterUp eventually reports "down" after shutdown(),
 * even when the cluster was placed in safemode first.
 */
@Test(timeout=100000)
public void testIsClusterUpAfterShutdown() throws Throwable {
  Configuration conf = new HdfsConfiguration();
  // Give this cluster its own base dir so it cannot collide with other tests' data.
  File testDataCluster4 = new File(testDataPath, CLUSTER_4);
  String c4Path = testDataCluster4.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
  MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
  try {
    DistributedFileSystem dfs = cluster4.getFileSystem();
    // Enter safemode before shutting down to exercise the shutdown-while-in-safemode path.
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    cluster4.shutdown();
  } finally {
    // Poll until the cluster reports down; the @Test timeout bounds this loop.
    while(cluster4.isClusterUp()){
      Thread.sleep(1000);
    }
  }
}
/**
 * Runs before each test: if a previous test left the shared cluster down, rebuild it so
 * subsequent tests start from a healthy cluster.
 */
@Before
public void resetCluster() throws Exception {
  if (!cluster.isClusterUp()) {
    // Previous test seems to have left cluster in a bad state;
    // recreate the cluster to protect subsequent tests
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .build();
    // Wait until the rebuilt cluster's daemons are active before handing it to the test.
    cluster.waitActive();
  }
}
} finally { while (cluster.isClusterUp()){ LOG.error("Waiting for cluster to go down"); Thread.sleep(1000); assertFalse(cluster.isClusterUp()); cluster = null; for (int i = 0; i < 100; i++) {