public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class)
      .setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class)
      .setLevel(org.apache.log4j.Level.ERROR);

  TraceUtil.initTracer(conf);
  this.dfsCluster =
      new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up.
  this.dfsCluster.waitClusterUp();

  // Reset the test directory for the test file system.
  dataTestDirOnTestFS = null;
  String dataTestDir = getDataTestDir().toString();
  conf.set(HConstants.HBASE_DIR, dataTestDir);
  LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

  return this.dfsCluster;
}
@Before
public void startCluster() throws Exception {
  TEST_UTIL.startMiniCluster(SLAVES);
  TEST_UTIL.getDFSCluster().waitClusterUp();
  TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(120 * 1000);
  cluster = TEST_UTIL.getMiniHBaseCluster();
  admin = TEST_UTIL.getAdmin();
  admin.setBalancerRunning(false, true);
}
@Before
public void startCluster() throws Exception {
  TEST_UTIL.startMiniCluster(SLAVES);
  TEST_UTIL.getDFSCluster().waitClusterUp();
  cluster = TEST_UTIL.getMiniHBaseCluster();
  master = TEST_UTIL.getMiniHBaseCluster().getMaster();
  admin = TEST_UTIL.getAdmin();
  admin.setBalancerRunning(false, true);
}
LOG.info("Starting a new datanode with the name=" + host4); cluster.startDataNodes(conf, 1, true, null, new String[]{"/r4"}, new String[]{host4}, null); cluster.waitClusterUp();
@BeforeClass
public static void clusterSetupAtBegining()
    throws IOException, LoginException, URISyntaxException {
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  fc = FileContext.getFileContext(cluster.getURI(), CONF);
  defaultWorkingDirectory = fc.makeQualified(
      new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
private void init(Configuration conf) throws IOException {
  if (cluster != null) {
    cluster.shutdown();
  }
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitClusterUp();
  fileSystem = cluster.getFileSystem();
}
@Override
public void init() throws InitUnitException {
  try {
    miniDFSCluster = new MiniDFSCluster.Builder(gatherConfigs()).build();
    miniDFSCluster.waitClusterUp();
    createHomeDirectory();
    cmdUnit.declare("hdfs", new HdfsShell(getConfig()));
  } catch (IOException e) {
    throw new InitUnitException("Failed to start hdfs", e);
  }
}
@BeforeClass
public static void clusterSetupAtBegining()
    throws IOException, LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem();
}
@BeforeClass
public static void clusterSetupAtBegining()
    throws IOException, LoginException, URISyntaxException {
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  URI uri0 = cluster.getURI(0);
  fc = FileContext.getFileContext(uri0, CONF);
  defaultWorkingDirectory = fc.makeQualified(
      new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@Before
public void startUpCluster() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  nn = cluster.getNameNode();
  assertNotNull("Failed to get NameNode", nn);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  datanodes = cluster.getDataNodes();
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
}
public static void createCluster() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.addResource(CONTRACT_HDFS_XML);
  // Hack in a 256 byte block size.
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();
}
@BeforeClass
public static void init() throws Exception {
  Configuration conf = new HdfsConfiguration();
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  dfsCluster.waitClusterUp();
  fs = dfsCluster.getFileSystem();
  codec = CryptoCodec.getInstance(conf);
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();
  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem(0);
}
@BeforeClass
public static void clusterSetupAtBegining()
    throws IOException, LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  fc = FileContext.getFileContext(cluster.getURI(0), CONF);
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
}
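All of the setups above follow the same lifecycle: build the MiniDFSCluster, call waitClusterUp() before touching the filesystem, and shut the cluster down when the test finishes. The following is a minimal self-contained sketch of that pattern, assuming JUnit 4 and the hadoop-hdfs test artifact on the classpath; the class, field, and test names are illustrative and not taken from any snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

// Illustrative class name; a sketch of the build / waitClusterUp / shutdown lifecycle.
public class MiniDfsLifecycleSketch {

  private static MiniDFSCluster cluster;
  private static FileSystem fs;

  @BeforeClass
  public static void startCluster() throws Exception {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    // Block until the NameNode and both DataNodes report that the cluster is up.
    cluster.waitClusterUp();
    fs = cluster.getFileSystem();
  }

  @AfterClass
  public static void stopCluster() throws Exception {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test
  public void writesAndReadsBackAFile() throws Exception {
    Path path = new Path("/tmp/example.txt");
    try (FSDataOutputStream out = fs.create(path)) {
      out.writeUTF("hello");
    }
    try (FSDataInputStream in = fs.open(path)) {
      Assert.assertEquals("hello", in.readUTF());
    }
  }
}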