@Override
public void evaluate() throws Throwable {
  try {
    System.setProperty(TEST_BUILD_DATA, "target/test/data");
    hadoopConf = hadoopConfSupplier.get();
    dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build();
    dfscluster.waitActive();
  } finally {
    if (dfscluster != null) {
      dfscluster.shutdown();
    }
    System.clearProperty(TEST_BUILD_DATA);
  }
}
};
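// A minimal sketch of the JUnit TestRule that an anonymous Statement like the
// one above typically belongs to. The rule class name, the field types, and
// the base.evaluate() call are assumptions, not part of the original fragment;
// the point is that the wrapped test normally runs between waitActive() and
// the shutdown in the finally block.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;

public class MiniDfsClusterRule implements TestRule {
  // MiniDFSCluster consults this system property when choosing its data dirs.
  private static final String TEST_BUILD_DATA = "test.build.data";
  private Configuration hadoopConf;
  private MiniDFSCluster dfscluster;

  @Override
  public Statement apply(Statement base, Description description) {
    return new Statement() {
      @Override
      public void evaluate() throws Throwable {
        try {
          System.setProperty(TEST_BUILD_DATA, "target/test/data");
          hadoopConf = new Configuration();
          dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build();
          dfscluster.waitActive();
          base.evaluate(); // run the wrapped test against the live cluster
        } finally {
          if (dfscluster != null) {
            dfscluster.shutdown();
          }
          System.clearProperty(TEST_BUILD_DATA);
        }
      }
    };
  }
}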
@Test
public void testCopyFilesParallel() throws Exception {
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  Path src = new Path("/src");
  fs.mkdirs(src);
  for (int i = 0; i < 50; i++) {
    WriteDataToHDFS(fs, new Path(src, String.valueOf(i)), 1024);
  }
  Path sub = new Path(src, "sub");
  fs.mkdirs(sub);
  for (int i = 0; i < 50; i++) {
    WriteDataToHDFS(fs, new Path(sub, String.valueOf(i)), 1024);
  }
  Path dst = new Path("/dst");
  List<Path> allFiles = FSUtils.copyFilesParallel(fs, src, fs, dst, conf, 4);

  assertEquals(102, allFiles.size());
  FileStatus[] list = fs.listStatus(dst);
  assertEquals(51, list.length);
  FileStatus[] sublist = fs.listStatus(new Path(dst, "sub"));
  assertEquals(50, sublist.length);
}
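// testCopyFilesParallel() above calls a WriteDataToHDFS helper that is not
// shown. A minimal sketch of what such a helper plausibly looks like: only
// the signature is implied by the call sites, the body is an assumption
// (FileSystem, Path, and FSDataOutputStream come from org.apache.hadoop.fs).
private static void WriteDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws IOException {
  byte[] data = new byte[dataSize]; // dataSize bytes of zeroes is enough for a copy test
  try (FSDataOutputStream out = fs.create(file)) {
    out.write(data, 0, dataSize);
  }
}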
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
cluster.waitActive();
fs = cluster.getFileSystem();
LOG.info("STARTED second instance.");
dfsCluster.waitActive();
LOG.info("Data Nodes restarted");
validateData(table, 1002);

dfsCluster.waitActive();
LOG.info("Data Nodes restarted");
validateData(table, 1004);
      .numDataNodes(numDataNodes).format(format)
      .racks(racks).nnTopology(topo).build();
  miniDFSCluster.waitActive();
  miniDFSCluster.transitionToActive(0);
} else {
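// The HA branch above passes an nnTopology(topo) that is not shown. The stock
// Hadoop test helper for a two-NameNode topology is
// MiniDFSNNTopology.simpleHATopology(); whether this particular code uses it
// is an assumption, but it matches the later transitionToActive(0) call.
MiniDFSNNTopology topo = MiniDFSNNTopology.simpleHATopology();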
@Before
public void setup() throws Exception {
  Configuration hdfsConf = new HdfsConfiguration();
  hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
  hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
  fs = cluster.getFileSystem();
  fs.enableErasureCodingPolicy(ecPolicy.getName());
  fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
  cluster.waitActive();

  conf = new Configuration();
  submitDir = new Path("/");
  testFile = new Path("/testfile");
  DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
  conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
}
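// setup() above assumes an ecPolicy field initialized elsewhere. One plausible
// way to obtain a built-in policy via the public SystemErasureCodingPolicies
// API; picking the first entry is an assumption, and any policy whose width
// fits the 15 datanodes would do.
ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getPolicies().get(0);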
public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
  setHadoopHomeWindows();
  Configuration conf = new HdfsConfiguration();
  conf.set("fs.defaultFS", "hdfs://localhost");
  File hdfsPath = new File(System.getProperty("user.dir") + File.separator + "hadoop"
      + File.separator + "hdfs");
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsPath.getAbsolutePath());
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .nameNodePort(12345)
      .nameNodeHttpPort(12341)
      .numDataNodes(1)
      .storagesPerDatanode(2)
      .format(true)
      .racks(null)
      .build();
  miniDFSCluster.waitActive();
  return miniDFSCluster;
}
}
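// A hedged usage sketch for getLocalHDFSCluster(): the caller owns the
// cluster's lifecycle, so every start should be paired with a shutdown().
// The path below is illustrative only.
MiniDFSCluster cluster = getLocalHDFSCluster();
try {
  FileSystem fs = cluster.getFileSystem();
  fs.mkdirs(new Path("/smoke-test")); // hypothetical path
} finally {
  cluster.shutdown(); // frees ports 12345/12341 and stops the datanode
}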
protected static void startCluster() throws IOException {
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
}
@BeforeClass
public static void setUp() throws IOException {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
}
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  dfs = cluster.getFileSystem();
}
@BeforeClass
public static void setup() throws Exception {
  // start a cluster with a single datanode
  cluster = new MiniDFSCluster.Builder(CONF).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();

  final String str = "hftp://" + CONF.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  hftpURI = new URI(str);
  hftpFs = cluster.getHftpFileSystem(0);
}
@Before
public void setUp() throws IOException {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  dir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();
}
@Before
public void startCluster() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  dfs = cluster.getFileSystem();
  SetSpanReceiver.SetHolder.spans.clear();
}
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
private Socket createSocket() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  LOG.info("MiniDFSCluster started.");
  return DFSOutputStream.createSocketForPipeline(
      new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
      1, cluster.getFileSystem().getClient());
}
}
/**
 * Set up the cluster and start NameNode and DataNodes according to the
 * corresponding scheme.
 */
void setupCluster() throws Exception {
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(clusterScheme.numDataNodes)
      .storageTypes(clusterScheme.storageTypes)
      .storageCapacities(clusterScheme.storageCapacities)
      .build();
  cluster.waitActive();
  dfs = cluster.getFileSystem();
}
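// setupCluster() above reads numDataNodes, storageTypes, and storageCapacities
// off a clusterScheme object. A minimal sketch of such a holder, assuming
// plain final fields; the real class may well differ. StorageType is
// org.apache.hadoop.fs.StorageType.
static class ClusterScheme {
  final int numDataNodes;
  final StorageType[][] storageTypes;   // one StorageType[] per datanode
  final long[][] storageCapacities;     // capacity in bytes per storage dir

  ClusterScheme(int numDataNodes, StorageType[][] storageTypes,
      long[][] storageCapacities) {
    this.numDataNodes = numDataNodes;
    this.storageTypes = storageTypes;
    this.storageCapacities = storageCapacities;
  }
}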
public void setUpMiniCluster(Configuration conf, boolean manageNameDfsDirs)
    throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
      .manageNameDfsDirs(manageNameDfsDirs).checkExitOnShutdown(false).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
private void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode(0);
  assertNotNull(nn);
  dn0 = cluster.getDataNodes().get(0);
  assertNotNull(dn0);
  blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
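// None of the setup methods above shows its teardown. A hedged sketch of the
// conventional counterpart (the field name mirrors the setups; whether a given
// class uses @After or @AfterClass is an assumption):
@After
public void shutDownCluster() {
  if (cluster != null) {
    cluster.shutdown(); // stops the NameNode and all DataNodes
    cluster = null;
  }
}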