@BeforeClass
public static void createHDFS() throws Exception {
    final File baseDir = TMP.newFolder();
    Configuration hdConf = new Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();
    org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
    fs = new HadoopFileSystem(hdfs);
    basePath = new Path(hdfs.getUri().toString() + "/tests");
}
@BeforeClass
public static void createHDFS() throws Exception {
    final File baseDir = TEMP_FOLDER.newFolder();
    final Configuration hdConf = new Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();
    final org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
    fileSystem = new HadoopFileSystem(hdfs);
    basePath = new Path(hdfs.getUri() + "/tests");
}
@BeforeClass
public static void createHDFS() throws Exception {
    // Skip on Windows, where the HDFS mini cluster does not run reliably.
    Assume.assumeTrue(!OperatingSystem.isWindows());
    final File tempDir = CLASS_TEMP_DIR.newFolder();
    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();
    hdfsRootPath = new Path(hdfsCluster.getURI());
}
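// A minimal teardown sketch to pair with the createHDFS() setups above. It is
// not part of the original snippets; it assumes the same static hdfsCluster
// field. MiniDFSCluster.shutdown() stops the NameNode and DataNodes and
// releases the base directory.
@AfterClass
public static void destroyHDFS() {
    if (hdfsCluster != null) {
        hdfsCluster.shutdown();
        hdfsCluster = null;
    }
}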
private Socket createSocket() throws IOException {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    LOG.info("MiniDFSCluster started.");
    return DFSOutputStream.createSocketForPipeline(
        new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
        1, cluster.getFileSystem().getClient());
}
@BeforeClass
public static void setup() throws Exception {
    // Start a cluster with a single DataNode.
    cluster = new MiniDFSCluster.Builder(CONF).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final String str = "hftp://" + CONF.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    hftpURI = new URI(str);
    hftpFs = cluster.getHftpFileSystem(0);
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException, LoginException, URISyntaxException {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
    defaultWorkingDirectory = fc.makeQualified(
        new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@BeforeClass
public static void setupCluster() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    // Disable FileSystem caching so each test gets a fresh hdfs:// instance.
    conf.set("fs.hdfs.impl.disable.cache", "true");
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    fs = cluster.getFileSystem();
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
    cluster = new MiniDFSCluster.Builder(clusterConf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
        .numDataNodes(2)
        .build();
    cluster.waitClusterUp();
    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
}
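// A small usage sketch (not from the original snippet) for the federated setup
// above; fHdfs and fHdfs2 are the snippet's own fields. Each handle talks to a
// different NameNode, so the two namespaces are written independently.
fHdfs.mkdirs(new Path("/ns0-dir"));
fHdfs2.mkdirs(new Path("/ns1-dir"));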
@BeforeClass
public static void create() throws IOException {
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(1)
        .format(true)
        .build();
    fs = cluster.getFileSystem();
    buildExpectedValuesMap();
}
public void start() throws IOException {
    baseDir = Files.createTempDirectory(baseDirName).toFile().getAbsoluteFile();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.setBoolean("dfs.permissions.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
}
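// A hypothetical stop() counterpart to start() above, assuming the same
// baseDir and hdfsCluster fields; it is not part of the original snippet.
// FileUtil.fullyDelete is Hadoop's recursive-delete helper.
public void stop() {
    if (hdfsCluster != null) {
        hdfsCluster.shutdown();
        hdfsCluster = null;
    }
    if (baseDir != null) {
        FileUtil.fullyDelete(baseDir);
    }
}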
miniDFS = new MiniDFSCluster.Builder(conf).build();
@Before
public void setup() throws Exception {
    Configuration hdfsConf = new HdfsConfiguration();
    hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
    hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
    hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
    hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
    fs = cluster.getFileSystem();
    fs.enableErasureCodingPolicy(ecPolicy.getName());
    fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
    cluster.waitActive();

    conf = new Configuration();
    submitDir = new Path("/");
    testFile = new Path("/testfile");
    DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
    conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
}
public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
    setHadoopHomeWindows();
    Configuration conf = new HdfsConfiguration();
    conf.set("fs.defaultFS", "hdfs://localhost");
    File hdfsPath = new File(System.getProperty("user.dir")
        + File.separator + "hadoop" + File.separator + "hdfs");
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsPath.getAbsolutePath());
    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
        .nameNodePort(12345)
        .nameNodeHttpPort(12341)
        .numDataNodes(1)
        .storagesPerDatanode(2)
        .format(true)
        .racks(null)
        .build();
    miniDFSCluster.waitActive();
    return miniDFSCluster;
}
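// A usage sketch for the factory above (names taken from the snippet; the
// "/smoke-test" path is illustrative): the fixed NameNode port makes the
// cluster reachable at hdfs://localhost:12345, and the cluster should always
// be shut down when the caller is done.
MiniDFSCluster cluster = getLocalHDFSCluster();
try {
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/smoke-test"));
} finally {
    cluster.shutdown();
}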
@BeforeClass
public static void setup() throws IOException {
    // Create the configuration, mini DFS cluster, and file systems.
    localFs = FileSystem.getLocal(conf);
    testRootDir = new Path("target",
        TestJobResourceUploaderWithSharedCache.class.getName() + "-tmpDir")
        .makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    remoteFs = dfs.getFileSystem();
}
// Construction with an explicit NameNode topology; transitionToActive(0)
// makes the first NameNode active, which implies an HA topology.
miniDFSCluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(numDataNodes).format(format)
    .racks(racks).nnTopology(topo).build();
miniDFSCluster.waitActive();
miniDFSCluster.transitionToActive(0);

// Second construction without a topology (single default NameNode); note that
// the field is reassigned without shutting down the first cluster.
miniDFSCluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(numDataNodes).format(format)
    .racks(racks).build();
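// `topo` is not defined in the snippet above; since transitionToActive(0)
// implies an HA pair, a plausible choice (an assumption, not from the
// original) is Hadoop's simple two-NameNode HA topology:
MiniDFSNNTopology topo = MiniDFSNNTopology.simpleHATopology();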
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@Override
public void evaluate() throws Throwable {
    try {
        System.setProperty(TEST_BUILD_DATA, "target/test/data");
        hadoopConf = hadoopConfSupplier.get();
        dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build();
        dfscluster.waitActive();
    } finally {
        if (dfscluster != null) {
            dfscluster.shutdown();
        }
        System.clearProperty(TEST_BUILD_DATA);
    }
}
@BeforeClass
public static void createHDFS() throws Exception {
    // Skip on Windows, where the HDFS mini cluster does not run reliably.
    Assume.assumeTrue(!OperatingSystem.isWindows());
    final File tempDir = TEMP_DIR.newFolder();
    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();
    hdfsRootPath = new Path(hdfsCluster.getURI());
}