Refine search
/**
 * Spins up a MiniDFSCluster rooted in a fresh temp folder and publishes the
 * wrapped Flink file system plus a "/tests" base path for the test class.
 */
@BeforeClass
public static void createHDFS() throws Exception {
    final File clusterRoot = TMP.newFolder();

    Configuration clusterConf = new Configuration();
    clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, clusterRoot.getAbsolutePath());

    hdfsCluster = new MiniDFSCluster.Builder(clusterConf).build();

    org.apache.hadoop.fs.FileSystem rawHdfs = hdfsCluster.getFileSystem();
    fs = new HadoopFileSystem(rawHdfs);
    basePath = new Path(rawHdfs.getUri().toString() + "/tests");
}
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5); conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 0); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096); conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0); conf.setBoolean("dfs.datanode.transferTo.allowed", false); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); FileSystem fileSys = cluster.getFileSystem(); try { Path p = new Path("preadtest.dat"); } finally { fileSys.close(); cluster.shutdown();
@BeforeClass public static void setup() throws IOException { // create configuration, dfs, file system localFs = FileSystem.getLocal(conf); testRootDir = new Path("target", TestJobResourceUploaderWithSharedCache.class.getName() + "-tmpDir") .makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); dfs = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); remoteFs = dfs.getFileSystem(); }
/** Boots a mini cluster with the test block size and caches its FSDirectory and client. */
@Before
public void setUp() throws IOException {
    final Configuration clusterConf = new Configuration();
    clusterConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

    cluster = new MiniDFSCluster.Builder(clusterConf).build();
    cluster.waitActive();

    dir = cluster.getNamesystem().getFSDirectory();
    dfs = cluster.getFileSystem();
}
/**
 * Starts a MiniDFSCluster under a class-scoped temp directory and records
 * its root URI. The whole class is skipped on Windows.
 */
@BeforeClass
public static void createHDFS() throws Exception {
    // Guard: do not run these tests on Windows.
    Assume.assumeTrue(!OperatingSystem.isWindows());

    final File clusterBase = CLASS_TEMP_DIR.newFolder();

    org.apache.hadoop.conf.Configuration clusterConf =
        new org.apache.hadoop.conf.Configuration();
    clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, clusterBase.getAbsolutePath());

    hdfsCluster = new MiniDFSCluster.Builder(clusterConf).build();
    hdfsRootPath = new Path(hdfsCluster.getURI());
}
/**
 * Builds a one-DataNode cluster, waits until it is active, and caches its
 * FileSystem in the static {@code fs} field.
 *
 * @param format        whether to format the NameNode and DataNodes before startup
 * @param xAttrsEnabled whether extended-attribute (XAttr) support is enabled
 * @throws Exception if cluster startup fails
 */
private static void initCluster(boolean format, boolean xAttrsEnabled) throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xAttrsEnabled);
    // Route the default file system through the stub implementation registered below.
    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
    conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);

    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .format(format)
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
}
/** Launches a MiniDFSCluster, with permission checking on, inside a new temp directory. */
public void start() throws IOException {
    baseDir = Files.createTempDirectory(baseDirName).toFile().getAbsoluteFile();

    Configuration clusterConf = new Configuration();
    clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    clusterConf.setBoolean("dfs.permissions.enabled", true);

    hdfsCluster = new MiniDFSCluster.Builder(clusterConf).build();
}
/**
 * Starts a 15-DataNode cluster with erasure coding enabled on the root,
 * writes one striped test file, and points the input format at it.
 */
@Before
public void setup() throws Exception {
    Configuration clusterConf = new HdfsConfiguration();
    clusterConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);

    // Pin name and edits directories to a known spot under the cluster base dir.
    String nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
    clusterConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir);
    clusterConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir);
    clusterConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);

    cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(15).build();
    fs = cluster.getFileSystem();

    // Apply the erasure-coding policy at the root so the test file is striped.
    fs.enableErasureCodingPolicy(ecPolicy.getName());
    fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
    cluster.waitActive();

    conf = new Configuration();
    submitDir = new Path("/");
    testFile = new Path("/testfile");
    DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
    conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
}
/** Brings up a two-DataNode cluster using the test block size for both min and default. */
@BeforeClass
public static void beforeClass() throws IOException {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
}
/** Boots a freshly formatted single-DataNode cluster with XAttrs enabled. */
@BeforeClass
public static void init() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);

    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .format(true)
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
}
/**
 * Starts a DFS cluster plus a MiniMR YARN cluster configured for Kerberos-style
 * principals, starts the delegation-token secret manager, and qualifies the
 * shared test path {@code p1}.
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    conf.set(YarnConfiguration.RM_PRINCIPAL,
        "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

    dfsCluster = new MiniDFSCluster.Builder(conf)
        .checkExitOnShutdown(true)
        .numDataNodes(numSlaves)
        .format(true)
        .racks(null)
        .build();

    mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
    mrCluster.init(conf);
    mrCluster.start();

    // Start the NameNode's delegation-token secret-manager threads.
    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();

    FileSystem fs = dfsCluster.getFileSystem();
    p1 = fs.makeQualified(new Path("file1"));
}
/**
 * Starts a one-DataNode cluster with a customized WebHDFS authentication
 * filter and opens a webhdfs:// FileSystem against the NameNode's HTTP port.
 */
@BeforeClass
public static void setUp() throws IOException {
    conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        CustomizedFilter.class.getName());
    // Ephemeral HTTP port on localhost.
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0");

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    InetSocketAddress httpAddr = cluster.getNameNode().getHttpAddress();
    fs = FileSystem.get(
        URI.create("webhdfs://" + NetUtils.getHostPortString(httpAddr)), conf);
    cluster.waitActive();
}
/** Boots a freshly formatted cluster with REPLICATION DataNodes and caches its client. */
@Before
public void setUp() throws Exception {
    conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(REPLICATION)
        .format(true)
        .build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
}
// Spins up a two-DataNode mini cluster, qualifies the /nested base path,
// and generates the source files used by the tests.
protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    base = cluster.getFileSystem().makeQualified(new Path("/nested"));
    src = generateSources(conf);
}
// NOTE(review): tearDown body continues beyond this snippet — not visible here.
protected void tearDown() throws Exception {
/** Starts a two-DataNode cluster with default configuration and waits for it. */
@BeforeClass
public static void beforeClass() throws IOException {
    conf = new Configuration();

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    cluster = builder.numDataNodes(2).build();
    cluster.waitActive();
}
/**
 * Brings up a freshly formatted one-DataNode cluster and seeds N_FILES source
 * files under /tmp/source, accumulating their total size.
 */
@BeforeClass
public static void setup() throws Exception {
    cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1)
        .format(true)
        .build();

    totalFileSize = 0;
    for (int fileIdx = 0; fileIdx < N_FILES; ++fileIdx) {
        totalFileSize += createFile("/tmp/source/" + fileIdx, SIZEOF_EACH_FILE);
    }
}
/** Starts a single-DataNode cluster with XAttr support switched on. */
@BeforeClass
public static void setUp() throws IOException {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    cluster = builder.numDataNodes(1).build();
    cluster.waitActive();
}
/**
 * Starts a one-DataNode cluster with a 4 KiB block size and the HDFS
 * FileSystem cache disabled, then caches the client.
 */
@BeforeClass
public static void setupCluster() throws IOException {
    Configuration clusterConf = new HdfsConfiguration();
    clusterConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    // Disable the client-side FileSystem cache for hdfs:// URIs.
    clusterConf.set("fs.hdfs.impl.disable.cache", "true");

    cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
    fs = cluster.getFileSystem();
}
/**
 * Builds and returns a running MiniDFSCluster.
 *
 * @param port  port the NameNode should listen on
 * @param numDN number of DataNodes to start
 * @param map   extra configuration key/value overrides applied before startup
 * @return the started cluster
 * @throws Exception if cluster startup fails
 */
public static MiniDFSCluster initMiniCluster(int port, int numDN,
    HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");

  Configuration hconf = new HdfsConfiguration();
  map.forEach(hconf::set);
  hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");

  return new MiniDFSCluster.Builder(hconf)
      .numDataNodes(numDN)
      .nameNodePort(port)
      .build();
}
/**
 * Clears the base directory, binds every DataNode endpoint to an ephemeral
 * localhost port, and starts a cluster with zero DataNodes.
 */
@BeforeClass
public static void setUp() throws Exception {
    clearBaseDir();

    Configuration clusterConf = new HdfsConfiguration();
    // All DataNode addresses/ports are ephemeral on localhost.
    clusterConf.setInt(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, 0);
    clusterConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0");
    clusterConf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "localhost:0");
    clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0");

    cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(0).build();
    cluster.waitActive();
}