private void setupMiniDfsAndMrClusters() {
    try {
        final int dataNodes = 1;     // There will be 1 data node
        final int taskTrackers = 1;  // There will be 1 task tracker node
        Configuration config = new Configuration();

        // Build and start the mini DFS and MapReduce clusters
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", "target/tmp/logs/");
        }
        m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
        m_fileSys = m_dfs.getFileSystem();
        m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);

        // Create the hadoop-site.xml configuration file
        File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
        conf_dir.mkdirs();
        File conf_file = new File(conf_dir, "hadoop-site.xml");

        // Write the necessary config info to hadoop-site.xml
        m_conf = m_mr.createJobConf();
        m_conf.setInt("mapred.submit.replication", 1);
        m_conf.set("dfs.datanode.address", "0.0.0.0:0");
        m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
        m_conf.writeXml(new FileOutputStream(conf_file));

        // Set the system properties needed by Pig
        System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
        System.setProperty("namenode", m_conf.get("fs.default.name"));
        System.setProperty("junit.hadoop.conf", conf_dir.getPath());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
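The result above creates the clusters but its matching teardown is not shown. A minimal companion sketch, assuming the same m_mr and m_dfs fields (MapReduce shut down before DFS):

// Hypothetical companion teardown for the setup above; not part of the
// original result. Shut down the MapReduce cluster first, then DFS.
private void shutdownMiniDfsAndMrClusters() {
    if (m_mr != null) {
        m_mr.shutdown();
        m_mr = null;
    }
    if (m_dfs != null) {
        m_dfs.shutdown();
        m_dfs = null;
    }
}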
@BeforeClass
public static void setUp() throws IOException {
    int dataNodes = 1;
    int port = 29999;
    JobConf conf = new JobConf();
    channel = new MemoryChannel();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions", "true");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("fs.default.name", "hdfs://localhost:29999");
    dfsCluster = new MiniDFSCluster(port, conf, dataNodes, true, true, null, null);
    fileSystem = dfsCluster.getFileSystem();
    fileSystem.delete(new Path("/logs"), true);
    source = new AvroSource();
    sink = new KaaHdfsSink();
    logSchemasRootDir = new File("schemas");
    if (logSchemasRootDir.exists()) {
        logSchemasRootDir.delete();
    }
    prepareSchema(logSchemasRootDir);
}
protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 2, true, null);
}

protected void tearDown() throws Exception {
miniDfs = new MiniDFSCluster(new Configuration(), 1, true, null);
miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
FSDataOutputStream out = miniDfs.getFileSystem().create(
    new Path("/path/to/schema/schema.avsc"));
out.writeBytes(RECORD_SCHEMA);
out.close();
String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";
Configuration conf = new Configuration();
Properties tbl = createPropertiesForHiveAvroSchemaUrl(onHDFS);
serDe.initialize(conf, tbl);
/**
 * Creates the Hadoop instance based on the constructor configuration
 * before each test case is run.
 *
 * @throws Exception
 */
protected void setUp() throws Exception {
    super.setUp();
    if (localFS) {
        fileSystem = FileSystem.getLocal(new JobConf());
    } else {
        dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null);
        fileSystem = dfsCluster.getFileSystem();
    }
    if (!localMR) {
        //noinspection deprecation
        mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getUri().toString(), 1);
    }
}
protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 2, true, null);
    base = cluster.getFileSystem().makeQualified(new Path("/nested"));
    src = generateSources(conf);
}

protected void tearDown() throws Exception {
@BeforeClass
public static void setupFS() throws IOException {
    final Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    // The Builder is not compatible with hadoop1:
    //cluster = new MiniDFSCluster.Builder(conf).build();
    dfs = getFS.invoke(cluster);
    lfs = FileSystem.getLocal(conf);
}
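The commented-out Builder line above is the Hadoop 2 replacement for the deprecated constructor. A minimal sketch of that variant, assuming Hadoop 2 and the same static fields (there, getFileSystem() can be assigned directly, so the getFS reflection workaround is unnecessary):

// Hadoop 2 variant of the setup above, using the Builder API that does
// not exist on hadoop1. Sketch only; assumes the same static fields.
@BeforeClass
public static void setupFS() throws IOException {
    final Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    dfs = cluster.getFileSystem();
    lfs = FileSystem.getLocal(conf);
}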
dfsCluster = new MiniDFSCluster(jobConfig, dataNodes, true, null);
fileSystem = dfsCluster.getFileSystem();
mrCluster = new MiniMRCluster(0, 0, taskTrackers, fileSystem.getUri().toString(),
    1, null, null, null, jobConfig);
protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
    dfs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(NUM_HADOOP_SLAVES, dfs.getUri().toString(), 1);
}

protected void tearDown() throws Exception {
final Configuration dfsConf = new Configuration();
cluster = new MiniDFSCluster(dfsConf, 2, true, null);
cluster.waitActive();

final Configuration userConf = new Configuration();
final FileSystem fs = USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
public void testWithDFS() throws IOException {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
        final int taskTrackers = 4;
        JobConf conf = new JobConf();
        conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
        dfs = new MiniDFSCluster(conf, 4, true, null);
        fileSys = dfs.getFileSystem();
        mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1,
            null, null, conf);
        runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
    } finally {
        if (dfs != null) {
            dfs.shutdown();
        }
        if (mr != null) {
            mr.shutdown();
        }
    }
}
protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 2, true, null);
    mrCluster = new MiniMRCluster(2, dfsCluster.getFileSystem().getUri().toString(), 1);
}

protected void tearDown() throws Exception {
public static MiniDFSCluster setupClass(String dataDir) throws Exception {
    File dir = new File(dataDir);
    new File(dataDir).mkdirs();
    savedLocale = Locale.getDefault();
    // TODO: we HACK around HADOOP-9643
    Locale.setDefault(Locale.ENGLISH);
    int dataNodes = 2;
    Configuration conf = new Configuration();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions.enabled", "false");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hdfs.minidfs.basedir",
        dir.getAbsolutePath() + File.separator + "hdfsBaseDir");
    conf.set("dfs.namenode.name.dir",
        dir.getAbsolutePath() + File.separator + "nameNodeNameDir");
    System.setProperty("test.build.data",
        dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data",
        dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "cache");
    System.setProperty("solr.lock.type", "hdfs");
    MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
    return dfsCluster;
}
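The companion teardown is not part of this result. A sketch of what it might look like, assuming the same static savedLocale field saved above for the HADOOP-9643 workaround:

// Hypothetical companion to setupClass: shut down the cluster and
// restore the Locale saved before the HADOOP-9643 workaround.
public static void teardownClass(MiniDFSCluster dfsCluster) {
    if (dfsCluster != null) {
        dfsCluster.shutdown();
    }
    if (savedLocale != null) {
        Locale.setDefault(savedLocale);
    }
}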
this.dfsCluster = new MiniDFSCluster(8020 /* Lily change */, this.conf, servers,
    format /* Lily change */, true, true, null, null, hosts, null);
FileSystem fs = this.dfsCluster.getFileSystem();
this.conf.set("fs.defaultFS", fs.getUri().toString());
@Override
public void setUp() {
    try {
        // Create configuration, DFS, file system, and MapReduce cluster
        dfs = new MiniDFSCluster(new Configuration(), 1, true, null);
        fileSys = dfs.getFileSystem();
        mr = new MiniMRCluster(2, fileSys.getUri().toString(), 1);
    } catch (IOException ioe) {
        tearDown();
    }
}
@BeforeClass
public static void setupFS() throws IOException {
    final Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    // The Builder is not compatible with hadoop1:
    //cluster = new MiniDFSCluster.Builder(conf).build();
    dfs = getFS.invoke(cluster);
}
public void setUp() throws Exception {
    dfs = new MiniDFSCluster(conf, 4, true, null);
    fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
            return dfs.getFileSystem();
        }
    });

    // Home directories for users
    mkdir(fs, "/user", "nobody", "nogroup", (short)01777);
    mkdir(fs, "/user/alice", "alice", "nogroup", (short)0755);
    mkdir(fs, "/user/bob", "bob", "nogroup", (short)0755);

    // Staging directory root with sticky bit
    UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
    mkdir(fs, "/staging", MR_UGI.getShortUserName(), "nogroup", (short)01777);

    JobConf mrConf = new JobConf();
    mrConf.set(JTConfig.JT_STAGING_AREA_ROOT, "/staging");
    mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(), 1,
        null, null, MR_UGI, mrConf);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
}
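The mkdir helper called above is not included in this result. A plausible sketch, assuming it creates the directory and then sets owner, group, and permissions:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Hypothetical version of the mkdir helper referenced above: create the
// directory, then set its owner, group, and mode.
static void mkdir(FileSystem fs, String dir, String user, String group,
                  short mode) throws IOException {
    Path path = new Path(dir);
    fs.mkdirs(path);
    fs.setOwner(path, user, group);
    fs.setPermission(path, new FsPermission(mode));
}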
@Override
protected void setUp() throws Exception {
    super.setUp();
    final int taskTrackers = 4;
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 4, true, null);
    jc = new JobConf();
    jc.setClass(JTConfig.JT_TASK_SCHEDULER, TestTaskScheduler.class, TaskScheduler.class);
    jc.setLong(JTConfig.JT_RUNNINGTASKS_PER_JOB, 10L);
    mrCluster = new MiniMRCluster(0, 0, taskTrackers,
        dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jc);
}
@BeforeClass
@SuppressWarnings("deprecation")
public static void setupFS() throws IOException {
    if (cluster == null) {
        Configuration c = new Configuration();
        c.setLong("fs.trash.interval", 1);
        c.setBoolean("dfs.webhdfs.enabled", true);
        // If this fails with "The directory is already locked", set umask to 0022
        cluster = new MiniDFSCluster(c, 1, true, null);
        //cluster = new MiniDFSCluster.Builder(new Configuration()).build();
        dfs = getFS.invoke(cluster);
        conf = new Configuration(dfs.getConf());
        lfs = FileSystem.getLocal(conf);
    }
}
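None of the @BeforeClass results above include their cleanup. A minimal @AfterClass sketch, assuming the same static cluster field:

// Hypothetical cleanup matching the @BeforeClass methods above.
@AfterClass
public static void teardownFS() throws IOException {
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}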