@AfterMethod
public void tearDown() throws IOException {
  // Clean up the staging and/or output directories if necessary
  File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
  if (testRootDir.exists()) {
    FileUtil.fullyDelete(testRootDir);
  }
}
@AfterClass
public void tearDown() throws IOException {
  // Clean up the staging and/or output directories if necessary
  File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
  if (testRootDir.exists()) {
    FileUtil.fullyDelete(testRootDir);
  }
}
public static File handleWorkDir() throws IOException {
  testName = "test_hcat_partitionpublish_" + Math.abs(new Random().nextLong());
  String testDir = System.getProperty("test.data.dir", "./");
  testDir = testDir + "/" + testName + "/";
  File workDir = new File(new File(testDir).getCanonicalPath());
  FileUtil.fullyDelete(workDir);
  workDir.mkdirs();
  return workDir;
}

@BeforeClass
private static void createWorkDir() throws IOException {
  String testDir = System.getProperty("test.tmp.dir", "./");
  testDir = testDir + "/test_multiout_" + Math.abs(new Random().nextLong()) + "/";
  workDir = new File(new File(testDir).getCanonicalPath());
  FileUtil.fullyDelete(workDir);
  workDir.mkdirs();
}
@BeforeClass
public static void createHDFS() {
  try {
    baseDir = new File("./target/localfs/fs_tests").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
    localFsURI = "file:///" + baseDir + "/";
    localFs = new org.apache.hadoop.fs.Path(localFsURI).getFileSystem(hdConf);
  } catch (Throwable e) {
    e.printStackTrace();
    Assert.fail("Test failed " + e.getMessage());
  }
}
public static void writeLinesTextFile(File dir) throws IOException {
  FileUtil.fullyDelete(dir);
  File fileLines = new File(dir, "lines.avro");
  fileLines.getParentFile().mkdirs();
  try (PrintStream out = new PrintStream(fileLines)) {
    for (String line : LINES) {
      out.println(line);
    }
  }
}
@BeforeClass
public static void setUpTestDataDir() throws Exception {
  LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
  File f = new File(TEST_WAREHOUSE_DIR);
  if (f.exists()) {
    FileUtil.fullyDelete(f);
  }
  Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
}
@After
public void tearDown() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  FileUtil.fullyDelete(new File(c.get("hadoop.tmp.dir")));
}
@BeforeClass
public static void beforeClass() throws Exception {
  UTIL.startMiniCluster();
  FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem());
  FileUtil.fullyDelete(new File(OUTPUT_DIR));
}
public static void writeLinesBytesFile(File dir) throws IOException {
  FileUtil.fullyDelete(dir);
  File fileLines = new File(dir + "/lines.avro");
  fileLines.getParentFile().mkdirs();
  DatumWriter<ByteBuffer> writer = new GenericDatumWriter<>();
  try (DataFileWriter<ByteBuffer> out = new DataFileWriter<>(writer)) {
    out.create(Schema.create(Schema.Type.BYTES), fileLines);
    for (String line : LINES) {
      out.append(ByteBuffer.wrap(line.getBytes(StandardCharsets.UTF_8)));
    }
  }
}
@Test
public void testsJob8D() throws Exception {
  String input = "src/test/resources/data/base_cuboid/";
  String output = "target/test-output/8d_cuboid";
  String cubeName = "test_kylin_cube_with_slr_1_new_segment";
  String segmentName = "20130331080000_20131212080000";
  String jobname = "8d_cuboid";
  String level = "1";
  FileUtil.fullyDelete(new File(output));
  String[] args = { "-input", input, "-cubename", cubeName, "-segmentname", segmentName,
      "-output", output, "-jobname", jobname, "-level", level };
  assertEquals("Job failed", 0, ToolRunner.run(conf, new NDCuboidJob(), args));
}
@Test
public void testJob7D() throws Exception {
  final String input = "src/test/resources/data/8d_cuboid/";
  final String output = "target/test-output/7d_cuboid";
  final String cubeName = "test_kylin_cube_with_slr_1_new_segment";
  String segmentName = "20130331080000_20131212080000";
  String jobname = "7d_cuboid";
  String level = "2";
  FileUtil.fullyDelete(new File(output));
  String[] args = { "-input", input, "-cubename", cubeName, "-segmentname", segmentName,
      "-output", output, "-jobname", jobname, "-level", level };
  assertEquals("Job failed", 0, ToolRunner.run(conf, new NDCuboidJob(), args));
}
FileUtil.fullyDelete(tempConfPathForSecureRun);
tempConfPathForSecureRun = null;
File target = new File("../target" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
if (!target.mkdirs()) {
  LOG.warn("Error creating dirs to {}", target);
@Test
public void test() throws Exception {
  // String input =
  // "src/test/resources/data/base_cuboid,src/test/resources/data/6d_cuboid";
  String output = "target/test-output/merged_cuboid";
  String cubeName = "test_kylin_cube_with_slr_ready";
  String jobname = "merge_cuboid";

  File baseFolder = File.createTempFile("kylin-f24668f6-dcff-4cb6-a89b-77f1119df8fa-", "base");
  FileUtils.forceDelete(baseFolder);
  baseFolder.mkdir();
  FileUtils.copyDirectory(new File("src/test/resources/data/base_cuboid"), baseFolder);
  FileUtils.forceDeleteOnExit(baseFolder);

  File eightFoler = File.createTempFile("kylin-f24668f6-dcff-4cb6-a89b-77f1119df8fa-", "8d");
  FileUtils.forceDelete(eightFoler);
  eightFoler.mkdir();
  FileUtils.copyDirectory(new File("src/test/resources/data/base_cuboid"), eightFoler);
  FileUtils.forceDeleteOnExit(eightFoler);

  FileUtil.fullyDelete(new File(output));

  // CubeManager cubeManager =
  // CubeManager.getInstanceFromEnv(getTestConfig());
  String[] args = { "-input", baseFolder.getAbsolutePath() + "," + eightFoler.getAbsolutePath(),
      "-cubename", cubeName, "-segmentname", "20130331080000_20131212080000",
      "-output", output, "-jobname", jobname };
  assertEquals("Job failed", 0, ToolRunner.run(conf, new MergeCuboidJob(), args));
}
@BeforeClass
public static void setup() throws Exception {
  System.clearProperty("mapred.job.tracker");

  String testDir = System.getProperty("test.tmp.dir", "./");
  testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
  workDir = new File(new File(testDir).getCanonicalPath());
  FileUtil.fullyDelete(workDir);
  workDir.mkdirs();

  warehousedir = new Path(System.getProperty("test.warehouse.dir"));

  HiveConf metastoreConf = new HiveConf();
  metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());

  // Run hive metastore server
  MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf);
  // Read the warehouse dir, which can be changed so multiple MetaStore tests could be run on
  // the same server
  warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE));

  // LocalJobRunner does not work with mapreduce OutputCommitter. So need
  // to use MiniMRCluster. MAPREDUCE-2350
  Configuration conf = new Configuration(true);
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

  FileSystem fs = FileSystem.get(conf);
  System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());

  mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
  mrConf = mrCluster.createJobConf();

  initializeSetup(metastoreConf);

  warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}
private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException {
  Job job = null;
  try {
    job = new Job(UTIL.getConfiguration(), "test123");
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    Scan scan = new Scan();
    scan.addColumn(FAMILY_NAME, COLUMN_NAME);
    scan.setTimeRange(MINSTAMP, MAXSTAMP);
    scan.setMaxVersions();
    TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan,
        ProcessTimeRangeMapper.class, Text.class, Text.class, job);
    job.waitForCompletion(true);
  } catch (IOException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  } finally {
    if (job != null) {
      FileUtil.fullyDelete(
          new File(job.getConfiguration().get("hadoop.tmp.dir")));
    }
  }
}
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
  clientSslFactory.destroy();
}
@Test
public void testMiniClusterWithSSLOn() throws Exception {
  final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + "/"
      + TestHBaseTestingUtility.class.getSimpleName();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHBaseTestingUtility.class);
  String keystoresDir = new File(BASEDIR).getAbsolutePath();

  HBaseTestingUtility hbt = new HBaseTestingUtility();
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, hbt.getConfiguration(), false);

  hbt.getConfiguration().set("hbase.ssl.enabled", "true");
  hbt.getConfiguration().addResource("ssl-server.xml");
  hbt.getConfiguration().addResource("ssl-client.xml");

  MiniHBaseCluster cluster = hbt.startMiniCluster();
  try {
    assertEquals(1, cluster.getLiveRegionServerThreads().size());
  } finally {
    hbt.shutdownMiniCluster();
  }
}
} finally {
  t.close();
  FileUtil.fullyDelete(new File(OUTPUT_DIR));
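Taken together, these results follow one recurring pattern: resolve a scratch directory for the test, remove any leftovers from earlier runs with org.apache.hadoop.fs.FileUtil.fullyDelete(File), recreate the directory before the tests, and fully delete it again when the test class finishes. Below is a minimal, self-contained sketch of that pattern using JUnit 4; the class name, the "test.tmp.dir" default, and the "test_scratch" directory name are illustrative placeholders, not taken from any of the projects above.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class TempDirLifecycleTest {

  // Hypothetical scratch directory; the tests above typically derive it from
  // a system property such as "test.tmp.dir", "test.data.dir" or "test.build.dir".
  private static File workDir;

  @BeforeClass
  public static void createWorkDir() throws IOException {
    String testDir = System.getProperty("test.tmp.dir", "./target");
    workDir = new File(new File(testDir, "test_scratch").getCanonicalPath());
    FileUtil.fullyDelete(workDir);        // clear anything left over from a previous run
    Assert.assertTrue(workDir.mkdirs());  // fail fast if the directory cannot be created
  }

  @Test
  public void scratchDirIsUsable() {
    Assert.assertTrue(workDir.isDirectory());
  }

  @AfterClass
  public static void deleteWorkDir() {
    if (workDir != null && workDir.exists()) {
      FileUtil.fullyDelete(workDir);      // leave nothing behind for the next test class
    }
  }
}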