Path archivePath = new Path(harFile.substring(0, lastSep));
final String[] args = { "-archiveName", dir, "*", archivePath.toString() };
Configuration newConf = new Configuration();
FileSystem fs = archivePath.getFileSystem(newConf);
newConf.set(HCatConstants.CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY,
    hadoopTokenFileLocationEnvSetting);
int rc = ToolRunner.run(har, args);
if (rc != 0) {
  throw new Exception("Har returned error code " + rc);
}
// Remove the source directory only after the archive was created successfully.
fs.delete(new Path(dir), true);
} catch (Exception e) {
  throw new HCatException("Error creating Har [" + harFile + "] from [" + dir + "]", e);
ToolRunner.printGenericCommandUsage(getErr());
return 1;
getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, args[++i]);
} else if (args[i].equals("-f") || (args[i].equals("-force"))) {
  return 0;
} else {
  ToolRunner.printGenericCommandUsage(getErr());
  return 1;
private boolean canBeSafelyDeleted(PathData item) throws IOException {
  boolean shouldDelete = true;
  if (safeDelete) {
    final long deleteLimit = getConf().getLong(
        HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES,
        HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT);
    if (deleteLimit > 0) {
      ContentSummary cs = item.fs.getContentSummary(item.path);
      final long numFiles = cs.getFileCount();
      if (numFiles > deleteLimit) {
        if (!ToolRunner.confirmPrompt("Proceed deleting " + numFiles + " files?")) {
          System.err.println("Delete aborted at user request.\n");
          shouldDelete = false;
        }
      }
    }
  }
  return shouldDelete;
}
/** copy empty directory on dfs file system */
public void testEmptyDir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 2, true, null);
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
      fs.mkdirs(new Path("/empty"));
      ToolRunner.run(new DistCp(conf), new String[] {
          "-log", namenode+"/logs", namenode+"/empty", namenode+"/dest"});
      fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
      assertTrue("Destination directory does not exist.",
          fs.exists(new Path(namenode+"/dest")));
      deldir(hdfs, "/dest");
      deldir(hdfs, "/empty");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
originalDir = new Path(getOriginalLocation(p));
} else {
  originalDir = p.getDataLocation();
Path intermediateArchivedDir = new Path(originalDir.getParent(),
    originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
Path intermediateOriginalDir = new Path(originalDir.getParent(),
args.add(tmpPath.toString());
ret = ToolRunner.run(har, args.toArray(new String[0]));
} catch (Exception e) {
  throw new HiveException(e);
throw new HiveException("The intermediate archive directory already exists.");
fs.rename(tmpPath, intermediateArchivedDir);
} catch (IOException e) {
  throw new HiveException("Error while moving tmp directory");
URI harPartitionDir = harHelper.getHarUri(originalPartitionUri);
StringBuilder authority = new StringBuilder();
if (harPartitionDir.getUserInfo() != null) {
  authority.append(harPartitionDir.getUserInfo()).append("@");
authority.append(harPartitionDir.getHost());
if (harPartitionDir.getPort() != -1) {
  authority.append(":").append(harPartitionDir.getPort());
Partition p = partitions.get(0);
if (ArchiveUtils.isArchived(p)) {
  originalDir = new Path(getOriginalLocation(p));
} else {
  originalDir = new Path(p.getLocation());
URI originalUri = ArchiveUtils.addSlash(originalDir.toUri());
Path intermediateArchivedDir = new Path(originalDir.getParent(),
    originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    archiveUri, originalUri);
URI sourceUri = harHelper.getHarUri(originalUri);
Path sourceDir = new Path(sourceUri.getScheme(), sourceUri.getAuthority(), sourceUri.getPath());
FileSystem srcFs = FileSystem.get(sourceDir.toUri(), conf);
srcFs.initialize(sourceDir.toUri(), conf);
ret = ToolRunner.run(fss, args.toArray(new String[0]));
} catch (Exception e) {
  throw new HiveException(e);
if (fs.exists(intermediateExtractedDir)) {
  throw new HiveException("Invalid state: the intermediate extracted " +
      "directory already exists.");
final String testDst = testRoot + "/" + testDstRel;
String nnUri = FileSystem.getDefaultUri(conf).toString();
DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
fs.mkdirs(new Path(testRoot));
if (createSrcDir) {
  fs.mkdirs(new Path(testSrc));
  fs.mkdirs(new Path(testDst));
    nnUri+testDst} : new String[]{"-pub", nnUri+testSrc, nnUri+testDst};
ToolRunner.run(conf, new DistCp(), args);
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 2, true, null);
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    deldir(hdfs, "/logs");
    Path srcPath = new Path("/srcdat", testfilename);
    Path destPath = new Path("/destdat", testfilename);
    FSDataOutputStream out = fs.create(srcPath, true);
    out.writeUTF(srcData);
    out.close();
    ToolRunner.run(new DistCp(conf), new String[] { "-p", "-update",
    ToolRunner.run(new DistCp(conf), new String[] { "-p", "-update",
    if (cluster != null) { cluster.shutdown(); }
final Configuration conf = new Configuration();
conf.setInt("fs.trash.interval", 60);
MiniDFSCluster cluster = null;
try {
  cluster = new MiniDFSCluster(conf, 2, true, null);
  final URI nnURI = FileSystem.getDefaultUri(conf);
  final String nnUri = nnURI.toString();
  final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
  final DistCp distcp = new DistCp(conf);
  final FsShell shell = new FsShell(conf);
  ToolRunner.run(distcp, new String[]{"-delete", "-update", "-log", "/log",
      nnUri+srcrootdir, nnUri+dstrootdir});
  create(fs, new Path(dstrootdir, "foo"));
  create(fs, new Path(dstrootdir, "foobar"));
  ToolRunner.run(distcp, new String[]{"-delete", "-update", "-log", "/log2",
      nnUri+srcrootdir, nnUri+dstrootdir});
  assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
      ".Trash/Current" + dstrootdir + "/foo")));
  assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
  if (cluster != null) { cluster.shutdown(); }
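// Hedged aside (an assumption, not part of the test fragment above): FsShell is
// itself a Tool, so the shell instance declared above can be driven through the
// same ToolRunner entry point used for DistCp. "-ls" is a standard FsShell
// command; dstrootdir is the variable from the fragment, and 0 is the usual
// success return code.
int lsRc = ToolRunner.run(shell, new String[] {"-ls", dstrootdir});
assertEquals(0, lsRc);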
protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data,
    String[] args, int valueMultiplier) throws Exception {
  TableName table = TableName.valueOf(args[args.length - 1]);
  Configuration conf = new Configuration(util.getConfiguration());
  FileSystem fs = FileSystem.get(conf);
  Path inputPath = fs.makeQualified(
      new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat"));
  FSDataOutputStream op = fs.create(inputPath, true);
  op.write(Bytes.toBytes(data));
  op.close();
  LOG.debug(String.format("Wrote test data to file: %s", inputPath));
  if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
    LOG.debug("Forcing combiner.");
    conf.setInt("mapreduce.map.combine.minspills", 1);
  argv.add(inputPath.toString());
  Tool tool = new ImportTsv();
  LOG.debug("Running ImportTsv with arguments: " + argv);
  try {
    assertEquals(0, ToolRunner.run(conf, tool, argv.toArray(args)));
  } finally {
final String testDst = testRoot + "/" + testDstRel;
String nnUri = FileSystem.getDefaultUri(conf).toString();
FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
fs.mkdirs(new Path(testRoot));
if (createSrcDir) {
  fs.mkdirs(new Path(testSrc));
  fs.mkdirs(new Path(testDst));
    nnUri+testDst} : new String[]{"-pu", nnUri+testSrc, nnUri+testDst};
ToolRunner.run(conf, new DistCp(), args);
/** tests basedir option copying files from dfs file system to dfs file system */
public void testBasedir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 2, true, null);
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
      ToolRunner.run(new DistCp(conf), new String[] {
          "-basedir", "/basedir", namenode+"/basedir/middle/srcdat", namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
          checkFiles(hdfs, "/destdat/middle/srcdat", files));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/basedir");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  int status = ToolRunner.run(conf, new BulkImportTool(), args);
  if (status != 0) {
    System.exit(status);
  }
  SchemaMetrics.configureGlobally(conf);
  status = ToolRunner.run(new LoadIncrementalHFiles(conf),
      new String[]{conf.get(HFILE_PATH), conf.get(LilyJythonMapper.TABLE_NAME)});
  FileSystem.get(conf).delete(new Path(new URI(conf.get(HFILE_PATH))), true);
  System.exit(status);
}
/**
 * Main program
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
  // create a fsck object
  Configuration conf = HBaseConfiguration.create();
  Path hbasedir = new Path(conf.get(HConstants.HBASE_DIR));
  URI defaultFs = hbasedir.getFileSystem(conf).getUri();
  conf.set("fs.defaultFS", defaultFs.toString());   // for hadoop 0.21+
  conf.set("fs.default.name", defaultFs.toString()); // for hadoop 0.20
  int ret = ToolRunner.run(new HBaseFsck(conf), args);
  System.exit(ret);
}
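// Hedged sketch (not taken from any of the snippets above): the minimal
// Tool/ToolRunner pattern that every example here relies on. The class name
// MinimalTool is hypothetical; only the org.apache.hadoop.* types are real.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class MinimalTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // By the time run() is called, ToolRunner has already applied generic
    // options (-D, -conf, -fs, ...) to this Configuration.
    Configuration conf = getConf();
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
    return 0; // a non-zero return is what the callers above check via "if (rc != 0)" or assertEquals
  }

  public static void main(String[] args) throws Exception {
    int rc = ToolRunner.run(new Configuration(), new MinimalTool(), args);
    System.exit(rc);
  }
}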
job.setLong("distcp.bytes.per.map", totsize / 3); ToolRunner.run(new DistCp(job), new String[] {"-m", "100", "-log", FileStatus[] logs = fs.listStatus(new Path(logdir)); ToolRunner.run(new DistCp(job), new String[] {"-m", "1", "-log", logs = fs.globStatus(new Path(namenode+"/logs/part*")); assertTrue("Unexpected map count, logs.length=" + logs.length, logs.length == 1); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); }
/**
 * Test that users can copy a snapshot while preserving its xattrs.
 */
@Test (timeout = 120000)
public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  Path snapshotCopy = new Path(path.toString() + "-copy");
  String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
      snapshotCopy.toUri().toString() };
  int ret = ToolRunner.run(new FsShell(conf), argv);
  assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
  Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
  assertArrayEquals(value1, xattrs.get(name1));
  assertArrayEquals(value2, xattrs.get(name2));
}
public void testSpaces() throws Exception {
  fs.delete(archivePath, true);
  Configuration conf = mapred.createJobConf();
  HadoopArchives har = new HadoopArchives(conf);
  String[] args = new String[6];
  args[0] = "-archiveName";
  args[1] = "foo bar.har";
  args[2] = "-p";
  args[3] = fs.getHomeDirectory().toString();
  args[4] = "test";
  args[5] = archivePath.toString();
  int ret = ToolRunner.run(har, args);
  assertTrue("failed test", ret == 0);
  Path finalPath = new Path(archivePath, "foo bar.har");
  Path fsPath = new Path(inputPath.toUri().getPath());
  Path filePath = new Path(finalPath, "test");
  // make it a har path
  Path harPath = new Path("har://" + filePath.toUri().getPath());
  FileSystem harFs = harPath.getFileSystem(conf);
  FileStatus[] statuses = harFs.listStatus(finalPath);
}
    final Path sourceDir, Path copyDir, final boolean overwrite,
    final RegionPredicate bypassregionPredicate, boolean success) throws Exception {
  URI hdfsUri = FileSystem.get(conf).getUri();
  FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
  copyDir = copyDir.makeQualified(fs);
  opts.add(Bytes.toString(snapshotName));
  opts.add("--copy-to");
  opts.add(copyDir.toString());
  if (targetName != snapshotName) {
    opts.add("--target");
  int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()]));
  assertEquals(success ? 0 : 1, res);
  if (!success) {
private Path doCopyAndTest(FsShell shell, Path dest, Path src, String cpArgs,
    int expectedExitCode) throws Exception {
  final Path target = new Path(dest, "targetfile" + counter.getAndIncrement());
  final String[] argv = cpArgs == null
      ? new String[] { "-cp", src.toUri().toString(), target.toUri().toString() }
      : new String[] { "-cp", cpArgs, src.toUri().toString(), target.toUri().toString() };
  final int ret = ToolRunner.run(shell, argv);
  assertEquals("cp -p is not working", expectedExitCode, ret);
  return target;
}
private int doSearch(Configuration conf, String keysDir) throws Exception {
  Path inputDir = new Path(keysDir);
  getConf().set(SEARCHER_INPUTDIR_KEY, inputDir.toString());
  SortedSet<byte[]> keys = readKeysToSearch(getConf());
  if (keys.isEmpty()) throw new RuntimeException("No keys to find");
  LOG.info("Count of keys to find: " + keys.size());
  for (byte[] key : keys) LOG.info("Key: " + Bytes.toStringBinary(key));
  // Now read all WALs. In two dirs. Presumes certain layout.
  Path walsDir = new Path(CommonFSUtils.getWALRootDir(getConf()), HConstants.HREGION_LOGDIR_NAME);
  Path oldWalsDir = new Path(
      CommonFSUtils.getWALRootDir(getConf()), HConstants.HREGION_OLDLOGDIR_NAME);
  LOG.info("Running Search with keys inputDir=" + inputDir +
      " against " + getConf().get(HConstants.HBASE_DIR));
  int ret = ToolRunner.run(new WALSearcher(getConf()), new String[] {walsDir.toString(), ""});
  if (ret != 0) return ret;
  return ToolRunner.run(new WALSearcher(getConf()), new String[] {oldWalsDir.toString(), ""});
}
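// Hedged usage sketch, reusing the hypothetical MinimalTool from the sketch
// above: ToolRunner's GenericOptionsParser strips options such as
// "-D key=value" before the remaining arguments reach Tool.run(), which is why
// the examples above can mix configuration settings and tool arguments freely.
// The input path is purely illustrative.
String[] args = {"-D", "fs.trash.interval=60", "/some/input/path"};
int rc = ToolRunner.run(new Configuration(), new MinimalTool(), args);
// Inside run(), getConf().get("fs.trash.interval") returns "60" and the
// argument array passed to run() contains only {"/some/input/path"}.
System.exit(rc);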