Refine search
Partition p = partitions.get(0); if(ArchiveUtils.isArchived(p)) { originalDir = new Path(getOriginalLocation(p)); } else { originalDir = new Path(p.getLocation()); URI originalUri = ArchiveUtils.addSlash(originalDir.toUri()); Path intermediateArchivedDir = new Path(originalDir.getParent(), originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX); FileSystem srcFs = FileSystem.get(sourceDir.toUri(), conf); srcFs.initialize(sourceDir.toUri(), conf); FsShell fss = new FsShell(conf); int ret = 0; try { if (fs.exists(intermediateExtractedDir)) { throw new HiveException("Invalid state: the intermediate extracted " + "directory already exists.");
/**
 * Tests that HdfsUtils#setFullFileStatus does not throw an exception when
 * setting permissions fails and recursion is requested.
 *
 * <p>The mocked {@link FsShell} throws from every {@code run(...)} call, so this
 * test passes only if {@code setFullFileStatus} swallows that failure.</p>
 */
@Test
public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
  Configuration conf = new Configuration();
  // Disable ACLs so setFullFileStatus takes the plain chmod code path.
  conf.set("dfs.namenode.acls.enabled", "false");
  Path fakeTarget = new Path("fakePath");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  FsShell mockFsShell = mock(FsShell.class);
  // NOTE(review): 777 here is decimal, not octal 0777 — confirm this is intentional.
  when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class),
      fakeTarget, true, mockFsShell);
  // The original verification embedded a Mockito matcher (any(String.class)) inside a
  // plain String[] literal, which is invalid: matchers cannot be mixed with raw values
  // or nested inside array arguments. Verify the whole-argument invocation with a
  // matcher for the full array instead.
  verify(mockFsShell).run(any(String[].class));
} }
try { FileStatus fStatus = sourceStatus.getFileStatus(); String group = fStatus.getGroup(); boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true"); FsPermission sourcePerm = fStatus.getPermission(); List<AclEntry> aclEntries = null; if (aclEnabled) { fsShell.setConf(conf); run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); if (targetGroup == null || !group.equals(targetGroup)) { fs.setOwner(target, null, group); fs.setAcl(target, aclEntries); fs.setPermission(target, sourcePerm); "Unable to inherit permissions for file " + target + " from file " + sourceStatus.getFileStatus().getPath(), e.getMessage()); LOG.debug("Exception while inheriting permissions", e);
/**
 * Command-line entry point: builds a shell instance, runs it via ToolRunner,
 * and exits the JVM with the shell's return code.
 *
 * @param argv the command and its arguments
 * @throws Exception upon error
 */
public static void main(String argv[]) throws Exception {
  Configuration conf = new Configuration();
  conf.setQuietMode(false);
  FsShell shell = newShellInstance();
  shell.setConf(conf);
  int exitCode;
  try {
    exitCode = ToolRunner.run(shell, argv);
  } finally {
    // Always release shell resources, even when ToolRunner throws.
    shell.close();
  }
  System.exit(exitCode);
}
/**
 * One-time setup: silences the configuration, wires user/group information,
 * and lazily builds the command factory with the built-in help/usage commands.
 *
 * @throws IOException upon error while initializing
 */
protected void init() throws IOException {
  getConf().setQuietMode(true);
  UserGroupInformation.setConfiguration(getConf());
  if (commandFactory != null) {
    return; // factory already built; registration happens only once
  }
  commandFactory = new CommandFactory(getConf());
  commandFactory.addObject(new Help(), "-help");
  commandFactory.addObject(new Usage(), "-usage");
  registerCommands(commandFactory);
}
/** * (Re)create zero-length file at the specified path. * This will be replaced by a more UNIX-like touch when files may be * modified. */ void touchz(String src) throws IOException { Path f = new Path(src); FileSystem srcFs = f.getFileSystem(getConf()); FileStatus st; if (srcFs.exists(f)) { st = srcFs.getFileStatus(f); if (st.isDir()) { // TODO: handle this throw new IOException(src + " is a directory"); } else if (st.getLen() != 0) throw new IOException(src + " must be a zero-length file"); } FSDataOutputStream out = srcFs.create(f); out.close(); }
emptierThread.start(); FsShell shell = new FsShell(); shell.setConf(conf); shell.init(); Path myPath = new Path(TEST_DIR, "test/mkdirs"); mkdir(fs, myPath); int fileIndex = 0; while (true) { Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++); writeFile(fs, myFile); args[1] = myFile.toString(); int val = -1; try { val = shell.run(args); } catch (Exception e) { System.err.println("Exception raised from Trash.run " + Path trashDir = shell.getCurrentTrashDir(); FileStatus files[] = fs.listStatus(trashDir.getParent()); String fileName = file.getPath().getName(); checkpoints.add(fileName);
fs.setAcl(src, Lists.newArrayList( aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), FileStatus status = fs.getFileStatus(src); final long mtime = status.getModificationTime(); final long atime = status.getAccessTime(); final String owner = status.getOwner(); final String group = status.getGroup(); final FsPermission perm = status.getPermission(); shell = new FsShell(conf); Path target1 = new Path(hdfsTestDir, "targetfile1"); String[] argv = new String[] { "-cp", "-p", src.toUri().toString(), target1.toUri().toString() }; } finally { if (null != shell) { shell.close();
/**
 * Show the summary disk usage of each dir/file
 * that matches the file pattern <i>src</i>, printed one
 * "path&lt;TAB&gt;totalBytes" line per match.
 *
 * @param src a file pattern specifying source files
 * @throws IOException when nothing matches the pattern or a summary fails
 * @see org.apache.hadoop.fs.FileSystem#globStatus(Path)
 */
void dus(String src) throws IOException {
  Path pattern = new Path(src);
  FileSystem fileSys = pattern.getFileSystem(getConf());
  FileStatus matches[] = fileSys.globStatus(new Path(src));
  if (matches == null || matches.length == 0) {
    throw new FileNotFoundException("Cannot access " + src +
        ": No such file or directory.");
  }
  for (FileStatus match : matches) {
    long totalSize = fileSys.getContentSummary(match.getPath()).getLength();
    String pathStr = match.getPath().toString();
    // An empty path string is displayed as "." (current directory).
    System.out.println(("".equals(pathStr) ? "." : pathStr) + "\t" + totalSize);
  }
}
FsShell shell = new FsShell(); shell.setConf(conf); Path dir = new Path(chmodDir); fs.delete(dir, true); fs.mkdirs(dir); Path file = new Path(chmodDir, "file"); TestDFSShell.writeFile(fs, file); fs.getFileStatus(dir).getPermission().toString()); assertEquals("rw-rw-rw-", fs.getFileStatus(file).getPermission().toString()); Path dir2 = new Path(dir, "stickybit" ); fs.mkdirs(dir2 ); LOG.info("Testing sticky bit on: " + dir2); LOG.info("Sticky bit directory initial mode: " + fs.getFileStatus(dir2).getPermission()); try { fs.close(); shell.close(); } catch (IOException ignored) {}
/**
 * Create the given dir, failing if the path already exists in any form.
 *
 * @param src path of the directory to create
 * @throws IOException if src exists (as a directory or a file) or if the
 *         underlying mkdirs call reports failure
 */
void mkdir(String src) throws IOException {
  Path dir = new Path(src);
  FileSystem fileSys = dir.getFileSystem(getConf());
  try {
    // Any existing path is an error; only the "not found" case below
    // falls through to the actual mkdirs.
    if (fileSys.getFileStatus(dir).isDir()) {
      throw new IOException("cannot create directory " + src + ": File exists");
    }
    throw new IOException(src + " exists but is not a directory");
  } catch (FileNotFoundException e) {
    if (!fileSys.mkdirs(dir)) {
      throw new IOException("failed to create " + src);
    }
  }
}
/**
 * Add local files to the indicated FileSystem name. src is kept.
 * A single source of "-" means copy from standard input instead.
 *
 * @param srcs local source paths
 * @param dstf destination path on the target filesystem
 * @throws IOException upon copy failure
 */
void copyFromLocal(Path[] srcs, String dstf) throws IOException {
  Path dst = new Path(dstf);
  FileSystem dstFileSys = dst.getFileSystem(getConf());
  boolean fromStdin = srcs.length == 1 && srcs[0].toString().equals("-");
  if (fromStdin) {
    copyFromStdin(dst, dstFileSys);
  } else {
    dstFileSys.copyFromLocalFile(false, false, srcs, dst);
  }
}
final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep"); final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep"); final Short oldRepFactor = new Short((short) 1); final Short newRepFactor = new Short((short) 3); cluster.waitActive(); fs = cluster.getFileSystem(); assertThat(fs.mkdirs(new Path(testdir2)), is(true)); shell = new FsShell(conf); fs.create(hdfsFile1, true).close(); fs.create(hdfsFile2, true).close(); argv = new String[] { "-setrep", newRepFactor.toString(), hdfsFile1.toString() }; assertThat(shell.run(argv), is(SUCCESS)); assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor)); assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(oldRepFactor)); assertThat(shell.run(argv), is(SUCCESS)); assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor)); assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(newRepFactor)); shell.close();
FileSystem fs = null; final File localFile = new File(TEST_ROOT_DIR, "testFileForPut"); final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString(); final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-" + counter.getAndIncrement(); final Path hdfsTestDir = new Path(testdir); try { fs = cluster.getFileSystem(); fs.mkdirs(hdfsTestDir); localFile.createNewFile(); writeFile(fs, new Path(testdir, "testFileForPut")); shell = new FsShell(); } finally { if (null != shell) shell.close(); fs.delete(hdfsTestDir, true); fs.close();
/**
 * Copies from stdin to the indicated file.
 *
 * @param dst destination file; must not exist and must not be a directory
 * @param dstFs filesystem holding dst
 * @throws IOException if dst is a directory, already exists, or the copy fails
 */
private void copyFromStdin(Path dst, FileSystem dstFs) throws IOException {
  if (dstFs.isDirectory(dst)) {
    throw new IOException("When source is stdin, destination must be a file.");
  }
  if (dstFs.exists(dst)) {
    throw new IOException("Target " + dst.toString() + " already exists.");
  }
  FSDataOutputStream output = dstFs.create(dst);
  try {
    // 'false' keeps System.in open; the output stream is closed below.
    IOUtils.copyBytes(System.in, output, getConf(), false);
  } finally {
    output.close();
  }
}
/**
 * Add local files to the indicated FileSystem name. src is removed.
 *
 * @param srcs local source paths (removed after the move)
 * @param dstf destination path on the target filesystem
 * @throws IOException upon move failure
 */
void moveFromLocal(Path[] srcs, String dstf) throws IOException {
  Path dst = new Path(dstf);
  dst.getFileSystem(getConf()).moveFromLocalFile(srcs, dst);
}
/**
 * Print each file matching the pattern <i>srcf</i> to stdout as text,
 * decoding it via forMagic (presumably format detection by magic bytes —
 * confirm against forMagic's definition). Directories are rejected.
 *
 * @param srcf a file pattern specifying source files
 * @throws IOException if a match is a directory or the read fails
 */
void text(String srcf) throws IOException {
  Path pattern = new Path(srcf);
  FileSystem patternFs = pattern.getFileSystem(getConf());
  new DelayedExceptionThrowing() {
    @Override
    void process(Path p, FileSystem srcFs) throws IOException {
      if (srcFs.isDirectory(p)) {
        throw new IOException("Source must be a file.");
      }
      printToStdout(forMagic(p, srcFs));
    }
  }.globAndProcess(pattern, patternFs);
}
/**
 * Tests that
 * {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean, FsShell)}
 * does not throw an exception when setting the group and with recursion.
 * The mocked FsShell throws from every run(...) call, so the test passes only
 * if setFullFileStatus swallows that failure; it then checks that the
 * recursive chgrp was attempted with the source group.
 */
@Test
public void testSetFullFileStatusFailInheritGroupRecursive() throws Exception {
  Configuration conf = new Configuration();
  // ACLs disabled: exercises the chgrp-only code path.
  conf.set("dfs.namenode.acls.enabled", "false");
  String fakeSourceGroup = "fakeGroup1";
  String fakeTargetGroup = "fakeGroup2";
  Path fakeTarget = new Path("fakePath");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  FsShell mockFsShell = mock(FsShell.class);
  when(mockSourceStatus.getGroup()).thenReturn(fakeSourceGroup);
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
  // Source group differs from target group, so a chgrp must be issued.
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, fakeTargetGroup, mock(FileSystem.class), fakeTarget, true, mockFsShell);
  // Mockito deep-compares array arguments, so verifying with a fresh String[] works.
  verify(mockFsShell).run(new String[]{"-chgrp", "-R", fakeSourceGroup, fakeTarget.toString()});
}
/**
 * Get a listing of all files that match the file pattern <i>srcf</i>.
 *
 * @param srcf a file pattern specifying source files
 * @param recursive if need to list files in subdirs
 * @return 0 if every per-path listing succeeded, -1 if any reported errors
 * @throws IOException when nothing matches the pattern or a listing fails
 * @see org.apache.hadoop.fs.FileSystem#globStatus(Path)
 */
private int ls(String srcf, boolean recursive) throws IOException {
  Path srcPath = new Path(srcf);
  FileSystem srcFs = srcPath.getFileSystem(this.getConf());
  FileStatus[] srcs = srcFs.globStatus(srcPath);
  if (srcs == null || srcs.length == 0) {
    throw new FileNotFoundException("Cannot access " + srcf +
        ": No such file or directory.");
  }
  // Print the header only when the glob resolved to a single path.
  // (Original used the redundant form "(srcs.length == 1) ? true : false".)
  boolean printHeader = srcs.length == 1;
  int numOfErrors = 0;
  for (FileStatus src : srcs) {
    numOfErrors += ls(src.getPath(), srcFs, recursive, printHeader);
  }
  return numOfErrors == 0 ? 0 : -1;
}