/**
 * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target
 * {@link Path}. Failures to apply permissions are only logged as warnings; no exception is
 * thrown to the caller.
 *
 * @param conf the {@link Configuration} used when setting permissions and ACLs
 * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
 * @param targetGroup the group of the target {@link Path}; when it equals the source group,
 *        the redundant set-group operation is skipped
 * @param fs the {@link FileSystem} that contains the target {@link Path}
 * @param target the {@link Path} to copy permissions, group, and ACLs to
 * @param recursion whether to apply permissions and ACLs recursively below the target
 */
public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
    String targetGroup, FileSystem fs, Path target, boolean recursion) {
  // Recursive application shells out through FsShell; the non-recursive path never needs one.
  FsShell shell = recursion ? new FsShell() : null;
  setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, shell);
}
/**
 * Check the permissions on a file for the current login user.
 *
 * @param fs filesystem the file is contained in
 * @param stat stat info for the file
 * @param action action to be performed
 * @throws IOException if thrown by Hadoop
 * @throws AccessControlException if the file cannot be accessed
 * @throws LoginException if the current user's UGI cannot be resolved
 */
public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
    throws IOException, LoginException {
  // Delegate to the UGI-aware overload using the caller's login identity.
  checkFileAccess(fs, stat, action, SecurityUtils.getUGI());
}
// NOTE(review): fragment — the enclosing method, its try block, and part of the catch body
// are outside this view; the LOG.error statement appears truncated by extraction.
// Resolve a stable file id for the path (scheme/authority stripped for the lookup).
fileId = HdfsUtils.getFileId(fs, Path.getPathWithoutSchemeAndAuthority(file).toString());
} catch (UnsupportedOperationException ex) {
// Filesystem cannot supply file ids; message text is cut off mid-expression in this
// view — TODO confirm the full log statement against the complete file.
LOG.error("Cannot cache file metadata for " + location + "; "
// Rewrite the path into its file-id based form — presumably so cached metadata stays
// valid across renames; verify against HdfsUtils.getFileIdPath.
file = HdfsUtils.getFileIdPath(fs, file, fileId);
tlms.getFileMetadataHandler(type).cacheFileMetadata(fileId, fs, file);
// NOTE(review): fragment — the enclosing method and the condition this continuation belongs
// to are outside this view.
parsedDbName[DB_NAME], tableName, tbl, partNames)) {
FileSystem fs = location.getFileSystem(getConf());
// Drop-and-recreate is only attempted when the directory is neither encrypted nor holds
// HDFS snapshots; otherwise fall to the per-file listing branch below.
if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location)
    && !FileUtils.pathHasSnapshotSubDir(location, fs)) {
// Snapshot permissions/group/ACLs, delete and recreate the directory, then restore them.
HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(getConf(), fs, location);
wh.deleteDir(location, true, isAutopurge, db);
fs.mkdirs(location);
HdfsUtils.setFullFileStatus(getConf(), status, targetGroup, fs, location, false);
} else {
// Encrypted or snapshotted path: list non-hidden entries for individual handling.
FileStatus[] statuses = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
public static boolean runDistCp(List<Path> srcPaths, Path dst, Configuration conf) throws IOException { DistCpOptions options = new DistCpOptions.Builder(srcPaths, dst) .withSyncFolder(true) .withCRC(true) .preserve(FileAttribute.BLOCKSIZE) .build(); // Creates the command-line parameters for distcp List<String> params = constructDistCpParams(srcPaths, dst, conf); try { conf.setBoolean("mapred.mapper.new-api", true); DistCp distcp = new DistCp(conf, options); // HIVE-13704 states that we should use run() instead of execute() due to a hadoop known issue // added by HADOOP-10459 if (distcp.run(params.toArray(new String[params.size()])) == 0) { return true; } else { return false; } } catch (Exception e) { throw new IOException("Cannot execute DistCp process: " + e, e); } finally { conf.setBoolean("mapred.mapper.new-api", false); } }
// NOTE(review): fragment — the enclosing permission-check method, the surrounding
// variables (user, groups, dirPerms, stat), and the fall-through path are outside this view.
// Membership in the configured super-group grants the action unconditionally.
if (arrayContains(groups, superGroupName)) {
LOG.debug("User \"" + user + "\" belongs to super-group \"" + superGroupName + "\". "
    + "Permission granted for action: " + action + ".");
return;
} else if (arrayContains(groups, stat.getGroup())) {
// User shares the file's group: the group permission bits decide.
if (dirPerms.getGroupAction().implies(action)) {
return;
// NOTE(review): fragment — the enclosing method and the condition this continuation belongs
// to are outside this view. (This chunk duplicates an earlier fragment in this extraction.)
parsedDbName[DB_NAME], tableName, tbl, partNames)) {
FileSystem fs = location.getFileSystem(getConf());
// Drop-and-recreate is only attempted when the directory is neither encrypted nor holds
// HDFS snapshots; otherwise fall to the per-file listing branch below.
if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location)
    && !FileUtils.pathHasSnapshotSubDir(location, fs)) {
// Snapshot permissions/group/ACLs, delete and recreate the directory, then restore them.
HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(getConf(), fs, location);
wh.deleteDir(location, true, isAutopurge, db);
fs.mkdirs(location);
HdfsUtils.setFullFileStatus(getConf(), status, targetGroup, fs, location, false);
} else {
// Encrypted or snapshotted path: list non-hidden entries for individual handling.
FileStatus[] statuses = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
public boolean isWritable(Path path) throws IOException { if (!storageAuthCheck) { // no checks for non-secure hadoop installations return true; } if (path == null) { //what??!! return false; } final FileStatus stat; final FileSystem fs; try { fs = getFs(path); stat = fs.getFileStatus(path); HdfsUtils.checkFileAccess(fs, stat, FsAction.WRITE); return true; } catch (FileNotFoundException fnfe){ // File named by path doesn't exist; nothing to validate. return true; } catch (Exception e) { // all other exceptions are considered as emanating from // unauthorized accesses if (LOG.isDebugEnabled()) { LOG.debug("Exception when checking if path (" + path + ")", e); } return false; } }
/**
 * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
 * does not throw an exception when setting the group and without recursion.
 */
@Test
public void testSetFullFileStatusFailInheritGroup() throws IOException {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "false");

  FileStatus sourceStatus = mock(FileStatus.class);
  when(sourceStatus.getGroup()).thenReturn("fakeGroup1");

  HdfsUtils.HadoopFileStatus hadoopStatus = mock(HdfsUtils.HadoopFileStatus.class);
  when(hadoopStatus.getFileStatus()).thenReturn(sourceStatus);

  // Force the group-change path to blow up; the call under test must swallow it.
  FileSystem fs = mock(FileSystem.class);
  doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class));

  HdfsUtils.setFullFileStatus(conf, hadoopStatus, "fakeGroup2", fs, new Path("fakePath"), false);
  verify(fs).setOwner(any(Path.class), any(String.class), any(String.class));
}
// NOTE(review): fragment — the enclosing method, its try block, and part of the catch body
// are outside this view; the LOG.error statement appears truncated by extraction. (This
// chunk duplicates an earlier fragment in this extraction.)
// Resolve a stable file id for the path (scheme/authority stripped for the lookup).
fileId = HdfsUtils.getFileId(fs, Path.getPathWithoutSchemeAndAuthority(file).toString());
} catch (UnsupportedOperationException ex) {
// Filesystem cannot supply file ids; message text is cut off mid-expression in this
// view — TODO confirm the full log statement against the complete file.
LOG.error("Cannot cache file metadata for " + location + "; "
// Rewrite the path into its file-id based form — presumably so cached metadata stays
// valid across renames; verify against HdfsUtils.getFileIdPath.
file = HdfsUtils.getFileIdPath(fs, file, fileId);
tlms.getFileMetadataHandler(type).cacheFileMetadata(fileId, fs, file);
/** Owner with full (rwx) user bits can read, write, and execute the file. */
@Test
public void userReadWriteExecute() throws IOException, LoginException {
  FileSystem fs = FileSystem.get(makeConf());
  Path file = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
  UserGroupInformation ugi = SecurityUtils.getUGI();
  // None of these may throw AccessControlException.
  for (FsAction action : new FsAction[] {FsAction.READ, FsAction.WRITE, FsAction.EXECUTE}) {
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(file), action, ugi);
  }
}
/**
 * Tests that HdfsUtils#setFullFileStatus
 * does not throw an exception when setting permissions and without recursion.
 */
@Test
public void testSetFullFileStatusFailInheritPerms() throws IOException {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "false");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  FileSystem mockFs = mock(FileSystem.class);
  // 0777 (octal) == rwxrwxrwx. The previous literal was decimal 777, which encodes the
  // unintended bit pattern 01411.
  when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  // Force setPermission to fail; the call under test must swallow the exception.
  doThrow(RuntimeException.class).when(mockFs).setPermission(any(Path.class), any(FsPermission.class));
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, null, mockFs, new Path("fakePath"), false);
  verify(mockFs).setPermission(any(Path.class), any(FsPermission.class));
}
/** A user matching neither owner nor group is governed by the "other" (rwx) bits. */
@Test
public void otherReadWriteExecute() throws IOException, LoginException {
  FileSystem fs = FileSystem.get(makeConf());
  Path file = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.ALL));
  UserGroupInformation ugi = ugiInvalidUserInvalidGroups();
  // None of these may throw AccessControlException.
  for (FsAction action : new FsAction[] {FsAction.READ, FsAction.WRITE, FsAction.EXECUTE}) {
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(file), action, ugi);
  }
}
/**
 * Tests that HdfsUtils#setFullFileStatus
 * does not throw an exception when setting permissions and with recursion.
 */
@Test
public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "false");
  Path fakeTarget = new Path("fakePath");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  FsShell mockFsShell = mock(FsShell.class);
  // 0777 (octal) == rwxrwxrwx; previous literal was decimal 777 (bit pattern 01411).
  when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget,
      true, mockFsShell);
  // The original verify embedded any(String.class) inside a concrete array literal, which is
  // invalid Mockito matcher usage (matchers cannot appear as array elements). Capture the
  // argument instead and assert the fixed positions of the chmod invocation.
  org.mockito.ArgumentCaptor<String[]> chmodArgs =
      org.mockito.ArgumentCaptor.forClass(String[].class);
  verify(mockFsShell).run(chmodArgs.capture());
  String[] args = chmodArgs.getValue();
  org.junit.Assert.assertEquals(4, args.length);
  org.junit.Assert.assertEquals("-chmod", args[0]);
  org.junit.Assert.assertEquals("-R", args[1]);
  org.junit.Assert.assertEquals(fakeTarget.toString(), args[3]);
}
}
/** A user in the file's group (but not its owner) is governed by the group (rwx) bits. */
@Test
public void groupReadWriteExecute() throws IOException, LoginException {
  FileSystem fs = FileSystem.get(makeConf());
  Path file = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.NONE));
  UserGroupInformation ugi = ugiInvalidUserValidGroups();
  // None of these may throw AccessControlException.
  for (FsAction action : new FsAction[] {FsAction.READ, FsAction.WRITE, FsAction.EXECUTE}) {
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(file), action, ugi);
  }
}
/**
 * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
 * does not throw an exception when setting the group and with recursion.
 */
@Test
public void testSetFullFileStatusFailInheritGroupRecursive() throws Exception {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "false");
  String fakeSourceGroup = "fakeGroup1";
  String fakeTargetGroup = "fakeGroup2";
  Path fakeTarget = new Path("fakePath");

  FileStatus sourceStatus = mock(FileStatus.class);
  when(sourceStatus.getGroup()).thenReturn(fakeSourceGroup);

  HdfsUtils.HadoopFileStatus hadoopStatus = mock(HdfsUtils.HadoopFileStatus.class);
  when(hadoopStatus.getFileStatus()).thenReturn(sourceStatus);

  // The recursive chgrp goes through FsShell; force it to fail.
  FsShell shell = mock(FsShell.class);
  doThrow(RuntimeException.class).when(shell).run(any(String[].class));

  // Must not propagate the shell failure...
  HdfsUtils.setFullFileStatus(conf, hadoopStatus, fakeTargetGroup, mock(FileSystem.class),
      fakeTarget, true, shell);
  // ...and must have issued a recursive chgrp to the source group.
  verify(shell).run(new String[] {"-chgrp", "-R", fakeSourceGroup, fakeTarget.toString()});
}
/**
 * Members of the HDFS super-group bypass the permission bits entirely: with all bits NONE,
 * read/write/execute must still be granted once the user's primary group is the super-group.
 */
@Test
public void rootReadWriteExecute() throws IOException, LoginException {
  UserGroupInformation ugi = SecurityUtils.getUGI();
  FileSystem fs = FileSystem.get(new Configuration());
  String old = fs.getConf().get("dfs.permissions.supergroup");
  try {
    fs.getConf().set("dfs.permissions.supergroup", ugi.getPrimaryGroupName());
    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE));
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
  } finally {
    // Configuration.set rejects a null value; restore only when a prior value existed,
    // otherwise remove the override we added so later tests see the pristine conf.
    if (old != null) {
      fs.getConf().set("dfs.permissions.supergroup", old);
    } else {
      fs.getConf().unset("dfs.permissions.supergroup");
    }
  }
}
/**
 * Tests that HdfsUtils#setFullFileStatus
 * does not throw an exception when setting ACLs and without recursion.
 */
@Test
public void testSetFullFileStatusFailInheritAcls() throws IOException {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "true");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  AclStatus mockAclStatus = mock(AclStatus.class);
  FileSystem mockFs = mock(FileSystem.class);
  // 0777 (octal) == rwxrwxrwx; previous literal was decimal 777 (bit pattern 01411).
  when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
  when(mockAclStatus.toString()).thenReturn("");
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<>());
  when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
  // Force setAcl to fail; the call under test must swallow the exception.
  doThrow(RuntimeException.class).when(mockFs).setAcl(any(Path.class), any(List.class));
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, null, mockFs, new Path("fakePath"), false);
  verify(mockFs).setAcl(any(Path.class), any(List.class));
}
/** The owner is denied READ when the user bits grant nothing, even if group/other allow it. */
@Test(expected = AccessControlException.class)
public void userNoRead() throws IOException, LoginException {
  FileSystem fs = FileSystem.get(makeConf());
  Path file = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL));
  UserGroupInformation ugi = SecurityUtils.getUGI();
  // Owner identity matches, so the (empty) user bits apply — this must throw.
  HdfsUtils.checkFileAccess(fs, fs.getFileStatus(file), FsAction.READ, ugi);
}
/**
 * Tests that HdfsUtils#setFullFileStatus
 * does not throw an exception when setting ACLs and with recursion.
 */
@Test
public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "true");
  Path fakeTarget = new Path("fakePath");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  FsShell mockFsShell = mock(FsShell.class);
  AclStatus mockAclStatus = mock(AclStatus.class);
  // 0777 (octal) == rwxrwxrwx; previous literal was decimal 777 (bit pattern 01411).
  when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
  when(mockAclStatus.toString()).thenReturn("");
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<>());
  when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
  doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget,
      true, mockFsShell);
  // The original verify embedded any(String.class) inside a concrete array literal, which is
  // invalid Mockito matcher usage (matchers cannot appear as array elements). Capture the
  // argument instead and assert the fixed positions of the setfacl invocation.
  org.mockito.ArgumentCaptor<String[]> setfaclArgs =
      org.mockito.ArgumentCaptor.forClass(String[].class);
  verify(mockFsShell).run(setfaclArgs.capture());
  String[] args = setfaclArgs.getValue();
  org.junit.Assert.assertEquals(5, args.length);
  org.junit.Assert.assertEquals("-setfacl", args[0]);
  org.junit.Assert.assertEquals("-R", args[1]);
  org.junit.Assert.assertEquals("--set", args[2]);
  org.junit.Assert.assertEquals(fakeTarget.toString(), args[4]);
}