  /**
   * Authorization privileges against a path.
   *
   * @param path
   *          a filesystem path
   * @param readRequiredPriv
   *          a list of privileges needed for inputs.
   * @param writeRequiredPriv
   *          a list of privileges needed for outputs.
   */
  public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
      throws HiveException, AuthorizationException {
    try {
      EnumSet<FsAction> actions = getFsActions(readRequiredPriv);
      actions.addAll(getFsActions(writeRequiredPriv));
      if (actions.isEmpty()) {
        return;
      }

      checkPermissions(getConf(), path, actions);

    } catch (AccessControlException ex) {
      throw authorizationException(ex);
    } catch (LoginException ex) {
      throw authorizationException(ex);
    } catch (IOException ex) {
      throw hiveException(ex);
    }
  }
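  // Usage sketch (illustrative, not from the original source): a caller that needs
  // read access to a path passes the read privileges and leaves the write side null:
  //
  //   provider.authorize(new Path("/warehouse/sales.db/orders"),   // hypothetical path
  //       new Privilege[] { Privilege.SELECT }, null);
  //
  // The privileges collapse into an EnumSet<FsAction> (here just READ), and the actual
  // enforcement is delegated to HDFS permission checks in checkPermissions().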
  @Override
  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
      throws HiveException, AuthorizationException {
    try {
      initWh();
    } catch (MetaException ex) {
      throw hiveException(ex);
    }

    // extract drop privileges
    DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv,
        writeRequiredPriv);
    readRequiredPriv = privExtractor.getReadReqPriv();
    writeRequiredPriv = privExtractor.getWriteReqPriv();

    // if a CREATE or DROP privilege requirement is present, the user should have WRITE
    // permission on the database directory
    if (privExtractor.hasDropPrivilege() || requireCreatePrivilege(readRequiredPriv)
        || requireCreatePrivilege(writeRequiredPriv)) {
      authorize(hive_db.getDatabase(table.getCatName(), table.getDbName()), new Privilege[] {},
          new Privilege[] { Privilege.ALTER_DATA });
    }

    Path path = table.getDataLocation();

    // authorize drops if there was a drop privilege requirement and the table data would
    // actually be deleted - i.e. the table is managed, or it is external and
    // METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK is enabled
    if (privExtractor.hasDropPrivilege()
        && (!MetaStoreUtils.isExternalTable(table.getTTable())
            || getConf().getBoolean(
                HiveConf.ConfVars.METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK.varname,
                HiveConf.ConfVars.METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK.defaultBoolVal))) {
      checkDeletePermission(path, getConf(), authenticator.getUserName());
    }

    // If the user has specified a location - external or not - check if the user
    // has permissions on the table dir
    if (path != null) {
      authorize(path, readRequiredPriv, writeRequiredPriv);
    }
  }
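  // Behavior note for the drop branch above: dropping a managed table deletes its data,
  // so delete permission on the table directory is always required. Dropping an external
  // table leaves the data in place, so the directory check is enforced only when
  // METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK is set to true in the configuration.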
  @Override
  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
      throws HiveException, AuthorizationException {
    Path path = getDbLocation(db);

    // extract drop privileges
    DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv,
        writeRequiredPriv);
    readRequiredPriv = privExtractor.getReadReqPriv();
    writeRequiredPriv = privExtractor.getWriteReqPriv();

    // authorize drops if there was a drop privilege requirement
    if (privExtractor.hasDropPrivilege()) {
      checkDeletePermission(path, getConf(), authenticator.getUserName());
    }

    authorize(path, readRequiredPriv, writeRequiredPriv);
  }
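  // Illustrative flow: a DROP DATABASE request reaches this method with Privilege.DROP
  // among the required privileges. DropPrivilegeExtractor strips DROP out of both arrays,
  // hasDropPrivilege() becomes true, and delete permission is verified on the database
  // directory before the remaining (non-drop) privileges are checked as plain FsActions.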
  private void authorize(Table table, Partition part, Privilege[] readRequiredPriv,
      Privilege[] writeRequiredPriv)
      throws HiveException, AuthorizationException {

    // extract drop privileges
    DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv,
        writeRequiredPriv);
    readRequiredPriv = privExtractor.getReadReqPriv();
    writeRequiredPriv = privExtractor.getWriteReqPriv();

    // authorize drops if there was a drop privilege requirement
    if (privExtractor.hasDropPrivilege()) {
      checkDeletePermission(part.getDataLocation(), getConf(), authenticator.getUserName());
    }

    // Partition path can be null in the case of a new create partition - in this case,
    // we try to default to checking the permissions of the parent table.
    // Partition itself can also be null, in cases where this gets called as a generic
    // catch-all call in cases like those with CTAS onto an unpartitioned table (see HIVE-1887)
    if ((part == null) || (part.getLocation() == null)) {
      if (requireCreatePrivilege(readRequiredPriv) || requireCreatePrivilege(writeRequiredPriv)) {
        // this should be the case only if this is a create partition.
        // The privilege needed on the table should be ALTER_DATA, and not CREATE
        authorize(table, new Privilege[]{}, new Privilege[]{Privilege.ALTER_DATA});
      } else {
        authorize(table, readRequiredPriv, writeRequiredPriv);
      }
    } else {
      authorize(part.getDataLocation(), readRequiredPriv, writeRequiredPriv);
    }
  }
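  // Note an ordering assumption in the method above: the delete-permission check reads
  // part.getDataLocation() before the null checks below it, so a drop-privilege
  // requirement implies a non-null partition here; the null-partition fallback only
  // serves the create-partition and generic catch-all paths.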
  @Override
  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
      throws HiveException, AuthorizationException {
    // Storage-based auth has no user-level notion of privileges, so user-level
    // requirements are mapped to permissions on the hive warehouse root directory.
    Path root = null;
    try {
      initWh();
      root = wh.getWhRoot();
      authorize(root, readRequiredPriv, writeRequiredPriv);
    } catch (MetaException ex) {
      throw hiveException(ex);
    }
  }
  protected Path getDbLocation(Database db) throws HiveException {
    try {
      initWh();
      String location = db.getLocationUri();
      if (location == null) {
        return wh.getDefaultDatabasePath(db.getName());
      } else {
        return wh.getDnsPath(wh.getDatabasePath(db));
      }
    } catch (MetaException ex) {
      throw hiveException(ex);
    }
  }
  @Override
  public HivePolicyProvider getHivePolicyProvider() throws HiveAuthzPluginException {
    return new HDFSPermissionPolicyProvider(getConf());
  }
  @Override
  public void authorize(Table table, Partition part, List<String> columns,
      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
      throws HiveException, AuthorizationException {
    // In a simple storage-based auth, we have no information about columns
    // living in different files, so we do simple partition-auth and ignore
    // the columns parameter.
    authorize(table, part, readRequiredPriv, writeRequiredPriv);
  }
  /**
   * Given an array of privileges, determine the set of FsActions required.
   */
  protected EnumSet<FsAction> getFsActions(Privilege[] privs) {
    EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);

    if (privs == null) {
      return actions;
    }

    for (Privilege priv : privs) {
      actions.add(getFsAction(priv));
    }

    return actions;
  }
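  // For example, assuming the usual storage-based mapping in getFsAction() (roughly:
  // SELECT maps to READ and data-modifying privileges map to WRITE - the authoritative
  // mapping lives in that method), combined input/output requirements reduce to a
  // small action set:
  //
  //   EnumSet<FsAction> a = getFsActions(new Privilege[] { Privilege.SELECT });
  //   a.addAll(getFsActions(new Privilege[] { Privilege.ALTER_DATA }));
  //   // a is now {READ, WRITE}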
  /**
   * Checks the permissions for the given path and current user on Hadoop FS. If the given path
   * does not exist, it returns without throwing.
   */
  @SuppressWarnings("deprecation")
  protected static void checkPermissions(final FileSystem fs, final FileStatus stat,
      final EnumSet<FsAction> actions, String user) throws IOException,
      AccessControlException, HiveException {

    if (stat == null) {
      // File named by path doesn't exist; nothing to validate.
      return;
    }

    FsAction checkActions = FsAction.NONE;
    for (FsAction action : actions) {
      checkActions = checkActions.or(action);
    }

    try {
      FileUtils.checkFileAccessWithImpersonation(fs, stat, checkActions, user);
    } catch (Exception err) {
      // fs.permission.AccessControlException removed by HADOOP-11356, but Hive users on older
      // Hadoop versions may still see this exception; have to reference it by name.
      if (err.getClass().getName().equals(
          "org.apache.hadoop.fs.permission.AccessControlException")) {
        throw accessControlException(err);
      }
      throw new HiveException(err);
    }
  }
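  // FsAction models the HDFS rwx bitmask, so the loop above folds the requested actions
  // into one mask - e.g. FsAction.READ.or(FsAction.WRITE) == FsAction.READ_WRITE - which
  // checkFileAccessWithImpersonation then evaluates against the file's permission bits
  // as the given user.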
  /**
   * Checks the permissions for the given path and current user on Hadoop FS.
   * If the given path does not exist, it checks the nearest existing ancestor instead.
   */
  protected void checkPermissions(final Configuration conf, final Path path,
      final EnumSet<FsAction> actions) throws IOException, LoginException, HiveException {

    if (path == null) {
      throw new IllegalArgumentException("path is null");
    }

    final FileSystem fs = path.getFileSystem(conf);

    FileStatus pathStatus = FileUtils.getFileStatusOrNull(fs, path);
    if (pathStatus != null) {
      checkPermissions(fs, pathStatus, actions, authenticator.getUserName());
    } else if (path.getParent() != null) {
      // find the ancestor which exists to check its permissions
      Path par = path.getParent();
      FileStatus parStatus = null;
      while (par != null) {
        parStatus = FileUtils.getFileStatusOrNull(fs, par);
        if (parStatus != null) {
          break;
        }
        par = par.getParent();
      }
      checkPermissions(fs, parStatus, actions, authenticator.getUserName());
    }
  }
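  // Illustrative walk: checking WRITE on /warehouse/sales.db/new_table (a hypothetical,
  // not-yet-created table directory) yields no FileStatus, so the loop climbs to
  // /warehouse/sales.db, the nearest existing ancestor, and validates the action there.
  // This mirrors HDFS semantics, where creating a child entry requires write permission
  // on the parent directory.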
  /**
   * Make sure that the warehouse variable is set up properly.
   * @throws MetaException if unable to instantiate
   */
  private void initWh() throws MetaException, HiveException {
    if (wh == null) {
      if (!isRunFromMetaStore) {
        // Note, although HiveProxy has a method that allows us to check if we're being
        // called from the metastore or from the client, we don't have an initialized HiveProxy
        // till we explicitly initialize it as being from the client side. So, we have a
        // chicken-and-egg problem. So, we now track whether or not we're running from client-side
        // in the SBAP itself.
        hive_db = new HiveProxy(Hive.get(getConf(), StorageBasedAuthorizationProvider.class));
        this.wh = new Warehouse(getConf());
        if (this.wh == null) {
          // If wh is still null after just having initialized it, bail out - something's very wrong.
          throw new IllegalStateException("Unable to initialize Warehouse from clientside.");
        }
      } else {
        // not good if we reach here, this was initialized at setMetaStoreHandler() time.
        // this means handler.getWh() is returning null. Error out.
        throw new IllegalStateException("Uninitialized Warehouse from MetastoreHandler");
      }
    }
  }