/**
 * Looks up and returns the metadata object for the table with the given name.
 *
 * @param tableName the name of the table
 * @return the table metadata
 * @throws HiveException if there's an internal error or if the
 *         table doesn't exist
 */
public Table getTable(final String tableName) throws HiveException {
  // Delegate to the two-argument overload, passing true for its second
  // argument (see that overload for the flag's exact semantics).
  return getTable(tableName, true);
}
/**
 * Drops a database by delegating to the four-argument overload, passing
 * {@code false} as the final argument (see that overload for its meaning).
 *
 * @param name            name of the database to drop
 * @param deleteData      whether the database's data is deleted as well
 * @param ignoreUnknownDb if true, will ignore NoSuchObjectException
 * @throws HiveException if the drop fails
 * @throws NoSuchObjectException if the database does not exist and
 *         {@code ignoreUnknownDb} is false
 */
public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
    throws HiveException, NoSuchObjectException {
  // Fourth argument is hard-coded false here; consult the overload for
  // its semantics before changing it.
  dropDatabase(name, deleteData, ignoreUnknownDb, false);
}
/**
 * Dumps and clears the metastore call-timing information for the given
 * phase without propagating exceptions ("WithoutEx"): a failure is only
 * logged and {@code null} is returned instead.
 *
 * @param phase the query phase whose call timings are dumped
 * @return the timing map, or {@code null} if it could not be obtained
 */
private ImmutableMap<String, Long> dumpMetaCallTimingWithoutEx(String phase) {
  ImmutableMap<String, Long> timings = null;
  try {
    timings = Hive.get().dumpAndClearMetaCallTiming(phase);
  } catch (HiveException he) {
    // Best effort only: timing collection must never fail the caller.
    LOG.warn("Caught exception attempting to write metadata call information " + he, he);
  }
  return timings;
}
/**
 * Returns the thread-local {@code Hive} instance, creating a fresh one when
 * the cached instance is missing, owned by a different user, explicitly
 * refreshed, or incompatible with the supplied configuration.
 *
 * @param c                configuration to apply; may be {@code null}, in
 *                         which case the cached instance's conf is kept
 * @param needsRefresh     if true, force creation of a new instance
 * @param isFastCheck      forwarded to {@code isCompatible} to control how
 *                         strict the configuration comparison is
 * @param doRegisterAllFns forwarded to {@code create}
 * @return the cached or newly created Hive instance
 * @throws HiveException if a new instance cannot be created
 */
private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFastCheck,
    boolean doRegisterAllFns) throws HiveException {
  Hive db = hiveDB.get();
  // Recreate when: no cached instance, cached one belongs to another user,
  // caller forces a refresh, or the supplied conf is incompatible.
  if (db == null || !db.isCurrentUserOwner() || needsRefresh
      || (c != null && !isCompatible(db, c, isFastCheck))) {
    if (db != null) {
      LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh
          + ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
      // Close the stale thread-local instance before replacing it.
      closeCurrent();
    }
    db = create(c, doRegisterAllFns);
  }
  // Even a reused instance picks up the caller's configuration.
  if (c != null) {
    db.conf = c;
  }
  return db;
}
/**
 * Verifies that dropping a table (and then its database) also removes
 * unregistered directories under their storage locations: a fake sibling
 * "table" directory and a fake partition directory are created directly on
 * the filesystem, the real table/database are dropped, and both fakes are
 * asserted gone afterwards.
 */
@Test
public void testDataDeletion() throws HiveException, IOException, TException {
  // Create the test database and a partitioned table inside it.
  Database db = new Database();
  db.setName(dbName);
  hive.createDatabase(db);
  Table table = new Table(dbName, tableName);
  table.setDbName(dbName);
  table.setInputFormatClass(TextInputFormat.class);
  table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  table.setPartCols(partCols);
  hive.createTable(table);
  table = hive.getTable(dbName, tableName);
  // A directory next to the table's dir that the metastore knows nothing about.
  Path fakeTable = table.getPath().getParent().suffix(
      Path.SEPARATOR + "faketable");
  fs = fakeTable.getFileSystem(hive.getConf());
  fs.mkdirs(fakeTable);
  fs.deleteOnExit(fakeTable);
  // A partition-style directory under the table that was never registered.
  Path fakePart = new Path(table.getDataLocation().toString(),
      "fakepartition=fakevalue");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);
  hive.dropTable(dbName, tableName, true, true);
  // Dropping the table must delete the unregistered partition directory...
  assertFalse(fs.exists(fakePart));
  hive.dropDatabase(dbName);
  // ...and dropping the database must delete the unregistered table directory.
  assertFalse(fs.exists(fakeTable));
}
/**
 * One-time test setup: configures the SQL-standard authorization manager,
 * starts a session, and creates a partitioned table "foo" (column "a",
 * partition column "ds") with a single partition ds=today for the tests
 * to work against.
 */
@BeforeClass
public static void init() throws Exception {
  queryState = new QueryState.Builder().build();
  conf = queryState.getConf();
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  // The session must be started before any Hive.get/DDL calls below.
  SessionState.start(conf);

  // Create a table so we can work against it
  Hive h = Hive.get(conf);
  List<String> cols = new ArrayList<String>();
  cols.add("a");
  List<String> partCols = new ArrayList<String>();
  partCols.add("ds");
  h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class);
  Table t = h.getTable("foo");
  // Register one partition so partition-level operations have a target.
  Map<String, String> partSpec = new HashMap<String, String>();
  partSpec.put("ds", "today");
  h.createPartition(t, partSpec);
}
/**
 * Returns the {@code Hive} client instance associated with this object's
 * configuration.
 *
 * @return the Hive instance obtained via {@code Hive.get}
 * @throws HiveException if the instance cannot be obtained
 */
@Override
public Hive getHive() throws HiveException {
  final HiveConf hiveConf = (HiveConf) conf;
  return Hive.get(hiveConf);
}
/**
 * Returns a metastore client for this session.
 *
 * On a {@code MetaException}, if a delegation token is in use and retry is
 * permitted, the cached Hive instance is closed, a fresh delegation token is
 * acquired, and the connection is retried exactly once (the recursive call
 * passes {@code false}) — this covers the token-expiration case suggested by
 * the parameter name. Otherwise the failure is wrapped in a
 * {@code HiveSQLException}.
 *
 * @param retryInCaseOfTokenExpiration whether one retry with a renewed
 *        delegation token is attempted on {@code MetaException}
 * @return the metastore client
 * @throws HiveSQLException if the connection (or the single retry) fails
 */
private IMetaStoreClient getMetaStoreClient(boolean retryInCaseOfTokenExpiration)
    throws HiveSQLException {
  try {
    return Hive.get(getHiveConf()).getMSC();
  } catch (HiveException e) {
    throw new HiveSQLException("Failed to get metastore connection: " + e, e);
  } catch (MetaException e1) {
    if (hmsDelegationTokenStr != null && retryInCaseOfTokenExpiration) {
      LOG.info("Retrying failed metastore connection: " + e1, e1);
      // Discard the cached (broken) client before reconnecting.
      Hive.closeCurrent();
      try {
        setDelegationToken(Hive.get(getHiveConf()).getDelegationToken(
            sessionUgi.getUserName(), getUserName()));
      } catch (HiveException e2) {
        throw new HiveSQLException("Error connect metastore to setup impersonation: "
            + e2, e2);
      }
      // Single retry only: disable further retries to avoid looping.
      return getMetaStoreClient(false);
    } else {
      throw new HiveSQLException("Failed to get metastore connection: " + e1, e1);
    }
  }
}
/** * Removes all databases and tables from the metastore */ public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm) throws HiveException, MetaException, NoSuchObjectException { for (String dbName : hive.getAllDatabases()) { if (dbName.equals("default")) { continue; } try { Path path = getDbPath(hive, wh, dbName); FileSystem whFs = path.getFileSystem(hive.getConf()); whFs.setPermission(path, defaultPerm); } catch (IOException ex) { //ignore } hive.dropDatabase(dbName, true, true, true); } //clean tables in default db for (String tablename : hive.getAllTables("default")) { hive.dropTable("default", tablename, true, true); } }
/**
 * Retrieves a metastore delegation token on behalf of the given owner (the
 * owner is also used as the renewer). Only supported when the metastore
 * uses Thrift SASL and HiveServer2 impersonation (doAs) is enabled.
 *
 * @param owner the user the token is issued to
 * @return the serialized delegation token string
 * @throws HiveSQLException if the metastore connection or token retrieval fails
 * @throws UnsupportedOperationException if the metastore is not secure or
 *         doAs is disabled
 */
public String getDelegationTokenFromMetaStore(String owner)
    throws HiveSQLException, UnsupportedOperationException, LoginException, IOException {
  HiveConf hiveConf = getHiveConf();
  if (!hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)
      || !hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
    // Message fixed: previously read "delegation token is can only be obtained".
    throw new UnsupportedOperationException(
        "delegation token can only be obtained for a secure remote metastore");
  }
  try {
    // Drop any cached Hive client so the token is fetched over a fresh connection.
    Hive.closeCurrent();
    return Hive.get(hiveConf).getDelegationToken(owner, owner);
  } catch (HiveException e) {
    if (e.getCause() instanceof UnsupportedOperationException) {
      // Surface the underlying "not supported" condition directly.
      throw (UnsupportedOperationException) e.getCause();
    } else {
      throw new HiveSQLException("Error connect metastore to setup impersonation", e);
    }
  }
}
/**
 * Test basic Hive class interaction: verifies that Hive.closeCurrent()
 * discards the thread-local instance, so successive Hive.get() calls can
 * yield distinct Hive objects over the lifetime of a single thread.
 */
public void testHiveCloseCurrent() throws Throwable {
  Hive firstInstance = Hive.get();
  Hive.closeCurrent();
  Hive secondInstance = Hive.get();
  Hive.closeCurrent();
  // After closing, a brand-new instance must have been created.
  assertTrue(firstInstance != secondInstance);
}
private Table createPartitionedTable(String dbName, String tableName) throws Exception { try { hm.dropTable(dbName, tableName); hm.createTable(tableName, Arrays.asList("key", "value"), // Data columns. Arrays.asList("ds", "hr"), // Partition columns. TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); return hm.getTable(dbName, tableName); } catch (Exception exception) { fail("Unable to drop and create table " + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + " because " + StringUtils.stringifyException(exception)); throw exception; } }
/**
 * Drops the named table together with the data in it. If the table doesn't
 * exist then it is a no-op.
 *
 * @param tableName table to drop
 * @throws HiveException thrown if the drop fails
 */
public void dropTable(String tableName) throws HiveException {
  // Delegate to the two-argument overload, passing false for its second
  // argument (see that overload for the flag's exact semantics).
  this.dropTable(tableName, false);
}
/**
 * Creates the test database (second argument true — presumably
 * "if not exists"; confirm against createDatabase's signature), a
 * partitioned test table inside it, and one partition per spec in
 * {@code parts}.
 *
 * @return the table metadata re-read from the metastore, with its
 *         metastore-assigned id cleared
 * @throws HiveException if any metastore operation fails
 * @throws AlreadyExistsException if the table already exists
 */
private Table createTestTable() throws HiveException, AlreadyExistsException {
  Database db = new Database();
  db.setName(dbName);
  hive.createDatabase(db, true);
  Table table = new Table(dbName, tableName);
  table.setDbName(dbName);
  table.setInputFormatClass(TextInputFormat.class);
  table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  table.setPartCols(partCols);
  hive.createTable(table);
  // Re-read so we see exactly what the metastore stored.
  table = hive.getTable(dbName, tableName);
  Assert.assertTrue(table.getTTable().isSetId());
  // Clear the metastore-assigned id so later comparisons ignore it.
  table.getTTable().unsetId();
  for (Map<String, String> partSpec : parts) {
    hive.createPartition(table, partSpec);
  }
  return table;
}
/**
 * Cancels this session's metastore delegation token, if one was issued,
 * then clears the cached token string and the token signature in the
 * configuration. A session without a token is a no-op.
 *
 * @throws HiveSQLException if the cancellation fails
 */
private void cancelDelegationToken() throws HiveSQLException {
  if (hmsDelegationTokenStr == null) {
    return; // nothing to cancel
  }
  try {
    Hive.get(getHiveConf()).cancelDelegationToken(hmsDelegationTokenStr);
    hmsDelegationTokenStr = null;
    getHiveConf().setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE, "");
  } catch (HiveException e) {
    throw new HiveSQLException("Couldn't cancel delegation token", e);
  }
}
/**
 * Fetches every partition of the given table, timing the retrieval under
 * the PARTITION_RETRIEVING perf-logger key.
 *
 * @param tab the table whose partitions are listed
 * @return all partitions of the table
 * @throws HiveException if the metastore lookup fails
 */
private static Set<Partition> getAllPartitions(Table tab) throws HiveException {
  PerfLogger logger = SessionState.getPerfLogger();
  logger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
  Set<Partition> partitions = Hive.get().getAllPartitionsOf(tab);
  logger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
  return partitions;
}
/**
 * Returns the metastore client held by {@code hive}, converting a checked
 * {@code MetaException} into a {@code HiveException}.
 *
 * @return the metastore client
 * @throws HiveException if the client cannot be obtained
 */
private IMetaStoreClient getMSC() throws HiveException {
  try {
    return hive.getMSC();
  } catch (MetaException metaException) {
    // Wrap so callers only need to handle HiveException.
    throw new HiveException(metaException);
  }
}