/**
 * Lists every database in the default catalog.
 *
 * @return names of all databases in the default catalog
 * @throws TException on metastore communication failure
 */
@Override
public List<String> getAllDatabases() throws TException {
  String defaultCatalog = getDefaultCatalog(conf);
  return getAllDatabases(defaultCatalog);
}
/**
 * Returns the names of all databases known to the Hive metastore,
 * as reported by the underlying metastore client.
 *
 * @return all database names
 * @throws Exception if the metastore client cannot be obtained or queried
 */
@Override public List<String> getHiveDbNames() throws Exception { return getMetaStoreClient().getAllDatabases(); }
void run() throws Exception { wh = new Warehouse(conf); checkOldWarehouseRoot(); checkExternalWarehouseDir(); checkOwnerPermsOptions(); hms = new HiveMetaStoreClient(conf);//MetaException try { List<String> databases = hms.getAllDatabases();//TException LOG.info("Found {} databases", databases.size()); for (String dbName : databases) { if (dbName.matches(runOptions.dbRegex)) { try { processDatabase(dbName); } catch (Exception err) { LOG.error("Error processing database " + dbName, err); failuresEncountered = true; } } } LOG.info("Done processing databases."); } finally { hms.close(); } if (failuresEncountered) { throw new HiveException("One or more failures encountered during processing."); } if (failedValidationChecks) { throw new HiveException("One or more tables failed validation checks for strict managed table mode."); } }
/**
 * Tests {@link HiveMetaStoreClient#newSynchronizedClient}. Does not actually
 * exercise multithreading; it only verifies that the synchronizing proxy
 * delegates calls correctly.
 */
@Test
public void testSynchronized() throws Exception {
  final int expectedDbCount = client.getAllDatabases().size();
  IMetaStoreClient wrapped = HiveMetaStoreClient.newSynchronizedClient(client);
  List<String> viaProxy = wrapped.getAllDatabases();
  assertEquals(expectedDbCount, viaProxy.size());
}
/**
 * Exercises create/create-if-not-exists/drop database round trips through
 * hcatDriver, verifying the result against the metastore client each step.
 */
@Test
public void testDatabaseOperations() throws Exception {
  String testDb1 = "testdatabaseoperatons1";
  String testDb2 = "testdatabaseoperatons2";

  // Clean up leftovers from earlier runs so the create calls below start fresh.
  dropDatabaseIfPresent(testDb1);
  dropDatabaseIfPresent(testDb2);

  assertEquals(0, hcatDriver.run("create database " + testDb1).getResponseCode());
  assertTrue(client.getAllDatabases().contains(testDb1));

  // "if not exists" on an existing database must still succeed.
  assertEquals(0, hcatDriver.run("create database if not exists " + testDb1).getResponseCode());
  assertTrue(client.getAllDatabases().contains(testDb1));

  assertEquals(0, hcatDriver.run("create database if not exists " + testDb2).getResponseCode());
  assertTrue(client.getAllDatabases().contains(testDb2));

  assertEquals(0, hcatDriver.run("drop database " + testDb1).getResponseCode());
  assertEquals(0, hcatDriver.run("drop database " + testDb2).getResponseCode());
  assertFalse(client.getAllDatabases().contains(testDb1));
  assertFalse(client.getAllDatabases().contains(testDb2));
}

/** Drops {@code dbName} via hcatDriver if the metastore currently lists it. */
private void dropDatabaseIfPresent(String dbName) throws Exception {
  if (client.getAllDatabases().contains(dbName.toLowerCase())) {
    assertEquals(0, hcatDriver.run("drop database " + dbName).getResponseCode());
  }
}
/**
 * Verifies that database-level filtering hides every database from this
 * client: direct lookup of DBNAME1 fails, and all listing calls come back
 * empty.
 *
 * @param filterAtServer true when the filter hook runs inside the HMS
 *        server rather than in the client
 * @throws Exception on unexpected metastore errors
 */
protected void testFilterForDb(boolean filterAtServer) throws Exception {
  // Skip this call when testing filter hook at HMS server because HMS server calls authorization
  // API for getDatabase(), and does not call filter hook
  if (!filterAtServer) {
    try {
      assertNotNull(client.getDatabase(DBNAME1));
      fail("getDatabase() should fail with blocking mode");
    } catch (NoSuchObjectException e) {
      // Expected: the filter blocks access to DBNAME1
    }
  }
  // Every listing variant must be filtered down to nothing.
  assertEquals(0, client.getDatabases("*").size());
  assertEquals(0, client.getAllDatabases().size());
  assertEquals(0, client.getDatabases(DBNAME1).size());
}
@Test public void testJDOPersistanceManagerCleanup() throws Exception { if (isThriftClient == false) { return; } int numObjectsBeforeClose = getJDOPersistanceManagerCacheSize(); HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf); closingClient.getAllDatabases(); closingClient.close(); Thread.sleep(5 * 1000); // give HMS time to handle close request int numObjectsAfterClose = getJDOPersistanceManagerCacheSize(); assertTrue(numObjectsBeforeClose == numObjectsAfterClose); HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf); nonClosingClient.getAllDatabases(); // Drop connection without calling close. HMS thread deleteContext // will trigger cleanup nonClosingClient.getTTransport().close(); Thread.sleep(5 * 1000); int numObjectsAfterDroppedConnection = getJDOPersistanceManagerCacheSize(); assertTrue(numObjectsAfterClose == numObjectsAfterDroppedConnection); }
// Callable task body: fetches all database names through the captured
// metastore client. NOTE(review): the trailing "};" closes an enclosing
// anonymous class declared outside this view.
@Override public List<String> call() throws TException { return client.getAllDatabases(); } };
// Retry-wrapper body: lists all databases via the supplied client.
// NOTE(review): the trailing "}}); }" closes an anonymous class and an
// enclosing method that begin outside this view.
@Override public List<String> run(HiveMetaStoreClient client) throws TException { return client.getAllDatabases(); }}); }
// Callable task body: fetches all database names through the captured
// metastore client. NOTE(review): the trailing "};" closes an enclosing
// anonymous class declared outside this view.
@Override public List<String> call() throws TException { return client.getAllDatabases(); } };
/**
 * Returns the names of all databases known to the Hive metastore,
 * as reported by the underlying metastore client.
 *
 * @return all database names
 * @throws Exception if the metastore client cannot be obtained or queried
 */
@Override public List<String> getHiveDbNames() throws Exception { return getMetaStoreClient().getAllDatabases(); }
/**
 * Lists all databases, passing the raw result through the filter hook so the
 * caller only sees databases it is allowed to access.
 *
 * @return filtered database names
 * @throws MetaException on metastore error
 */
@Override
public List<String> getAllDatabases() throws MetaException {
  List<String> unfiltered = super.getAllDatabases();
  return filterDatabases(unfiltered);
}
/**
 * Returns the names of all databases known to the Hive metastore,
 * as reported by the underlying metastore client.
 *
 * @return all database names
 * @throws Exception if the metastore client cannot be obtained or queried
 */
public List<String> getHiveDbNames() throws Exception { return getMetaStoreClient().getAllDatabases(); }
/**
 * Renders the list of all Hive databases as the "/schema/dbs.vm" view.
 * The database names are exposed to the template via the "dbs" request
 * attribute.
 *
 * @return the Viewable for the databases page
 * @throws WebApplicationException if the metastore cannot be reached
 */
@GET
@Produces("text/html")
public Viewable dbs() {
  HiveConf hiveConf = new HiveConf(SessionState.class);
  HiveMetaStoreClient client = null;
  try {
    client = new HiveMetaStoreClient(hiveConf);
    List<String> dbs = client.getAllDatabases();
    request.setAttribute("dbs", dbs);
  } catch (MetaException e) {
    throw new WebApplicationException(e);
  } finally {
    // Always release the metastore connection; the original leaked the
    // client when getAllDatabases() threw.
    if (client != null) {
      client.close();
    }
  }
  return new Viewable("/schema/dbs.vm");
}
// Fragment: connects to HMS, stashes the client in "c" (presumably for the
// error path below — TODO confirm against the full method), and lists all
// databases. NOTE(review): the catch body continues past this view.
try (HMSClient client = clientFactory.connect()) { c = client; allDbStr = client.getClient().getAllDatabases(); } catch (Exception e) { if (c != null) {
// Stub: the mocked metastore client reports the prepared dbNames list.
Mockito.when(client.getAllDatabases()).thenReturn(dbNames);
// Stubs: the mocked client reports the prepared database list and the
// current notification event id used by the code under test.
Mockito.when(client.getAllDatabases()).thenReturn(dbNames); Mockito.when(client.getCurrentNotificationEventId()). thenReturn(new CurrentNotificationEventId(eventId));
// Fragment: connect to the metastore and enumerate all databases;
// "compactions" accumulates results for code that continues past this view.
// NOTE(review): no visible close() for hms here — confirm the full method
// releases it.
HiveMetaStoreClient hms = new HiveMetaStoreClient(conf);//MetaException LOG.debug("Looking for databases"); List<String> databases = hms.getAllDatabases();//TException LOG.debug("Found " + databases.size() + " databases to process"); List<String> compactions = new ArrayList<>();
// Stubs: the mocked client exposes a single database "db1" backed by the
// prepared db1 object.
Mockito.when(client.getAllDatabases()).thenReturn(Lists.newArrayList("db1")); Mockito.when(client.getDatabase("db1")).thenReturn(db1);