public void addDatabaseToCache(Database db) { try { cacheLock.writeLock().lock(); Database dbCopy = db.deepCopy(); // ObjectStore also stores db name in lowercase dbCopy.setName(dbCopy.getName().toLowerCase()); dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase()); databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy); isDatabaseCacheDirty.set(true); } finally { cacheLock.writeLock().unlock(); } }
// Attempts a schema rename via alterDatabase, then detects metastores that
// silently ignore the name change. NOTE(review): the detection relies on
// delegate.getDatabase(oldName) still returning the database with its old
// name after a failed rename — confirm this matches the delegate's semantics.
@Override
public void renameDatabase(String databaseName, String newDatabaseName)
{
    org.apache.hadoop.hive.metastore.api.Database database = delegate.getDatabase(databaseName)
            .orElseThrow(() -> new SchemaNotFoundException(databaseName));
    database.setName(newDatabaseName);
    delegate.alterDatabase(databaseName, database);
    // If the database is still retrievable under the old name AND still carries
    // the old name, the metastore ignored the rename — surface that explicitly
    // instead of silently succeeding.
    delegate.getDatabase(databaseName).ifPresent(newDatabase -> {
        if (newDatabase.getName().equals(databaseName)) {
            throw new PrestoException(NOT_SUPPORTED, "Hive metastore does not support renaming schemas");
        }
    });
}
public void populateDatabasesInCache(List<Database> databases) { for (Database db : databases) { Database dbCopy = db.deepCopy(); // ObjectStore also stores db name in lowercase dbCopy.setName(dbCopy.getName().toLowerCase()); try { cacheLock.writeLock().lock(); // Since we allow write operations on cache while prewarm is happening: // 1. Don't add databases that were deleted while we were preparing list for prewarm // 2. Skip overwriting exisiting db object // (which is present because it was added after prewarm started) String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); if (databasesDeletedDuringPrewarm.contains(key)) { continue; } databaseCache.putIfAbsent(key, dbCopy); databasesDeletedDuringPrewarm.clear(); isDatabaseCachePrewarmed = true; } finally { cacheLock.writeLock().unlock(); } } }
@Override public Database dbInMetadata(String dbNameToOverride) throws SemanticException { try { MetaData rv = EximUtil.readMetaData(fileSystem, dbMetadataFile); Database dbObj = rv.getDatabase(); if (dbObj == null) { throw new IllegalArgumentException( "_metadata file read did not contain a db object - invalid dump."); } // override the db name if provided in repl load command if (StringUtils.isNotBlank(dbNameToOverride)) { dbObj.setName(dbNameToOverride); } return dbObj; } catch (Exception e) { throw new SemanticException(e); } }
/**
 * Sets the database name and derives its location: a "&lt;name&gt;.db" folder
 * under the warehouse directory, addressed through the raw:// scheme.
 *
 * @param name database name
 * @return this builder, for chaining
 */
public DatabaseBuilder name(String name) {
  database.setName(name);
  File folder = new File(warehouseFolder, name + ".db");
  String location = "raw://" + folder.toURI().getPath();
  database.setLocationUri(location);
  return this;
}
/**
 * Materializes this descriptor as a metastore {@code Database} object,
 * copying over name, comment, location and properties.
 */
Database toHiveDb() {
  Database db = new Database();
  db.setName(this.dbName);
  db.setDescription(this.comment);
  db.setLocationUri(this.locationUri);
  db.setParameters(this.dbProperties);
  return db;
}
/**
 * Converts the internal {@code Database} representation into the Hive
 * metastore Thrift API form. Optional location and comment are set only
 * when present.
 *
 * @param database internal database model
 * @return equivalent metastore API database
 */
public static org.apache.hadoop.hive.metastore.api.Database toMetastoreApiDatabase(Database database) {
  org.apache.hadoop.hive.metastore.api.Database api = new org.apache.hadoop.hive.metastore.api.Database();
  api.setName(database.getDatabaseName());
  api.setOwnerName(database.getOwnerName());
  api.setOwnerType(toMetastoreApiPrincipalType(database.getOwnerType()));
  api.setParameters(database.getParameters());
  database.getLocation().ifPresent(api::setLocationUri);
  database.getComment().ifPresent(api::setDescription);
  return api;
}
@Test(expected = InvalidObjectException.class) public void testCreateDatabaseInvalidName() throws Exception { Database database = testDatabases[0]; // Invalid character in new database name database.setName("test_database_1;"); client.createDatabase(database); }
/**
 * An empty database name must be rejected with InvalidObjectException.
 */
@Test(expected = InvalidObjectException.class)
public void testCreateDatabaseEmptyName() throws Exception {
  Database database = testDatabases[0];
  // Empty new database name
  database.setName("");
  client.createDatabase(database);
  // NOTE(review): a stale comment here claimed InvalidObjectException "would be
  // more appropriate" even though this test already expects it — removed.
}
/**
 * A null database name surfaces as a MetaException from the client.
 */
@Test(expected = MetaException.class)
public void testCreateDatabaseNullName() throws Exception {
  Database database = testDatabases[0];
  // Missing class setting field
  database.setName(null);
  client.createDatabase(database);
  // Throwing InvalidObjectException would be more appropriate, but we do not change the API
}
/**
 * Verifies that dropping a table/database also removes stray files that
 * live under their directories but were never registered in the metastore
 * (a fake table folder and a fake partition folder).
 */
@Test
public void testDataDeletion() throws HiveException,
    IOException, TException {

  Database db = new Database();
  db.setName(dbName);
  hive.createDatabase(db);

  Table table = new Table(dbName, tableName);
  table.setDbName(dbName);
  table.setInputFormatClass(TextInputFormat.class);
  table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  table.setPartCols(partCols);
  hive.createTable(table);
  // Re-read the table so its storage path is populated by the metastore.
  table = hive.getTable(dbName, tableName);

  // Unregistered sibling directory next to the table's folder.
  Path fakeTable = table.getPath().getParent().suffix(
      Path.SEPARATOR + "faketable");
  fs = fakeTable.getFileSystem(hive.getConf());
  fs.mkdirs(fakeTable);
  fs.deleteOnExit(fakeTable);

  // Unregistered partition-style directory inside the table's folder.
  Path fakePart = new Path(table.getDataLocation().toString(),
      "fakepartition=fakevalue");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);

  hive.dropTable(dbName, tableName, true, true);
  // Dropping the table must purge the fake partition directory under it.
  assertFalse(fs.exists(fakePart));
  hive.dropDatabase(dbName);
  // Dropping the database must purge the fake table directory under it.
  assertFalse(fs.exists(fakeTable));
}
@Test public void testCreateDb(){ Database db = new Database(); db.setName("testdb"); NotificationEvent event = new NotificationEvent(getEventId(), getTime(), HCatConstants.HCAT_CREATE_DATABASE_EVENT, msgFactory.buildCreateDatabaseMessage(db).toString()); event.setDbName(db.getName()); HCatNotificationEvent hev = new HCatNotificationEvent(event); ReplicationTask rtask = ReplicationTask.create(client,hev); assertEquals(hev.toString(), rtask.getEvent().toString()); verifyCreateDbReplicationTask(rtask); // CREATE DB currently replicated as Noop. }
/** * Create a Database * @param db * @param crtDb * @return Always returns 0 * @throws HiveException */ private int createDatabase(Hive db, CreateDatabaseDesc crtDb) throws HiveException { Database database = new Database(); database.setName(crtDb.getName()); database.setDescription(crtDb.getComment()); database.setLocationUri(crtDb.getLocationUri()); database.setParameters(crtDb.getDatabaseProperties()); database.setOwnerName(SessionState.getUserFromAuthenticator()); database.setOwnerType(PrincipalType.USER); try { makeLocationQualified(database); db.createDatabase(database, crtDb.getIfNotExists()); } catch (AlreadyExistsException ex) { //it would be better if AlreadyExistsException had an errorCode field.... throw new HiveException(ex, ErrorMsg.DATABASE_ALREADY_EXISTS, crtDb.getName()); } return 0; }
/**
 * A DROP DATABASE notification event must round-trip through
 * HCatNotificationEvent and map to the expected replication task.
 */
@Test
public void testDropDb() throws IOException {
  Database db = new Database();
  db.setName("testdb");
  // BUG FIX: the payload for a drop event must be a DropDatabaseMessage; the
  // original built a CreateDatabaseMessage for an HCAT_DROP_DATABASE_EVENT,
  // so the event type and its message disagreed.
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_DROP_DATABASE_EVENT,
      msgFactory.buildDropDatabaseMessage(db).toString());
  event.setDbName(db.getName());

  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyDropDbReplicationTask(rtask);
}
/**
 * Per-test setup: starts a standalone metastore with a DummyListener
 * registered, opens a client, and prepares the shared db/table/partition
 * fixtures. Statement order matters: the listener property must be set
 * before the metastore starts, and the client connects after it is up.
 */
@Before
public void setUp() throws Exception {
  // Must be set before the metastore boots so the listener gets registered.
  System.setProperty("hive.metastore.event.listeners",
      DummyListener.class.getName());

  conf = MetastoreConf.newMetastoreConf();

  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);

  msc = new HiveMetaStoreClient(conf);

  // Clean slate: drop any leftovers from a previous run (ignore-missing, cascade).
  msc.dropDatabase(dbName, true, true);
  Map<String, String> envProperties = new HashMap<>();
  envProperties.put("hadoop.job.ugi", "test_user");
  envContext = new EnvironmentContext(envProperties);

  db.setName(dbName);
  db.setCatalogName(DEFAULT_CATALOG_NAME);

  table = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addTableParam("a", "string")
      .addPartCol("b", "string")
      .addCol("a", "string")
      .addCol("b", "string")
      .build(conf);

  partition = new PartitionBuilder()
      .inTable(table)
      .addValue("2011")
      .build(conf);

  // Events recorded during setup are not part of the test's expectations.
  DummyListener.notifyList.clear();
}
// NOTE(review): fragment — the enclosing method and this try's catch/finally
// are outside the visible chunk. Creates a database at an explicit location.
try {
  Database db = new Database();
  db.setName(databaseName);
  db.setLocationUri(dbLocation);
  client.createDatabase(db);
/**
 * Fetches a database directly via JDO (bypassing any cache layer) and
 * converts the MDatabase model object into the Thrift API Database.
 *
 * @param catName catalog the database belongs to
 * @param name    database name
 * @return the API-level database
 * @throws NoSuchObjectException if no such database exists (propagated from
 *                               getMDatabase; the transaction is rolled back)
 */
public Database getJDODatabase(String catName, String name) throws NoSuchObjectException {
  MDatabase mdb = null;
  boolean commited = false;
  try {
    openTransaction();
    mdb = getMDatabase(catName, name);
    commited = commitTransaction();
  } finally {
    // If getMDatabase or commit threw, commited is still false: roll back
    // so the transaction is never left open.
    if (!commited) {
      rollbackTransaction();
    }
  }
  Database db = new Database();
  db.setName(mdb.getName());
  db.setDescription(mdb.getDescription());
  db.setLocationUri(mdb.getLocationUri());
  db.setParameters(convertMap(mdb.getParameters()));
  db.setOwnerName(mdb.getOwnerName());
  // Stored owner type may be blank; normalize blank -> null before valueOf.
  String type = org.apache.commons.lang.StringUtils.defaultIfBlank(mdb.getOwnerType(), null);
  PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type);
  db.setOwnerType(principalType);
  db.setCatalogName(catName);
  db.setCreateTime(mdb.getCreateTime());
  return db;
}
/**
 * Test fixture helper: creates (if needed) the test database, a partitioned
 * text table in it, and one partition per spec in {@code parts}.
 *
 * @return the table as re-read from the metastore, with its id unset so the
 *         fixture behaves like a client-built table
 * @throws HiveException          on metastore failures
 * @throws AlreadyExistsException if the table already exists
 */
private Table createTestTable() throws HiveException, AlreadyExistsException {
  Database db = new Database();
  db.setName(dbName);
  // ifNotExist=true: reuse the database across tests.
  hive.createDatabase(db, true);

  Table table = new Table(dbName, tableName);
  table.setDbName(dbName);
  table.setInputFormatClass(TextInputFormat.class);
  table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  table.setPartCols(partCols);
  hive.createTable(table);
  // Re-read so metastore-assigned fields (including the id) are populated.
  table = hive.getTable(dbName, tableName);
  Assert.assertTrue(table.getTTable().isSetId());
  // Clear the id so the returned fixture looks like a freshly built table.
  table.getTTable().unsetId();

  for (Map<String, String> partSpec : parts) {
    hive.createPartition(table, partSpec);
  }
  return table;
}
/**
 * alterDatabase with a mostly-empty Database object must NOT change the
 * not-nullable/immutable fields of the stored database: name, description
 * and location stay intact, while nullable fields (parameters, owner name,
 * privileges) are overwritten by the empty values.
 */
@Test
public void testAlterDatabaseNotNullableFields() throws Exception {
  Database database = getDatabaseWithAllParametersSet();
  client.createDatabase(database);
  Database originalDatabase = client.getDatabase(database.getName());
  Database newDatabase = new Database();
  // Attempt to rename via alter — the name must be ignored by the metastore.
  newDatabase.setName("new_name");
  newDatabase.setCatalogName(DEFAULT_CATALOG_NAME);

  client.alterDatabase(originalDatabase.getName(), newDatabase);
  // The name should not be changed, so reload the db with the original name
  Database alteredDatabase = client.getDatabase(originalDatabase.getName());

  Assert.assertEquals("Database name should not change", originalDatabase.getName(),
      alteredDatabase.getName());
  Assert.assertEquals("Database description should not change",
      originalDatabase.getDescription(), alteredDatabase.getDescription());
  Assert.assertEquals("Database location should not change",
      originalDatabase.getLocationUri(), alteredDatabase.getLocationUri());
  // Nullable fields ARE replaced by the empty alter object:
  Assert.assertEquals("Database parameters should be empty",
      new HashMap<String, String>(), alteredDatabase.getParameters());
  Assert.assertNull("Database owner should be empty", alteredDatabase.getOwnerName());
  Assert.assertEquals("Database owner type should not change",
      originalDatabase.getOwnerType(), alteredDatabase.getOwnerType());
  Assert.assertNull("Database privileges should be empty", alteredDatabase.getPrivileges());
}