@Override
protected HiveMetaStoreClient createClient() throws Exception {
  try {
    return new HiveMetaStoreClient(conf);
  } catch (Throwable e) {
    System.err.println("Unable to open the metastore");
    System.err.println(StringUtils.stringifyException(e));
    throw new Exception(e);
  }
}
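A minimal usage sketch for a client produced by a factory like the one above: build the client from a Configuration, make one metadata call, and always close it. The class name MetastoreClientSketch, the metastore URI value, and the choice of getAllDatabases() are illustrative assumptions; depending on the Hive version the constructor may expect a HiveConf rather than a plain Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

public final class MetastoreClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder metastore URI; point this at a running metastore.
    conf.set("hive.metastore.uris", "thrift://localhost:9083");
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // One simple metadata call to verify the connection works.
      System.out.println("Databases: " + client.getAllDatabases());
    } finally {
      client.close(); // release the Thrift connection even if the call above throws
    }
  }
}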
@Override
public Table getTable(String catName, String dbName, String tableName, String validWriteIdList)
    throws TException {
  return getTable(catName, dbName, tableName, validWriteIdList, false);
}
// ... (the snippet starts mid-signature)
    throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
  try {
    getDatabase(catalogName, dbName);
  } catch (NoSuchObjectException e) {
    if (!ignoreUnknownDb) {
      throw e;
    }
    return;
  }
  // ...
  // Drop materialized views first, then pick a drop strategy based on the table count.
  List<String> materializedViews = getTables(dbName, ".*", TableType.MATERIALIZED_VIEW);
  for (String table : materializedViews) {
    dropTable(dbName, table, deleteData, true);
  }
  List<String> tableNameList = getAllTables(dbName);
  int tableCount = tableNameList.size();
  int maxBatchSize = MetastoreConf.getIntVar(conf, ConfVars.BATCH_RETRIEVE_MAX);
  if (tableCount > maxBatchSize) {
    dropDatabaseCascadePerTable(catalogName, dbName, tableNameList, deleteData, maxBatchSize);
  } else {
    LOG.debug("Dropping database in a per DB manner.");
    dropDatabaseCascadePerDb(catalogName, dbName, tableNameList, deleteData);
  }
private static void silentDropDatabase(String dbName) throws TException {
  try {
    for (String tableName : client.getTables(dbName, "*")) {
      client.dropTable(dbName, tableName);
    }
    client.dropDatabase(dbName);
  } catch (NoSuchObjectException | InvalidOperationException e) {
    // NOP
  }
}
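A sketch of how a cleanup helper such as silentDropDatabase is commonly paired with test setup. The setUp override, the test-class fields (client, dbName), and the database description string are assumptions that mirror the snippets above rather than code from any specific test.

@Override
protected void setUp() throws Exception {
  super.setUp();
  // Start from a clean slate: drop whatever a previous run left behind, then recreate the DB.
  silentDropDatabase(dbName);
  client.createDatabase(new Database(dbName, "test database", null, null));
}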
@Override
protected void tearDown() throws Exception {
  try {
    super.tearDown();
    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
    client.close();
  } catch (Throwable e) {
    LOG.error("Unable to close metastore", e);
    throw new Exception(e);
  }
}
private void initTable() throws Exception {
  try {
    client.dropTable(dbName, tblName);
  } catch (Exception e) {
    // the table may not exist yet; ignore
  }
  try {
    client.dropDatabase(dbName);
  } catch (Exception e) {
    // the database may not exist yet; ignore
  }
  client.createDatabase(new Database(dbName, "", null, null));
  assertNotNull(client.getDatabase(dbName).getLocationUri());

  fields.add(new FieldSchema("colname", serdeConstants.STRING_TYPE_NAME, ""));
  // ...
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Lists.newArrayList(new FieldSchema("data_column", serdeConstants.STRING_TYPE_NAME, "")));
  tbl.setSd(sd);
  sd.setInputFormat(RCFileInputFormat.class.getName());
  sd.setOutputFormat(RCFileOutputFormat.class.getName());
  sd.setParameters(new HashMap<String, String>());
  tbl.setParameters(tableParams);
  client.createTable(tbl);

  Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
  assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1")));
}
    String[] partNames, String dbLocation) throws Exception {
  IMetaStoreClient client = new HiveMetaStoreClient(conf);
  try {
    Database db = new Database();
    db.setName(databaseName);
    db.setLocationUri(dbLocation);
    client.createDatabase(db);

    Table tbl = new Table();
    tbl.setDbName(databaseName);
    tbl.setTableName(tableName);
    tbl.setTableType(TableType.MANAGED_TABLE.toString());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(getTableColumns(colNames, colTypes));
    sd.setNumBuckets(1);
    sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
    if (partNames != null && partNames.length != 0) {
  client.dropTable(dbName, tblName);
  silentDropDatabase(dbName);
  new DatabaseBuilder()
      // ... builder setters elided in the snippet ...
  client.dropType(typeName);
  Type typ1 = new Type();
  typ1.setName(typeName);
  typ1.setFields(new ArrayList<>(2));
  typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
  typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
  client.createType(typ1);
  // ...
  Table tbl2 = client.getTable(dbName, tblName);
  assertEquals(tbl2.getDbName(), dbName);
  assertEquals(tbl2.getTableName(), tblName);
  assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
  assertFalse(tbl2.getSd().isCompressed());
  assertFalse(tbl2.getSd().isStoredAsSubDirectories());

  List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
  List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
  // ...
} catch (Exception e) {
  System.err.println(StringUtils.stringifyException(e));
  System.err.println("testComplexTable() failed.");
  throw e;
} finally {
@Override
public HiveTableMeta getHiveTableMeta(String database, String tableName) throws Exception {
  HiveTableMetaBuilder builder = new HiveTableMetaBuilder();
  Table table = getMetaStoreClient().getTable(database, tableName);
  List<FieldSchema> allFields = getMetaStoreClient().getFields(database, tableName);
  List<FieldSchema> partitionFields = table.getPartitionKeys();
  if (allFields == null) {
    allFields = Lists.newArrayList();
  }
  // ...
  List<HiveTableMeta.HiveTableColumnMeta> partitionColumns = Lists.newArrayList();
  for (FieldSchema fieldSchema : allFields) {
    allColumns.add(new HiveTableMeta.HiveTableColumnMeta(fieldSchema.getName(), fieldSchema.getType(),
        fieldSchema.getComment()));
  }
  // ...
  builder.setPartitionColumns(partitionColumns);
  builder.setSdLocation(table.getSd().getLocation());
  builder.setFileSize(getBasicStatForTable(new org.apache.hadoop.hive.ql.metadata.Table(table),
      StatsSetupConst.TOTAL_SIZE));
  builder.setFileNum(getBasicStatForTable(new org.apache.hadoop.hive.ql.metadata.Table(table),
      StatsSetupConst.NUM_FILES));
  builder.setIsNative(!MetaStoreUtils.isNonNativeTable(table));
  builder.setTableName(tableName);
  builder.setSdInputFormat(table.getSd().getInputFormat());
  builder.setSdOutputFormat(table.getSd().getOutputFormat());
  builder.setOwner(table.getOwner());
  builder.setLastAccessTime(table.getLastAccessTime());
String viewName = "compView"; client.dropTable(dbName, tblName); silentDropDatabase(dbName); new DatabaseBuilder() tbl = client.getTable(dbName, tblName); client.createTable(view); view = client.getTable(dbName, viewName); client.add_partition(part); Partition part2 = client.getPartition(dbName, viewName, part.getValues()); client.alter_partition(dbName, viewName, part2, null); Partition part3 = client.getPartition(dbName, viewName, part.getValues()); assertEquals("couldn't view alter partition", part3.getParameters().get( "a"), "b"); client.dropTable(dbName, viewName); client.dropTable(dbName, tblName); client.dropDatabase(dbName);
vals.add("14"); client.dropTable(dbName, tblName); silentDropDatabase(dbName); new DatabaseBuilder() tbl = client.getTable(dbName, tblName); client.add_partition(part); Partition part2 = client.getPartition(dbName, tblName, part.getValues()); part2.getSd().setNumBuckets(12); part2.getSd().getSerdeInfo().getParameters().put("abc", "1"); client.alter_partition(dbName, tblName, part2, null); Partition part3 = client.getPartition(dbName, tblName, part.getValues()); assertEquals("couldn't alter partition", part3.getParameters().get( "retention"), "10"); client.dropTable(dbName, tblName); client.dropDatabase(dbName); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testPartition() failed."); throw e;
try {
  HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf);
  clearAndRecreateDB(hmsc);
  createTable(hmsc, true);
  Table table = hmsc.getTable(dbName, tableName);
  populatePartitions(hmsc, table, /* ... arguments elided in the snippet ... */);

  List<FieldSchema> fields = table.getSd().getCols();
  fields.add(new FieldSchema("goo", "string", "Entirely new column. Doesn't apply to older partitions."));
  table.getSd().setCols(fields);
  hmsc.alter_table(dbName, tableName, table);

  table = hmsc.getTable(dbName, tableName);
  Assert.assertEquals("Unexpected number of table columns.", 3, table.getSd().getColsSize());

  PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
  Assert.assertEquals("Unexpected number of partitions.", nDates * 3, partitionSpecProxy.size());
  // ... iteration over the returned partitions elided ...
  Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
  Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
  Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
  String partitionLocation = partition.getSd().getLocation();
  String tableLocation = table.getSd().getLocation();
Table tbl = client.getTable(dbName, tblName); List<FieldSchema> cols = tbl.getSd().getCols(); cols.add(new FieldSchema("new_col", ColumnType.STRING_TYPE_NAME, "")); tbl.getSd().setCols(cols); client.alter_table(dbName, tblName, tbl, true); Table tbl2 = client.getTable(dbName, tblName); assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size()); assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName()); Partition partition = client.getPartition(dbName, tblName, pvalues); assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName()); tbl.getSd().setCols(cols); client.alter_table(dbName, tblName, tbl, false); tbl2 = client.getTable(dbName, tblName); assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size()); assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName()); partition = client.getPartition(dbName, tblName, pvalues); assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
      .setLocation(dbLocation)
      .create(client, conf);
  Database db = client.getDatabase(dbName);
  // ... table builder elided in the snippet ...
      .create(client, conf);
  tbl = client.getTable(dbName, tblName_1);

  Path path = new Path(tbl.getSd().getLocation());
  System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
  assertEquals("Table location is not a subset of the database location",
      path.getParent().toString(), db.getLocationUri());
  // ...
} catch (Exception e) {
  System.err.println(StringUtils.stringifyException(e));
  System.err.println("testTableDatabase() failed.");
  throw e;
  Database db = new Database();
  db.setName(dbName);
  client.createDatabase(db);

  cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, ""));
  cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, ""));
  ArrayList<FieldSchema> partCols = Lists.newArrayList(
      new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, ""),
      new FieldSchema("p2", serdeConstants.INT_TYPE_NAME, ""));

  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);
  addSd(cols, tbl);
  tbl.setPartitionKeys(partCols);
  client.createTable(tbl);
  tbl = client.getTable(dbName, tblName);

  client.listPartitionsByExpr(dbName, tblName, new byte[] { 'f', 'o', 'o' }, null, (short) -1,
      new ArrayList<Partition>());
  fail("Should have thrown IncompatibleMetastoreException");
@Test
public void testCreateTableIfNotExists() throws Exception {
  hcatDriver.run("drop table " + TBL_NAME);
  hcatDriver.run("create table " + TBL_NAME + " (a int) stored as RCFILE");

  Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
  List<FieldSchema> cols = tbl.getSd().getCols();
  assertEquals(1, cols.size());
  assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
  assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
  assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());

  CommandProcessorResponse resp =
      hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE");
  assertEquals(0, resp.getResponseCode());
  assertNull(resp.getErrorMessage());

  tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
  cols = tbl.getSd().getCols();
  assertEquals(1, cols.size());
  assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
  assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
  assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());

  hcatDriver.run("drop table junit_sem_analysis");
}
@Test
public void testAlterTableSetFF() throws Exception {
  hcatDriver.run("drop table junit_sem_analysis");
  hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");

  Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
  assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
  assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());

  hcatDriver.run("alter table junit_sem_analysis set fileformat INPUTFORMAT "
      + "'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT "
      + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
  hcatDriver.run("desc extended junit_sem_analysis");

  tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
  assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
  assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());

  hcatDriver.run("drop table junit_sem_analysis");
}
void processTable(Database dbObj, String tableName, boolean modifyDefaultManagedLocation)
    throws HiveException, IOException, TException {
  String dbName = dbObj.getName();
  LOG.debug("Processing table {}", getQualifiedName(dbName, tableName));

  Table tableObj = hms.getTable(dbName, tableName);
  TableType tableType = TableType.valueOf(tableObj.getTableType());
  // ... validation-check call elided in the snippet; it ends with these arguments:
  //     getHiveUpdater(), hms, conf);
  if (!failedValidationChecks
      && (TableType.valueOf(tableObj.getTableType()) == TableType.MANAGED_TABLE)) {
    Path tablePath = new Path(tableObj.getSd().getLocation());
    if (modifyDefaultManagedLocation && shouldModifyTableLocation(dbObj, tableObj)) {
      Path newTablePath = wh.getDnsPath(
@Test
public void testAlterTableRename() throws Exception {
  hcatDriver.run("drop table oldname");
  hcatDriver.run("drop table newname");
  hcatDriver.run("create table oldname (a int)");

  Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, "oldname");
  assertTrue("The old table location is: " + tbl.getSd().getLocation(),
      tbl.getSd().getLocation().contains("oldname"));

  hcatDriver.run("alter table oldname rename to newNAME");
  tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, "newname");
  // Since the oldname table is not under its database (see HIVE-15059), the renamed table keeps
  // its location after HIVE-14909. So check the existence and name of the newname table instead
  // of verifying its location:
  // assertTrue(tbl.getSd().getLocation().contains("newname"));
  assertTrue(tbl != null);
  assertTrue(tbl.getTableName().equalsIgnoreCase("newname"));

  hcatDriver.run("drop table newname");
}
      .setOwnerName(SecurityUtils.getUser())
      .build(conf);
  Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName());
  client.createDatabase(db);
  db = client.getDatabase(TEST_DB1_NAME);
  // ... a second database is built; its builder chain ends with:
      .create(client, conf);
  db2 = client.getDatabase(TEST_DB2_NAME);
  // ... assertion on db2's location; the snippet shows only its final arguments:
  //     warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());
  List<String> dbs = client.getDatabases(".*");
  // ...
  client.dropDatabase(TEST_DB1_NAME);
  client.dropDatabase(TEST_DB2_NAME);
  silentDropDatabase(TEST_DB1_NAME);
  silentDropDatabase(TEST_DB2_NAME);
} catch (Throwable e) {
  System.err.println(StringUtils.stringifyException(e));
  System.err.println("testDatabase() failed.");
  throw e;