@Deprecated @Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { //actually temp table does not support partitions, cascade is not applicable here alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } super.alter_table(dbname, tbl_name, new_tbl, cascade); }
@Deprecated @Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { //actually temp table does not support partitions, cascade is not applicable here alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } super.alter_table(dbname, tbl_name, new_tbl, cascade); }
@Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { // actually temp table does not support partitions, cascade is not // applicable here alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } super.alter_table(dbname, tbl_name, new_tbl); }
@Override public void alter_table(String catName, String dbName, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, EnvironmentContext envContext, String validWriteIds) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbName, tbl_name); if (old_tbl != null) { //actually temp table does not support partitions, cascade is not applicable here alterTempTable(dbName, tbl_name, old_tbl, new_tbl, null); return; } super.alter_table(catName, dbName, tbl_name, new_tbl, envContext, validWriteIds); }
@Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { // actually temp table does not support partitions, cascade is not // applicable here alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } super.alter_table(dbname, tbl_name, new_tbl); }
// Mid-method test fragment (enclosing test method not in view): alter the
// same table with cascade on and off, then re-fetch it and verify the column
// count. Variables tbl/tbl2/client/cols/dbName/tblName are declared elsewhere.
tbl.getSd().setCols(cols);
client.alter_table(dbName, tblName, tbl, true);   // cascade = true
tbl.getSd().setCols(cols);
client.alter_table(dbName, tblName, tbl, false);  // cascade = false
tbl2 = client.getTable(dbName, tblName);
assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size());
// Mid-method test fragment: each alter_table call is expected to append
// exactly one event to notifyList; listSize tracks the expected count.
msc.alter_table(dbName, tblName, renamedTable);
listSize++;
assertEquals(notifyList.size(), listSize);
msc.alter_table(dbName, renamed, table);
listSize++;
assertEquals(notifyList.size(), listSize);
msc.alter_table(dbName, tblName, table);
listSize++;
assertEquals(notifyList.size(), listSize);
// Mid-method fragment: append a brand-new column to the schema and push the
// updated table definition to the metastore via alter_table.
fields.add(new FieldSchema("goo", "string",
    "Entirely new column. Doesn't apply to older partitions."));
table.getSd().setCols(fields);
hmsc.alter_table(dbName, tableName, table);
// Incomplete test fragment: the opening `try {` of several blocks (and part of
// an assertEquals call) fall outside this view, leaving bare catch clauses and
// an unterminated call. Tokens are preserved exactly as extracted; each catch
// records the expected InvalidOperationException for an invalid alter.
tbl2.setTableName(invTblName);
tbl2.getSd().setCols(invCols);
client.alter_table(dbName, tblName, tbl2);
} catch (InvalidOperationException ex) {
  failed = true;
  client.alter_table(dbName, tblName, tbl_pk);
} catch (InvalidOperationException ex) {
  failed = true;
  client.alter_table(dbName, tblName, tbl_pk);
} catch (InvalidOperationException ex) {
  failed = true;
  tbl2.getSd().setCols(cols);
  tbl2.getSd().setNumBuckets(32);
  client.alter_table(dbName, tblName, tbl2);
  Table tbl3 = client.getTable(dbName, tbl2.getTableName());
  // NOTE(review): the assertEquals call below is truncated in this extract.
  assertEquals("Alter table didn't succeed. Num buckets is different ",
  failed = false;
  try {
    client.alter_table(dbName, tbl2.getTableName(), tbl_pk);
  } catch (InvalidOperationException ex) {
    failed = true;
/**
 * Alters an existing table by delegating to the four-argument overload with a
 * null EnvironmentContext.
 *
 * @param dbname database containing the table
 * @param tbl_name name of the table to alter
 * @param new_tbl the new table definition to apply
 * @throws InvalidOperationException if the metastore rejects the alteration
 * @throws MetaException on a metastore-side failure
 * @throws TException on a Thrift transport failure
 * @see
 * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
 * java.lang.String, java.lang.String,
 * org.apache.hadoop.hive.metastore.api.Table)
 */
public void alter_table(String dbname, String tbl_name, Table new_tbl)
    throws InvalidOperationException, MetaException, TException {
  alter_table(dbname, tbl_name, new_tbl, null);
}
/**
 * Alters an existing table by delegating to the four-argument overload with a
 * null EnvironmentContext.
 *
 * @param dbname database containing the table
 * @param tbl_name name of the table to alter
 * @param new_tbl the new table definition to apply
 * @throws InvalidOperationException if the metastore rejects the alteration
 * @throws MetaException on a metastore-side failure
 * @throws TException on a Thrift transport failure
 * @see
 * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
 * java.lang.String, java.lang.String,
 * org.apache.hadoop.hive.metastore.api.Table)
 */
@Override
public void alter_table(String dbname, String tbl_name, Table new_tbl)
    throws InvalidOperationException, MetaException, TException {
  alter_table(dbname, tbl_name, new_tbl, null);
}
/**
 * Alters an existing table by delegating to the four-argument overload with a
 * null EnvironmentContext.
 *
 * @param dbname database containing the table
 * @param tbl_name name of the table to alter
 * @param new_tbl the new table definition to apply
 * @throws InvalidOperationException if the metastore rejects the alteration
 * @throws MetaException on a metastore-side failure
 * @throws TException on a Thrift transport failure
 * @see
 * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
 * java.lang.String, java.lang.String,
 * org.apache.hadoop.hive.metastore.api.Table)
 */
@Override
public void alter_table(String dbname, String tbl_name, Table new_tbl)
    throws InvalidOperationException, MetaException, TException {
  alter_table(dbname, tbl_name, new_tbl, null);
}
// Fragment of an anonymous Callable<Void>: pushes the (already-modified) tbl
// back to the metastore. The trailing braces close the enclosing anonymous
// class, whose declaration lies outside this view.
@Override
public Void call() throws TException {
  client.alter_table(tbl.getDbName(), tbl.getTableName(), tbl);
  return null;
}
};
// Fragment of an anonymous Callable<Void>: re-submits tbl to the metastore via
// alter_table. The trailing braces close the enclosing anonymous class, whose
// declaration is not visible in this extract.
@Override
public Void call() throws TException {
  client.alter_table(tbl.getDbName(), tbl.getTableName(), tbl);
  return null;
}
};
@Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { // First try temp table org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { alterTempTable(dbname, tbl_name, old_tbl, new_tbl, envContext); return; } // Try underlying client super.alter_table(dbname, tbl_name, new_tbl, envContext); }
@Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { //actually temp table does not support partitions, cascade is not applicable here alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } super.alter_table(dbname, tbl_name, new_tbl, cascade); }
void updateLastCommitTimeSynced() { // Set the last commit time from the TBLproperties String lastCommitSynced = activeTimeline.lastInstant().get().getTimestamp(); try { Table table = client.getTable(syncConfig.databaseName, syncConfig.tableName); table.putToParameters(HOODIE_LAST_COMMIT_TIME_SYNC, lastCommitSynced); client.alter_table(syncConfig.databaseName, syncConfig.tableName, table, true); } catch (Exception e) { throw new HoodieHiveSyncException( "Failed to get update last commit time synced to " + lastCommitSynced, e); } }
/**
 * Verifies that an Iceberg schema commit fails with CommitFailedException
 * after the metastore's metadata-location table parameter has been changed
 * out from under the loaded Iceberg table.
 */
@Test(expected = CommitFailedException.class)
public void testFailure() throws TException {
  com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
  final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
  final String dummyLocation = "dummylocation";
  // Overwrite the pointer Iceberg uses for its optimistic-concurrency check.
  table.getParameters().put(METADATA_LOCATION_PROP, dummyLocation);
  metastoreClient.alter_table(DB_NAME, TABLE_NAME, table);
  // The commit below should detect the stale metadata location and fail.
  icebergTable.updateSchema()
      .addColumn("data", Types.LongType.get())
      .commit();
}
// Trailing brace closes the enclosing test class (declaration not in view).
}