/** * Drop a given table or some partitions. DropTableDesc is currently used for both. * * @param db * The database in question. * @param dropTbl * This is the table we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } if (dropTbl.getPartSpecs() == null) { dropTable(db, tbl, dropTbl); } else { dropPartitions(db, tbl, dropTbl); } }
// NOTE(review): this is a byte-for-byte duplicate of the dropTableOrPartitions
// method that appears earlier in this extract — presumably a paste/extraction
// artifact rather than real repeated code. Confirm against the actual source
// file before deduplicating; only one copy can legally exist per class.
/** * Drop a given table or some partitions. DropTableDesc is currently used for both. * * @param db * The database in question. * @param dropTbl * This is the table we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } if (dropTbl.getPartSpecs() == null) { dropTable(db, tbl, dropTbl); } else { dropPartitions(db, tbl, dropTbl); } }
/**
 * Builds the DDL task that replays a DROP TABLE replication event.
 *
 * @param context replication dump context carrying the event payload and conf
 * @return a single-element list holding the generated drop-table task
 * @throws SemanticException if the event cannot be translated into a task
 */
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  DropTableMessage msg = deserializer.getDropTableMessage(context.dmd.getPayload());
  // Names supplied on the context override the ones carried in the event payload.
  String dbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  String tblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
  String qualifiedName = dbName + "." + tblName;
  DropTableDesc desc =
      new DropTableDesc(qualifiedName, null, true, true, context.eventOnlyReplicationSpec(), false);
  Task<DDLWork> task =
      TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc), context.hiveConf);
  context.log.debug("Added drop tbl task : {}:{}", task.getId(), desc.getTableName());
  // Table name is passed as null: the whole table is removed, so there is no
  // finer-grained object left to record in the metadata update.
  updatedMetadata.set(context.dmd.getEventTo().toString(), dbName, null, null);
  return Collections.singletonList(task);
}
}
// NOTE(review): the lines below are fragments cut from larger methods (their
// enclosing declarations are outside this extract), so they are documented in
// place rather than rewritten.
// Fragment: tail of a drop-partition replication handler — logs the generated
// task, records the event watermark for the table, and returns the task list.
); context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions()); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); return Collections.singletonList(dropPtnTask);
// NOTE(review): this line appears to fuse TWO separate fragments: everything up
// to "return tasks;" belongs to a drop-table path, while the statements after it
// ("tasks.add(dropPtnTask); ...") would be unreachable if this were one method —
// they presumably come from the sibling drop-partition path. Verify against the
// original file; do not assume the "unreachable" code is a real bug here.
LOG.debug("Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName()); dbsUpdated.put(actualDbName,dmd.getEventTo()); return tasks; tasks.add(dropPtnTask); LOG.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), dropPartitionMessage.getPartitions()); dbsUpdated.put(actualDbName, dmd.getEventTo()); tablesUpdated.put(actualDbName + "." + actualTblName, dmd.getEventTo());
// NOTE(review): fragments of the DDLTask drop-table/drop-partition internals;
// each starts or ends mid-statement, so the code is left byte-identical and
// only annotated.
// Fragment: right-hand side of an assignment collecting the partitions removed
// by db.dropPartitions; the PartitionDropOptions builder call is cut off here.
= db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(), PartitionDropOptions.instance()
// NOTE(review): because this extract collapsed the snippet onto one line, the
// "//" comment after "return;" swallows the rest of the line — in the real file
// "db.dropTable(...)" and "if (tbl != null) {" are separate statements on later
// lines, not commented-out code. Guard path: the stored table is newer than the
// replicated update, so the drop is skipped.
LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", dropTbl.getTableName()); return; // table is newer, leave it be. db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) {
// Fragment: duplicate of the dropPartitions assignment fragment above.
= db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(), PartitionDropOptions.instance()
// Fragment: performs the actual metastore drop (honoring PURGE), then begins
// the null-guard used before registering the table as a write entity.
db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) {
// Fragment: resolves the table in the current session database ahead of a
// partition lookup; the try block is cut off at the end of the line.
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {
// NOTE(review): third byte-for-byte copy of dropTableOrPartitions in this
// extract — almost certainly an extraction artifact. Confirm against the real
// source file; a class cannot contain three identical method definitions.
/** * Drop a given table or some partitions. DropTableDesc is currently used for both. * * @param db * The database in question. * @param dropTbl * This is the table we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } if (dropTbl.getPartSpecs() == null) { dropTable(db, tbl, dropTbl); } else { dropPartitions(db, tbl, dropTbl); } }
// NOTE(review): fragment stitched together from several non-adjacent lines of
// an older drop implementation — it references "partitionNames" and "partition"
// that are declared outside this extract (the enclosing for-loop header is
// missing). It fetches the table, drops it, records the WriteEntity output,
// then enumerates and drops each partition, recording each as an output.
// Treat as illustrative context only; do not compile as-is.
tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { db.dropTable(dropTbl.getTableName()); if (tbl != null) { work.getOutputs().add(new WriteEntity(tbl)); db.getPartitionNames(dropTbl.getTableName(), (short) -1); Set<Map<String, String>> partitions = new HashSet<Map<String, String>>(); for (String partitionName : partitionNames) { db.dropPartition(dropTbl.getTableName(), partition.getValues(), true); work.getOutputs().add(new WriteEntity(partition));
// Fragment: metastore drop honoring PURGE, then the start of the null-guard
// used before registering the dropped table as a write entity.
db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) {
// Fragment: right-hand side of the dropPartitions assignment; the
// PartitionDropOptions builder chain is cut off at the end of the line.
= db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(), PartitionDropOptions.instance()
// NOTE(review): four byte-identical copies of the same fragment (also seen
// earlier in this extract) — an extraction/duplication artifact. Each resolves
// the table in the session's current database before a partition lookup; the
// try block is truncated at the end of every line. Confirm against the real
// file before deduplicating.
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {
Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {