/**
 * Returns the data location of this object: the partition's storage-descriptor
 * location when the table is partitioned, otherwise the table's own location.
 * Returns {@code null} when the relevant storage descriptor is missing.
 */
public Path getDataLocation() {
  if (table.isPartitioned()) {
    // Partitioned: location comes from the partition's storage descriptor.
    return tPartition.getSd() == null ? null : new Path(tPartition.getSd().getLocation());
  }
  // Unpartitioned: fall back to the table-level storage descriptor.
  if (table.getTTable() == null || table.getTTable().getSd() == null) {
    return null;
  }
  return new Path(table.getTTable().getSd().getLocation());
}
/**
 * Converts a Thrift metastore API {@code Table} plus an externally resolved column
 * schema into the internal {@link Table} representation.
 *
 * @param table  Thrift table; must carry a storage descriptor
 * @param schema data columns to use (the caller resolves these separately from the sd)
 * @throws PrestoException with {@code HIVE_INVALID_METADATA} when the storage descriptor is absent
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema) {
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }

    Table.Builder builder = Table.builder()
            .setDatabaseName(table.getDbName())
            .setTableName(table.getTableName())
            .setOwner(nullToEmpty(table.getOwner()))
            .setTableType(table.getTableType())
            // Empty view texts are normalized to absent Optionals.
            .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
            .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())))
            // Thrift may leave parameters null; substitute an empty map.
            .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
            .setDataColumns(schema.stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            // NOTE(review): getPartitionKeys() is assumed non-null here (as in the original);
            // Thrift optional list fields can in principle be null — confirm upstream guarantees.
            .setPartitionColumns(table.getPartitionKeys().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()));

    fromMetastoreApiStorageDescriptor(storageDescriptor, builder.getStorageBuilder(), table.getTableName());
    return builder.build();
}
/**
 * Converts the internal {@link Table} representation (plus its principal privileges)
 * back into a Thrift metastore API table.
 */
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges) {
    org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table();
    apiTable.setDbName(table.getDatabaseName());
    apiTable.setTableName(table.getTableName());
    apiTable.setOwner(table.getOwner());
    apiTable.setTableType(table.getTableType());
    // Absent view texts map back to null for Thrift.
    apiTable.setViewOriginalText(table.getViewOriginalText().orElse(null));
    apiTable.setViewExpandedText(table.getViewExpandedText().orElse(null));
    apiTable.setParameters(table.getParameters());
    apiTable.setPartitionKeys(table.getPartitionColumns().stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toList()));
    apiTable.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    apiTable.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    return apiTable;
}
/**
 * Builds the {@link Properties} view of a table's schema via
 * {@code MetaStoreUtils.getSchema}.
 */
public static Properties getTableMetadata(org.apache.hadoop.hive.metastore.api.Table table) {
  org.apache.hadoop.hive.metastore.api.StorageDescriptor sd = table.getSd();
  // For a table (as opposed to a partition) the partition-level and table-level
  // storage descriptor arguments are the same object.
  return MetaStoreUtils.getSchema(sd, sd, table.getParameters(), table.getDbName(),
      table.getTableName(), table.getPartitionKeys());
}
/**
 * Returns a deep copy of the given Thrift table with its database and table names
 * normalized to lower case.
 *
 * Uses {@code Locale.ROOT} so the normalization is locale-independent: under some
 * default locales (notably Turkish) {@code "I".toLowerCase()} does not yield "i",
 * producing identifiers that no longer match metastore lookups.
 *
 * @param tbl table to copy; the original is not modified
 * @return lower-cased deep copy
 */
private org.apache.hadoop.hive.metastore.api.Table deepCopyAndLowerCaseTable(
    org.apache.hadoop.hive.metastore.api.Table tbl) {
  org.apache.hadoop.hive.metastore.api.Table newCopy = deepCopy(tbl);
  newCopy.setDbName(newCopy.getDbName().toLowerCase(java.util.Locale.ROOT));
  newCopy.setTableName(newCopy.getTableName().toLowerCase(java.util.Locale.ROOT));
  return newCopy;
}
/**
 * Creates a drop-partition message, embedding a JSON serialization of the full table
 * object alongside the basic identifying fields passed to the delegate constructor.
 *
 * @throws IllegalArgumentException if the table object cannot be serialized to JSON
 */
public JSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj, List<Map<String, String>> partitionKeyValues, long timestamp) { this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), partitionKeyValues, timestamp); try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } }
/**
 * Dropping a table with purge=true should delete the data outright: the table
 * directory must be gone and must not appear in the trash.
 */
@Test
public void testDropTableWithPurge() throws Exception {
  Table droppedTable = testTables[0];
  Path tableLocation = new Path(droppedTable.getSd().getLocation());
  // deleteData=true, ignoreUnknownTab=true, purge=true
  client.dropTable(droppedTable.getDbName(), droppedTable.getTableName(), true, true, true);
  Assert.assertFalse("Table path should be removed", metaStore.isPathExists(tableLocation));
  Assert.assertFalse("Table path should not be in trash",
      metaStore.isPathExistsInTrash(tableLocation));
}
// NOTE(review): this fragment is truncated — the method signature begins before this
// line and the braces are unbalanced, so it cannot be safely restructured here.
// What the visible code does: for a temp table whose location is unset, it generates
// a session temp-table location, resolves it through the warehouse DNS path, and (for
// non-VALUES virtual tables) initializes column-stats state on the table parameters.
MetaException, NoSuchObjectException, TException { boolean isVirtualTable = tbl.getTableName().startsWith(SemanticAnalyzer.VALUES_TMP_TABLE_NAME_PREFIX); String dbName = tbl.getDbName(); String tblName = tbl.getTableName(); Map<String, Table> tables = getTempTablesForDatabase(dbName, tblName); if (tables != null && tables.containsKey(tblName)) { if (tbl.getSd().getLocation() == null) { tbl.getSd().setLocation(SessionState.generateTempTableLocation(conf)); Path tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation())); if (tblPath == null) { throw new MetaException("Temp table path not set for " + tbl.getTableName()); } else { if (!wh.isDir(tblPath)) { tbl.getSd().setLocation(tblPath.toString()); Table tTable = new Table(tbl); if (!isVirtualTable) { StatsSetupConst.setStatsStateForCreateTable(tbl.getParameters(), org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable(tbl), StatsSetupConst.TRUE);
private String getFinalDynamicPartitionDestination(Table table, Map<String, String> partKVs, OutputJobInfo jobInfo) { Path partPath = new Path(table.getTTable().getSd().getLocation()); if (!customDynamicLocationUsed) { // file:///tmp/hcat_junit_warehouse/employee/_DYN0.7770480401313761/emp_country=IN/emp_state=KA -> // file:///tmp/hcat_junit_warehouse/employee/emp_country=IN/emp_state=KA for (FieldSchema partKey : table.getPartitionKeys()) { partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs); } return partPath.toString(); } else { // if custom root specified, update the parent path if (jobInfo.getCustomDynamicRoot() != null && jobInfo.getCustomDynamicRoot().length() > 0) { partPath = new Path(partPath, jobInfo.getCustomDynamicRoot()); } return new Path(partPath, HCatFileUtil.resolveCustomPath(jobInfo, partKVs, false)).toString(); } }
// NOTE(review): this fragment is truncated — it starts mid-method (a stray "}" and a
// dangling "else" reference an enclosing if that is not visible, and "exportPath" /
// "exportPathString" / "dateString" are declared elsewhere). Visible behavior: it
// resolves the table's filesystem from its sd location, picks an export root (the FS
// home directory when no explicit export path is configured), and logs the metadata
// export destination <exportPath>/<tableName>.<dateString>.
FileSystem fs = null; Table tbl = tableEvent.getTable(); String name = tbl.getTableName(); org.apache.hadoop.hive.ql.metadata.Table mTbl = new org.apache.hadoop.hive.ql.metadata.Table( tbl); IHMSHandler handler = tableEvent.getHandler(); Configuration conf = handler.getConf(); Warehouse wh = new Warehouse(conf); Path tblPath = new Path(tbl.getSd().getLocation()); fs = wh.getFs(tblPath); Date now = new Date(); exportPath = fs.getHomeDirectory(); } else { exportPath = new Path(exportPathString); Path metaPath = new Path(exportPath, name + "." + dateString); LOG.info("Exporting the metadata of table " + tbl.toString() + " to path " + metaPath.toString()); try {
@Test public void testAlterTableChangeCols() throws Exception { Table originalTable = partitionedTable; Table newTable = originalTable.deepCopy(); List<FieldSchema> cols = newTable.getSd().getCols(); // Change a column cols.get(0).setName("modified_col"); // Remove a column cols.remove(1); // Add a new column cols.add(new FieldSchema("new_col", "int", null)); // Store the changes client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertTrue("Original table directory should be kept", metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); // The following data might be changed alteredTable.setParameters(newTable.getParameters()); Assert.assertEquals("The table data should be the same", newTable, alteredTable); // Modify partition column type, and comment newTable.getPartitionKeys().get(0).setType("string"); newTable.getPartitionKeys().get(0).setComment("changed comment"); client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); // The following data might be changed alteredTable.setParameters(newTable.getParameters()); Assert.assertEquals("The table data should be the same", newTable, alteredTable); }
// NOTE(review): this fragment is truncated and appears garbled — "if (!table.isImmutable()){ return;"
// is missing its closing brace, so as written everything after the return would be
// unreachable; presumably the brace closes immediately after "return;" in the full
// source (early exit for mutable tables) — confirm against the original file.
// Visible intent: for partitioned immutable tables without dynamic partitioning,
// look up existing partition names for the requested values and resolve the table
// path's filesystem, presumably to enforce immutability (no overwrite of existing data).
if (!table.isImmutable()){ return; if (table.getPartitionKeys().size() > 0) { if (!outputInfo.isDynamicPartitioningUsed()) { List<String> partitionValues = getPartitionValueList( table, outputInfo.getPartitionValues()); List<String> currentParts = client.listPartitionNames(outputInfo.getDatabaseName(), outputInfo.getTableName(), partitionValues, (short) 1); Path tablePath = new Path(table.getTTable().getSd().getLocation()); FileSystem fs = tablePath.getFileSystem(context.getConfiguration());
@Test public void testAlterTableExternalTableChangeLocation() throws Exception { Table originalTable = externalTable; // Change the location, and see the results Table newTable = originalTable.deepCopy(); newTable.getSd().setLocation(newTable.getSd().getLocation() + "_modified"); client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertTrue("Original table directory should be kept", metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); Assert.assertEquals("New location should be the new one", newTable.getSd().getLocation(), alteredTable.getSd().getLocation()); Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are // present for(String key: newTable.getParameters().keySet()) { Assert.assertEquals("parameters are present", newTable.getParameters().get(key), alteredTable.getParameters().get(key)); } // The parameters are checked manually, so do not check them newTable.setParameters(alteredTable.getParameters()); // The following data should be changed, other data should be the same newTable.getSd().setLocation(alteredTable.getSd().getLocation()); Assert.assertEquals("The table data should be the same", newTable, alteredTable); }
@Test public void testAlterTableExternalTable() throws Exception { Table originalTable = externalTable; String originalTableName = originalTable.getTableName(); String originalDatabase = originalTable.getDbName(); Table newTable = originalTable.deepCopy(); newTable.setTableName("new_external_table_for_test"); client.alter_table(originalDatabase, originalTableName, newTable); List<String> tableNames = client.getTables(originalDatabase, originalTableName); Assert.assertEquals("Original table should be removed", 0, tableNames.size()); Assert.assertTrue("Original table directory should be kept", metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("New location should be the same", originalTable.getSd().getLocation(), alteredTable.getSd().getLocation()); Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("The location should contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are // present for(String key: newTable.getParameters().keySet()) { Assert.assertEquals("parameters are present", newTable.getParameters().get(key), alteredTable.getParameters().get(key)); } // The parameters are checked manually, so do not check them newTable.setParameters(alteredTable.getParameters()); Assert.assertEquals("The table data should be the same", newTable, alteredTable); }
/**
 * Creates a {@link PartitionHelper} rooted at the table's warehouse location,
 * configured with the table's partition column names (in declaration order).
 */
private PartitionHelper newWarehousePartitionHelper() throws MetaException, WorkerException {
  Path warehouseTablePath = new Path(table.getTable().getSd().getLocation());
  List<FieldSchema> partitionFields = table.getTable().getPartitionKeys();
  List<String> partitionColumnNames = new ArrayList<>(partitionFields.size());
  for (FieldSchema partitionField : partitionFields) {
    partitionColumnNames.add(partitionField.getName());
  }
  return new WarehousePartitionHelper(configuration, warehouseTablePath, partitionColumnNames);
}
private Path getPartitionLocation() throws NoSuchObjectException, MetaException, TException { Path partitionLocacation; if (partition.isEmpty()) { partitionLocacation = new Path(table.getSd().getLocation()); } else { // TODO: calculate this instead. Just because we're writing to the location doesn't mean that it'll // always be wanted in the meta store right away. List<Partition> partitionEntries = metaStoreClient.listPartitions(table.getDbName(), table.getTableName(), partition, (short) 1); partitionLocacation = new Path(partitionEntries.get(0).getSd().getLocation()); } return partitionLocacation; }
@Test public void testTruncateTableUnpartitioned() throws Exception { // Unpartitioned table Path dataFile = new Path(testTables[0].getSd().getLocation() + "/dataFile"); client.truncateTable(testTables[0].getDbName(), testTables[0].getTableName(), null); Assert.assertTrue("Location should exist", metaStore.isPathExists(new Path(testTables[0].getSd().getLocation()))); Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile)); }
/**
 * Makes the location in the table's storage descriptor fully qualified
 * (scheme and authority filled in from the configuration), modifying the
 * descriptor in place.
 *
 * If the location is currently unset it is left unset, allowing the
 * metastore to fill in the default table location.
 *
 * @param databaseName
 *          Database name. NOTE(review): not used in this method body — presumably
 *          kept for API compatibility with callers; confirm before removing.
 * @param table
 *          Table whose storage-descriptor location is qualified.
 * @param conf
 *          Configuration used to qualify the path.
 */
public static void makeLocationQualified(String databaseName, Table table, HiveConf conf)
    throws HiveException {
  Path path = null;
  StorageDescriptor sd = table.getTTable().getSd();
  // If the table's location is currently unset, it is left unset, allowing the metastore to
  // fill in the table's location.
  // Note that the previous logic for some reason would make a special case if the DB was the
  // default database, and actually attempt to generate a location.
  // This seems incorrect and unnecessary, since the metastore is just as able to fill in the
  // default table location in the case of the default DB, as it is for non-default DBs.
  if (sd.isSetLocation()) {
    path = new Path(sd.getLocation());
  }
  if (path != null) {
    sd.setLocation(Utilities.getQualifiedPath(conf, path));
  }
}
/**
 * When neither the partition spec nor the partition itself supplies a location, the
 * partition directory should default to {@code <tableLocation>/<partCol>=<value>}
 * and be created on disk.
 */
@Test
public void testAddPartitionSpecEmptyLocationInTableToo() throws Exception {
  Table table = createTable(DB_NAME, TABLE_NAME, getYearPartCol(), null);
  Partition partitionWithEmptyLocation =
      buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, "");
  PartitionSpecProxy spec = buildPartitionSpec(DB_NAME, TABLE_NAME, null,
      Lists.newArrayList(partitionWithEmptyLocation));
  client.add_partitions_pspec(spec);

  Partition createdPartition =
      client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE));
  Assert.assertEquals(table.getSd().getLocation() + "/year=2017",
      createdPartition.getSd().getLocation());
  Assert.assertTrue(metaStore.isPathExists(new Path(createdPartition.getSd().getLocation())));
}
/**
 * Builds the placeholder "dummy" table: it is backed by a freshly created dummy
 * file and wired to null serde/input/output formats so it produces no rows.
 */
protected Table getDummyTable() throws SemanticException {
  Path dummyLocation = createDummyFile();
  Table dummyTable = new Table(DUMMY_DATABASE, DUMMY_TABLE);
  dummyTable.getTTable().getSd().setLocation(dummyLocation.toString());
  // Null serde + null-rows formats: the table reads as empty.
  dummyTable.getTTable().getSd().getSerdeInfo().setSerializationLib(NullStructSerDe.class.getName());
  dummyTable.setInputFormatClass(NullRowsInputFormat.class);
  dummyTable.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  return dummyTable;
}