public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
    Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    if (val == null || val.isEmpty()) {
      throw new HiveException("partition spec is invalid; field "
          + field.getName() + " does not exist or is empty");
    }
    pvals.add(val);
  }

  org.apache.hadoop.hive.metastore.api.Partition tpart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  tpart.setDbName(tbl.getDbName());
  tpart.setTableName(tbl.getTableName());
  tpart.setValues(pvals);

  if (!tbl.isView()) {
    tpart.setSd(tbl.getSd().deepCopy());
    tpart.getSd().setLocation((location != null) ? location.toString() : null);
  }
  return tpart;
}
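// Minimal usage sketch (not from the original source): build a metastore
// Partition for an existing ql.metadata.Table handle "tbl" whose single
// partition column is assumed to be "ds". Names and values are illustrative.
Map<String, String> partSpec = new HashMap<String, String>();
partSpec.put("ds", "2023-01-01");
Path partLocation = new Path(tbl.getDataLocation(), "ds=2023-01-01");
org.apache.hadoop.hive.metastore.api.Partition tpart =
    createMetaPartitionObject(tbl, partSpec, partLocation);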
public Object getFieldValue(_Fields field) {
  switch (field) {
  case NAME:
    return getName();
  case TYPE:
    return getType();
  case COMMENT:
    return getComment();
  }
  throw new IllegalStateException();
}
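// Sketch of how this Thrift-generated accessor is typically used (assumed
// example, not from the original source): read FieldSchema members through
// the nested _Fields metadata enum instead of the named getters.
FieldSchema fs = new FieldSchema("a", "int", "first column");
Object name = fs.getFieldValue(FieldSchema._Fields.NAME);       // "a"
Object type = fs.getFieldValue(FieldSchema._Fields.TYPE);       // "int"
Object comment = fs.getFieldValue(FieldSchema._Fields.COMMENT); // "first column"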
public static List<FieldSchema> getResultSchema() {
  FieldSchema tmpFieldSchema = new FieldSchema();
  List<FieldSchema> colList = new ArrayList<FieldSchema>();
  tmpFieldSchema.setName(EXPL_COLUMN_NAME);
  tmpFieldSchema.setType(STRING_TYPE_NAME);
  colList.add(tmpFieldSchema);
  return colList;
}
private static void extractColumnInfos(Table table, List<String> colNames, List<String> colTypes) {
  for (FieldSchema col : table.getAllCols()) {
    colNames.add(col.getName());
    colTypes.add(col.getType());
  }
}
@Test
public void testCreateTableDefaultValuesView() throws Exception {
  Table table = new Table();
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>();

  table.setDbName(DEFAULT_DATABASE);
  table.setTableName("test_table_2");
  table.setTableType("VIRTUAL_VIEW");
  cols.add(new FieldSchema("column_name", "int", null));
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);

  client.createTable(table);
  Table createdTable = client.getTable(table.getDbName(), table.getTableName());

  // No location should be created for views
  Assert.assertNull("Storage descriptor location should be null",
      createdTable.getSd().getLocation());
}
List<FieldSchema> allFields = table.getSd().getCols();
List<FieldSchema> partitionFields = table.getPartitionKeys();
if (allFields == null) {
  allFields = Lists.newArrayList();
}

List<HiveTableMeta.HiveTableColumnMeta> allColumns = Lists.newArrayList();
List<HiveTableMeta.HiveTableColumnMeta> partitionColumns = Lists.newArrayList();
for (FieldSchema fieldSchema : allFields) {
  allColumns.add(new HiveTableMeta.HiveTableColumnMeta(fieldSchema.getName(),
      fieldSchema.getType(), fieldSchema.getComment()));
}
for (FieldSchema fieldSchema : partitionFields) {
  partitionColumns.add(new HiveTableMeta.HiveTableColumnMeta(fieldSchema.getName(),
      fieldSchema.getType(), fieldSchema.getComment()));
}

builder.setAllColumns(allColumns);
builder.setPartitionColumns(partitionColumns);
builder.setSdLocation(table.getSd().getLocation());
builder.setFileSize(getBasicStatForTable(
    new org.apache.hadoop.hive.ql.metadata.Table(table), StatsSetupConst.TOTAL_SIZE));
builder.setFileNum(getBasicStatForTable(
    new org.apache.hadoop.hive.ql.metadata.Table(table), StatsSetupConst.NUM_FILES));
builder.setIsNative(!MetaStoreUtils.isNonNativeTable(table));
builder.setTableName(tableName);
builder.setSdInputFormat(table.getSd().getInputFormat());
builder.setSdOutputFormat(table.getSd().getOutputFormat());
builder.setOwner(table.getOwner());
builder.setLastAccessTime(table.getLastAccessTime());
private Table createPartitionedTable(String catName, String dbName, String tableName)
    throws Exception {
  try {
    db.dropTable(catName, dbName, tableName);

    Table table = new Table();
    table.setCatName(catName);
    table.setDbName(dbName);
    table.setTableName(tableName);
    FieldSchema col1 = new FieldSchema("key", "string", "");
    FieldSchema col2 = new FieldSchema("value", "int", "");
    FieldSchema col3 = new FieldSchema("city", "string", "");

    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    sd.setInputFormat(TextInputFormat.class.getCanonicalName());
    sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
    sd.setCols(Arrays.asList(col1, col2));

    table.setPartitionKeys(Arrays.asList(col3));
    table.setSd(sd);
    db.createTable(table);
    return db.getTable(catName, dbName, tableName);
  } catch (Exception exception) {
    fail("Unable to drop and create table "
        + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + " because "
        + StringUtils.stringifyException(exception));
    throw exception;
  }
}
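// Hypothetical follow-up (not in the original source): register a partition
// on the table created above, assuming "db" exposes the standard
// IMetaStoreClient add_partition call and the table location has been set.
Partition p = new Partition();
p.setCatName(catName);
p.setDbName(dbName);
p.setTableName(tableName);
p.setValues(Collections.singletonList("Seattle")); // value for the "city" key
StorageDescriptor partSd = table.getSd().deepCopy();
partSd.setLocation(table.getSd().getLocation() + "/city=Seattle");
p.setSd(partSd);
db.add_partition(p);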
@Test
public void testGetTableAvro() {
  final String databaseName = "testdb";
  final String tableName = "testtable";

  HiveTable.Builder builder = new HiveTable.Builder();
  builder.withDbName(databaseName).withTableName(tableName);

  State serdeProps = new State();
  serdeProps.setProp("avro.schema.literal",
      "{\"type\": \"record\", \"name\": \"TestEvent\","
          + " \"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"a\","
          + " \"type\": \"int\"}]}");
  builder.withSerdeProps(serdeProps);

  HiveTable hiveTable = builder.build();
  hiveTable.setInputFormat(AvroContainerInputFormat.class.getName());
  hiveTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
  hiveTable.setSerDeType(AvroSerDe.class.getName());

  Table table = HiveMetaStoreUtils.getTable(hiveTable);
  Assert.assertEquals(table.getDbName(), databaseName);
  Assert.assertEquals(table.getTableName(), tableName);

  StorageDescriptor sd = table.getSd();
  Assert.assertEquals(sd.getInputFormat(), AvroContainerInputFormat.class.getName());
  Assert.assertEquals(sd.getOutputFormat(), AvroContainerOutputFormat.class.getName());
  Assert.assertNotNull(sd.getSerdeInfo());
  Assert.assertEquals(sd.getSerdeInfo().getSerializationLib(), AvroSerDe.class.getName());

  List<FieldSchema> fields = sd.getCols();
  Assert.assertTrue(fields != null && fields.size() == 1);
  FieldSchema fieldA = fields.get(0);
  Assert.assertEquals(fieldA.getName(), "a");
  Assert.assertEquals(fieldA.getType(), "int");
}
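// Side sketch (assumed, not from the original source): the avro.schema.literal
// used above is a plain Avro schema, so it can be sanity-checked directly with
// the Avro parser (org.apache.avro.Schema).
Schema schema = new Schema.Parser().parse(
    "{\"type\": \"record\", \"name\": \"TestEvent\","
        + " \"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"a\","
        + " \"type\": \"int\"}]}");
Assert.assertEquals(schema.getFields().size(), 1);
Assert.assertEquals(schema.getField("a").schema().getType(), Schema.Type.INT);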
private Table getTable(String dbName, String tblName, String typeName)
    throws NoSuchObjectException, MetaException, TException, AlreadyExistsException,
    InvalidObjectException {
  msc.dropTable(dbName, tblName);
  silentDropDatabase(dbName);
  msc.dropType(typeName);

  Type typ1 = new Type();
  typ1.setName(typeName);
  typ1.setFields(new ArrayList<FieldSchema>(1));
  typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
  msc.createType(typ1);

  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tblName);
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
  sd.setInputFormat(HiveInputFormat.class.getName());
  sd.setOutputFormat(HiveOutputFormat.class.getName());
  sd.setCols(typ1.getFields());
  tbl.setSd(sd);
  return tbl;
}
throw new HiveException("empty table creation??"); throw new HiveException("Unable to fetch table " + tableName + ". " + e.getMessage(), e); if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) { Map<String, String> parameters = tTable.getSd().getParameters(); String sf = parameters!=null?parameters.get(SERIALIZATION_FORMAT) : null; if (sf != null) { tTable.getSd().getSerdeInfo().getSerializationLib()) && tTable.getSd().getColsSize() > 0 && tTable.getSd().getCols().get(0).getType().indexOf('<') == -1) { tTable.getSd().getSerdeInfo().setSerializationLib( org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); return new Table(tTable);
private StorageDescriptor newStorageDescriptor(String location, List<Order> sortCols) {
  StorageDescriptor sd = new StorageDescriptor();

  List<FieldSchema> cols = new ArrayList<FieldSchema>(2);
  cols.add(new FieldSchema("a", "varchar(25)", "still no comment"));
  cols.add(new FieldSchema("b", "int", "comment"));
  sd.setCols(cols);

  sd.setLocation(location);
  sd.setInputFormat(MockInputFormat.class.getName());
  sd.setOutputFormat(MockOutputFormat.class.getName());
  sd.setNumBuckets(1);

  SerDeInfo serde = new SerDeInfo();
  serde.setSerializationLib(LazySimpleSerDe.class.getName());
  sd.setSerdeInfo(serde);

  List<String> bucketCols = new ArrayList<String>(1);
  bucketCols.add("a");
  sd.setBucketCols(bucketCols);

  if (sortCols != null) {
    sd.setSortCols(sortCols);
  }
  return sd;
}
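// Usage sketch (assumed, not from the original source): build a descriptor
// sorted ascending on the bucket column "a". In the metastore API,
// Order(col, 1) means ascending and Order(col, 0) descending.
StorageDescriptor sorted = newStorageDescriptor("/tmp/test_location",
    Collections.singletonList(new Order("a", 1)));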
@Test(expected = MetaException.class)
public void testAlterTableInvalidStorageDescriptorNullColumnType() throws Exception {
  Table originalTable = testTables[0];
  Table newTable = originalTable.deepCopy();
  newTable.getSd().getCols().get(0).setType(null);

  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
}
private static void makeTestChangesOnPartition(Partition partition) {
  partition.getParameters().put("hmsTestParam001", "testValue001");
  partition.setCreateTime(NEW_CREATE_TIME);
  partition.setLastAccessTime(NEW_CREATE_TIME);
  partition.getSd().setLocation(partition.getSd().getLocation() + "/hh=01");
  partition.getSd().getCols().add(new FieldSchema("newcol", "string", ""));
}
public Table createTestAvroTable(String dbName, String tableName, String tableSdLoc,
    Optional<String> partitionFieldName, boolean ignoreDbCreation) throws Exception {
  if (!ignoreDbCreation) {
    createTestDb(dbName);
  }

  Table tbl = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(dbName, tableName);
  tbl.getSd().setLocation(tableSdLoc);
  tbl.getSd().getSerdeInfo().setSerializationLib(AvroSerDe.class.getName());
  tbl.getSd().getSerdeInfo().setParameters(
      ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy"));
  if (partitionFieldName.isPresent()) {
    tbl.addToPartitionKeys(new FieldSchema(partitionFieldName.get(), "string", "some comment"));
  }

  this.localMetastoreClient.createTable(tbl);
  return tbl;
}
private void addWriteNotificationLog(Table tbl, Map<String, String> partitionSpec,
    List<Path> newFiles, Long writeId) throws HiveException {
  if (!conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML)) {
    LOG.debug("write notification log is ignored as dml event logging is disabled");
    return;
  }

  if (tbl.isTemporary()) {
    LOG.debug("write notification log is ignored as " + tbl.getTableName()
        + " is temporary : " + writeId);
    return;
  }

  LOG.debug("adding write notification log for operation " + writeId + " table "
      + tbl.getCompleteName() + " partition " + partitionSpec + " list of files " + newFiles);

  try {
    FileSystem fileSystem = tbl.getDataLocation().getFileSystem(conf);
    Long txnId = SessionState.get().getTxnMgr().getCurrentTxnId();

    InsertEventRequestData insertData = new InsertEventRequestData();
    insertData.setReplace(true);
    addInsertFileInformation(newFiles, fileSystem, insertData);

    WriteNotificationLogRequest rqst = new WriteNotificationLogRequest(txnId, writeId,
        tbl.getDbName(), tbl.getTableName(), insertData);
    if (partitionSpec != null && !partitionSpec.isEmpty()) {
      for (FieldSchema fs : tbl.getPartitionKeys()) {
        rqst.addToPartitionVals(partitionSpec.get(fs.getName()));
      }
    }
    getSynchronizedMSC().addWriteNotificationLog(rqst);
  } catch (IOException | TException e) {
    throw new HiveException(e);
  }
}
// Checked by DDLSemanticAnalyzer
assert (tbl.isPartitioned());

Set<Partition> partitions = db.getAllPartitionsOf(tbl);

// Locate the partition column being altered
int colIndex = -1;
for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
  colIndex++;
  if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) {
    break;
  }
}
if (colIndex == -1 || colIndex == tbl.getTTable().getPartitionKeys().size()) {
  throw new HiveException("Cannot find partition column "
      + alterPartitionDesc.getPartKeySpec().getName());
}

// Verify every existing partition value can be cast to the new column type
TypeInfo expectedType =
    TypeInfoUtils.getTypeInfoFromTypeString(alterPartitionDesc.getPartKeySpec().getType());
ObjectInspector outputOI =
    TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
Converter converter = ObjectInspectorConverters.getConverter(
    PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
for (Partition part : partitions) {
  if (part.getName().equals(conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) {
    continue;
  }
  if (converter.convert(part.getValues().get(colIndex)) == null) {
    throw new HiveException("Exception while converting " + TypeInfoFactory.stringTypeInfo
        + " to " + expectedType + " for value : " + part.getValues().get(colIndex));
  }
}

// Swap in the new partition key, preserving the others
List<FieldSchema> newPartitionKeys = new ArrayList<>();
for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
  if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) {
    newPartitionKeys.add(alterPartitionDesc.getPartKeySpec());
  } else {
    newPartitionKeys.add(col);
  }
}
tbl.getTTable().setPartitionKeys(newPartitionKeys);
private PartitionHelper newWarehousePartitionHelper() throws MetaException, WorkerException {
  String location = table.getTable().getSd().getLocation();
  Path tablePath = new Path(location);
  List<FieldSchema> partitionFields = table.getTable().getPartitionKeys();
  List<String> partitionColumns = new ArrayList<>(partitionFields.size());
  for (FieldSchema field : partitionFields) {
    partitionColumns.add(field.getName());
  }
  return new WarehousePartitionHelper(configuration, tablePath, partitionColumns);
}
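// Related sketch (assumed, not from the original source): given the same
// partition key FieldSchemas, the metastore Warehouse utility derives the
// standard partition path suffix. makePartName throws MetaException.
List<FieldSchema> keys = Arrays.asList(
    new FieldSchema("ds", "string", ""), new FieldSchema("hh", "string", ""));
String partName = Warehouse.makePartName(keys, Arrays.asList("2023-01-01", "00"));
// partName is "ds=2023-01-01/hh=00"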
FieldSchema newCol = new FieldSchema();
newCol.setName(unescapeIdentifier(name));
newCol.setType(getTypeStringFromAST(typeChild));
if (colAst.getChildCount() == 3) {
  newCol.setComment(unescapeSQLString(colAst.getChild(2).getText()));
}

// The altered column must match an existing partition key
boolean fFoundColumn = false;
for (FieldSchema col : tab.getTTable().getPartitionKeys()) {
  if (col.getName().compareTo(newCol.getName()) == 0) {
    fFoundColumn = true;
  }
}
if (!fFoundColumn) {
  throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName()));
}
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName,
    String newColumnName) {
  Optional<org.apache.hadoop.hive.metastore.api.Table> source =
      delegate.getTable(databaseName, tableName);
  if (!source.isPresent()) {
    throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
  }
  org.apache.hadoop.hive.metastore.api.Table table = source.get();

  for (FieldSchema fieldSchema : table.getPartitionKeys()) {
    if (fieldSchema.getName().equals(oldColumnName)) {
      throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
    }
  }
  for (FieldSchema fieldSchema : table.getSd().getCols()) {
    if (fieldSchema.getName().equals(oldColumnName)) {
      fieldSchema.setName(newColumnName);
    }
  }
  alterTable(databaseName, tableName, table);
}
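// Usage sketch (assumed, not from the original source): against a table with
// data column "value" and partition key "city", renaming the data column
// succeeds while renaming the partition key is rejected.
metastore.renameColumn("testdb", "testtable", "value", "value_renamed");
try {
  metastore.renameColumn("testdb", "testtable", "city", "region");
} catch (PrestoException expected) {
  // NOT_SUPPORTED: "Renaming partition columns is not supported"
}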