private boolean isAvro(Table table) {
  return AvroSerDe.class.getName().equals(
      table.getSd().getSerdeInfo().getSerializationLib());
}
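A generalized sketch of the same check (not from the source) that works for any serde class:

// Sketch: returns true when the table's storage descriptor names the given serde class.
private static boolean usesSerDe(Table table, Class<?> serDeClass) {
  return serDeClass.getName().equals(
      table.getSd().getSerdeInfo().getSerializationLib());
}
// e.g. usesSerDe(table, AvroSerDe.class) is equivalent to isAvro(table)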
protected void prepareBucketingFields() {
  this.isBucketed = table.getSd().getNumBuckets() > 0;
  // For unbucketed tables we have exactly 1 RecordUpdater (until HIVE-19208) for each
  // AbstractRecordWriter, which ends up writing to a file bucket_000000.
  // See also {@link #getBucket(Object)}
  this.totalBuckets = isBucketed ? table.getSd().getNumBuckets() : 1;
  if (isBucketed) {
    this.bucketIds = getBucketColIDs(table.getSd().getBucketCols(), table.getSd().getCols());
    this.bucketFieldData = new Object[bucketIds.size()];
    this.bucketObjInspectors = getObjectInspectorsForBucketedCols(bucketIds, inputRowObjectInspector);
    this.bucketStructFields = new StructField[bucketIds.size()];
    List<? extends StructField> allFields = inputRowObjectInspector.getAllStructFieldRefs();
    for (int i = 0; i < bucketIds.size(); i++) {
      bucketStructFields[i] = allFields.get(bucketIds.get(i));
    }
  }
}
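A hedged sketch of the getBucket(Object) counterpart referenced in the comment above, assuming Hive's ObjectInspectorUtils.getBucketHashCode and getBucketNumber helpers; the body is illustrative, not the source's implementation:

// Sketch: compute the target bucket for a row from the fields prepared above.
protected int getBucket(Object row) {
  if (!isBucketed) {
    return 0; // unbucketed tables write to the single implicit bucket
  }
  // Pull the raw value of each bucketing column out of the row.
  for (int i = 0; i < bucketIds.size(); i++) {
    bucketFieldData[i] = inputRowObjectInspector.getStructFieldData(row, bucketStructFields[i]);
  }
  int hashCode = ObjectInspectorUtils.getBucketHashCode(bucketFieldData, bucketObjInspectors);
  return ObjectInspectorUtils.getBucketNumber(hashCode, totalBuckets);
}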
@Override
public RelDistribution getDistribution() {
  ImmutableList.Builder<Integer> columnPositions = new ImmutableList.Builder<Integer>();
  for (String bucketColumn : this.hiveTblMetadata.getBucketCols()) {
    for (int i = 0; i < this.hiveTblMetadata.getSd().getCols().size(); i++) {
      FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
      if (field.getName().equals(bucketColumn)) {
        columnPositions.add(i);
        break;
      }
    }
  }
  return new HiveRelDistribution(RelDistribution.Type.HASH_DISTRIBUTED, columnPositions.build());
}
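For wide tables the nested scan above is quadratic in the column count; a sketch of the same position lookup via a precomputed name-to-position map (an alternative, not the source's approach):

// Sketch: build the name -> ordinal map once, then resolve each bucket column in O(1).
Map<String, Integer> positionByName = new HashMap<>();
List<FieldSchema> cols = this.hiveTblMetadata.getSd().getCols();
for (int i = 0; i < cols.size(); i++) {
  positionByName.put(cols.get(i).getName(), i);
}
for (String bucketColumn : this.hiveTblMetadata.getBucketCols()) {
  Integer pos = positionByName.get(bucketColumn);
  if (pos != null) {
    columnPositions.add(pos);
  }
}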
public static void addSchemaEvolutionToTableScanOperator(Table table, TableScanOperator tableScanOp) {
  String colNames = MetaStoreUtils.getColumnNamesFromFieldSchema(table.getSd().getCols());
  String colTypes = MetaStoreUtils.getColumnTypesFromFieldSchema(table.getSd().getCols());
  tableScanOp.setSchemaEvolution(colNames, colTypes);
}
@Override
public List<RelCollation> getCollationList() {
  ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
  for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
    for (int i = 0; i < this.hiveTblMetadata.getSd().getCols().size(); i++) {
      FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
      if (field.getName().equals(sortColumn.getCol())) {
        Direction direction;
        NullDirection nullDirection;
        if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
          direction = Direction.ASCENDING;
          nullDirection = NullDirection.FIRST;
        } else {
          direction = Direction.DESCENDING;
          nullDirection = NullDirection.LAST;
        }
        collationList.add(new RelFieldCollation(i, direction, nullDirection));
        break;
      }
    }
  }
  return new ImmutableList.Builder<RelCollation>()
      .add(RelCollationTraitDef.INSTANCE.canonize(new HiveRelCollation(collationList.build())))
      .build();
}
deserializerClassName =
    tableScanDesc.getTableMetadata().getSd().getSerdeInfo().getSerializationLib();
Deserializer deserializer = ReflectionUtil.newInstance(
    context.conf.getClassByName(deserializerClassName).asSubclass(Deserializer.class),
    context.conf); // call completed here; the source snippet is truncated mid-invocation
private Table getTable(Hive db) throws SemanticException, HiveException {
  Table tbl = work.getTable();
  // FIXME: for CTAS this is still needed because the location is sometimes not set
  if (tbl.getSd().getLocation() == null) {
    tbl = db.getTable(work.getFullTableName());
  }
  return tbl;
}
public void checkValidity() throws HiveException {
  if (!tPartition.getSd().equals(table.getSd())) {
    Table.validateColumns(getCols(), table.getPartCols());
  }
}
  break;
case INPUT_FORMAT:
  InputFormat<?, ?> inputFormat = HiveUtils.getInputFormat(table.getSd());
/**
 * Automatically serializes the {@link HiveDataset} by calling {@link #setHiveDataset(HiveDataset)}.
 * @param hiveDataset the dataset for which the workunit is being created
 */
@SuppressWarnings("deprecation")
public HiveWorkUnit(HiveDataset hiveDataset) {
  super();
  setHiveDataset(hiveDataset);
  if (hiveDataset.getTable().getTableType() != TableType.VIRTUAL_VIEW) {
    setTableLocation(hiveDataset.getTable().getSd().getLocation());
  }
}
public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
    Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    if (val == null || val.isEmpty()) {
      throw new HiveException("partition spec is invalid; field "
          + field.getName() + " does not exist or is empty");
    }
    pvals.add(val);
  }

  org.apache.hadoop.hive.metastore.api.Partition tpart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  tpart.setDbName(tbl.getDbName());
  tpart.setTableName(tbl.getTableName());
  tpart.setValues(pvals);

  if (!tbl.isView()) {
    tpart.setSd(tbl.getSd().deepCopy());
    tpart.getSd().setLocation((location != null) ? location.toString() : null);
  }
  return tpart;
}
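A hedged usage sketch; the database handle, table name, partition columns, and location below are hypothetical, chosen only to show that partSpec needs one non-empty entry per partition column:

Table salesTable = db.getTable("mydb", "sales");   // hypothetical Hive handle and table
Map<String, String> partSpec = new HashMap<>();
partSpec.put("ds", "2021-01-01");                  // illustrative partition columns
partSpec.put("region", "emea");
org.apache.hadoop.hive.metastore.api.Partition part = createMetaPartitionObject(
    salesTable, partSpec, new Path("/warehouse/mydb/sales/ds=2021-01-01/region=emea"));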
private int updateColumns(Table tbl, Partition part) throws HiveException {
  String serializationLib = tbl.getSd().getSerdeInfo().getSerializationLib();
  if (MetastoreConf.getStringCollection(conf,
      MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA).contains(serializationLib)) {
    throw new HiveException(tbl.getTableName() + " has serde " + serializationLib
        + " for which schema is already handled by HMS.");
  }
  Deserializer deserializer = tbl.getDeserializer(true);
  try {
    LOG.info("Updating metastore columns for table: {}", tbl.getTableName());
    final List<FieldSchema> fields = HiveMetaStoreUtils.getFieldsFromDeserializer(
        tbl.getTableName(), deserializer);
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    sd.setCols(fields);
  } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) {
    // Pass the exception as the throwable argument so the stack trace is logged.
    LOG.error("alter table update columns", e);
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  }
  return 0;
}
try {
  Table table = new Table(rv.getTable());
  parsedLocation = ReplExternalTables.externalTableLocation(
      context.hiveConf, table.getSd().getLocation());
} catch (IOException e) {
  throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
}
private static class ThreadLocalHive extends ThreadLocal<Hive> {
  @Override
  protected Hive initialValue() {
    return null;
  }

  @Override
  public synchronized void set(Hive hiveObj) {
    Hive currentHive = this.get();
    if (currentHive != hiveObj) {
      // Remove/close the current thread-local Hive object before overwriting it with the new one.
      remove();
      super.set(hiveObj);
    }
  }

  @Override
  public synchronized void remove() {
    Hive currentHive = this.get();
    if (currentHive != null) {
      // Close the metastore connections before removing the object from the thread-local hiveDB.
      currentHive.close(false);
      super.remove();
    }
  }
}
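A hedged usage sketch (the field and method names are illustrative, not from the source): swapping the thread-local Hive handle closes the previous object's metastore connection before caching the new one.

private static final ThreadLocalHive hiveDB = new ThreadLocalHive();

static void switchTo(Hive newHive) {
  hiveDB.set(newHive);   // closes the previously cached Hive if it differs from newHive
}

static void releaseCurrent() {
  hiveDB.remove();       // closes and clears the current thread's Hive
}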
Path destLocation;
if (partitionValues == null) {
  destLocation = new Path(table.getSd().getLocation());
} else {
  Map<String, String> partSpec = Warehouse.makeSpecFromValues(
      table.getPartitionKeys(), partitionValues); // arguments assumed; the source is truncated here