private static Properties getTableProperties(Table table) {
  Properties properties = new Properties();
  properties.putAll(table.getParameters());
  return properties;
}
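The method above just copies the table's parameter map into a java.util.Properties object. A minimal, self-contained sketch of that same pattern in plain JDK terms (class and key names invented for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class ParamsToPropertiesSketch {
  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put("transactional", "true");

    // Properties extends Hashtable<Object, Object>, so putAll accepts a String map directly
    Properties props = new Properties();
    props.putAll(params);
    System.out.println(props.getProperty("transactional")); // prints: true
  }
}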
/**
 * Get the parameter map of the Entity.
 */
public Map<String, String> getParameters() {
  if (p != null) {
    return p.getParameters();
  } else {
    return t.getParameters();
  }
}
public Map<String, String> getTableParams() {
  return this.hivePartition.getTable().getParameters();
}
private ReplLoadOpType getLoadTableType(Table table)
    throws InvalidOperationException, HiveException {
  if (table == null) {
    return ReplLoadOpType.LOAD_NEW;
  }
  if (ReplUtils.replCkptStatus(table.getDbName(), table.getParameters(),
      context.dumpDirectory)) {
    return ReplLoadOpType.LOAD_SKIP;
  }
  return ReplLoadOpType.LOAD_REPLACE;
}
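The load-type decision above is a three-way classification: no target table means a fresh load, a matching replication checkpoint means the work was already done, and anything else means replace. A standalone sketch of the same control flow, with the metastore lookups reduced to booleans (all names here are invented for illustration):

public class ReplLoadSketch {
  enum ReplLoadOpType { LOAD_NEW, LOAD_SKIP, LOAD_REPLACE }

  // Hypothetical stand-in for the table lookup and checkpoint check above
  static ReplLoadOpType classifyLoad(boolean tableExists, boolean checkpointMatchesDump) {
    if (!tableExists) {
      return ReplLoadOpType.LOAD_NEW;      // nothing on the target yet
    }
    if (checkpointMatchesDump) {
      return ReplLoadOpType.LOAD_SKIP;     // this dump was already applied
    }
    return ReplLoadOpType.LOAD_REPLACE;    // table exists but predates this dump
  }

  public static void main(String[] args) {
    System.out.println(classifyLoad(false, false)); // LOAD_NEW
    System.out.println(classifyLoad(true, true));   // LOAD_SKIP
    System.out.println(classifyLoad(true, false));  // LOAD_REPLACE
  }
}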
protected int getBucket(Object row) {
  if (!isBucketed) {
    return 0;
  }
  Object[] bucketFields = getBucketFields(row);
  int bucketingVersion = Utilities.getBucketingVersion(
      table.getParameters().get(hive_metastoreConstants.TABLE_BUCKETING_VERSION));
  return bucketingVersion == 2
      ? ObjectInspectorUtils.getBucketNumber(bucketFields, bucketObjInspectors, totalBuckets)
      : ObjectInspectorUtils.getBucketNumberOld(bucketFields, bucketObjInspectors, totalBuckets);
}
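getBucket dispatches on the table's bucketing-version parameter, so a missing or malformed value has to fall back to a sane default. A defensive parse of that kind might look like the following sketch; the parameter key and the default of 2 are assumptions for illustration, not the actual Utilities.getBucketingVersion implementation:

import java.util.Map;

class BucketingVersionSketch {
  // Hypothetical helper: read a numeric table parameter with a fallback
  static int parseBucketingVersion(Map<String, String> tableParams) {
    String raw = tableParams.get("bucketing_version"); // assumed parameter key
    if (raw == null) {
      return 2; // assumed default when the table carries no version
    }
    try {
      return Integer.parseInt(raw.trim());
    } catch (NumberFormatException e) {
      return 2; // malformed value: fall back rather than fail the write
    }
  }
}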
/**
 * Actually makes the table transactional.
 */
private static void alterTable(Table t, Hive db, boolean isMM)
    throws HiveException, InvalidOperationException {
  // Clone so the new property doesn't leak into the caller's Table
  org.apache.hadoop.hive.ql.metadata.Table metaTable =
      new org.apache.hadoop.hive.ql.metadata.Table(t.deepCopy());
  metaTable.getParameters().put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
  if (isMM) {
    metaTable.getParameters()
        .put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
  }
  EnvironmentContext ec = new EnvironmentContext();
  // We are not modifying any data, so stats should stay exactly the same
  ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  db.alterTable(Warehouse.getQualifiedName(t), metaTable, false, ec, false);
}
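The deepCopy() call above is the important detail: the method mutates a clone, so the new properties never leak into the caller's Table object. The same defensive-copy idea in plain JDK terms (a minimal sketch, not Hive code):

import java.util.HashMap;
import java.util.Map;

public class CloneBeforeMutateSketch {
  public static void main(String[] args) {
    Map<String, String> shared = new HashMap<>();
    shared.put("transactional", "false");

    // Mutate a copy; the shared map stays untouched
    Map<String, String> copy = new HashMap<>(shared);
    copy.put("transactional", "true");

    System.out.println(shared.get("transactional")); // prints: false
    System.out.println(copy.get("transactional"));   // prints: true
  }
}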
/**
 * Determines if the current replication object (current state of dump) is allowed to
 * replicate-replace-into a given table.
 */
public boolean allowReplacementInto(Table table) {
  return allowReplacement(getLastReplicatedStateFromParameters(table.getParameters()),
      this.getCurrentReplicationState());
}
/**
 * Determines if the current replication event specification is allowed to
 * replicate-replace-into a given table.
 */
public boolean allowEventReplacementInto(Table table) {
  return allowReplacement(getLastReplicatedStateFromParameters(table.getParameters()),
      this.getReplicationState());
}
private boolean resetStatisticsProps(Table table) {
  if (hasFollowingStatsTask()) {
    // If there's a follow-on stats task then the stats will be correct after load,
    // so there is no need to reset the statistics.
    return false;
  }
  if (!work.getIsInReplicationScope()) {
    // If the load is not happening during replication and there is no follow-on stats
    // task, stats will be inaccurate after load and so need to be reset.
    return true;
  }
  // If we are loading a table during replication, the stats will also be replicated
  // and hence accurate if it's a non-transactional table. For a transactional table we
  // do not replicate stats yet.
  return AcidUtils.isTransactionalTable(table.getParameters());
}
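Collapsed to its three inputs, resetStatisticsProps is a small decision table: a follow-on stats task always wins, a non-replication load always resets, and within replication only transactional tables reset (because their stats are not replicated yet). A standalone restatement with the lookups reduced to booleans (names invented for illustration):

public class ResetStatsSketch {
  // Hypothetical restatement of the decision above
  static boolean shouldResetStats(boolean hasFollowOnStatsTask,
      boolean inReplicationScope, boolean isTransactional) {
    if (hasFollowOnStatsTask) {
      return false;           // a follow-on task recomputes stats anyway
    }
    if (!inReplicationScope) {
      return true;            // a plain load leaves stats stale, so reset
    }
    return isTransactional;   // replication copies stats only for non-transactional tables
  }

  public static void main(String[] args) {
    System.out.println(shouldResetStats(true, false, false));  // false
    System.out.println(shouldResetStats(false, false, false)); // true
    System.out.println(shouldResetStats(false, true, false));  // false
    System.out.println(shouldResetStats(false, true, true));   // true
  }
}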
/**
 * Set up the table-level stats as if the table is new. Used when setting up a Table
 * for a new table or during replication.
 */
public void setStatsStateLikeNewTable() {
  // We do not replicate statistics for an ACID table right now, so don't touch them.
  if (AcidUtils.isTransactionalTable(this)) {
    return;
  }
  if (isPartitioned()) {
    StatsSetupConst.setStatsStateForCreateTable(getParameters(), null, StatsSetupConst.FALSE);
  } else {
    StatsSetupConst.setStatsStateForCreateTable(getParameters(),
        MetaStoreUtils.getColumnNames(getCols()), StatsSetupConst.TRUE);
  }
}
public boolean isMmTable() {
  if (getTable() != null) {
    return AcidUtils.isInsertOnlyTable(table.getParameters());
  } else {
    // Dynamic partition insert case
    return AcidUtils.isInsertOnlyTable(getTableInfo().getProperties());
  }
}

public boolean isFullAcidTable() {
public static MmContext createIfNeeded(Table t) {
  if (t == null) return null;
  if (!AcidUtils.isInsertOnlyTable(t.getParameters())) return null;
  return new MmContext(AcidUtils.getFullTableName(t.getDbName(), t.getTableName()));
}
private ColStatistics extractColStats(RexInputRef ref) {
  RelColumnOrigin columnOrigin = this.metadataProvider.getColumnOrigin(filterOp, ref.getIndex());
  if (columnOrigin != null) {
    RelOptHiveTable table = (RelOptHiveTable) columnOrigin.getOriginTable();
    if (table != null) {
      ColStatistics colStats =
          table.getColStat(Lists.newArrayList(columnOrigin.getOriginColumnOrdinal()), false).get(0);
      if (colStats != null && StatsUtils.areColumnStatsUptoDateForQueryAnswering(
          table.getHiveTableMD(), table.getHiveTableMD().getParameters(),
          colStats.getColumnName())) {
        return colStats;
      }
    }
  }
  return null;
}
protected RecordUpdater createRecordUpdater(final Path partitionPath, int bucketId,
    Long minWriteId, Long maxWriteID) throws IOException {
  // Initialize table properties from the table parameters. This is required because the table
  // may define certain table parameters that may be required while writing. The table parameter
  // 'transactional_properties' is one such example.
  Properties tblProperties = new Properties();
  tblProperties.putAll(table.getParameters());
  return acidOutputFormat.getRecordUpdater(partitionPath,
      new AcidOutputFormat.Options(conf)
          .filesystem(fs)
          .inspector(outputRowObjectInspector)
          .bucket(bucketId)
          .tableProperties(tblProperties)
          .minimumWriteId(minWriteId)
          .maximumWriteId(maxWriteID)
          .statementId(statementId)
          .finalDestination(partitionPath));
}
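AcidOutputFormat.Options is a fluent builder: each setter returns this, so the whole write configuration chains in a single expression. A minimal sketch of that builder style (all names here are invented for illustration, not the Hive API):

public class WriterOptionsSketch {
  private int bucketId;
  private long minWriteId;
  private long maxWriteId;

  // Each setter returns this so calls can be chained
  WriterOptionsSketch bucket(int id)          { this.bucketId = id; return this; }
  WriterOptionsSketch minimumWriteId(long id) { this.minWriteId = id; return this; }
  WriterOptionsSketch maximumWriteId(long id) { this.maxWriteId = id; return this; }

  public static void main(String[] args) {
    WriterOptionsSketch opts =
        new WriterOptionsSketch().bucket(1).minimumWriteId(10L).maximumWriteId(20L);
    System.out.println("bucket=" + opts.bucketId
        + " writeIds=[" + opts.minWriteId + "," + opts.maxWriteId + "]");
  }
}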
private Table newTable(boolean isPartitioned) {
  Table t = new Table("default", "table" + Integer.toString(nextInput++));
  if (isPartitioned) {
    FieldSchema fs = new FieldSchema();
    fs.setName("version");
    fs.setType("string");
    List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
    partCols.add(fs);
    t.setPartCols(partCols);
  }
  Map<String, String> tblProps = t.getParameters();
  if (tblProps == null) {
    tblProps = new HashMap<>();
  }
  tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
  t.setParameters(tblProps);
  return t;
}
private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl,
    boolean isOutputPadded) {
  formatOutput("Database:", tbl.getDbName(), tableInfo);
  formatOutput("OwnerType:",
      (tbl.getOwnerType() != null) ? tbl.getOwnerType().name() : "null", tableInfo);
  formatOutput("Owner:", tbl.getOwner(), tableInfo);
  formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
  formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
  formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
  if (!tbl.isView()) {
    formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
  }
  formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
  if (tbl.getParameters().size() > 0) {
    tableInfo.append("Table Parameters:").append(LINE_DELIM);
    displayAllParameters(tbl.getParameters(), tableInfo, false, isOutputPadded);
  }
}
private Long extractRowCount(RexInputRef ref) {
  RelColumnOrigin columnOrigin = this.metadataProvider.getColumnOrigin(filterOp, ref.getIndex());
  if (columnOrigin != null) {
    RelOptHiveTable table = (RelOptHiveTable) columnOrigin.getOriginTable();
    if (table != null) {
      if (StatsUtils.areBasicStatsUptoDateForQueryAnswering(table.getHiveTableMD(),
          table.getHiveTableMD().getParameters())) {
        return StatsUtils.getNumRows(table.getHiveTableMD());
      }
    }
  }
  return null;
}
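extractRowCount and the earlier extractColStats walk the same null-guarded chain (column origin, then table, then stats). The nested if-not-null checks can be flattened with java.util.Optional, whose map calls short-circuit on null; a generic sketch of that alternative shape, not a rewrite of the Hive methods:

import java.util.Optional;

public class NullChainSketch {
  static String firstCharUpper(String s) {
    // Optional.map short-circuits on null, replacing nested if != null checks
    return Optional.ofNullable(s)
        .filter(v -> !v.isEmpty())
        .map(v -> v.substring(0, 1).toUpperCase())
        .orElse(null);
  }

  public static void main(String[] args) {
    System.out.println(firstCharUpper("hive")); // prints: H
    System.out.println(firstCharUpper(null));   // prints: null
  }
}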
@Before
public void before() {
  HepProgramBuilder programBuilder = new HepProgramBuilder();
  programBuilder.addRuleInstance(HiveReduceExpressionsWithStatsRule.INSTANCE);
  planner = new HepPlanner(programBuilder.build());
  JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl();
  RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
  RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class);
  Mockito.doReturn(rowTypeMock).when(tableMock).getRowType();
  Mockito.doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any());
  statObj = new ColStatistics("_int", "int");
  Mockito.doReturn(Lists.newArrayList(statObj)).when(tableMock)
      .getColStat(Matchers.anyListOf(Integer.class), Matchers.eq(false));
  Mockito.doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD();
  Mockito.doReturn(tableParams).when(hiveTableMDMock).getParameters();
  builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock);
  StatsSetupConst.setStatsStateForCreateTable(tableParams,
      Lists.newArrayList("_int"), StatsSetupConst.TRUE);
  tableParams.put(StatsSetupConst.ROW_COUNT, "3");
}
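The fixture above leans on Mockito's doReturn(..).when(..) form, which stubs a call without invoking the real method, then seeds tableParams so the rule under test sees up-to-date stats and a row count of 3. The stubbing style in isolation (a generic sketch, not the Hive test):

import java.util.List;
import org.mockito.Mockito;

public class StubbingSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> mocked = Mockito.mock(List.class);
    // doReturn(..).when(..) avoids calling the real size() during stubbing
    Mockito.doReturn(3).when(mocked).size();
    System.out.println(mocked.size()); // prints: 3
  }
}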
private List<FieldSchema> getColsInternal(boolean forMs) {
  try {
    String serializationLib = tPartition.getSd().getSerdeInfo().getSerializationLib();
    // Do the lightweight check for the general case.
    if (Table.hasMetastoreBasedSchema(SessionState.getSessionConf(), serializationLib)) {
      return tPartition.getSd().getCols();
    } else if (forMs && !Table.shouldStoreFieldsInMetastore(
        SessionState.getSessionConf(), serializationLib, table.getParameters())) {
      return Hive.getFieldsFromDeserializerForMsStorage(table, getDeserializer());
    }
    return HiveMetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), getDeserializer());
  } catch (Exception e) {
    LOG.error("Unable to get cols from serde: "
        + tPartition.getSd().getSerdeInfo().getSerializationLib(), e);
  }
  return new ArrayList<FieldSchema>();
}
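Note the error-handling shape in getColsInternal: any failure is logged and the method degrades to an empty list rather than returning null or rethrowing, so callers never need a null check. The same shape in miniature (a generic sketch, names invented):

import java.util.ArrayList;
import java.util.List;

public class EmptyFallbackSketch {
  static List<String> trimAll(List<String> raw) {
    try {
      List<String> out = new ArrayList<>();
      for (String s : raw) {       // throws NullPointerException when raw is null
        out.add(s.trim());
      }
      return out;
    } catch (Exception e) {
      System.err.println("Unable to process input: " + e);
    }
    return new ArrayList<>();      // degrade to empty, never null
  }

  public static void main(String[] args) {
    System.out.println(trimAll(null)); // prints: []
  }
}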