public Map<String, String> getParams() {
  return this.hivePartition.getParameters();
}
/**
 * Returns the original location of an archived partition, or null for an unarchived one.
 */
private String getOriginalLocation(Partition p) {
  Map<String, String> params = p.getParameters();
  return params.get(
      org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ORIGINAL_LOCATION);
}
/**
 * Sets the original location of a partition that is to be archived; a null value clears it.
 */
private void setOriginalLocation(Partition p, String loc) {
  Map<String, String> params = p.getParameters();
  if (loc == null) {
    params.remove(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ORIGINAL_LOCATION);
  } else {
    params.put(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ORIGINAL_LOCATION, loc);
  }
}
private void validatePartition(Partition newPart) throws HiveException {
  // Remove the DDL time so that it gets refreshed
  if (newPart.getParameters() != null) {
    newPart.getParameters().remove(hive_metastoreConstants.DDL_TIME);
  }
  newPart.checkValidity();
}
/**
 * Gets the parameter map of the Entity, taken from the partition if one is set,
 * otherwise from the table.
 */
public Map<String, String> getParameters() {
  if (p != null) {
    return p.getParameters();
  } else {
    return t.getParameters();
  }
}
@Override
public boolean apply(@Nullable Partition partition) {
  if (partition == null) {
    return false;
  }
  return allowEventReplacementInto(partition.getParameters());
}
/**
 * Sets the archiving flag locally; the change still has to be pushed to the metastore.
 * @param p partition to flag
 * @param state desired state of the IS_ARCHIVED flag
 * @param level desired archiving level when state is true; ignored when state is false
 */
private void setIsArchived(Partition p, boolean state, int level) {
  Map<String, String> params = p.getParameters();
  if (state) {
    params.put(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.IS_ARCHIVED, "true");
    params.put(ArchiveUtils.ARCHIVING_LEVEL, Integer.toString(level));
  } else {
    params.remove(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.IS_ARCHIVED);
    params.remove(ArchiveUtils.ARCHIVING_LEVEL);
  }
}
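The archiving helpers above (setIsArchived, setOriginalLocation, getOriginalLocation) only mutate the partition's in-memory parameter map; persisting the change is left to the caller. A minimal sketch of how they might be combined when archiving a partition, assuming the caller later pushes the altered partition to the metastore (the method name below is a placeholder, not part of the original code):

// Hypothetical driver for the helpers above; markPartitionArchived is a
// placeholder name and persisting the change is intentionally left out.
private void markPartitionArchived(Partition p, int level) {
  // Remember where the data originally lived so a later unarchive can restore it.
  setOriginalLocation(p, p.getLocation());
  // Flag the partition as archived at the requested level (local change only).
  setIsArchived(p, true, level);
  // The updated parameters still have to be pushed to the metastore,
  // e.g. through an alterPartition call, which is outside these helpers.
}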
protected long getSize(HiveConf conf, Partition partition) {
  Path path = partition.getDataLocation();
  String size = partition.getParameters().get("totalSize");
  return getSize(conf, size, path);
}
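The totalSize parameter can be absent or stale, so the getSize(conf, size, path) overload called above presumably falls back to the filesystem. Its body is not part of this snippet; a minimal sketch of what such a fallback could look like, under that assumption:

// Hedged sketch of the getSize(conf, size, path) overload used above; this is
// an illustration, not the original implementation.
protected long getSize(HiveConf conf, String size, Path path) {
  // Prefer the metastore-recorded totalSize when it is present and parseable.
  if (size != null) {
    try {
      return Long.parseLong(size);
    } catch (NumberFormatException ignored) {
      // Fall through and ask the filesystem instead.
    }
  }
  // Otherwise consult the filesystem for the actual on-disk size.
  try {
    FileSystem fs = path.getFileSystem(conf);
    return fs.getContentSummary(path).getLength();
  } catch (IOException e) {
    return -1;
  }
}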
private ReplLoadOpType getLoadPartitionType(Map<String, String> partSpec)
    throws InvalidOperationException, HiveException {
  Partition ptn = context.hiveDb.getPartition(table, partSpec, false);
  if (ptn == null) {
    return ReplLoadOpType.LOAD_NEW;
  }
  if (ReplUtils.replCkptStatus(tableContext.dbNameToLoadIn, ptn.getParameters(),
      context.dumpDirectory)) {
    return ReplLoadOpType.LOAD_SKIP;
  }
  return ReplLoadOpType.LOAD_REPLACE;
}
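A caller of getLoadPartitionType would typically branch on the returned ReplLoadOpType. A hedged usage sketch, where createPartitionTask and replacePartitionTask are placeholder names rather than methods from the original code:

// Hypothetical caller of getLoadPartitionType; the task-creating methods are
// placeholders, not part of the original snippets.
private void loadPartition(Map<String, String> partSpec)
    throws InvalidOperationException, HiveException {
  switch (getLoadPartitionType(partSpec)) {
    case LOAD_NEW:
      createPartitionTask(partSpec);   // partition absent on target: create it
      break;
    case LOAD_REPLACE:
      replacePartitionTask(partSpec);  // partition exists, not checkpointed: overwrite it
      break;
    case LOAD_SKIP:
    default:
      break;                           // checkpoint marks the partition as already loaded
  }
}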
/**
 * Determines if a current replication object (current state of dump) is allowed to
 * replicate-replace-into a given partition.
 */
public boolean allowReplacementInto(Partition ptn) {
  return allowReplacement(getLastReplicatedStateFromParameters(ptn.getParameters()),
      this.getCurrentReplicationState());
}
/**
 * Determines if a current replication event specification is allowed to
 * replicate-replace-into a given partition.
 */
public boolean allowEventReplacementInto(Partition ptn) {
  return allowReplacement(getLastReplicatedStateFromParameters(ptn.getParameters()),
      this.getReplicationState());
}
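Both methods delegate to allowReplacement, whose body is not shown here. A minimal sketch of the comparison it presumably performs, assuming replication state strings are ordered by their numeric event ID; treat this as an illustration, not the original implementation:

// Hedged sketch of allowReplacement; the original body is not part of these
// snippets, so this only illustrates the "replace only with newer state" idea.
private static boolean allowReplacement(String currentReplState, String replacementReplState) {
  if (currentReplState == null || currentReplState.isEmpty()) {
    // No replication state recorded on the object yet: replacement is safe.
    return true;
  }
  if (replacementReplState == null || replacementReplState.isEmpty()) {
    // The object is tracked but the incoming event carries no state: refuse.
    return false;
  }
  // Compare the numeric portion of the two state IDs; only strictly newer
  // events are allowed to replace existing data.
  long current = Long.parseLong(currentReplState.replaceAll("\\D", ""));
  long replacement = Long.parseLong(replacementReplState.replaceAll("\\D", ""));
  return current < replacement;
}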
private Collection<List<ColumnStatisticsObj>> verifyAndGetPartColumnStats(
    Hive hive, Table tbl, String colName, Set<Partition> parts) throws TException, LockException {
  List<String> partNames = new ArrayList<String>(parts.size());
  for (Partition part : parts) {
    if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) {
      Logger.debug("Stats for part : " + part.getSpec() + " column " + colName
          + " are not up to date.");
      return null;
    }
    partNames.add(part.getName());
  }
  AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(hive.getConf(), tbl);
  Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
      tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
      tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
  if (result.size() != parts.size()) {
    Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");
    return null;
  }
  return result.values();
}
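A caller of verifyAndGetPartColumnStats would treat a null return as "stats unusable" and avoid answering the query from metadata. A hedged usage sketch, where answerQueryFromStats and fallBackToExecution are placeholder names, not methods from the original code:

// Hypothetical caller; the two handler methods are placeholders for whatever
// the surrounding optimizer does in each case.
Collection<List<ColumnStatisticsObj>> partStats =
    verifyAndGetPartColumnStats(hive, tbl, colName, parts);
if (partStats == null) {
  // Stats are missing or stale for at least one partition: run the query normally.
  fallBackToExecution();
} else {
  answerQueryFromStats(partStats);
}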
@Test
public void testDroppedPartitions() throws Exception {
  WorkUnitState previousWus = new WorkUnitState();
  previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
  previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
  previousWus.setActualHighWatermark(
      new MultiKeyValueLongWatermark(ImmutableMap.of("2015-01", 100L, "2015-02", 101L)));

  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
  PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);

  Table table = mockTable("test_dataset_urn");
  Mockito.when(table.getPartitionKeys()).thenReturn(ImmutableList.of(new FieldSchema("year", "string", "")));

  Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));

  // partition 2015 replaces 2015-01 and 2015-02
  Mockito.when(partition2015.getParameters()).thenReturn(
      ImmutableMap.of(AbstractAvroToOrcConverter.REPLACED_PARTITIONS_HIVE_METASTORE_KEY, "2015-01|2015-02"));
  watermarker.onPartitionProcessBegin(partition2015, 0L, 0L);

  Assert.assertEquals(watermarker.getExpectedHighWatermarks().get("db@test_dataset_urn"),
      ImmutableMap.of("2015", 0L));
}
public PartitionDesc(final Partition part, final TableDesc tableDesc) throws HiveException {
  PartitionDescConstructorHelper(part, tableDesc, true);
  if (Utilities.isInputFileFormatSelfDescribing(this)) {
    // If the input format is self-describing, there is no need to send column info
    // per partition, since it is not used anyway.
    Table tbl = part.getTable();
    setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(),
        part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys()));
  } else {
    setProperties(part.getMetadataFromPartitionSchema());
  }
}
public PartitionDesc(final Partition part) throws HiveException {
  PartitionDescConstructorHelper(part, getTableDesc(part.getTable()), true);
  if (Utilities.isInputFileFormatSelfDescribing(this)) {
    // If the input format is self-describing, there is no need to send column info
    // per partition, since it is not used anyway.
    Table tbl = part.getTable();
    setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(),
        part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys()));
  } else {
    setProperties(part.getMetadataFromPartitionSchema());
  }
}