/**
 * Should only be used by serialization.
 */
public void setTPartition(org.apache.hadoop.hive.metastore.api.Partition partition) {
  StringInternUtils.internStringsInList(partition.getValues());
  tPartition = partition;
}
public static String getPartitionName(Table table, Partition partition) {
  try {
    return Warehouse.makePartName(getPartCols(table), partition.getValues());
  } catch (MetaException e) {
    throw new RuntimeException(e);
  }
}
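A minimal, self-contained sketch (not from the source) of what the Warehouse.makePartName call above produces: it joins partition columns and values into an escaped "key=value/key=value" path fragment. The column names and values here are purely illustrative.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartNameSketch {
  public static void main(String[] args) throws MetaException {
    // Hypothetical partition columns and values for illustration only.
    List<FieldSchema> partCols = Arrays.asList(
        new FieldSchema("year", "string", null),
        new FieldSchema("month", "string", null));
    List<String> values = Arrays.asList("1997", "05");
    // Prints "year=1997/month=05"
    System.out.println(Warehouse.makePartName(partCols, values));
  }
}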
public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
  Map<String, String> partitionKeys = new LinkedHashMap<>();
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
    partitionKeys.put(table.getPartitionKeys().get(i).getName(),
        partition.getValues().get(i));
  }
  return partitionKeys;
}
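A short usage sketch for the helper above: partition values are positional, so the map pairs the i-th partition key with the i-th value while preserving key order. It assumes getPartitionKeyValues is in scope (e.g. statically imported from the class above); the table and values are made up for illustration.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionKeyValuesSketch {
  public static void main(String[] args) {
    Table table = new Table();
    table.setPartitionKeys(Arrays.asList(
        new FieldSchema("year", "string", null),
        new FieldSchema("month", "string", null)));

    Partition partition = new Partition();
    partition.setValues(Arrays.asList("1997", "05"));

    // Prints "{year=1997, month=05}" (LinkedHashMap preserves key order)
    System.out.println(getPartitionKeyValues(table, partition));
  }
}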
public static int getArchivingLevel(Partition part) throws MetaException {
  if (!isArchived(part)) {
    throw new MetaException("Getting level of unarchived partition");
  }
  String lv = part.getParameters().get(ARCHIVING_LEVEL);
  if (lv != null) {
    return Integer.parseInt(lv);
  }
  // Partitions archived before multiple archiving levels were introduced carry no
  // explicit level parameter; fall back to the full partition depth.
  return part.getValues().size();
}
private void dropPartition(Partition partition, boolean ifExists, boolean deleteData)
    throws HCatException, MetaException, TException {
  try {
    hmsClient.dropPartition(partition.getDbName(), partition.getTableName(),
        partition.getValues(), deleteData);
  } catch (NoSuchObjectException e) {
    if (!ifExists) {
      throw new ObjectNotFoundException(
          "NoSuchObjectException while dropping partition: " + partition.getValues(), e);
    }
  }
}
@Override
public synchronized void dropPartition(String databaseName, String tableName, List<String> parts,
    boolean deleteData) {
  partitions.entrySet().removeIf(entry -> entry.getKey().matches(databaseName, tableName)
      && entry.getValue().getValues().equals(parts));
}
private void addPartition(Partition p) throws AlreadyExistsException, MetaException {
  String partName = Warehouse.makePartName(tTable.getPartitionKeys(), p.getValues());
  if (parts.putIfAbsent(partName, p) != null) {
    throw new AlreadyExistsException("Partition " + partName + " already exists");
  }
}
private static boolean partitionMatches(Partition partition, String databaseName, String tableName,
    List<String> parts) {
  if (!partition.getDbName().equals(databaseName)
      || !partition.getTableName().equals(tableName)) {
    return false;
  }
  List<String> values = partition.getValues();
  if (values.size() != parts.size()) {
    return false;
  }
  for (int i = 0; i < values.size(); i++) {
    String part = parts.get(i);
    // An empty string acts as a wildcard that matches any value at this position.
    if (!part.isEmpty() && !values.get(i).equals(part)) {
      return false;
    }
  }
  return true;
}
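A small sketch of the wildcard behavior in partitionMatches above, assuming the method is accessible from the caller; the database, table, and values are invented for illustration.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.Partition;

public class PartitionMatchSketch {
  public static void main(String[] args) {
    Partition partition = new Partition();
    partition.setDbName("default");
    partition.setTableName("sales");
    partition.setValues(Arrays.asList("1997", "05", "16"));

    // Exact match on every position -> true
    System.out.println(partitionMatches(partition, "default", "sales",
        Arrays.asList("1997", "05", "16")));
    // Empty strings act as per-position wildcards -> true
    System.out.println(partitionMatches(partition, "default", "sales",
        Arrays.asList("1997", "", "")));
    // A non-empty value must match exactly -> false
    System.out.println(partitionMatches(partition, "default", "sales",
        Arrays.asList("1998", "", "")));
  }
}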
protected List<Path> getDirectories(HiveConf conf, Table t, Partition p) throws Exception {
  String partValue = (p == null) ? null : p.getValues().get(0);
  String location = getLocation(t.getTableName(), partValue);
  Path dir = new Path(location);
  FileSystem fs = FileSystem.get(conf);
  FileStatus[] stats = fs.listStatus(dir);
  List<Path> paths = new ArrayList<Path>(stats.length);
  for (FileStatus stat : stats) {
    paths.add(stat.getPath());
  }
  return paths;
}
private static void assertPartitionsHaveCorrectValues(List<Partition> partitions,
    List<List<String>> testValues) throws Exception {
  assertEquals(testValues.size(), partitions.size());
  for (int i = 0; i < partitions.size(); ++i) {
    assertEquals(testValues.get(i), partitions.get(i).getValues());
  }
}
public static Partition fromMetastoreApiPartition(org.apache.hadoop.hive.metastore.api.Partition partition)
{
    StorageDescriptor storageDescriptor = partition.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Partition does not contain a storage descriptor: " + partition);
    }

    Partition.Builder partitionBuilder = Partition.builder()
            .setDatabaseName(partition.getDbName())
            .setTableName(partition.getTableName())
            .setValues(partition.getValues())
            .setColumns(storageDescriptor.getCols().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setParameters(partition.getParameters());
    fromMetastoreApiStorageDescriptor(storageDescriptor, partitionBuilder.getStorageBuilder(),
            format("%s.%s", partition.getTableName(), partition.getValues()));

    return partitionBuilder.build();
}
PartValEqWrapperLite(Partition partition) {
  this.values = partition.isSetValues() ? partition.getValues() : null;
  if (partition.getSd() != null) {
    this.location = partition.getSd().getLocation();
  }
}
@Test
public void testDropPartitionNullPartDropOptions() throws Exception {
  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), null);

  List<Partition> droppedPartitions = Lists.newArrayList(PARTITIONS[0]);
  List<Partition> remainingPartitions = Lists.newArrayList(PARTITIONS[1], PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, droppedPartitions, remainingPartitions, true, false);
}
@Test
public void testDropPartition() throws Exception {
  boolean dropSuccessful = client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), false);
  Assert.assertTrue(dropSuccessful);

  List<Partition> droppedPartitions = Lists.newArrayList(PARTITIONS[0]);
  List<Partition> remainingPartitions = Lists.newArrayList(PARTITIONS[1], PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, droppedPartitions, remainingPartitions, false, false);
}
@Test
public void createOnPartitionTable() throws Exception {
  helper.createPartitionIfNotExists(PARTITIONED_VALUES);

  verify(mockClient).add_partition(partitionCaptor.capture());
  Partition actual = partitionCaptor.getValue();
  assertThat(actual.getSd().getLocation(), is(PARTITION_LOCATION));
  assertThat(actual.getValues(), is(PARTITIONED_VALUES));
}
private void validatePartition(Partition expectedPartition, Partition actualPartition) {
  assertEquals(expectedPartition.getValues(), actualPartition.getValues());
  assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
  assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
}
/**
 * Testing getPartition(String, String, List(String)) ->
 * get_partition(String, String, List(String)).
 */
@Test
public void testGetPartitionByValues() throws Exception {
  createTable3PartCols1Part(client);
  List<String> parts = Lists.newArrayList("1997", "05", "16");
  Partition partition = client.getPartition(DB_NAME, TABLE_NAME, parts);
  assertNotNull(partition);
  assertEquals(parts, partition.getValues());
}
private Partition createPartition(Table table, List<String> values, String location) throws Exception {
  Partition partition = buildPartition(table, values, location);
  client.add_partition(partition);
  return client.getPartition(DB_NAME, table.getTableName(), partition.getValues());
}
private static void adjust(HiveMetaStoreClient client, Partition part, String dbName, String tblName,
    boolean isThriftClient) throws TException {
  Partition part_get = client.getPartition(dbName, tblName, part.getValues());
  if (isThriftClient) {
    part.setCreateTime(part_get.getCreateTime());
    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME,
        Long.toString(part_get.getCreateTime()));
  }
  part.setWriteId(part_get.getWriteId());
}