/**
 * Get all the partitions of the table that match the given partial
 * specification. Partition columns whose value can be anything should be
 * given as an empty string.
 *
 * @param tbl
 *          object for which partitions are needed. Must be partitioned.
 * @param partialPartSpec
 *          partial partition specification (column name -> value; empty
 *          string matches any value)
 * @return list of partition objects
 * @throws HiveException if the metastore lookup fails
 */
public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec)
    throws HiveException {
  // Delegate with max = -1, i.e. no limit on the number of partitions returned.
  return getPartitions(tbl, partialPartSpec, (short)-1);
}
/**
 * Collects the data location(s) backing {@code table}: every partition's
 * location when {@code partSpec} is {@code null} and the table is partitioned,
 * the table path for an unpartitioned table, or the locations of the
 * partitions named by {@code partSpec}. As a side effect, stale basic stats
 * parameters are refreshed via alter calls on the visited objects.
 *
 * @param db       Hive client used for metastore lookups and alterations
 * @param table    table whose location(s) are requested
 * @param partSpec partition specification, or {@code null} for the whole table
 * @return list of data locations
 * @throws HiveException             on metastore access failure
 * @throws InvalidOperationException if an alter call is rejected
 */
private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec)
    throws HiveException, InvalidOperationException {
  List<Path> locations = new ArrayList<Path>();
  if (partSpec == null) {
    if (table.isPartitioned()) {
      collectPartitionLocations(db, table, db.getPartitions(table), locations);
    } else {
      locations.add(table.getPath());
      EnvironmentContext environmentContext = new EnvironmentContext();
      if (needToUpdateStats(table.getParameters(), environmentContext)) {
        db.alterTable(table.getDbName() + "." + table.getTableName(), table, environmentContext);
      }
    }
  } else {
    collectPartitionLocations(db, table, db.getPartitionsByNames(table, partSpec), locations);
  }
  return locations;
}

/**
 * Adds each partition's data location to {@code locations}, first issuing an
 * alterPartition when the partition's stats parameters need refreshing.
 * (Extracted: this loop was previously duplicated verbatim in two branches.)
 */
private void collectPartitionLocations(Hive db, Table table, List<Partition> partitions,
    List<Path> locations) throws HiveException, InvalidOperationException {
  for (Partition partition : partitions) {
    locations.add(partition.getDataLocation());
    EnvironmentContext environmentContext = new EnvironmentContext();
    if (needToUpdateStats(partition.getParameters(), environmentContext)) {
      db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
    }
  }
}
/**
 * Get all the partitions of the table that match the given partial
 * specification. Partition columns whose value can be anything should be
 * given as an empty string.
 *
 * @param tbl
 *          object for which partitions are needed. Must be partitioned.
 * @param partialPartSpec
 *          partial partition specification (column name -> value; empty
 *          string matches any value)
 * @return list of partition objects
 * @throws HiveException if the metastore lookup fails
 */
public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec)
    throws HiveException {
  // Delegate with max = -1, i.e. no limit on the number of partitions returned.
  return getPartitions(tbl, partialPartSpec, (short)-1);
}
/**
 * Fetches the partitions of {@code table} matching {@code partSpec}, or all
 * partitions when the spec is {@code null}.
 *
 * @param table          table to look up
 * @param partSpec       partition spec, or {@code null} for every partition
 * @param throwException whether an empty result should be treated as an error
 * @return the matching partitions (possibly empty when {@code throwException}
 *         is false)
 * @throws SemanticException if the lookup fails, or nothing matched and
 *                           {@code throwException} is set
 */
protected List<Partition> getPartitions(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  List<Partition> matched;
  try {
    if (partSpec == null) {
      matched = db.getPartitions(table);
    } else {
      matched = db.getPartitions(table, partSpec);
    }
  } catch (Exception e) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (throwException && matched.isEmpty()) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return matched;
}
/**
 * Fetches the partitions of {@code table} matching {@code partSpec}, or all
 * partitions when the spec is {@code null}.
 *
 * @param table          table to look up
 * @param partSpec       partition spec, or {@code null} for every partition
 * @param throwException whether an empty result should be treated as an error
 * @return the matching partitions (possibly empty when {@code throwException}
 *         is false)
 * @throws SemanticException if the lookup fails, or nothing matched and
 *                           {@code throwException} is set
 */
protected List<Partition> getPartitions(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  List<Partition> matched;
  try {
    if (partSpec == null) {
      matched = db.getPartitions(table);
    } else {
      matched = db.getPartitions(table, partSpec);
    }
  } catch (Exception e) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (throwException && matched.isEmpty()) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return matched;
}
private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par) throws HiveException { // output file system information Path tblPath = tbl.getPath(); List<Path> locations = new ArrayList<Path>(); if (tbl.isPartitioned()) { if (par == null) { for (Partition curPart : db.getPartitions(tbl)) { if (curPart.getLocation() != null) { locations.add(new Path(curPart.getLocation())); } } } else { if (par.getLocation() != null) { locations.add(new Path(par.getLocation())); } } } else { if (tblPath != null) { locations.add(tblPath); } } return locations; }
// Resolves the index's backing table and checks its partitions against partSpec.
// NOTE(review): the name suggests "true when the partition exists", but the method
// returns true when the fetched list is null or EMPTY — i.e. the opposite. Verify
// against callers before relying on (or renaming/inverting) this; left unchanged here.
private boolean containsPartition(Index index, Map<String, String> partSpec) throws HiveException { String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); Table indexTable = hive.getTable(qualified[0], qualified[1]); List<Partition> parts = hive.getPartitions(indexTable, partSpec); return (parts == null || parts.size() == 0); } }
private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par) throws HiveException { // output file system information Path tblPath = tbl.getPath(); List<Path> locations = new ArrayList<Path>(); if (tbl.isPartitioned()) { if (par == null) { for (Partition curPart : db.getPartitions(tbl)) { if (curPart.getLocation() != null) { locations.add(new Path(curPart.getLocation())); } } } else { if (par.getLocation() != null) { locations.add(new Path(par.getLocation())); } } } else { if (tblPath != null) { locations.add(tblPath); } } return locations; }
/**
 * Resolves {@code tableName} (and optionally {@code partSpec}) into a spec.
 * With no partition spec the result is TABLE_ONLY; otherwise it is
 * STATIC_PARTITION, holding either all partitions matching a partial spec
 * (when {@code allowPartialPartitionsSpec} is set) or exactly the one named
 * partition.
 *
 * @throws HiveException if lookups fail, or the exact partition is unknown
 */
public TableSpec(Hive db, String tableName, Map<String, String> partSpec,
    boolean allowPartialPartitionsSpec) throws HiveException {
  Table table = db.getTable(tableName);
  tableHandle = table;
  this.tableName = table.getDbName() + "." + table.getTableName();
  if (partSpec == null) {
    specType = SpecType.TABLE_ONLY;
    return;
  }
  if (allowPartialPartitionsSpec) {
    // Partial spec: any number of matching partitions is acceptable.
    partitions = db.getPartitions(table, partSpec);
    specType = SpecType.STATIC_PARTITION;
    return;
  }
  // Exact spec: the named partition must already exist.
  Partition partition = db.getPartition(table, partSpec, false);
  if (partition == null) {
    throw new SemanticException("partition is unknown: " + table + "/" + partSpec);
  }
  partHandle = partition;
  partitions = Collections.singletonList(partHandle);
  specType = SpecType.STATIC_PARTITION;
}
// When many partitions are allowed, fetch every partition matching partSpec.
// NOTE(review): fragment — the string concatenation and catch-block handling
// continue beyond this excerpt; left byte-identical.
if (allowMany) { try { parts = db.getPartitions(table, partSpec); } catch (HiveException e) { LOG.error("Got HiveException during obtaining list of partitions"
// When many partitions are allowed, fetch every partition matching partSpec.
// NOTE(review): fragment — the string concatenation and catch-block handling
// continue beyond this excerpt; left byte-identical.
if (allowMany) { try { parts = db.getPartitions(table, partSpec); } catch (HiveException e) { LOG.error("Got HiveException during obtaining list of partitions"
@Test(expected = MetastoreException.class) public void testInvalidPartitionKeyName() throws HiveException, AlreadyExistsException, IOException, MetastoreException { Table table = createTestTable(); List<Partition> partitions = hive.getPartitions(table); assertEquals(2, partitions.size()); // add a fake partition dir on fs fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf()); Path fakePart = new Path(table.getDataLocation().toString(), "fakedate=2009-01-01/fakecity=sanjose"); fs.mkdirs(fakePart); fs.deleteOnExit(fakePart); checker.checkMetastore(catName, dbName, tableName, null, new CheckResult()); }
// Walk every partition of the base table; pSpec holds that partition's
// key -> value spec. NOTE(review): fragment — the loop body continues beyond
// this excerpt; left byte-identical.
baseTblPartitions = db.getPartitions(baseTbl); for (Partition basePart : baseTblPartitions) { HashMap<String, String> pSpec = basePart.getSpec();
@Test public void testAdditionalPartitionDirs() throws HiveException, AlreadyExistsException, IOException, MetastoreException { Table table = createTestTable(); List<Partition> partitions = hive.getPartitions(table); assertEquals(2, partitions.size()); // add a fake partition dir on fs fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf()); Path fakePart = new Path(table.getDataLocation().toString(), partDateName + "=2017-01-01/" + partCityName + "=paloalto/fakePartCol=fakepartValue"); fs.mkdirs(fakePart); fs.deleteOnExit(fakePart); CheckResult result = new CheckResult(); checker.checkMetastore(catName, dbName, tableName, null, result); assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs()); assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs()); assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs()); //fakePart path partition is added since the defined partition keys are valid assertEquals(1, result.getPartitionsNotInMs().size()); }
// Emit a line for the table itself, then one per partition; the check below
// detects partitions stored outside the table's directory tree.
// NOTE(review): fragment — the isPathWithinSubtree call's arguments continue
// beyond this excerpt; left byte-identical.
write(lineFor(table.getTableName(), fullyQualifiedDataLocation, hiveConf)); if (table.isPartitioned()) { List<Partition> partitions = Hive.get(hiveConf).getPartitions(table); for (Partition partition : partitions) { boolean partitionLocOutsideTableLoc = !FileUtils.isPathWithinSubtree(
@Test public void testSkipInvalidPartitionKeyName() throws HiveException, AlreadyExistsException, IOException, MetastoreException { hive.getConf().set(HiveConf.ConfVars.HIVE_MSCK_PATH_VALIDATION.varname, "skip"); checker = new HiveMetaStoreChecker(msc, hive.getConf()); Table table = createTestTable(); List<Partition> partitions = hive.getPartitions(table); assertEquals(2, partitions.size()); // add a fake partition dir on fs fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf()); Path fakePart = new Path(table.getDataLocation().toString(), "fakedate=2009-01-01/fakecity=sanjose"); fs.mkdirs(fakePart); fs.deleteOnExit(fakePart); createPartitionsDirectoriesOnFS(table, 2); CheckResult result = new CheckResult(); checker.checkMetastore(catName, dbName, tableName, null, result); assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs()); assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs()); assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs()); // only 2 valid partitions should be added assertEquals(2, result.getPartitionsNotInMs().size()); }
// Sanity-check that every combination of partition key values was created,
// then hand the table back to the caller.
// NOTE(review): fragment — the enclosing helper's signature is outside this
// excerpt; left byte-identical.
List<Partition> partitions = hive.getPartitions(table); assertEquals(numOfPartKeys * valuesPerPartition, partitions.size()); return table;
// For a partitioned table, gather every partition plus parallel lists of
// partition descriptors and locations to build the FetchWork.
// NOTE(review): fragment — population of these lists and the FetchWork
// construction continue beyond this excerpt; left byte-identical.
FetchWork work; if (!tbl.getPartCols().isEmpty()) { List<Partition> partitions = hive.getPartitions(tbl); List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>(); List<Path> partLocs = new ArrayList<Path>();
// Compaction targets exactly one partition: more than one match for the spec
// is an error. NOTE(review): fragment — subsequent handling (e.g. the
// zero/one-partition cases) continues beyond this excerpt; left byte-identical.
List<Partition> partitions = db.getPartitions(tbl, partSpec); if (partitions.size() > 1) { throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS);
// Nothing should be reported missing from the metastore, and both metastore
// partitions should still be present. NOTE(review): fragment — the enclosing
// test method is outside this excerpt; left byte-identical.
assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs()); List<Partition> partitions = hive.getPartitions(table); assertEquals(2, partitions.size());