/**
 * Returns partition metadata for the given table and partition spec.
 * Convenience overload that delegates with a null partition path and
 * inheritTableSpecs = true.
 *
 * @param tbl the partition's table
 * @param partSpec partition keys and values
 * @param forceCreate if true and the partition doesn't exist, it is created
 * @return the partition, or null if it does not exist and forceCreate is false
 * @throws HiveException if the metastore lookup (or creation) fails
 */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate) throws HiveException {
  return getPartition(tbl, partSpec, forceCreate, null, true);
}
/**
 * Builds a spec for the named table, optionally resolving partitions.
 *
 * @param db metastore client used to resolve the table and partitions
 * @param tableName name of the table to resolve
 * @param partSpec partition keys/values, or null for a table-only spec
 * @param allowPartialPartitionsSpec when true, partSpec may match multiple
 *          partitions; when false it must identify exactly one existing partition
 * @throws HiveException if the table lookup fails
 * @throws SemanticException if an exact partition is required but does not exist
 */
public TableSpec(Hive db, String tableName, Map<String, String> partSpec,
    boolean allowPartialPartitionsSpec) throws HiveException {
  Table tbl = db.getTable(tableName);
  tableHandle = tbl;
  this.tableName = tbl.getDbName() + "." + tbl.getTableName();
  if (partSpec == null) {
    // No partition clause: the spec refers to the table as a whole.
    specType = SpecType.TABLE_ONLY;
    return;
  }
  if (allowPartialPartitionsSpec) {
    // A partial spec may expand to any number of matching partitions.
    partitions = db.getPartitions(tbl, partSpec);
    specType = SpecType.STATIC_PARTITION;
  } else {
    // An exact spec must name one existing partition.
    Partition part = db.getPartition(tbl, partSpec, false);
    if (part == null) {
      throw new SemanticException("partition is unknown: " + tbl + "/" + partSpec);
    }
    partHandle = part;
    partitions = Collections.singletonList(partHandle);
    specType = SpecType.STATIC_PARTITION;
  }
}
/**
 * Returns partition metadata for the given table and partition spec.
 * Convenience overload that delegates with a null partition path,
 * inheritTableSpecs = true, and a null final argument (its meaning is defined
 * by the 6-arg overload, which is not visible in this chunk).
 *
 * @param tbl the partition's table
 * @param partSpec partition keys and values
 * @param forceCreate if true and the partition doesn't exist, it is created
 * @return the partition, or null if it does not exist and forceCreate is false
 * @throws HiveException if the metastore lookup (or creation) fails
 */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate) throws HiveException {
  return getPartition(tbl, partSpec, forceCreate, null, true, null);
}
/**
 * Returns partition metadata.
 *
 * @param tbl the partition's table
 * @param partSpec partition keys and values
 * @param forceCreate if this is true and the partition doesn't exist then a
 *          partition is created
 * @param partPath the path where the partition data is located
 * @param inheritTableSpecs whether to copy over the table specs for if/of/serde
 * @return result partition object or null if there is no partition
 * @throws HiveException if the metastore lookup (or creation) fails
 */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate,
    String partPath, boolean inheritTableSpecs) throws HiveException {
  // Delegates to the full overload; the final null argument's meaning is
  // defined by that overload, which is not visible in this chunk.
  return getPartition(tbl, partSpec, forceCreate, partPath, inheritTableSpecs, null);
}
/**
 * Builds a spec for the named table, optionally pinned to a single partition.
 *
 * @param db metastore client used to resolve the table and partition
 * @param conf Hive configuration (currently unused in this constructor)
 * @param tableName name of the table to resolve
 * @param partSpec partition keys/values, or null for a table-only spec
 * @throws HiveException if the table or partition lookup fails
 */
public TableSpec(Hive db, HiveConf conf, String tableName, Map<String, String> partSpec)
    throws HiveException {
  this.tableName = tableName;
  this.partSpec = partSpec;
  this.tableHandle = db.getTable(tableName);
  if (partSpec != null) {
    this.specType = SpecType.STATIC_PARTITION;
    // NOTE(review): getPartition(..., false) returns null when the partition
    // does not exist, and unlike the sibling constructor there is no null check
    // here — partitions would then be a singleton list containing null. Confirm
    // callers tolerate that.
    this.partHandle = db.getPartition(tableHandle, partSpec, false);
    this.partitions = Arrays.asList(partHandle);
  } else {
    this.specType = SpecType.TABLE_ONLY;
  }
}
/**
 * Decides how an incoming replicated partition should be applied to the
 * target warehouse.
 *
 * @param partSpec partition keys and values identifying the target partition
 * @return LOAD_NEW when the partition is absent, LOAD_SKIP when the replication
 *         checkpoint says it was already loaded, LOAD_REPLACE otherwise
 * @throws InvalidOperationException on invalid metastore operations
 * @throws HiveException if the partition lookup fails
 */
private ReplLoadOpType getLoadPartitionType(Map<String, String> partSpec)
    throws InvalidOperationException, HiveException {
  Partition existing = context.hiveDb.getPartition(table, partSpec, false);
  if (existing == null) {
    // Target partition absent: load it as a brand-new partition.
    return ReplLoadOpType.LOAD_NEW;
  }
  // Checkpoint parameters tell us whether this dump was already applied here.
  boolean alreadyLoaded = ReplUtils.replCkptStatus(
      tableContext.dbNameToLoadIn, existing.getParameters(), context.dumpDirectory);
  return alreadyLoaded ? ReplLoadOpType.LOAD_SKIP : ReplLoadOpType.LOAD_REPLACE;
}
}
/**
 * Looks up a partition by spec, translating all lookup failures into
 * SemanticException.
 *
 * @param table the table owning the partition
 * @param partSpec partition keys and values
 * @param throwException when true, a missing partition is an error; when false,
 *          null is returned instead
 * @return the partition, or null when absent and throwException is false
 * @throws SemanticException on lookup failure, or when the partition is
 *           missing and throwException is true
 */
protected Partition getPartition(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  try {
    Partition found = db.getPartition(table, partSpec, false);
    if (found != null || !throwException) {
      return found;
    }
  } catch (Exception e) {
    // Wrap any metastore/runtime failure with the partition spec for context.
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  // Partition was absent and the caller asked us to treat that as an error.
  throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
}
/**
 * Fetches a partition by spec; lookup failures become SemanticException.
 *
 * @param table the table owning the partition
 * @param partSpec partition keys and values
 * @param throwException whether a missing partition should raise instead of
 *          returning null
 * @return the partition, or null when absent and throwException is false
 * @throws SemanticException on lookup failure, or on a missing partition when
 *           throwException is true
 */
protected Partition getPartition(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  Partition part = null;
  try {
    part = db.getPartition(table, partSpec, false);
  } catch (Exception e) {
    // Preserve the original failure as the cause, with the spec in the message.
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (throwException && part == null) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return part;
}
/**
 * Looks up a partition by spec; never returns null.
 *
 * @param table the table owning the partition
 * @param partSpec partition keys and values
 * @return the resolved partition
 * @throws SemanticException if the lookup fails or the partition does not exist
 */
private Partition getPartition(Table table, Map<String, String> partSpec)
    throws SemanticException {
  try {
    Partition partition = db.getPartition(table, partSpec, false);
    if (partition == null) {
      // getPartition(..., false) yields null when the partition does not exist.
      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
    }
    return partition;
  } catch (HiveException e) {
    // The SemanticException thrown above is itself a HiveException (see the
    // cast below); rethrow it unchanged rather than double-wrapping.
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
}

private String toMessage(ErrorMsg message, Object detail) {
/**
 * Looks up a partition by spec; never returns null.
 *
 * @param table the table owning the partition
 * @param partSpec partition keys and values
 * @return the resolved partition
 * @throws SemanticException if the lookup fails or the partition does not exist
 */
private Partition getPartition(Table table, Map<String, String> partSpec)
    throws SemanticException {
  try {
    Partition partition = db.getPartition(table, partSpec, false);
    if (partition == null) {
      // A null result from getPartition(..., false) means "does not exist".
      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
    }
    return partition;
  } catch (HiveException e) {
    // Avoid re-wrapping the SemanticException we just threw (SemanticException
    // is a HiveException, as the cast below shows).
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
}

private String toMessage(ErrorMsg message, Object detail) {
partn = db.getPartition(tab, partSpec, false); } catch (HiveException e) { partn = null;
/**
 * Per-test setup: builds a fresh QueryState with V2 (SQL-standard)
 * authorization enabled and wires a mocked Hive metastore client.
 */
@Before
public void setup() throws Exception {
  queryState = new QueryState.Builder().build();
  // set authorization mode to V2
  HiveConf conf = queryState.getConf();
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      SQLStdHiveAuthorizerFactory.class.getName());
  db = Mockito.mock(Hive.class);
  table = new Table(DB, TABLE);
  SessionState.start(conf);
  // Stub metastore lookups so the test table resolves without a real metastore.
  Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
  Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
  // NOTE(review): unlike sibling setups, 'partition' is never assigned in this
  // method, so this stub returns whatever the field currently holds (possibly
  // null) — confirm it is initialized elsewhere.
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
      .thenReturn(partition);
}
/** * check that every index table contains the given partition and is fresh */ private static boolean containsPartition(Hive hive, Partition part, List<Index> indexes) throws HiveException { HashMap<String, String> partSpec = part.getSpec(); if (partSpec.isEmpty()) { // empty specs come from non-partitioned tables return isIndexTableFresh(hive, indexes, part.getTable()); } for (Index index : indexes) { // index.getDbName() is used as a default database, which is database of target table, // if index.getIndexTableName() does not contain database name String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); Table indexTable = hive.getTable(qualified[0], qualified[1]); // get partitions that match the spec Partition matchingPartition = hive.getPartition(indexTable, partSpec, false); if (matchingPartition == null) { LOG.info("Index table " + indexTable + "did not contain built partition that matched " + partSpec); return false; } else if (!isIndexPartitionFresh(hive, index, part)) { return false; } } return true; }
/**
 * Per-test setup: builds a fresh QueryState with the SQL-standard authorizer
 * configured and wires a mocked Hive metastore client around a test table
 * and partition.
 */
@Before
public void setup() throws Exception {
  queryState = new QueryState.Builder().build();
  HiveConf conf = queryState.getConf();
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  db = Mockito.mock(Hive.class);
  table = new Table(DB, TABLE);
  partition = new Partition(table);
  SessionState.start(conf);
  // Stub metastore lookups so table/partition resolution needs no real metastore.
  Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
  Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
      .thenReturn(partition);
}
/**
 * Creates a locking object for a table (when partition spec is not provided)
 * or for a table partition.
 *
 * @param hiveDB an object to communicate with the metastore
 * @param tableName the table to create the locking object on
 * @param partSpec the spec of a partition to create the locking object on
 * @return the locking object
 * @throws HiveException if the table or the requested partition does not exist
 */
public static HiveLockObject createFrom(Hive hiveDB, String tableName,
    Map<String, String> partSpec) throws HiveException {
  Table tbl = hiveDB.getTable(tableName);
  if (tbl == null) {
    throw new HiveException("Table " + tableName + " does not exist ");
  }
  if (partSpec == null) {
    // No partition spec: lock the table as a whole.
    return new HiveLockObject(tbl, null);
  }
  Partition par = hiveDB.getPartition(tbl, partSpec, false);
  if (par == null) {
    throw new HiveException("Partition " + partSpec + " for table " +
        tableName + " does not exist");
  }
  return new HiveLockObject(par, null);
}
Partition existingPtn = db.getPartition(existingTable, partSpec, false); return ((existingPtn != null) && replicationSpec.allowEventReplacementInto(existingPtn.getParameters()));
Partition par = db.getPartition(tbl, partSpec, false); if (par == null) { throw new HiveException("Partition " + partSpec + " for table " +
/**
 * Per-test setup: builds a fresh QueryState, installs the dummy authorization
 * task factory and SQL-standard authorizer, and wires a mocked Hive metastore
 * client plus the analyzer under test.
 */
@Before
public void setup() throws Exception {
  queryState = new QueryState.Builder().build();
  HiveConf conf = queryState.getConf();
  conf.setVar(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY,
      TestHiveAuthorizationTaskFactory.DummyHiveAuthorizationTaskFactoryImpl.class.getName());
  conf
      .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
          "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  db = Mockito.mock(Hive.class);
  table = new Table(DB, TABLE);
  partition = new Partition(table);
  SessionState.start(conf);
  context = new Context(conf);
  parseDriver = new ParseDriver();
  analyzer = new DDLSemanticAnalyzer(queryState, db);
  // Stub metastore lookups so analysis resolves the test table/partition
  // without touching a real metastore.
  Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
  Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
      .thenReturn(partition);
  // Record the user name Hadoop's default authenticator resolves for this
  // process, for use by the tests.
  HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator();
  auth.setConf(conf);
  currentUser = auth.getUserName();
  // Clear any state the dummy factory accumulated in a previous test.
  DummyHiveAuthorizationTaskFactoryImpl.reset();
}

/**
addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } else { Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false); if (part == null) { throw new HiveException("Specified partition does not exist");
/**
 * Loads data into a single, statically-specified partition and returns the
 * resulting DataContainer for lineage/post-execution-hook bookkeeping.
 *
 * @param db metastore client used to validate, load, and re-fetch the partition
 * @param table the target table
 * @param tbd describes the load: source path, partition spec, load file type,
 *          write id, etc.
 * @param ti carries bucket/sort column info used to update partition metadata
 * @return a DataContainer pairing the table with the freshly loaded partition
 * @throws HiveException if validation, load, or partition lookup fails
 * @throws IOException on filesystem errors during the load
 * @throws InvalidOperationException on invalid metastore operations
 */
private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd,
    TaskInformation ti) throws HiveException, IOException, InvalidOperationException {
  // Reject partition values containing characters the metastore disallows
  // before doing any filesystem work.
  List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(), tbd.getPartitionSpec());
  db.validatePartitionNameCharacters(partVals);
  if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
    Utilities.FILE_OP_LOGGER.trace("loadPartition called from " + tbd.getSourcePath()
        + " into " + tbd.getTable().getTableName());
  }
  // NOTE: long positional argument list — keep the argument order exactly in
  // sync with Hive.loadPartition's signature when editing.
  db.loadPartition(tbd.getSourcePath(), db.getTable(tbd.getTable().getTableName()),
      tbd.getPartitionSpec(), tbd.getLoadFileType(), tbd.getInheritTableSpecs(),
      tbd.getInheritLocation(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
      work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID
          && !tbd.isMmTable(),
      resetStatisticsProps(table), tbd.getWriteId(), tbd.getStmtId(),
      tbd.isInsertOverwrite());
  // Re-fetch the partition so downstream consumers see post-load metadata.
  Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
  // See the comment inside updatePartitionBucketSortColumns.
  if (!tbd.isMmTable() && (ti.bucketCols != null || ti.sortCols != null)) {
    updatePartitionBucketSortColumns(db, table, partn, ti.bucketCols,
        ti.numBuckets, ti.sortCols);
  }
  DataContainer dc = new DataContainer(table.getTTable(), partn.getTPartition());
  // add this partition to post-execution hook
  if (work.getOutputs() != null) {
    DDLTask.addIfAbsentByName(new WriteEntity(partn,
        getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs());
  }
  return dc;
}