/**
 * Registers each filesystem-only partition in the metastore, one at a time.
 * Failures are logged and skipped so that one bad partition does not prevent
 * the remaining ones from being repaired.
 *
 * @param db           metastore handle used to create the partitions
 * @param table        table the missing partitions belong to
 * @param partsNotInMs partitions present on disk but absent from the metastore
 * @param repairOutput receives a human-readable line per partition added
 */
private void msckAddPartitionsOneByOne(Hive db, Table table,
    Set<CheckResult.PartitionResult> partsNotInMs, List<String> repairOutput) {
  for (CheckResult.PartitionResult missing : partsNotInMs) {
    String partName = missing.getPartitionName();
    try {
      db.createPartition(table, Warehouse.makeSpecFromName(partName));
      repairOutput.add("Repair: Added partition to metastore "
          + table.getTableName() + ':' + partName);
    } catch (Exception e) {
      // Best-effort repair: log and continue with the next partition.
      LOG.warn("Repair error, could not add partition to metastore: ", e);
    }
  }
}
Partition indexPart = db.getPartition(indexTbl, partSpec, false); if (indexPart == null) { indexPart = db.createPartition(indexTbl, partSpec); Partition indexPart = db.getPartition(indexTbl, pSpec, false); if (indexPart == null) { indexPart = db.createPartition(indexTbl, pSpec);
@BeforeClass public static void init() throws Exception { queryState = new QueryState.Builder().build(); conf = queryState.getConf(); conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); SessionState.start(conf); // Create a table so we can work against it Hive h = Hive.get(conf); List<String> cols = new ArrayList<String>(); cols.add("a"); List<String> partCols = new ArrayList<String>(); partCols.add("ds"); h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class); Table t = h.getTable("foo"); Map<String, String> partSpec = new HashMap<String, String>(); partSpec.put("ds", "today"); h.createPartition(t, partSpec); }
part_spec.put("hr", "12"); try { hm.createPartition(tbl, part_spec); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e));
Map<String, String> partVals = new HashMap<String, String>(2); partVals.put("ds", "yesterday"); db.createPartition(u, partVals); partVals.clear(); partVals.put("ds", "today"); db.createPartition(u, partVals); sem.analyze(tree, ctx);
hive.createPartition(table, partSpec);
hm.createPartition(table, partitionSpec);
hm.createPartition(table, partitionSpec); hm.createPartition(table, partitionSpec);
/**
 * Creates the test database and a partitioned text-format table, then adds
 * one partition per spec in {@code parts}.
 *
 * <p>The table is re-fetched after creation so the metastore-assigned id can
 * be asserted and then cleared before partitions are added.
 *
 * @return the table as fetched back from the metastore (with its id unset)
 * @throws HiveException          on metastore failure
 * @throws AlreadyExistsException if the table already exists
 */
private Table createTestTable() throws HiveException, AlreadyExistsException {
  Database database = new Database();
  database.setName(dbName);
  hive.createDatabase(database, true);

  Table newTable = new Table(dbName, tableName);
  newTable.setDbName(dbName);
  newTable.setInputFormatClass(TextInputFormat.class);
  newTable.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  newTable.setPartCols(partCols);
  hive.createTable(newTable);

  // Fetch it back: the metastore should have assigned an id on create.
  newTable = hive.getTable(dbName, tableName);
  Assert.assertTrue(newTable.getTTable().isSetId());
  newTable.getTTable().unsetId();

  for (Map<String, String> spec : parts) {
    hive.createPartition(newTable, spec);
  }
  return newTable;
}
/**
 * Creates a partition on the given table with a default (null) location.
 *
 * <p>Convenience overload that delegates to
 * {@code createPartition(Table, Map, Path)} with no explicit location, so the
 * partition directory is derived from the table's path.
 *
 * @param tbl      table for which the partition needs to be created
 * @param partSpec partition keys and their values
 * @return the created partition object
 * @throws HiveException if the table doesn't exist or the partition already exists
 */
public Partition createPartition(Table tbl, Map<String, String> partSpec)
    throws HiveException {
  return createPartition(tbl, partSpec, null);
}
/**
 * Adds a {@code dt=key} partition to the table for the given event, then
 * points the partition at {@code finalPath} via an alter call.
 *
 * <p>Best-effort: any failure is logged and reported via the return value
 * rather than propagated.
 *
 * @param eventName the event name (also the table name looked up)
 * @param key       the value for the {@code dt} partition column
 * @param finalPath the location the new partition should point at
 * @param className the class name used to resolve the table
 * @return true if the partition was created and its location set, false otherwise
 */
private boolean addPartition(String eventName, String key, Path finalPath, String className) {
  try {
    Table t = getTable(eventName, className);
    // Program to the Map interface; the concrete HashMap type is an
    // implementation detail of this method.
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("dt", key);
    Partition p = client.createPartition(t, partSpec);
    // createPartition derives a default location; override it with finalPath.
    p.setLocation(finalPath.toString());
    client.alterPartition(database, eventName, p, null);
    return true;
  } catch (Exception e) {
    // Deliberate best-effort: callers check the boolean result.
    LOG.warn("Unable to add the partition ", e);
    return false;
  }
}
/**
 * Adds each partition discovered on the filesystem (but missing from the
 * metastore) individually, so a failure on one partition does not abort the
 * repair of the rest.
 *
 * @param db           metastore handle used to create the partitions
 * @param table        table the missing partitions belong to
 * @param partsNotInMs partitions present on disk but absent from the metastore
 * @param repairOutput receives a human-readable line per partition added
 */
private void msckAddPartitionsOneByOne(Hive db, Table table,
    List<CheckResult.PartitionResult> partsNotInMs, List<String> repairOutput) {
  for (CheckResult.PartitionResult missing : partsNotInMs) {
    String partName = missing.getPartitionName();
    try {
      db.createPartition(table, Warehouse.makeSpecFromName(partName));
      repairOutput.add("Repair: Added partition to metastore "
          + table.getTableName() + ':' + partName);
    } catch (Exception e) {
      // Best-effort repair: log and continue with the next partition.
      LOG.warn("Repair error, could not add partition to metastore: ", e);
    }
  }
}
Partition indexPart = db.getPartition(indexTbl, partSpec, false); if (indexPart == null) { indexPart = db.createPartition(indexTbl, partSpec); Partition indexPart = db.getPartition(indexTbl, pSpec, false); if (indexPart == null) { indexPart = db.createPartition(indexTbl, pSpec);
Partition indexPart = db.getPartition(indexTbl, partSpec, false); if (indexPart == null) { indexPart = db.createPartition(indexTbl, partSpec); Partition indexPart = db.getPartition(indexTbl, pSpec, false); if (indexPart == null) { indexPart = db.createPartition(indexTbl, pSpec);
for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) { try { db.createPartition(table, Warehouse.makeSpecFromName(part .getPartitionName())); repairOutput.add("Repair: Added partition to metastore "
db.createPartition(tbl, addPartitionDesc.getPartSpec()); } else { if (tbl.isView()) { db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl .getPath(), addPartitionDesc.getLocation()));