AddPartitionDesc addPartitionDesc =
    new AddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists);
for (int num = start; num < numCh; num++) {
  ASTNode child = (ASTNode) ast.getChild(num);
  switch (child.getToken().getType()) {
  case HiveParser.TOK_PARTSPEC:
    if (currentPart != null) {
      addPartitionDesc.addPartition(currentPart, currentLocation);
      currentLocation = null;
    }
    // ...
  }
}
// add the last partition spec that was read
addPartitionDesc.addPartition(currentPart, currentLocation);
for (int index = 0; index < addPartitionDesc.getPartitionCount(); index++) {
  OnePartitionDesc desc = addPartitionDesc.getPartition(index);
  if (desc.getLocation() == null) {
    if (desc.getPartParams() == null) {
      // ...
    }
  }
}
if (addPartitionDesc.getPartitionCount() == 0) {
  // ...
}
// view case: build a WHERE clause that matches the new partitions
cmd.append(" WHERE ");
boolean firstOr = true;
for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) {
  AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc.getPartition(i);
  if (firstOr) {
    firstOr = false;
  }
  // ...
}
public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException {
  Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
  int size = addPartitionDesc.getPartitionCount();
  List<org.apache.hadoop.hive.metastore.api.Partition> in =
      new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
  for (int i = 0; i < size; ++i) {
    in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i)));
  }
  List<Partition> out = new ArrayList<Partition>();
  if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()) {
    for (org.apache.hadoop.hive.metastore.api.Partition outPart
        : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
      out.add(new Partition(tbl, outPart));
    }
  } else {
    // replication scope: insert-if-not-exists, alter-if-exists
    List<org.apache.hadoop.hive.metastore.api.Partition> partsToAdd =
        new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
    List<org.apache.hadoop.hive.metastore.api.Partition> partsToAlter =
        new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
    List<String> part_names = new ArrayList<String>();
    for (org.apache.hadoop.hive.metastore.api.Partition p : in) {
      part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues()));
      try {
        org.apache.hadoop.hive.metastore.api.Partition ptn = getMSC().getPartition(
            addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues());
        if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn)) {
          partsToAlter.add(p);
        }
      } catch (NoSuchObjectException nsoe) {
        // the partition does not exist yet, so add it
        partsToAdd.add(p);
      }
    }
    for (org.apache.hadoop.hive.metastore.api.Partition outPart
        : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) {
      out.add(new Partition(tbl, outPart));
    }
    getMSC().alter_partitions(addPartitionDesc.getDbName(),
        addPartitionDesc.getTableName(), partsToAlter, null);
    for (org.apache.hadoop.hive.metastore.api.Partition outPart
        : getMSC().getPartitionsByNames(addPartitionDesc.getDbName(),
            addPartitionDesc.getTableName(), part_names)) {
      out.add(new Partition(tbl, outPart));
    }
  }
  // ...
}
private static Task<? extends Serializable> alterSinglePartition(
    URI fromURI, FileSystem fs, ImportTableDesc tblDesc, Table table, Warehouse wh,
    AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec,
    org.apache.hadoop.hive.ql.metadata.Partition ptn,
    EximUtil.SemanticAnalyzerWrapperContext x) {
  addPartitionDesc.setReplaceMode(true);
  if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) {
    addPartitionDesc.setReplicationSpec(replicationSpec);
  }
  addPartitionDesc.getPartition(0).setLocation(ptn.getLocation()); // use existing location
  return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc),
      x.getConf());
}
private static void createPartitionIfNotExists(HiveEndPoint ep, IMetaStoreClient msClient,
    HiveConf conf) throws PartitionCreationFailed {
  if (ep.partitionVals.isEmpty()) {
    return;
  }
  try {
    org.apache.hadoop.hive.ql.metadata.Table tableObject =
        new org.apache.hadoop.hive.ql.metadata.Table(msClient.getTable(ep.database, ep.table));
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(ep.database, ep.table, true);
    String partLocation = new Path(tableObject.getDataLocation(),
        Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
    msClient.add_partition(partition);
  } catch (AlreadyExistsException e) {
    // ignore this - multiple clients may be trying to create the same partition.
    // AddPartitionDesc has an ifNotExists flag, but it is not propagated to
    // HMSHandler.add_partitions_core(), which therefore throws.
  } catch (HiveException | TException e) {
    LOG.error("Failed to create partition : " + ep, e);
    throw new PartitionCreationFailed(ep, e);
  }
}
AddPartitionDesc apd = new AddPartitionDesc(
    table.getDbName(), table.getTableName(), false);
try {
  int counter = 0;
  for (CheckResult.PartitionResult part : partsNotInMs) {
    counter++;
    apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null);
    repairOutput.add("Repair: Added partition to metastore "
        + msckDesc.getTableName() + ':' + part.getPartitionName());
    // flush a full batch (or the final, partial one) and start a fresh descriptor
    if (counter % batch_size == 0 || counter == partsNotInMs.size()) {
      db.createPartitions(apd);
      apd = new AddPartitionDesc(table.getDbName(), table.getTableName(), false);
    }
  }
  // ... (rest of try/catch elided)
private static AddPartitionDesc getBaseAddPartitionDescFromPartition(
    Path fromPath, String dbname, ImportTableDesc tblDesc, Partition partition)
    throws MetaException, SemanticException {
  AddPartitionDesc partsDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
      EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
      partition.getSd().getLocation(), partition.getParameters());
  AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
  partDesc.setInputFormat(partition.getSd().getInputFormat());
  partDesc.setOutputFormat(partition.getSd().getOutputFormat());
  partDesc.setNumBuckets(partition.getSd().getNumBuckets());
  partDesc.setCols(partition.getSd().getCols());
  partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
  partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
  partDesc.setBucketCols(partition.getSd().getBucketCols());
  partDesc.setSortCols(partition.getSd().getSortCols());
  partDesc.setLocation(new Path(fromPath,
      Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
  return partsDesc;
}
public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException {
  Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
  int size = addPartitionDesc.getPartitionCount();
  List<org.apache.hadoop.hive.metastore.api.Partition> in =
      new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
  for (int i = 0; i < size; ++i) {
    in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i)));
  }
  List<Partition> out = new ArrayList<Partition>();
  if (!addPartitionDesc.getReplaceMode()) {
    for (org.apache.hadoop.hive.metastore.api.Partition outPart
        : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
      out.add(new Partition(tbl, outPart));
    }
  } else {
    // replace mode: alter the existing partitions in place, then re-read them
    getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), in);
    List<String> part_names = new ArrayList<String>();
    for (org.apache.hadoop.hive.metastore.api.Partition p : in) {
      part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues()));
    }
    for (org.apache.hadoop.hive.metastore.api.Partition outPart
        : getMSC().getPartitionsByNames(addPartitionDesc.getDbName(),
            addPartitionDesc.getTableName(), part_names)) {
      out.add(new Partition(tbl, outPart));
    }
  }
  // ...
}
try {
  AddPartitionDesc partsDesc = new AddPartitionDesc(tblDesc.getDatabaseName(),
      tblDesc.getTableName(),
      EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
      partition.getSd().getLocation(), partition.getParameters());
  AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
  partDesc.setInputFormat(partition.getSd().getInputFormat());
  partDesc.setOutputFormat(partition.getSd().getOutputFormat());
  // ...
  partDesc.setLocation(new Path(fromPath,
      Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
  partsDesc.setReplicationSpec(replicationSpec());
  // ... (rest of try/catch elided)
public void addPartition(Map<String, String> partSpec, String location) {
  addPartition(partSpec, location, null);
}
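For reference, a minimal caller-side sketch of this convenience overload, pieced together from the usage patterns elsewhere in this listing; the database, table, and partition column names ("demo_db", "demo_table", "ds") are hypothetical:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

public class AddPartitionSketch {
  // Adds one partition, letting the metastore derive the location from the
  // table's default warehouse path.
  static void addDailyPartition(HiveConf conf) throws HiveException {
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2016-01-01"); // hypothetical partition spec

    // ifNotExists = true: an already-existing partition is not an error
    AddPartitionDesc desc = new AddPartitionDesc("demo_db", "demo_table", true);
    // null location: no explicit path, so the default one is used
    desc.addPartition(partSpec, null);

    Hive.get(conf).createPartitions(desc);
  }
}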
int stmtId = 0;
for (int index = 0; index < addPartitionDesc.getPartitionCount(); index++) {
  OnePartitionDesc desc = addPartitionDesc.getPartition(index);
  if (desc.getLocation() != null) {
    AcidUtils.validateAcidPartitionLocation(desc.getLocation(), conf);
  }
  // ...
}
if (addPartitionDesc.isIfNotExists()) {
  // ...
}
Map<String, String> lastReplicatedPartSpec = null;
if (!encounteredTheLastReplicatedPartition) {
  lastReplicatedPartSpec = lastPartitionReplicated.getPartition(0).getPartSpec();
  LOG.info("Start processing from partition info spec : {}",
      StringUtils.mapToString(lastReplicatedPartSpec));
}
// skip forward until the last partition replicated in the previous run is seen
while (!encounteredTheLastReplicatedPartition && partitionIterator.hasNext()) {
  AddPartitionDesc addPartitionDesc = partitionIterator.next();
  Map<String, String> currentSpec = addPartitionDesc.getPartition(0).getPartSpec();
  encounteredTheLastReplicatedPartition = lastReplicatedPartSpec.equals(currentSpec);
}
// ... later, for each remaining partition:
Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
Task<?> ptnRootTask = null;
ReplLoadOpType loadPtnType = getLoadPartitionType(partSpec);
for (Iterator<AddPartitionDesc> partnIter = partitionDescs
    .listIterator(); partnIter.hasNext();) {
  AddPartitionDesc addPartitionDesc = partnIter.next();
  if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(parsedPartSpec)) {
    found = true;
  } else {
    partnIter.remove();
  }
}
// ...
x.getConf().set("import.destination.table", tblDesc.getTableName());
for (AddPartitionDesc addPartitionDesc : partitionDescs) {
  addPartitionDesc.setTableName(tblDesc.getTableName());
}
Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION,
    addPartitionDesc.getExpectView());
Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
if (checkPart != null && addPartitionDesc.getIfNotExists()) {
  return 0;
}
if (addPartitionDesc.getLocation() == null) {
  db.createPartition(tbl, addPartitionDesc.getPartSpec());
} else {
  if (tbl.isView()) {
    // ...
  }
  db.createPartition(tbl, addPartitionDesc.getPartSpec(),
      new Path(tbl.getPath(), addPartitionDesc.getLocation()));
}
Partition part = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
work.getOutputs().add(new WriteEntity(part));
if (isPartitioned(tblDesc)) {
  for (AddPartitionDesc addPartitionDesc : partitionDescs) {
    addPartitionDesc.setReplicationSpec(replicationSpec);
    t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh,
        addPartitionDesc, replicationSpec, x));
  }
}
// ...
x.getLOG().debug("table partitioned");
for (AddPartitionDesc addPartitionDesc : partitionDescs) {
  addPartitionDesc.setReplicationSpec(replicationSpec);
  Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
  org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
  // ...
}
if (currentPart != null) {
  validatePartitionValues(currentPart);
  AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
      db.getCurrentDatabase(), tblName, currentPart, currentLocation,
      ifNotExists, expectView);
  // ...
}
// ...
try {
  tab.isValidSpec(partitionDesc.getPartSpec());
} catch (HiveException ex) {
  throw new SemanticException(ex.getMessage(), ex);
}
// ...
cmd.append("(");
for (Map.Entry<String, String> entry : partitionDesc.getPartSpec().entrySet()) {
  // ...
}
private Task<? extends Serializable> alterSinglePartition(
    URI fromURI, FileSystem fs, CreateTableDesc tblDesc, Table table, Warehouse wh,
    AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec,
    org.apache.hadoop.hive.ql.metadata.Partition ptn) {
  addPartitionDesc.setReplaceMode(true);
  addPartitionDesc.getPartition(0).setLocation(ptn.getLocation()); // use existing location
  return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf);
}
@Override
public PartitionInfo createPartitionIfNotExists(final List<String> partitionValues)
    throws StreamingException {
  String partLocation = null;
  String partName = null;
  boolean exists = false;
  try {
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
    partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
    partLocation = new Path(tableObject.getDataLocation(),
        Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
    if (getMSC() == null) {
      // We assume it doesn't exist if we can't check it, so the driver will decide.
      return new PartitionInfo(partName, partLocation, false);
    }
    getMSC().add_partition(partition);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created partition {} for table {}", partName,
          tableObject.getFullyQualifiedName());
    }
  } catch (AlreadyExistsException e) {
    exists = true;
  } catch (HiveException | TException e) {
    throw new StreamingException("Unable to create partition for values: " + partitionValues
        + " connection: " + toConnectionInfoString(), e);
  }
  return new PartitionInfo(partName, partLocation, exists);
}
AddPartitionDesc partitionDesc = new AddPartitionDesc(database, tableName, false);
Map<String, String> partSpec = new HashMap<String, String>();
partSpec.put("dummy_partition_col", "dummy_val");
partitionDesc.addPartition(partSpec, partDir.toUri().toString());
Hive.get(conf).createPartitions(partitionDesc);
log.info("{}: Added partition {}", tableName, partDir.toUri().toString());
private static AddPartitionDesc getBaseAddPartitionDescFromPartition(
    Path fromPath, String dbName, ImportTableDesc tblDesc, Partition partition,
    ReplicationSpec replicationSpec, HiveConf conf)
    throws MetaException, SemanticException {
  AddPartitionDesc partsDesc = new AddPartitionDesc(dbName, tblDesc.getTableName(),
      EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
      partition.getSd().getLocation(), partition.getParameters());
  AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
  partDesc.setInputFormat(partition.getSd().getInputFormat());
  partDesc.setOutputFormat(partition.getSd().getOutputFormat());
  partDesc.setNumBuckets(partition.getSd().getNumBuckets());
  partDesc.setCols(partition.getSd().getCols());
  partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
  partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
  partDesc.setBucketCols(partition.getSd().getBucketCols());
  partDesc.setSortCols(partition.getSd().getSortCols());
  if (replicationSpec.isInReplicationScope() && tblDesc.isExternal()
      && !replicationSpec.isMigratingToExternalTable()) {
    String newLocation = ReplExternalTables
        .externalTableLocation(conf, partition.getSd().getLocation());
    LOG.debug("partition {} has data location: {}", partition, newLocation);
    partDesc.setLocation(newLocation);
  } else {
    partDesc.setLocation(new Path(fromPath,
        Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
  }
  return partsDesc;
}