/**
 * Points an existing partition at a new filesystem location, the metastore
 * equivalent of {@code ALTER TABLE ... PARTITION (...) SET LOCATION '...'}.
 *
 * @param dbName database containing the table (unused here; kept for callers)
 * @param table the table owning the partition
 * @param partName the partition name, used for logging
 * @param part the metastore partition to re-point
 * @param newLocation the new location for the partition's data
 * @throws HiveException if the partition wrapper cannot be built
 * @throws TException on metastore communication failure
 */
void updatePartitionLocation(String dbName, Table table, String partName,
    Partition part, Path newLocation) throws HiveException, TException {
  String newPath = newLocation.toString();
  LOG.info(String.format("ALTER TABLE %s PARTITION (%s) SET LOCATION '%s'",
      getQualifiedName(table), partName, newPath));

  org.apache.hadoop.hive.ql.metadata.Partition updatedPart =
      new org.apache.hadoop.hive.ql.metadata.Partition(
          new org.apache.hadoop.hive.ql.metadata.Table(table), part);
  updatedPart.setLocation(newPath);
  alterPartitionInternal(table, updatedPart);
}
/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of partition (har schema URI)
 * @param level - archiving level recorded on the partition via setIsArchived
 */
private void setArchived(Partition p, Path harPath, int level) {
  // Must not already be archived: archiving twice would overwrite the
  // saved original location with a har:// URI.
  assert !ArchiveUtils.isArchived(p);
  setIsArchived(p, true, level);
  // Preserve the pre-archive location so setUnArchived can restore it.
  setOriginalLocation(p, p.getLocation());
  p.setLocation(harPath.toString());
}
/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of partition (har schema URI)
 * @param level - archiving level recorded on the partition via setIsArchived
 */
private void setArchived(Partition p, Path harPath, int level) {
  // Archiving an already-archived partition would clobber the saved
  // original location, so this must only run on unarchived partitions.
  assert(ArchiveUtils.isArchived(p) == false);
  setIsArchived(p, true, level);
  // Preserve the pre-archive location so it can be restored on unarchive.
  setOriginalLocation(p, p.getLocation());
  p.setLocation(harPath.toString());
}
/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
private void setUnArchived(Partition p) {
  assert ArchiveUtils.isArchived(p);
  // Capture the saved pre-archive location before clearing it.
  String restoredLocation = getOriginalLocation(p);
  setIsArchived(p, false, 0);
  setOriginalLocation(p, null);
  assert restoredLocation != null;
  p.setLocation(restoredLocation);
}
/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
private void setUnArchived(Partition p) {
  // Only meaningful for partitions previously archived via setArchived.
  assert(ArchiveUtils.isArchived(p) == true);
  // Capture the saved pre-archive location before clearing it below.
  String parentDir = getOriginalLocation(p);
  setIsArchived(p, false, 0);
  setOriginalLocation(p, null);
  // An archived partition is expected to always carry its original location.
  assert(parentDir != null);
  p.setLocation(parentDir);
}
/**
 * Builds the partition object describing the copy destination: a deep copy
 * of the origin partition re-pointed at the target database and location,
 * tagged with gobblin-distcp registration metadata.
 *
 * @param originPartition partition being copied
 * @param targetLocation location the target partition should point at
 * @return the target partition (not registered in any metastore here)
 * @throws IOException wrapping any HiveException raised while building it
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    String targetDb = this.hiveCopyEntityHelper.getTargetDatabase();

    Partition target = new Partition(this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());

    // Both the wrapper's table and the underlying thrift partition carry
    // a database name; re-point each at the target database.
    target.getTable().setDbName(targetDb);
    target.getTPartition().setDbName(targetDb);

    // Tag as registered by gobblin-distcp, stamped with the job start time.
    target.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    target.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));

    target.setLocation(targetLocation.toString());
    // Drop the copied create time so the metastore assigns a fresh one.
    target.getTPartition().unsetCreateTime();
    return target;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
if (location != null) { location = Utilities.getQualifiedPath(conf, new Path(location)); tmpPart.setLocation(location);
if (location != null) { location = Utilities.getQualifiedPath(conf, new Path(location)); newPart.setLocation(location);
part.setLocation(newLocation); authorize(part, Privilege.ALTER_DATA); } else {
/**
 * Adds a partition keyed on {@code dt=key} to the given event's table and
 * points it at {@code finalPath}. Failures are logged and reported through
 * the return value rather than thrown (best-effort semantics).
 *
 * @param eventName the event name, used to resolve the table
 * @param key the value of the "dt" partition column
 * @param finalPath the final path the partition's location is set to
 * @param className the class name, used to resolve the table
 * @return true if the partition was created and altered successfully
 */
private boolean addPartition(String eventName, String key, Path finalPath, String className) {
  try {
    Table t = getTable(eventName, className);
    // Diamond operator: the type arguments are already on the left-hand side.
    HashMap<String, String> partSpec = new HashMap<>();
    partSpec.put("dt", key);
    Partition p = client.createPartition(t, partSpec);
    p.setLocation(finalPath.toString());
    client.alterPartition(database, eventName, p, null);
    return true;
  } catch (Exception e) {
    // Deliberate broad catch: callers handle failure via the boolean return.
    LOG.warn("Unable to add the partition ", e);
    return false;
  }
}
/**
 * Marks the supplied Partition object as archived by mutating its
 * attributes in place; the metastore itself is untouched and must be
 * updated separately via alter_partition.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of partition (har schema URI)
 * @param level - archiving level recorded on the partition
 */
private void setArchived(Partition p, Path harPath, int level) {
  assert !ArchiveUtils.isArchived(p);
  setIsArchived(p, true, level);
  // Remember where the data lived so unarchiving can restore it.
  String previousLocation = p.getLocation();
  setOriginalLocation(p, previousLocation);
  p.setLocation(harPath.toString());
}
p.setLocation(harUri.toString());
/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
private void setUnArchived(Partition p) {
  // Only meaningful for partitions previously archived via setArchived.
  assert(ArchiveUtils.isArchived(p) == true);
  // Capture the saved pre-archive location before it is cleared below.
  String parentDir = getOriginalLocation(p);
  setIsArchived(p, false, 0);
  setOriginalLocation(p, null);
  // An archived partition is expected to always carry its original location.
  assert(parentDir != null);
  p.setLocation(parentDir);
}
/**
 * Reverts the supplied Partition object's attributes to the un-archived
 * state. The metastore is not modified - callers must follow up with a
 * separate alter_partition call.
 *
 * @param p - the partition to modify
 */
private void setUnArchived(Partition p) {
  assert isArchived(p);
  // Grab the saved pre-archive location before clearing it.
  String originalDir = getOriginalLocation(p);
  setIsArchived(p, false);
  setOriginalLocation(p, null);
  assert originalDir != null;
  p.setLocation(originalDir);
}
latestPart.getTPartition().getSd().getSerdeInfo().getParameters().putAll( partition.getTPartition().getSd().getSerdeInfo().getParameters()); latestPart.setLocation(partition.getLocation()); latestPart.setInputFormatClass(partition.getInputFormatClass()); latestPart.setOutputFormatClass(partition.getOutputFormatClass().asSubclass(HiveOutputFormat.class));
latestPart.getTPartition().getSd().getSerdeInfo().getParameters().putAll( partition.getTPartition().getSd().getSerdeInfo().getParameters()); latestPart.setLocation(partition.getLocation()); latestPart.setInputFormatClass(partition.getInputFormatClass()); latestPart.setOutputFormatClass(partition.getOutputFormatClass().asSubclass(HiveOutputFormat.class));
/**
 * Copies metadata (parameters, serde info, location, input/output formats
 * and update period) from the supplied XPartition into the given Partition
 * object in place.
 *
 * @param partition the partition to update
 * @param xp the source of the new metadata
 * @throws ClassNotFoundException if the input or output format class named
 *         in {@code xp} cannot be loaded
 */
public static void updatePartitionFromXPartition(Partition partition, XPartition xp)
    throws ClassNotFoundException {
  partition.getParameters().putAll(mapFromXProperties(xp.getPartitionParameters()));
  partition.getTPartition().getSd().getSerdeInfo()
      .setParameters(mapFromXProperties(xp.getSerdeParameters()));
  partition.setLocation(xp.getLocation());

  // Format classes are optional; only set them when named in the XPartition.
  String inputFormatName = xp.getInputFormat();
  if (inputFormatName != null) {
    partition.setInputFormatClass(Class.forName(inputFormatName).asSubclass(InputFormat.class));
  }
  String outputFormatName = xp.getOutputFormat();
  if (outputFormatName != null) {
    partition.setOutputFormatClass(
        Class.forName(outputFormatName).asSubclass(HiveOutputFormat.class));
  }

  partition.getParameters().put(MetastoreConstants.PARTITION_UPDATE_PERIOD,
      xp.getUpdatePeriod().name());
  partition.getTPartition().getSd().getSerdeInfo().setSerializationLib(xp.getSerdeClassname());
}
/**
 * Copies metadata (parameters, serde info, location, input/output formats
 * and update period) from the supplied XPartition into the given Partition
 * object in place.
 *
 * @param partition the partition to update
 * @param xp the source of the new metadata
 * @throws ClassNotFoundException if the input or output format class named
 *         in {@code xp} cannot be loaded
 */
public static void updatePartitionFromXPartition(Partition partition, XPartition xp) throws ClassNotFoundException {
  partition.getParameters().putAll(mapFromXProperties(xp.getPartitionParameters()));
  partition.getTPartition().getSd().getSerdeInfo().setParameters(mapFromXProperties(xp.getSerdeParameters()));
  partition.setLocation(xp.getLocation());
  // Format classes are optional; only set them when named in the XPartition.
  if (xp.getInputFormat() != null) {
    partition.setInputFormatClass(Class.forName(xp.getInputFormat()).asSubclass(InputFormat.class));
  }
  if (xp.getOutputFormat() != null) {
    Class<? extends HiveOutputFormat> outputFormatClass =
        Class.forName(xp.getOutputFormat()).asSubclass(HiveOutputFormat.class);
    partition.setOutputFormatClass(outputFormatClass);
  }
  // NOTE(review): assumes xp.getUpdatePeriod() is never null here - confirm with callers.
  partition.getParameters().put(MetastoreConstants.PARTITION_UPDATE_PERIOD, xp.getUpdatePeriod().name());
  partition.getTPartition().getSd().getSerdeInfo().setSerializationLib(xp.getSerdeClassname());
}
/**
 * Builds the partition object describing the copy destination: a deep copy
 * of the origin partition re-pointed at the target database and location,
 * tagged with gobblin-distcp registration metadata.
 *
 * @param originPartition partition being copied
 * @param targetLocation location the target partition should point at
 * @return the target partition (not registered in any metastore here)
 * @throws IOException wrapping any HiveException raised while building it
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    Partition targetPartition = new Partition(this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    // Both the wrapper's table and the underlying thrift partition carry a db name.
    targetPartition.getTable().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
    targetPartition.getTPartition().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
    // Tag as registered by gobblin-distcp, stamped with the copy job's start time.
    targetPartition.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    targetPartition.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    targetPartition.setLocation(targetLocation.toString());
    // Drop the copied create time so the metastore assigns a fresh one.
    targetPartition.getTPartition().unsetCreateTime();
    return targetPartition;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
/**
 * Builds the partition object describing the copy destination: a deep copy
 * of the origin partition re-pointed at the target database and location,
 * tagged with gobblin-distcp registration metadata.
 *
 * @param originPartition partition being copied
 * @param targetLocation location the target partition should point at
 * @return the target partition (not registered in any metastore here)
 * @throws IOException wrapping any HiveException raised while building it
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    Partition targetPartition = new Partition(this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    // Both the wrapper's table and the underlying thrift partition carry a db name.
    targetPartition.getTable().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
    targetPartition.getTPartition().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
    // Tag as registered by gobblin-distcp, stamped with the copy job's start time.
    targetPartition.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    targetPartition.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    targetPartition.setLocation(targetLocation.toString());
    // Drop the copied create time so the metastore assigns a fresh one.
    targetPartition.getTPartition().unsetCreateTime();
    return targetPartition;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}