@Override
public void putToParameters(String key, String value) {
  partition.putToParameters(key, value);
}

@Override
public String getLocation() {
  return partition.getSd().getLocation();
}
@Override
public void putToParameters(String key, String value) {
  partitionList.get(index).putToParameters(key, value);
}
@Override
public void apply(Partition t, Object[] fields) {
  t.putToParameters((String) fields[1], extractSqlClob(fields[2]));
}});
// Perform conversion of null map values
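// The anonymous class above is a row callback: fields[1] and fields[2] carry the
// parameter key and (CLOB-typed) value, and fields[0] is presumably the partition id
// used for the join. A hedged, self-contained rendition of the same fold; the class
// name, sample rows, and the extractSqlClob stub are ours, not Hive's (the real
// MetaStoreDirectSql.extractSqlClob also handles java.sql.Clob).
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.Partition;

public class ParamFoldSketch {
  // Stand-in for the CLOB extraction helper assumed by the snippet above.
  static String extractSqlClob(Object value) {
    return value == null ? null : value.toString();
  }

  public static void main(String[] args) {
    Partition t = new Partition();
    List<Object[]> rows = Arrays.asList(
        new Object[] {1L, "numFiles", "12"},
        new Object[] {1L, "totalSize", "4096"});
    for (Object[] fields : rows) {
      t.putToParameters((String) fields[1], extractSqlClob(fields[2]));
    }
    System.out.println(t.getParameters()); // {numFiles=12, totalSize=4096}
  }
}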
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    // Deep-copy the Thrift partition so the target can be mutated independently of the origin.
    Partition targetPartition = new Partition(this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    targetPartition.getTable().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
    targetPartition.getTPartition().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
    targetPartition.getTPartition().putToParameters(HiveDataset.REGISTERER,
        HiveCopyEntityHelper.GOBBLIN_DISTCP);
    targetPartition.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    targetPartition.setLocation(targetLocation.toString());
    // Drop the inherited create time; the target metastore will assign its own.
    targetPartition.getTPartition().unsetCreateTime();
    return targetPartition;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
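// Hedged sketch of the Thrift-level calls the helper above leans on, using a bare
// org.apache.hadoop.hive.metastore.api.Partition (the ql.metadata.Partition wrapper
// is omitted). The db name, parameter key/value, and class name are placeholders.
import org.apache.hadoop.hive.metastore.api.Partition;

public class TargetPartitionSketch {
  public static void main(String[] args) {
    Partition origin = new Partition();
    origin.setCreateTime((int) (System.currentTimeMillis() / 1000));

    // deepCopy() yields an independent Thrift object, so the mutations below
    // do not leak back into the origin partition.
    Partition target = origin.deepCopy();
    target.setDbName("target_db");                           // placeholder
    target.putToParameters("registerer", "gobblin-distcp");  // placeholder key/value
    target.unsetCreateTime();                                // forget the inherited create time

    System.out.println(origin.isSetCreateTime()); // true
    System.out.println(target.isSetCreateTime()); // false
  }
}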
@Override
public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
    throws SemanticException, IOException {
  TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
  try {
    // Remove the entries that repl tasks add to the parameters internally.
    Map<String, String> parameters = partition.getParameters();
    if (parameters != null) {
      parameters.entrySet().removeIf(e -> e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY));
    }
    if (additionalPropertiesProvider.isInReplicationScope()) {
      // The current replication state must be set on the Partition object only for a
      // bootstrap dump; the event replication state is null in that case.
      if (additionalPropertiesProvider.getReplSpecType() != ReplicationSpec.Type.INCREMENTAL_DUMP) {
        partition.putToParameters(ReplicationSpec.KEY.CURR_STATE_ID.toString(),
            additionalPropertiesProvider.getCurrentReplicationState());
      }
    }
    writer.jsonGenerator.writeString(serializer.toString(partition, UTF_8));
    writer.jsonGenerator.flush();
  } catch (TException e) {
    throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
  }
}
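// A minimal standalone sketch of the serialization step above: any Thrift-generated
// object, Partition included, can be rendered as Thrift-JSON this way. Assumes
// libthrift and hive-metastore on the classpath; the parameter key/value and the
// class name are placeholders.
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TJSONProtocol;

public class PartitionJsonSketch {
  public static void main(String[] args) throws TException {
    Partition partition = new Partition();
    partition.putToParameters("sample.key", "sample.value");
    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
    System.out.println(serializer.toString(partition));
  }
}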
// Stamp a DDL time if the incoming partition has none (or a zero placeholder).
if (tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
    Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
  tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME,
      Long.toString(System.currentTimeMillis() / 1000));
}
private static void adjust(HiveMetaStoreClient client, Partition part, String dbName,
    String tblName, boolean isThriftClient) throws TException {
  Partition part_get = client.getPartition(dbName, tblName, part.getValues());
  if (isThriftClient) {
    part.setCreateTime(part_get.getCreateTime());
    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME,
        Long.toString(part_get.getCreateTime()));
  }
  part.setWriteId(part_get.getWriteId());
}
// Same idiom for a newly created partition: default DDL_TIME when absent or zero.
if (new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
    Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
  new_part.putToParameters(hive_metastoreConstants.DDL_TIME,
      Long.toString(System.currentTimeMillis() / 1000));
}
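// The two fragments above share one idiom; a self-contained rendition as a helper
// (the class and method names are ours, not Hive's), with a guard for the null
// parameters map that the fragments assume is already present:
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

public final class DdlTimeSketch {
  static void stampDdlTimeIfMissing(Partition part) {
    String ddlTime = part.getParameters() == null
        ? null : part.getParameters().get(hive_metastoreConstants.DDL_TIME);
    if (ddlTime == null || Integer.parseInt(ddlTime) == 0) {
      // putToParameters creates the parameters map on demand in Thrift-generated code.
      part.putToParameters(hive_metastoreConstants.DDL_TIME,
          Long.toString(System.currentTimeMillis() / 1000));
    }
  }
}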
Partition tptn = partition.getTPartition();
if (replicationSpec.isInReplicationScope()) {
  tptn.putToParameters(ReplicationSpec.KEY.CURR_STATE_ID.toString(),
      replicationSpec.getCurrentReplicationState());
  // If the source partition was marked external, import it as non-external.
  if (tptn.getParameters().containsKey("EXTERNAL")
      && tptn.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE")) {
    tptn.putToParameters("EXTERNAL", "FALSE");
  }
}
part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
@Override
public void apply(Partition t, Object[] fields) {
  t.putToParameters((String) fields[1], (String) fields[2]);
}});
Partition apply(Partition partition, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
    return partition;
  }
  // Copy the Avro schema into a per-event directory, then point the partition at the copy.
  avroSchemaDestination = addTrailingSlash(avroSchemaDestination) + eventId;
  String avroSchemaSource = partition.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(avroSchemaSource, avroSchemaDestination);
  partition.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
      avroSchemaDestination + "/" + getAvroSchemaFileName(avroSchemaSource));
  LOG.info("Avro SerDe transformation has been applied to partition '{}'", partition);
  return partition;
}
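// Hedged sketch of the parameter rewrite above in isolation. "avro.schema.url" is the
// standard Avro SerDe property that AVRO_SCHEMA_URL_PARAMETER presumably names; the
// paths, event id, and class name are placeholders, and the file copy is elided.
import org.apache.hadoop.hive.metastore.api.Partition;

public class AvroUrlSketch {
  public static void main(String[] args) {
    Partition partition = new Partition();
    partition.putToParameters("avro.schema.url", "hdfs://source/schemas/event.avsc");

    // Per-event destination directory, as apply() above builds it.
    String destination = "hdfs://replica/schemas/" + "event-0001";
    partition.putToParameters("avro.schema.url", destination + "/event.avsc");

    System.out.println(partition.getParameters().get("avro.schema.url"));
    // -> hdfs://replica/schemas/event-0001/event.avsc
  }
}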