@Override public void preCreateTable(Table table) throws MetaException { if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE); } Arrays.stream(KafkaTableProperties.values()) .filter(KafkaTableProperties::isMandatory) .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()), "Set Table property " + key.getName())); // Put all the default at the pre create. Arrays.stream(KafkaTableProperties.values()).forEach((key) -> { if (table.getParameters().get(key.getName()) == null) { table.putToParameters(key.getName(), key.getDefaultValue()); } }); }
private Table updatePropertiesInTable(Table table, ReplicationSpec additionalPropertiesProvider) { // Remove all the entries from the parameters which are added by repl tasks internally. Map<String, String> parameters = table.getParameters(); if (parameters != null) { parameters.entrySet() .removeIf(e -> e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY)); } if (additionalPropertiesProvider.isInReplicationScope()) { // Current replication state must be set on the Table object only for bootstrap dump. // Event replication State will be null in case of bootstrap dump. if (additionalPropertiesProvider.getReplSpecType() != ReplicationSpec.Type.INCREMENTAL_DUMP) { table.putToParameters( ReplicationSpec.KEY.CURR_STATE_ID.toString(), additionalPropertiesProvider.getCurrentReplicationState()); } } else { // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE; // write(out, ",\""+ scopeKey.toString() +"\":\"" + replicationSpec.get(scopeKey) + "\""); // TODO: if we want to be explicit about this dump not being a replication dump, we can // uncomment this else section, but currently unneeded. Will require a lot of golden file // regen if we do so. } return table; }
/**
 * Builds the Hive table object to register at the copy destination: a copy of the
 * origin table re-pointed at the target database/location, owned by the flow
 * executor, and tagged with distcp registration metadata.
 *
 * @param originTable table being copied
 * @param targetLocation data location of the copy
 * @return the table to register on the target
 * @throws IOException wrapping any HiveException raised while building the table
 */
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
  try {
    Table target = originTable.copy();
    target.setDbName(this.targetDatabase);
    target.setDataLocation(targetLocation);
    // The table owner must be the flow executor.
    target.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
    target.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
    target.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.startTime));
    // The metastore will assign a fresh create time on registration.
    target.getTTable().unsetCreateTime();
    HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(target, this);
    return target;
  } catch (HiveException e) {
    throw new IOException(e);
  }
}
// NOTE(review): fragment — the enclosing if/else is outside this chunk.
// External branch: set the legacy "EXTERNAL" parameter together with the table type.
newTable.putToParameters("EXTERNAL", "TRUE");
newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
} else {
// Non-external branch: carry over the table comment and pin bucketing version 2.
newTable.putToParameters("comment", comment);
newTable.putToParameters("bucketing_version", "2");
/**
 * Verifies that after preCreateTable seeds the defaults, both input and output job
 * configuration expose the mandatory values supplied by the user and the documented
 * defaults for every non-mandatory Kafka table property.
 */
@Test
public void configureJobPropertiesWithDefaultValues() throws MetaException {
  KafkaStorageHandler handler = new KafkaStorageHandler();
  Table table = new Table();
  table.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
  table.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
  table.setTableType(TableType.EXTERNAL_TABLE.toString());
  // preCreateTable fills in defaults for every property left unset above.
  handler.preCreateTable(table);

  Properties tableProperties = new Properties();
  table.getParameters().forEach(tableProperties::setProperty);
  TableDesc tableDesc = Mockito.mock(TableDesc.class);
  Mockito.when(tableDesc.getProperties()).thenReturn(tableProperties);

  Map<String, String> jobProperties = new HashMap<>();
  handler.configureInputJobProperties(tableDesc, jobProperties);
  handler.configureOutputJobProperties(tableDesc, jobProperties);

  Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName()), TEST_TOPIC);
  Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()),
      LOCALHOST_9291);
  // Every optional property must have been propagated with its default value.
  for (KafkaTableProperties key : KafkaTableProperties.values()) {
    if (!key.isMandatory()) {
      Assert.assertEquals("Wrong match for key " + key.getName(), key.getDefaultValue(),
          jobProperties.get(key.getName()));
    }
  }
}
// NOTE(review): fragment of a test setup — the enclosing method is outside this chunk.
// Supply the two mandatory Kafka properties before invoking preCreateTable.
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
// preCreateTable seeds defaults for every unset property.
kafkaStorageHandler.preCreateTable(preCreateTable);
// Copy the resulting table parameters into the Properties instance used by the test.
preCreateTable.getParameters().forEach(properties::setProperty);
// NOTE(review): fragment — loop body/closing braces are outside this chunk.
// Copy every user-supplied table property onto the target table object.
if (tblProps != null) {
  for (Entry<String, String> prop : tblProps.entrySet()) {
    tt.putToParameters(prop.getKey(), prop.getValue());
// Stamp the standard metastore DDL_TIME parameter with the current time in
// seconds since the epoch (millis / 1000).
newTable.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
// NOTE(review): fragment of a test setup — the enclosing method is outside this chunk.
// Supply the two mandatory Kafka properties and mark the table external, as
// required by the storage handler, before invoking preCreateTable.
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
kafkaStorageHandler.preCreateTable(preCreateTable);
// NOTE(review): fragment — closing brace outside this chunk.
// Only stamp DDL_TIME when it is absent, preserving any value already set.
if (tbl.getParameters() == null || tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
  tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
/**
 * Creates a simple two-column test table (id:int, name:string), partitioned by the
 * given columns (all typed string), optionally flagging partition-level privileges.
 *
 * @param client metastore client used to create the table
 * @param dbName database to create the table in
 * @param tableName name of the new table
 * @param partCols partition column names (each becomes a string partition column)
 * @param setPartitionLevelPrivilages whether to set the PARTITION_LEVEL_PRIVILEGE flag
 * @return the created table object
 * @throws TException on metastore failure
 */
private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivilages) throws TException {
  TableBuilder builder = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string");
  for (String partCol : partCols) {
    builder.addPartCol(partCol, "string");
  }
  Table table = builder.build(metaStore.getConf());
  if (setPartitionLevelPrivilages) {
    table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
  }
  client.createTable(table);
  return table;
}
/**
 * Builds and creates a basic partitioned test table: columns id:int and name:string,
 * with one string-typed partition column per entry of {@code partCols}.
 *
 * @param client metastore client
 * @param dbName target database
 * @param tableName target table name
 * @param partCols names for string partition columns
 * @param setPartitionLevelPrivilages when true, adds the PARTITION_LEVEL_PRIVILEGE parameter
 * @return the table that was created
 * @throws TException if the metastore call fails
 */
private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivilages) throws TException {
  final TableBuilder builder = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string");
  for (String col : partCols) {
    builder.addPartCol(col, "string");
  }
  final Table table = builder.build(metaStore.getConf());
  if (setPartitionLevelPrivilages) {
    table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
  }
  client.createTable(table);
  return table;
}
/**
 * Creates a test table with columns id:int and name:string and a string partition
 * column for each requested name; can also mark the table for partition-level
 * privileges.
 *
 * @param client metastore client
 * @param dbName database name
 * @param tableName table name
 * @param partCols partition column names (typed string)
 * @param setPartitionLevelPrivilages whether to add the PARTITION_LEVEL_PRIVILEGE flag
 * @return the created table
 * @throws Exception on any failure creating the table
 */
private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivilages) throws Exception {
  final TableBuilder builder = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string");
  for (String col : partCols) {
    builder.addPartCol(col, "string");
  }
  final Table table = builder.build(metaStore.getConf());
  if (setPartitionLevelPrivilages) {
    table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
  }
  client.createTable(table);
  return table;
}
// NOTE(review): fragment — enclosing method and closing braces are outside this chunk.
// Record the current replication state id on the table being dumped.
ttable.putToParameters(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
// If the source table is marked external, flip the flag to FALSE for the dump
// (presumably so the replica side treats it as managed — TODO confirm with caller).
if ((ttable.getParameters().containsKey("EXTERNAL")) && (ttable.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))) {
  ttable.putToParameters("EXTERNAL","FALSE");
// NOTE(review): fragment — the enclosing if/else is outside this chunk.
// External branch: set the legacy "EXTERNAL" parameter together with the table type.
newTable.putToParameters("EXTERNAL", "TRUE");
newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
} else {
/**
 * Relocates the table's Avro schema under a per-event sub-directory of the given
 * destination and updates the avro.schema.url table parameter to point at the copy.
 * No-op when no destination is configured.
 *
 * @param table table whose schema URL parameter is rewritten in place
 * @param avroSchemaDestination base destination directory, or null to skip
 * @param eventId event identifier appended as the destination sub-directory
 * @return the same (possibly mutated) table
 * @throws Exception if copying the schema fails
 */
Table apply(Table table, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
    // Nothing to transform when no schema destination was configured.
    return table;
  }
  String destination = addTrailingSlash(avroSchemaDestination) + eventId;
  // NOTE(review): assumes the avro.schema.url parameter is present on the table —
  // a missing value would reach copy()/getAvroSchemaFileName() as null; confirm upstream.
  String source = table.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(source, destination);
  table.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
      destination + "/" + getAvroSchemaFileName(source));
  LOG.info("Avro SerDe transformation has been applied to table '{}'", table.getTableName());
  return table;
}
/**
 * Copies the table's Avro schema into {@code avroSchemaDestination}/{@code eventId}
 * and repoints the avro.schema.url parameter at the copied file. Returns the table
 * unchanged when no destination is given.
 *
 * @param table table to transform (mutated in place)
 * @param avroSchemaDestination destination base path, or null to do nothing
 * @param eventId sub-directory name for this event's schema copy
 * @return the transformed table
 * @throws Exception when the schema copy fails
 */
Table apply(Table table, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
    return table;
  }
  final String target = addTrailingSlash(avroSchemaDestination) + eventId;
  // NOTE(review): presumes avro.schema.url is set on the table; a null value would
  // be passed straight into copy() — verify against callers.
  final String schemaUrl = table.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(schemaUrl, target);
  table.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
      target + "/" + getAvroSchemaFileName(schemaUrl));
  LOG.info("Avro SerDe transformation has been applied to table '{}'", table.getTableName());
  return table;
}
void updateLastCommitTimeSynced() { // Set the last commit time from the TBLproperties String lastCommitSynced = activeTimeline.lastInstant().get().getTimestamp(); try { Table table = client.getTable(syncConfig.databaseName, syncConfig.tableName); table.putToParameters(HOODIE_LAST_COMMIT_TIME_SYNC, lastCommitSynced); client.alter_table(syncConfig.databaseName, syncConfig.tableName, table, true); } catch (Exception e) { throw new HoodieHiveSyncException( "Failed to get update last commit time synced to " + lastCommitSynced, e); } }
private void setReplicaTableType(Table source, Table replica) { if (TableType.VIRTUAL_VIEW.name().equals(source.getTableType())) { replica.setTableType(TableType.VIRTUAL_VIEW.name()); return; } // We set the table to external no matter what. We don't want to delete data accidentally when dropping a mirrored // table. replica.setTableType(TableType.EXTERNAL_TABLE.name()); replica.putToParameters(EXTERNAL, "TRUE"); }
private void setReplicaTableType(Table source, Table replica) { if (TableType.VIRTUAL_VIEW.name().equals(source.getTableType())) { replica.setTableType(TableType.VIRTUAL_VIEW.name()); return; } // We set the table to external no matter what. We don't want to delete data accidentally when dropping a mirrored // table. replica.setTableType(TableType.EXTERNAL_TABLE.name()); replica.putToParameters(EXTERNAL, "TRUE"); }