NUM_SERVER_INSTANCES, true); _offlineBuilder.setTableName("testOfflineTable").setTimeColumnName("timeColumn").setTimeType("DAYS") .setRetentionTimeUnit("DAYS").setRetentionTimeValue("5").setServerTenant("DefaultTenant");
@DataProvider(name = "offlineTagConfigTestDataProvider")
public Object[][] offlineTagConfigTestDataProvider() {
  // Each row: {tableConfig, expected server tenant, expected server tag}.
  // For an OFFLINE table the resolved tag must always be "<tenant>_OFFLINE",
  // regardless of any (realtime-only) tag override configuration.
  TableConfig.Builder builder = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE);
  builder.setTableName("testOfflineTable").setTimeColumnName("timeColumn").setTimeType("DAYS")
      .setRetentionTimeUnit("DAYS").setRetentionTimeValue("5").setServerTenant("aServerTenant");

  List<Object[]> rows = new ArrayList<>();

  // Case 1: no tag override config set at all.
  TableConfig config = builder.build();
  rows.add(new Object[]{config, "aServerTenant", "aServerTenant_OFFLINE"});

  // Case 2: tag override config explicitly null.
  config = builder.setTagOverrideConfig(null).build();
  rows.add(new Object[]{config, "aServerTenant", "aServerTenant_OFFLINE"});

  // Case 3: empty tag override config (no fields populated).
  TagOverrideConfig override = new TagOverrideConfig();
  config = builder.setTagOverrideConfig(override).build();
  rows.add(new Object[]{config, "aServerTenant", "aServerTenant_OFFLINE"});

  // Case 4: realtime consuming/completed overrides present — they must not
  // affect an OFFLINE table's resolved tag.
  override = new TagOverrideConfig();
  override.setRealtimeConsuming("overriddenTag_REALTIME");
  override.setRealtimeCompleted("overriddenTag_OFFLINE");
  config = builder.setTagOverrideConfig(override).build();
  rows.add(new Object[]{config, "aServerTenant", "aServerTenant_OFFLINE"});

  return rows.toArray(new Object[rows.size()][]);
}
private void setupRealtimeTable() throws IOException { // Set up the realtime table. Map<String, String> streamConfigs = new HashMap<>(); streamConfigs.put("streamType", "kafka"); streamConfigs.put("stream.kafka.consumer.type", "highLevel"); streamConfigs.put("stream.kafka.topic.name", "kafkaTopic"); streamConfigs .put("stream.kafka.decoder.class.name", "org.apache.pinot.core.realtime.impl.kafka.KafkaAvroMessageDecoder"); streamConfigs.put("stream.kafka.hlc.zk.connect.string", "localhost:1111/zkConnect"); streamConfigs.put("stream.kafka.decoder.prop.schema.registry.rest.url", "http://localhost:2222/schemaRegistry"); TableConfig realtimeTimeConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.REALTIME).setTableName(RAW_DINING_TABLE_NAME) .setTimeColumnName("timeColumn").setTimeType("DAYS"). setStreamConfigs(streamConfigs).build(); Schema schema = new Schema(); schema.setSchemaName(RAW_DINING_TABLE_NAME); _pinotResourceManager.addOrUpdateSchema(schema); // Fake an PinotLLCRealtimeSegmentManager instance: required for a realtime table creation. PinotLLCRealtimeSegmentManager .create(_pinotResourceManager, new ControllerConf(), new ControllerMetrics(new MetricsRegistry())); _pinotResourceManager.addTable(realtimeTimeConfig); _helixBrokerStarter.getHelixExternalViewBasedRouting() .markDataResourceOnline(realtimeTimeConfig, null, new ArrayList<InstanceConfig>()); }
.setTimeColumnName(timeColumnName).setTimeType(timeType).setSchemaName(schemaName).setBrokerTenant(brokerTenant) .setServerTenant(serverTenant).setLoadMode(loadMode).setSortedColumn(sortedColumn) .setInvertedIndexColumns(invertedIndexColumns).setBloomFilterColumns(bloomFilterColumns)
NUM_SERVER_INSTANCES, true); _offlineBuilder.setTableName("testOfflineTable").setTimeColumnName("timeColumn").setTimeType("DAYS") .setRetentionTimeUnit("DAYS").setRetentionTimeValue("5"); streamConfigs.put(StreamConfigProperties .constructStreamProperty(streamType, StreamConfigProperties.STREAM_CONSUMER_OFFSET_CRITERIA), "smallest"); _realtimeBuilder.setTableName(REALTIME_TABLE_NAME).setTimeColumnName("timeColumn").setTimeType("DAYS") .setRetentionTimeUnit("DAYS").setRetentionTimeValue("5").setSchemaName(REALTIME_TABLE_NAME) .setStreamConfigs(streamConfigs);
.setTimeColumnName("timeColumn").setTimeType("DAYS").build(); _pinotResourceManager.addTable(offlineTableConfig); setupRealtimeTable();
@DataProvider(name = "realtimeTagConfigTestDataProvider") public Object[][] realtimeTagConfigTestDataProvider() { TableConfig.Builder tableConfigBuilder = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE); tableConfigBuilder.setTableName("testRealtimeTable").setTimeColumnName("timeColumn").setTimeType("DAYS") .setRetentionTimeUnit("DAYS").setRetentionTimeValue("5").setServerTenant("aServerTenant");
@BeforeClass public void setUp() throws Exception { try { startZk(); ControllerConf config = getDefaultControllerConfiguration(); config.setTableMinReplicas(MIN_NUM_REPLICAS); startController(config); ControllerRequestBuilderUtil .addFakeBrokerInstancesToAutoJoinHelixCluster(getHelixClusterName(), ZkStarter.DEFAULT_ZK_STR, NUM_BROKER_INSTANCES, true); ControllerRequestBuilderUtil .addFakeDataInstancesToAutoJoinHelixCluster(getHelixClusterName(), ZkStarter.DEFAULT_ZK_STR, NUM_SERVER_INSTANCES, true); _offlineBuilder.setTableName("testOfflineTable").setTimeColumnName("timeColumn").setTimeType("DAYS") .setRetentionTimeUnit("DAYS").setRetentionTimeValue("5"); setUpTable(); // Join 4 more servers as untagged String[] instanceNames = {"Server_localhost_a", "Server_localhost_b", "Server_localhost_c", "Server_localhost_d"}; for (String instanceName : instanceNames) { ControllerRequestBuilderUtil .addFakeDataInstanceToAutoJoinHelixCluster(getHelixClusterName(), ZkStarter.DEFAULT_ZK_STR, instanceName, true); _helixAdmin.removeInstanceTag(getHelixClusterName(), instanceName, OFFLINE_TENENT_NAME); } } catch (Exception e) { e.printStackTrace(); } }
/**
 * Creates the table on the controller. If a table-config file was supplied it is
 * submitted via {@code AddTableCommand}; otherwise a default OFFLINE table config
 * is built from the configured table name and posted to the controller REST API.
 * Logs an error and returns if neither a config file nor a table name is set.
 */
private void addTable() throws Exception {
  // Preferred path: a full table-config file was provided.
  if (_tableConfigFile != null) {
    AddTableCommand addTableCommand =
        new AddTableCommand().setControllerPort(_controllerPort).setFilePath(_tableConfigFile).setExecute(true);
    addTableCommand.execute();
    return;
  }

  // Fallback path requires at least a table name.
  if (_tableName == null) {
    LOGGER.error("Table info not specified in configuration, please specify either config file or table name");
    return;
  }

  String controllerAddress = "http://" + _localhost + ":" + _controllerPort;
  // Build a minimal default OFFLINE table config and POST it to the controller.
  String tableConfigJson =
      new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE).setTableName(_tableName)
          .setTimeColumnName(_timeColumnName).setTimeType(_timeUnit).setNumReplicas(3).setBrokerTenant("broker")
          .setServerTenant("server").build().toJSONConfigString();
  sendPostRequest(ControllerRequestURLBuilder.baseUrl(controllerAddress).forTableCreate(), tableConfigJson);
}
/**
 * Writes a minimal OFFLINE table config for {@code tableName} directly into the
 * ZK property store (bypassing the controller REST API).
 */
private void addingTableToPropertyStore(String tableName) throws Exception {
  TableConfig.Builder builder = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE);
  TableConfig tableConfig =
      builder.setTableName(tableName).setTimeColumnName("timestamp").setTimeType("DAYS").build();
  ZKMetadataProvider
      .setOfflineTableConfig(_propertyStore, tableConfig.getTableName(), TableConfig.toZnRecord(tableConfig));
}
}
/**
 * Builds an OFFLINE table config with 3 replicas from the given table settings.
 *
 * @param tableName name of the offline table
 * @param timeColumnName time column of the table
 * @param timeType time unit of the time column (e.g. "DAYS")
 * @param brokerTenant broker tenant name
 * @param serverTenant server tenant name
 * @param loadMode segment load mode
 * @param segmentVersion segment format version
 * @param invertedIndexColumns columns to build inverted indexes on
 * @param bloomFilterColumns columns to build bloom filters on
 * @param taskConfig task configuration for the table
 * @return the built {@link TableConfig}
 */
private static TableConfig getOfflineTableConfig(String tableName, String timeColumnName, String timeType,
    String brokerTenant, String serverTenant, String loadMode, SegmentVersion segmentVersion,
    List<String> invertedIndexColumns, List<String> bloomFilterColumns, TableTaskConfig taskConfig)
    throws Exception {
  TableConfig.Builder builder = new TableConfig.Builder(Helix.TableType.OFFLINE);
  builder.setTableName(tableName).setTimeColumnName(timeColumnName).setTimeType(timeType);
  builder.setNumReplicas(3).setBrokerTenant(brokerTenant).setServerTenant(serverTenant);
  builder.setLoadMode(loadMode).setSegmentVersion(segmentVersion.toString());
  builder.setInvertedIndexColumns(invertedIndexColumns).setBloomFilterColumns(bloomFilterColumns);
  builder.setTaskConfig(taskConfig);
  return builder.build();
}