@Override
public void init(TableConfig tableConfig, ZkHelixPropertyStore<ZNRecord> propertyStore) {
  // Cache the property store handle and the fully-qualified table name for later lookups.
  _propertyStore = propertyStore;
  _tableNameWithType = tableConfig.getTableName();
}
public static boolean validate(TableConfig tableConfig) { // TODO: ADD MORE VALIDATIONS. String tableName = tableConfig.getTableName(); return !tableName.contains(" "); } }
/**
 * Removes the cached flush threshold updater for the given table, if any.
 */
public void clearFlushThresholdUpdater(TableConfig tableConfig) {
  _flushThresholdUpdaterMap.remove(tableConfig.getTableName());
}
}
@Override public void init(Configuration configuration, TableConfig tableConfig, ZkHelixPropertyStore<ZNRecord> propertyStore, BrokerMetrics brokerMetrics) { _propertyStore = propertyStore; _tableName = tableConfig.getTableName(); _brokerMetrics = brokerMetrics; // TODO: We need to specify the type of pruners via config instead of hardcoding. _pruner = new SegmentZKMetadataPrunerService(new String[]{PARTITION_METADATA_PRUNER}); }
@Override public void init(Configuration configuration, TableConfig tableConfig, ZkHelixPropertyStore<ZNRecord> propertyStore, BrokerMetrics brokerMetrics) { _tableName = tableConfig.getTableName(); _brokerMetrics = brokerMetrics; // Enable dynamic routing when the config is explicitly set RoutingConfig routingConfig = tableConfig.getRoutingConfig(); if (routingConfig != null) { Map<String, String> routingOption = routingConfig.getRoutingTableBuilderOptions(); _enableDynamicComputing = Boolean.valueOf(routingOption.get(RoutingConfig.ENABLE_DYNAMIC_COMPUTING_KEY)); if (_enableDynamicComputing) { LOGGER.info("Dynamic routing table computation is enabled for table {}", _tableName); } } }
/**
 * Rebalances segments and writes the resulting ideal state back to Helix.
 *
 * @param tableConfig a table config
 * @param replicaGroupPartitionAssignment a replica group partition assignment
 * @return the rebalanced ideal state, re-read from Helix after the update
 */
private IdealState rebalanceSegmentsAndUpdateIdealState(final TableConfig tableConfig,
    final ReplicaGroupPartitionAssignment replicaGroupPartitionAssignment) {
  final String tableNameWithType = tableConfig.getTableName();
  final Function<IdealState, IdealState> rebalancer = new Function<IdealState, IdealState>() {
    @Nullable
    @Override
    public IdealState apply(@Nullable IdealState idealState) {
      return rebalanceSegments(idealState, tableConfig, replicaGroupPartitionAssignment);
    }
  };
  // Retry with exponential backoff: up to 5 attempts, starting at 1000ms, doubling each time.
  HelixHelper.updateIdealState(_helixManager, tableNameWithType, rebalancer,
      RetryPolicies.exponentialBackoffRetryPolicy(5, 1000, 2.0f));
  return _helixAdmin.getResourceIdealState(_helixClusterName, tableNameWithType);
}
/**
 * Returns the names of all tables whose server tenant matches this tenant.
 *
 * @return list of matching table names (possibly empty, never null)
 * @throws Exception on property store access errors
 */
public List<String> getAllTenantTables()
    throws Exception {
  String tableConfigPath = "/CONFIGS/TABLE";
  List<ZNRecord> tableConfigs = _propertyStore.getChildren(tableConfigPath, null, 0);
  List<String> tables = new ArrayList<>(128);
  // getChildren may return null when the path has no children — treat as "no tables".
  if (tableConfigs == null) {
    return tables;
  }
  for (ZNRecord znRecord : tableConfigs) {
    TableConfig tableConfig = TableConfig.fromZnRecord(znRecord);
    // Compare with _tenantName on the left so a table config with a null server tenant cannot NPE.
    if (_tenantName.equals(tableConfig.getTenantConfig().getServer())) {
      tables.add(tableConfig.getTableName());
    }
  }
  return tables;
}
@Deprecated
@PUT
@Path("/tables/{tableName}/metadataConfigs")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation(value = "Update table metadata", notes = "Updates table configuration")
@ApiResponses(value = {@ApiResponse(code = 200, message = "Success"), @ApiResponse(code = 500, message = "Internal server error"), @ApiResponse(code = 404, message = "Table not found")})
public SuccessResponse updateTableMetadata(@PathParam("tableName") String tableName, String requestBody) {
  try {
    // Parse the request body and push the metadata portion of the config to Helix.
    TableConfig tableConfig = TableConfig.fromJsonString(requestBody);
    pinotHelixResourceManager.updateMetadataConfigFor(tableConfig.getTableName(), tableConfig.getTableType(),
        tableConfig.getCustomConfig());
    return new SuccessResponse("Successfully updated " + tableName + " configuration");
  } catch (Exception e) {
    String errStr = "Error while updating table configuration, table: " + tableName;
    throw new ControllerApplicationException(LOGGER, errStr, Response.Status.INTERNAL_SERVER_ERROR, e);
  }
}
}
/** * Gets stream partition assignment of a table by reading the segment assignment in ideal state */ public PartitionAssignment getStreamPartitionAssignmentFromIdealState(TableConfig tableConfig, IdealState idealState) { String tableNameWithType = tableConfig.getTableName(); // get latest segment in each partition Map<String, LLCSegmentName> partitionIdToLatestSegment = getPartitionToLatestSegments(idealState); // extract partition assignment from the latest segments PartitionAssignment partitionAssignment = new PartitionAssignment(tableNameWithType); Map<String, Map<String, String>> mapFields = idealState.getRecord().getMapFields(); for (Map.Entry<String, LLCSegmentName> entry : partitionIdToLatestSegment.entrySet()) { String segmentName = entry.getValue().getSegmentName(); Map<String, String> instanceStateMap = mapFields.get(segmentName); partitionAssignment.addPartition(entry.getKey(), Lists.newArrayList(instanceStateMap.keySet())); } return partitionAssignment; }
@Override
public void setupNewTable(TableConfig tableConfig, IdealState emptyIdealState)
    throws InvalidConfigException {
  // Record which table is being set up before delegating to the base implementation.
  _currentTable = tableConfig.getTableName();
  super.setupNewTable(tableConfig, emptyIdealState);
}
private void updateRealtimeDocumentCount(TableConfig tableConfig) { String realtimeTableName = tableConfig.getTableName(); List<RealtimeSegmentZKMetadata> metadataList = _pinotHelixResourceManager.getRealtimeSegmentMetadata(realtimeTableName); boolean countHLCSegments = true; // false if this table has ONLY LLC segments (i.e. fully migrated) StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); if (streamConfig.hasLowLevelConsumerType() && !streamConfig.hasHighLevelConsumerType()) { countHLCSegments = false; } // Update the gauge to contain the total document count in the segments _validationMetrics.updateTotalDocumentCountGauge(tableConfig.getTableName(), computeRealtimeTotalDocumentInSegments(metadataList, countHLCSegments)); }
@Nonnull @Override public List<PinotTaskConfig> generateTasks(@Nonnull List<TableConfig> tableConfigs) { assertEquals(tableConfigs.size(), 2); // Generate at most 2 tasks if (_clusterInfoProvider.getTaskStates(TASK_TYPE).size() >= 2) { return Collections.emptyList(); } List<PinotTaskConfig> taskConfigs = new ArrayList<>(); for (TableConfig tableConfig : tableConfigs) { Map<String, String> configs = new HashMap<>(); configs.put("tableName", tableConfig.getTableName()); configs.put("tableType", tableConfig.getTableType().toString()); taskConfigs.add(new PinotTaskConfig(TASK_TYPE, configs)); } return taskConfigs; }
private void checkTableConfigWithHllConfig(TableConfig tableConfig, TableConfig tableConfigToCompare) { // Check that the segment assignment configuration does exist. Assert.assertEquals(tableConfigToCompare.getTableName(), tableConfig.getTableName()); Assert.assertNotNull(tableConfigToCompare.getValidationConfig().getHllConfig()); // Check that the configurations are correct. HllConfig hllConfig = tableConfigToCompare.getValidationConfig().getHllConfig(); Set<String> columns = new HashSet<>(); columns.add("column"); columns.add("column2"); Assert.assertEquals(hllConfig.getColumnsToDeriveHllFields(), columns); Assert.assertEquals(hllConfig.getHllLog2m(), 9); Assert.assertEquals(hllConfig.getHllDeriveColumnSuffix(), "suffix"); } }
private void checkTableConfigWithAssignmentConfig(TableConfig tableConfig, TableConfig tableConfigToCompare) { // Check that the segment assignment configuration does exist. Assert.assertEquals(tableConfigToCompare.getTableName(), tableConfig.getTableName()); Assert.assertNotNull(tableConfigToCompare.getValidationConfig().getReplicaGroupStrategyConfig()); Assert.assertEquals(tableConfigToCompare.getValidationConfig().getReplicaGroupStrategyConfig(), tableConfig.getValidationConfig().getReplicaGroupStrategyConfig()); // Check that the configurations are correct. ReplicaGroupStrategyConfig strategyConfig = tableConfigToCompare.getValidationConfig().getReplicaGroupStrategyConfig(); Assert.assertTrue(strategyConfig.getMirrorAssignmentAcrossReplicaGroups()); Assert.assertEquals(strategyConfig.getNumInstancesPerPartition(), 5); Assert.assertEquals(strategyConfig.getPartitionColumn(), "memberId"); }
private void validateLoadedConfig(TableConfig config) {
  // Table identity and quota.
  Assert.assertEquals(config.getTableName(), "mytable_OFFLINE");
  Assert.assertEquals(config.getQuotaConfig().getStorage(), "125 GiB");

  // Retention settings.
  Assert.assertEquals(config.getValidationConfig().getRetentionTimeValue(), "5");
  Assert.assertEquals(config.getValidationConfig().getRetentionTimeUnit(), "DAYS");

  // Tenant assignment.
  Assert.assertEquals(config.getTenantConfig().getBroker(), "foo");
  Assert.assertEquals(config.getTenantConfig().getServer(), "bar");

  // Indexing settings.
  Assert.assertEquals(config.getIndexingConfig().getSortedColumn(), Lists.newArrayList("foo"));
}
}
/**
 * Runs partition assignment generation for the given table and verifies the outcome
 * (success vs. expected exception), returning the resulting assignment.
 */
private PartitionAssignment verifyGeneratePartitionAssignment(TableConfig tableConfig, int numPartitions,
    List<String> consumingInstanceList, PartitionAssignment previousPartitionAssignment, boolean exceptionExpected,
    boolean unchanged) {
  String tableName = tableConfig.getTableName();
  TestStreamPartitionAssignmentGenerator generator =
      new TestStreamPartitionAssignmentGenerator(_mockHelixManager);
  generator.setConsumingInstances(consumingInstanceList);
  try {
    PartitionAssignment partitionAssignment =
        generator.generateStreamPartitionAssignment(tableConfig, numPartitions);
    // Generation succeeded — the caller must not have expected an exception.
    Assert.assertFalse(exceptionExpected, "Unexpected exception for table " + tableName);
    verify(tableName, partitionAssignment, numPartitions, consumingInstanceList, unchanged,
        previousPartitionAssignment);
    return partitionAssignment;
  } catch (Exception e) {
    // Generation failed — only acceptable when expected; fall back to the previous assignment.
    Assert.assertTrue(exceptionExpected, "Expected exception for table " + tableName);
    return previousPartitionAssignment;
  }
}
protected void updateTableConfiguration() {
  // Only the new combined-config format is pushed through this path.
  if (!isUsingNewConfigFormat()) {
    return;
  }
  CombinedConfig combinedConfig = new CombinedConfig(_offlineTableConfig, _realtimeTableConfig, _schema);
  try {
    sendPutRequest(_controllerRequestURLBuilder.forNewUpdateTableConfig(_offlineTableConfig.getTableName()),
        Serializer.serializeToString(combinedConfig));
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
}
private String getDefaultSegmentName(TableConfig tableConfig, Schema schema, List<File> inputIndexDirs, long minStartTime, long maxEndTime) throws Exception { String tableName = tableConfig.getTableName(); // Fetch time related configurations from schema and table config. String pushFrequency = tableConfig.getValidationConfig().getSegmentPushFrequency(); String timeColumnType = tableConfig.getValidationConfig().getTimeType(); String pushType = tableConfig.getValidationConfig().getSegmentPushType(); String timeFormat = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeFormat(); // Generate the final segment name using segment name generator NormalizedDateSegmentNameGenerator segmentNameGenerator = new NormalizedDateSegmentNameGenerator(tableName, DEFAULT_SEQUENCE_ID, timeColumnType, pushFrequency, pushType, null, null, timeFormat); return segmentNameGenerator.generateSegmentName(minStartTime, maxEndTime); }
/**
 * Advances one sequence number for a partition: marks the given segment's metadata as
 * committed at nextOffset, creates metadata for the next segment, and updates the ideal state.
 */
private void advanceASeqForPartition(IdealState idealState, FakePinotLLCRealtimeSegmentManager segmentManager,
    PartitionAssignment partitionAssignment, String segmentName, int partition, int nextSeqNum, long nextOffset,
    TableConfig tableConfig) {
  String tableName = tableConfig.getTableName();
  String rawTableName = TableNameBuilder.extractRawTableName(tableName);

  // Update the metadata ZNRecord of the segment being committed.
  LLCSegmentName committingSegment = new LLCSegmentName(segmentName);
  segmentManager.updateOldSegmentMetadataZNRecord(tableName, committingSegment, nextOffset);

  // Create the metadata ZNRecord for the next segment in this partition.
  LLCSegmentName nextSegment = new LLCSegmentName(rawTableName, partition, nextSeqNum, System.currentTimeMillis());
  CommittingSegmentDescriptor committingSegmentDescriptor =
      new CommittingSegmentDescriptor(segmentName, nextOffset, 0);
  segmentManager.createNewSegmentMetadataZNRecord(tableConfig, committingSegment, nextSegment, partitionAssignment,
      committingSegmentDescriptor, false);

  // Reflect the segment completion in the ideal state.
  segmentManager.updateIdealStateOnSegmentCompletion(idealState, segmentName, nextSegment.getSegmentName(),
      partitionAssignment);
}
private void addingTableToPropertyStore(String tableName)
    throws Exception {
  // Build a minimal OFFLINE table config and write it into the property store.
  TableConfig tableConfig =
      new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE).setTableName(tableName)
          .setTimeColumnName("timestamp").setTimeType("DAYS").build();
  ZKMetadataProvider.setOfflineTableConfig(_propertyStore, tableConfig.getTableName(),
      TableConfig.toZnRecord(tableConfig));
}
}