/**
 * Initializes this assignment strategy from the table config.
 * Partition-level replica-group assignment is enabled iff a partition column is configured
 * in the replica-group strategy config.
 */
@Override
public void init(Configuration configuration, TableConfig tableConfig,
    ZkHelixPropertyStore<ZNRecord> propertyStore, BrokerMetrics brokerMetrics) {
  super.init(configuration, tableConfig, propertyStore, brokerMetrics);
  // BUG FIX: guard against a missing ReplicaGroupStrategyConfig — the original dereferenced
  // it unconditionally and would NPE for tables created without one.
  ReplicaGroupStrategyConfig replicaGroupStrategyConfig =
      tableConfig.getValidationConfig().getReplicaGroupStrategyConfig();
  String partitionColumn =
      (replicaGroupStrategyConfig != null) ? replicaGroupStrategyConfig.getPartitionColumn() : null;
  _isPartitionLevelReplicaGroupAssignment = (partitionColumn != null);
  _numReplicas = tableConfig.getValidationConfig().getReplicationNumber();
}
// Replica count for the realtime partitions, from replicasPerPartition in the table config.
int numReplicas = tableConfig.getValidationConfig().getReplicasPerPartitionNumber();
// A retention of 0 means "not configured" — fall back to defaults based on push frequency.
if (_retentionHours == 0) {
  if (tableConfig.getValidationConfig().getSegmentPushFrequency().equalsIgnoreCase("hourly")) {
    _retentionHours = DEFAULT_RETENTION_FOR_HOURLY_PUSH;
  } else {
// NOTE(review): excerpt is truncated here — the non-hourly default branch is not visible.
// Validate APPEND-table requirements: push type, time column name and time column type
// must all be present in the validation config.
String segmentPushType = validationConfig.getSegmentPushType();
if (segmentPushType == null) {
  // NOTE(review): the throw statements in this excerpt are truncated — the trailing
  // concatenation and ");" are missing. Verify against the full file before editing.
  throw new RuntimeException("Segment push type is null. Please configure the value correctly in the table config. "
String timeColumnName = validationConfig.getTimeColumnName();
String timeColumnType = validationConfig.getTimeType();
if (timeColumnName == null || timeColumnType == null) {
  throw new RuntimeException("Time column or time column type is null. Both are required for APPEND use case. "
private void manageRetentionForTable(String tableNameWithType) { // Build retention strategy from table config TableConfig tableConfig = _pinotHelixResourceManager.getTableConfig(tableNameWithType); if (tableConfig == null) { LOGGER.error("Failed to get table config for table: {}", tableNameWithType); return; } SegmentsValidationAndRetentionConfig validationConfig = tableConfig.getValidationConfig(); String segmentPushType = validationConfig.getSegmentPushType(); if (!"APPEND".equalsIgnoreCase(segmentPushType)) { LOGGER.info("Segment push type is not APPEND for table: {}, skip", tableNameWithType); return; } String retentionTimeUnit = validationConfig.getRetentionTimeUnit(); String retentionTimeValue = validationConfig.getRetentionTimeValue(); RetentionStrategy retentionStrategy; try { retentionStrategy = new TimeRetentionStrategy(TimeUnit.valueOf(retentionTimeUnit.toUpperCase()), Long.parseLong(retentionTimeValue)); } catch (Exception e) { LOGGER.warn("Invalid retention time: {} {} for table: {}, skip", retentionTimeUnit, retentionTimeValue); return; } // Scan all segment ZK metadata and purge segments if necessary if (TableNameBuilder.OFFLINE.tableHasTypeSuffix(tableNameWithType)) { manageRetentionForOfflineTable(tableNameWithType, retentionStrategy); } else { manageRetentionForRealtimeTable(tableNameWithType, retentionStrategy); } }
// Enforce the controller-configured minimum replication on table creation.
int requestReplication;
try {
  requestReplication = segmentsConfig.getReplicationNumber();
  if (requestReplication < configMinReplication) {
    LOGGER.info("Creating table with minimum replication factor of: {} instead of requested replication: {}",
        configMinReplication, requestReplication);
    segmentsConfig.setReplication(String.valueOf(configMinReplication));
    String replicasPerPartitionStr = segmentsConfig.getReplicasPerPartition();
    if (replicasPerPartitionStr == null) {
      // NOTE(review): this exception message uses SLF4J-style '{}' placeholders, which are NOT
      // substituted by an exception constructor, and the statement after the throw is
      // unreachable. The excerpt also appears truncated (unbalanced braces) — verify against
      // the full file before editing.
      throw new PinotHelixResourceManager.InvalidTableConfigException(
          "Creating table with minimum replicasPerPartition of: {} instead of requested replicasPerPartition: {}",
          configMinReplication, replicasPerPartition);
      segmentsConfig.setReplicasPerPartition(String.valueOf(configMinReplication));
private String getDefaultSegmentName(TableConfig tableConfig, Schema schema, List<File> inputIndexDirs, long minStartTime, long maxEndTime) throws Exception { String tableName = tableConfig.getTableName(); // Fetch time related configurations from schema and table config. String pushFrequency = tableConfig.getValidationConfig().getSegmentPushFrequency(); String timeColumnType = tableConfig.getValidationConfig().getTimeType(); String pushType = tableConfig.getValidationConfig().getSegmentPushType(); String timeFormat = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeFormat(); // Generate the final segment name using segment name generator NormalizedDateSegmentNameGenerator segmentNameGenerator = new NormalizedDateSegmentNameGenerator(tableName, DEFAULT_SEQUENCE_ID, timeColumnType, pushFrequency, pushType, null, null, timeFormat); return segmentNameGenerator.generateSegmentName(minStartTime, maxEndTime); }
// Round-trip checks: the name survives serialization, quota is absent, and the optional
// replica-group and HLL configs are null before being populated below.
Assert.assertEquals(tableConfigToCompare.getTableName(), tableConfig.getTableName());
Assert.assertNull(tableConfigToCompare.getQuotaConfig());
Assert.assertNull(tableConfigToCompare.getValidationConfig().getReplicaGroupStrategyConfig());
Assert.assertNull(tableConfigToCompare.getValidationConfig().getHllConfig());
Assert.assertEquals(tableConfigToCompare.getTableName(), tableConfig.getTableName());
Assert.assertNull(tableConfigToCompare.getQuotaConfig());
// NOTE(review): this assertion checks tableConfig while the parallel block above checks
// tableConfigToCompare — possibly a copy/paste slip; confirm intent before changing.
Assert.assertNull(tableConfig.getValidationConfig().getReplicaGroupStrategyConfig());
Assert.assertNull(tableConfigToCompare.getValidationConfig().getHllConfig());
// Populate the optional configs for the next round-trip check.
tableConfig.getValidationConfig().setReplicaGroupStrategyConfig(replicaGroupConfig);
tableConfig.getValidationConfig().setHllConfig(hllConfig);
// Reject config updates that change the time column name or type of an existing table.
SegmentsValidationAndRetentionConfig SegmentConfigToCompare = tableConfigToCompare.getValidationConfig();
String newTimeColumnName = newSegmentConfig.getTimeColumnName();
String existingTimeColumnName = SegmentConfigToCompare.getTimeColumnName();
if (!existingTimeColumnName.equals(newTimeColumnName)) {
  // NOTE(review): both throw statements in this excerpt are truncated after "String" —
  // presumably String.format(...) calls whose arguments are not visible here.
  throw new PinotHelixResourceManager.InvalidTableConfigException(String
String newTimeColumnType = newSegmentConfig.getTimeType();
String existingTimeColumnType = SegmentConfigToCompare.getTimeType();
if (!existingTimeColumnType.equalsIgnoreCase(newTimeColumnType)) {
  throw new PinotHelixResourceManager.InvalidTableConfigException(String
// Verify the retention defaults loaded from the original table config.
Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeValue(), "5");
Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeUnit(), "DAYS");
// Modify retention and verify the modified config reflects the update.
tableConfig.getValidationConfig().setRetentionTimeUnit("HOURS");
tableConfig.getValidationConfig().setRetentionTimeValue("10");
Assert.assertEquals(modifiedConfig.getValidationConfig().getRetentionTimeUnit(), "HOURS");
Assert.assertEquals(modifiedConfig.getValidationConfig().getRetentionTimeValue(), "10");
// Create the REALTIME table and verify its retention defaults and absent quota config.
sendPostRequest(_createTableUrl, tableJSONConfigString);
tableConfig = getTableConfig(tableName, "REALTIME");
Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeValue(), "5");
Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeUnit(), "DAYS");
Assert.assertNull(tableConfig.getQuotaConfig());
/** Prepares mocked controller collaborators; only the metrics registry is a real instance. */
@BeforeClass
public void setUp() {
  // Mock every collaborator the tests interact with.
  _pinotHelixResourceManager = mock(PinotHelixResourceManager.class);
  _tableSizeReader = mock(TableSizeReader.class);
  _quotaConfig = mock(QuotaConfig.class);
  _validationConfig = mock(SegmentsValidationAndRetentionConfig.class);
  _tableConfig = mock(TableConfig.class);
  // Wire the table config so callers see a replication factor of 2.
  when(_tableConfig.getValidationConfig()).thenReturn(_validationConfig);
  when(_validationConfig.getReplicationNumber()).thenReturn(2);
  // Real metrics registry; no mocking needed for metric emission.
  _controllerMetrics = new ControllerMetrics(new MetricsRegistry());
  // Ensure the scratch directory exists for tests that write files.
  TEST_DIR.mkdirs();
}
// Read the replica-group strategy settings for this table.
// NOTE(review): getReplicaGroupStrategyConfig() may be null for tables configured without the
// strategy — confirm callers guarantee its presence before this point.
ReplicaGroupStrategyConfig replicaGroupConfig = tableConfig.getValidationConfig().getReplicaGroupStrategyConfig();
boolean mirrorAssignment = replicaGroupConfig.getMirrorAssignmentAcrossReplicaGroups();
int numPartitions = replicaGroupPartitionAssignment.getNumPartitions();
// Build a mocked table config whose validation config reports nReplicas, both as the raw
// string (getReplicasPerPartition) and the parsed int (getReplicasPerPartitionNumber).
when(mockTableConfig.getTableName()).thenReturn(tableName);
SegmentsValidationAndRetentionConfig mockValidationConfig = mock(SegmentsValidationAndRetentionConfig.class);
when(mockValidationConfig.getReplicasPerPartition()).thenReturn(Integer.toString(nReplicas));
when(mockValidationConfig.getReplicasPerPartitionNumber()).thenReturn(nReplicas);
when(mockTableConfig.getValidationConfig()).thenReturn(mockValidationConfig);
// Derive the table type (OFFLINE/REALTIME) from the name suffix.
CommonConstants.Helix.TableType tableTypeFromTableName = TableNameBuilder.getTableTypeFromTableName(tableName);
/**
 * Updates the offline table config with the target number of instances per partition and
 * replica groups, then persists it through the Helix resource manager.
 *
 * @param targetNumInstancePerPartition desired instances per partition in the replica-group strategy
 * @param targetNumReplicaGroup desired number of replica groups (stored as the replication factor)
 */
private void updateTableConfig(int targetNumInstancePerPartition, int targetNumReplicaGroup)
    throws IOException {
  String tableNameWithType = TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME);
  TableConfig tableConfig = _helixResourceManager.getTableConfig(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE);
  // NOTE(review): assumes the replica-group strategy config is already present on the table.
  tableConfig.getValidationConfig().getReplicaGroupStrategyConfig()
      .setNumInstancesPerPartition(targetNumInstancePerPartition);
  tableConfig.getValidationConfig().setReplication(Integer.toString(targetNumReplicaGroup));
  _helixResourceManager
      .setExistingTableConfig(tableConfig, tableNameWithType, CommonConstants.Helix.TableType.OFFLINE);
}
// Closes the enclosing class (its opening brace is outside this excerpt).
}
// Configure replica-group based segment assignment with a replication factor of 2.
tableConfig.getValidationConfig().setReplicaGroupStrategyConfig(replicaGroupStrategyConfig);
tableConfig.getValidationConfig().setSegmentAssignmentStrategy("ReplicaGroupSegmentAssignmentStrategy");
tableConfig.getValidationConfig().setReplication("2");
/**
 * Asserts that the loaded table config matches the expected fixture values: name, storage
 * quota, retention, tenants, and the sorted-column indexing setting.
 */
private void validateLoadedConfig(TableConfig config) {
  Assert.assertEquals(config.getTableName(), "mytable_OFFLINE");
  Assert.assertEquals(config.getQuotaConfig().getStorage(), "125 GiB");
  Assert.assertEquals(config.getValidationConfig().getRetentionTimeValue(), "5");
  Assert.assertEquals(config.getValidationConfig().getRetentionTimeUnit(), "DAYS");
  Assert.assertEquals(config.getTenantConfig().getBroker(), "foo");
  Assert.assertEquals(config.getTenantConfig().getServer(), "bar");
  Assert.assertEquals(config.getIndexingConfig().getSortedColumn(), Lists.newArrayList("foo"));
}
// Closes the enclosing class (its opening brace is outside this excerpt).
}
int numSegments = offlineSegmentZKMetadataList.size();
SegmentsValidationAndRetentionConfig validationConfig = tableConfig.getValidationConfig();
// Missing-segment detection needs at least two segments and a configured time column.
if (numSegments >= 2 && StringUtils.isNotEmpty(validationConfig.getTimeColumnName())) {
  List<Interval> segmentIntervals = new ArrayList<>(numSegments);
  List<String> segmentsWithInvalidInterval = new ArrayList<>();
  // NOTE(review): excerpt is truncated — the loop that populates these lists is not visible.
  LOGGER.warn("Table: {} has segments with invalid interval: {}", offlineTableName, segmentsWithInvalidInterval);
  // The push frequency determines the expected gap between consecutive segment intervals.
  Duration frequency = convertToDuration(validationConfig.getSegmentPushFrequency());
  numMissingSegments = computeNumMissingSegments(segmentIntervals, frequency);
// Partition-aware routing for offline tables: requires the matching assignment strategy
// and a push type other than REFRESH.
case PartitionAwareOffline:
  SegmentsValidationAndRetentionConfig validationConfig = tableConfig.getValidationConfig();
  String segmentAssignmentStrategy = validationConfig.getSegmentAssignmentStrategy();
  // NOTE(review): excerpt is truncated — this boolean expression is missing its left-hand
  // side / assignment in the visible text.
  (validationConfig.getSegmentPushType() != null) && !validationConfig.getSegmentPushType()
      .equalsIgnoreCase("REFRESH");
/** * If realtime table and includeConsuming=true, rebalance consuming segments. NewPartitionAssignment will be used only * in this case. Always rebalance completed (online) segments, NewPartitionAssignment unused in this case * @param idealState old ideal state * @param tableConfig table config of table tor rebalance * @param rebalanceUserConfig custom user configs for specific rebalance strategies * @param newPartitionAssignment new rebalanced partition assignments as part of the resource rebalance * @return */ @Override public IdealState getRebalancedIdealState(IdealState idealState, TableConfig tableConfig, Configuration rebalanceUserConfig, PartitionAssignment newPartitionAssignment) { String tableNameWithType = tableConfig.getTableName(); CommonConstants.Helix.TableType tableType = tableConfig.getTableType(); LOGGER.info("Rebalancing ideal state for table {}", tableNameWithType); // get target num replicas int targetNumReplicas; if (tableType.equals(CommonConstants.Helix.TableType.REALTIME)) { String replicasString = tableConfig.getValidationConfig().getReplicasPerPartition(); try { targetNumReplicas = Integer.parseInt(replicasString); } catch (Exception e) { throw new RuntimeException("Invalid value for replicasPerPartition:'" + replicasString + "'", e); } } else { targetNumReplicas = Integer.parseInt(tableConfig.getValidationConfig().getReplication()); } return rebalanceIdealState(idealState, tableConfig, targetNumReplicas, rebalanceUserConfig, newPartitionAssignment); }
// NOTE(review): excerpt is truncated — the builder chain below starts before this fragment.
.setSegmentAssignmentStrategy("ReplicaGroupSegmentAssignmentStrategy").build();
// Attach the replica-group strategy config and the indexing config to the built table config.
tableConfig.getValidationConfig().setReplicaGroupStrategyConfig(replicaGroupStrategyConfig);
tableConfig.setIndexingConfig(indexingConfig);
/** * Generates stream partition assignment for given table, using tagged hosts and num partitions */ public PartitionAssignment generateStreamPartitionAssignment(TableConfig tableConfig, int numPartitions) throws InvalidConfigException { // TODO: add an override which can read from znode, instead of generating on the fly List<String> partitions = new ArrayList<>(numPartitions); for (int i = 0; i < numPartitions; i++) { partitions.add(String.valueOf(i)); } String tableNameWithType = tableConfig.getTableName(); int numReplicas = tableConfig.getValidationConfig().getReplicasPerPartitionNumber(); List<String> consumingTaggedInstances = getConsumingTaggedInstances(tableConfig); if (consumingTaggedInstances.size() < numReplicas) { throw new InvalidConfigException( "Not enough consuming instances tagged. Must be atleast equal to numReplicas:" + numReplicas); } /** * TODO: We will use only uniform assignment for now * This will be refactored as AssignmentStrategy interface and implementations UniformAssignment, BalancedAssignment etc * {@link StreamPartitionAssignmentGenerator} and AssignmentStrategy interface will together replace * StreamPartitionAssignmentGenerator and StreamPartitionAssignmentStrategy */ return uniformAssignment(tableNameWithType, partitions, numReplicas, consumingTaggedInstances); }