/**
 * Returns the segment commit timeout, in milliseconds, for the given realtime table.
 *
 * Falls back to the protocol-wide maximum segment commit time when the property store is
 * unavailable, when the table's stream config does not override the timeout, or when the
 * configured override cannot be parsed.
 *
 * @param tableName name of the realtime table
 * @return commit timeout in milliseconds
 */
public long getCommitTimeoutMS(String tableName) {
  long commitTimeoutMS = SegmentCompletionProtocol.getMaxSegmentCommitTimeMs();
  if (_propertyStore == null) {
    return commitTimeoutMS;
  }
  TableConfig tableConfig = getRealtimeTableConfig(tableName);
  final Map<String, String> streamConfigs = tableConfig.getIndexingConfig().getStreamConfigs();
  if (streamConfigs != null && streamConfigs.containsKey(StreamConfigProperties.SEGMENT_COMMIT_TIMEOUT_SECONDS)) {
    final String commitTimeoutSecondsStr = streamConfigs.get(StreamConfigProperties.SEGMENT_COMMIT_TIMEOUT_SECONDS);
    try {
      return TimeUnit.MILLISECONDS.convert(Integer.parseInt(commitTimeoutSecondsStr), TimeUnit.SECONDS);
    } catch (Exception e) {
      // BUGFIX: the previous message said "flush size", but the value being parsed here is
      // the segment commit timeout (seconds) override from the stream config.
      LOGGER.warn("Failed to parse segment commit timeout seconds of {}", commitTimeoutSecondsStr, e);
      return commitTimeoutMS;
    }
  }
  return commitTimeoutMS;
}
/**
 * Builds the initial ideal state for a high-level-consumer (HLC) realtime table and sets up
 * the per-instance consumer group configuration in the property store.
 *
 * @param realtimeTableName realtime table name with type suffix
 * @param realtimeTableConfig table config of the realtime table
 * @param helixManager Helix manager used to look up instances carrying the consuming tag
 * @param zkHelixPropertyStore property store where instance configs are written
 * @param enableBatchMessageMode whether batch message mode is enabled on the ideal state
 * @return the (empty) ideal state for the table
 * @throws RuntimeException if the number of tagged instances is not an exact multiple of the
 *         configured replication
 */
public static IdealState buildInitialHighLevelRealtimeIdealStateFor(String realtimeTableName,
    TableConfig realtimeTableConfig, HelixManager helixManager,
    ZkHelixPropertyStore<ZNRecord> zkHelixPropertyStore, boolean enableBatchMessageMode) {
  RealtimeTagConfig realtimeTagConfig = new RealtimeTagConfig(realtimeTableConfig);
  final List<String> realtimeInstances =
      HelixHelper.getInstancesWithTag(helixManager, realtimeTagConfig.getConsumingServerTag());
  IdealState idealState = buildEmptyRealtimeIdealStateFor(realtimeTableName, 1, enableBatchMessageMode);
  // Parse the replication count once (it was previously parsed twice: once for the
  // divisibility check and again for the setup call below).
  final int replicasCount = Integer.parseInt(realtimeTableConfig.getValidationConfig().getReplication());
  if (realtimeInstances.size() % replicasCount != 0) {
    // Fixed grammar of the error message ("Number of instance ... integer multiples ...").
    throw new RuntimeException(
        "Number of instances in current tenant should be an integer multiple of the number of replicas");
  }
  setupInstanceConfigForHighLevelConsumer(realtimeTableName, realtimeInstances.size(), replicasCount,
      realtimeTableConfig.getIndexingConfig().getStreamConfigs(), zkHelixPropertyStore, realtimeInstances);
  return idealState;
}
/**
 * Returns the flush threshold updater to use for the given realtime table, based on the
 * flush-threshold-rows setting in its stream config.
 *
 * If the configured flush threshold rows is 0, a single {@code SegmentSizeBasedFlushThresholdUpdater}
 * is created per table and cached, because that updater accumulates per-table tuning state that
 * must survive across calls. For any other value, a fresh {@code DefaultFlushThresholdUpdater}
 * with the configured row count is returned and any cached size-based updater is discarded.
 *
 * NOTE(review): negative values also take the DefaultFlushThresholdUpdater path here —
 * presumably {@code PartitionLevelStreamConfig} normalizes negatives to a sane default;
 * confirm before relying on that.
 *
 * @param realtimeTableConfig table config of the realtime table
 * @return the flush threshold updater for the table
 */
public FlushThresholdUpdater getFlushThresholdUpdater(TableConfig realtimeTableConfig) {
  final String tableName = realtimeTableConfig.getTableName();
  PartitionLevelStreamConfig streamConfig =
      new PartitionLevelStreamConfig(realtimeTableConfig.getIndexingConfig().getStreamConfigs());
  final int tableFlushSize = streamConfig.getFlushThresholdRows();
  final long desiredSegmentSize = streamConfig.getFlushSegmentDesiredSizeBytes();
  if (tableFlushSize == 0) {
    // Segment-size based: reuse the cached updater so per-table tuning history is preserved.
    return _flushThresholdUpdaterMap
        .computeIfAbsent(tableName, k -> new SegmentSizeBasedFlushThresholdUpdater(desiredSegmentSize));
  } else {
    // Row-count based: the updater is stateless, so drop any cached size-based updater.
    _flushThresholdUpdaterMap.remove(tableName);
    return new DefaultFlushThresholdUpdater(tableFlushSize);
  }
}
/** Runs realtime validation for one table; non-realtime tables are ignored. */
@Override
protected void processTable(String tableNameWithType) {
  // Only REALTIME tables are handled by this validator.
  CommonConstants.Helix.TableType tableType = TableNameBuilder.getTableTypeFromTableName(tableNameWithType);
  if (tableType != CommonConstants.Helix.TableType.REALTIME) {
    return;
  }
  TableConfig tableConfig = _pinotHelixResourceManager.getTableConfig(tableNameWithType);
  if (tableConfig == null) {
    LOGGER.warn("Failed to find table config for table: {}, skipping validation", tableNameWithType);
    return;
  }
  if (_updateRealtimeDocumentCount) {
    updateRealtimeDocumentCount(tableConfig);
  }
  StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs());
  if (streamConfig.hasLowLevelConsumerType()) {
    // LLC tables additionally get their partitions checked for stalled consumption.
    _llcRealtimeSegmentManager.ensureAllPartitionsConsuming(tableConfig);
  }
}
decoderClass); IndexingConfig mockIndexConfig = mock(IndexingConfig.class); when(mockIndexConfig.getStreamConfigs()).thenReturn(streamConfigMap); when(mockTableConfig.getIndexingConfig()).thenReturn(mockIndexConfig);
when(mockIndexConfig.getStreamConfigs()).thenReturn(streamConfigMap);
/**
 * Test double for PinotLLCRealtimeSegmentManager: wires mock table-config and
 * partition-assignment internals via reflection so tests can drive segment logic
 * without a live Helix cluster.
 *
 * @param existingLLCSegments pre-existing LLC segment names to seed, or null for none
 */
protected FakePinotLLCRealtimeSegmentManager(List<String> existingLLCSegments) {
  super(null, clusterName, null, null, null, CONTROLLER_CONF, new ControllerMetrics(new MetricsRegistry()));
  try {
    // Mock the table-config cache so any table name resolves to a config whose
    // indexing config returns the test stream configs.
    TableConfigCache mockCache = mock(TableConfigCache.class);
    TableConfig mockTableConfig = mock(TableConfig.class);
    IndexingConfig mockIndexingConfig = mock(IndexingConfig.class);
    when(mockTableConfig.getIndexingConfig()).thenReturn(mockIndexingConfig);
    when(mockIndexingConfig.getStreamConfigs()).thenReturn(getStreamConfigs());
    when(mockCache.getTableConfig(anyString())).thenReturn(mockTableConfig);
    // Inject the mock cache into the (private) superclass field via reflection.
    Field tableConfigCacheField = PinotLLCRealtimeSegmentManager.class.getDeclaredField("_tableConfigCache");
    tableConfigCacheField.setAccessible(true);
    tableConfigCacheField.set(this, mockCache);
    // Replace the partition assignment generator with a fake backed by a mock HelixManager.
    HelixManager mockHelixManager = mock(HelixManager.class);
    _partitionAssignmentGenerator = new FakeStreamPartitionAssignmentGenerator(mockHelixManager);
    Field partitionAssignmentGeneratorField =
        PinotLLCRealtimeSegmentManager.class.getDeclaredField("_streamPartitionAssignmentGenerator");
    partitionAssignmentGeneratorField.setAccessible(true);
    partitionAssignmentGeneratorField.set(this, _partitionAssignmentGenerator);
  } catch (Exception e) {
    Utils.rethrowException(e);
  }
  if (existingLLCSegments != null) {
    _existingLLCSegments = existingLLCSegments;
  }
  // Minimal controller config needed by segment-completion URLs and data-dir lookups.
  CONTROLLER_CONF.setControllerVipHost("vip");
  CONTROLLER_CONF.setControllerPort("9000");
  CONTROLLER_CONF.setDataDir(baseDir.toString());
  _version = 0;
  _tableConfigStore = new TableConfigStore();
}
StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); if (!streamConfig.hasLowLevelConsumerType()) { LOGGER.info("Table {} does not have LLC and will have no partition assignment", tableNameWithType);
StreamConfig streamConfig; try { streamConfig = new StreamConfig(config.getIndexingConfig().getStreamConfigs()); } catch (Exception e) { String errorMsg = String.format("Invalid tableIndexConfig or streamConfig: %s", e.getMessage());
/**
 * Sets up a new table's segment metadata and returns the ideal state populated with the
 * initial consuming segments.
 *
 * Skips all work (returning the ideal state untouched) when the table is disabled.
 * Otherwise generates a stream partition assignment for {@code partitionCount} partitions,
 * creates one new consuming segment per partition starting at the stream config's offset
 * criteria, assigns those segments to instances, and writes the assignment into the ideal
 * state.
 *
 * @param tableConfig table config of the realtime table
 * @param idealState ideal state to populate (may already exist for the table)
 * @param partitionCount number of stream partitions to set up
 * @return the updated ideal state
 * @throws InvalidConfigException if partition assignment cannot be generated
 */
private IdealState setupTable(TableConfig tableConfig, IdealState idealState, int partitionCount)
    throws InvalidConfigException {
  final String tableNameWithType = tableConfig.getTableName();
  if (!idealState.isEnabled()) {
    LOGGER.info("Skipping validation for disabled table {}", tableNameWithType);
    return idealState;
  }
  final StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs());
  // Captured once so every new segment carries the same creation time.
  final long now = getCurrentTimeMs();
  PartitionAssignment partitionAssignment =
      _streamPartitionAssignmentGenerator.generateStreamPartitionAssignment(tableConfig, partitionCount);
  // All partitions [0, partitionCount) are new for a fresh table.
  Set<Integer> newPartitions = new HashSet<>(partitionCount);
  for (int partition = 0; partition < partitionCount; partition++) {
    newPartitions.add(partition);
  }
  OffsetCriteria offsetCriteria = streamConfig.getOffsetCriteria();
  Set<String> consumingSegments =
      setupNewPartitions(tableConfig, streamConfig, offsetCriteria, partitionAssignment, newPartitions, now);
  RealtimeSegmentAssignmentStrategy segmentAssignmentStrategy = new ConsumingSegmentAssignmentStrategy();
  Map<String, List<String>> assignments = segmentAssignmentStrategy.assign(consumingSegments, partitionAssignment);
  // No segments transition to ONLINE here (null), only new CONSUMING segments are added.
  updateIdealState(idealState, null, consumingSegments, assignments);
  return idealState;
}
private void ensureRealtimeClusterIsSetUp(TableConfig config, String realtimeTableName, IndexingConfig indexingConfig) { StreamConfig streamConfig = new StreamConfig(indexingConfig.getStreamConfigs()); IdealState idealState = _helixAdmin.getResourceIdealState(_helixClusterName, realtimeTableName);
private void updateRealtimeDocumentCount(TableConfig tableConfig) { String realtimeTableName = tableConfig.getTableName(); List<RealtimeSegmentZKMetadata> metadataList = _pinotHelixResourceManager.getRealtimeSegmentMetadata(realtimeTableName); boolean countHLCSegments = true; // false if this table has ONLY LLC segments (i.e. fully migrated) StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); if (streamConfig.hasLowLevelConsumerType() && !streamConfig.hasHighLevelConsumerType()) { countHLCSegments = false; } // Update the gauge to contain the total document count in the segments _validationMetrics.updateTotalDocumentCountGauge(tableConfig.getTableName(), computeRealtimeTotalDocumentInSegments(metadataList, countHLCSegments)); }
final int partitionCount) { final String tableNameWithType = tableConfig.getTableName(); final StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); if (!idealState.isEnabled()) { LOGGER.info("Skipping validation for disabled table {}", tableNameWithType);
final StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); final int partitionCount = getPartitionCount(streamConfig); HelixHelper.updateIdealState(_helixManager, tableNameWithType, new Function<IdealState, IdealState>() {
StreamConfig metadata = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); if (metadata.hasHighLevelConsumerType()) { idealStateMap.put(realtimeTableName, _pinotHelixResourceManager.getHelixAdmin()
if (TableNameBuilder.getTableTypeFromTableName(znRecordId) == TableType.REALTIME) { TableConfig tableConfig = TableConfig.fromZnRecord(tableConfigZnRecord); StreamConfig metadata = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); if (metadata.hasHighLevelConsumerType()) { String realtimeTable = tableConfig.getTableName();
/** * * @param tableConfig * @param emptyIdealState may contain HLC segments if both HLC and LLC are configured */ public void setupNewTable(TableConfig tableConfig, IdealState emptyIdealState) throws InvalidConfigException { final StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs()); int partitionCount = getPartitionCount(streamConfig); List<String> currentSegments = getExistingSegments(tableConfig.getTableName()); // Make sure that there are no low-level segments existing. if (currentSegments != null) { for (String segment : currentSegments) { if (!SegmentName.isHighLevelConsumerSegmentName(segment)) { // For now, we don't support re-creating the low-level realtime segments throw new RuntimeException("Low-level segments already exist for table " + tableConfig.getTableType()); } } } _flushThresholdUpdateManager.clearFlushThresholdUpdater(tableConfig); if (!isConnected()) { throw new RuntimeException( "Lost zk connection while setting up new table " + tableConfig.getTableName() + " isConnected=" + isConnected()); } IdealState idealState = setupTable(tableConfig, emptyIdealState, partitionCount); setTableIdealState(tableConfig.getTableName(), idealState); }
_streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs());
_partitionLevelStreamConfig = new PartitionLevelStreamConfig(indexingConfig.getStreamConfigs()); _streamConsumerFactory = StreamConsumerFactoryProvider.create(_partitionLevelStreamConfig); _streamTopic = _partitionLevelStreamConfig.getTopicName();
case PartitionAwareRealtime: StreamConfig streamConfig = new StreamConfig(tableConfig.getIndexingConfig().getStreamConfigs());