/**
 * DruidDimensionRowProvider fetches data from Druid and adds it to the dimension cache.
 * The dimensions to be loaded can be passed in as a parameter.
 *
 * @param physicalTableDictionary  The physical tables
 * @param dimensionDictionary  The dimension dictionary to load dimensions from.
 * @param dimensionsToLoad  The dimensions to use.
 * @param druidWebService  The druid webservice to query.
 */
public DruidDimensionValueLoader(
        PhysicalTableDictionary physicalTableDictionary,
        DimensionDictionary dimensionDictionary,
        List<String> dimensionsToLoad,
        DruidWebService druidWebService
) {
    this.dimensions = dimensionsToLoad.stream()
            .map(dimensionDictionary::findByApiName)
            // findByApiName yields null for unknown api names; drop them so later
            // iteration over the dimension set cannot throw a NullPointerException.
            .filter(dimension -> dimension != null)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    // Wrap every physical table as an unconstrained Druid table data source.
    this.dataSources = physicalTableDictionary.values().stream()
            .map(table -> table.withConstraint(DataSourceConstraint.unconstrained(table)))
            .map(TableDataSource::new)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    this.druidWebService = druidWebService;
}
/**
 * Builds a PartitionAvailability from a mapping of partition tables to their filters.
 *
 * @param dataSourceFilterMap  A map of part tables to filters
 *
 * @return The availability describing the partition
 */
private static Availability buildAvailability(Map<ConfigPhysicalTable, DataSourceFilter> dataSourceFilterMap) {
    // Re-key the map by each table's Availability before constructing the partition.
    Map<Availability, DataSourceFilter> availabilityFilters = dataSourceFilterMap.entrySet().stream()
            .collect(Collectors.toMap(entry -> entry.getKey().getAvailability(), Map.Entry::getValue));
    return new PartitionAvailability(availabilityFilters);
}
}
/**
 * Constructor.
 *
 * @param sourceTable  The table being constrained
 * @param constraint  The constraint being applied
 */
public ConstrainedTable(ConfigPhysicalTable sourceTable, DataSourceConstraint constraint) {
    this.sourceTable = sourceTable;
    this.constraint = constraint;

    // Bind the logical constraint to the source table's physical schema.
    PhysicalDataSourceConstraint physicalConstraint = new PhysicalDataSourceConstraint(
            constraint,
            sourceTable.getSchema()
    );
    Availability availability = sourceTable.getAvailability();

    availableIntervals = new SimplifiedIntervalList(
            availability.getAvailableIntervals(physicalConstraint)
    );
    // Snapshot the per-column availability map as an unmodifiable view.
    allAvailableIntervals = Collections.unmodifiableMap(
            mapToSchemaAvailability(availability.getAllAvailableIntervals(), getSchema())
    );
    dataSourceNames = Collections.unmodifiableSet(
            availability.getDataSourceNames(physicalConstraint)
    );
}
// NOTE(review): fragment — the enclosing method is outside this view.
// Report the slice's time grain name; Locale.ENGLISH keeps the lowercasing
// locale-independent for stable serialized output.
res.put(
        "timeGrain",
        e.getValue().getSchema().getTimeGrain().getName().toLowerCase(Locale.ENGLISH)
);
// Link back to the detail endpoint for this slice.
res.put("uri", SlicesServlet.getSliceDetailUrl(e.getKey(), uriInfo));
// NOTE(review): fragment — the enclosing expression continues beyond this view
// (the closing parenthesis of Sets.intersection is not visible here).
// For each candidate table, keep only the metric names its schema actually provides.
(ConfigPhysicalTable physicalTable) -> Sets.intersection(
        physicalTable.getSchema().getMetricColumnNames(),
        metricNames
/**
 * SqlDimensionValueLoader fetches data from Sql and adds it to the dimension cache.
 * The dimensions to be loaded can be passed in as a parameter.
 *
 * @param physicalTableDictionary  The physical tables
 * @param dimensionDictionary  The dimension dictionary to load dimensions from.
 * @param dimensionsToLoad  The dimensions to be loaded.
 * @param sqlBackedClient  The sql backed client.
 */
public SqlDimensionValueLoader(
        PhysicalTableDictionary physicalTableDictionary,
        DimensionDictionary dimensionDictionary,
        List<String> dimensionsToLoad,
        SqlBackedClient sqlBackedClient
) {
    this.dimensions = dimensionsToLoad.stream()
            .map(dimensionDictionary::findByApiName)
            // findByApiName yields null for unknown api names; drop them so later
            // iteration over the dimension set cannot throw a NullPointerException.
            .filter(dimension -> dimension != null)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    // Wrap every physical table as an unconstrained table data source.
    this.dataSources = physicalTableDictionary.values().stream()
            .map(table -> table.withConstraint(DataSourceConstraint.unconstrained(table)))
            .map(TableDataSource::new)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    this.sqlBackedClient = sqlBackedClient;
}
/**
 * Builds a PartitionAvailability from a mapping of partition tables to their filters.
 *
 * @param dataSourceFilterMap  A map of part tables to filters
 *
 * @return The availability describing the partition
 */
public static PartitionAvailability build(Map<ConfigPhysicalTable, DataSourceFilter> dataSourceFilterMap) {
    // Re-key the map by each table's Availability before constructing the partition.
    Map<Availability, DataSourceFilter> availabilityToFilter = dataSourceFilterMap.entrySet().stream()
            .collect(Collectors.toMap(entry -> entry.getKey().getAvailability(), Map.Entry::getValue));
    return new PartitionAvailability(availabilityToFilter);
}
/**
 * Builds a metric-union composite physical table from the configured dependent tables.
 *
 * @param dictionaries  The resource dictionaries used to resolve dimensions and tables
 * @param metadataService  The datasource metadata service (unused directly here;
 *         part of the builder interface contract)
 *
 * @return A BaseCompositePhysicalTable backed by a MetricUnionAvailability
 *
 * @throws IllegalArgumentException if the dependent metrics fail validation
 */
@Override
public ConfigPhysicalTable build(ResourceDictionaries dictionaries, DataSourceMetadataService metadataService) {
    try {
        Map<ConfigPhysicalTable, Set<String>> tableMetricNamesMap = getTableToMetricsMap(dictionaries);
        validateDependentMetrics(tableMetricNamesMap);
        return new BaseCompositePhysicalTable(
                getName(),
                getTimeGrain(),
                buildColumns(dictionaries.getDimensionDictionary()),
                tableMetricNamesMap.keySet(),
                getLogicalToPhysicalNames(),
                MetricUnionAvailability.build(
                        tableMetricNamesMap.keySet(),
                        // Re-key by each table's Availability for the union availability.
                        tableMetricNamesMap.entrySet().stream()
                                .collect(Collectors.toMap(
                                        entry -> entry.getKey().getAvailability(),
                                        Map.Entry::getValue
                                )))
        );
    } catch (IllegalArgumentException e) {
        String message = String.format(VALIDATION_ERROR_FORMAT, e.getMessage());
        // Log and rethrow with the original exception chained as the cause so the
        // underlying stack trace is not lost.
        LOG.error(message, e);
        throw new IllegalArgumentException(message, e);
    }
}
/**
 * Constructor.
 *
 * @param sourceTable  The table being constrained
 * @param queryPlanningConstraint  The constraint being applied
 */
public ConstrainedTable(ConfigPhysicalTable sourceTable, QueryPlanningConstraint queryPlanningConstraint) {
    this.sourceTable = sourceTable;
    this.constraint = queryPlanningConstraint;

    Availability availability = sourceTable.getAvailability();

    availableIntervals = new SimplifiedIntervalList(
            availability.getAvailableIntervals(queryPlanningConstraint)
    );
    // Snapshot the per-column availability map as an unmodifiable view.
    allAvailableIntervals = Collections.unmodifiableMap(
            mapToSchemaAvailability(availability.getAllAvailableIntervals(), getSchema())
    );
    dataSourceNames = Collections.unmodifiableSet(
            availability.getDataSourceNames(queryPlanningConstraint)
    );
}