/**
 * Return the schema of the underlying source table.
 *
 * <p>Constraining narrows availability only; the schema itself is unchanged, so this
 * simply delegates to the source table.
 *
 * @return the source table's physical schema
 */
@Override
public PhysicalTableSchema getSchema() {
    return getSourceTable().getSchema();
}
/**
 * Return a view of the available intervals for the original source table given a constraint.
 *
 * <p>If the requested constraint is the one this table was built with, the precomputed
 * intervals are returned; otherwise the request is delegated to the source table.
 *
 * @param constraint  The constraint which limits available intervals
 *
 * @return The intervals that the table can report on
 */
@Override
public SimplifiedIntervalList getAvailableIntervals(DataSourceConstraint constraint) {
    return getConstraint().equals(constraint)
            ? getAvailableIntervals()
            : getSourceTable().getAvailableIntervals(constraint);
}
/**
 * Return the names of the dataSources which back the original source table given a constraint.
 *
 * <p>If the requested constraint is the one this table was built with, the precomputed
 * name set is returned; otherwise the request is delegated to the source table.
 *
 * @param constraint  A constraint which may narrow the data sources participating.
 *
 * @return A set of names for backing dataSources
 */
@Override
public Set<DataSourceName> getDataSourceNames(DataSourceConstraint constraint) {
    return getConstraint().equals(constraint)
            ? getDataSourceNames()
            : getSourceTable().getDataSourceNames(constraint);
}
/**
 * Constructor.
 *
 * <p>Availability is snapshotted eagerly: the constrained intervals, the per-column
 * availability map, and the participating data source names are all captured here.
 *
 * @param sourceTable  The table being constrained
 * @param queryPlanningConstraint  The constraint being applied
 */
public ConstrainedTable(ConfigPhysicalTable sourceTable, QueryPlanningConstraint queryPlanningConstraint) {
    this.constraint = queryPlanningConstraint;
    this.sourceTable = sourceTable;

    Availability availability = sourceTable.getAvailability();

    // Data sources that participate under this constraint
    dataSourceNames = Collections.unmodifiableSet(
            availability.getDataSourceNames(constraint)
    );
    // Intervals the table can answer for under this constraint
    availableIntervals = new SimplifiedIntervalList(
            availability.getAvailableIntervals(constraint)
    );
    // Per-column availability, keyed by this table's schema
    allAvailableIntervals = Collections.unmodifiableMap(
            mapToSchemaAvailability(
                    availability.getAllAvailableIntervals(),
                    getSchema()
            )
    );
}
/**
 * Gets the timezone of the backing table for the given druid query.
 *
 * <p>The zone is read off the time grain of the physical table's schema, so the
 * query's data source must be backed by a physical table with a zoned time grain.
 *
 * @param druidQuery  The druid query to find the timezone for
 *
 * @return the {@link DateTimeZone} of the physical table for this query.
 */
private DateTimeZone getTimeZone(DruidAggregationQuery<?> druidQuery) {
    return druidQuery.getDataSource()
            .getPhysicalTable()
            .getSchema()
            .getTimeGrain()
            .getTimeZone();
}
/**
 * Returns a set of identifiers used by Fili to identify this data source's physical tables.
 *
 * <p>Insertion order of the underlying data source names is preserved, and the returned
 * set is unmodifiable.
 *
 * @return The set of names used by Fili to identify this data source's physical tables
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
public Set<String> getNames() {
    Set<String> names = getPhysicalTable().getDataSourceNames().stream()
            .map(DataSourceName::asName)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    return Collections.unmodifiableSet(names);
}
query.getDataSource().getPhysicalTable().getAvailableIntervals() );
/**
 * Checks if a {@link Dimension} exists in a {@link DataSource}.
 *
 * <p>Membership is decided by the candidate dimension's own {@code equals} against each
 * dimension of the data source's physical table.
 *
 * @param dimension  The dimension to look for in the datasource.
 * @param dataSource  The datasource to look through for the dimension.
 *
 * @return true if the dimension was found.
 */
default boolean dimensionExistsInDataSource(Dimension dimension, DataSource dataSource) {
    return dataSource.getPhysicalTable()
            .getDimensions()
            .stream()
            .anyMatch(tableDimension -> dimension.equals(tableDimension));
}
/**
 * Create a constrained copy of this table.
 *
 * <p>The constraint is first validated against this table's schema, then bound to the
 * schema as a physical constraint before the constrained view is built.
 *
 * @param constraint  The dataSourceConstraint which narrows the view of the underlying availability
 *
 * @return a constrained table whose availability and serialization are narrowed by this constraint
 */
@Override
public ConstrainedTable withConstraint(DataSourceConstraint constraint) {
    validateConstraintSchema(constraint);
    PhysicalDataSourceConstraint physicalConstraint = new PhysicalDataSourceConstraint(constraint, getSchema());
    return new ConstrainedTable(this, physicalConstraint);
}
/** * JSON tree walk up to physical table to retrieve physical name for a dimension. * * @param value the dimension to retrieve api name. * @param gen the Json Generator to retrieve the tree to walk on. * * @return an Optional String of physical name */ public static Optional<String> findPhysicalName(Dimension value, JsonGenerator gen) { String apiName = value.getApiName(); // Search for physical name return mapNearestDruidQuery( gen, druidQuery -> druidQuery.getDataSource().getPhysicalTable().getPhysicalColumnName(apiName) ); }
/**
 * Attach the volatile intervals for this query's physical table to the response context,
 * then delegate to the next handler in the chain.
 *
 * <p>Requires a {@link MappingResponseProcessor}; any other response type is a
 * programming error and fails fast.
 */
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Volatile data request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Gather the volatile intervals. A volatile interval in one data source makes that interval volatile overall.
    // The physical table is looked up by the name of the innermost query's backing table.
    SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals(
            druidQuery.getGranularity(),
            druidQuery.getIntervals(),
            physicalTableDictionary.get(
                    druidQuery
                            .getInnermostQuery()
                            .getDataSource()
                            .getPhysicalTable()
                            .getName()
            )
    );

    // Only record volatility when there is something volatile to report
    if (!volatileIntervals.isEmpty()) {
        ResponseContext responseContext = response.getResponseContext();
        responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals);
    }

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
/**
 * Constructor.
 *
 * <p>The plain data source constraint is bound to the source table's schema as a
 * {@link PhysicalDataSourceConstraint}, and availability is snapshotted eagerly.
 *
 * @param sourceTable  The table being constrained
 * @param constraint  The constraint being applied
 */
public ConstrainedTable(ConfigPhysicalTable sourceTable, DataSourceConstraint constraint) {
    this.constraint = constraint;
    this.sourceTable = sourceTable;

    Availability availability = sourceTable.getAvailability();
    // Bind the logical constraint to the physical schema before querying availability
    PhysicalDataSourceConstraint physicalConstraint = new PhysicalDataSourceConstraint(
            constraint,
            sourceTable.getSchema()
    );

    // Data sources that participate under this constraint
    dataSourceNames = Collections.unmodifiableSet(
            availability.getDataSourceNames(physicalConstraint)
    );
    // Intervals the table can answer for under this constraint
    availableIntervals = new SimplifiedIntervalList(
            availability.getAvailableIntervals(physicalConstraint)
    );
    // Per-column availability, keyed by this table's schema
    allAvailableIntervals = Collections.unmodifiableMap(
            mapToSchemaAvailability(
                    availability.getAllAvailableIntervals(),
                    getSchema()
            )
    );
}
ApiToFieldMapper aliasMaker = new ApiToFieldMapper(druidQuery.getDataSource().getPhysicalTable().getSchema());
/**
 * Build a data source from a table.
 *
 * <p>A single backing data source maps to a plain table datasource; multiple backing
 * data sources map to a union.
 *
 * @param table  A fact table or fact table view
 *
 * @return A table datasource for a fact table or a union data source for a fact table view
 */
private DataSource buildTableDataSource(ConstrainedTable table) {
    return table.getDataSourceNames().size() == 1
            ? new TableDataSource(table)
            : new UnionDataSource(table);
}
druidQuery.getInnermostQuery().getDataSource().getPhysicalTable().getAvailableIntervals(), new SimplifiedIntervalList(request.getIntervals()), request.getGranularity()
/**
 * Create a constrained copy of the source table.
 *
 * <p>Constraining an already constrained table re-constrains the original source table
 * rather than stacking constraints on this view.
 *
 * @param constraint  The dataSourceConstraint which narrows the view of the underlying availability
 *
 * @return a constrained table whose availability and serialization are narrowed by this constraint
 */
@Override
public ConstrainedTable withConstraint(DataSourceConstraint constraint) {
    return getSourceTable().withConstraint(constraint);
}
}
/** * Constructor. * * @param physicalTable The physical table of the data source. It must have only 1 backing data source. */ public TableDataSource(ConstrainedTable physicalTable) { super(DefaultDataSourceType.TABLE, physicalTable); if (physicalTable.getDataSourceNames().size() > 1) { LOG.error(TOO_MANY_BACKING_DATA_SOURCES.logFormat(getPhysicalTable())); throw new IllegalArgumentException(TOO_MANY_BACKING_DATA_SOURCES.format(getPhysicalTable())); } }
/**
 * Return the name of the underlying source table; constraining does not rename the table.
 *
 * @return the source table's name
 */
@Override
public String getName() {
    return getSourceTable().getName();
}
/**
 * Return the name of the single data source backing this datasource's physical table.
 *
 * <p>Fails fast if the physical table reports no backing data sources at all.
 *
 * @return the name of the first (and only expected) backing data source
 */
public String getName() {
    return getPhysicalTable().getDataSourceNames().stream()
            .findFirst()
            .map(dataSourceName -> dataSourceName.asName())
            .orElseThrow(() -> {
                LOG.error(TOO_FEW_BACKING_DATA_SOURCES.logFormat(getPhysicalTable()));
                return new IllegalArgumentException(TOO_FEW_BACKING_DATA_SOURCES.format(getPhysicalTable()));
            });
}
/**
 * Translate a logical column name to its physical column name via the source table.
 *
 * @param logicalName  the logical (api) column name to translate
 *
 * @return the physical column name used by the backing data source
 */
@Override
public String getPhysicalColumnName(String logicalName) {
    return getSourceTable().getPhysicalColumnName(logicalName);
}