/**
 * Construct a DefaultSqlAggregationType with a keyword to look for in
 * druid aggregation types, i.e. {"longSum", "doubleMin"}.
 *
 * @param sqlAggFunction  The aggregation function that should be performed.
 * @param aliases  The druid aggregation type name(s) that map to this sql aggregation.
 */
DefaultSqlAggregationType(SqlAggFunction sqlAggFunction, String... aliases) {
    this.sqlAggFunction = sqlAggFunction;
    this.validDruidAggregations = Utils.asLinkedHashSet(aliases);
}
/**
 * Construct the cache key.
 * The current implementation covers every field of the druid query except its context.
 *
 * @param druidQuery  The druid query to build a cache key for.
 *
 * @return The cache key as a String.
 * @throws JsonProcessingException if the druid query cannot be serialized to JSON
 */
protected String getKey(DruidAggregationQuery<?> druidQuery) throws JsonProcessingException {
    JsonNode queryTree = mapper.valueToTree(druidQuery);
    // Canonical field ordering (context stripped) keeps the key stable across serializations.
    Utils.canonicalize(queryTree, mapper, false);
    return writer.writeValueAsString(queryTree);
}
}
/** * Constructor. * * @param luceneIndexPath Path to the lucene index files * @param maxResults Maximum number of allowed results in a page * @param searchTimeout Maximum time in milliseconds that a lucene search can run */ public LuceneSearchProvider(String luceneIndexPath, int maxResults, int searchTimeout) { this.luceneIndexPath = luceneIndexPath; Utils.createParentDirectories(this.luceneIndexPath); this.maxResults = maxResults; this.searchTimeout = searchTimeout; try { luceneDirectory = new MMapDirectory(Paths.get(this.luceneIndexPath)); luceneIndexIsHealthy = true; } catch (IOException e) { luceneIndexIsHealthy = false; String message = ErrorMessageFormat.UNABLE_TO_CREATE_DIR.format(this.luceneIndexPath); LOG.error(message, e); } }
/**
 * Getter for set of columns by sub-type.
 *
 * @param columnClass  The class of columns to search
 * @param <T>  sub-type of Column to return
 *
 * @return Set of Columns
 */
default <T extends Column> LinkedHashSet<T> getColumns(Class<T> columnClass) {
    return Utils.getSubsetByType(getColumns(), columnClass);
}
/**
 * Build a context for a request.
 *
 * @param containerRequestContext  context from the http request object
 * @param readCache  true if the cache should be checked for a response
 */
public RequestContext(ContainerRequestContext containerRequestContext, boolean readCache) {
    this.containerRequestContext = containerRequestContext;
    this.readCache = readCache;
    // Lower-cased headers allow case-insensitive lookups later.
    if (containerRequestContext == null) {
        this.searchableHeaders = new MultivaluedHashMap<>();
    } else {
        this.searchableHeaders = Utils.headersToLowerCase(containerRequestContext.getHeaders());
    }
}
/**
 * Cleanup the existing instance.
 *
 * @param providerName  The name of the provider
 */
public static synchronized void removeInstance(String providerName) {
    String providerPath = getProviderPath(providerName);
    LUCENE_SEARCH_PROVIDERS.remove(providerName);
    Utils.deleteFiles(providerPath);
}
/**
 * Get a registered FeatureFlag by name.
 *
 * @param name  Name of the FeatureFlag to get
 *
 * @return The feature flag with the given name
 * @throws BadApiRequestException if no feature flag has been registered for that name
 */
public FeatureFlag forName(String name) throws BadApiRequestException {
    FeatureFlag flag = NAMES_TO_VALUES.get(name.toUpperCase(Locale.ENGLISH));
    if (flag == null) {
        return Utils.insteadThrowRuntime(new BadApiRequestException("Invalid feature flag: " + name));
    }
    return flag;
}
/**
 * Given a field name and a tree of json nodes, empty the contents of all the json nodes matching the field name.
 * This method is recursive.
 *
 * @param node  The root of the tree of json nodes.
 * @param fieldName  The name of the node to be omitted.
 * @param mapper  The object mapper that creates an empty node.
 *
 * @deprecated Should avoid this method and instead use {@link #canonicalize(JsonNode, ObjectMapper, boolean)}
 * which preserves JSON object ordering that guarantees consistent hash values.
 */
@Deprecated
public static void omitField(JsonNode node, String fieldName, ObjectMapper mapper) {
    // Bug fix: test for the requested field, not the hard-coded "context" field. Checking "context"
    // while replacing fieldName could silently ADD an empty fieldName node to objects that only had
    // a context, and skip objects that actually contained fieldName but no context.
    if (node.has(fieldName)) {
        ((ObjectNode) node).replace(fieldName, mapper.createObjectNode());
    }
    for (JsonNode child : node) {
        omitField(child, fieldName, mapper);
    }
}
.map(link -> new ImmutablePair<>(link.getBodyName(), link.getPage(pages))) .filter(pair -> pair.getRight().isPresent()) .map(pair -> Utils.withRight(pair, pair.getRight().getAsInt())) .map(pair -> Utils.withRight( pair, uriInfo.getRequestUriBuilder() .map(pair -> Utils.withRight(pair, pair.getRight().build())) .collect(StreamUtils.toLinkedMap(Pair::getLeft, Pair::getRight)); responseContext.put(ResponseContextKeys.PAGINATION_LINKS_CONTEXT_KEY.getName(), bodyLinks);
/**
 * Evaluate Druid query for expensive aggregation that could bring down Druid.
 *
 * @param query  Druid Query
 *
 * @return the weight evaluation query for the innermost query
 *         (NOTE(review): this never returns null, despite what earlier docs claimed — confirm callers agree)
 */
public static WeightEvaluationQuery makeWeightEvaluationQuery(DruidAggregationQuery<?> query) {
    // get inner-most query for evaluation
    DruidAggregationQuery<?> innerQuery = query.getInnermostQuery();
    // Weight is the count of sketch aggregations in the innermost query
    int weight = Utils.getSubsetByType(innerQuery.getAggregations(), SketchAggregation.class).size();
    return new WeightEvaluationQuery(innerQuery, weight);
}
@Override public RateLimitRequestToken getToken(ContainerRequestContext request) { MultivaluedMap<String, String> headers = Utils.headersToLowerCase(request.getHeaders());
/**
 * Get the Druid Metric Names by the logical table they should be present in.
 *
 * @param logicalTable  Logical table for which to get the DruidMetricNames
 *
 * @return the Druid metric names for that logical table
 */
public static Set<FieldName> getByLogicalTable(TestLogicalTableName logicalTable) {
    switch (logicalTable) {
        case SHAPES:
            return Utils.asLinkedHashSet(HEIGHT, WIDTH, DEPTH, USERS);
        case PETS:
        case MONTHLY:
        case HOURLY:
            return Utils.asLinkedHashSet(LIMBS);
        default:
            return Collections.<FieldName>emptySet();
    }
}
}
Set<String> metricNames = Utils.getSubsetByType(columns, MetricColumn.class).stream() .map(MetricColumn::getName) .collect(Collectors.collectingAndThen(Collectors.toSet(), ImmutableSet::copyOf));
/**
 * Construct the cache key.
 * The current implementation covers every field of the druid query except its context.
 *
 * @param druidQuery  The druid query to build a cache key for.
 *
 * @return The cache key as a String.
 * @throws JsonProcessingException if the druid query cannot be serialized to JSON
 */
private String getKey(DruidAggregationQuery<?> druidQuery) throws JsonProcessingException {
    JsonNode queryTree = mapper.valueToTree(druidQuery);
    // Canonical field ordering (context stripped) keeps the key stable across serializations.
    Utils.canonicalize(queryTree, mapper, false);
    return writer.writeValueAsString(queryTree);
}
}
/**
 * Get the Druid Metric Names by the logical table they should be present in.
 *
 * @param logicalTable  Logical table for which to get the DruidMetricNames
 *
 * @return the Druid metric names for that logical table
 */
public static Set<FieldName> getByLogicalTable(TestLogicalTableName logicalTable) {
    switch (logicalTable) {
        case SHAPES:
            return Utils.asLinkedHashSet(HEIGHT, WIDTH, DEPTH, USERS);
        case PETS:
        case MONTHLY:
        case HOURLY:
            // PETS, MONTHLY and HOURLY all expose the same single metric
            return Utils.asLinkedHashSet(LIMBS);
        default:
            return Collections.<FieldName>emptySet();
    }
}
}
DruidAggregationQuery<?> innerQuery = query.getInnermostQuery(); int sketchWeight = Utils.getSubsetByType(innerQuery.getAggregations(), SketchAggregation.class).size(); if (sketchWeight == 0) { return 0;
/**
 * Construct the cache key.
 * The current implementation covers every field of the druid query except its context.
 *
 * @param druidQuery  The druid query to build a cache key for.
 *
 * @return The cache key as a String.
 * @throws JsonProcessingException if the druid query cannot be serialized to JSON
 */
protected String getKey(DruidAggregationQuery<?> druidQuery) throws JsonProcessingException {
    JsonNode queryTree = mapper.valueToTree(druidQuery);
    // Canonical field ordering (context stripped) keeps the key stable across serializations.
    Utils.canonicalize(queryTree, mapper, false);
    return writer.writeValueAsString(queryTree);
}
}
/**
 * Get the set of ApiDimensionNames for the logical table, by name.
 *
 * @param logicalTable  Name of the logical table
 *
 * @return the set of its dimension names
 */
public static Set<TestApiDimensionName> getByLogicalTable(TestLogicalTableName logicalTable) {
    switch (logicalTable) {
        case SHAPES:
            return Utils.asLinkedHashSet(SIZE, SHAPE, COLOR, OTHER, MODEL);
        case PETS:
            return Utils.asLinkedHashSet(SPECIES, BREED, SEX);
        case MONTHLY:
        case HOURLY:
        case HOURLY_MONTHLY:
            return Utils.asLinkedHashSet(OTHER);
        default:
            return Collections.<TestApiDimensionName>emptySet();
    }
}
}
/** * Given a JsonObjectNode, order the fields and recursively and replace context blocks with empty nodes. * * This method is recursive. * * @param node The root of the tree of json nodes. * @param mapper The object mapper that creates and empty node. * @param preserveContext Boolean indicating whether context should be omitted. */ public static void canonicalize(JsonNode node, ObjectMapper mapper, boolean preserveContext) { if (node.isObject()) { ObjectNode objectNode = ((ObjectNode) node); if (objectNode.has("context") && !preserveContext) { objectNode.replace("context", mapper.createObjectNode()); } Iterator<Map.Entry<String, JsonNode>> iterator = objectNode.fields(); // collect and sort the entries TreeMap<String, JsonNode> fieldMap = new TreeMap<>(); while (iterator.hasNext()) { Map.Entry<String, JsonNode> entry = iterator.next(); fieldMap.put(entry.getKey(), entry.getValue()); // canonicalize all child nodes canonicalize(entry.getValue(), mapper, preserveContext); } // remove the existing entries objectNode.removeAll(); // replace the entries in sorted order objectNode.setAll(fieldMap); } }
/**
 * Get the set of ApiDimensionNames for the logical table, by name.
 *
 * @param logicalTable  Name of the logical table
 *
 * @return the set of its dimension names
 */
public static Set<TestApiDimensionName> getByLogicalTable(TestLogicalTableName logicalTable) {
    switch (logicalTable) {
        case SHAPES:
            return Utils.asLinkedHashSet(SIZE, SHAPE, COLOR, OTHER, MODEL);
        case PETS:
            return Utils.asLinkedHashSet(SPECIES, BREED, SEX);
        case MONTHLY:
        case HOURLY:
        case HOURLY_MONTHLY:
            // All time-grain tables share the single OTHER dimension
            return Utils.asLinkedHashSet(OTHER);
        default:
            return Collections.<TestApiDimensionName>emptySet();
    }
}
}