/**
 * Writes a JSON field only when its value is present.
 *
 * @param generator JSON generator to write to
 * @param fieldName name of the JSON field
 * @param o value to serialize; the field is skipped entirely when null
 * @throws IOException if the generator fails to write
 */
protected static void writeFieldIf(JsonGenerator generator, String fieldName,
    Object o) throws IOException {
  if (o == null) {
    return; // omit absent optional fields from the output
  }
  writeField(generator, fieldName, o);
}
/** Serializes this dimension spec as a Druid "extraction" JSON object. */
@Override public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", "extraction");
  generator.writeStringField("dimension", dimension);
  // "outputName" is optional in Druid's spec; it is omitted when absent.
  writeFieldIf(generator, "outputName", outputName);
  writeField(generator, "extractionFn", extractionFunction);
  generator.writeEndObject();
}
/**
 * Writes this spec as a Druid extraction-dimension JSON object:
 * type, dimension, optional output name, and the extraction function.
 */
@Override public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", "extraction");
  generator.writeStringField("dimension", dimension);
  // Only emit the output name when one was assigned.
  writeFieldIf(generator, "outputName", outputName);
  writeField(generator, "extractionFn", extractionFunction);
  generator.writeEndObject();
}
/**
 * Conditionally writes a JSON field: the field is emitted only when the
 * supplied value is non-null, keeping optional sections out of the output.
 *
 * @throws IOException if writing to the generator fails
 */
protected static void writeFieldIf(JsonGenerator generator, String fieldName,
    Object o) throws IOException {
  final boolean present = o != null;
  if (present) {
    writeField(generator, fieldName, o);
  }
}
/** Serializes this dimension filter as a Druid JSON object. */
public void write(JsonGenerator generator) throws IOException {
  // Druid expects the filter type in lower case (e.g. "in").
  final String typeName = type.lowercase();
  generator.writeStartObject();
  generator.writeStringField("type", typeName);
  generator.writeStringField("dimension", dimension);
  writeField(generator, "values", values);
  // The extraction function is optional and skipped when absent.
  writeFieldIf(generator, "extractionFn", extractionFunction);
  generator.writeEndObject();
}
}
/** Serializes this filtered aggregation as a Druid JSON object. */
@Override public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", type);
  // The wrapping filter and the delegate aggregator are nested sub-objects.
  writeField(generator, "filter", filter);
  writeField(generator, "aggregator", aggregation);
  generator.writeEndObject();
}
}
/**
 * Writes this filter as a Druid JSON object; the type name is lower-cased
 * as required by Druid's native query grammar.
 */
public void write(JsonGenerator generator) throws IOException {
  final String typeName = type.lowercase();
  generator.writeStartObject();
  generator.writeStringField("type", typeName);
  generator.writeStringField("dimension", dimension);
  DruidQuery.writeField(generator, "values", values);
  // Optional extraction function — omitted when null.
  DruidQuery.writeFieldIf(generator, "extractionFn", extractionFunction);
  generator.writeEndObject();
}
}
/**
 * Emits this filtered aggregation in Druid's JSON form: the aggregation type,
 * the filter that restricts its input rows, and the wrapped aggregator.
 */
@Override public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", type);
  writeField(generator, "filter", filter);
  writeField(generator, "aggregator", aggregation);
  generator.writeEndObject();
}
}
/**
 * Renders this query as a Druid native "scan" query in JSON form.
 *
 * <p>Optional sections (filter, virtual columns, limit) are omitted when
 * absent so the output matches what Druid expects.
 *
 * @return the JSON scan query as a string
 */
@Nonnull public String toQuery() {
  final StringWriter sw = new StringWriter();
  final JsonFactory factory = new JsonFactory();
  // try-with-resources: the previous code leaked the generator (and its
  // buffered output) if any write threw before the explicit close().
  try (JsonGenerator generator = factory.createGenerator(sw)) {
    generator.writeStartObject();
    generator.writeStringField("queryType", "scan");
    generator.writeStringField("dataSource", dataSource);
    writeField(generator, "intervals", intervals);
    writeFieldIf(generator, "filter", jsonFilter);
    writeFieldIf(generator, "virtualColumns",
        virtualColumnList.isEmpty() ? null : virtualColumnList);
    writeField(generator, "columns", columns);
    generator.writeStringField("resultFormat", "compactedList");
    if (fetchLimit != null) {
      generator.writeNumberField("limit", fetchLimit);
    }
    generator.writeEndObject();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return sw.toString();
}
}
/** Serializes this "filter" wrapper as a Druid JSON object. */
@Override public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", "filter");
  // Delegate serialization of the wrapped filter to the shared helper.
  DruidQuery.writeField(generator, "filter", filter);
  generator.writeEndObject();
}
}
/**
 * Generates a Druid native "groupBy" query in JSON form.
 *
 * @param groupByKeyDims dimension specs for the GROUP BY keys
 * @param jsonFilter row filter, or null for none
 * @param virtualColumnList virtual columns; omitted from the JSON when empty
 * @param aggregations aggregations to compute per group
 * @param postAggregations post-aggregation expressions; omitted when empty
 * @param limit limit/sort spec, or null for none
 * @param havingFilter HAVING filter, or null for none
 * @return the JSON groupBy query as a string
 */
@Nullable private String planAsGroupBy(List<DimensionSpec> groupByKeyDims,
    DruidJsonFilter jsonFilter, List<VirtualColumn> virtualColumnList,
    List<JsonAggregation> aggregations,
    List<JsonExpressionPostAgg> postAggregations, JsonLimit limit,
    DruidJsonFilter havingFilter) {
  final StringWriter sw = new StringWriter();
  final JsonFactory factory = new JsonFactory();
  // try-with-resources: the previous code leaked the generator when a write
  // threw before the explicit close().
  try (JsonGenerator generator = factory.createGenerator(sw)) {
    generator.writeStartObject();
    generator.writeStringField("queryType", "groupBy");
    generator.writeStringField("dataSource", druidTable.dataSource);
    writeField(generator, "granularity", Granularities.all());
    writeField(generator, "dimensions", groupByKeyDims);
    // Optional sections are skipped entirely when empty/absent.
    writeFieldIf(generator, "virtualColumns",
        virtualColumnList.isEmpty() ? null : virtualColumnList);
    writeFieldIf(generator, "limitSpec", limit);
    writeFieldIf(generator, "filter", jsonFilter);
    writeField(generator, "aggregations", aggregations);
    writeFieldIf(generator, "postAggregations",
        postAggregations.isEmpty() ? null : postAggregations);
    writeField(generator, "intervals", intervals);
    // HAVING is expressed as a Druid dim-having filter wrapper.
    writeFieldIf(generator, "having",
        havingFilter == null ? null
            : new DruidJsonFilter.JsonDimHavingFilter(havingFilter));
    generator.writeEndObject();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return sw.toString();
}
/**
 * Serializes this composite filter as a Druid JSON object. Druid's "not"
 * filter takes a single operand named "field"; the other composites take a
 * "fields" array.
 */
public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", type.lowercase());
  switch (type) {
  case NOT:
    // "not" wraps exactly one child filter.
    writeField(generator, "field", fields.get(0));
    break;
  default:
    writeField(generator, "fields", fields);
    break;
  }
  generator.writeEndObject();
}
}
// NOTE(review): fragment of a "topN" query serialization — the enclosing
// method's signature and the surrounding statements are outside this view.
writeField(generator, "granularity", Granularities.all());
// topN groups on exactly one dimension, hence get(0).
writeField(generator, "dimension", groupByKeyDims.get(0));
// Virtual columns are optional and omitted when the list is empty.
writeFieldIf(generator, "virtualColumns", virtualColumnList.size() > 0 ? virtualColumnList : null);
// "metric" names the column by which the top N rows are ranked.
generator.writeStringField("metric", topNMetricColumnName);
writeFieldIf(generator, "filter", jsonFilter);
writeField(generator, "aggregations", aggregations);
writeFieldIf(generator, "postAggregations", postAggregations.size() > 0 ? postAggregations : null);
writeField(generator, "intervals", intervals);
// "threshold" is topN's row cap — presumably derived from the SQL LIMIT.
generator.writeNumberField("threshold", limit.limit);
generator.writeEndObject();
/**
 * Writes this composite filter in Druid JSON form. A NOT filter carries a
 * single operand under "field"; AND/OR carry an operand list under "fields".
 */
public void write(JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField("type", type.lowercase());
  switch (type) {
  case NOT:
    DruidQuery.writeField(generator, "field", fields.get(0));
    break;
  default:
    DruidQuery.writeField(generator, "fields", fields);
    break;
  }
  generator.writeEndObject();
}
}
// NOTE(review): fragment of a "timeseries" query serialization — the
// enclosing method's header and the remainder (the "context" value and the
// closing of the object) are outside this view.
generator.writeBooleanField("descending", sortDirection != null && sortDirection.equals("descending"));
writeField(generator, "granularity", timeseriesGranularity);
writeFieldIf(generator, "filter", jsonFilter);
writeField(generator, "aggregations", aggregations);
// Optional sections are dropped when empty.
writeFieldIf(generator, "virtualColumns", virtualColumnList.size() > 0 ? virtualColumnList : null);
writeFieldIf(generator, "postAggregations", postAggregations.size() > 0 ? postAggregations : null);
writeField(generator, "intervals", intervals);
// The "context" object's value is written by code following this fragment.
generator.writeFieldName("context");
@Override public void write(JsonGenerator generator) throws IOException { super.write(generator); // Druid spec for ThetaSketchEstimate requires a field accessor writeField(generator, "field", new JsonFieldAccessor("", fieldName)); generator.writeEndObject(); } }
// NOTE(review): this fragment concatenates statements that appear to belong
// to several distinct query-serialization branches (timeseries, then topN,
// then groupBy) of one large method whose control flow is outside this view
// — verify the branch boundaries against the full file before editing.
// --- timeseries-style section ---
generator.writeBooleanField("descending", timeSeriesDirection != null && timeSeriesDirection == Direction.DESCENDING);
writeField(generator, "granularity", finalGranularity);
writeFieldIf(generator, "filter", jsonFilter);
writeField(generator, "aggregations", aggregations);
writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
writeField(generator, "intervals", intervals);
// --- topN-style section: one dimension, a ranking metric, a threshold ---
writeField(generator, "granularity", finalGranularity);
writeField(generator, "dimension", dimensions.get(0));
generator.writeStringField("metric", fieldNames.get(collationIndexes.get(0)));
writeFieldIf(generator, "filter", jsonFilter);
writeField(generator, "aggregations", aggregations);
writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
writeField(generator, "intervals", intervals);
generator.writeNumberField("threshold", fetch);
// --- groupBy-style section ---
generator.writeStringField("queryType", "groupBy");
generator.writeStringField("dataSource", druidTable.dataSource);
writeField(generator, "granularity", finalGranularity);
writeField(generator, "dimensions", dimensions);
writeFieldIf(generator, "limitSpec", limit);
writeFieldIf(generator, "filter", jsonFilter);
writeField(generator, "aggregations", aggregations);
writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
writeField(generator, "intervals", intervals);
// "having" is always null here, so writeFieldIf emits nothing.
writeFieldIf(generator, "having", null);
generator.writeStringField("dataSource", druidTable.dataSource);