/** Reads the parser's next token and checks that it is the expected one. */
private void expect(JsonParser parser, JsonToken token) throws IOException {
  expect(parser.nextToken(), token);
}
/** Parses fields into the row builder until the current JSON object ends. */
private void parseFields(List<String> fieldNames, List<ColumnMetaData.Rep> fieldTypes,
    int posTimestampField, Row.RowBuilder rowBuilder, JsonParser parser)
    throws IOException {
  while (parser.nextToken() == JsonToken.FIELD_NAME) {
    parseField(fieldNames, fieldTypes, posTimestampField, rowBuilder, parser);
  }
}
private void parseField(List<String> fieldNames, List<ColumnMetaData.Rep> fieldTypes,
    int posTimestampField, Row.RowBuilder rowBuilder, JsonParser parser)
    throws IOException {
  final String fieldName = parser.getCurrentName();
  parseFieldForName(fieldNames, fieldTypes, posTimestampField, rowBuilder, parser,
      fieldName);
}
// Inside parse(...): the switch over queryType walks each query type's
// response shape.
case TIMESERIES:
  if (parser.nextToken() == JsonToken.START_ARRAY) {
    while (parser.nextToken() == JsonToken.START_OBJECT) {
      final Long timeValue = extractTimestampField(parser);
      if (parser.nextToken() == JsonToken.FIELD_NAME
          && parser.getCurrentName().equals("result")
          && parser.nextToken() == JsonToken.START_OBJECT) {
        if (posTimestampField != -1) {
          rowBuilder.set(posTimestampField, timeValue);
        }
        parseFields(fieldNames, fieldTypes, posTimestampField, rowBuilder, parser);
        sink.send(rowBuilder.build());
        rowBuilder.reset();
      }
      expect(parser, JsonToken.END_OBJECT);
    }
  }
  break;

case TOP_N:
  if (parser.nextToken() == JsonToken.START_ARRAY
      && parser.nextToken() == JsonToken.START_OBJECT) {
    final Long timeValue = extractTimestampField(parser);
    if (parser.nextToken() == JsonToken.FIELD_NAME
        && parser.getCurrentName().equals("result")
        && parser.nextToken() == JsonToken.START_ARRAY) {
      while (parser.nextToken() == JsonToken.START_OBJECT) {
        if (posTimestampField != -1) {
          rowBuilder.set(posTimestampField, timeValue);
        }
        parseFields(fieldNames, fieldTypes, posTimestampField, rowBuilder, parser);
        sink.send(rowBuilder.build());
        rowBuilder.reset();
      }
    }
  }
  break;

case SELECT:
  if (parser.nextToken() == JsonToken.START_ARRAY
      && parser.nextToken() == JsonToken.START_OBJECT) {
    page.pagingIdentifier = null;
    page.offset = -1;
    page.totalRowCount = 0;
    expectScalarField(parser, DEFAULT_RESPONSE_TIMESTAMP_COLUMN);
    if (parser.nextToken() == JsonToken.FIELD_NAME
        && parser.getCurrentName().equals("result")
        && parser.nextToken() == JsonToken.START_OBJECT) {
      while (parser.nextToken() == JsonToken.FIELD_NAME) {
        if (parser.getCurrentName().equals("pagingIdentifiers")
            && parser.nextToken() == JsonToken.START_OBJECT) {
          // Remember the paging identifier and offset, so the next request
          // can resume where this page left off.
          JsonToken token = parser.nextToken();
          while (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
            page.pagingIdentifier = parser.getCurrentName();
            if (parser.nextToken() == JsonToken.VALUE_NUMBER_INT) {
              page.offset = parser.getIntValue();
            }
            token = parser.nextToken();
          }
          expect(token, JsonToken.END_OBJECT);
        } else if (parser.getCurrentName().equals("events")
            && parser.nextToken() == JsonToken.START_ARRAY) {
          // (the per-event parsing continues beyond this excerpt)
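For orientation, the three response shapes these branches walk look roughly as follows (an illustrative sketch; the metric and dimension names are invented, only "timestamp", "result", "pagingIdentifiers" and "events" are fixed by Druid's native query API):

// TIMESERIES: [ {"timestamp": "2015-09-12T00:00:00.000Z",
//                "result": {"added": 9.0, "count": 2}}, ... ]
// TOP_N:      [ {"timestamp": "2015-09-12T00:00:00.000Z",
//                "result": [{"page": "Foo", "count": 40}, ...]} ]
// SELECT:     [ {"timestamp": "2015-09-12T00:00:00.000Z",
//                "result": {"pagingIdentifiers": {"wiki_2015-09-12_v1": 9},
//                           "events": [{"segmentId": "...", "offset": 0,
//                                       "event": {...}}, ...]}} ]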
@Override protected Map<String, Table> getTableMap() {
  if (!discoverTables) {
    return ImmutableMap.of();
  }
  if (tableMap == null) {
    final DruidConnectionImpl connection =
        new DruidConnectionImpl(url, coordinatorUrl);
    Set<String> tableNames = connection.tableNames();
    // Back the fixed set of table names with a cache, so each table is
    // created lazily and at most once. A LoadingCache is not itself a
    // Function, so the ::getUnchecked method reference adapts it for
    // Maps.asMap.
    tableMap = Maps.asMap(
        ImmutableSet.copyOf(tableNames),
        CacheBuilder.newBuilder()
            .build(CacheLoader.from(name -> table(name, connection)))
            ::getUnchecked);
  }
  return tableMap;
}
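The caching pattern above in isolation, as a minimal standalone sketch (class, key set, and values here are invented for illustration):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import java.util.Map;

public class LazyMapDemo {
  public static void main(String[] args) {
    // Values are computed on first access and memoized thereafter.
    LoadingCache<String, String> cache = CacheBuilder.newBuilder()
        .build(CacheLoader.from(name -> "table:" + name));
    Map<String, String> lazy =
        Maps.asMap(ImmutableSet.of("wikipedia", "foodmart"), cache::getUnchecked);
    System.out.println(lazy.get("wikipedia")); // computes "table:wikipedia"
    System.out.println(lazy.get("wikipedia")); // served from the cache
  }
}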
public void run() throws InterruptedException {
  final List<ColumnMetaData.Rep> fieldTypes = new ArrayList<>();
  for (RelDataTypeField field : query.getRowType().getFieldList()) {
    fieldTypes.add(getPrimitive(field));
  }
  final DruidConnectionImpl connection =
      new DruidConnectionImpl(query.druidTable.schema.url,
          query.druidTable.schema.coordinatorUrl);
  final boolean limitQuery = containsLimit(querySpec);
  final DruidConnectionImpl.Page page = new DruidConnectionImpl.Page();
  // Issue one request per page, resuming from the paging identifier returned
  // by the previous page, until a page comes back empty or the query carries
  // its own limit.
  do {
    final String queryString =
        querySpec.getQueryString(page.pagingIdentifier, page.offset);
    connection.request(querySpec.queryType, queryString, sink,
        querySpec.fieldNames, fieldTypes, page);
  } while (!limitQuery
      && page.pagingIdentifier != null
      && page.totalRowCount > 0);
}
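A plausible shape for the Page holder threaded through this loop (a hedged sketch; the real class may carry more state): it records where the next select query should resume.

/** Progress through a paged Druid select query (assumed field set). */
static class Page {
  String pagingIdentifier;  // segment identifier returned by the last page
  int offset = -1;          // offset within that segment
  int totalRowCount;        // rows seen on the last page; 0 means exhausted
}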
/** Executes a query request.
 *
 * @param queryType Query type
 * @param data Data to post
 * @param sink Sink to which to send the parsed rows
 * @param fieldNames Names of fields
 * @param fieldTypes Types of fields (never null, but elements may be null)
 * @param page Page definition (in/out)
 */
public void request(QueryType queryType, String data, Sink sink,
    List<String> fieldNames, List<ColumnMetaData.Rep> fieldTypes, Page page) {
  final String url = this.url + "/druid/v2/?pretty";
  final Map<String, String> requestHeaders =
      ImmutableMap.of("Content-Type", "application/json");
  if (CalcitePrepareImpl.DEBUG) {
    System.out.println(data);
  }
  // 10-second connect timeout, 30-minute read timeout
  try (InputStream in0 = post(url, data, requestHeaders, 10000, 1800000);
       InputStream in = traceResponse(in0)) {
    parse(queryType, in, sink, fieldNames, fieldTypes, page);
  } catch (IOException e) {
    throw new RuntimeException("Error while processing druid request ["
        + data + "]", e);
  }
}
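The post helper is not shown in this excerpt; a minimal sketch of what such a method could look like (an assumption, not necessarily the adapter's actual implementation): open an HTTP connection with the given connect/read timeouts, write the JSON body, and return the response stream.

// Requires: java.io.*, java.net.HttpURLConnection, java.net.URL,
// java.nio.charset.StandardCharsets, java.util.Map.
static InputStream post(String url, String data, Map<String, String> headers,
    int connectTimeoutMs, int readTimeoutMs) throws IOException {
  final HttpURLConnection connection =
      (HttpURLConnection) new URL(url).openConnection();
  connection.setRequestMethod("POST");
  connection.setConnectTimeout(connectTimeoutMs);
  connection.setReadTimeout(readTimeoutMs);
  for (Map.Entry<String, String> header : headers.entrySet()) {
    connection.setRequestProperty(header.getKey(), header.getValue());
  }
  connection.setDoOutput(true);
  try (OutputStream out = connection.getOutputStream()) {
    out.write(data.getBytes(StandardCharsets.UTF_8));
  }
  return connection.getInputStream();
}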
/** Delegates to the five-argument overload; -1 means there is no
 * timestamp column to populate. */
private void parseFields(List<String> fieldNames, List<ColumnMetaData.Rep> fieldTypes,
    Row.RowBuilder rowBuilder, JsonParser parser) throws IOException {
  parseFields(fieldNames, fieldTypes, -1, rowBuilder, parser);
}
// Derive the coordinator URL from the broker URL by swapping the default
// ports (8082 is the broker, 8081 the coordinator).
DruidConnectionImpl connection = new DruidConnectionImpl(druidSchema.url,
    druidSchema.url.replace(":8082", ":8081"));
// Remaining arguments follow the create(...) signature documented below.
return DruidTable.create(druidSchema, dataSourceName, intervals, fieldBuilder,
    metricNameBuilder, timestampColumnName, connection, complexMetrics);
private Table table(String tableName, DruidConnectionImpl connection) {
  final Map<String, SqlTypeName> fieldMap = new LinkedHashMap<>();
  final Set<String> metricNameSet = new LinkedHashSet<>();
  final Map<String, List<ComplexMetric>> complexMetrics = new HashMap<>();
  connection.metadata(tableName, DruidTable.DEFAULT_TIMESTAMP_COLUMN, null,
      fieldMap, metricNameSet, complexMetrics);
  return DruidTable.create(DruidSchema.this, tableName, null, fieldMap,
      metricNameSet, DruidTable.DEFAULT_TIMESTAMP_COLUMN, complexMetrics);
}
/** Creates a {@link DruidTable} by using the given {@link DruidConnectionImpl}
 * to populate the other parameters. The parameters may be partially populated.
 *
 * @param druidSchema Druid schema
 * @param dataSourceName Data source name in Druid, also the table name
 * @param intervals Intervals, or null to use the default
 * @param fieldMap Partially populated map of fields (dimensions plus metrics)
 * @param metricNameSet Partially populated set of metric names
 * @param timestampColumnName Name of the timestamp column, or null
 * @param connection Connection used to find column definitions; must not be null
 * @param complexMetrics List of complex metrics in Druid (thetaSketch, hyperUnique)
 *
 * @return A table
 */
static Table create(DruidSchema druidSchema, String dataSourceName,
    List<Interval> intervals, Map<String, SqlTypeName> fieldMap,
    Set<String> metricNameSet, String timestampColumnName,
    DruidConnectionImpl connection,
    Map<String, List<ComplexMetric>> complexMetrics) {
  assert connection != null;
  connection.metadata(dataSourceName, timestampColumnName, intervals,
      fieldMap, metricNameSet, complexMetrics);
  return DruidTable.create(druidSchema, dataSourceName, intervals, fieldMap,
      metricNameSet, timestampColumnName, complexMetrics);
}
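A hypothetical call, to show how the pieces fit (assuming a druidSchema and an open connection are in scope; the data source name and timestamp column are illustrative):

final Map<String, SqlTypeName> fieldMap = new LinkedHashMap<>();
final Set<String> metricNameSet = new LinkedHashSet<>();
final Map<String, List<ComplexMetric>> complexMetrics = new HashMap<>();
// The empty collections are filled in by connection.metadata(...) inside
// create(), which discovers the data source's columns at connection time.
Table table = DruidTable.create(druidSchema, "wikipedia", null, fieldMap,
    metricNameSet, "__time", connection, complexMetrics);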