this.consumerProperties = Preconditions.checkNotNull(consumerProperties, "consumerProperties");
Preconditions.checkNotNull(
    consumerProperties.get(BOOTSTRAP_SERVERS_KEY),
    StringUtils.format("consumerProperties must contain entry for [%s]", BOOTSTRAP_SERVERS_KEY)
);
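// A minimal construction sketch for the preconditions above; the map type and
// broker list are assumptions for illustration, not part of the original code.
Map<String, Object> consumerProperties = new HashMap<>();
consumerProperties.put(BOOTSTRAP_SERVERS_KEY, "broker1:9092,broker2:9092");
// Omitting BOOTSTRAP_SERVERS_KEY would trip the second checkNotNull above.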
/**
 * @param connector SQL metadata connector to the metadata storage
 * @param metadataStorageTablesConfig Table config
 *
 * @return all the active data sources in the metadata storage
 */
static Collection<String> getAllDataSourceNames(
    SQLMetadataConnector connector,
    final MetadataStorageTablesConfig metadataStorageTablesConfig
)
{
  return connector.getDBI().withHandle(
      (HandleCallback<List<String>>) handle ->
          handle.createQuery(String.format(
              "SELECT DISTINCT(datasource) FROM %s WHERE used = true",
              metadataStorageTablesConfig.getSegmentsTable()
          ))
          .fold(
              Lists.<String>newArrayList(),
              (druidDataSources, stringObjectMap, foldController, statementContext) -> {
                druidDataSources.add(MapUtils.getString(stringObjectMap, "datasource"));
                return druidDataSources;
              }
          )
  );
}
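// A hypothetical call site for the helper above; "connector" and
// "tablesConfig" are assumed to be supplied by the surrounding class.
Collection<String> dataSources = getAllDataSourceNames(connector, tablesConfig);
for (String dataSource : dataSources) {
  LOG.info("Active data source [{}]", dataSource);
}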
return Pair.of(dimensions, aggregatorFactories.toArray(new AggregatorFactory[0]));
@Override
public Emitter get()
{
  if (emitter == null) {
    throw new ISE("Emitter was null, that's bad!");
  }
  return emitter;
}
}
DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(truncatedTime));
final Interval interval = new Interval(truncatedDateTime, segmentGranularity.increment(truncatedDateTime));
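// A worked example under assumed values: with DAY granularity, bucketStart
// truncates a UTC timestamp to its containing day, and increment supplies the
// exclusive end of that bucket, e.g. 2020-01-15T14:00:00Z maps to the interval
// 2020-01-15T00:00:00.000Z/2020-01-16T00:00:00.000Z.
Granularity granularity = Granularities.DAY;
DateTime start = granularity.bucketStart(DateTimes.of("2020-01-15T14:00:00Z"));
Interval day = new Interval(start, granularity.increment(start));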
private void stopKafkaIngestion(String overlordAddress, String dataSourceName)
{
  try {
    FullResponseHolder response = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
            getHttpClient(),
            new Request(
                HttpMethod.POST,
                new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/shutdown", overlordAddress, dataSourceName))
            ),
            new FullResponseHandler(Charset.forName("UTF-8"))
        ),
        input -> input instanceof IOException,
        getMaxRetryCount()
    );
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      CONSOLE.printInfo("Druid Kafka Ingestion shutdown successful.");
    } else {
      throw new IOException(String.format(
          "Unable to stop Kafka Ingestion; Druid status [%d], full response [%s]",
          response.getStatus().getCode(),
          response.getContent()
      ));
    }
  }
  catch (Exception e) {
    throw new RuntimeException(e);
  }
}
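// Hypothetical invocation of the shutdown helper above; the overlord address
// and data source name are placeholders.
stopKafkaIngestion("overlord-host:8090", "wikipedia_kafka");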
throw new RE(
    ex,
    "Failure getting results for query[%s] from locations[%s] because of [%s]",
    query,
    locations,
    ex.getMessage()
);
@Override
public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException
{
  throw new UOE("CharSequence not supported");
}
@Override
public String fromByteBuffer(final ByteBuffer buffer, final int numBytes)
{
  return StringUtils.fromUtf8(buffer, numBytes);
}
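// A round-trip sketch for the method above; "strategy" is an assumed instance
// of the enclosing class. toUtf8 and fromUtf8 are inverses, so decoding the
// buffer recovers the original string.
byte[] utf8 = StringUtils.toUtf8("druid");
String decoded = strategy.fromByteBuffer(ByteBuffer.wrap(utf8), utf8.length);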
public SegmentValidationException(Throwable cause, String formatText, Object... arguments)
{
  super(StringUtils.nonStrictFormat(formatText, arguments), cause);
}
}
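// Illustrative throw site (all values assumed). nonStrictFormat matters here:
// a mismatched placeholder count degrades gracefully instead of raising
// IllegalFormatException while an error is already being reported.
throw new SegmentValidationException(
    cause,
    "Dim [%s] value mismatch in segment [%s]",
    dimName,
    segmentId
);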
final Interval indexedInterval =
    JodaUtils.umbrellaInterval(segments.stream().map(DataSegment::getInterval).collect(Collectors.toList()));
LOG.info("Building timeline for umbrella Interval [{}]", indexedInterval);
timeline = getTimelineForIntervalWithHandle(handle, dataSource, indexedInterval, metadataStorageTablesConfig);
@Override
public String toString()
{
  return StringUtils.format("serverTime-%s", windowPeriod);
}
};
@Override
public void dataSource(SearchQuery query)
{
  throw new ISE("Unsupported method in default query metrics implementation.");
}
DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
final Interval interval = new Interval(truncatedDateTime, segmentGranularity.increment(truncatedDateTime));
private void resetKafkaIngestion(String overlordAddress, String dataSourceName)
{
  try {
    FullResponseHolder response = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
            getHttpClient(),
            new Request(
                HttpMethod.POST,
                new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/reset", overlordAddress, dataSourceName))
            ),
            new FullResponseHandler(Charset.forName("UTF-8"))
        ),
        input -> input instanceof IOException,
        getMaxRetryCount()
    );
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      CONSOLE.printInfo("Druid Kafka Ingestion Reset successful.");
    } else {
      throw new IOException(String.format(
          "Unable to reset Kafka Ingestion; Druid status [%d], full response [%s]",
          response.getStatus().getCode(),
          response.getContent()
      ));
    }
  }
  catch (Exception e) {
    throw new RuntimeException(e);
  }
}
@Override
public void queryType(SelectQuery query)
{
  throw new ISE("Unsupported method in default query metrics implementation.");
}
FullResponseHolder response = RetryUtils.retry(
    () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
        getHttpClient(),
        new Request(
            HttpMethod.GET,
            new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s", overlordAddress, dataSourceName))
        ),
        new FullResponseHandler(Charset.forName("UTF-8"))
    ),
    input -> input instanceof IOException,
    getMaxRetryCount()
);
FullResponseHolder response = RetryUtils.retry(
    () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
        getHttpClient(),
        new Request(
            HttpMethod.GET,
            new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/status", overlordAddress, dataSourceName))
        ),
        new FullResponseHandler(Charset.forName("UTF-8"))
    ),
    input -> input instanceof IOException,
    getMaxRetryCount()
);
try {
  coordinatorResponse = RetryUtils.retry(
      () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
          getHttpClient(),
          new Request(HttpMethod.GET, new URL(String.format("http://%s/status", coordinatorAddress))),
          new FullResponseHandler(Charset.forName("UTF-8"))
      ).getContent(),
      input -> input instanceof IOException,
      getMaxRetryCount()
  );
}
catch (Exception e) {
  // Catch clause assumed; it mirrors the retry error handling in the
  // Kafka ingestion methods above. The original fragment ends mid-call.
  throw new RuntimeException(e);
}