Tabnine Logo
MultipleSpecificSegmentSpec
Code IndexAdd Tabnine to your IDE (free)

How to use
MultipleSpecificSegmentSpec
in
org.apache.druid.query.spec

Best Java code snippets using org.apache.druid.query.spec.MultipleSpecificSegmentSpec (Showing top 12 results out of 315)

origin: apache/incubator-druid

/**
 * Issues a {@link SegmentMetadataQuery} covering exactly the given segments and returns the
 * resulting per-segment analyses.
 *
 * @param queryLifecycleFactory factory used to create and run the query lifecycle
 * @param segments              segments to analyze; all must belong to the same dataSource
 * @param authenticationResult  identity under which the query runs
 * @return a sequence of {@link SegmentAnalysis}, one per analyzed segment
 * @throws IllegalArgumentException if the segments span more than one dataSource (or none),
 *                                  via {@link Iterables#getOnlyElement}
 */
private static Sequence<SegmentAnalysis> runSegmentMetadataQuery(
  final QueryLifecycleFactory queryLifecycleFactory,
  final Iterable<DataSegment> segments,
  final AuthenticationResult authenticationResult
)
{
 // Materialize once: the incoming Iterable may be lazily computed, and the original code
 // traversed it twice (once per stream). A single snapshot also guarantees both derived
 // views below are consistent with each other.
 final List<DataSegment> segmentList =
   StreamSupport.stream(segments.spliterator(), false).collect(Collectors.toList());

 // Sanity check: getOnlyElement of a set, to ensure all segments have the same dataSource.
 final String dataSource = Iterables.getOnlyElement(
   segmentList.stream().map(DataSegment::getDataSource).collect(Collectors.toSet())
 );

 // Target exactly these segments (by descriptor), not whole intervals.
 final MultipleSpecificSegmentSpec querySegmentSpec = new MultipleSpecificSegmentSpec(
   segmentList.stream().map(DataSegment::toDescriptor).collect(Collectors.toList())
 );

 final SegmentMetadataQuery segmentMetadataQuery = new SegmentMetadataQuery(
   new TableDataSource(dataSource),
   querySegmentSpec,
   new AllColumnIncluderator(),                              // analyze every column
   false,
   ImmutableMap.of(),                                        // no query context overrides
   EnumSet.noneOf(SegmentMetadataQuery.AnalysisType.class),  // no extra analysis types
   false,
   false
 );
 return queryLifecycleFactory.factorize().runSimple(segmentMetadataQuery, authenticationResult, null);
}
origin: apache/incubator-druid

} else if ((int) context.get("count") == 1) {
 Assert.assertTrue("Should retry with 2 missing segments", ((MultipleSpecificSegmentSpec) ((BaseQuery) query).getQuerySegmentSpec()).getDescriptors().size() == 2);
} else {
 Assert.assertTrue("Should retry with 1 missing segments", ((MultipleSpecificSegmentSpec) ((BaseQuery) query).getQuerySegmentSpec()).getDescriptors().size() == 1);
origin: apache/incubator-druid

new MultipleSpecificSegmentSpec(
  missingSegments
origin: apache/incubator-druid

/**
 * Re-packages raw timeseries results into by-segment form, keeping only the results whose
 * segment descriptors (from the query's MultipleSpecificSegmentSpec) match a known segment id.
 * Presumably a test helper that simulates a server's by-segment response — TODO confirm.
 *
 * @param query          query whose segment spec must be a MultipleSpecificSegmentSpec
 * @param segmentIds     known segment ids, positionally aligned with {@code results}
 * @param queryIntervals intervals used to reconstruct the dummy segment-id name
 * @param results        raw result iterables, positionally aligned with {@code segmentIds}
 * @return a sequence of by-segment results, one per matched descriptor
 * @throws ISE if any descriptor does not resolve to a known segment id
 */
private Sequence<Result<TimeseriesResultValue>> toFilteredQueryableTimeseriesResults(
  TimeseriesQuery query,
  List<SegmentId> segmentIds,
  List<Interval> queryIntervals,
  List<Iterable<Result<TimeseriesResultValue>>> results
)
{
 // The cast is safe only because callers always build the query with this spec type.
 MultipleSpecificSegmentSpec spec = (MultipleSpecificSegmentSpec) query.getQuerySegmentSpec();
 List<Result<TimeseriesResultValue>> ret = new ArrayList<>();
 for (SegmentDescriptor descriptor : spec.getDescriptors()) {
  // Rebuild the dummy id as "<intervalIndex>_<partitionNumber>" — must mirror however the
  // ids in segmentIds were originally constructed (presumably by the test setup; verify).
  SegmentId id = SegmentId.dummy(
    StringUtils.format("%s_%s", queryIntervals.indexOf(descriptor.getInterval()), descriptor.getPartitionNumber())
  );
  int index = segmentIds.indexOf(id);
  if (index != -1) {
   // Raw Result on purpose: the value here is a BySegmentResultValueClass, not a
   // TimeseriesResultValue, so a typed Result would not fit into `ret` without a cast.
   Result result = new Result(
     results.get(index).iterator().next().getTimestamp(),
     new BySegmentResultValueClass(
       Lists.newArrayList(results.get(index)),
       id.toString(),
       descriptor.getInterval()
     )
   );
   ret.add(result);
  } else {
   // NOTE(review): message says "Descriptor" but formats the reconstructed SegmentId,
   // not the descriptor itself — consider including `descriptor` for easier debugging.
   throw new ISE("Descriptor %s not found in server", id);
  }
 }
 return Sequences.simple(ret);
}
origin: apache/incubator-druid

/**
 * For each server, builds a result sequence for the segments it hosts and appends it to
 * {@code listOfSequences}. Servers without a query runner are logged and skipped.
 *
 * @param listOfSequences  accumulator receiving one result sequence per reachable server
 * @param segmentsByServer the segments to fetch, grouped by the server hosting them
 */
private void addSequencesFromServer(
  final List<Sequence<T>> listOfSequences,
  final SortedMap<DruidServer, List<SegmentDescriptor>> segmentsByServer
)
{
 // Divide user-provided maxQueuedBytes by the number of servers, and limit each server to
 // that much. Both values are invariant across servers, so compute them once here instead
 // of inside the per-server lambda (the original recomputed them for every entry).
 // Guard against size()==0: the original only divided inside forEach, which never runs on
 // an empty map, so the hoisted division must not introduce an ArithmeticException.
 final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, httpClientConfig.getMaxQueuedBytes());
 final long maxQueuedBytesPerServer =
   segmentsByServer.isEmpty() ? 0 : maxQueuedBytes / segmentsByServer.size();

 segmentsByServer.forEach((server, segmentsOfServer) -> {
  final QueryRunner serverRunner = serverView.getQueryRunner(server);
  if (serverRunner == null) {
   // Server disappeared between planning and execution; skip it rather than fail the query.
   log.error("Server[%s] doesn't have a query runner", server);
   return;
  }
  final MultipleSpecificSegmentSpec segmentsOfServerSpec = new MultipleSpecificSegmentSpec(segmentsOfServer);
  final Sequence<T> serverResults;
  if (isBySegment) {
   serverResults = getBySegmentServerResults(serverRunner, segmentsOfServerSpec, maxQueuedBytesPerServer);
  } else if (!server.segmentReplicatable() || !populateCache) {
   // Results from non-replicatable servers (or with caching disabled) are not cached.
   serverResults = getSimpleServerResults(serverRunner, segmentsOfServerSpec, maxQueuedBytesPerServer);
  } else {
   serverResults = getAndCacheServerResults(serverRunner, segmentsOfServerSpec, maxQueuedBytesPerServer);
  }
  listOfSequences.add(serverResults);
 });
}
origin: apache/incubator-druid

    new SegmentDescriptor(Intervals.of("2011-11-01/2011-11-10"), "2", 10)
  ),
  ((MultipleSpecificSegmentSpec) spec).getDescriptors()
);
origin: apache/incubator-druid

new MultipleSpecificSegmentSpec(
  ImmutableList.of(
    new SegmentDescriptor(
new MultipleSpecificSegmentSpec(
  ImmutableList.of(
    new SegmentDescriptor(
new MultipleSpecificSegmentSpec(
  ImmutableList.of(
    new SegmentDescriptor(
origin: apache/incubator-druid

descriptors.add(new SegmentDescriptor(interval2, "v", 5));
descriptors.add(new SegmentDescriptor(interval3, "v", 6));
MultipleSpecificSegmentSpec expected = new MultipleSpecificSegmentSpec(descriptors);
origin: apache/incubator-druid

.setDataSource(QueryRunnerTestHelper.dataSource)
.setQuerySegmentSpec(
  new MultipleSpecificSegmentSpec(
    ImmutableList.of(
      descriptor_26_28_0,
origin: org.apache.druid/druid-sql

/**
 * Runs a segment-metadata query restricted to exactly the supplied segments.
 *
 * <p>All segments must share a single dataSource; {@link Iterables#getOnlyElement} enforces
 * this by throwing when the derived set of dataSource names does not contain exactly one entry.
 *
 * @param queryLifecycleFactory creates the lifecycle used to execute the query
 * @param segments              the segments whose metadata should be analyzed
 * @param authenticationResult  identity the query is executed under
 * @return one {@link SegmentAnalysis} per segment, as a lazy sequence
 */
private static Sequence<SegmentAnalysis> runSegmentMetadataQuery(
  final QueryLifecycleFactory queryLifecycleFactory,
  final Iterable<DataSegment> segments,
  final AuthenticationResult authenticationResult
)
{
 // Enforce the single-dataSource invariant: collapse all names into a set and require
 // that exactly one remains.
 final String dataSource =
   Iterables.getOnlyElement(
     StreamSupport.stream(segments.spliterator(), false)
                  .map(DataSegment::getDataSource)
                  .collect(Collectors.toSet()));

 // Build the query against the specific segment descriptors (not whole intervals) with
 // every analysis knob switched off: all columns, no merge, empty context, no analysis
 // types, explicit intervals, strict aggregator merge.
 final SegmentMetadataQuery metadataQuery =
   new SegmentMetadataQuery(
     new TableDataSource(dataSource),
     new MultipleSpecificSegmentSpec(
       StreamSupport.stream(segments.spliterator(), false)
                    .map(DataSegment::toDescriptor)
                    .collect(Collectors.toList())),
     new AllColumnIncluderator(),
     false,
     ImmutableMap.of(),
     EnumSet.noneOf(SegmentMetadataQuery.AnalysisType.class),
     false,
     false);

 return queryLifecycleFactory.factorize().runSimple(metadataQuery, authenticationResult, null);
}
origin: org.apache.druid/druid-processing

new MultipleSpecificSegmentSpec(
  missingSegments
origin: org.apache.druid/druid-server

/**
 * For each server in {@code segmentsByServer}, obtains that server's query runner and adds a
 * result sequence for its segments to {@code listOfSequences}. Servers with no runner are
 * logged and skipped.
 *
 * @param listOfSequences  accumulator receiving one result sequence per reachable server
 * @param segmentsByServer segments to fetch, grouped by the server that hosts them
 */
private void addSequencesFromServer(
  final List<Sequence<T>> listOfSequences,
  final SortedMap<DruidServer, List<SegmentDescriptor>> segmentsByServer
)
{
 segmentsByServer.forEach((server, segmentsOfServer) -> {
  final QueryRunner serverRunner = serverView.getQueryRunner(server);
  if (serverRunner == null) {
   // Server vanished between planning and execution; skip it rather than fail the query.
   log.error("Server[%s] doesn't have a query runner", server);
   return;
  }
  // Restrict this server's query to exactly the segments it hosts.
  final MultipleSpecificSegmentSpec segmentsOfServerSpec = new MultipleSpecificSegmentSpec(segmentsOfServer);
  // Divide user-provided maxQueuedBytes by the number of servers, and limit each server to that much.
  // NOTE(review): both values are the same for every server; they could be hoisted out of the lambda.
  final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, httpClientConfig.getMaxQueuedBytes());
  final long maxQueuedBytesPerServer = maxQueuedBytes / segmentsByServer.size();
  final Sequence<T> serverResults;
  if (isBySegment) {
   serverResults = getBySegmentServerResults(serverRunner, segmentsOfServerSpec, maxQueuedBytesPerServer);
  } else if (!server.segmentReplicatable() || !populateCache) {
   // Non-replicatable server or caching disabled: fetch without populating the cache.
   serverResults = getSimpleServerResults(serverRunner, segmentsOfServerSpec, maxQueuedBytesPerServer);
  } else {
   serverResults = getAndCacheServerResults(serverRunner, segmentsOfServerSpec, maxQueuedBytesPerServer);
  }
  listOfSequences.add(serverResults);
 });
}
org.apache.druid.query.specMultipleSpecificSegmentSpec

Most used methods

  • <init>
  • getDescriptors

Popular in Java

  • Making http post requests using okhttp
  • scheduleAtFixedRate (Timer)
  • scheduleAtFixedRate (ScheduledExecutorService)
  • onRequestPermissionsResult (Fragment)
  • ObjectMapper (com.fasterxml.jackson.databind)
    ObjectMapper provides functionality for reading and writing JSON, either to and from basic POJOs (Plain Old Java Objects)
  • PrintStream (java.io)
    Fake signature of an existing Java class.
  • ArrayList (java.util)
    ArrayList is an implementation of List, backed by an array. All optional operations including adding, removing, and replacing elements are supported
  • BoxLayout (javax.swing)
  • JTextField (javax.swing)
  • IOUtils (org.apache.commons.io)
    General IO stream manipulation utilities. This class provides static utility methods for input/output operations
  • Github Copilot alternatives
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now