private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    return new DateHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount,
            extendedBounds, valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;
    consumeBucketsAndMaybeBreak((int) bucketOrds.size());
    List<InternalDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
    for (long i = 0; i < bucketOrds.size(); i++) {
        buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter,
                bucketAggregations(i)));
    }

    // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
    CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this));

    // value source will be null for unmapped fields
    // Important: use `rounding` here, not `shardRounding`
    InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
            ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
            : null;
    return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
            pipelineAggregators(), metaData());
}
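/*
 * Hedged sketch, not part of the aggregator above: it illustrates what the EmptyBucketInfo built when
 * minDocCount == 0 is for -- at reduce time, gaps between existing bucket keys are filled with
 * zero-doc buckets, stepping with the request-level `rounding` (not `shardRounding`). The fixed
 * 1h interval, the class name, and the gap-filling loop below are illustrative assumptions, not the
 * real Rounding/reduce API.
 */
import java.util.TreeMap;

public class EmptyBucketFillSketch {

    public static void main(String[] args) {
        long interval = 3_600_000L; // toy stand-in for one rounding step (1 hour)

        // bucket key -> doc count, as if shards had returned buckets at 0h and 3h only
        TreeMap<Long, Long> merged = new TreeMap<>();
        merged.put(0L, 3L);
        merged.put(3 * interval, 1L);

        // walk from the first to the last key; missing keys become empty (0-doc) buckets
        for (long key = merged.firstKey(); key <= merged.lastKey(); key += interval) {
            long docCount = merged.getOrDefault(key, 0L);
            System.out.println("key=" + key + " docCount=" + docCount);
        }
    }
}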
@Override
public void collect(int doc, long bucket) throws IOException {
    assert bucket == 0;
    if (values.advanceExact(doc)) {
        final int valuesCount = values.docValueCount();

        long previousRounded = Long.MIN_VALUE;
        for (int i = 0; i < valuesCount; ++i) {
            long value = values.nextValue();
            // We can use shardRounding here, which is sometimes more efficient
            // if daylight saving times are involved.
            long rounded = shardRounding.round(value - offset) + offset;
            assert rounded >= previousRounded;
            if (rounded == previousRounded) {
                continue;
            }
            long bucketOrd = bucketOrds.add(rounded);
            if (bucketOrd < 0) { // already seen
                bucketOrd = -1 - bucketOrd;
                collectExistingBucket(sub, doc, bucketOrd);
            } else {
                collectBucket(sub, doc, bucketOrd);
            }
            previousRounded = rounded;
        }
    }
}
};
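/*
 * Hedged sketch, not part of the aggregator above: it spells out the two conventions collect()
 * relies on -- the offset-aware rounding `round(value - offset) + offset`, and the hash add()
 * contract where a negative return of `-1 - ordinal` means the bucket key was already seen.
 * The interval, offset, sample timestamps, and the toyRound/ToyLongHash names are made-up
 * stand-ins for Rounding and LongHash, not Elasticsearch APIs.
 */
import java.util.HashMap;
import java.util.Map;

public class CollectConventionsSketch {

    // Fixed-interval rounding shifted by `offset`, mirroring round(value - offset) + offset.
    static long toyRound(long value, long intervalMillis, long offsetMillis) {
        long shifted = value - offsetMillis;
        return Math.floorDiv(shifted, intervalMillis) * intervalMillis + offsetMillis;
    }

    // Minimal stand-in for LongHash: add() returns a new ordinal, or -1 - existingOrdinal when
    // the key is already present, which is exactly what the `if (bucketOrd < 0)` branch checks.
    static class ToyLongHash {
        private final Map<Long, Long> ords = new HashMap<>();

        long add(long key) {
            Long existing = ords.get(key);
            if (existing != null) {
                return -1 - existing;
            }
            long ord = ords.size();
            ords.put(key, ord);
            return ord;
        }
    }

    public static void main(String[] args) {
        long interval = 3_600_000L; // 1h bucket width
        long offset = 1_800_000L;   // +30min offset
        ToyLongHash bucketOrds = new ToyLongHash();

        for (long ts : new long[] { 1_000_000L, 2_000_000L, 2_500_000L, 5_500_000L }) {
            long key = toyRound(ts, interval, offset);
            long ord = bucketOrds.add(key);
            boolean alreadySeen = ord < 0;
            if (alreadySeen) {
                ord = -1 - ord;
            }
            System.out.println("ts=" + ts + " -> key=" + key + " ord=" + ord + " alreadySeen=" + alreadySeen);
        }
    }
}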
@Override
public InternalAggregation buildEmptyAggregation() {
    InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
            ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
            : null;
    return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo,
            formatter, keyed, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;
    List<InternalDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
    for (long i = 0; i < bucketOrds.size(); i++) {
        buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter,
                bucketAggregations(i)));
    }

    // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
    CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());

    // value source will be null for unmapped fields
    InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
            ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
            : null;
    return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
            pipelineAggregators(), metaData());
}
@Override
public void collect(int doc, long bucket) throws IOException {
    assert bucket == 0;
    values.setDocument(doc);
    final int valuesCount = values.count();

    long previousRounded = Long.MIN_VALUE;
    for (int i = 0; i < valuesCount; ++i) {
        long value = values.valueAt(i);
        long rounded = rounding.round(value - offset) + offset;
        assert rounded >= previousRounded;
        if (rounded == previousRounded) {
            continue;
        }
        long bucketOrd = bucketOrds.add(rounded);
        if (bucketOrd < 0) { // already seen
            bucketOrd = -1 - bucketOrd;
            collectExistingBucket(sub, doc, bucketOrd);
        } else {
            collectBucket(sub, doc, bucketOrd);
        }
        previousRounded = rounded;
    }
}
};
private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, extendedBounds,
            valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
}