/**
 * Checks whether the start millis of the target interval lags behind maxInterval's start
 * by at least minDataLagMs.
 * minDataLag is required to prevent repeatedly building data because of delay data.
 *
 * @param target      the interval being evaluated
 * @param maxInterval the interval whose start is used as the reference point
 * @return true if the start millis of target interval is more than minDataLagMs lagging behind maxInterval's
 */
private boolean hasEnoughLag(Interval target, Interval maxInterval)
{
  final long lagMillis = maxInterval.getStartMillis() - target.getStartMillis();
  return lagMillis >= minDataLagMs;
}
// Expresses an absolute millis timestamp as an offset from the interval's start,
// scaled down by MILLIS_FACTOR.
private static double toLocalInterval(long millis, Interval interval)
{
  final long offsetMillis = millis - interval.getStartMillis();
  return offsetMillis / MILLIS_FACTOR;
}
/**
 * If currHydrant is A, creates a new index B, sets currHydrant to B and returns A.
 *
 * @return the current index after swapping in a new one
 */
public FireHydrant swap()
{
  final long intervalStart = interval.getStartMillis();
  return makeNewCurrIndex(intervalStart, schema);
}
@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
{
  final Interval currInterval = new Interval(period, referenceTimestamp);
  if (!includeFuture) {
    return currInterval.contains(theInterval);
  }
  // With includeFuture, anything starting at or after the current interval's start applies.
  return currInterval.getStartMillis() <= theInterval.getStartMillis();
}
}
@Override
public long apply(Row row)
{
  // Every row maps to the start millis of the query's first (and only used) interval.
  final Interval firstInterval = query.getIntervals().get(0);
  return firstInterval.getStartMillis();
}
};
public DeterminePartitionsDimSelectionMapperHelper(HadoopDruidIndexerConfig config, String partitionDimension)
{
  this.config = config;
  this.partitionDimension = partitionDimension;

  // Index each bucket interval's start millis by its ordinal position in the granularity spec.
  final ImmutableMap.Builder<Long, Integer> builder = ImmutableMap.builder();
  int position = 0;
  for (final Interval bucket : config.getGranularitySpec().bucketIntervals().get()) {
    builder.put(bucket.getStartMillis(), position++);
  }
  this.intervalIndexes = builder.build();
}
/**
 * Converts the configured intervals into (startMillis, endMillis) pairs.
 *
 * @return one pair per interval, in the same order as {@code intervals}
 */
private List<Pair<Long, Long>> makeIntervalLongs()
{
  List<Pair<Long, Long>> intervalLongs = new ArrayList<>();
  for (Interval interval : intervals) {
    // Diamond operator: the type arguments are inferred from the declared list type.
    intervalLongs.add(new Pair<>(interval.getStartMillis(), interval.getEndMillis()));
  }
  return intervalLongs;
}
SegmentsForSequence build()
{
  // Key the per-interval segment state by the interval's start millis, sorted ascending.
  final NavigableMap<Long, SegmentsOfInterval> segmentsByStart = new TreeMap<>();
  for (Entry<SegmentIdWithShardSpec, Pair<SegmentWithState, List<SegmentWithState>>> entry
      : intervalToSegments.entrySet()) {
    final Interval interval = entry.getKey().getInterval();
    segmentsByStart.put(
        interval.getStartMillis(),
        new SegmentsOfInterval(interval, entry.getValue().lhs, entry.getValue().rhs)
    );
  }
  return new SegmentsForSequence(segmentsByStart, lastSegmentId);
}
}
// Clamps `interval` so it lies within `inputInterval`. Returns the original instance
// untouched when no clamping was necessary, avoiding an allocation.
private Interval trim(Interval inputInterval, Interval interval)
{
  final long boundedStart = Math.max(interval.getStartMillis(), inputInterval.getStartMillis());
  final long boundedEnd = Math.min(interval.getEndMillis(), inputInterval.getEndMillis());
  if (boundedStart == interval.getStartMillis() && boundedEnd == interval.getEndMillis()) {
    return interval;
  }
  return new Interval(boundedStart, boundedEnd, interval.getChronology());
}
// Expresses the segment's start millis relative to the given interval (see toLocalInterval).
private static double convertStart(DataSegment dataSegment, Interval interval)
{
  final long segmentStartMillis = dataSegment.getInterval().getStartMillis();
  return toLocalInterval(segmentStartMillis, interval);
}
// Private: callers construct SegmentIds via factory methods elsewhere in this class.
private SegmentId(String dataSource, Interval interval, String version, int partitionNum)
{
  // Data source names repeat across many segment ids, so interning saves memory.
  this.dataSource = STRING_INTERNER.intern(Objects.requireNonNull(dataSource));
  // The interval is stored decomposed (start, end, chronology) rather than as an object.
  this.intervalStartMillis = interval.getStartMillis();
  this.intervalEndMillis = interval.getEndMillis();
  this.intervalChronology = interval.getChronology();
  // Versions are timestamp-based Strings, interning of them doesn't make sense. If this is not the case, interning
  // could be conditionally allowed via a system property.
  this.version = Objects.requireNonNull(version);
  this.partitionNum = partitionNum;
  // Must be computed last: the hash covers the fields assigned above.
  this.hashCode = computeHashCode();
}
@Override
public void run(Context context) throws IOException, InterruptedException
{
  setup(context);

  // Feed every input record through map(), accumulating into hyperLogLogs.
  while (context.nextKeyValue()) {
    map(context.getCurrentKey(), context.getCurrentValue(), context);
  }

  // Emit one output record per interval: (interval start millis, serialized HLL sketch).
  for (Map.Entry<Interval, HyperLogLogCollector> entry : hyperLogLogs.entrySet()) {
    final LongWritable outKey = new LongWritable(entry.getKey().getStartMillis());
    final BytesWritable outValue = new BytesWritable(entry.getValue().toByteArray());
    context.write(outKey, outValue);
  }

  cleanup(context);
}
public static boolean eligibleForLoad(Period period, Interval interval, DateTime referenceTimestamp, boolean includeFuture)
{
  final Interval currInterval = new Interval(period, referenceTimestamp);
  if (!includeFuture) {
    return eligibleForLoad(currInterval, interval);
  }
  // When future data is included, any interval ending after the current interval's start is eligible.
  return currInterval.getStartMillis() < interval.getEndMillis();
}
@Override
public void run()
{
  final long sinkStartMillis = sink.getInterval().getStartMillis();
  abandonSegment(sinkStartMillis, sink);
  metrics.incrementHandOffCount();
}
}
/**
 * Creates a new interval with the same start and end, but a different chronology.
 *
 * @param chronology the chronology to use, null means ISO default
 * @return an interval with a different chronology
 */
public Interval withChronology(Chronology chronology)
{
  // Reference comparison is intentional; a matching instance means no new object is needed.
  return getChronology() == chronology
      ? this
      : new Interval(getStartMillis(), getEndMillis(), chronology);
}
/**
 * Creates a new interval with the specified start millisecond instant.
 *
 * @param startInstant the start instant for the new interval
 * @return an interval with the end from this interval and the specified start
 * @throws IllegalArgumentException if the resulting interval has end before start
 */
public Interval withStartMillis(long startInstant)
{
  // Unchanged start: reuse this immutable instance.
  return startInstant == getStartMillis()
      ? this
      : new Interval(startInstant, getEndMillis(), getChronology());
}
// Builds a RangeSet covering the given intervals, each as a [start, end) millis range.
public static RangeSet<Long> fromIntervals(final Iterable<Interval> intervals)
{
  final RangeSet<Long> rangeSet = TreeRangeSet.create();
  intervals.forEach(
      interval -> rangeSet.add(Range.closedOpen(interval.getStartMillis(), interval.getEndMillis()))
  );
  return rangeSet;
}
@Test
public void testIterableNone()
{
  // NONE granularity over [0, 1000): each successive interval starts one millisecond later.
  int expectedStartMillis = 0;
  for (Interval interval : Granularities.NONE.getIterable(Intervals.utc(0, 1000))) {
    Assert.assertEquals(expectedStartMillis, interval.getStartMillis());
    expectedStartMillis++;
  }
}
// Builds a numeric bound filter on the __time column spanning the given interval.
public static BoundDimFilter TIME_BOUND(final Object intervalObj)
{
  final Interval interval = new Interval(intervalObj, ISOChronology.getInstanceUTC());
  final String lowerBound = String.valueOf(interval.getStartMillis());
  final String upperBound = String.valueOf(interval.getEndMillis());
  // NOTE(review): the false/true flags appear to make the bound [start, end) — confirm
  // against the BoundDimFilter constructor's strictness parameters.
  return new BoundDimFilter(
      ColumnHolder.TIME_COLUMN_NAME,
      lowerBound,
      upperBound,
      false,
      true,
      null,
      null,
      StringComparators.NUMERIC
  );
}
void add(SegmentIdWithShardSpec identifier)
{
  final Interval interval = identifier.getInterval();
  // Lazily create the per-interval holder, keyed by the interval's start millis.
  final SegmentsOfInterval segmentsOfInterval = intervalToSegmentStates.computeIfAbsent(
      interval.getStartMillis(),
      startMillis -> new SegmentsOfInterval(interval)
  );
  segmentsOfInterval.setAppendingSegment(SegmentWithState.newSegment(identifier));
  lastSegmentId = identifier.toString();
}