@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
{
  // This rule applies when its configured interval fully covers the queried interval;
  // the reference timestamp is irrelevant for an absolute-interval rule.
  final boolean covered = interval.contains(theInterval);
  return covered;
}
}
private Sink getSink(long timestamp)
{
  // Return the sink only when the timestamp falls inside its interval; otherwise null.
  return theSink.getInterval().contains(timestamp) ? theSink : null;
}
private static String findVersion(Map<Interval, String> versions, Interval interval)
{
  // Scan the mappings in entry-set order and return the version of the first
  // interval that fully contains the requested one.
  for (Entry<Interval, String> versionEntry : versions.entrySet()) {
    if (versionEntry.getKey().contains(interval)) {
      return versionEntry.getValue();
    }
  }
  throw new ISE("Cannot find a version for interval[%s]", interval);
}
@Override
public boolean apply(InputRow input)
{
  // Keep rows whose event timestamp lies within the configured interval.
  final long rowTimestamp = input.getTimestampFromEpoch();
  return interval.contains(rowTimestamp);
}
}
private static String findVersion(Map<Interval, String> versions, Interval interval)
{
  // Find the first mapping whose interval fully contains the requested one and
  // return its version; fail loudly if no such mapping exists.
  return versions.entrySet()
                 .stream()
                 .filter(versionEntry -> versionEntry.getKey().contains(interval))
                 .findFirst()
                 .map(Entry::getValue)
                 .orElseThrow(() -> new ISE("Cannot find a version for interval[%s]", interval));
}
// Demonstrates Joda-Time interval containment: build a nine-hour interval on
// 2010-05-25 (12:00 to 21:00) and check whether 16:00 on that day falls inside it.
DateTime start = new DateTime(2010, 5, 25, 12, 0, 0, 0); // interval start: 12:00:00.000
DateTime end = new DateTime(2010, 5, 25, 21, 0, 0, 0); // interval end: 21:00:00.000
Interval interval = new Interval(start, end);
DateTime test = new DateTime(2010, 5, 25, 16, 0, 0, 0); // instant to test: 16:00:00.000
// Joda-Time Interval.contains is start-inclusive / end-exclusive; 16:00 is inside,
// so this prints "true".
System.out.println(interval.contains(test));
private List<TaskLockPosse> findLockPossesContainingInterval(final String dataSource, final Interval interval)
{
  // All lock bookkeeping happens under the giant lock.
  giant.lock();
  try {
    // Start from the broader set of posses whose locks merely overlap the interval,
    // then narrow to those whose lock fully contains it.
    return findLockPossesOverlapsInterval(dataSource, interval)
        .stream()
        .filter(posse -> posse.taskLock.getInterval().contains(interval))
        .collect(Collectors.toList());
  }
  finally {
    giant.unlock();
  }
}
@Override public Optional<Interval> bucketInterval(DateTime dt) { // First interval with start time ≤ dt final Interval interval = intervals.floor(new Interval(dt, DateTimes.MAX)); if (interval != null && interval.contains(dt)) { return Optional.of(interval); } else { return Optional.absent(); } }
private static boolean matchGroupIdAndContainInterval(TaskLock existingLock, String taskGroupId, Interval interval)
{
  // The existing lock must fully cover the requested interval...
  if (!existingLock.getInterval().contains(interval)) {
    return false;
  }
  // ...and belong to the same task group.
  return existingLock.getGroupId().equals(taskGroupId);
}
private SegmentIdWithShardSpec tryAllocateFirstSegment(TaskActionToolbox toolbox, Task task, Interval rowInterval) { // No existing segments for this row, but there might still be nearby ones that conflict with our preferred // segment granularity. Try that first, and then progressively smaller ones if it fails. final List<Interval> tryIntervals = Granularity.granularitiesFinerThan(preferredSegmentGranularity) .stream() .map(granularity -> granularity.bucket(timestamp)) .collect(Collectors.toList()); for (Interval tryInterval : tryIntervals) { if (tryInterval.contains(rowInterval)) { final SegmentIdWithShardSpec identifier = tryAllocate(toolbox, task, tryInterval, rowInterval, false); if (identifier != null) { return identifier; } } } return null; }
/**
 * Checks whether the given time falls within the current range.
 *
 * @param t the time to test for containment
 * @return whether {@code t} lies within this range
 */
public boolean contains(Time t)
{
  final boolean inRange = this.getJodaTimeInterval().contains(t.getJodaTimeInstant());
  return inRange;
}
CompactibleTimelineObjectHolderCursor(
    VersionedIntervalTimeline<String, DataSegment> timeline,
    List<Interval> totalIntervalsToSearch
)
{
  // Collect, per search interval, the timeline holders that actually carry data:
  // at least one partition chunk, a positive total byte size, and a first-chunk
  // segment interval fully contained in the search interval.
  this.holders = totalIntervalsToSearch
      .stream()
      .flatMap(interval -> timeline
          .lookup(interval)
          .stream()
          .filter(holder -> {
            final List<PartitionChunk<DataSegment>> chunks =
                Lists.newArrayList(holder.getObject().iterator());
            final long partitionBytes =
                chunks.stream().mapToLong(chunk -> chunk.getObject().getSize()).sum();
            // isEmpty() is the idiomatic emptiness check (was size() > 0); it also
            // guards the chunks.get(0) access at the end of the condition.
            return !chunks.isEmpty()
                   && partitionBytes > 0
                   && interval.contains(chunks.get(0).getObject().getInterval());
          })
      )
      .collect(Collectors.toList());
}
@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
{
  // Window of length `period` ending at the reference timestamp.
  final Interval currInterval = new Interval(period, referenceTimestamp);
  // With includeFuture, anything starting at or after the window start applies;
  // otherwise the window must fully contain the interval.
  return includeFuture
         ? currInterval.getStartMillis() <= theInterval.getStartMillis()
         : currInterval.contains(theInterval);
}
}
@Override
public boolean apply(TaskLock input)
{
  // Matches locks whose interval stretches far enough to cover THE_DISTANT_FUTURE.
  final boolean coversFuture = input.getInterval().contains(THE_DISTANT_FUTURE);
  return coversFuture;
}
}
@Override
public boolean apply(TaskLock input)
{
  // Keep only locks whose interval contains THE_DISTANT_FUTURE.
  final boolean matches = input.getInterval().contains(THE_DISTANT_FUTURE);
  return matches;
}
}
@Override
public boolean apply(TaskLock input)
{
  // Matches locks whose interval contains the PARTY_TIME instant.
  final boolean coversPartyTime = input.getInterval().contains(PARTY_TIME);
  return coversPartyTime;
}
}
public Sink getSink(long timestamp)
{
  // Hand back the sink only when the timestamp lies inside its interval.
  return sink.getInterval().contains(timestamp) ? sink : null;
}
@Override
public boolean apply(TaskLock input)
{
  // Keep only locks whose interval contains the PARTY_TIME instant.
  final boolean matches = input.getInterval().contains(PARTY_TIME);
  return matches;
}
}
private SegmentIdWithShardSpec tryAllocateSubsequentSegment( TaskActionToolbox toolbox, Task task, Interval rowInterval, DataSegment usedSegment ) { // Existing segment(s) exist for this row; use the interval of the first one. if (!usedSegment.getInterval().contains(rowInterval)) { log.error("The interval of existing segment[%s] doesn't contain rowInterval[%s]", usedSegment, rowInterval); return null; } else { // If segment allocation failed here, it is highly likely an unrecoverable error. We log here for easier // debugging. return tryAllocate(toolbox, task, usedSegment.getInterval(), rowInterval, true); } }
static boolean isSegmentLoaded(Iterable<ImmutableSegmentLoadInfo> serverView, SegmentDescriptor descriptor)
{
  // A descriptor counts as "loaded" when some announced segment covers its interval,
  // matches its partition number, carries an equal-or-newer version, and is served
  // by at least one replicatable server.
  for (ImmutableSegmentLoadInfo loadInfo : serverView) {
    if (!loadInfo.getSegment().getInterval().contains(descriptor.getInterval())) {
      continue;
    }
    if (loadInfo.getSegment().getShardSpec().getPartitionNum() != descriptor.getPartitionNumber()) {
      continue;
    }
    if (loadInfo.getSegment().getVersion().compareTo(descriptor.getVersion()) < 0) {
      continue;
    }
    if (Iterables.any(loadInfo.getServers(), DruidServerMetadata::segmentReplicatable)) {
      return true;
    }
  }
  return false;
}
}