) throws IOException, InterruptedException final Optional<Interval> maybeInterval = config.getGranularitySpec().bucketInterval(timestamp);
.collect(Collectors.toMap(TaskLock::getInterval, TaskLock::getVersion)); final Optional<Interval> maybeInterval = granularitySpec.bucketInterval(timestamp); if (!maybeInterval.isPresent()) { throw new IAE("Could not find interval for timestamp [%s]", timestamp);
|| granularitySpec.bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch())) .isPresent()) { innerMap(inputRow, context);
/**
 * Resolves the time chunk and shard that a given input row falls into.
 *
 * @param inputRow the row to locate
 *
 * @return the matching Bucket, or absent when the row's timestamp lies outside
 *         every interval produced by the granularity spec
 */
public Optional<Bucket> getBucket(InputRow inputRow)
{
  // Map the row's epoch timestamp onto a segment interval; rows that fall
  // outside all configured intervals are not bucketed.
  final Optional<Interval> timeBucket =
      schema.getDataSchema().getGranularitySpec().bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch()));
  if (!timeBucket.isPresent()) {
    return Optional.absent();
  }

  final DateTime intervalStart = timeBucket.get().getStart();
  final long intervalStartMillis = intervalStart.getMillis();

  // Choose the concrete shard within the interval, keyed by the
  // rollup-granularity bucket start of the row's own timestamp.
  final long rollupBucketMillis = rollupGran.bucketStart(inputRow.getTimestamp()).getMillis();
  final ShardSpec actualSpec = shardSpecLookups.get(intervalStartMillis).getShardSpec(rollupBucketMillis, inputRow);
  final HadoopyShardSpec hadoopyShardSpec = hadoopShardSpecLookup.get(intervalStartMillis).get(actualSpec);

  return Optional.of(
      new Bucket(hadoopyShardSpec.getShardNum(), intervalStart, actualSpec.getPartitionNum())
  );
}
"2012-01-03T00Z", Optional.of(Intervals.of("2012-01-03T00Z/2012-01-04T00Z")), spec.bucketInterval(DateTimes.of("2012-01-03T00Z")) ); "2012-01-03T01Z", Optional.of(Intervals.of("2012-01-03T00Z/2012-01-04T00Z")), spec.bucketInterval(DateTimes.of("2012-01-03T01Z")) ); "2012-01-04T01Z", Optional.<Interval>absent(), spec.bucketInterval(DateTimes.of("2012-01-04T01Z")) ); "2012-01-07T23:59:59.999Z", Optional.of(Intervals.of("2012-01-07T00Z/2012-01-08T00Z")), spec.bucketInterval(DateTimes.of("2012-01-07T23:59:59.999Z")) ); "2012-01-08T01Z", Optional.of(Intervals.of("2012-01-08T00Z/2012-01-11T00Z")), spec.bucketInterval(DateTimes.of("2012-01-08T01Z")) ); "2012-01-04T00Z", Optional.absent(), spec.bucketInterval(DateTimes.of("2012-01-04T00Z")) );
interval = config.getGranularitySpec().getSegmentGranularity().bucket(DateTimes.utc(key.get())); } else { Optional<Interval> intervalOptional = config.getGranularitySpec().bucketInterval(DateTimes.utc(key.get()));
"2012-01-03T00Z", Optional.of(Intervals.of("2012-01-03T00Z/2012-01-04T00Z")), spec.bucketInterval(DateTimes.of("2012-01-03T00Z")) ); "2012-01-03T01Z", Optional.of(Intervals.of("2012-01-03T00Z/2012-01-04T00Z")), spec.bucketInterval(DateTimes.of("2012-01-03T01Z")) ); "2012-01-04T01Z", Optional.<Interval>absent(), spec.bucketInterval(DateTimes.of("2012-01-04T01Z")) ); "2012-01-07T23:59:59.999Z", Optional.of(Intervals.of("2012-01-07T00Z/2012-01-08T00Z")), spec.bucketInterval(DateTimes.of("2012-01-07T23:59:59.999Z")) ); "2012-01-08T01Z", Optional.of(Intervals.of("2012-01-08T00Z/2012-01-09T00Z")), spec.bucketInterval(DateTimes.of("2012-01-08T01Z")) );
.bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch()));
final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); if (!optInterval.isPresent()) { fireDepartmentMetrics.incrementThrownAway();
config.getGranularitySpec().bucketInterval(bucket).orNull() ); config.makeSegmentPartitionInfoPath(config.getGranularitySpec().bucketInterval(bucket).get()), config.isOverwriteFiles() );
.collect(Collectors.toMap(TaskLock::getInterval, TaskLock::getVersion)); final Optional<Interval> maybeInterval = granularitySpec.bucketInterval(timestamp); if (!maybeInterval.isPresent()) { throw new IAE("Could not find interval for timestamp [%s]", timestamp);
final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); if (!optInterval.isPresent()) { determinePartitionsMeters.incrementThrownAway();
|| granularitySpec.bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch())) .isPresent()) { innerMap(inputRow, context);
Bucket bucket = Bucket.fromGroupKey(keyBytes.getGroupKey()).lhs; final Interval interval = config.getGranularitySpec().bucketInterval(bucket.time).get();
Optional<Interval> maybeInterval = granularitySpec.bucketInterval(timestamp); if (!maybeInterval.isPresent()) { throw new ISE("Could not find interval for timestamp [%s]", timestamp); final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); if (!optInterval.isPresent()) { buildSegmentsMeters.incrementThrownAway();
) throws IOException, InterruptedException final Optional<Interval> maybeInterval = config.getGranularitySpec().bucketInterval(timestamp);
/**
 * Resolves the time chunk and shard that a given input row falls into.
 *
 * @param inputRow the row to locate
 *
 * @return the matching Bucket, or absent when the row's timestamp lies outside
 *         every interval produced by the granularity spec
 */
public Optional<Bucket> getBucket(InputRow inputRow)
{
  // Map the row's epoch timestamp onto a segment interval; rows that fall
  // outside all configured intervals are not bucketed.
  final Optional<Interval> timeBucket =
      schema.getDataSchema().getGranularitySpec().bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch()));
  if (!timeBucket.isPresent()) {
    return Optional.absent();
  }

  final DateTime intervalStart = timeBucket.get().getStart();
  final long intervalStartMillis = intervalStart.getMillis();

  // Choose the concrete shard within the interval, keyed by the
  // rollup-granularity bucket start of the row's own timestamp.
  final long rollupBucketMillis = rollupGran.bucketStart(inputRow.getTimestamp()).getMillis();
  final ShardSpec actualSpec = shardSpecLookups.get(intervalStartMillis).getShardSpec(rollupBucketMillis, inputRow);
  final HadoopyShardSpec hadoopyShardSpec = hadoopShardSpecLookup.get(intervalStartMillis).get(actualSpec);

  return Optional.of(
      new Bucket(hadoopyShardSpec.getShardNum(), intervalStart, actualSpec.getPartitionNum())
  );
}
interval = config.getGranularitySpec().getSegmentGranularity().bucket(DateTimes.utc(key.get())); } else { Optional<Interval> intervalOptional = config.getGranularitySpec().bucketInterval(DateTimes.utc(key.get()));
.bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch()));
final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); if (!optInterval.isPresent()) { determinePartitionsMeters.incrementThrownAway();