@Override
public Interval apply(SegmentIdentifier input)
{
  return input.getInterval();
}
})
SegmentsForSequence build()
{
  final NavigableMap<Long, SegmentsOfInterval> map = new TreeMap<>();
  for (Entry<SegmentIdentifier, Pair<SegmentWithState, List<SegmentWithState>>> entry : intervalToSegments.entrySet()) {
    map.put(
        entry.getKey().getInterval().getStartMillis(),
        new SegmentsOfInterval(entry.getKey().getInterval(), entry.getValue().lhs, entry.getValue().rhs)
    );
  }
  return new SegmentsForSequence(map, lastSegmentId);
}
}
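// Aside: a minimal sketch of why the map above is keyed on interval start
// millis rather than on the interval itself. A NavigableMap lets a caller
// find the one interval that could contain a given timestamp with a single
// floorEntry() lookup. All names below are illustrative, not Druid types.
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.joda.time.Interval;

class IntervalLookupSketch
{
  private final NavigableMap<Long, Interval> byStartMillis = new TreeMap<>();

  void add(Interval interval)
  {
    byStartMillis.put(interval.getStartMillis(), interval);
  }

  Interval find(long timestamp)
  {
    // Candidate: the interval with the greatest start <= timestamp. Assuming
    // non-overlapping intervals, containment needs only this one check.
    final Map.Entry<Long, Interval> candidate = byStartMillis.floorEntry(timestamp);
    return candidate != null && candidate.getValue().contains(timestamp) ? candidate.getValue() : null;
  }
}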
void add(SegmentIdentifier identifier)
{
  intervalToSegmentStates.computeIfAbsent(
      identifier.getInterval().getStartMillis(),
      k -> new SegmentsOfInterval(identifier.getInterval())
  ).setAppendingSegment(SegmentWithState.newSegment(identifier));
  lastSegmentId = identifier.getIdentifierAsString();
}
public void dropSegment(final SegmentIdentifier identifier)
{
  log.info("Dropping segment: %s", identifier);
  segments.remove(identifier.getInterval().getStartMillis());

  Futures.addCallback(
      appenderator.drop(identifier),
      new FutureCallback<Object>()
      {
        @Override
        public void onSuccess(Object result)
        {
          log.info("Dropped segment: %s", identifier);
        }

        @Override
        public void onFailure(Throwable e)
        {
          // TODO: Retry?
          log.warn(e, "Failed to drop segment: %s", identifier);
        }
      }
  );
}
private void addSegment(final SegmentIdentifier identifier)
{
  segments.put(identifier.getInterval().getStartMillis(), identifier);
  try {
    // Announce with an empty load spec, dimension list, metric list, and a
    // zero size: only the identity of the segment matters for announcement.
    segmentAnnouncer.announceSegment(
        new DataSegment(
            identifier.getDataSource(),
            identifier.getInterval(),
            identifier.getVersion(),
            ImmutableMap.of(),
            ImmutableList.of(),
            ImmutableList.of(),
            identifier.getShardSpec(),
            null,
            0
        )
    );
  }
  catch (IOException e) {
    log.makeAlert(e, "Failed to announce new segment[%s]", identifier.getDataSource())
       .addData("interval", identifier.getInterval())
       .emit();
  }
}
handoffNotifier.registerSegmentHandoffCallback(
    new SegmentDescriptor(
        segmentIdentifier.getInterval(),
        segmentIdentifier.getVersion(),
        segmentIdentifier.getShardSpec().getPartitionNum()
identifier.getInterval(),
schema,
identifier.getShardSpec(),
private List<SegmentIdentifier> getPendingSegmentsForIntervalWithHandle(
    final Handle handle,
    final String dataSource,
    final Interval interval
) throws IOException
{
  final List<SegmentIdentifier> identifiers = Lists.newArrayList();

  final ResultIterator<byte[]> dbSegments =
      handle.createQuery(
          StringUtils.format(
              "SELECT payload FROM %1$s WHERE dataSource = :dataSource AND start <= :end and %2$send%2$s >= :start",
              dbTables.getPendingSegmentsTable(), connector.getQuoteString()
          )
      )
            .bind("dataSource", dataSource)
            .bind("start", interval.getStart().toString())
            .bind("end", interval.getEnd().toString())
            .map(ByteArrayMapper.FIRST)
            .iterator();

  while (dbSegments.hasNext()) {
    final byte[] payload = dbSegments.next();
    final SegmentIdentifier identifier = jsonMapper.readValue(payload, SegmentIdentifier.class);

    if (interval.overlaps(identifier.getInterval())) {
      identifiers.add(identifier);
    }
  }

  dbSegments.close();

  return identifiers;
}
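// Aside: the SQL WHERE clause above is only a coarse prefilter on the stored
// start/end strings; the precise test is redone in Java with
// Interval.overlaps() before a row is accepted. A minimal, self-contained
// sketch of that two-stage filter (the candidate list is hypothetical; the
// Interval type is Joda-Time, as in the method above):
import java.util.ArrayList;
import java.util.List;
import org.joda.time.Interval;

class OverlapFilterSketch
{
  static List<Interval> overlapping(List<Interval> candidates, Interval query)
  {
    final List<Interval> result = new ArrayList<>();
    for (Interval candidate : candidates) {
      // overlaps() is the authoritative check; the SQL predicate only limits
      // how many payloads have to be deserialized.
      if (query.overlaps(candidate)) {
        result.add(candidate);
      }
    }
    return result;
  }
}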
final long intervalStart = segment.getInterval().getStartMillis();

if (intervalStart < minTimestamp) {
  log.info("Adding entry [%s] for merge and push.", segment);
);

if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis()
    && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) {
  if (previousSegmentId == null) {
    log.info(
/**
 * Move a set of identifiers out from "active", making way for newer segments.
 * This method is to support KafkaIndexTask's legacy mode and will be removed in the future.
 * See KafkaIndexTask.runLegacy().
 */
public void moveSegmentOut(final String sequenceName, final List<SegmentIdentifier> identifiers)
{
  synchronized (segments) {
    final SegmentsForSequence activeSegmentsForSequence = segments.get(sequenceName);
    if (activeSegmentsForSequence == null) {
      throw new ISE("WTF?! Asked to remove segments for sequenceName[%s] which doesn't exist...", sequenceName);
    }

    for (final SegmentIdentifier identifier : identifiers) {
      log.info("Moving segment[%s] out of active list.", identifier);
      final long key = identifier.getInterval().getStartMillis();
      final SegmentsOfInterval segmentsOfInterval = activeSegmentsForSequence.get(key);
      if (segmentsOfInterval == null
          || segmentsOfInterval.getAppendingSegment() == null
          || !segmentsOfInterval.getAppendingSegment().getSegmentIdentifier().equals(identifier)) {
        throw new ISE("WTF?! Asked to remove segment[%s] that didn't exist...", identifier);
      }
      segmentsOfInterval.finishAppendingToCurrentActiveSegment(SegmentWithState::finishAppending);
    }
  }
}
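// Aside: a simplified model of the transition applied by the last line above.
// moveSegmentOut() flips an interval's appending segment out of the APPENDING
// state so a newer segment can take its place. The enum and class below are
// illustrative sketches, not the actual Druid types.
enum SegmentState
{
  APPENDING,
  APPEND_FINISHED
}

class SegmentWithStateSketch
{
  private SegmentState state = SegmentState.APPENDING;

  void finishAppending()
  {
    // Mirrors the ISE guard above: only an actively appending segment
    // may be moved out of the active list.
    if (state != SegmentState.APPENDING) {
      throw new IllegalStateException("Segment is not in APPENDING state");
    }
    state = SegmentState.APPEND_FINISHED;
  }
}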
private Sink getOrCreateSink(final SegmentIdentifier identifier)
{
  Sink retVal = sinks.get(identifier);

  if (retVal == null) {
    retVal = new Sink(
        identifier.getInterval(),
        schema,
        identifier.getShardSpec(),
        identifier.getVersion(),
        tuningConfig.getMaxRowsInMemory(),
        maxBytesTuningConfig,
        tuningConfig.isReportParseExceptions(),
        null
    );

    try {
      segmentAnnouncer.announceSegment(retVal.getSegment());
    }
    catch (IOException e) {
      log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
         .addData("interval", retVal.getInterval())
         .emit();
    }

    sinks.put(identifier, retVal);
    metrics.setSinkCount(sinks.size());
    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(), identifier.getShardSpec().createChunk(retVal));
  }

  return retVal;
}
    new NumberedShardSpec(0, 0)
);
} else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) {
  log.warn(
      "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].",

return new SegmentIdentifier(
    dataSource,
    max.getInterval(),
    max.getVersion(),
    new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1)

return new SegmentIdentifier(
    dataSource,
    max.getInterval(),
    max.getVersion(),
    new NumberedShardSpec(
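// Aside: the branches above implement a compact allocation rule. If the
// current maximum segment shares the requested interval and its version is
// not newer than maxVersion, the next partition number is handed out;
// otherwise the request conflicts and nothing is allocated. A hedged sketch
// of that decision with simplified, illustrative types:
import org.joda.time.Interval;

final class AllocationSketch
{
  /**
   * Returns the next partition number, or null when the existing maximum
   * segment conflicts with the requested interval/version.
   */
  static Integer allocatePartition(
      Interval maxInterval,
      String maxSegmentVersion,
      int maxPartitionNum,
      Interval requestedInterval,
      String maxVersion
  )
  {
    if (!maxInterval.equals(requestedInterval) || maxSegmentVersion.compareTo(maxVersion) > 0) {
      return null; // conflicting segment: caller logs a warning and gives up
    }
    // e.g. existing partitions 0..3 for this interval -> allocate partition 4
    return maxPartitionNum + 1;
  }
}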
persistedFile = indexMerger.persist(
    indexToPersist.getIndex(),
    identifier.getInterval(),
    new File(persistDir, String.valueOf(indexToPersist.getCount())),
    indexSpec,