/**
 * Returns the single sink when its interval covers {@code timestamp}, otherwise {@code null}.
 */
private Sink getSink(long timestamp)
{
  return theSink.getInterval().contains(timestamp) ? theSink : null;
}
/**
 * Returns the single sink when its interval covers {@code timestamp}, otherwise {@code null}.
 */
@Override
public Sink getSink(long timestamp)
{
  return theSink.getInterval().contains(timestamp) ? theSink : null;
}
// Hand-off completion callback: drops the local sink (keyed by its interval's
// start millis, matching the sinks-map key used elsewhere) and bumps the
// hand-off metric. The trailing brace closes the enclosing anonymous class.
@Override public void run() { abandonSegment(sink.getInterval().getStartMillis(), sink); metrics.incrementHandOffCount(); } }
// Persists all swappable sinks asynchronously. The swap() calls happen on the
// calling thread (collecting each swapped hydrant with its interval); the
// actual disk writes run on persistExecutor under a renamed thread, and the
// caller-supplied commitRunnable runs only after every hydrant has been
// persisted. NOTE(review): commitRunnable runs on the executor thread, not the
// calling thread — callers must not assume same-thread execution.
@Override public void persist(final Runnable commitRunnable) { final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList(); for (Sink sink : sinks.values()) { if (sink.swappable()) { indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval())); } } log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource()); persistExecutor.execute( new ThreadRenamingRunnable(String.format("%s-incremental-persist", schema.getDataSource())) { @Override public void doRun() { for (Pair<FireHydrant, Interval> pair : indexesToPersist) { metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs)); } commitRunnable.run(); } } ); }
/**
 * Registers a new sink locally (sinks map, sink-count metric, timeline) and then
 * announces its segment. An announcement failure is reported via an alert but the
 * sink remains registered.
 */
private void addSink(final Sink sink)
{
  final Interval sinkInterval = sink.getInterval();

  sinks.put(sinkInterval.getStartMillis(), sink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(sinkInterval, sink.getVersion(), new SingleElementPartitionChunk<Sink>(sink));

  try {
    segmentAnnouncer.announceSegment(sink.getSegment());
  }
  catch (IOException e) {
    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", sinkInterval)
       .emit();
  }
}
// Best-effort deletion of a sink's on-disk index directory. Missing target is
// a silent no-op; deletion failures are alerted (with file and interval) but
// never thrown to the caller. The final trailing brace closes the enclosing
// class.
private void removeSegment(final Sink sink, final File target) { if (target.exists()) { try { log.info("Deleting Index File[%s]", target); FileUtils.deleteDirectory(target); } catch (Exception e) { log.makeAlert(e, "Unable to remove file for dataSource[%s]", schema.getDataSource()) .addData("file", target) .addData("interval", sink.getInterval()) .emit(); } } } }
/**
 * Unannounces a given sink and removes all local references to it.
 *
 * Teardown order matters: first unannounce the segment, then delete its
 * persist directory, then drop it from the sinks map and timeline, and
 * finally wake any threads blocked on handoffCondition. Any IOException in
 * the sequence aborts the remaining steps and is reported as an alert
 * rather than rethrown.
 *
 * NOTE(review): unlike a sibling variant of this teardown, this version does
 * not refresh metrics.setSinkCount after removal — confirm whether that is
 * intentional.
 */
protected void abandonSegment(final long truncatedTime, final Sink sink) { try { segmentAnnouncer.unannounceSegment(sink.getSegment()); FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval())); log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getIdentifier()); sinks.remove(truncatedTime); sinkTimeline.remove( sink.getInterval(), sink.getVersion(), new SingleElementPartitionChunk<>(sink) ); synchronized (handoffCondition) { handoffCondition.notifyAll(); } } catch (IOException e) { log.makeAlert(e, "Unable to abandon old segment for dataSource[%s]", schema.getDataSource()) .addData("interval", sink.getInterval()) .emit(); } }
/**
 * Returns the sink responsible for {@code timestamp}, creating, registering and
 * announcing a new one on demand.
 *
 * Returns {@code null} when the rejection policy refuses the timestamp. The
 * sinks map is keyed by the segment-granularity-truncated timestamp.
 */
@Override
public Sink getSink(long timestamp)
{
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final long truncatedTime = segmentGranularity.truncate(timestamp);

  Sink retVal = sinks.get(truncatedTime);
  if (retVal == null) {
    final Interval sinkInterval = new Interval(
        new DateTime(truncatedTime),
        segmentGranularity.increment(new DateTime(truncatedTime))
    );

    retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

    // Register the sink BEFORE announcing. In the original code, put/add sat
    // inside the try after announceSegment; when the announcement threw an
    // IOException the sink was returned but never tracked, so every subsequent
    // row for this interval created and re-announced a brand-new Sink and the
    // timeline was never updated. Registering first (matching addSink) keeps
    // local state consistent and alerts on the announcement failure.
    sinks.put(truncatedTime, retVal);
    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(), new SingleElementPartitionChunk<Sink>(retVal));

    try {
      segmentAnnouncer.announceSegment(retVal.getSegment());
    }
    catch (IOException e) {
      log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
         .addData("interval", retVal.getInterval())
         .emit();
    }
  }

  return retVal;
}
// Collect each swappable sink's swapped hydrant together with its interval for
// a later persist pass. NOTE(review): truncated fragment — the loop's closing
// braces lie outside this span.
for (Sink sink : sinks.values()) { if (sink.swappable()) { indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
// Sink teardown variant: unannounce, delete persist dir via removeSegment,
// untrack, refresh the sink-count metric, and remove from the timeline.
// NOTE(review): truncated/garbled fragment — the sinkTimeline.remove call is
// cut off and the .addData/.emit chain belongs to a catch block not visible
// in this span; do not edit without the full method.
try { segmentAnnouncer.unannounceSegment(sink.getSegment()); removeSegment(sink, computePersistDir(schema, sink.getInterval())); log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getIdentifier()); sinks.remove(truncatedTime); metrics.setSinkCount(sinks.size()); sinkTimeline.remove( sink.getInterval(), sink.getVersion(), new SingleElementPartitionChunk<>(sink) .addData("interval", sink.getInterval()) .emit();
/**
 * Returns the sink for {@code identifier}, lazily creating one from the
 * identifier's interval, shard spec and version when absent. A newly created
 * sink is announced (announcement failure only alerts), then registered in the
 * sinks map, sink-count metric and timeline.
 */
private Sink getOrCreateSink(final SegmentIdentifier identifier)
{
  final Sink existing = sinks.get(identifier);
  if (existing != null) {
    return existing;
  }

  final Sink newSink = new Sink(
      identifier.getInterval(),
      schema,
      identifier.getShardSpec(),
      identifier.getVersion(),
      tuningConfig.getMaxRowsInMemory(),
      tuningConfig.isReportParseExceptions()
  );

  try {
    segmentAnnouncer.announceSegment(newSink.getSegment());
  }
  catch (IOException e) {
    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", newSink.getInterval())
       .emit();
  }

  sinks.put(identifier, newSink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(newSink.getInterval(), newSink.getVersion(), identifier.getShardSpec().createChunk(newSink));

  return newSink;
}
// Register a sink under its identifier and add it to the timeline using the
// shard spec's partition chunk. NOTE(review): truncated fragment — the
// sinkTimeline.add argument list is not closed within this span.
sinks.put(identifier, currSink); sinkTimeline.add( currSink.getInterval(), currSink.getVersion(), identifier.getShardSpec().createChunk(currSink)
// Pieces of a merge-and-push runnable: a renamed worker thread, the sink's
// interval, a merge stopwatch, and a SegmentDescriptor built from the sink's
// interval/version and the configured partition number, scheduled on
// mergeExecutor. NOTE(review): heavily truncated fragment — statements here
// are disjoint snippets from a larger method; not independently compilable.
new ThreadRenamingRunnable(threadName) final Interval interval = sink.getInterval(); Stopwatch mergeStopwatch = null; new SegmentDescriptor(sink.getInterval(), sink.getVersion(), config.getShardSpec().getPartitionNum()), mergeExecutor, new Runnable()
// Finish dropping a sink: clear it from the in-flight droppingSinks set and
// remove its shard-spec chunk from the timeline. NOTE(review): truncated
// fragment — sinkTimeline.remove's argument list is not closed in this span.
droppingSinks.remove(identifier); sinkTimeline.remove( sink.getInterval(), sink.getVersion(), identifier.getShardSpec().createChunk(sink)
// Register a sink keyed by its interval's start millis and add it to the
// timeline as a single-element partition chunk. NOTE(review): truncated
// fragment — sinkTimeline.add's argument list is not closed in this span.
sinks.put(sinkInterval.getStartMillis(), currSink); sinkTimeline.add( currSink.getInterval(), currSink.getVersion(), new SingleElementPartitionChunk<Sink>(currSink)
// Opening of an async task body that captures the sink's interval up front.
// NOTE(review): truncated fragment — doRun's body brace and remainder are
// outside this span.
@Override public void doRun() final Interval interval = sink.getInterval();
// Tail of an alert-builder chain attaching diagnostic context: the sink's
// interval, the segment's partition number, and a pushed-marker file.
// NOTE(review): truncated fragment — the makeAlert(...) head and .emit() are
// outside this span.
.addData("interval", sink.getInterval()) .addData("partitionNum", segment.getShardSpec().getPartitionNum()) .addData("marker", isPushedMarker)