/**
 * Shuts the plumber down: signals the flush thread to stop, abandons every
 * active sink, and tears down the executors.
 */
@Override
public void finishJob()
{
  log.info("Stopping job");

  // Set the stop flag FIRST so the scheduled flusher thread observes shutdown
  // immediately and cannot race the abandonSegment() calls below by flushing
  // a sink we are concurrently abandoning. (Previously this flag was set last.)
  stopped = true;

  for (final Map.Entry<Long, Sink> entry : getSinks().entrySet()) {
    abandonSegment(entry.getKey(), entry.getValue());
  }

  shutdownExecutors();

  if (flushScheduledExec != null) {
    flushScheduledExec.shutdown();
  }
}
}
/**
 * Bootstraps the plumber: creates the base persist directory, initializes the
 * executors and the flush scheduler, restores sinks from disk, and starts the
 * periodic flush thread.
 *
 * @return whatever {@code bootstrapSinksFromDisk()} returns
 */
@Override
public Object startJob()
{
  log.info("Starting job for %s", getSchema().getDataSource());

  // mkdirs() returns false both on real failure AND when the directory already
  // exists, so only complain when the directory is genuinely absent afterwards.
  // The original code silently discarded this result.
  if (!computeBaseDir(getSchema()).mkdirs() && !computeBaseDir(getSchema()).isDirectory()) {
    log.warn("Unable to create base persist directory for %s", getSchema().getDataSource());
  }

  initializeExecutors();

  if (flushScheduledExec == null) {
    // Lazily create the single-threaded scheduler used by startFlushThread().
    flushScheduledExec = Execs.scheduledSingleThreaded("flushing_scheduled_%d");
  }

  Object retVal = bootstrapSinksFromDisk();
  startFlushThread();
  return retVal;
}
@Override
public ScheduledExecutors.Signal doCall()
{
  // Bail out promptly if the plumber was shut down since the last tick.
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }

  // Sinks whose interval start falls before this cutoff have aged out of the
  // window period and are due for a flush.
  final long cutoff = getSegmentGranularity().truncate(
      getRejectionPolicy().getCurrMaxTime().minus(windowMillis)
  ).getMillis();

  // Collect first, then flush, so we never mutate the sink map mid-iteration.
  final List<Map.Entry<Long, Sink>> expired = Lists.newArrayList();
  for (final Map.Entry<Long, Sink> candidate : getSinks().entrySet()) {
    if (candidate.getKey() < cutoff) {
      log.info("Adding entry[%s] to flush.", candidate);
      expired.add(candidate);
    }
  }

  for (final Map.Entry<Long, Sink> expiredEntry : expired) {
    flushAfterDuration(expiredEntry.getKey(), expiredEntry.getValue());
  }

  // Re-check the stop flag: finishJob() may have run while we were flushing.
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }
  return ScheduledExecutors.Signal.REPEAT;
}
}
@Override
public ScheduledExecutors.Signal doCall()
{
  // Stop immediately if the plumber shut down between scheduled runs.
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }

  // Anything whose bucket start precedes this threshold is past the window
  // period and should be flushed.
  final long flushThreshold = segmentGranularity.bucketStart(
      getRejectionPolicy().getCurrMaxTime().minus(windowMillis)
  ).getMillis();

  // Two-phase: gather the stale entries, then flush them, to avoid touching
  // the sink map while iterating over it.
  final List<Map.Entry<Long, Sink>> staleSinks = Lists.newArrayList();
  for (final Map.Entry<Long, Sink> sinkEntry : getSinks().entrySet()) {
    if (sinkEntry.getKey() < flushThreshold) {
      log.info("Adding entry[%s] to flush.", sinkEntry);
      staleSinks.add(sinkEntry);
    }
  }

  for (final Map.Entry<Long, Sink> staleEntry : staleSinks) {
    flushAfterDuration(staleEntry.getKey(), staleEntry.getValue());
  }

  // finishJob() may have fired while flushing — check once more before repeating.
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }
  return ScheduledExecutors.Signal.REPEAT;
}
};
// Schedules the periodic flusher task that pushes out sinks older than the
// window period; the initial delay lines up with the next segment-granularity
// boundary plus the window.
//
// NOTE(review): this fragment appears mangled/truncated and will not compile —
//   * the opening '{' of the method body after the signature is missing;
//   * ".scheduleAtFixedRate(" is chained onto a freshly constructed Duration,
//     but Joda Duration has no such method — this looks like a garbled
//     ScheduledExecutors.scheduleAtFixedRate(...) static call (the same
//     executor/initial-delay/rate/callable argument shape follows);
//   * the ThreadRenamingCallable argument list is cut off mid-expression
//     (no closing parens, no callable body, no method close).
// Restore this method from version control rather than hand-repairing it.
private void startFlushThread() final long truncatedNow = getSegmentGranularity().truncate(new DateTime()).getMillis(); final long windowMillis = getWindowPeriod().toStandardDuration().getMillis(); new Duration(System.currentTimeMillis(), getSegmentGranularity().increment(truncatedNow) + windowMillis) .scheduleAtFixedRate( flushScheduledExec, new Duration(System.currentTimeMillis(), getSegmentGranularity().increment(truncatedNow) + windowMillis), new Duration(truncatedNow, getSegmentGranularity().increment(truncatedNow)), new ThreadRenamingCallable<ScheduledExecutors.Signal>( String.format( "%s-flusher-%d", getSchema().getDataSource(), getSchema().getShardSpec().getPartitionNum()
@Override
public ScheduledExecutors.Signal call() throws Exception
{
  // One-shot task: once the flush delay has elapsed, hand the sink off and stop.
  final String segmentId = sink.getSegment().getIdentifier();
  log.info("Abandoning segment %s", segmentId);
  abandonSegment(truncatedTime, sink);
  return ScheduledExecutors.Signal.STOP;
}
}
/**
 * Builds a {@link FlushingPlumber} for the given schema/config after checking
 * that this school is in a usable state.
 */
@Override
public Plumber findPlumber(
    final DataSchema schema,
    final RealtimeTuningConfig config,
    final FireDepartmentMetrics metrics
)
{
  verifyState();

  final FlushingPlumber plumber = new FlushingPlumber(
      flushDuration,
      schema,
      config,
      metrics,
      emitter,
      conglomerate,
      segmentAnnouncer,
      queryExecutorService,
      indexMergerV9,
      indexIO,
      cache,
      cacheConfig,
      objectMapper
  );
  return plumber;
}
@Override
public ScheduledExecutors.Signal call() throws Exception
{
  // Runs once when the flush duration expires: abandon the sink, then signal
  // the scheduler not to repeat this task.
  log.info("Abandoning segment %s", sink.getSegment().getIdentifier());
  abandonSegment(truncatedTime, sink);

  return ScheduledExecutors.Signal.STOP;
}
}
/**
 * Creates a {@link FlushingPlumber} for the given schema, after validating
 * this school's state and deriving a rejection policy from the window period.
 */
@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics)
{
  verifyState();

  final RejectionPolicy policy = rejectionPolicyFactory.create(windowPeriod);
  log.info("Creating plumber using rejectionPolicy[%s]", policy);

  return new FlushingPlumber(
      flushDuration,
      windowPeriod,
      basePersistDirectory,
      segmentGranularity,
      schema,
      metrics,
      policy,
      emitter,
      conglomerate,
      segmentAnnouncer,
      queryExecutorService,
      versioningPolicy,
      maxPendingPersists
  );
}
/**
 * Bootstraps the plumber: creates the base persist directory, initializes the
 * executors and the flush scheduler, restores sinks from disk, and starts the
 * periodic flush thread.
 */
@Override
public void startJob()
{
  log.info("Starting job for %s", getSchema().getDataSource());

  // mkdirs() returns false both on real failure AND when the directory already
  // exists, so only complain when the directory is genuinely absent afterwards.
  // The original code silently discarded this result.
  if (!computeBaseDir(getSchema()).mkdirs() && !computeBaseDir(getSchema()).isDirectory()) {
    log.warn("Unable to create base persist directory for %s", getSchema().getDataSource());
  }

  initializeExecutors();

  if (flushScheduledExec == null) {
    // Lazily create the daemon scheduler that drives startFlushThread().
    flushScheduledExec = Executors.newScheduledThreadPool(
        1,
        new ThreadFactoryBuilder()
            .setDaemon(true)
            .setNameFormat("flushing_scheduled_%d")
            .build()
    );
  }

  bootstrapSinksFromDisk();
  startFlushThread();
}
/**
 * Shuts the plumber down: signals the flush thread to stop, abandons every
 * active sink, and tears down the executors.
 */
@Override
public void finishJob()
{
  log.info("Stopping job");

  // Set the stop flag FIRST so the scheduled flusher thread observes shutdown
  // immediately and cannot race the abandonSegment() calls below by flushing
  // a sink we are concurrently abandoning. (Previously this flag was set last.)
  stopped = true;

  for (final Map.Entry<Long, Sink> entry : getSinks().entrySet()) {
    abandonSegment(entry.getKey(), entry.getValue());
  }

  shutdownExecutors();

  if (flushScheduledExec != null) {
    flushScheduledExec.shutdown();
  }
}
}