@Override
public Plumber findPlumber(
    final DataSchema schema,
    final RealtimeTuningConfig config,
    final FireDepartmentMetrics metrics
)
{
  // Build the appenderator that will hold in-memory/persisted data for this task.
  final Appenderator appenderator = appenderatorFactory.build(schema, config, metrics);

  // One handoff notifier per datasource; the plumber takes ownership of its lifecycle.
  final SegmentHandoffNotifier notifier =
      handoffNotifierFactory.createSegmentHandoffNotifier(schema.getDataSource());

  return new AppenderatorPlumber(
      schema,
      config,
      metrics,
      segmentAnnouncer,
      segmentPublisher,
      notifier,
      appenderator
  );
}
// startJob returns the restored commit metadata; nothing was persisted before, so null.
Assert.assertEquals(null, plumber.startJob());

// NOTE(review): the array declaration was truncated in the source (the text jumped from
// startJob() straight into the second IR(...) element). Reconstructed the apparent
// intent — three rows in the same "2000" bucket — TODO confirm the first row's values
// against the original test.
final InputRow[] rows = new InputRow[]{
    AppenderatorTest.IR("2000", "foo", 1),
    AppenderatorTest.IR("2000", "bar", 2),
    AppenderatorTest.IR("2000", "qux", 4)
};

// Each add lands in the same segment; the reported row count grows by one per row.
Assert.assertEquals(1, plumber.add(rows[0], null).getRowCount());
Assert.assertEquals(2, plumber.add(rows[1], null).getRowCount());
Assert.assertEquals(3, plumber.add(rows[2], null).getRowCount());

// All three rows share one granularity bucket, so exactly one segment is tracked.
Assert.assertEquals(1, plumber.getSegmentsView().size());

SegmentIdWithShardSpec si = plumber.getSegmentsView().values().toArray(new SegmentIdWithShardSpec[0])[0];

Assert.assertTrue(appenderator.getSegments().isEmpty());

plumber.dropSegment(si);
plumber.finishJob();
/**
 * Wires the plumber's collaborators together. All parameters are stored as-is; the
 * only derived state is the rejection policy, built from the tuning config.
 *
 * @param schema            datasource schema driving segment granularity/versioning
 * @param config            realtime tuning config (rejection policy, window period, shard spec)
 * @param metrics           fire department metrics sink
 * @param segmentAnnouncer  announces served segments
 * @param segmentPublisher  publishes pushed segments
 * @param handoffNotifier   notifies when historicals take over segments
 * @param appenderator      underlying appenderator doing the actual indexing
 */
public AppenderatorPlumber(
    DataSchema schema,
    RealtimeTuningConfig config,
    FireDepartmentMetrics metrics,
    DataSegmentAnnouncer segmentAnnouncer,
    SegmentPublisher segmentPublisher,
    SegmentHandoffNotifier handoffNotifier,
    Appenderator appenderator
)
{
  this.schema = schema;
  this.config = config;
  // Rejection policy decides which row timestamps are accepted, scoped by the window period.
  this.rejectionPolicy = config.getRejectionPolicyFactory().create(config.getWindowPeriod());
  this.metrics = metrics;
  this.segmentAnnouncer = segmentAnnouncer;
  this.segmentPublisher = segmentPublisher;
  this.handoffNotifier = handoffNotifier;
  this.appenderator = appenderator;

  log.info("Creating plumber using rejectionPolicy[%s]", getRejectionPolicy());
}
@Override
public Object startJob()
{
  // NOTE(review): the startup sequence below looks order-sensitive (notifier before
  // appenderator, executors before the persist thread) — preserve the ordering.
  handoffNotifier.start();
  // startJob's return value is passed through to the caller untouched.
  Object retVal = appenderator.startJob();
  initializeExecutors();
  startPersistThread();
  // Push pending sinks bootstrapped from previous run
  mergeAndPush();
  return retVal;
}
// NOTE(review): fragment — the enclosing method declaration and the closing brace(s)
// for this if-block are outside this view; token content left untouched.
if (lastCommitterSupplier != null) {
  // A committer was captured on add(), i.e. at least one row arrived since the last
  // push — flush outstanding data before tearing everything down.
  mergeAndPush();
  stopped = true;
  handoffNotifier.close();
  shutdownExecutors();
  appenderator.close();
// Failure callback for the publish step: alerts with the affected segment ids, and
// during shutdown drops the failed segments instead of leaving them half-written.
@Override
public Void apply(Throwable throwable)
{
  // Render segment ids for the alert payload.
  final List<String> segmentIdentifierStrings = Lists.transform(
      segmentsToPush,
      SegmentIdWithShardSpec::toString
  );
  log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource())
     .addData("segments", segmentIdentifierStrings)
     .emit();
  if (shuttingDown) {
    // We're trying to shut down, and these segments failed to push. Let's just get rid of them.
    // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
    cleanShutdown = false;
    for (SegmentIdWithShardSpec identifier : segmentsToPush) {
      dropSegment(identifier);
    }
  }
  return null;
}
};
@Override public IncrementalIndexAddResult add(InputRow row, Supplier<Committer> committerSupplier) throws IndexSizeExceededException { final SegmentIdWithShardSpec identifier = getSegmentIdentifier(row.getTimestampFromEpoch()); if (identifier == null) { return Plumber.THROWAWAY; } try { final Appenderator.AppenderatorAddResult addResult = appenderator.add(identifier, row, committerSupplier); lastCommitterSupplier = committerSupplier; return new IncrementalIndexAddResult(addResult.getNumRowsInSegment(), 0, addResult.getParseException()); } catch (SegmentNotWritableException e) { // Segment already started handoff return Plumber.NOT_WRITABLE; } }
/**
 * Maps a row timestamp to the segment it belongs to, creating and registering a new
 * segment id for a not-yet-seen granularity bucket.
 *
 * @return the segment id, or null when the timestamp is rejected by the policy
 */
private SegmentIdWithShardSpec getSegmentIdentifier(long timestamp)
{
  // Rows outside the rejection window get no segment; the caller throws them away.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  final DateTime bucketStart = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long bucketStartMillis = bucketStart.getMillis();

  final SegmentIdWithShardSpec existing = segments.get(bucketStartMillis);
  if (existing != null) {
    return existing;
  }

  // First row for this bucket: build the interval covering it and register a fresh id.
  final Interval interval = new Interval(bucketStart, segmentGranularity.increment(bucketStart));
  final SegmentIdWithShardSpec created = new SegmentIdWithShardSpec(
      schema.getDataSource(),
      interval,
      versioningPolicy.getVersion(interval),
      config.getShardSpec()
  );
  addSegment(created);
  return created;
}
@Override
public Object startJob()
{
  // NOTE(review): the startup sequence below looks order-sensitive (notifier before
  // appenderator, executors before the persist thread) — preserve the ordering.
  handoffNotifier.start();
  // startJob's return value is passed through to the caller untouched.
  Object retVal = appenderator.startJob();
  initializeExecutors();
  startPersistThread();
  // Push pending sinks bootstrapped from previous run
  mergeAndPush();
  return retVal;
}
@Override public Void apply(Throwable throwable) { final List<String> segmentIdentifierStrings = Lists.transform( segmentsToPush, new Function<SegmentIdentifier, String>() { @Override public String apply(SegmentIdentifier input) { return input.getIdentifierAsString(); } } ); log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource()) .addData("segments", segmentIdentifierStrings) .emit(); if (shuttingDown) { // We're trying to shut down, and these segments failed to push. Let's just get rid of them. // This call will also delete possibly-partially-written files, so we don't need to do it explicitly. cleanShutdown = false; for (SegmentIdentifier identifier : segmentsToPush) { dropSegment(identifier); } } return null; } };
// NOTE(review): fragment — the enclosing method declaration and the closing brace(s)
// for this if-block are outside this view; token content left untouched.
if (lastCommitterSupplier != null) {
  // A committer was captured on add(), i.e. at least one row arrived since the last
  // push — flush outstanding data before tearing everything down.
  mergeAndPush();
  stopped = true;
  handoffNotifier.close();
  shutdownExecutors();
  appenderator.close();
@Override public IncrementalIndexAddResult add(InputRow row, Supplier<Committer> committerSupplier) throws IndexSizeExceededException { final SegmentIdentifier identifier = getSegmentIdentifier(row.getTimestampFromEpoch()); if (identifier == null) { return Plumber.THROWAWAY; } try { final Appenderator.AppenderatorAddResult addResult = appenderator.add(identifier, row, committerSupplier); lastCommitterSupplier = committerSupplier; return new IncrementalIndexAddResult(addResult.getNumRowsInSegment(), 0, addResult.getParseException()); } catch (SegmentNotWritableException e) { // Segment already started handoff return Plumber.NOT_WRITABLE; } }
/**
 * Maps a row timestamp to the segment it belongs to, creating and registering a new
 * segment identifier for a not-yet-seen granularity bucket.
 *
 * @return the segment identifier, or null when the timestamp is rejected by the policy
 */
private SegmentIdentifier getSegmentIdentifier(long timestamp)
{
  // Rows outside the rejection window get no segment; the caller throws them away.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  final DateTime bucketStart = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long bucketStartMillis = bucketStart.getMillis();

  final SegmentIdentifier existing = segments.get(bucketStartMillis);
  if (existing != null) {
    return existing;
  }

  // First row for this bucket: build the interval covering it and register a fresh id.
  final Interval interval = new Interval(bucketStart, segmentGranularity.increment(bucketStart));
  final SegmentIdentifier created = new SegmentIdentifier(
      schema.getDataSource(),
      interval,
      versioningPolicy.getVersion(interval),
      config.getShardSpec()
  );
  addSegment(created);
  return created;
}
// NOTE(review): fragment — the statement this ")" closes and the remaining constructor
// arguments (the appenderator) are outside this view; token content left untouched.
);
this.plumber = new AppenderatorPlumber(
    appenderatorTester.getSchema(),
    tuningConfig,
    appenderatorTester.getMetrics(),
    segmentAnnouncer,
    segmentPublisher,
    handoffNotifier,
/**
 * Wires the plumber's collaborators together. All parameters are stored as-is; the
 * only derived state is the rejection policy, built from the tuning config.
 *
 * @param schema            datasource schema driving segment granularity/versioning
 * @param config            realtime tuning config (rejection policy, window period, shard spec)
 * @param metrics           fire department metrics sink
 * @param segmentAnnouncer  announces served segments
 * @param segmentPublisher  publishes pushed segments
 * @param handoffNotifier   notifies when historicals take over segments
 * @param appenderator      underlying appenderator doing the actual indexing
 */
public AppenderatorPlumber(
    DataSchema schema,
    RealtimeTuningConfig config,
    FireDepartmentMetrics metrics,
    DataSegmentAnnouncer segmentAnnouncer,
    SegmentPublisher segmentPublisher,
    SegmentHandoffNotifier handoffNotifier,
    Appenderator appenderator
)
{
  this.schema = schema;
  this.config = config;
  // Rejection policy decides which row timestamps are accepted, scoped by the window period.
  this.rejectionPolicy = config.getRejectionPolicyFactory().create(config.getWindowPeriod());
  this.metrics = metrics;
  this.segmentAnnouncer = segmentAnnouncer;
  this.segmentPublisher = segmentPublisher;
  this.handoffNotifier = handoffNotifier;
  this.appenderator = appenderator;

  log.info("Creating plumber using rejectionPolicy[%s]", getRejectionPolicy());
}
@Override
public Plumber findPlumber(
    final DataSchema schema,
    final RealtimeTuningConfig config,
    final FireDepartmentMetrics metrics
)
{
  // Build the appenderator that will hold in-memory/persisted data for this task.
  final Appenderator appenderator = appenderatorFactory.build(schema, config, metrics);

  // One handoff notifier per datasource; the plumber takes ownership of its lifecycle.
  final SegmentHandoffNotifier notifier =
      handoffNotifierFactory.createSegmentHandoffNotifier(schema.getDataSource());

  return new AppenderatorPlumber(
      schema,
      config,
      metrics,
      segmentAnnouncer,
      segmentPublisher,
      notifier,
      appenderator
  );
}