@Override public Completable call(JobDetails jobDetails) { Duration runtimeBlockSize = Duration.standardHours(2); Trigger trigger = jobDetails.getTrigger(); DateTime timeSliceInclusive = new DateTime(trigger.getTriggerTime(), DateTimeZone.UTC).minus(runtimeBlockSize); // Rewind to previous timeslice DateTime timeSliceStart = DateTimeService.getTimeSlice(timeSliceInclusive, runtimeBlockSize); long startOfSlice = timeSliceStart.getMillis(); Stopwatch stopwatch = Stopwatch.createStarted(); logger.infof("Starting to process temp table for starting time of %s", timeSliceStart.toString()); // TODO Optimization - new worker per token - use parallelism in Cassandra (with configured parallelism) return metricsService.compressBlock(startOfSlice, pageSize, maxReadConcurrency) .doOnError(t -> logger.errorf("Compression job failed: %s", t.getMessage())) .doOnCompleted(() -> { stopwatch.stop(); logger.info("Finished processing data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms"); }); } }
@Override public Completable call(JobDetails jobDetails) { Duration runtimeBlockSize = Duration.standardHours(2); Trigger trigger = jobDetails.getTrigger(); DateTime timeSliceInclusive = new DateTime(trigger.getTriggerTime(), DateTimeZone.UTC).minus(runtimeBlockSize); // Rewind to previous timeslice DateTime timeSliceStart = DateTimeService.getTimeSlice(timeSliceInclusive, runtimeBlockSize); long startOfSlice = timeSliceStart.getMillis(); Stopwatch stopwatch = Stopwatch.createStarted(); logger.infof("Starting to process temp table for starting time of %s", timeSliceStart.toString()); // TODO Optimization - new worker per token - use parallelism in Cassandra (with configured parallelism) return metricsService.compressBlock(startOfSlice, pageSize, maxReadConcurrency) .doOnError(t -> logger.errorf("Compression job failed: %s", t.getMessage())) .doOnCompleted(() -> { stopwatch.stop(); logger.info("Finished processing data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms"); }); } }
.doOnCompleted(() -> { if (wrap) { out.write(Buffer.buffer("}"));
// Maintenance job: rounds the trigger time down to the start of the previous even hour
// (UTC) and asks the service to verify/create the temporary tables needed from that
// block up to `forwardTime` into the future. The returned Completable finishes once
// the tables are guaranteed to exist for that window.
@Override public Completable call(JobDetails jobDetails) { Trigger trigger = jobDetails.getTrigger(); ZonedDateTime currentBlock = ZonedDateTime.ofInstant(Instant.ofEpochMilli(trigger.getTriggerTime()), UTC) .with(DateTimeService.startOfPreviousEvenHour()); ZonedDateTime lastMaintainedBlock = currentBlock.plus(forwardTime); return service.verifyAndCreateTempTables(currentBlock, lastMaintainedBlock) .doOnCompleted(() -> logger.debugf("Temporary tables are valid until %s", lastMaintainedBlock.toString())); } }
// Maintenance job: rounds the trigger time down to the start of the previous even hour
// (UTC) and asks the service to verify/create the temporary tables needed from that
// block up to `forwardTime` into the future. The returned Completable finishes once
// the tables are guaranteed to exist for that window.
@Override public Completable call(JobDetails jobDetails) { Trigger trigger = jobDetails.getTrigger(); ZonedDateTime currentBlock = ZonedDateTime.ofInstant(Instant.ofEpochMilli(trigger.getTriggerTime()), UTC) .with(DateTimeService.startOfPreviousEvenHour()); ZonedDateTime lastMaintainedBlock = currentBlock.plus(forwardTime); return service.verifyAndCreateTempTables(currentBlock, lastMaintainedBlock) .doOnCompleted(() -> logger.debugf("Temporary tables are valid until %s", lastMaintainedBlock.toString())); } }
// Graceful shutdown: marks the instance as stopped, disconnects every DCP channel
// (flatMapCompletable subscribes the disconnects concurrently and waits for all of
// them), and only then — via andThen — stops the config provider, but only when this
// instance created/owns it. Completion is logged once the whole chain finishes.
// NOTE(review): `stopped` is written here without visible synchronization — assumed
// to be volatile or otherwise safely published; confirm at the field declaration.
public Completable stop() { LOGGER.debug("Instructed to shutdown."); stopped = true; Completable channelShutdown = Observable .from(channels) .flatMapCompletable(new Func1<DcpChannel, Completable>() { @Override public Completable call(DcpChannel dcpChannel) { return dcpChannel.disconnect(); } }) .toCompletable(); if (ownsConfigProvider) { channelShutdown = channelShutdown.andThen(configProvider.stop()); } return channelShutdown.doOnCompleted(new Action0() { @Override public void call() { LOGGER.info("Shutdown complete."); } }); }
.doOnCompleted(() -> { stopwatch.stop(); logger.info("Finished compressing data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) +
.doOnCompleted(() -> { stopwatch.stop(); logger.info("Finished compressing data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) +
}) .toCompletable() .doOnCompleted(() -> updateIndexerTasks(messages));
// Verticle bootstrap: deploys extension verticles first, publishes MESSAGE_ON_INIT to
// them, then deploys the remaining verticles strictly in order (task -> indexer ->
// importer -> metadata -> http server) via flatMap chaining. Only after the HTTP
// server is up does it publish MESSAGE_POST_INIT and complete startFuture; any
// failure anywhere in the chain fails the future instead.
@Override public void start(Future<Void> startFuture) { log.info("Launching GeoRocket " + getVersion() + " ..."); deployExtensionVerticles() .doOnCompleted(() -> { vertx.eventBus().publish(ExtensionVerticle.EXTENSION_VERTICLE_ADDRESS, new JsonObject().put("type", ExtensionVerticle.MESSAGE_ON_INIT)); }) .andThen(deployTask() .flatMap(v -> deployIndexer()) .flatMap(v -> deployImporter()) .flatMap(v -> deployMetadata()) .flatMap(v -> deployHttpServer()) ) .subscribe(id -> { vertx.eventBus().publish(ExtensionVerticle.EXTENSION_VERTICLE_ADDRESS, new JsonObject().put("type", ExtensionVerticle.MESSAGE_POST_INIT)); log.info("GeoRocket launched successfully."); startFuture.complete(); }, startFuture::fail); }
.call(details) .doOnTerminate(() -> jobsService.resetJobDetails(details)) .doOnCompleted(() -> { stopwatch.stop(); if (logger.isDebugEnabled()) {
@Override public Completable call(JobDetails details) { String tenantId = details.getParameters().get("tenantId"); // The concat operator is used instead of merge to ensure things execute in order. The deleteMetricData // method queries the metrics index, so we want to update the index only after we have finished deleting // data. return deleteMetricData(tenantId) .concatWith(deleteTenant(tenantId)) .concatWith(deleteRetentions(tenantId)) .concatWith(deleteMetricsIndex(tenantId)) .concatWith(deleteTags(tenantId)) .toCompletable() .doOnCompleted(() -> logger.infof("Finished deleting " + tenantId)); }
@Override public Completable call(JobDetails details) { String tenantId = details.getParameters().get("tenantId"); // The concat operator is used instead of merge to ensure things execute in order. The deleteMetricData // method queries the metrics index, so we want to update the index only after we have finished deleting // data. return deleteMetricData(tenantId) .concatWith(deleteTenant(tenantId)) .concatWith(deleteRetentions(tenantId)) .concatWith(deleteMetricsIndex(tenantId)) .concatWith(deleteTags(tenantId)) .toCompletable() .doOnCompleted(() -> logger.infof("Finished deleting " + tenantId)); }
// Refreshes the by-name collection cache entry for the request's collection path.
// Two paths: (1) if the request carries a resolvedCollectionRid, the rid is passed as
// the "obsolete" value so the name cache only hits the backend when its current entry
// still matches that previously-resolved rid (i.e. it wasn't refreshed already by a
// concurrent caller); a successful fetch also updates the by-id cache. (2) Without a
// rid (e.g. a client ForceRefresh), the refresh is unconditional. In both cases the
// rid is cleared on completion so the caller re-resolves against the fresh entry.
// NOTE(review): the else branch builds the Completable via fromAction, so this.refresh
// runs lazily at subscribe time — presumably intended; confirm subscribers exist.
private Completable refreshAsync(RxDocumentServiceRequest request) { // TODO System.Diagnostics.Debug.Assert(request.IsNameBased); String resourceFullName = PathsHelper.getCollectionPath(request.getResourceAddress()); Completable completable = null; if (request.requestContext.resolvedCollectionRid != null) { // Here we will issue backend call only if cache wasn't already refreshed (if whatever is there corresponds to previously resolved collection rid). DocumentCollection obsoleteValue = new DocumentCollection(); obsoleteValue.setResourceId(request.requestContext.resolvedCollectionRid); completable = this.collectionInfoByNameCache.getAsync( resourceFullName, obsoleteValue, () -> { Single<DocumentCollection> collectionObs = this.getByNameAsync(resourceFullName); return collectionObs.doOnSuccess(collection -> { this.collectionInfoByIdCache.set(collection.getResourceId(), collection); }); }).toCompletable(); } else { // In case of ForceRefresh directive coming from client, there will be no ResolvedCollectionRid, so we // need to refresh unconditionally. completable = Completable.fromAction(() -> this.refresh(request.getResourceAddress())); } return completable.doOnCompleted(() -> request.requestContext.resolvedCollectionRid = null); }
// Merges two chunks with MergeNamespacesStrategy after initializing the strategy with
// both chunks' metadata, then asserts the buffered output is a single XML document
// whose root combines the namespaces and wraps both chunks' contents.
// NOTE(review): finish(bws) is invoked inside doOnCompleted for its side effect; if it
// returns a Completable/Observable that value is discarded unsubscribed — confirm
// finish performs its work eagerly.
/** * Test a simple merge * @param context the test context */ @Test public void simple(TestContext context) { Async async = context.async(); MergeStrategy strategy = new MergeNamespacesStrategy(); BufferWriteStream bws = new BufferWriteStream(); strategy.init(META1) .andThen(strategy.init(META2)) .andThen(strategy.merge(new DelegateChunkReadStream(CHUNK1), META1, bws)) .andThen(strategy.merge(new DelegateChunkReadStream(CHUNK2), META2, bws)) .doOnCompleted(() -> strategy.finish(bws)) .subscribe(() -> { context.assertEquals(XMLHEADER + EXPECTEDROOT + CONTENTS1 + CONTENTS2 + "</" + EXPECTEDROOT.getName() + ">", bws.getBuffer().toString("utf-8")); async.complete(); }, context::fail); }
// Verifies AllSameStrategy still merges a chunk whose metadata was never passed to
// init: only the first chunk is initialized ("skip second init"), yet both chunks are
// expected to appear in the merged XML output.
// NOTE(review): finish(bws) is invoked inside doOnCompleted for its side effect; if it
// returns a Completable/Observable that value is discarded unsubscribed — confirm
// finish performs its work eagerly.
/** * Test if chunks that have not been passed to the initalize method can be merged * @param context the test context */ @Test public void mergeUninitialized(TestContext context) { Async async = context.async(); MergeStrategy strategy = new AllSameStrategy(); BufferWriteStream bws = new BufferWriteStream(); strategy.init(cm) // skip second init .andThen(strategy.merge(new DelegateChunkReadStream(chunk1), cm, bws)) .andThen(strategy.merge(new DelegateChunkReadStream(chunk2), cm, bws)) .doOnCompleted(() -> strategy.finish(bws)) .subscribe(() -> { context.assertEquals(XMLHEADER + "<root><test chunk=\"1\"></test><test chunk=\"2\"></test></root>", bws.getBuffer().toString("utf-8")); async.complete(); }, context::fail); }
// Happy-path AllSameStrategy test: initializes the strategy with the same metadata for
// both chunks, merges the two chunks, and asserts the combined XML output wraps both
// under a single root element.
// NOTE(review): finish(bws) is invoked inside doOnCompleted for its side effect; if it
// returns a Completable/Observable that value is discarded unsubscribed — confirm
// finish performs its work eagerly.
/** * Test a simple merge * @param context the test context */ @Test public void simple(TestContext context) { Async async = context.async(); MergeStrategy strategy = new AllSameStrategy(); BufferWriteStream bws = new BufferWriteStream(); strategy.init(cm) .andThen(strategy.init(cm)) .andThen(strategy.merge(new DelegateChunkReadStream(chunk1), cm, bws)) .andThen(strategy.merge(new DelegateChunkReadStream(chunk2), cm, bws)) .doOnCompleted(() -> strategy.finish(bws)) .subscribe(() -> { context.assertEquals(XMLHEADER + "<root><test chunk=\"1\"></test><test chunk=\"2\"></test></root>", bws.getBuffer().toString("utf-8")); async.complete(); }, context::fail); }