public Completable refreshLocationAsync(DatabaseAccount databaseAccount) {
    return Completable.defer(() -> {
        logger.debug("refreshLocationAsync() invoked");
        if (!isRefreshing.compareAndSet(false, true)) {
            logger.debug("in the middle of another refresh. Not invoking a new refresh.");
            return Completable.complete();
        }
        logger.debug("will refresh");
        return this.refreshLocationPrivateAsync(databaseAccount)
                .doOnError(e -> this.isRefreshing.set(false));
    });
}
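// A minimal, self-contained sketch (hypothetical class and method names,
// assuming RxJava 1.x) of the compare-and-set guard pattern above. In the
// original, only the error path resets the flag here; the success-path reset
// presumably happens inside refreshLocationPrivateAsync. This sketch releases
// the guard on both outcomes so it stands alone.
import java.util.concurrent.atomic.AtomicBoolean;
import rx.Completable;

public class RefreshGuardSketch {
    private final AtomicBoolean isRefreshing = new AtomicBoolean(false);

    public Completable refreshAsync() {
        return Completable.defer(() -> {
            if (!isRefreshing.compareAndSet(false, true)) {
                // Another refresh is already in flight; complete immediately.
                return Completable.complete();
            }
            return doRefresh()
                    // Release the guard on either outcome so later refreshes can run.
                    .doOnError(e -> isRefreshing.set(false))
                    .doOnCompleted(() -> isRefreshing.set(false));
        });
    }

    private Completable doRefresh() {
        return Completable.complete(); // stand-in for the real refresh work
    }
}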
@Override
public Completable call(JobDetails jobDetails) {
    Duration runtimeBlockSize = Duration.standardHours(2);

    Trigger trigger = jobDetails.getTrigger();
    // Rewind to the previous time slice
    DateTime timeSliceInclusive = new DateTime(trigger.getTriggerTime(), DateTimeZone.UTC)
            .minus(runtimeBlockSize);
    DateTime timeSliceStart = DateTimeService.getTimeSlice(timeSliceInclusive, runtimeBlockSize);
    long startOfSlice = timeSliceStart.getMillis();

    Stopwatch stopwatch = Stopwatch.createStarted();
    logger.infof("Starting to process temp table for starting time of %s", timeSliceStart.toString());

    // TODO Optimization - new worker per token - use parallelism in Cassandra (with configured parallelism)
    return metricsService.compressBlock(startOfSlice, pageSize, maxReadConcurrency)
            .doOnError(t -> logger.errorf("Compression job failed: %s", t.getMessage()))
            .doOnCompleted(() -> {
                stopwatch.stop();
                logger.info("Finished processing data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
            });
}
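// A worked sketch of the time-slice rewind above, using a hypothetical
// stand-in for DateTimeService.getTimeSlice (assumed here to floor a
// timestamp to the start of its fixed-size block).
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Duration;

public class TimeSliceSketch {

    // Hypothetical implementation: floor the timestamp to a blockSize boundary.
    static DateTime getTimeSlice(DateTime dt, Duration blockSize) {
        long block = blockSize.getMillis();
        return new DateTime((dt.getMillis() / block) * block, DateTimeZone.UTC);
    }

    public static void main(String[] args) {
        Duration blockSize = Duration.standardHours(2);
        // e.g. a trigger firing at 13:47 UTC ...
        DateTime trigger = new DateTime(2017, 5, 4, 13, 47, DateTimeZone.UTC);
        // ... is rewound one block (to 11:47) and floored to its slice start: 10:00.
        DateTime sliceStart = getTimeSlice(trigger.minus(blockSize), blockSize);
        System.out.println(sliceStart); // 2017-05-04T10:00:00.000Z
    }
}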
public Completable connect() {
    stopped = false;

    Completable atLeastOneConfig = configProvider.configs().first().toCompletable()
        .timeout(env.bootstrapTimeout(), TimeUnit.SECONDS)
        .doOnError(new Action1<Throwable>() {
            @Override
            public void call(Throwable throwable) {
                LOGGER.warn("Did not receive initial configuration from provider.");
            }
        });

    return configProvider.start()
        .timeout(env.connectTimeout(), TimeUnit.SECONDS)
        .doOnError(new Action1<Throwable>() {
            @Override
            public void call(Throwable throwable) {
                LOGGER.warn("Cannot connect configuration provider.");
            }
        })
        .concatWith(atLeastOneConfig);
}
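// A small sketch of the concatWith ordering relied on above, assuming
// RxJava 1.x: the second Completable is subscribed only after the first
// completes, so connect() resolves only once the provider has started
// AND at least one configuration has arrived.
import rx.Completable;

public class ConcatOrderSketch {
    public static void main(String[] args) {
        Completable providerStarted = Completable.fromAction(
                () -> System.out.println("1: provider started"));
        Completable firstConfig = Completable.fromAction(
                () -> System.out.println("2: first config received"));

        providerStarted
                .concatWith(firstConfig)
                .subscribe(() -> System.out.println("3: connected"));
    }
}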
.doOnError(t -> logger.warn("Failed to compress data", t))
.doOnCompleted(() -> {
    stopwatch.stop();
    // truncated in the source; per the pattern in the job above, the elapsed
    // time is logged here
});