/**
 * Asynchronously saves the given snapshot by running {@link #doSave} on the
 * configured execution context.
 *
 * @param metadata identifies the snapshot being saved
 * @param snapshot the snapshot payload handed to {@code doSave}
 * @return a future that completes when the save task has run
 */
@Override
public Future<Void> doSaveAsync(final SnapshotMetadata metadata, final Object snapshot) {
    LOG.debug("In doSaveAsync - metadata: {}, snapshot: {}", metadata, snapshot);
    return Futures.future(() -> {
        return doSave(metadata, snapshot);
    }, executionContext);
}
/**
 * Asynchronously deletes snapshots matching the criteria by running
 * {@link #doDelete} on the configured execution context.
 *
 * @param persistenceId id whose snapshots are targeted
 * @param criteria      selection criteria forwarded to {@code doDelete}
 * @return a future that completes when the delete task has run
 */
@Override
public Future<Void> doDeleteAsync(final String persistenceId, final SnapshotSelectionCriteria criteria) {
    LOG.debug("In doDeleteAsync - persistenceId: {}, criteria: {}", persistenceId, criteria);
    return Futures.future(() -> {
        return doDelete(persistenceId, criteria);
    }, executionContext);
}
@Override public Future<Void> doDeleteAsync(final SnapshotMetadata metadata) { LOG.debug("In doDeleteAsync - metadata: {}", metadata); // Multiple snapshot files here mean that there were multiple snapshots for this seqNr - we delete all of them. // Usually snapshot-stores would keep one snapshot per sequenceNr however here in the file-based one we // timestamp snapshots and allow multiple to be kept around (for the same seqNr) if desired. return Futures.future(() -> doDelete(metadata), executionContext); }
@Override public Future<Optional<SelectedSnapshot>> doLoadAsync(final String persistenceId, final SnapshotSelectionCriteria criteria) { LOG.debug("In doLoadAsync - persistenceId: {}, criteria: {}", persistenceId, criteria); // Select the youngest 'maxLoadAttempts' snapshots that match the criteria. This may help in situations where // saving of a snapshot could not be completed because of a JVM crash. Hence, an attempt to load that snapshot // will fail but loading an older snapshot may succeed. Deque<SnapshotMetadata> metadatas = getSnapshotMetadatas(persistenceId, criteria).stream() .sorted(LocalSnapshotStore::compare).collect(reverse()).stream().limit(maxLoadAttempts) .collect(Collectors.toCollection(ArrayDeque::new)); if (metadatas.isEmpty()) { return Futures.successful(Optional.empty()); } LOG.debug("doLoadAsync - found: {}", metadatas); return Futures.future(() -> doLoad(metadatas), executionContext); }
/**
 * Runs the callable on the actor system's dispatcher and adapts the resulting
 * Scala future into a Java {@link CompletableFuture}.
 *
 * @param callable work to execute asynchronously
 * @param <T>      result type of the callable
 * @return a {@link CompletableFuture} completed with the callable's result
 */
@Override
public <T> CompletableFuture<T> execute(Callable<T> callable) {
    return FutureUtils.toJava(Futures.<T>future(callable, actorSystem.dispatcher()));
}
/**
 * Runs the callable on the actor system's dispatcher and wraps the resulting
 * Scala future in a {@code FlinkFuture}.
 *
 * @param callable work to execute asynchronously
 * @param <T>      result type of the callable
 * @return a Flink future completed with the callable's result
 */
@Override
public <T> Future<T> execute(Callable<T> callable) {
    return new FlinkFuture<>(Futures.<T>future(callable, actorSystem.dispatcher()));
}
/**
 * Executes the callable asynchronously on the actor system's dispatcher and
 * converts the Scala future to a Java {@link CompletableFuture}.
 *
 * @param callable work to execute asynchronously
 * @param <T>      result type of the callable
 * @return a {@link CompletableFuture} completed with the callable's result
 */
@Override
public <T> CompletableFuture<T> execute(Callable<T> callable) {
    return FutureUtils.toJava(Futures.<T>future(callable, actorSystem.dispatcher()));
}
/**
 * Schedules the callable on the actor system's dispatcher; the Scala future is
 * bridged to a Java {@link CompletableFuture} via {@code FutureUtils}.
 *
 * @param callable work to execute asynchronously
 * @param <T>      result type of the callable
 * @return a {@link CompletableFuture} completed with the callable's result
 */
@Override
public <T> CompletableFuture<T> execute(Callable<T> callable) {
    return FutureUtils.toJava(Futures.<T>future(callable, actorSystem.dispatcher()));
}
/**
 * Asynchronously reads the highest stored sequence number for the processor.
 *
 * @param processorId    id whose messages are inspected
 * @param fromSequenceNr lower bound supplied by the caller (not used here;
 *                       the highest key is returned regardless)
 * @return a future holding the highest sequence number, or 0 when none exist
 */
@Override
public Future<Long> doAsyncReadHighestSequenceNr(final String processorId, long fromSequenceNr) {
    return Futures.future(
        new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                // NOTE(review): persistentMap appears to be keyed by sequence number,
                // so the last key is the highest sequence number — confirm with callers.
                TreeMap<Long, PersistentRepr> sequenceMap =
                    (TreeMap<Long, PersistentRepr>) persistentMap(processorId);
                return sequenceMap.isEmpty() ? 0L : sequenceMap.lastKey();
            }
        },
        context().dispatcher());
}
/**
 * Authenticates the request asynchronously by running {@code doAuth} on the
 * shared execution context {@code EC}.
 *
 * @param request HTTP request to authenticate
 * @return a future holding the (possibly absent) authenticated identity
 */
@Override
public Future<Option<Identity>> authenticate(HttpRequest request) {
    return Futures.future(() -> {
        return Option.apply(doAuth(request));
    }, EC);
}
private void tryToAttachToJob() { LOG.info("Sending message to JobManager {} to attach to job {} and wait for progress", jobManager, jobID); Futures.future(new Callable<Object>() { @Override public Object call() throws Exception { LOG.info("Attaching to job {} at the job manager {}.", jobID, jobManager.path()); jobManager.tell( decorateMessage( new JobManagerMessages.RegisterJobClient( jobID, ListeningBehaviour.EXECUTION_RESULT_AND_STATE_CHANGES)), getSelf()); // issue a RegistrationTimeout message to check that we submit the job within // the given timeout getContext().system().scheduler().scheduleOnce( timeout, getSelf(), decorateMessage(JobClientMessages.getRegistrationTimeout()), getContext().dispatcher(), ActorRef.noSender()); return null; } }, getContext().dispatcher()); }
private void tryToAttachToJob() { LOG.info("Sending message to JobManager {} to attach to job {} and wait for progress", jobManager, jobID); Futures.future(new Callable<Object>() { @Override public Object call() throws Exception { LOG.info("Attaching to job {} at the job manager {}.", jobID, jobManager.path()); jobManager.tell( decorateMessage( new JobManagerMessages.RegisterJobClient( jobID, ListeningBehaviour.EXECUTION_RESULT_AND_STATE_CHANGES)), getSelf()); // issue a RegistrationTimeout message to check that we submit the job within // the given timeout getContext().system().scheduler().scheduleOnce( timeout, getSelf(), decorateMessage(JobClientMessages.getRegistrationTimeout()), getContext().dispatcher(), ActorRef.noSender()); return null; } }, getContext().dispatcher()); }
private void tryToAttachToJob() { LOG.info("Sending message to JobManager {} to attach to job {} and wait for progress", jobManager, jobID); Futures.future(new Callable<Object>() { @Override public Object call() throws Exception { LOG.info("Attaching to job {} at the job manager {}.", jobID, jobManager.path()); jobManager.tell( decorateMessage( new JobManagerMessages.RegisterJobClient( jobID, ListeningBehaviour.EXECUTION_RESULT_AND_STATE_CHANGES)), getSelf()); // issue a RegistrationTimeout message to check that we submit the job within // the given timeout getContext().system().scheduler().scheduleOnce( timeout, getSelf(), decorateMessage(JobClientMessages.getRegistrationTimeout()), getContext().dispatcher(), ActorRef.noSender()); return null; } }, getContext().dispatcher()); }
private void tryToAttachToJob() { LOG.info("Sending message to JobManager {} to attach to job {} and wait for progress", jobManager, jobID); Futures.future(new Callable<Object>() { @Override public Object call() throws Exception { LOG.info("Attaching to job {} at the job manager {}.", jobID, jobManager.path()); jobManager.tell( decorateMessage( new JobManagerMessages.RegisterJobClient( jobID, ListeningBehaviour.EXECUTION_RESULT_AND_STATE_CHANGES)), getSelf()); // issue a RegistrationTimeout message to check that we submit the job within // the given timeout getContext().system().scheduler().scheduleOnce( timeout, getSelf(), decorateMessage(JobClientMessages.getRegistrationTimeout()), getContext().dispatcher(), ActorRef.noSender()); return null; } }, getContext().dispatcher()); }
/**
 * Starts an embedded ZooKeeper instance on the given actor system's dispatcher.
 *
 * @param system system whose dispatcher runs the ZooKeeper server
 */
public static void startEmbeddedZooKeeper(ActorSystem system) {
    // Raise ZooKeeper's maximum buffer size before the server starts.
    System.setProperty("jute.maxbuffer", "5048583");

    Future<Void> zkFuture = Futures.future(() -> {
        new ZooKeeperRunner().run();
        return null;
    }, system.dispatcher());

    throwExceptionIfExists(zkFuture, system.dispatcher());
}
/**
 * Asynchronously replays persisted messages for {@code processorId} whose sequence
 * numbers fall within [fromSequenceNr, toSequenceNr], invoking {@code replayCallback}
 * for at most {@code max} of them.
 *
 * @param processorId    id of the processor whose messages are replayed
 * @param fromSequenceNr lowest sequence number to replay (inclusive)
 * @param toSequenceNr   highest sequence number to replay (inclusive)
 * @param max            maximum number of messages handed to the callback
 * @param replayCallback invoked once per replayed message
 * @return a future that completes when the replay task has run
 */
@Override
public Future<Void> doAsyncReplayMessages(final String processorId, final long fromSequenceNr,
        final long toSequenceNr, final long max, final Procedure<PersistentRepr> replayCallback) {
    return Futures.future(
        new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // Keep only messages inside the requested sequence-number window.
                Map<Long, PersistentRepr> replayMap = Maps.filterValues(
                    persistentMap(processorId),
                    new Predicate<PersistentRepr>() {
                        @Override
                        public boolean apply(PersistentRepr message) {
                            // Simplified from an if/else that returned the same boolean.
                            return message.sequenceNr() >= fromSequenceNr
                                && message.sequenceNr() <= toSequenceNr;
                        }
                    });

                // Use a long counter to match the type of 'max', and stop iterating once
                // the replay budget is exhausted (the original kept looping without effect).
                long count = 0;
                for (PersistentRepr message : replayMap.values()) {
                    if (count++ >= max) {
                        break;
                    }
                    replayCallback.apply(message);
                }
                return null;
            }
        },
        context().dispatcher());
}
/**
 * Creates a future whose value is determined by the asynchronously executed callable.
 *
 * @param callable whose value is delivered by the future
 * @param executor to be used to execute the callable
 * @param <T>      type of the future's value
 * @return future which represents the value of the callable
 */
public static <T> Future<T> supplyAsync(Callable<T> callable, Executor executor) {
    Preconditions.checkNotNull(callable);
    Preconditions.checkNotNull(executor);

    // Bridge the executor into a Scala ExecutionContext, then wrap the Scala future.
    return new FlinkFuture<>(Futures.future(callable, createExecutionContext(executor)));
}
/**
 * Handles a subscription request: async subscriptions are filled locally on the
 * actor system's dispatcher, while all other subscriptions are forwarded to the
 * event store singleton proxy.
 *
 * @param sender       actor that requested the subscription
 * @param subscription the subscription to fill or forward
 */
private void tryToFillSubscription(final ActorRef sender, final Subscription subscription) {
    final ActorRef self = self();
    if (subscription instanceof AsyncSubscription) {
        Future<Boolean> f = future(() -> {
            log.info("Got async subscription on {} from {}, filling subscriptions", subscription, sender);
            boolean finished = loadEvents(sender, subscription);
            if (!finished) {
                // More events remain; ask the subscriber to send a fresh subscription.
                log.info("Async IncompleteSubscriptionPleaseSendNew");
                sender.tell(new IncompleteSubscriptionPleaseSendNew(subscription.getAggregateType()), self);
            } else {
                // Caught up; switch the subscriber over to a synchronous subscription.
                log.info("Async CompleteAsyncSubscriptionPleaseSendSyncSubscription");
                sender.tell(new CompleteAsyncSubscriptionPleaseSendSyncSubscription(subscription.getAggregateType()), self);
            }
            return finished;
        }, getContext().system().dispatcher());
        f.onFailure(new OnFailure() {
            public void onFailure(Throwable failure) {
                // If loading fails, tell the subscriber to restart its subscription cycle.
                log.error("Error in AsyncSubscribe, restarting subscriber", failure);
                sender.tell(new NewEventstoreStarting(), self);
            }
        }, getContext().system().dispatcher());
    } else {
        // Fixed typo in log message: "singelton" -> "singleton".
        log.info("Sending subscription to singleton {} from {}", eventstoresingeltonProxy.path(), sender().path());
        eventstoresingeltonProxy.tell(subscription, sender());
    }
}
/**
 * Fills an async subscription by loading events on the actor system's dispatcher
 * and telling the subscriber whether the load completed or must be continued.
 *
 * @param sender       actor that requested the subscription
 * @param subscription the async subscription to fill
 */
private void tryToFillSubscription(final ActorRef sender, final Messages.AsyncSubscription subscription) {
    final ActorRef self = self();

    Future<Boolean> loadFuture = future(() -> {
        log.info("Got async subscription on {} from {}, filling subscriptions", subscription, sender);
        boolean finished = loadEvents(sender, subscription);
        if (finished) {
            log.info("Async CompleteAsyncSubscriptionPleaseSendSyncSubscription");
            sender.tell(Messages.CompleteAsyncSubscriptionPleaseSendSyncSubscription.newBuilder()
                    .setAggregateType(subscription.getAggregateType()).build(), self);
        } else {
            log.info("Async IncompleteSubscriptionPleaseSendNew");
            sender.tell(Messages.IncompleteSubscriptionPleaseSendNew.newBuilder()
                    .setAggregateType(subscription.getAggregateType()).build(), self);
        }
        return finished;
    }, getContext().system().dispatcher());

    loadFuture.onFailure(new OnFailure() {
        public void onFailure(Throwable failure) {
            // On failure, signal the subscriber to restart its subscription cycle.
            log.error("Error in AsyncSubscribe, restarting subscriber", failure);
            sender.tell(new NewEventstoreStarting(), self);
        }
    }, getContext().system().dispatcher());
}
@Override public void addRecording(Recording recording, MediaAttributes.MediaType mediaType) { final String fileExtension = mediaType.equals(MediaAttributes.MediaType.AUDIO_ONLY) ? ".wav" : ".mp4"; if (s3AccessTool != null && ec != null) { final String recordingSid = recording.getSid().toString(); URI s3Uri = s3AccessTool.getS3Uri(recordingPath+"/"+recordingSid+fileExtension); //s3AccessTool.uploadFile(recordingPath+"/"+recording.getSid().toString()+fileExtension); if (s3Uri != null) { recording = recording.setS3Uri(s3Uri); } Future<Boolean> f = Futures.future(new Callable<Boolean>() { @Override public Boolean call () throws Exception { return s3AccessTool.uploadFile(recordingPath+"/"+recordingSid+fileExtension); } }, ec); } String fileUrl = String.format("/restcomm/%s/Accounts/%s/Recordings/%s",recording.getApiVersion(),recording.getAccountSid(),recording.getSid()); recording = recording.updateFileUri(generateLocalFileUri(fileUrl, fileExtension)); final SqlSession session = sessions.openSession(); try { session.insert(namespace + "addRecording", toMap(recording)); session.commit(); } finally { session.close(); } }