Refine search
/**
 * Schedules a retry: after {@code asstManagerRetryIntervalMillis} milliseconds this actor
 * sends itself a {@link ContinueToSendToBatchSenderAsstManager} message carrying
 * {@code processedWorkerCount}, so the batch-sender assistant-manager loop resumes
 * from where it left off.
 */
public void waitAndRetry() {
    // The message carries current progress so the retry continues rather than restarts.
    final ContinueToSendToBatchSenderAsstManager retryMessage =
            new ContinueToSendToBatchSenderAsstManager(processedWorkerCount);

    logger.debug("NOW WAIT Another " + asstManagerRetryIntervalMillis + " MS. at "
            + PcDateUtils.getNowDateTimeStrStandard());

    // One-shot timer: deliver retryMessage back to this actor after the retry interval.
    getContext()
            .system()
            .scheduler()
            .scheduleOnce(
                    Duration.create(asstManagerRetryIntervalMillis, TimeUnit.MILLISECONDS),
                    getSelf(),
                    retryMessage,
                    getContext().system().dispatcher(),
                    getSelf());
}
// NOTE(review): this span looks like several separate search-result snippets fused onto one
// line — `ActorRef readerRef` is declared three times and the braces are unbalanced, so it
// cannot compile as-is. The apparent intent of each snippet: lazily create a per-queue
// reader / timeouter / shard-allocator actor (via GuiceActorProducer) and forward the
// request to it. Recover the original, separate methods from the source file before editing.
if ( queueReadersByQueueName.get( request.getQueueName() ) == null ) { if ( !request.isOnlyIfEmpty() || inMemoryQueue.peek( request.getQueueName()) == null ) { ActorRef readerRef = getContext().actorOf( Props.create( GuiceActorProducer.class, QueueRefresher.class ), request.getQueueName() + "_reader"); queueReadersByQueueName.get( request.getQueueName() ).tell( request, self() ); ActorRef readerRef = getContext().actorOf( Props.create( GuiceActorProducer.class, QueueTimeouter.class), request.getQueueName() + "_timeouter"); queueTimeoutersByQueueName.get( request.getQueueName() ).tell( request, self() ); ActorRef readerRef = getContext().actorOf( Props.create( GuiceActorProducer.class, ShardAllocator.class), request.getQueueName() + "_shard_allocator"); shardAllocatorsByQueueName.get( request.getQueueName() ).tell( request, self() );
// NOTE(review): three fused snippets — `Cancellable scheduler` is declared three times, so
// this cannot compile as one unit. Each snippet registers a repeating per-queue timer and
// remembers its Cancellable for later teardown: (1) queue refresh every
// getQueueRefreshMilliseconds(), (2) timeout sweep every getQueueTimeoutSeconds()/2
// (half the timeout so an expiry is never missed by a full window), (3) shard-allocation
// check every getShardAllocationCheckFrequencyMillis(). Restore the original methods
// before editing.
Cancellable scheduler = getContext().system().scheduler().schedule( Duration.create( 0, TimeUnit.MILLISECONDS ), Duration.create( qakkaFig.getQueueRefreshMilliseconds(), TimeUnit.MILLISECONDS ), self(), new QueueRefreshRequest( queueName, false ), getContext().dispatcher(), getSelf() ); refreshSchedulersByQueueName.put( queueName, scheduler ); Cancellable scheduler = getContext().system().scheduler().schedule( Duration.create( 0, TimeUnit.SECONDS ), Duration.create( qakkaFig.getQueueTimeoutSeconds() / 2, TimeUnit.SECONDS ), self(), new QueueTimeoutRequest( queueName ), getContext().dispatcher(), getSelf() ); timeoutSchedulersByQueueName.put( queueName, scheduler ); Cancellable scheduler = getContext().system().scheduler().schedule( Duration.create( 0, TimeUnit.MILLISECONDS ), Duration.create( qakkaFig.getShardAllocationCheckFrequencyMillis(), TimeUnit.MILLISECONDS ), self(), new ShardCheckRequest( queueName ), getContext().dispatcher(), getSelf() ); shardAllocationSchedulersByQueueName.put( queueName, scheduler );
// NOTE(review): incomplete fragment — it starts mid-argument-list (the enclosing call and
// method are outside this view) and its braces are unbalanced. Visible behavior: a response
// is replied to `sender` only if it is not the system deadLetters ref (i.e. there is a live
// requester), then this actor stops itself.
statusCodeInt, PcDateUtils.getNowDateTimeStrStandard(), responseHeaders); if (!getContext().system().deadLetters().equals(sender)) { sender.tell(res, getSelf()); getContext().stop(getSelf());
/**
 * (Re)arms the connection timeout: cancels any previously scheduled timeout, allocates a
 * fresh timeout id, and schedules a {@code ConnectionTimeout} message to this actor after
 * {@code timeout}. The fresh id lets the handler distinguish a current timeout from one
 * belonging to a superseded registration.
 */
private void registerConnectionTimeout() {
    // Drop any earlier timer so only the newest timeout can fire.
    if (connectionTimeout != null) {
        connectionTimeout.cancel();
    }

    connectionTimeoutId = UUID.randomUUID();
    Object timeoutMessage =
            decorateMessage(new JobClientMessages.ConnectionTimeout(connectionTimeoutId));

    connectionTimeout =
            getContext()
                    .system()
                    .scheduler()
                    .scheduleOnce(
                            timeout,
                            getSelf(),
                            timeoutMessage,
                            getContext().dispatcher(),
                            ActorRef.noSender());
}
@Override protected void onRecoveryComplete() { restoreFromSnapshot = null; //notify shard manager getContext().parent().tell(new ActorInitialized(), getSelf()); // Being paranoid here - this method should only be called once but just in case... if (txCommitTimeoutCheckSchedule == null) { // Schedule a message to be periodically sent to check if the current in-progress // transaction should be expired and aborted. FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS); txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule( period, period, getSelf(), TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender()); } }
/**
 * Sends a {@code RegisterRoleChangeListener} message to every notifier that has not yet
 * acknowledged registration (value {@code false} in {@code notifierRegistrationStatus}).
 *
 * <p>A notifier that cannot be resolved is logged and skipped so one unreachable notifier
 * does not block registration with the others; its entry stays {@code false} and is retried
 * on the next invocation.
 */
private void sendRegistrationRequests() {
    for (Map.Entry<String, Boolean> entry : notifierRegistrationStatus.entrySet()) {
        if (entry.getValue()) {
            continue; // already registered with this notifier
        }
        try {
            LOG.debug("{} registering with {}", getSelf().path().toString(), entry.getKey());
            // Await.result blocks; `duration` bounds both the actor resolution
            // and the wait itself.
            ActorRef notifier = Await.result(
                    getContext().actorSelection(entry.getKey()).resolveOne(duration), duration);
            notifier.tell(new RegisterRoleChangeListener(), getSelf());
        } catch (Exception e) {
            // FIX: the exception was previously dropped, hiding the failure cause.
            // Pass it to the logger so the stack trace is preserved.
            LOG.error("ERROR!! Unable to send registration request to notifier {}",
                    entry.getKey(), e);
        }
    }
}
/**
 * Registers this trigger with the actor system's scheduler: a one-shot schedule when no
 * repeat interval is configured ({@code repeatedTriggerTime <= 0}), otherwise a repeating
 * schedule with the configured interval, both starting at {@link #startTime()} and
 * delivering the scheduled message to the context's self actor.
 *
 * @param context actor context supplying scheduler, self reference, and dispatcher
 */
public void schedule(UntypedActorContext context) {
    final boolean repeating = repeatedTriggerTime > 0;
    if (repeating) {
        LOGGER.info("Scheduling repeated {}", toString());
        context.system().scheduler().schedule(
                startTime(),
                Duration.create(getRepeatedTriggerTime(), getTriggerTimeUnit()),
                context.self(),
                getScheduledMessage(),
                context.dispatcher(),
                ActorRef.noSender());
    } else {
        LOGGER.info("Scheduling once {}", toString());
        context.system().scheduler().scheduleOnce(
                startTime(),
                context.self(),
                getScheduledMessage(),
                context.dispatcher(),
                ActorRef.noSender());
    }
}
// NOTE(review): fused/incomplete fragment — the consecutive unconditional tell() calls and
// the dangling `} else {` show pieces of more than one method collapsed together; it cannot
// compile as-is. Also suspicious: every reply passes getSender() as the *sender* argument
// (the reply appears to come from the requester itself) — presumably getSelf() was intended;
// verify against the original source before reusing.
ActorSelection service = getContext().actorSelection( address + routerPath ); service.tell( message, getSender() ); getSender().tell( new ErrorResponse("ClientActor not ready"), getSender() ); getSender().tell( new StatusMessage( name, StatusMessage.Status.READY ), getSender() ); } else { getSender().tell( new StatusMessage( name, StatusMessage.Status.INITIALIZING), getSender() );
// NOTE(review): incomplete fragment with unbalanced braces (a dangling `&&` condition shows
// code was cut out between the worker loop and the batchSenderAsstManager stop). Visible
// intent: report the batch response to the director, record the end time, then stop the
// worker actors, the assistant manager, and finally this actor itself. Note isTerminated()
// on ActorRef is deprecated in Akka and the getSelf() != null guard is almost certainly
// always true — revisit when restoring the original method.
director.tell(batchResponseFromManager, getSelf()); task.setExecutionEndTime(endTime); for (ActorRef worker : workers.values()) { getContext().stop(worker); && !batchSenderAsstManager.isTerminated()) { getContext().stop(batchSenderAsstManager); if (getSelf() != null && !getSelf().isTerminated()) { getContext().stop(getSelf());
public Worker(ActorRef clusterClient, Props workExecutorProps, FiniteDuration registerInterval) { this.clusterClient = clusterClient; this.workExecutor = getContext().watch(getContext().actorOf(workExecutorProps, "exec")); this.registerTask = getContext().system().scheduler().schedule(Duration.Zero(), registerInterval, clusterClient, new SendToAll("/user/master/singleton", new RegisterWorker(workerId)), getContext().dispatcher(), getSelf()); }
/**
 * Starts (and remembers, in {@code registrationSchedule}) a repeating schedule that sends
 * this actor a {@code RegisterListener} message every {@code interval}, with the first
 * delivery after one interval.
 *
 * @param interval both the initial delay and the repeat period
 */
private void scheduleRegistrationListener(FiniteDuration interval) {
    LOG.debug("--->scheduleRegistrationListener called.");
    ActorRef self = getSelf();
    registrationSchedule = getContext()
            .system()
            .scheduler()
            .schedule(
                    interval,
                    interval,
                    self,
                    new RegisterListener(),
                    getContext().system().dispatcher(),
                    self);
}
public MainActor() { MAX_CONCURRENCY = getNumWorkers(); workerRouter = getContext().actorOf(Props.create(WorkerActor.class). withRouter(new RoundRobinPool(MAX_CONCURRENCY)), "workerRouter"); scheduler = getContext().system().scheduler(); WORK_GENERATE_INTERVAL = getWorkGenerateInterval(); WORK_TRACKER_INTERVAL = getWorkTrackInterval(); }
// NOTE(review): incomplete fragment — it begins mid-argument-list and the enclosing method
// is outside this view; it cannot compile on its own. Visible behavior: a sequence of
// futures is combined (executed on the system dispatcher) into an OrderHistory built from
// `order` and `address`, and the aggregate result is piped to orderAggregateActor.
futures, getContext().system().dispatcher()); return new OrderHistory(order, address); }, getContext().system().dispatcher()); pipe(aggResult, getContext().system().dispatcher()).to( orderAggregateActor);
@Inject public QueueActorRouter( QueueActorRouterProducer queueActorRouterProducer, QakkaFig qakkaFig ) { this.queueActorRouterProducer = queueActorRouterProducer; this.qakkaFig = qakkaFig; this.routerRef = getContext().actorOf( FromConfig.getInstance().props( Props.create( GuiceActorProducer.class, QueueActor.class) .withDispatcher("akka.blocking-io-dispatcher")), "router"); }