/**
 * Creates the simulator: loads the {@code caffeine.simulator} settings, builds the
 * broadcast router over the policy actors, and prepares batching and reporting.
 */
public Simulator() {
  Config simulatorConfig = context().system().settings().config().getConfig("caffeine.simulator");
  settings = new BasicSettings(simulatorConfig);

  List<Routee> routees = makeRoutes();
  router = new Router(new BroadcastRoutingLogic(), routees);
  remaining = routees.size();

  batchSize = settings.batchSize();
  stopwatch = Stopwatch.createStarted();
  reporter = settings.report().format().create(simulatorConfig);
}
/**
 * Replays a batch of trace events through the policy, timing the work with the
 * policy's stopwatch. On failure the supervisor is told {@code ERROR}; the
 * stopwatch is always stopped.
 */
private void process(LongArrayList events) {
  try {
    // Start inside the try so the finally-stop pairs with it even if start() throws.
    policy.stats().stopwatch().start();
    int size = events.size();
    for (int index = 0; index < size; index++) {
      policy.record(events.getLong(index));
    }
  } catch (Exception e) {
    sender().tell(ERROR, self());
    context().system().log().error(e, "");
  } finally {
    policy.stats().stopwatch().stop();
  }
}
/**
 * Schedules {@code message} to be delivered back to this actor after
 * {@code delay} milliseconds, using the context's dispatcher.
 */
private void scheduleTimeoutMessage(ActorContext context, TimeoutMessage message, long delay) {
  context.system().scheduler().scheduleOnce(
      Duration.create(delay, TimeUnit.MILLISECONDS),
      context.self(),
      message,
      context.dispatcher(),
      context.self());
}
/**
 * Registers a redirection rule and schedules a {@link RuleTimeoutMessage} to
 * this actor after the rule's TTL (milliseconds) so it can be expired.
 */
private void applyRedirectionRule(RedirectionRule body) {
  // Arrange for the rule to expire after its TTL; the timeout message is
  // processed by this same actor.
  context().system().scheduler().scheduleOnce(
      Duration.create(body.ruleTTL, TimeUnit.MILLISECONDS),
      self(),
      new RuleTimeoutMessage(body.getRuleId()),
      context().dispatcher(),
      self());
  redirectionRules.put(body.getRuleId(), body);
}
/**
 * Schedules an {@link EndpointEventTimeoutMessage} for {@code event} back to this
 * actor once the event's TTL (milliseconds) has elapsed.
 */
void scheduleTimeoutMessage(ActorContext context, EndpointEvent event) {
  context.system().scheduler().scheduleOnce(
      Duration.create(getTtl(event), TimeUnit.MILLISECONDS),
      context.self(),
      new EndpointEventTimeoutMessage(event),
      context.dispatcher(),
      context.self());
}
/**
 * Notifies the policy that the trace is complete and replies to the supervisor
 * with the policy's statistics, or {@code ERROR} if finishing fails.
 */
private void finish() {
  try {
    policy.finished();
    sender().tell(policy.stats(), self());
  } catch (Exception e) {
    sender().tell(ERROR, self());
    context().system().log().error(e, "");
  }
}
}
/**
 * Broadcasts the trace events to all of the policy actors in fixed-size batches,
 * followed by a {@code FINISH} marker. On failure the error is logged and this
 * actor stops itself.
 */
private void broadcast() {
  try (LongStream events = eventStream()) {
    LongArrayList batch = new LongArrayList(batchSize);
    for (PrimitiveIterator.OfLong i = events.iterator(); i.hasNext();) {
      batch.add(i.nextLong());
      if (batch.size() == batchSize) {
        router.route(batch, self());
        batch = new LongArrayList(batchSize);
      }
    }
    // Flush the trailing partial batch, but skip the message entirely when it is
    // empty (the trace length was an exact multiple of the batch size) to avoid
    // broadcasting useless work to every policy actor.
    if (!batch.isEmpty()) {
      router.route(batch, self());
    }
    router.route(FINISH, self());
  } catch (Exception e) {
    context().system().log().error(e, "");
    context().stop(self());
  }
}
/**
 * Creates the mock gateway, caching the actor system and an empty, thread-safe
 * connection-to-endpoint map.
 */
public MockMediaGateway() {
  super();
  system = context().system();
  // Diamond operator: the type arguments are inferred from the field declaration.
  connEndpointMap = new ConcurrentHashMap<>();
}
/**
 * Creates the mock gateway, initializing thread-safe session maps for endpoints,
 * links, and connections, and caching the actor system.
 */
public GatherMockMediaGateway() {
  super();
  // Diamond operator: the type arguments are inferred from the field declarations.
  endpoints = new ConcurrentHashMap<>();
  links = new ConcurrentHashMap<>();
  connections = new ConcurrentHashMap<>();
  system = context().system();
}
// NOTE(review): partial view — this create() lives inside an anonymous factory whose
// enclosing call begins before this chunk; the trailing "}), nodeId);" closes that call.
@Override
public BaseNodeManager create() throws Exception {
    // Builds the node manager for this node/topology using the actor system and the
    // collaborators captured from the enclosing scope.
    return new BaseNodeManager(nodeId, topologyId, actorContext.system(), delegateFactory, roleChangeStrategy);
}
}), nodeId);
/**
 * Creates the snapshot store from its plugin {@code config}: looks up the stream
 * dispatcher, resolves the snapshot directory, and clamps the configured
 * max-load-attempts to at least 1.
 */
public LocalSnapshotStore(final Config config) {
    this.executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
    snapshotDir = new File(config.getString("dir"));

    // A non-positive configured value falls back to a single attempt.
    final int configuredAttempts = config.getInt("max-load-attempts");
    maxLoadAttempts = configuredAttempts > 0 ? configuredAttempts : 1;

    LOG.debug("LocalSnapshotStore ctor: snapshotDir: {}, maxLoadAttempts: {}", snapshotDir, maxLoadAttempts);
}
private Object tryDeserializeAkkaSnapshot(final File file) throws IOException { LOG.debug("tryDeserializeAkkaSnapshot {}", file); // The snapshot was probably previously stored via akka's LocalSnapshotStore which wraps the data // in a Snapshot instance and uses the SnapshotSerializer to serialize it to a byte[]. So we'll use // the SnapshotSerializer to try to de-serialize it. SnapshotSerializer snapshotSerializer = new SnapshotSerializer((ExtendedActorSystem) context().system()); try (InputStream in = new BufferedInputStream(new FileInputStream(file))) { return ((Snapshot)snapshotSerializer.fromBinary(ByteStreams.toByteArray(in))).data(); } }
/**
 * Forwards {@code command} to the {@link PolicyPersistenceActor} responsible for
 * the Policy identified by the command, via the shard region.
 *
 * @param command the command to forward; also supplies the Policy ID used to
 *        locate the associated Policy Persistence Actor.
 */
void forwardToPolicyPersistenceActor(final PolicyCommand command) {
    PolicyPersistenceActor
        .getShardRegion(actorContext.system())
        .forward(command, actorContext);
}
}
// NOTE(review): partial view — this Runnable is an argument to a scheduler call that
// begins before this chunk; the trailing "}, system.dispatcher());" completes that call.
@Override
public void run() {
    LOG.debug("Retrying identify message from master to node {} , full path {}", ((ActorIdentity) message).correlationId(), path);
    // Re-sends an Identify probe, reusing the original correlation id, to the actor at 'path'.
    context.system().actorSelection(path).tell(new Identify(((ActorIdentity) message).correlationId()), self);
}
}, system.dispatcher());
/**
 * Returns the notification publisher actor, creating it on first use with the
 * Notification dispatcher and the bounded mailbox.
 *
 * NOTE(review): the lazy init is unsynchronized — presumably only ever invoked
 * from a single actor's message processing; confirm against callers.
 */
private ActorRef getNotifierActor() {
    if (notifierActor == null) {
        LOG.debug("Creating actor {}", actorName);
        final String dispatcherPath = new Dispatchers(actorContext.system().dispatchers())
                .getDispatcherPath(Dispatchers.DispatcherType.Notification);
        notifierActor = actorContext.actorOf(
                ShardDataTreeNotificationPublisherActor.props(actorName)
                        .withDispatcher(dispatcherPath)
                        .withMailbox(org.opendaylight.controller.cluster.datastore.utils.ActorContext.BOUNDED_MAILBOX),
                actorName);
    }
    return notifierActor;
}
}
private void scheduleCheckForThingActivity() { log.debug("Scheduling for activity check in <{}> seconds.", activityCheckInterval.getSeconds()); // send a message to ourself: activityChecker = context().system().scheduler() .scheduleOnce(Duration.create(activityCheckInterval.getSeconds(), TimeUnit.SECONDS), self(), new CheckForActivity(sequenceNumber), context().dispatcher(), null); }
private void scheduleCheckForThingActivity() { log.debug("Scheduling for activity check in <{}> seconds.", activityCheckInterval.getSeconds()); // send a message to ourself: activityChecker = context().system().scheduler() .scheduleOnce(Duration.create(activityCheckInterval.getSeconds(), TimeUnit.SECONDS), self(), new CheckForActivity(sequenceNumber), context().dispatcher(), null); }
/**
 * Initializes the simulator from the {@code caffeine.simulator} configuration:
 * a broadcast router over the policy actors, plus batching, timing, and
 * reporting state.
 */
public Simulator() {
  Config config = context().system().settings().config().getConfig("caffeine.simulator");
  settings = new BasicSettings(config);
  batchSize = settings.batchSize();
  reporter = settings.report().format().create(config);

  List<Routee> policyRoutes = makeRoutes();
  remaining = policyRoutes.size();
  router = new Router(new BroadcastRoutingLogic(), policyRoutes);

  stopwatch = Stopwatch.createStarted();
}
/**
 * Schedules {@code message} to the persistence actor after {@code when} has
 * elapsed, attributed to {@code sender}.
 *
 * @return a handle that can cancel the pending delivery.
 */
private Cancellable scheduleOnce(final FiniteDuration when, final Object message, final ActorRef sender) {
    return persistenceActor.context().system().scheduler().scheduleOnce(
            when,
            persistenceActor.self(),
            message,
            persistenceActor.context().dispatcher(),
            sender);
}
/**
 * Stores the configuration and, when a master URL is configured, records the
 * master path, joins the cluster at that address, and obtains the distributed
 * pub-sub mediator. Without a master URL this actor stays standalone.
 */
@Override
public void setup(Configuration conf) {
    this.conf = conf;
    String url = conf.get(MASTER_URL);
    if (url != null) {
        this.masterPath = conf.get(MASTER_PATH);
        // Use getContext() consistently (the original mixed context() and getContext()).
        Address masterAddress = AddressFromURIString.apply(url);
        Cluster.get(getContext().system()).join(masterAddress);
        mediator = DistributedPubSubExtension.get(getContext().system()).mediator();
    }
}