/**
 * Registers an authenticated client - represented by {@code PushUserAuth} - with this instance's
 * {@code PushConnectionRegistry}.
 *
 * For anything but the simplest case - essentially anything other than a single-node push cluster -
 * you will most likely need an off-box, partitioned, global registration registry that keeps track
 * of which client is connected to which push server instance. For such deployments, override this
 * default implementation and register the client with your global registry in addition to the local
 * push connection registry, which is limited to this JVM instance. Make sure such registration is
 * done in a strictly non-blocking fashion, or you will block the Netty event loop and decimate your
 * throughput.
 *
 * A typical arrangement is a Memcached or Redis cluster sharded by client connection key, with the
 * blocking Memcached/Redis driver invoked from a background thread pool so that the Netty event
 * loop never blocks.
 */
protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent,
                              PushConnection conn, PushConnectionRegistry registry) {
    registry.put(authEvent.getClientIdentity(), conn);
    // Close this connection after a (dithered) TTL so the client reconnects periodically,
    // limiting how long it stays stuck to this particular push server instance.
    ctx.executor().schedule(this::requestClientToCloseConnection, ditheredReconnectDeadline(), TimeUnit.SECONDS);
}
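/*
 * A minimal sketch of such an override, following the arrangement described above: the default
 * (local, per-JVM) registration is kept, and the off-box registration against a global registry is
 * handed to a dedicated thread pool so the blocking driver call never runs on a Netty event loop
 * thread. GloballyRegisteringPushHandler, GlobalPushRegistry, and pushServerAddress are hypothetical
 * names introduced here for illustration; the imports and the superclass constructor arguments are
 * assumptions and should be adjusted to match your actual registration handler.
 */
import com.netflix.zuul.netty.server.push.PushConnection;
import com.netflix.zuul.netty.server.push.PushConnectionRegistry;
import com.netflix.zuul.netty.server.push.PushProtocol;
import com.netflix.zuul.netty.server.push.PushRegistrationHandler;
import com.netflix.zuul.netty.server.push.PushUserAuth;
import io.netty.channel.ChannelHandlerContext;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class GloballyRegisteringPushHandler extends PushRegistrationHandler {

    /** Hypothetical blocking client for the off-box registry (Memcached, Redis, ...). */
    public interface GlobalPushRegistry {
        void register(String clientId, String pushServerAddress);
    }

    private final GlobalPushRegistry globalRegistry;
    private final String pushServerAddress;

    // Dedicated pool for the blocking registration call, so it never runs on the event loop.
    private final ExecutorService registrationPool = Executors.newFixedThreadPool(4);

    public GloballyRegisteringPushHandler(PushConnectionRegistry registry, PushProtocol protocol,
                                          GlobalPushRegistry globalRegistry, String pushServerAddress) {
        super(registry, protocol); // assumption: mirrors the existing handler's constructor
        this.globalRegistry = globalRegistry;
        this.pushServerAddress = pushServerAddress;
    }

    @Override
    protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent,
                                  PushConnection conn, PushConnectionRegistry registry) {
        // Keep the default behavior: local registration plus the dithered reconnect deadline.
        super.registerClient(ctx, authEvent, conn, registry);

        // Off-box registration: record which push server instance this client is connected to.
        // Submitted to a background pool so the blocking driver call does not stall the event loop.
        registrationPool.submit(() ->
                globalRegistry.register(authEvent.getClientIdentity(), pushServerAddress));
    }
}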
private void tearDown() {
    if (!destroyed.get()) {
        destroyed.set(true);
        if (authEvent != null) {
            pushConnectionRegistry.remove(authEvent.getClientIdentity());
            logger.debug("Closing connection for {}", authEvent);
        }
    }
}
public void put(final String clientId, final PushConnection pushConnection) {
    pushConnection.setSecureToken(mintNewSecureToken());
    clientPushConnectionMap.put(clientId, pushConnection);
}
final PushConnection pushConn = pushConnectionRegistry.get(userAuth.getClientIdentity());
if (pushConn == null) {
    // Client is not connected to this push server instance
    sendHttpResponse(ctx, request, NOT_FOUND, userAuth);
    return;
}