private void tearDown() {
    if (!destroyed.get()) {
        destroyed.set(true);
        if (authEvent != null) {
            pushConnectionRegistry.remove(authEvent.getClientIdentity());
            logger.debug("Closing connection for {}", authEvent);
        }
    }
}
protected final boolean isAuthenticated() {
    return (authEvent != null && authEvent.isSuccess());
}
} else {
    final PushUserAuth authEvent = doAuth(req);
    if (!authEvent.isSuccess()) {
        logger.warn("Auth failed: {}", authEvent.statusCode());
        sendHttpResponse(req, ctx, HttpResponseStatus.valueOf(authEvent.statusCode()));
    }
    // Hand the auth result to the rest of the pipeline (see userEventTriggered below).
    ctx.fireUserEventTriggered(authEvent);
}
if (!userAuth.isSuccess()) {
    sendHttpResponse(ctx, request, UNAUTHORIZED, userAuth);
    logNoIdentity();
    return;
}
final PushConnection pushConn = pushConnectionRegistry.get(userAuth.getClientIdentity());
if (pushConn == null) {
    sendHttpResponse(ctx, request, NOT_FOUND, userAuth);
    return;
}
/**
 * Registers an authenticated client - represented by a {@link PushUserAuth} - with this
 * instance's {@link PushConnectionRegistry}.
 *
 * For anything other than a single-node push cluster, you will most likely need an off-box,
 * partitioned, global registry that tracks which client is connected to which push server
 * instance. In that case, override this default implementation and register the client with
 * your global registry in addition to the local push connection registry, which is limited to
 * this JVM instance. Make sure such registration is done in a strictly non-blocking fashion,
 * or you will block the Netty event loop and decimate your throughput.
 *
 * A typical arrangement is a Memcached or Redis cluster sharded by client connection key,
 * with the blocking Memcached/Redis driver running in a background thread pool so the Netty
 * event loop never blocks.
 */
protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent,
                              PushConnection conn, PushConnectionRegistry registry) {
    registry.put(authEvent.getClientIdentity(), conn);
    // Close this connection after a (dithered) TTL to force the client to reconnect,
    // limiting how long a client stays pinned to one server.
    ctx.executor().schedule(this::requestClientToCloseConnection, ditheredReconnectDeadline(), TimeUnit.SECONDS);
}
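/*
 * A minimal sketch of the kind of override the javadoc above describes - NOT part of this
 * handler. It assumes a PushRegistrationHandler(PushConnectionRegistry, PushProtocol)
 * constructor; GlobalPushRegistry, serverInstanceId, and the thread-pool sizing are
 * hypothetical, illustration-only choices, not APIs of this codebase.
 */
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.netty.channel.ChannelHandlerContext;

/** Hypothetical client for an off-box registry, e.g. a wrapper over a Redis/Memcached driver. */
interface GlobalPushRegistry {
    void register(String clientIdentity, String serverInstance);
}

class GloballyRegisteringPushHandler extends PushRegistrationHandler {
    private final GlobalPushRegistry globalRegistry;
    private final String serverInstanceId;
    // Background pool that absorbs the blocking driver calls so the Netty event loop never waits.
    private final ExecutorService registrationExecutor = Executors.newFixedThreadPool(4);

    GloballyRegisteringPushHandler(PushConnectionRegistry registry, PushProtocol protocol,
                                   GlobalPushRegistry globalRegistry, String serverInstanceId) {
        super(registry, protocol); // assumed super-constructor signature
        this.globalRegistry = globalRegistry;
        this.serverInstanceId = serverInstanceId;
    }

    @Override
    protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent,
                                  PushConnection conn, PushConnectionRegistry registry) {
        // Keep the local, JVM-scoped registration and the dithered reconnect scheduling.
        super.registerClient(ctx, authEvent, conn, registry);
        // Do the (potentially blocking) global registration off the event loop.
        registrationExecutor.submit(() ->
                globalRegistry.register(authEvent.getClientIdentity(), serverInstanceId));
    }
}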
@Override
public final void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
    this.ctx = ctx;
    if (!destroyed.get()) {
        if (evt == pushProtocol.getHandshakeCompleteEvent()) {
            pushConnection = new PushConnection(pushProtocol, ctx);
            // Unauthenticated connection: wait a small amount of time for the client to send
            // an auth token in its first web socket frame, otherwise close the connection.
            ctx.executor().schedule(this::closeIfNotAuthenticated, UNAUTHENTICATED_CONN_TTL.get(), TimeUnit.SECONDS);
            logger.debug("WebSocket handshake complete.");
        } else if (evt instanceof PushUserAuth) {
            authEvent = (PushUserAuth) evt;
            if (authEvent.isSuccess()) {
                logger.debug("registering client {}", authEvent);
                ctx.pipeline().remove(PushAuthHandler.NAME);
                registerClient(ctx, authEvent, pushConnection, pushConnectionRegistry);
                logger.debug("Authentication complete {}", authEvent);
            } else {
                pushProtocol.sendErrorAndClose(ctx, 1008, "Auth failed");
            }
        }
    }
    super.userEventTriggered(ctx, evt);
}
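/*
 * closeIfNotAuthenticated(), scheduled above, is not shown in this section. A minimal
 * sketch of what it might look like - assuming it runs on the channel's event loop and
 * reuses the fields seen above - is:
 */
private void closeIfNotAuthenticated() {
    if (!isAuthenticated()) {
        // The client never completed auth within UNAUTHENTICATED_CONN_TTL;
        // 1008 is the web socket "policy violation" close code.
        logger.warn("Closing connection, still unauthenticated after {} seconds", UNAUTHENTICATED_CONN_TTL.get());
        pushProtocol.sendErrorAndClose(ctx, 1008, "Auth timeout");
    }
}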