/**
 * Scheduled check fired after the unauthenticated-connection TTL: if the client
 * still has not completed authentication, close the connection from the server side.
 */
private void closeIfNotAuthenticated() {
    if (isAuthenticated()) {
        return;
    }
    logger.error("Closing connection because it is still unauthenticated after {} seconds.",
            UNAUTHENTICATED_CONN_TTL.get());
    forceCloseConnectionFromServerSide();
}
/**
 * Releases this handler's resources via {@code tearDown()}, propagates the
 * inactive event down the pipeline, then closes the channel.
 */
@Override public final void channelInactive(ChannelHandlerContext ctx) throws Exception { tearDown(); super.channelInactive(ctx); ctx.close(); }
/**
 * Installs the SSE push pipeline: authentication first, then client registration,
 * then the sample SSE client protocol handler. Order is significant.
 */
@Override
protected void addPushHandlers(final ChannelPipeline pipeline) {
    pipeline.addLast(PushAuthHandler.NAME, pushAuthHandler)
            .addLast(new PushRegistrationHandler(pushConnectionRegistry, PushProtocol.SSE))
            .addLast(new SampleSSEPushClientProtocolHandler());
}
/**
 * Registers an authenticated client (represented by the {@link PushUserAuth} event)
 * with this instance's {@link PushConnectionRegistry}, then schedules a server-initiated
 * reconnect after a dithered deadline to limit client stickiness to this node.
 *
 * <p>For anything other than a single-node push cluster, you'd most likely need an
 * off-box, partitioned, global registration registry that tracks which client is
 * connected to which push server instance. Override this default implementation for
 * such cases and register your client with your global registry in addition to the
 * local push connection registry, which is limited to this JVM instance. Make sure
 * such registration is done in a strictly non-blocking fashion, lest you block the
 * Netty event loop and decimate your throughput.
 *
 * <p>A typical arrangement is to use something like a Memcached or Redis cluster,
 * sharded by client connection key, driven by a blocking Memcached/Redis client in a
 * background thread pool so the Netty event loop doesn't block.
 *
 * @param ctx       channel context of the newly authenticated connection
 * @param authEvent successful authentication result carrying the client identity
 * @param conn      push connection to register
 * @param registry  local (per-JVM) connection registry
 */
protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent, PushConnection conn, PushConnectionRegistry registry) { registry.put(authEvent.getClientIdentity(), conn); //Make client reconnect after ttl seconds by closing this connection to limit stickiness of the client ctx.executor().schedule(this::requestClientToCloseConnection, ditheredReconnectDeadline(), TimeUnit.SECONDS); }
private void requestClientToCloseConnection() { if (ctx.channel().isActive()) { // Application level protocol for asking client to close connection ctx.writeAndFlush(goAwayMessage()); // Force close connection if client doesn't close in reasonable time after we made request ctx.executor().schedule(() -> forceCloseConnectionFromServerSide(), CLIENT_CLOSE_GRACE_PERIOD.get(), TimeUnit.SECONDS); } else { forceCloseConnectionFromServerSide(); } }
/**
 * Drives the push connection lifecycle off pipeline user events. On handshake
 * completion it creates the {@link PushConnection} and schedules a close of the
 * connection if authentication has not arrived within {@code UNAUTHENTICATED_CONN_TTL}
 * seconds. On a successful {@link PushUserAuth} event it removes the auth handler
 * from the pipeline and registers the client; on failure it sends error code 1008
 * and closes. All events are ignored once the handler is destroyed, and the event
 * is always propagated to the next handler.
 */
@Override public final void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { this.ctx = ctx; if (! destroyed.get()) { if (evt == pushProtocol.getHandshakeCompleteEvent()) { pushConnection = new PushConnection(pushProtocol, ctx); // Unauthenticated connection, wait for small amount of time for a client to send auth token in // a first web socket frame, otherwise close connection ctx.executor().schedule(this::closeIfNotAuthenticated, UNAUTHENTICATED_CONN_TTL.get(), TimeUnit.SECONDS); logger.debug("WebSocket handshake complete."); } else if (evt instanceof PushUserAuth) { authEvent = (PushUserAuth) evt; if (authEvent.isSuccess()) { logger.debug("registering client {}", authEvent); ctx.pipeline().remove(PushAuthHandler.NAME); registerClient(ctx, authEvent, pushConnection, pushConnectionRegistry); logger.debug("Authentication complete {}", authEvent); } else { sendErrorAndClose(1008, "Auth Failed"); } } } super.userEventTriggered(ctx, evt); }
private void requestClientToCloseConnection() { if (ctx.channel().isActive()) { // Application level protocol for asking client to close connection ctx.writeAndFlush(pushProtocol.goAwayMessage()); // Force close connection if client doesn't close in reasonable time after we made request ctx.executor().schedule(() -> forceCloseConnectionFromServerSide(), CLIENT_CLOSE_GRACE_PERIOD.get(), TimeUnit.SECONDS); } else { forceCloseConnectionFromServerSide(); } }
/**
 * Drives the push connection lifecycle off pipeline user events. On handshake
 * completion it creates the {@link PushConnection} and schedules a close of the
 * connection if authentication has not arrived within {@code UNAUTHENTICATED_CONN_TTL}
 * seconds. On a successful {@link PushUserAuth} event it removes the auth handler
 * from the pipeline and registers the client; on failure it delegates to the
 * protocol to send error code 1008 and close. All events are ignored once the
 * handler is destroyed, and the event is always propagated to the next handler.
 */
@Override public final void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { this.ctx = ctx; if (! destroyed.get()) { if (evt == pushProtocol.getHandshakeCompleteEvent()) { pushConnection = new PushConnection(pushProtocol, ctx); // Unauthenticated connection, wait for small amount of time for a client to send auth token in // a first web socket frame, otherwise close connection ctx.executor().schedule(this::closeIfNotAuthenticated, UNAUTHENTICATED_CONN_TTL.get(), TimeUnit.SECONDS); logger.debug("WebSocket handshake complete."); } else if (evt instanceof PushUserAuth) { authEvent = (PushUserAuth) evt; if (authEvent.isSuccess()) { logger.debug("registering client {}", authEvent); ctx.pipeline().remove(PushAuthHandler.NAME); registerClient(ctx, authEvent, pushConnection, pushConnectionRegistry); logger.debug("Authentication complete {}", authEvent); } else { pushProtocol.sendErrorAndClose(ctx,1008, "Auth failed"); } } } super.userEventTriggered(ctx, evt); }
/**
 * Captures the latest channel context, delegates the message to
 * {@code handleRead}, and always releases the (possibly reference-counted)
 * message afterwards to avoid buffer leaks.
 */
@Override public final void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { this.ctx = ctx; try { handleRead(ctx, msg); } finally { ReferenceCountUtil.release(msg); } }
/**
 * Server-initiated close: sends a normal-closure (1000) error to the client and
 * closes the connection. No-op once this handler has been destroyed.
 */
protected final void forceCloseConnectionFromServerSide() {
    if (destroyed.get()) {
        return;
    }
    sendErrorAndClose(1000, "server closed connection");
    logger.debug("server forcing close connection");
}
/**
 * Registers an authenticated client (represented by the {@link PushUserAuth} event)
 * with this instance's {@link PushConnectionRegistry}, then schedules a server-initiated
 * reconnect after a dithered deadline to limit client stickiness to this node.
 *
 * <p>For anything other than a single-node push cluster, you'd most likely need an
 * off-box, partitioned, global registration registry that tracks which client is
 * connected to which push server instance. Override this default implementation for
 * such cases and register your client with your global registry in addition to the
 * local push connection registry, which is limited to this JVM instance. Make sure
 * such registration is done in a strictly non-blocking fashion, lest you block the
 * Netty event loop and decimate your throughput.
 *
 * <p>A typical arrangement is to use something like a Memcached or Redis cluster,
 * sharded by client connection key, driven by a blocking Memcached/Redis client in a
 * background thread pool so the Netty event loop doesn't block.
 *
 * @param ctx       channel context of the newly authenticated connection
 * @param authEvent successful authentication result carrying the client identity
 * @param conn      push connection to register
 * @param registry  local (per-JVM) connection registry
 */
protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent, PushConnection conn, PushConnectionRegistry registry) { registry.put(authEvent.getClientIdentity(), conn); //Make client reconnect after ttl seconds by closing this connection to limit stickiness of the client ctx.executor().schedule(this::requestClientToCloseConnection, ditheredReconnectDeadline(), TimeUnit.SECONDS); }
private void requestClientToCloseConnection() { if (ctx.channel().isActive()) { // Application level protocol for asking client to close connection ctx.writeAndFlush(pushProtocol.goAwayMessage()); // Force close connection if client doesn't close in reasonable time after we made request ctx.executor().schedule(() -> forceCloseConnectionFromServerSide(), CLIENT_CLOSE_GRACE_PERIOD.get(), TimeUnit.SECONDS); } else { forceCloseConnectionFromServerSide(); } }
/**
 * Drives the push connection lifecycle off pipeline user events. On handshake
 * completion it creates the {@link PushConnection} and schedules a close of the
 * connection if authentication has not arrived within {@code UNAUTHENTICATED_CONN_TTL}
 * seconds. On a successful {@link PushUserAuth} event it removes the auth handler
 * from the pipeline and registers the client; on failure it delegates to the
 * protocol to send error code 1008 and close. All events are ignored once the
 * handler is destroyed, and the event is always propagated to the next handler.
 */
@Override public final void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { this.ctx = ctx; if (! destroyed.get()) { if (evt == pushProtocol.getHandshakeCompleteEvent()) { pushConnection = new PushConnection(pushProtocol, ctx); // Unauthenticated connection, wait for small amount of time for a client to send auth token in // a first web socket frame, otherwise close connection ctx.executor().schedule(this::closeIfNotAuthenticated, UNAUTHENTICATED_CONN_TTL.get(), TimeUnit.SECONDS); logger.debug("WebSocket handshake complete."); } else if (evt instanceof PushUserAuth) { authEvent = (PushUserAuth) evt; if (authEvent.isSuccess()) { logger.debug("registering client {}", authEvent); ctx.pipeline().remove(PushAuthHandler.NAME); registerClient(ctx, authEvent, pushConnection, pushConnectionRegistry); logger.debug("Authentication complete {}", authEvent); } else { pushProtocol.sendErrorAndClose(ctx,1008, "Auth failed"); } } } super.userEventTriggered(ctx, evt); }
/**
 * Scheduled check fired after the unauthenticated-connection TTL: if the client
 * still has not completed authentication, close the connection from the server side.
 */
private void closeIfNotAuthenticated() {
    if (isAuthenticated()) {
        return;
    }
    logger.error("Closing connection because it is still unauthenticated after {} seconds.",
            UNAUTHENTICATED_CONN_TTL.get());
    forceCloseConnectionFromServerSide();
}
/**
 * Releases this handler's resources via {@code tearDown()}, propagates the
 * inactive event down the pipeline, then closes the channel.
 */
@Override public final void channelInactive(ChannelHandlerContext ctx) throws Exception { tearDown(); super.channelInactive(ctx); ctx.close(); }
/**
 * Registers an authenticated client (represented by the {@link PushUserAuth} event)
 * with this instance's {@link PushConnectionRegistry}, then schedules a server-initiated
 * reconnect after a dithered deadline to limit client stickiness to this node.
 *
 * <p>For anything other than a single-node push cluster, you'd most likely need an
 * off-box, partitioned, global registration registry that tracks which client is
 * connected to which push server instance. Override this default implementation for
 * such cases and register your client with your global registry in addition to the
 * local push connection registry, which is limited to this JVM instance. Make sure
 * such registration is done in a strictly non-blocking fashion, lest you block the
 * Netty event loop and decimate your throughput.
 *
 * <p>A typical arrangement is to use something like a Memcached or Redis cluster,
 * sharded by client connection key, driven by a blocking Memcached/Redis client in a
 * background thread pool so the Netty event loop doesn't block.
 *
 * @param ctx       channel context of the newly authenticated connection
 * @param authEvent successful authentication result carrying the client identity
 * @param conn      push connection to register
 * @param registry  local (per-JVM) connection registry
 */
protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent, PushConnection conn, PushConnectionRegistry registry) { registry.put(authEvent.getClientIdentity(), conn); //Make client reconnect after ttl seconds by closing this connection to limit stickiness of the client ctx.executor().schedule(this::requestClientToCloseConnection, ditheredReconnectDeadline(), TimeUnit.SECONDS); }
/**
 * Installs the WebSocket push pipeline: authentication, permessage-deflate
 * compression, the WebSocket protocol upgrade handler, client registration,
 * and finally the sample WebSocket client protocol handler. Order is significant.
 */
@Override
protected void addPushHandlers(final ChannelPipeline pipeline) {
    pipeline.addLast(PushAuthHandler.NAME, pushAuthHandler)
            .addLast(new WebSocketServerCompressionHandler())
            .addLast(new WebSocketServerProtocolHandler(PushProtocol.WEBSOCKET.getPath(), null, true))
            .addLast(new PushRegistrationHandler(pushConnectionRegistry, PushProtocol.WEBSOCKET))
            .addLast(new SampleWebSocketPushClientProtocolHandler());
}
/**
 * Scheduled check fired after the unauthenticated-connection TTL: if the client
 * still has not completed authentication, close the connection from the server side.
 */
private void closeIfNotAuthenticated() {
    if (isAuthenticated()) {
        return;
    }
    logger.error("Closing connection because it is still unauthenticated after {} seconds.",
            UNAUTHENTICATED_CONN_TTL.get());
    forceCloseConnectionFromServerSide();
}
/**
 * Releases this handler's resources via {@code tearDown()}, propagates the
 * inactive event down the pipeline, then closes the channel.
 */
@Override public final void channelInactive(ChannelHandlerContext ctx) throws Exception { tearDown(); super.channelInactive(ctx); ctx.close(); }
@Override protected void addPushHandlers(final ChannelPipeline pipeline) { pipeline.addLast(PushAuthHandler.NAME, pushAuthHandler); pipeline.addLast(new PushRegistrationHandler(pushConnectionRegistry, PushProtocol.SSE)); pipeline.addLast(new SampleSSEPushClientProtocolHandler()); }