public Kernel createKernel( int channel, int port ) throws IOException {
    return new SelectorKernel(port);
}
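// Hypothetical call site (a sketch, not part of the library): port 5110 is an
// arbitrary example value, the single-argument SelectorKernel constructor is
// taken from the factory method above, and initialize() is the method shown
// further below that starts the selector thread.
public Kernel startTcpKernel() {
    Kernel kernel = new SelectorKernel( 5110 );
    kernel.initialize();
    return kernel;
}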
protected void accept( SelectionKey key ) throws IOException {

    // Would only get accepts on a server channel
    ServerSocketChannel serverChan = (ServerSocketChannel)key.channel();

    // Setup the connection to be non-blocking
    SocketChannel remoteChan = serverChan.accept();
    remoteChan.configureBlocking(false);

    // And disable Nagle's buffering algorithm... we want
    // data to go when we put it there.
    Socket sock = remoteChan.socket();
    sock.setTcpNoDelay(true);

    // Let the selector know we're interested in reading
    // data from the channel
    SelectionKey endKey = remoteChan.register( selector, SelectionKey.OP_READ );

    // And now create a new endpoint
    NioEndpoint p = addEndpoint( remoteChan );
    endKey.attach(p);
    endpointKeys.put(p, endKey);
}
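// Standalone sketch (plain java.nio, not this library) of the setup that has
// to happen before accept() above can ever fire: the server channel itself is
// registered with the selector for OP_ACCEPT. The port is an arbitrary example.
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;

public class AcceptSetupSketch {
    public static void main( String[] args ) throws IOException {
        Selector selector = Selector.open();

        ServerSocketChannel serverChan = ServerSocketChannel.open();
        serverChan.configureBlocking(false);
        serverChan.bind( new InetSocketAddress(5110) );

        // When this key is selected, a handler like accept() above runs.
        serverChan.register( selector, SelectionKey.OP_ACCEPT );

        while( selector.select() > 0 ) {
            for( SelectionKey key : selector.selectedKeys() ) {
                if( key.isAcceptable() ) {
                    // hand off to an accept(key)-style handler here
                }
            }
            selector.selectedKeys().clear();
        }
    }
}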
protected void newData( NioEndpoint p, SocketChannel c, ByteBuffer shared, int size ) {
    // Note: if ever desirable, it would be possible to accumulate
    // data per source channel and only 'finalize' it when
    // asked for more envelopes than were ready. I just don't
    // think it will be an issue in practice. The busier the
    // server, the more the buffers will fill before we get to them.
    // And if the server isn't busy, who cares if we chop things up
    // smaller... the network is still likely to deliver things in
    // bulk anyway.

    // Must copy the shared data before we use it
    byte[] dataCopy = new byte[size];
    System.arraycopy(shared.array(), 0, dataCopy, 0, size);

    Envelope env = new Envelope( p, dataCopy, true );
    addEnvelope( env );
}
protected void removeEndpoint( NioEndpoint p, SocketChannel c ) {
    endpoints.remove( p.getId() );
    log.log( Level.FINE, "Endpoints size:{0}", endpoints.size() );

    // Enqueue an endpoint event for the listeners
    addEvent( EndpointEvent.createRemove( this, p ) );

    wakeupReader();
}
protected NioEndpoint addEndpoint( SocketChannel c ) {
    // Note: we purposely do NOT put the key in the endpoint.
    // SelectionKeys are dangerous outside the selector thread
    // and this is safer.
    NioEndpoint p = new NioEndpoint( this, nextEndpointId(), c );

    endpoints.put( p.getId(), p );

    // Enqueue an endpoint event for the listeners
    addEvent( EndpointEvent.createAdd( this, p ) );

    return p;
}
protected void removeEndpoint( NioEndpoint p, SocketChannel c ) {
    endpoints.remove( p.getId() );

    // Enqueue an endpoint event for the listeners
    addEvent( EndpointEvent.createRemove( this, p ) );

    // If there are no pending messages then add one so that the
    // kernel-user knows to wake up if it is only listening for
    // envelopes.
    if( !hasEnvelopes() ) {
        // Note: this is not really a race condition. At worst, our
        // event has already been handled by now and it does no harm
        // to check again.
        addEnvelope( EVENTS_PENDING );
    }
}
protected void cancel( SelectionKey key, SocketChannel c ) throws IOException {
    NioEndpoint p = (NioEndpoint)key.attachment();
    log.log( Level.FINE, "Closing channel endpoint:{0}.", p );
    Object o = endpointKeys.remove(p);

    log.log( Level.FINE, "Endpoint keys size:{0}", endpointKeys.size() );

    key.cancel();
    c.close();
    removeEndpoint( p, c );
}
protected void write( SelectionKey key ) throws IOException {
    NioEndpoint p = (NioEndpoint)key.attachment();
    SocketChannel c = (SocketChannel)key.channel();

    // We will send what we can and move on.
    ByteBuffer current = p.peekPending();
    if( current == NioEndpoint.CLOSE_MARKER ) {
        // This connection wants to be closed now
        closeEndpoint(p);

        // Nothing more to do
        return;
    }

    c.write( current );

    // If we wrote all of that packet then we need to remove it
    if( current.remaining() == 0 ) {
        p.removePending();
    }

    // If we happened to empty the pending queue then let's read
    // again.
    if( !p.hasPending() ) {
        key.interestOps( SelectionKey.OP_READ );
    }
}
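// Not in the excerpt: a hypothetical sketch of the step that write() above
// implies. Before the selector blocks in select(), any endpoint with queued
// outbound data gets OP_WRITE added to its key's interest set so that write()
// runs once the socket is writable; write() then drops the key back to
// OP_READ when the queue empties. Names mirror the fields used above
// (endpointKeys mapping NioEndpoint to SelectionKey); the method name is made up.
protected void setupWriteInterest() {
    for( Map.Entry<NioEndpoint, SelectionKey> e : endpointKeys.entrySet() ) {
        if( e.getKey().hasPending() ) {
            e.getValue().interestOps( SelectionKey.OP_READ | SelectionKey.OP_WRITE );
        }
    }
}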
/**
 *  The wakeup option is used internally when the kernel is
 *  broadcasting out to a bunch of endpoints and doesn't want to
 *  necessarily wakeup right away.
 */
protected void send( ByteBuffer data, boolean copy, boolean wakeup ) {
    // We create a ByteBuffer per endpoint since we
    // use it to track the data sent to each endpoint
    // separately.
    ByteBuffer buffer;
    if( !copy ) {
        buffer = data;
    } else {
        // Copy the buffer
        buffer = ByteBuffer.allocate(data.remaining());
        buffer.put(data);
        buffer.flip();
    }

    // Queue it up
    outbound.add(buffer);

    if( wakeup )
        kernel.wakeupSelector();
}
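// Standalone illustration (plain java.nio, not this library) of why send()
// offers a copy flag: with copy=false the queued buffer IS the caller's
// buffer, so reusing it before the selector thread has written it out would
// corrupt the queued data. The copy=true branch snapshots the bytes instead.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SendCopySketch {
    public static void main( String[] args ) {
        ByteBuffer callers = ByteBuffer.allocate(16);
        callers.put( "first".getBytes(StandardCharsets.UTF_8) );
        callers.flip();

        // The copy=true branch from send(): snapshot the remaining bytes.
        ByteBuffer queued = ByteBuffer.allocate( callers.remaining() );
        queued.put( callers );
        queued.flip();

        // The caller is now free to reuse its own buffer...
        callers.clear();
        callers.put( "second".getBytes(StandardCharsets.UTF_8) );

        // ...while the queued copy still holds the original payload.
        byte[] out = new byte[queued.remaining()];
        queued.get( out );
        System.out.println( new String(out, StandardCharsets.UTF_8) );  // prints "first"
    }
}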
public void initialize() {
    if( thread != null )
        throw new IllegalStateException( "Kernel already initialized." );

    thread = createSelectorThread();

    try {
        thread.connect();
        thread.start();
    } catch( IOException e ) {
        throw new KernelException( "Error hosting:" + address, e );
    }
}
protected void read( SelectionKey key ) throws IOException {
    NioEndpoint p = (NioEndpoint)key.attachment();
    SocketChannel c = (SocketChannel)key.channel();
    working.clear();

    int size;
    try {
        size = c.read(working);
    } catch( IOException e ) {
        // The remote end forcibly closed the connection...
        // close out our end and cancel the key
        cancel( key, c );
        return;
    }

    if( size == -1 ) {
        // The remote end shut down cleanly...
        // close out our end and cancel the key
        cancel( key, c );
        return;
    }

    newData( p, c, working, size );
}
protected void cancel( NioEndpoint p ) throws IOException {
    SelectionKey key = endpointKeys.remove(p);
    if( key == null ) {
        //log.log( Level.FINE, "Endpoint already closed:{0}.", p );
        return; // already closed it
    }
    log.log( Level.FINE, "Endpoint keys size:{0}", endpointKeys.size() );

    log.log( Level.FINE, "Closing endpoint:{0}.", p );
    SocketChannel c = (SocketChannel)key.channel();

    // Note: key.cancel() is specifically thread safe. One of
    // the few things one can do with a key from another
    // thread.
    key.cancel();
    c.close();
    removeEndpoint( p, c );
}
public void close( boolean flushData ) {
    if( flushData ) {
        closing = true;

        // Enqueue a close marker message to let the server
        // know we should close
        send( CLOSE_MARKER, false, true );

        return;
    }

    try {
        // Note: even though we may be disconnected from the socket.isConnected()
        // standpoint, it's still safest to tell the kernel so that it can be sure
        // to stop managing us gracefully.
        kernel.closeEndpoint(this);
    } catch( IOException e ) {
        throw new KernelException( "Error closing endpoint for socket:" + socket, e );
    }
}
public void broadcast( Filter<? super Endpoint> filter, ByteBuffer data, boolean reliable,
                       boolean copy ) {
    if( !reliable )
        throw new UnsupportedOperationException( "Unreliable send not supported by this kernel." );

    if( copy ) {
        // Copy the data just once
        byte[] temp = new byte[data.remaining()];
        System.arraycopy(data.array(), data.position(), temp, 0, data.remaining());
        data = ByteBuffer.wrap(temp);
    }

    // Hand it to all of the endpoints that match our routing
    for( NioEndpoint p : endpoints.values() ) {
        // Does it match the filter?
        if( filter != null && !filter.apply(p) )
            continue;

        // Give it the data... but let each endpoint track their
        // own completion over the shared array of bytes by
        // duplicating it
        p.send( data.duplicate(), false, false );
    }

    // Wake up the selector so it can reinitialize its
    // state accordingly.
    wakeupSelector();
}
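// Standalone illustration (plain java.nio, not this library) of the
// data.duplicate() trick used in broadcast() above: duplicates share one
// backing array but keep independent positions, so each endpoint can track
// its own write progress over the same bytes without a per-endpoint copy.
import java.nio.ByteBuffer;

public class BroadcastDuplicateSketch {
    public static void main( String[] args ) {
        ByteBuffer shared = ByteBuffer.wrap( new byte[]{ 1, 2, 3, 4 } );

        ByteBuffer forEndpointA = shared.duplicate();
        ByteBuffer forEndpointB = shared.duplicate();

        // Pretend endpoint A managed a partial write of one byte...
        forEndpointA.get();

        System.out.println( forEndpointA.remaining() );  // 3 - A still has work to do
        System.out.println( forEndpointB.remaining() );  // 4 - B's progress is unaffected
    }
}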
protected void cancel( SelectionKey key, SocketChannel c ) throws IOException {
    NioEndpoint p = (NioEndpoint)key.attachment();
    log.log( Level.INFO, "Closing channel endpoint:{0}.", p );
    endpointKeys.remove(p);

    key.cancel();
    c.close();
    removeEndpoint( p, c );
}