/**
 * Verifies that this connection is still open.
 *
 * @throws ConnectorException if the underlying socket has been released.
 */
protected void checkClosed() {
    if( sock != null ) {
        return;
    }
    throw new ConnectorException( "Connection is closed:" + remoteAddress );
}
/**
 * Returns the transport address of the reliable channel, or null if
 * the reliable channel has not been established.
 */
@Override
public String getAddress() {
    if( channels[CH_RELIABLE] == null ) {
        return null;
    }
    return channels[CH_RELIABLE].getAddress();
}
/**
 * Broadcasts the specified data to all endpoints accepted by the filter.
 * Pure delegation to the kernel, which handles filtering and delivery.
 *
 * @param filter   selects which endpoints receive the data.
 * @param data     the payload to send.
 * @param reliable whether to use the reliable transport.
 * @param copy     whether the kernel should copy the buffer before queuing.
 */
public void broadcast( Filter<? super Endpoint> filter, ByteBuffer data, boolean reliable, boolean copy ) { kernel.broadcast( filter, data, reliable, copy ); }
public void run() { // Not guaranteed to always work but an extra datagram // to a dead connection isn't so big of a deal. if( !endpoint.isConnected() ) { return; } try { thread.getSocket().send(packet); } catch( Exception e ) { KernelException exc = new KernelException( "Error sending datagram to:" + address, e ); exc.fillInStackTrace(); reportError(exc); } } }
public void close( boolean flush ) { // No real reason to flush UDP traffic yet... especially // when considering that the outbound UDP isn't even // queued. try { kernel.closeEndpoint(this); connected = false; } catch( IOException e ) { throw new KernelException( "Error closing endpoint for socket:" + socket, e ); } }
protected void reportError( Endpoint p, Object context, Exception e ) { // Should really be queued up so the outer thread can // retrieve them. For now we'll just log it. FIXME log.log( Level.SEVERE, "Unhandled error, endpoint:" + p + ", context:" + context, e ); if( p.isConnected() ) { // In lieu of other options, at least close the endpoint p.close(); } }
/**
 * Sends the specified data to this endpoint using the defaults:
 * reliable delivery with a defensive copy of the buffer.
 *
 * @param data the payload to send; must not be null.
 * @throws IllegalArgumentException if data is null.
 * @throws KernelException if this endpoint is closing.
 */
public void send( ByteBuffer data ) {
    if( data == null ) {
        throw new IllegalArgumentException( "Data cannot be null." );
    }
    if( closing ) {
        throw new KernelException( "Endpoint has been closed:" + socket );
    }
    // Delegate with reliable=true, copy=true.
    send( data, true, true );
}
/**
 * Returns true if at least one byte can be read without blocking.
 *
 * @throws ConnectorException if the connection is closed or the
 *         availability check itself fails.
 */
public boolean available() {
    checkClosed();
    try {
        return in.available() > 0;
    } catch( IOException e ) {
        throw new ConnectorException( "Error retrieving data availability for:" + remoteAddress, e );
    }
}
protected void createAndDispatch( EndpointEvent event ) { // Only need to tell the server about disconnects if( event.getType() == EndpointEvent.Type.REMOVE ) { connectionClosed( event.getEndpoint() ); } }
/**
 * Drains every pending endpoint event from the kernel, dispatching each
 * one.  Failures during dispatch are routed to reportError() so that one
 * bad event does not stop the drain.
 */
protected void flushEvents() {
    for( EndpointEvent event = kernel.nextEvent(); event != null; event = kernel.nextEvent() ) {
        try {
            createAndDispatch( event );
        } catch( Exception e ) {
            reportError( event.getEndpoint(), event, e );
        }
    }
}
public void close() { go.set(false); // Kill the writer service writer.shutdown(); if( connector.isConnected() ) { // Kill the connector connector.close(); } }
protected void wakeupReader() { // If there are no pending messages then add one so that the // kernel-user knows to wake up if it is only listening for // envelopes. if( !hasEnvelopes() ) { // Note: this is not really a race condition. At worst, our // event has already been handled by now and it does no harm // to check again. addEnvelope( EVENTS_PENDING ); } }
/**
 * Static factory for an event describing the removal of endpoint p
 * from the specified kernel.
 *
 * @param source the kernel the endpoint was removed from.
 * @param p      the removed endpoint.
 */
public static EndpointEvent createRemove( Kernel source, Endpoint p ) {
    EndpointEvent event = new EndpointEvent( source, p, Type.REMOVE );
    return event;
}
/**
 * Initializes the underlying kernel.  Pure delegation; any setup
 * failure propagates from the kernel itself.
 */
public void initialize() { kernel.initialize(); }
/**
 * Enqueues an envelope for the reader side.
 *
 * @param env the envelope to enqueue.
 * @throws KernelException if the queue rejects the envelope.
 */
protected void addEnvelope( Envelope env ) {
    boolean accepted = envelopes.offer( env );
    if( !accepted ) {
        throw new KernelException( "Critical error, could not enqueue envelope." );
    }
}
}
/**
 * Writes the buffer to the connector, routing any failure through the
 * central error handler instead of letting it propagate.
 *
 * @param data the buffer to write.
 */
private void write( ByteBuffer data ) {
    try {
        connector.write(data);
    } catch( Exception e ) {
        handleError( e );
    }
}
public void close() throws InterruptedException { go.set(false); // Kill the kernel kernel.terminate(); join(); }
protected void closeConnection() { if( closed ) return; closed = true; // Make sure all endpoints are closed. Note: reliable // should always already be closed through all paths that I // can conceive... but it doesn't hurt to be sure. for( Endpoint p : channels ) { if( p == null || !p.isConnected() ) continue; p.close(); } fireConnectionRemoved( this ); }
/**
 * Throws if this connection has already been closed.
 *
 * @throws ConnectorException when the socket reference has been cleared.
 */
protected void checkClosed() {
    boolean open = (sock != null);
    if( !open ) {
        throw new ConnectorException( "Connection is closed:" + remoteAddress );
    }
}
/**
 * Static factory for an event describing endpoint p being added to the
 * specified kernel.
 *
 * @param source the kernel the endpoint was added to.
 * @param p      the newly added endpoint.
 */
public static EndpointEvent createAdd( Kernel source, Endpoint p ) {
    Type type = Type.ADD;
    return new EndpointEvent( source, p, type );
}