protected void updateBloom( InetSocketAddress destination_address ) { // allow unresolved through (e.g. ipv6 dht seed) as handled later if ( !destination_address.isUnresolved()){ long diff = SystemTime.getCurrentTime() - last_bloom_rotation_time; if( diff < 0 || diff > BLOOM_ROTATION_PERIOD ) { // System.out.println( "bloom rotate: entries = " + bloom1.getEntryCount() + "/" + bloom2.getEntryCount()); bloom1 = bloom2; bloom2 = BloomFilterFactory.createAddOnly( BLOOM_FILTER_SIZE ); last_bloom_rotation_time = SystemTime.getCurrentTime(); } byte[] address_bytes = destination_address.getAddress().getAddress(); bloom1.add( address_bytes ); bloom2.add( address_bytes ); } }
protected void setKnownSeed( InetSocketAddress address ) { synchronized( registered_legacy_managers ){ if ( known_seeds == null ){ known_seeds = BloomFilterFactory.createAddOnly( 1024 ); } // can't include port as it will be a randomly allocated one in general. two people behind the // same NAT will have to connect to each other using LAN peer finder known_seeds.add( AddressUtils.getAddressBytes( address )); } }
public void recordSkew( InetSocketAddress originator_address, long skew ) { byte[] bytes = AddressUtils.getAddressBytes( originator_address ); if ( skew_originator_bloom.contains( bytes)){ //System.out.println( "skipping skew: " + originator_address ); return; } skew_originator_bloom.add( bytes ); //System.out.println( "adding skew: " + originator_address + "/" + skew ); int i_skew = skew<Integer.MAX_VALUE?(int)skew:(Integer.MAX_VALUE-1); // no sync here as not important so ensure things work ok int pos = skew_pos; skew_values[ pos++ ] = i_skew; if ( pos == SKEW_VALUE_MAX ){ pos = 0; } skew_pos = pos; }
/**
 * Removes a NAT-traversal request from all tracking structures and records its
 * outcome statistics.
 *
 * @param request the traversal to remove
 * @param outcome one of the OUTCOME_* constants
 */
protected void
removeRequest(
	PeerNATTraversal	request,
	int					outcome )
{
	synchronized( initiators ){

			// drop from the per-initiator queue if it is still there

		LinkedList	initiator_queue = (LinkedList)initiators.get( request.getInitiator());

		if ( initiator_queue != null ){

			initiator_queue.remove( request );
		}

		pending_requests.remove( request );

		boolean	was_active = active_requests.remove( request );

		if ( was_active ){

			usage_average.addValue( request.getTimeUsed());

			if ( outcome == OUTCOME_SUCCESS ){

				success_count++;

			}else{

					// remember the failed target so future attempts can be short-circuited

				InetSocketAddress	target = request.getTarget();

				negative_result_bloom.add( target.toString().getBytes());

				if ( outcome == OUTCOME_FAILED_NO_REND ){

					failed_no_rendezvous++;
				}
			}
		}
	}
}
/**
 * Tests whether the given content has been persistently deleted, lazily building
 * the bloom filter from the persisted key list on first query.
 *
 * @param content content to test
 * @return true if the content's perm-del key is (probably) in the deleted set
 */
protected boolean
isPersistentlyDeleted(
	RelatedContent	content )
{
	if ( persist_del_bloom == null ){

		List<byte[]> entries = loadPersistentlyDeleted();

			// size the filter at ~10x the entry count to keep the false-positive rate low

		persist_del_bloom =
			BloomFilterFactory.createAddOnly(
				Math.max( PD_BLOOM_INITIAL_SIZE, entries.size()*10 + PD_BLOOM_INCREMENT_SIZE ));

		for ( byte[] entry: entries ){

			persist_del_bloom.add( entry );
		}
	}

	return( persist_del_bloom.contains( getPermDelKey( content )));
}
/**
 * Rate-limits unauthorised connection attempts per originator using a 4-bit
 * counting bloom filter (created lazily).
 *
 * Fix: the original called the bare {@code String.getBytes()}, which uses the
 * platform default charset — so the same originator could hash differently
 * across JVMs/platforms. Sibling methods in this file pass an explicit
 * encoding; an explicit ISO-8859-1 charset is used here (fully qualified, as
 * the file's import block is not visible from this chunk).
 *
 * @param originator textual identity of the connecting peer
 * @return true when this originator has made 8 or more recent attempts
 */
protected boolean
tooManyUnauthConnections(
	String	originator )
{
	synchronized( this ){

		if ( unauth_bloom == null ){

			unauth_bloom = BloomFilterFactory.createAddRemove4Bit( UNAUTH_BLOOM_CHUNK );

			unauth_bloom_create_time = SystemTime.getCurrentTime();
		}

			// explicit charset: platform-default getBytes() is non-deterministic across JVMs

		int	hit_count = unauth_bloom.add( originator.getBytes( java.nio.charset.StandardCharsets.ISO_8859_1 ));

		if ( hit_count >= 8 ){

			Debug.out( "Too many recent unauthorised connection attempts from " + originator );

			return( true );
		}

		return( false );
	}
}
/**
 * Records that something was sent to the given contact (keyed by contact ID).
 * The filter is recreated from scratch whenever it has accumulated more than
 * 100 entries, acting as a cheap periodic reset.
 *
 * NOTE(review): the field is read into a local and reassigned without
 * synchronisation — presumably occasional lost adds under races are acceptable
 * here; confirm against the callers before tightening.
 */
@Override
public void
sentTo(
	DHTTransportContact contact )
{
	BloomFilter filter = sent_to_bloom;

	if ( filter == null || filter.getEntryCount() > 100 ){

		filter = BloomFilterFactory.createAddOnly(500);

		sent_to_bloom = filter;
	}

	filter.add( contact.getID());
}
// closes the enclosing (not fully visible) anonymous class / scope
}
/**
 * Appends the given content's perm-del keys to the persisted deleted-set file
 * and keeps the in-memory bloom filter (if already built) in sync.
 *
 * If the filter's utilisation would exceed ~10% after adding the new keys it is
 * rebuilt at a larger size from the full persisted entry list; otherwise only
 * the new keys are added incrementally.
 *
 * @param content items to mark as persistently deleted; no-op when empty
 */
protected void
addPersistentlyDeleted(
	RelatedContent[]	content )
{
	if ( content.length == 0 ){

		return;
	}

		// merge new keys into the full persisted list, tracking the new ones separately
		// so the incremental-add path below doesn't re-add existing entries

	List<byte[]> entries = loadPersistentlyDeleted();

	List<byte[]> new_keys = new ArrayList<>(content.length);

	for ( RelatedContent rc: content ){

		byte[] key = getPermDelKey( rc );

		new_keys.add( key );

		entries.add( key );
	}

	Map<String,Object> map = new HashMap<>();

	map.put( "entries", entries );

	FileUtil.writeResilientConfigFile( PERSIST_DEL_FILE, map );

		// filter may not have been built yet (it is lazily created on first query)

	if ( persist_del_bloom != null ){

			// utilisation check: size/entries < 10 means >10% full -> rebuild larger.
			// NOTE(review): the rebuild sizes off getSize()*10 (not entries.size()*10 as
			// the lazy-build path does) — looks like deliberate geometric growth; confirm

		if ( persist_del_bloom.getSize() / ( persist_del_bloom.getEntryCount() + content.length ) < 10 ){

			persist_del_bloom = BloomFilterFactory.createAddOnly(
					Math.max( PD_BLOOM_INITIAL_SIZE,
							persist_del_bloom.getSize() *10 + PD_BLOOM_INCREMENT_SIZE + content.length ));

			for ( byte[] k: entries ){

				persist_del_bloom.add( k );
			}
		}else{

			for ( byte[] k: new_keys ){

				persist_del_bloom.add( k );
			}
		}
	}
}
protected void receive( DHTUDPPacketRequest request ) { if ( destroyed ){ return; } // incoming request if ( test_network_alive ){ request.setAddress( AddressUtils.adjustDHTAddress( request.getAddress(), false )); // an alien request is one that originates from a peer that we haven't recently // talked to byte[] bloom_key = request.getAddress().getAddress().getAddress(); boolean alien = !bloom1.contains( bloom_key ); if ( alien ){ // avoid counting consecutive requests from same contact more than once bloom1.add( bloom_key ); bloom2.add( bloom_key ); } stats.packetReceived( request.getSerialisedSize()); request_handler.process( request, alien ); } }
/**
 * Counts a fast-extension abuse event against the originator's IP and logs
 * once when the event count crosses the limit.
 *
 * @param originator peer that misbehaved
 */
@Override
public void
reportBadFastExtensionUse(
	PEPeerTransport		originator )
{
	try{
		byte[]	key = originator.getIp().getBytes( Constants.BYTE_ENCODING );

		synchronized( naughty_fast_extension_bloom ){

			int	events = naughty_fast_extension_bloom.add( key );

				// log exactly once, on the add that reaches the threshold

			if ( events == FE_EVENT_LIMIT ){

				Logger.log(new LogEvent(disk_mgr.getTorrent(), LOGID, "Fast extension disabled for " + originator.getIp() + " due to repeat requests for the same pieces" ));
			}
		}
	}catch( Throwable e ){
			// best effort - never let accounting break the peer connection
	}
}
/**
 * Checks whether the originator may still use the fast extension. Each call
 * also counts as an event against the originator's IP; once the limit is
 * reached the extension is denied and the denial logged.
 *
 * @param originator peer asking to use the fast extension
 * @return true while the peer remains under the event limit
 */
@Override
public boolean
isFastExtensionPermitted(
	PEPeerTransport		originator )
{
	try{
		byte[]	key = originator.getIp().getBytes( Constants.BYTE_ENCODING );

		synchronized( naughty_fast_extension_bloom ){

				// note: the check itself increments the event count

			if ( naughty_fast_extension_bloom.add( key ) < FE_EVENT_LIMIT ){

				return( true );
			}

			Logger.log(new LogEvent(disk_mgr.getTorrent(), LOGID, "Fast extension disabled for " + originator.getIp() + " due to repeat connections" ));
		}
	}catch( Throwable e ){
			// fall through to deny on any failure
	}

	return( false );
}
/**
 * Scans the transport's reachable contacts and queues previously-untried ones
 * for pinging, ordered with the newest first and capped at 60 pending entries.
 * The 'tried' filter is discarded and recreated once it exceeds 500 entries
 * rather than letting its false-positive rate climb.
 */
protected void
findContacts()
{
	DHTTransportContact[] reachables = dht.getTransport().getReachableContacts();

	for ( DHTTransportContact contact: reachables ){

		byte[]	address = contact.getAddress().getAddress().getAddress();

		if ( tried_bloom == null || tried_bloom.getEntryCount() > 500 ){

			tried_bloom = BloomFilterFactory.createAddOnly( 4096 );
		}

		if ( tried_bloom.contains( address )){

			continue;
		}

		tried_bloom.add( address );

		synchronized( pending_contacts ){

			potentialPing ping =
				new potentialPing(
					contact,
					DHTNetworkPositionManager.estimateRTT( contact.getNetworkPositions(), dht.getTransport().getLocalContact().getNetworkPositions()));

				// newest candidates go to the front; overflow drops the oldest

			pending_contacts.add( 0, ping );

			if ( pending_contacts.size() > 60 ){

				pending_contacts.removeLast();
			}
		}
	}
}
int r = filters[i%filters.length].add( value );
int hits = new_filter.add( val.getOriginator().getBloomKey());
@Override public void contactKnown( byte[] node_id, DHTRouterContactAttachment attachment, boolean force ) { // especially for small DHTs we don't want to prevent a contact from being re-added as long as they've been away for // a bit if ( SystemTime.getMonotonousTime() - recent_contact_bloom.getStartTimeMono() > 10*60*1000 ){ recent_contact_bloom.clear(); } if ( recent_contact_bloom.contains( node_id )){ if ( !force ){ return; } } recent_contact_bloom.add( node_id ); addContact( node_id, attachment, false ); }
protected void incrementValueAdds( DHTTransportContact contact ) { // assume a node stores 1000 values at 20 (K) locations -> 20,000 values // assume a DHT size of 100,000 nodes // that is, on average, 1 value per 5 nodes // assume NAT of up to 30 ports per address // this gives 6 values per address // with a factor of 10 error this is still only 60 per address // However, for CVS DHTs we can have sizes of 1000 or less. byte[] bloom_key = contact.getBloomKey(); int hit_count = ip_count_bloom_filter.add( bloom_key ); if ( DHTLog.GLOBAL_BLOOM_TRACE ){ System.out.println( "direct add from " + contact.getAddress() + ", hit count = " + hit_count ); } // allow up to 10% bloom filter utilisation if ( ip_count_bloom_filter.getSize() / ip_count_bloom_filter.getEntryCount() < 10 ){ rebuildIPBloomFilter( true ); } if ( hit_count > 64 ){ // obviously being spammed, drop all data originated by this IP and ban it banContact( contact, "global flood" ); } }
filter.add( id );
ip_bloom_filter.add( bloom_key );
int hit_count = bloom.add( address_bytes );