@Override public void packetReceived(CommunicationPacket packet, Destination sender, long receiveTime) { BanList banList = BanList.getInstance(); banList.update(sender, packet); if (!banList.isBanned(sender)) // any type of incoming packet updates the peer's record in the bucket/sibling list, or adds the peer to the bucket/sibling list addOrUpdate(new KademliaPeer(sender)); else remove(sender); }
/**
 * Writes all known peers to a file, ordered by descending uptime.
 * Does nothing when there are no peers to write.
 * @param file the destination file
 */
private void writePeersSorted(File file) {
    List<KademliaPeer> allPeers = bucketManager.getAllPeers();
    if (!allPeers.isEmpty()) {
        sortByUptime(allPeers);
        log.info("Writing peers to file: <" + file.getAbsolutePath() + ">");
        writePeers(allPeers, file);
    }
}
/**
 * Returns the total number of known peers that are currently not locked.
 * @return the count of unlocked peers
 */
int getUnlockedPeerCount() {
    List<Destination> unlockedPeers = getAllUnlockedPeers();
    return unlockedPeers.size();
}
/**
 * Removes a peer from whichever bucket currently contains it. If the peer
 * was a sibling, the s-bucket is refilled from the k-buckets afterwards.
 * Logs (debug) and does nothing if no bucket holds the peer.
 * @param peer the peer to remove
 */
public void remove(Destination peer) {
    AbstractBucket bucket = getBucket(peer);
    if (bucket == null) {
        log.debug("Can't remove peer because no bucket contains it: " + Util.toBase32(peer));
        return;
    }
    bucket.remove(peer);
    if (bucket instanceof SBucket)
        refillSiblings();
}
// Seed the routing table with the bootstrap node, then look up the nodes
// closest to our own hash to populate the buckets, and finally drop the
// bootstrap node again so it does not linger in the table.
bucketManager.addOrUpdate(bootstrapNode);
log.info("Trying " + Util.toBase32(bootstrapNode) + " for bootstrapping.");
Collection<Destination> closestNodes = getClosestNodes(localDestinationHash);
// NOTE(review): closestNodes is not read here — presumably getClosestNodes()
// fills the buckets as a side effect; confirm against its implementation.
bucketManager.remove(bootstrapNode);
log.info("Bootstrapping finished. Number of peers = " + bucketManager.getPeerCount());
for (Destination peer: bucketManager.getAllPeers())
    log.debug(" Peer: " + Util.toBase32(peer));
// bootstrap succeeded — leave the retry loop (labeled outside this view)
break outerLoop;
/**
 * Asserts invariants of the bucket structure: the local destination must
 * never be stored as a peer, and every sibling must compare no greater
 * than any k-bucket peer under <code>comparator</code>.
 * NOTE(review): this method appears truncated in this view — its closing
 * braces (and possibly further checks) lie outside the visible range.
 */
private void checkBucketConsistency() throws Exception {
    Iterator<KBucket> iter = bucketManager.iterator();
    KBucket bucket1 = iter.next();
    while (iter.hasNext()) {
        assertFalse(bucketManager.getAllPeers().contains(localDestination));
        // every sibling must be at least as "close" as every k-bucket peer
        for (KademliaPeer sibling: bucketManager.getSBucket())
            for (KademliaPeer peer: getKPeers())
                assertTrue(comparator.compare(sibling, peer) <= 0);
/**
 * Adds a <code>{@link KademliaPeer}</code> to the s-bucket or a k-bucket,
 * depending on its distance to the local node and how full the buckets are.
 * The local node itself is never added.
 * @param peer the peer to add or refresh
 */
public void addOrUpdate(KademliaPeer peer) {
    Hash destHash = peer.getDestinationHash();
    // never store our own destination in the routing table
    if (localDestinationHash.equals(destHash)) {
        log.debug("Not adding local destination to bucket.");
        return;
    }
    // Offer the peer to the sibling bucket first. Per the branches below,
    // a null return means the peer now resides in the s-bucket; a non-null
    // return is either an evicted sibling or the peer itself (didn't qualify).
    KademliaPeer removedOrNotAdded = sBucket.addOrUpdate(peer);
    if (removedOrNotAdded == null)
        getKBucket(destHash).remove(peer);   // if the peer was in a k-bucket, remove it because it is now in the s-bucket
    else
        addToKBucket(removedOrNotAdded);   // if a peer was removed from the s-bucket or didn't qualify as a sibling, add it to a k-bucket
    // log
    int numBuckets = kBuckets.size();
    int numPeers = getAllPeers().size();
    int numSiblings = sBucket.size();
    log.debug("Peer " + Util.toBase32(destHash) + " added/updated. Peers=" + numPeers + " sib=" + numSiblings + " buk=" + numBuckets + " (not counting the sibling bucket)");
}
// No saved peer file from a previous run — rely on the built-in peers only.
log.info("Peer file doesn't exist, using built-in peers only (File not found: <" + peerFile.getAbsolutePath() + ">)");
bucketManager = new BucketManager(localDestinationHash);
// maps packet types to the handler responsible for storing them
storageHandlers = new ConcurrentHashMap<Class<? extends DhtStorablePacket>, DhtStorageHandler>();
replicateThread = new ReplicateThread(localDestination, sendQueue, i2pReceiver, bucketManager);
/**
 * Calls <code>addOrUpdate(KademliaPeer)</code> for one or more peers.
 * @param peers the peers to add or refresh
 */
public void addAll(Collection<KademliaPeer> peers) {
    for (KademliaPeer peer: peers)
        addOrUpdate(peer);
}
// NOTE(review): bucketIndex and the first assignment to bucket are dead code —
// bucket is immediately overwritten by getKBucket(), which performs the same
// index lookup internally. Consider deleting the first two statements.
int bucketIndex = getBucketIndex(peer.calculateHash());
KBucket bucket = kBuckets.get(bucketIndex);
bucket = getKBucket(peer.calculateHash());
notQueriedYet.addAll(bucketManager.getAllUnlockedPeers());
logStatus();
// A FindCloseNodes request timed out: record the non-response against the
// peer (see BucketManager.noResponse — presumably affects locking/eviction;
// confirm there) and drop the pending request.
Destination peer = request.getKey();
log.debug("FindCloseNodes request to peer " + Util.toShortenedBase32(peer) + " timed out.");
bucketManager.noResponse(peer);
pendingRequests.remove(peer);
/** * Refreshes all buckets whose <code>lastLookupTime</code> is too old. * @throws InterruptedException */ private void refreshOldBuckets() throws InterruptedException { long now = System.currentTimeMillis(); // refresh k-buckets for (KBucket bucket: Util.synchronizedCopy(bucketManager)) if (now > bucket.getLastLookupTime() + KademliaConstants.BUCKET_REFRESH_INTERVAL) { log.info("Refreshing k-bucket: " + bucket); refresh(bucket); } // Refresh the s-bucket by doing a lookup for a random key in each section of the bucket. // For example, if k=20 and s=100, there would be a lookup for a random key between // the 0th and the 20th sibling (i=0), another one for a random key between the 20th // and the 40th sibling (i=1), etc., and finally a lookup for a random key between the // 80th and the 100th sibling (i=4). SBucket sBucket = bucketManager.getSBucket(); BucketSection[] sections = sBucket.getSections(); for (int i=0; i<sections.length; i++) { BucketSection section = sections[i]; if (now > section.getLastLookupTime() + KademliaConstants.BUCKET_REFRESH_INTERVAL) { log.info("Refreshing s-bucket section " + i + " of " + sections.length + " (last refresh: " + new Date(section.getLastLookupTime()) + ")"); refresh(section); } } }
/**
 * Does a binary search for the k-bucket whose key range contains a given
 * {@link Hash}.
 * The bucket may or may not contain a peer with that hash.
 * @param key the hash to locate a bucket for
 * @return the k-bucket covering <code>key</code>
 */
private KBucket getKBucket(Hash key) {
    int index = getBucketIndex(key);
    return kBuckets.get(index);
}
/**
 * Looks up a <code>KademliaPeer</code> by I2P destination. If no bucket
 * (k or s-bucket) contains the peer, <code>null</code> is returned.
 * @param destination the destination to look up
 * @return the matching peer, or <code>null</code> if unknown
 */
private KademliaPeer getPeer(Destination destination) {
    AbstractBucket bucket = getBucket(destination);
    return bucket == null ? null : bucket.getPeer(destination);
}
// NOTE(review): this test body appears truncated/flattened in this view —
// the loop headers defining i, startIndex, index, peer, and peersRemaining
// are not visible here.
@Test
public void test() throws Exception {
    // empty routing table: no peers, a single initial k-bucket
    assertEquals(0, bucketManager.getPeerCount());
    assertEquals(0, bucketManager.getAllPeers().size());
    assertEquals(1, getNumKBuckets());
    bucketManager.addOrUpdate(new KademliaPeer(peers[i]));
    // after filling the s-bucket: S peers, still one k-bucket
    assertEquals(S, bucketManager.getPeerCount());
    assertEquals(S, bucketManager.getAllPeers().size());
    assertEquals(1, getNumKBuckets());
    bucketManager.addOrUpdate(new KademliaPeer(peers[startIndex+i]));
    // after filling the first k-bucket: S + K peers, still one k-bucket
    assertEquals(S + K, bucketManager.getPeerCount());
    assertEquals(S + K, bucketManager.getAllPeers().size());
    assertEquals(1, getNumKBuckets());
    bucketManager.addOrUpdate(new KademliaPeer(peers[index]));
    // one more peer forces a k-bucket split
    assertEquals(S+K+1, bucketManager.getPeerCount());
    assertEquals(S+K+1, bucketManager.getAllPeers().size());
    assertTrue(getNumKBuckets() >= 2);   // there should be at least 2 buckets after the split
    bucketManager.addOrUpdate(new KademliaPeer(peers[startIndex+i]));
    assertTrue(bucketManager.getAllPeers().contains(peers[i]));
    // remove a peer and verify the count drops accordingly
    boolean isSibling = bucketManager.getSBucket().contains(peer);
    bucketManager.remove(peer);
    peersRemaining--;
    assertEquals(peersRemaining, bucketManager.getPeerCount());
// Last test peer: a hard-coded I2P destination (base64-encoded) used as fixture data.
peers[199] = new Destination("LsWHwmsa10PSqTYCJZA5VKNxFVrBdixfoUypyqolPalQRRjgYvbRbMvF3TDY1jvv8NV2wg~V8Ju35TXhyqUL2eUT8w1F5FN4ramRwIDO4ahCvDSiIy~CB2MlJGGncFV3q4F4g6FqwjEQcPEWz2lYeLJxtHgrB2MFZhLNAriNFMvuAujNouTEreWNSziJN92paXZgYYChvVknKd3C1sZ2T-upBD1Du6PYiV5HGnvJf1eqSpCFGAI4EqWe2ubsMg9ku8HP0-QzaBkCfslywh8WvcpwJr3yylw2d4tLmCCzt80VQjH-DEzXjOTwgxysD5dKqweQWFB2oh5Ogwj2KKO6pG5dIc2TsVLyH0nPxOkc1pMjhCxq0ChNfsSkX8kO-9WocuIlnw32yPn2nyrk8VneHVOWOzNBNbP~cIr1afLNYG5z12RYVx9KCr95e8f5LU-5-tVw8a3sjxWVzt3Dc~wfTKXGgdrHYWoRQdOFv2mkyckCaxM4DhLDFCGVdj2jjxk9AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC-8jmLgwEOpRNMQfxoCAXP~ksLV36RxM9NNPEhwS4eSXeR3iSOzaYu8-etb5M~3p8");
// build the bucket manager under test, keyed to the local destination's hash
bucketManager = new BucketManager(localDestination.calculateHash());
/** * Moves peers from the k-buckets to the s-bucket until the s-bucket is full * or all k-buckets are empty. */ private void refillSiblings() { // Sort all k-peers by distance to the local destination List<KademliaPeer> kPeers = new ArrayList<KademliaPeer>(); for (KBucket kBucket: kBuckets) kPeers.addAll(kBucket.getPeers()); Collections.sort(kPeers, new PeerDistanceComparator(localDestinationHash)); while (!sBucket.isFull() && !kPeers.isEmpty()) { // move the closest k-peer to the s-bucket KademliaPeer peerToMove = kPeers.remove(0); int bucketIndex = getBucketIndex(peerToMove.getDestinationHash()); kBuckets.get(bucketIndex).remove(peerToMove); sBucket.addOrUpdate(peerToMove); } }
/** * Returns the <code>count</code> peers that are closest to a given key, * and which are not locked. * Less than <code>count</code> peers may be returned if there aren't * enough peers in the k-buckets and the s-bucket. * @param key * @param count * @return Up to <code>count</code> peers, sorted by distance to <code>key</code>. */ public List<Destination> getClosestPeers(Hash key, int count) { // TODO don't put all peers in one huge list, only use two k-buckets and the s-bucket at a time List<Destination> peers = getAllUnlockedPeers(); Collections.sort(peers, new PeerDistanceComparator(key)); if (peers.size() < count) return peers; else return peers.subList(0, count); }