/**
 * Creates a <code>BucketManager</code> for the given local destination.
 * Starts with a single k-bucket spanning the entire hash range, plus an
 * s-bucket that holds siblings of the local destination.
 * @param localDestinationHash the hash of the local destination
 */
public BucketManager(Hash localDestinationHash) {
    this.localDestinationHash = localDestinationHash;
    kBuckets = new CopyOnWriteArrayList<KBucket>();
    // the initial bucket covers the whole keyspace; this is the root bucket, so depth=0
    kBuckets.add(new KBucket(AbstractBucket.MIN_HASH_VALUE, AbstractBucket.MAX_HASH_VALUE, 0));
    sBucket = new SBucket(localDestinationHash);
}
/**
 * Returns a new list containing every known peer: the contents of all
 * k-buckets followed by the contents of the s-bucket.
 * @return all peers in the k-buckets and the s-bucket
 */
public synchronized List<KademliaPeer> getAllPeers() {
    List<KademliaPeer> peers = new ArrayList<KademliaPeer>();
    for (KBucket kBucket: kBuckets)
        peers.addAll(kBucket.getPeers());
    peers.addAll(sBucket.getPeers());
    return peers;
}
/**
 * Records the last lookup time on the bucket section responsible for
 * <code>key</code>; does nothing if no section covers the key.
 * @param key a DHT key
 * @param lastLookupTime the lookup timestamp in milliseconds
 */
void setLastLookupTime(Hash key, long lastLookupTime) {
    BucketSection section = getSection(key);
    if (section == null)
        return;
    section.lastLookupTime = lastLookupTime;
}
// NOTE(review): fragment of a test method — the enclosing method signature and the
// body of the trailing loop/if are outside this view; do not treat as self-contained.
boolean isSibling = bucketManager.getSBucket().contains(peer);
bucketManager.remove(peer);
peersRemaining--;
// the assertion states: after a removal, either the s-bucket is still full
// (presumably refilled from the k-buckets — confirm against BucketManager.refillSiblings)
// or there are no k-peers left at all
assertTrue(bucketManager.getSBucket().isFull() || getKPeers().isEmpty());
// when the s-bucket stayed full, exactly one k-bucket should have shrunk by one peer
if (bucketManager.getSBucket().isFull())
    for (int i=0; i<kBuckets.size(); i++)
        if (getKBuckets().get(i).size() == bucketSizes.get(i)-1)
// NOTE(review): fragment — the enclosing method and the closing braces of both
// if-blocks are outside this view; do not treat as self-contained.
if (isFull()) {
    // insertionPoint beyond the end of a full list would indicate a caller bug
    if (insertionPoint > size())
        log.error("insertionPoint > size(), this shouldn't happen.");
    if (insertionPoint < size()) {
        // if destination is closer than an existing sibling, replace the furthest away sibling and return the removed sibling
        peers.add(insertionPoint, new KademliaPeer(peer));
        // after the insert the list is one over capacity; drop the last (furthest) entry
        KademliaPeer removedPeer = peers.remove(size() - 1);
        return removedPeer;
/** * Moves peers from the k-buckets to the s-bucket until the s-bucket is full * or all k-buckets are empty. */ private void refillSiblings() { // Sort all k-peers by distance to the local destination List<KademliaPeer> kPeers = new ArrayList<KademliaPeer>(); for (KBucket kBucket: kBuckets) kPeers.addAll(kBucket.getPeers()); Collections.sort(kPeers, new PeerDistanceComparator(localDestinationHash)); while (!sBucket.isFull() && !kPeers.isEmpty()) { // move the closest k-peer to the s-bucket KademliaPeer peerToMove = kPeers.remove(0); int bucketIndex = getBucketIndex(peerToMove.getDestinationHash()); kBuckets.get(bucketIndex).remove(peerToMove); sBucket.addOrUpdate(peerToMove); } }
/**
 * Returns the (s or k) bucket that contains a given {@link Destination}.
 * The s-bucket is checked first, then the k-buckets.
 * If no bucket contains the peer, <code>null</code> is returned.
 * @param destination the peer to look for
 * @return the bucket containing the peer, or <code>null</code>
 */
private AbstractBucket getBucket(Destination destination) {
    if (sBucket.contains(destination))
        return sBucket;
    KBucket kBucket = getKBucket(destination.calculateHash());
    return kBucket.contains(destination) ? kBucket : null;
}
/** * Adds a <code>{@link KademliaPeer}</code> to the s-bucket or a k-bucket, * depending on its distance to the local node and how full the buckets are. * @param destination */ public void addOrUpdate(KademliaPeer peer) { Hash destHash = peer.getDestinationHash(); if (localDestinationHash.equals(destHash)) { log.debug("Not adding local destination to bucket."); return; } KademliaPeer removedOrNotAdded = sBucket.addOrUpdate(peer); if (removedOrNotAdded == null) getKBucket(destHash).remove(peer); // if the peer was in a k-bucket, remove it because it is now in the s-bucket else addToKBucket(removedOrNotAdded); // if a peer was removed from the s-bucket or didn't qualify as a sibling, add it to a k-bucket // log int numBuckets = kBuckets.size(); int numPeers = getAllPeers().size(); int numSiblings = sBucket.size(); log.debug("Peer " + Util.toBase32(destHash) + " added/updated. Peers=" + numPeers + " sib=" + numSiblings + " buk=" + numBuckets + " (not counting the sibling bucket)"); }
/**
 * Returns the total number of known Kademlia peers (locked + unlocked peers),
 * summed over all k-buckets and the s-bucket.
 * @return the peer count
 */
int getPeerCount() {
    int total = sBucket.size();
    for (KBucket kBucket: kBuckets)
        total += kBucket.size();
    return total;
}
/**
 * Updates the time at which the k-bucket for a DHT key, and the s-bucket,
 * was last refreshed. Both buckets receive the same timestamp.
 * @param key a DHT key
 */
void updateLastLookupTime(Hash key) {
    long now = System.currentTimeMillis();
    getKBucket(key).setLastLookupTime(now);
    sBucket.setLastLookupTime(key, now);
}
/** * Refreshes all buckets whose <code>lastLookupTime</code> is too old. * @throws InterruptedException */ private void refreshOldBuckets() throws InterruptedException { long now = System.currentTimeMillis(); // refresh k-buckets for (KBucket bucket: Util.synchronizedCopy(bucketManager)) if (now > bucket.getLastLookupTime() + KademliaConstants.BUCKET_REFRESH_INTERVAL) { log.info("Refreshing k-bucket: " + bucket); refresh(bucket); } // Refresh the s-bucket by doing a lookup for a random key in each section of the bucket. // For example, if k=20 and s=100, there would be a lookup for a random key between // the 0th and the 20th sibling (i=0), another one for a random key between the 20th // and the 40th sibling (i=1), etc., and finally a lookup for a random key between the // 80th and the 100th sibling (i=4). SBucket sBucket = bucketManager.getSBucket(); BucketSection[] sections = sBucket.getSections(); for (int i=0; i<sections.length; i++) { BucketSection section = sections[i]; if (now > section.getLastLookupTime() + KademliaConstants.BUCKET_REFRESH_INTERVAL) { log.info("Refreshing s-bucket section " + i + " of " + sections.length + " (last refresh: " + new Date(section.getLastLookupTime()) + ")"); refresh(section); } } }
/**
 * Returns all peers that are not locked, collected from the k-buckets
 * and the s-bucket.
 * @return a new list of unlocked peers
 */
public synchronized List<Destination> getAllUnlockedPeers() {
    List<Destination> unlocked = new ArrayList<Destination>();
    for (KBucket kBucket: kBuckets)
        for (KademliaPeer kPeer: kBucket.getPeers())
            if (!kPeer.isLocked())
                unlocked.add(kPeer);
    for (KademliaPeer sibling: sBucket.getPeers())
        if (!sibling.isLocked())
            unlocked.add(sibling);
    return unlocked;
}