/**
 * Splits the bucket in two by keeping all peers with a DHT key less than
 * <code>pivot</code> in the existing bucket, and moving the rest into a new bucket.
 * @param pivot The key value separating the two buckets
 * @return The new bucket (which contains the higher IDs)
 */
private KBucket split(BigInteger pivot) {
    depth++;
    // The new bucket covers [pivot, endId); this bucket shrinks to end at pivot.
    KBucket higherBucket = new KBucket(pivot, endId, depth);
    endId = pivot;
    // Walk backwards so removals don't shift the indices still to be visited.
    for (int idx = peers.size() - 1; idx >= 0; idx--) {
        KademliaPeer candidate = peers.get(idx);
        BigInteger candidateId = new BigInteger(1, candidate.getDestinationHash().getData());
        boolean belongsInHigherBucket = candidateId.compareTo(pivot) >= 0;
        if (belongsInHigherBucket) {
            higherBucket.add(candidate);
            remove(candidate);
        }
    }
    return higherBucket;
}
// NOTE(review): this fragment appears truncated/garbled — the braces do not
// balance and the while-loop body repeats the split/add sequence in a way that
// suggests a missing `else` branch was lost in extraction. Verify against the
// full method source before relying on this code.
KBucket bucket = kBuckets.get(bucketIndex);
if (bucket.shouldSplit(peer)) {
    KBucket newBucket = bucket.split();
    kBuckets.add(bucketIndex+1, newBucket);   // the new bucket is one higher than the old bucket
    // NOTE(review): presumably this loop keeps splitting until neither half is
    // empty, so the peer's bucket stops being split-dominated — TODO confirm
    // the intended termination condition in the complete source.
    while (newBucket.isEmpty() || bucket.isEmpty())
        if (newBucket.isEmpty()) {
            newBucket = bucket.split();
            kBuckets.add(bucketIndex+1, newBucket);
            bucketIndex++;
            bucket = newBucket;
            newBucket = newBucket.split();
            kBuckets.add(bucketIndex+1, newBucket);
            bucket.addOrUpdate(peer);
/**
 * Refreshes a bucket by looking up a random key that falls inside the
 * bucket's ID range, which repopulates the bucket with fresh peers.
 * @param bucket The bucket to refresh
 * @throws InterruptedException
 */
private void refresh(KBucket bucket) throws InterruptedException {
    Hash randomKey = createRandomHash(bucket.getStartId(), bucket.getEndId());
    getClosestNodes(randomKey);
}
/** * Updates a known peer, or adds the peer if it isn't known. If the bucket * is full and the replacement cache is not empty, the oldest peer is removed * before adding the new peer. * If the bucket is full and the replacement cache is empty, the peer is * added to the replacement cache. * @param peer */ void addOrUpdate(KademliaPeer peer) { // TODO log an error if peer outside bucket's range int index = getPeerIndex(peer); if (index >= 0) { KademliaPeer existingPeer = peers.remove(index); existingPeer.responseReceived(); add(existingPeer); } else { if (!isFull()) add(peer); else addOrUpdateReplacement(peer); } }
@Test public void testAddOrUpdate() { for (KBucket bucket: buckets) { KademliaPeer[] peers = destinationMap.get(bucket); // add a peer bucket.addOrUpdate(peers[0]); assertEquals(1, bucket.size()); assertEquals(peers[0], bucket.iterator().next()); // add the same peer again, verify there is no duplicate bucket.addOrUpdate(peers[0]); assertEquals(1, bucket.size()); assertEquals(peers[0], bucket.iterator().next()); // add a different peer bucket.addOrUpdate(peers[1]); assertEquals(2, bucket.size()); assertEquals(peers[0], bucket.getPeer(peers[0])); assertEquals(peers[1], bucket.getPeer(peers[1])); } }
@Test
public void testRemove() {
    for (KBucket bucket: buckets) {
        Destination[] destinations = destinationMap.get(bucket);
        assertEquals(0, bucket.size());
        
        // a single add followed by a remove leaves the bucket empty again
        bucket.addOrUpdate(new KademliaPeer(destinations[0]));
        assertEquals(1, bucket.size());
        bucket.remove(destinations[0]);
        assertEquals(0, bucket.size());
        
        // add three peers, then remove them one by one, checking the size
        // after every step
        for (int i = 0; i < 3; i++)
            bucket.addOrUpdate(new KademliaPeer(destinations[i]));
        assertEquals(3, bucket.size());
        for (int i = 0; i < 3; i++) {
            bucket.remove(destinations[i]);
            assertEquals(2-i, bucket.size());
        }
    }
}
/**
 * Fills a bucket to capacity, splits it, and verifies that both halves end
 * up one level deeper and that the old bucket holds strictly lower IDs than
 * the new one.
 */
@Test
public void testSplit() throws SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException {
    assertEquals("K must be an even number for this test to work.", 0, K%2);
    
    for (KBucket bucket: buckets) {
        KademliaPeer[] peers = destinationMap.get(bucket);
        int originalDepth = KademliaTestUtil.getDepth(bucket);
        // no split should be required until the bucket is full
        for (int i=0; i<K; i++) {
            assertFalse(bucket.shouldSplit(peers[i]));
            bucket.addOrUpdate(peers[i]);
        }
        assertTrue(bucket.shouldSplit(peers[K]));
        
        KBucket newBucket = bucket.split();
        assertNotNull(newBucket);
        assertEquals(originalDepth+1, KademliaTestUtil.getDepth(bucket));
        // bug fix: the second depth check previously re-tested `bucket`;
        // it must verify the NEW bucket's depth as well
        assertEquals(originalDepth+1, KademliaTestUtil.getDepth(newBucket));
        // the old bucket keeps the lower IDs, the new bucket the higher ones
        assertTrue(maxId(bucket).compareTo(minId(newBucket)) < 0);
    }
}
// NOTE(review): this fragment looks truncated/garbled — `i`, `peerId`, and the
// inner `bucket` are not declared in the visible code, braces do not balance,
// and the two near-identical size checks below are almost certainly an
// extraction artifact. Verify against the complete test method.
List<KBucket> kBuckets = getKBuckets();
for (KBucket bucket: kBuckets)
    bucketSizes.add(bucket.size());
int peersRemaining = totalPeersAdded;
while (peersRemaining > 0) {
    if (getKBuckets().get(i).size() == bucketSizes.get(i)-1)
        bucketSizes.set(i, bucketSizes.get(i)-1);
    if (bucket.size() == bucketSizes.get(i)-1) {
        bucketSizes.set(i, bucketSizes.get(i)-1);
        BigInteger startId = bucket.getStartId();
        BigInteger endId = bucket.getEndId();
        // a peer's id must lie within its bucket's [startId, endId] range
        assertTrue("Peer is in the wrong bucket: peer id=" + peerId + ", bucket start=" + startId + " bucket end=" + endId, startId.compareTo(peerId)<=0 && endId.compareTo(peerId)>=0);
        assertEquals(bucket.size(), bucketSizes.get(i).intValue());
/**
 * Inserts a peer into the bucket: locked peers go to the tail,
 * unlocked peers to the head.
 * The bucket cannot be full when calling this method.
 * @param peer
 */
private void add(KademliaPeer peer) {
    if (isFull())
        log.error("Error: adding a node to a full k-bucket. Bucket needs to be split first. Size=" + size() + ", capacity=" + capacity);
    
    // tail for locked peers, head for unlocked ones
    int insertPosition = peer.isLocked() ? peers.size() : 0;
    peers.add(insertPosition, peer);
}
/** * Moves peers from the k-buckets to the s-bucket until the s-bucket is full * or all k-buckets are empty. */ private void refillSiblings() { // Sort all k-peers by distance to the local destination List<KademliaPeer> kPeers = new ArrayList<KademliaPeer>(); for (KBucket kBucket: kBuckets) kPeers.addAll(kBucket.getPeers()); Collections.sort(kPeers, new PeerDistanceComparator(localDestinationHash)); while (!sBucket.isFull() && !kPeers.isEmpty()) { // move the closest k-peer to the s-bucket KademliaPeer peerToMove = kPeers.remove(0); int bucketIndex = getBucketIndex(peerToMove.getDestinationHash()); kBuckets.get(bucketIndex).remove(peerToMove); sBucket.addOrUpdate(peerToMove); } }
@Test
public void testIsFull() {
    for (KBucket bucket: buckets) {
        Destination[] destinations = destinationMap.get(bucket);
        
        // the bucket must not report full until the K-th peer goes in
        for (int peerIndex = 0; peerIndex < K-1; peerIndex++) {
            bucket.addOrUpdate(new KademliaPeer(destinations[peerIndex]));
            assertFalse(bucket.isFull());
        }
        
        // adding peer number K fills the bucket
        bucket.addOrUpdate(new KademliaPeer(destinations[K-1]));
        assertTrue(bucket.isFull());
    }
}
/**
 * Returns every known peer: the contents of all k-buckets followed by
 * the contents of the s-bucket.
 */
public synchronized List<KademliaPeer> getAllPeers() {
    List<KademliaPeer> combined = new ArrayList<KademliaPeer>();
    for (KBucket kBucket: kBuckets)
        combined.addAll(kBucket.getPeers());
    combined.addAll(sBucket.getPeers());
    return combined;
}
/**
 * Creates a bucket manager holding a single root k-bucket that spans the
 * whole key space, plus the sibling bucket.
 * @param localDestinationHash The DHT key of the local destination
 */
public BucketManager(Hash localDestinationHash) {
    this.localDestinationHash = localDestinationHash;
    sBucket = new SBucket(localDestinationHash);
    kBuckets = new CopyOnWriteArrayList<KBucket>();
    // the root bucket covers the entire ID range, so depth=0
    kBuckets.add(new KBucket(AbstractBucket.MIN_HASH_VALUE, AbstractBucket.MAX_HASH_VALUE, 0));
}
/**
 * Splits the bucket in two equal halves (only in terms of ID range; the number
 * of elements in the two buckets may differ) and moves peers to the new bucket
 * if necessary.<br/>
 * The existing bucket retains the lower IDs; the new bucket will contain the
 * higher IDs.<br/>
 * In other words, the bucket is split into two sub-branches in the Kademlia
 * tree, with the old bucket representing the left branch and the new bucket
 * representing the right branch.
 * @return The new bucket
 * @see #split(BigInteger)
 */
KBucket split() {
    // split at the midpoint of the bucket's ID range
    BigInteger two = BigInteger.valueOf(2);
    BigInteger midpoint = startId.add(endId).divide(two);
    return split(midpoint);
}
/**
 * Returns the total number of known Kademlia peers (locked + unlocked peers),
 * counting both the k-buckets and the s-bucket.
 * @return
 */
int getPeerCount() {
    int total = sBucket.size();
    for (KBucket kBucket: kBuckets)
        total += kBucket.size();
    return total;
}
/**
 * Adds a <code>{@link KademliaPeer}</code> to the s-bucket or a k-bucket,
 * depending on its distance to the local node and how full the buckets are.
 * The local destination itself is never added.
 * @param peer The peer to add or update
 */
public void addOrUpdate(KademliaPeer peer) {
    Hash destHash = peer.getDestinationHash();
    if (localDestinationHash.equals(destHash)) {
        log.debug("Not adding local destination to bucket.");
        return;
    }
    
    // Offer the peer to the s-bucket first; the return value tells us what,
    // if anything, still needs to go into a k-bucket.
    KademliaPeer removedOrNotAdded = sBucket.addOrUpdate(peer);
    if (removedOrNotAdded == null)
        getKBucket(destHash).remove(peer);   // if the peer was in a k-bucket, remove it because it is now in the s-bucket
    else
        addToKBucket(removedOrNotAdded);   // if a peer was removed from the s-bucket or didn't qualify as a sibling, add it to a k-bucket
    
    // log
    int numBuckets = kBuckets.size();
    int numPeers = getAllPeers().size();
    int numSiblings = sBucket.size();
    log.debug("Peer " + Util.toBase32(destHash) + " added/updated. Peers=" + numPeers + " sib=" + numSiblings + " buk=" + numBuckets + " (not counting the sibling bucket)");
}
/**
 * Returns all peers that are not locked.
 * @return
 */
public synchronized List<Destination> getAllUnlockedPeers() {
    // collect every known peer first: k-bucket peers, then s-bucket peers
    List<KademliaPeer> candidates = new ArrayList<KademliaPeer>();
    for (KBucket kBucket: kBuckets)
        candidates.addAll(kBucket.getPeers());
    candidates.addAll(sBucket.getPeers());
    
    // keep only the unlocked ones
    List<Destination> unlocked = new ArrayList<Destination>();
    for (KademliaPeer candidate: candidates)
        if (!candidate.isLocked())
            unlocked.add(candidate);
    return unlocked;
}
// Three disjoint, ascending ID ranges, one KBucket per range; each bucket is
// mapped to its own array of test peers.
// NOTE(review): this fragment starts mid-method — startId/endId/depth/bucket/
// buckets/destinationMap/peers1..3 are declared elsewhere in the test class.
startId = new BigInteger("20165322902067810213412302882682490742329252029515918366652609455164745599540");
endId = new BigInteger("33702408623411834034558105528091829985375371188155381652341683994745035632399");
bucket = new KBucket(startId, endId, depth);
buckets.add(bucket);
destinationMap.put(bucket, peers1);

startId = new BigInteger("54322108851495321962026285688661141713340520926301416800794348337691246625057");
endId = new BigInteger("66306385230365087118809986298744845651941672849188218336247011280980080257051");
bucket = new KBucket(startId, endId, depth);
buckets.add(bucket);
destinationMap.put(bucket, peers2);

startId = new BigInteger("93473727204000033254175814578319686612288952448482536549858183451728938339338");
endId = new BigInteger("102977979631408937131925934912429413039310769350307229631811585815019336318945");
bucket = new KBucket(startId, endId, depth);
buckets.add(bucket);
destinationMap.put(bucket, peers3);
/**
 * Finds the index of the k-bucket whose key range contains a given {@link Hash}.
 * This method does a binary search "by hand" because <code>Collections.binarySearch()</code>
 * cannot be used to search for a <code>Hash</code> in a <code>List&lt;KBucket&gt;</code>.
 * @param key
 * @return
 */
private int getBucketIndex(Hash key) {
    if (kBuckets.size() == 1)
        return 0;
    
    // initially, the search interval is 0..n-1
    int lowIndex = 0;
    int highIndex = kBuckets.size() - 1;
    
    // interpret the hash bytes as an unsigned big-endian integer for comparison
    BigInteger keyValue = new BigInteger(1, key.getData());
    while (lowIndex < highIndex) {
        int centerIndex = (highIndex + lowIndex) / 2;
        // key is below this bucket's range -> search the lower half
        if (keyValue.compareTo(kBuckets.get(centerIndex).getStartId()) < 0)
            highIndex = centerIndex - 1;
        // key is at or past this bucket's end -> search the upper half
        else if (keyValue.compareTo(kBuckets.get(centerIndex).getEndId()) >= 0)
            lowIndex = centerIndex + 1;
        else
            return centerIndex;
    }
    
    // lowIndex == highIndex: the buckets cover the whole key space, so the
    // remaining bucket is the one that contains the key
    return lowIndex;
}