/**
 * The keys closest to the key, by XOR distance.
 * Returned list will never contain us.
 * @return non-null, closest first
 */
public List<T> getClosest(T key, int max) {
    // Delegate to the full form with nothing excluded.
    Set<T> noExclusions = Collections.<T> emptySet();
    return getClosest(key, max, noExclusions);
}
/**
 * DHT - get random keys to explore.
 * Delegates to the kbucket set using the default maximum bucket age.
 */
public List<NID> getExploreKeys() {
    List<NID> keys = _kad.getExploreKeys(MAX_BUCKET_AGE);
    return keys;
}
// NOTE(review): this span appears to be a truncated excerpt of a larger
// getClosest(key, max, toIgnore) implementation -- it begins with a bare
// return statement (unreachable code follows), 'start' is computed but never
// used, and 'rv' is sorted while still empty. Left byte-identical; confirm
// against the complete source before changing anything here.
return getClosest(max, toIgnore);
List<T> rv = new ArrayList<T>(max);
int count = 0;
getReadLock();
try {
    // presumably the bucket scan that fills 'rv' belongs here -- TODO confirm
    int start = pickBucket(key);
} finally {
    releaseReadLock();
}
// sort the collected keys by XOR distance from 'key', closest first
Comparator<T> comp = new XORComparator<T>(key);
Collections.sort(rv, comp);
/** * The bucket number (NOT the range number) that the xor of the key goes in * Caller must hold read lock * @return 0 to max-1 or -1 for us */ private int pickBucket(T key) { int range = getRange(key); if (range < 0) return -1; int rv = pickBucket(range); if (rv >= 0) { return rv; } _log.error("Key does not fit in any bucket?!\nKey : [" + DataHelper.toHexString(key.getData()) + "]" + "\nUs : " + _us + "\nDelta: [" + DataHelper.toHexString(DataHelper.xor(_us.getData(), key.getData())) + "]", new Exception("???")); _log.error(toString()); throw new IllegalStateException("pickBucket returned " + rv); //return -1; }
/**
 * Verify that generateRandomKey() always produces a key whose range
 * falls inside the target bucket's [rangeBegin, rangeEnd] bounds.
 * @since 0.9.10
 */
public void testGenRandom() {
    int failures = 0;
    for (KBucket<Hash> bucket : set.getBuckets()) {
        // hammer each bucket with many random keys
        for (int i = 0; i < 4000; i++) {
            Hash key = set.generateRandomKey(bucket);
            int range = set.getRange(key);
            boolean inBucket = range >= bucket.getRangeBegin() && range <= bucket.getRangeEnd();
            if (!inBucket) {
                log.error("Generate random key failed range=" + range + " for " + key + " meant for bucket " + bucket);
                failures++;
            }
        }
    }
    assertTrue(failures == 0);
}
// NOTE(review): truncated excerpt of a bucket-split loop -- s1/e1/s2/e2 are
// defined outside this view and the final 'else' branch is cut off mid-statement.
// Left byte-identical; confirm against the complete source.
int b = pickBucket(r);
// keep splitting the target bucket while the split policy says to
while (shouldSplit(_buckets.get(b))) {
    KBucket<T> b0 = _buckets.get(b);
    if (_log.shouldLog(Log.INFO))
        _log.info("Splitting (" + s1 + ',' + e2 + ") -> (" + s1 + ',' + e1 + ") (" + s2 + ',' + e2 + ')');
    // replace b0 with two buckets covering its lower and upper halves
    KBucket<T> b1 = createBucket(s1, e1);
    KBucket<T> b2 = createBucket(s2, e2);
    // redistribute b0's entries into whichever half they now belong to
    for (T key : b0.getEntries()) {
        if (getRange(key) < s2)
            b1.add(key);
        else
/** * For every bucket that hasn't been updated in this long, * or isn't close to full, * generate a random key that would be a member of that bucket. * The returned keys may be searched for to "refresh" the buckets. * @return non-null, closest first */ public List<T> getExploreKeys(long age) { List<T> rv = new ArrayList<T>(_buckets.size()); long old = _context.clock().now() - age; getReadLock(); try { for (KBucket<T> b : _buckets) { int curSize = b.getKeyCount(); // Always explore the closest bucket if ((b.getRangeBegin() == 0) || (b.getLastChanged() < old || curSize < BUCKET_SIZE * 3 / 4)) rv.add(generateRandomKey(b)); } } finally { releaseReadLock(); } return rv; }
/**
 * Tracks known DHT nodes in both a flat map (by NID) and a
 * kademlia kbucket set for closest-key lookups.
 * @param ctx the app context
 * @param me our own node ID (never stored in the kbucket set)
 */
public DHTNodes(I2PAppContext ctx, NID me) {
    _context = ctx;
    _log = ctx.logManager().getLog(DHTNodes.class);
    _expireTime = MAX_EXPIRE_TIME;
    _nodeMap = new ConcurrentHashMap<NID, NodeInfo>();
    _kad = new KBucketSet<NID>(ctx, me, KAD_K, KAD_B, new KBTrimmer(ctx, KAD_K));
}
/**
 * Debug info, HTML formatted.
 * Converts the kbucket set's plain-text dump to HTML line breaks.
 * @since 0.9.10
 */
@Override
public void renderStatusHTML(Writer out) throws IOException {
    String html = _kb.toString().replace("\n", "<br>\n");
    out.write(html);
}
}
/**
 * Remove a node from both the kbucket set and the flat map.
 * @return the NodeInfo previously associated with the NID, or null
 */
public NodeInfo remove(NID nid) {
    // drop from the kademlia structure first, then from the flat map
    _kad.remove(nid);
    NodeInfo previous = _nodeMap.remove(nid);
    return previous;
}
/**
 * All keys except the ignored ones.
 * @param toIgnore keys to exclude from the result
 * @return a copy in a new set; safe for the caller to mutate
 */
public Set<T> getAll(Set<T> toIgnore) {
    Set<T> rv = getAll();   // already a fresh copy
    rv.removeAll(toIgnore);
    return rv;
}
/**
 * Audit every stored entry: each key's range must fall within the
 * bounds of the bucket that holds it.
 * @since 0.9.10
 */
public void testAudit() {
    int failures = 0;
    for (KBucket<Hash> bucket : set.getBuckets()) {
        for (Hash entry : bucket.getEntries()) {
            int range = set.getRange(entry);
            boolean fits = range >= bucket.getRangeBegin() && range <= bucket.getRangeEnd();
            if (!fits) {
                log.error("Hash " + entry + " with range " + range + " does not belong in " + bucket);
                failures++;
            }
        }
    }
    assertTrue(failures == 0);
}
/** Drop all known nodes from both the flat map and the kbucket set. */
public void clear() {
    _nodeMap.clear();
    _kad.clear();
}
public void setUp(){ context = I2PAppContext.getGlobalContext(); log = context.logManager().getLog(KBucketSet.class); byte[] us = new byte[Hash.HASH_LENGTH]; context.random().nextBytes(us); usHash = new Hash(us); // We use the default RandomTrimmer so add() will never fail set = new KBucketSet<Hash>(context, usHash, K, B); // tests may be run in any order so prime it addRandom(1000); }
/**
 * Debug info, HTML formatted.
 * Converts the kbucket set's plain-text dump to HTML with separators.
 * @since 0.9.4
 */
public void renderStatusHTML(StringBuilder buf) {
    String html = _kad.toString().replace("\n", "<br><hr class=\"debug\">\n");
    buf.append(html);
}
/** * Final remove for a router info. * Do NOT use for leasesets. */ void dropAfterLookupFailed(Hash peer) { _context.peerManager().removeCapabilities(peer); _kb.remove(peer); //if (removed) { // if (_log.shouldLog(Log.INFO)) // _log.info("Removed kbucket entry for " + peer); //} _ds.remove(peer); }
/** * Pick out peers with the floodfill capacity set, returning them first, but then * after they're complete, sort via kademlia. * List will not include our own hash. * * @param key the ROUTING key (NOT the original key) * @param peersToIgnore can be null * @return List of Hash for the peers selected */ List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets, boolean preferConnected) { if (peersToIgnore == null) peersToIgnore = Collections.singleton(_context.routerHash()); else peersToIgnore.add(_context.routerHash()); // TODO this is very slow FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters); if (kbuckets == null) return new ArrayList<Hash>(); kbuckets.getAll(matches); List<Hash> rv = matches.get(maxNumRouters, preferConnected); if (_log.shouldLog(Log.DEBUG)) _log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": " + rv + " (not including " + peersToIgnore + ") [allHashes.size = " + matches.size() + "]", new Exception("Search by")); return rv; }
public synchronized void shutdown() { _initialized = false; if (_kb != null) _kb.clear(); // don't null out _kb, it can cause NPEs in concurrent operations //_kb = null; if (_ds != null) _ds.stop(); // don't null out _ds, it can cause NPEs in concurrent operations //_ds = null; _exploreKeys.clear(); // hope this doesn't cause an explosion, it shouldn't. // _exploreKeys = null; _negativeCache.clear(); }
public void testSelf() { // new implementation will never include myself assertFalse(set.add(usHash)); }