diff --git a/apps/routerconsole/jsp/debug.jsp b/apps/routerconsole/jsp/debug.jsp
index 90bd964eb9d3fe3626206bd3ef02f34f744c7bb4..93b9b58c07b9a21a85eed7dac95276bb8cf68210 100644
--- a/apps/routerconsole/jsp/debug.jsp
+++ b/apps/routerconsole/jsp/debug.jsp
@@ -21,6 +21,12 @@
      */
     net.i2p.router.RouterContext ctx = (net.i2p.router.RouterContext) net.i2p.I2PAppContext.getGlobalContext();
 
+    /*
+     *  Print out the status for the NetDB
+     */
+    out.print("<h2>Router DHT</h2>");
+    ctx.netDb().renderStatusHTML(out);
+
     /*
      *  Print out the status for the UpdateManager
      */
diff --git a/build.xml b/build.xml
index 0902f3f087914777b4d47bf203ce7d77c4d7fa6d..81cfe97c81a20ba7c2ecd9ba668c70cd3cf40952 100644
--- a/build.xml
+++ b/build.xml
@@ -476,7 +476,7 @@
             <group title="BOB Bridge" packages="net.i2p.BOB" />
             <group title="BOB Demos" packages="net.i2p.BOB.Demos.echo.echoclient:net.i2p.BOB.Demos.echo.echoserver" />
             <group title="Desktopgui Application" packages="net.i2p.desktopgui:net.i2p.desktopgui.*" />
-            <group title="I2PSnark Application" packages="org.klomp.snark:org.klomp.snark.*:net.i2p.kademlia" />
+            <group title="I2PSnark Application" packages="org.klomp.snark:org.klomp.snark.*" />
             <group title="I2PTunnel Application" packages="net.i2p.i2ptunnel:net.i2p.i2ptunnel.*" />
             <group title="Installer Utilities" packages="net.i2p.installer" />
             <group title="Jetty Starter and Logging" packages="net.i2p.jetty" />
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java b/core/java/src/net/i2p/kademlia/KBucket.java
similarity index 97%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
rename to core/java/src/net/i2p/kademlia/KBucket.java
index 8c3b85f2331dec14483b6224d86de427e1fb4564..0e86df38ba6167a641f410eefb3625003d320f4e 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
+++ b/core/java/src/net/i2p/kademlia/KBucket.java
@@ -17,7 +17,7 @@ import net.i2p.data.SimpleDataStructure;
  * a local key, using XOR as the distance metric
  *
  * Refactored from net.i2p.router.networkdb.kademlia
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public interface KBucket<T extends SimpleDataStructure> {
 
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java b/core/java/src/net/i2p/kademlia/KBucketImpl.java
similarity index 98%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
rename to core/java/src/net/i2p/kademlia/KBucketImpl.java
index e3c72c0925f16395b47a1731dcb2a4e17c57c114..ac804b90a32d1e264fc0f22dc3bba56649d97017 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
+++ b/core/java/src/net/i2p/kademlia/KBucketImpl.java
@@ -41,7 +41,7 @@ import net.i2p.util.ConcurrentHashSet;
  *  removing entries, this KBucket will exceed the max size.
  *
  *  Refactored from net.i2p.router.networkdb.kademlia
- *  @since 0.9.2
+ *  @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
     /**
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java b/core/java/src/net/i2p/kademlia/KBucketSet.java
similarity index 99%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
rename to core/java/src/net/i2p/kademlia/KBucketSet.java
index 542e3002685125904dd3521691b3b1279a589fee..1b2ba756a1eac3fdd598879da8fc9ee4eae7ad41 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
+++ b/core/java/src/net/i2p/kademlia/KBucketSet.java
@@ -35,7 +35,7 @@ import net.i2p.util.Log;
  * times 2**(B-1) for Kademlia value B.
  *
  * Refactored from net.i2p.router.networkdb.kademlia
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public class KBucketSet<T extends SimpleDataStructure> {
     private final Log _log;
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketTrimmer.java b/core/java/src/net/i2p/kademlia/KBucketTrimmer.java
similarity index 91%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucketTrimmer.java
rename to core/java/src/net/i2p/kademlia/KBucketTrimmer.java
index b33f85ddb4c53217966e2338427c5e01666139e5..fb73737ea3bec02d1520aa049d75c5050f5f997f 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/KBucketTrimmer.java
@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
 
 /**
  *  Called when a kbucket can no longer be split and is too big
- *  @since 0.9.2
+ *  @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public interface KBucketTrimmer<K extends SimpleDataStructure> {
     /**
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java b/core/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
similarity index 91%
rename from apps/i2psnark/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
rename to core/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
index ade28ce5006938de29cd40a3d41fa2cad6a35a43..dc50c8a2250d2e8cafb87d0840e021fbfb955def 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
@@ -5,7 +5,7 @@ import net.i2p.data.SimpleDataStructure;
 
 /**
  *  Removes a random element, but only if the bucket hasn't changed in 5 minutes.
- *  @since 0.9.2
+ *  @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public class RandomIfOldTrimmer<T extends SimpleDataStructure> extends RandomTrimmer<T> {
 
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/RandomTrimmer.java b/core/java/src/net/i2p/kademlia/RandomTrimmer.java
similarity index 93%
rename from apps/i2psnark/java/src/net/i2p/kademlia/RandomTrimmer.java
rename to core/java/src/net/i2p/kademlia/RandomTrimmer.java
index c1efff2624fe04388405e77a8a5846372870c6f2..72578ba1c972bfe602596d77c54892154870a119 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/RandomTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/RandomTrimmer.java
@@ -8,7 +8,7 @@ import net.i2p.data.SimpleDataStructure;
 
 /**
  *  Removes a random element. Not resistant to flooding.
- *  @since 0.9.2
+ *  @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
     protected final I2PAppContext _ctx;
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/RejectTrimmer.java b/core/java/src/net/i2p/kademlia/RejectTrimmer.java
similarity index 85%
rename from apps/i2psnark/java/src/net/i2p/kademlia/RejectTrimmer.java
rename to core/java/src/net/i2p/kademlia/RejectTrimmer.java
index 2e29f28e27ca94070c262b581c809cd89d65972b..5704541ffbda2b0e1308b56ce21323541ce09987 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/RejectTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/RejectTrimmer.java
@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
 
 /**
  *  Removes nothing and always rejects the add. Flood resistant..
- *  @since 0.9.2
+ *  @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public class RejectTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
     public boolean trim(KBucket<T> kbucket, T toAdd) {
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java b/core/java/src/net/i2p/kademlia/SelectionCollector.java
similarity index 80%
rename from apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
rename to core/java/src/net/i2p/kademlia/SelectionCollector.java
index e4cb770dec0741c60fd86204e8dbf0c21919e343..06a6f0957c122f70867d5fd2252dafb2ec1d4df3 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
+++ b/core/java/src/net/i2p/kademlia/SelectionCollector.java
@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
 
 /**
  * Visit kbuckets, gathering matches
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
 public interface SelectionCollector<T extends SimpleDataStructure> {
     public void add(T entry);
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java b/core/java/src/net/i2p/kademlia/XORComparator.java
similarity index 88%
rename from apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
rename to core/java/src/net/i2p/kademlia/XORComparator.java
index 2ac5017ddfe6b52d16ed96f1735f6566f0fbe5c9..5763a7b4f0bc08ed3f71e16cf2d99dd839fd7f94 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
+++ b/core/java/src/net/i2p/kademlia/XORComparator.java
@@ -7,9 +7,9 @@ import net.i2p.data.SimpleDataStructure;
 /**
  * Help sort Hashes in relation to a base key using the XOR metric
  *
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
  */
-class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
+public class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
     private final byte[] _base;
 
     /**
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/package.html b/core/java/src/net/i2p/kademlia/package.html
similarity index 60%
rename from apps/i2psnark/java/src/net/i2p/kademlia/package.html
rename to core/java/src/net/i2p/kademlia/package.html
index fe1a24f43b83250a19c4db6edfc10ca1b7637709..f517b242f6ae28a8ea6dc4ac097d085f24b5ae65 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/package.html
+++ b/core/java/src/net/i2p/kademlia/package.html
@@ -1,6 +1,6 @@
 <html><body><p>
 This is a major rewrite of KBucket, KBucketSet, and KBucketImpl from net.i2p.router.networkdb.kademlia.
 The classes are now generic to support SHA1. SHA256, or other key lengths.
-The long-term goal is to prove out this new implementation in i2psnark,
-then move it to core, then convert the network database to use it.
+Packaged in i2psnark since 0.9.2 and moved to core in 0.9.10
+so the network database can use it.
 </p></body></html>
diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
index be0ce340e93add63a84a8a6a151bf805ee40fd2d..a488ff7fc72f38970a2f5d0d2816be575806ead5 100644
--- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
@@ -74,7 +74,8 @@ public abstract class NetworkDatabaseFacade implements Service {
     public int getKnownLeaseSets() { return 0; }
     public boolean isInitialized() { return true; }
     public void rescan() {}
-    /** @deprecated moved to router console */
+
+    /** Debug only - all user info moved to NetDbRenderer in router console */
     public void renderStatusHTML(Writer out) throws IOException {}
     /** public for NetDbRenderer in routerconsole */
     public Set<LeaseSet> getLeases() { return Collections.emptySet(); }
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java
index 05154420ba1457bb715e2fe0e4942d954c7ade1f..88122c0e27b39dd625b3496a5a8285734c6fd25f 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java
@@ -15,6 +15,7 @@ import java.util.Set;
 import net.i2p.data.Hash;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.kademlia.KBucketSet;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 
@@ -97,7 +98,7 @@ class ExploreJob extends SearchJob {
                 available--;
         }
 
-        KBucketSet ks = _facade.getKBuckets();
+        KBucketSet<Hash> ks = _facade.getKBuckets();
         Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
         // in a few releases, we can (and should) remove this,
         // as routers will honor the above flag, and we want the table to include
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java
index cb4f111f608e1ccd5804d2c6b7c9710e879429b7..d1f489d6a7a4e4a027e95bdd489b995bb939a410 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java
@@ -8,11 +8,14 @@ package net.i2p.router.networkdb.kademlia;
  *
  */
 
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
 import net.i2p.data.Hash;
+import net.i2p.kademlia.KBucket;
+import net.i2p.kademlia.KBucketSet;
 import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
@@ -29,6 +32,7 @@ class ExploreKeySelectorJob extends JobImpl {
     private KademliaNetworkDatabaseFacade _facade;
     
     private final static long RERUN_DELAY_MS = 60*1000;
+    private final static long OLD_BUCKET_TIME = 15*60*1000;
     
     public ExploreKeySelectorJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
         super(context);
@@ -42,7 +46,7 @@ class ExploreKeySelectorJob extends JobImpl {
             requeue(30*RERUN_DELAY_MS);
             return;
         }
-        Set<Hash> toExplore = selectKeysToExplore();
+        Collection<Hash> toExplore = selectKeysToExplore();
         _log.info("Filling the explorer pool with: " + toExplore);
         if (toExplore != null)
             _facade.queueForExploration(toExplore);
@@ -54,33 +58,11 @@ class ExploreKeySelectorJob extends JobImpl {
      * for it, with a maximum number of keys limited by the exploration pool size
      *
      */
-    private Set<Hash> selectKeysToExplore() {
+    private Collection<Hash> selectKeysToExplore() {
         Set<Hash> alreadyQueued = _facade.getExploreKeys();
-        if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null;
-        Set<Hash> toExplore = new HashSet<Hash>(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
-        for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) {
-            KBucket bucket = _facade.getKBuckets().getBucket(i);
-            if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
-                boolean already = false;
-                for (Iterator<Hash> iter = alreadyQueued.iterator(); iter.hasNext(); ) {
-                    Hash key = iter.next();
-                    if (bucket.shouldContain(key)) {
-                        already = true;
-                        _log.debug("Bucket " + i + " is already queued for exploration \t" + key);
-                        break;
-                    }
-                }
-                if (!already) {
-                    // no keys are queued for exploring this still-too-small bucket yet
-                    Hash key = bucket.generateRandomKey();
-                    _log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key);
-                    toExplore.add(key);
-                }
-            } else {
-                _log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further");
-            }
-        }
-        return toExplore;
+        if (alreadyQueued.size() > KademliaNetworkDatabaseFacade.MAX_EXPLORE_QUEUE)
+            return null;
+        return _facade.getKBuckets().getExploreKeys(OLD_BUCKET_TIME);
     }
     
 }
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
index c07f3de9be505e84a0cb4c307b1767f49ad8cdf3..9327489064b090c9ba747ec855b5346a717edb66 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
@@ -8,6 +8,7 @@ import java.util.List;
 
 import net.i2p.data.Hash;
 import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.kademlia.KBucketSet;
 import net.i2p.router.Job;
 import net.i2p.router.MessageSelector;
 import net.i2p.router.OutNetMessage;
@@ -70,7 +71,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
         //List<Hash> floodfillPeers = _facade.getFloodfillPeers();
         // new
         List<Hash> floodfillPeers;
-        KBucketSet ks = _facade.getKBuckets();
+        KBucketSet<Hash> ks = _facade.getKBuckets();
         if (ks != null) {
             Hash rkey = getContext().routingKeyGenerator().getRoutingKey(_key);
             // Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
index 3e054ed0c4709af4c36c4d5b2df7fa043881e2c2..fdf47bf3ccb2db8b6e0f740ef2bf6a237d7b71a9 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
@@ -20,6 +20,9 @@ import java.util.TreeSet;
 import net.i2p.data.Hash;
 import net.i2p.data.RouterAddress;
 import net.i2p.data.RouterInfo;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.SelectionCollector;
+import net.i2p.kademlia.XORComparator;
 import net.i2p.router.RouterContext;
 import net.i2p.router.peermanager.PeerProfile;
 import net.i2p.router.util.RandomIterator;
@@ -53,7 +56,7 @@ class FloodfillPeerSelector extends PeerSelector {
      * @return List of Hash for the peers selected
      */
     @Override
-    List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { 
+    List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) { 
         return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
     }
 
@@ -68,7 +71,7 @@ class FloodfillPeerSelector extends PeerSelector {
      * @return List of Hash for the peers selected
      */
     @Override
-    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { 
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) { 
         return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
     }
 
@@ -81,7 +84,7 @@ class FloodfillPeerSelector extends PeerSelector {
      * @param peersToIgnore can be null
      * @return List of Hash for the peers selected
      */
-    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) { 
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets, boolean preferConnected) { 
         if (peersToIgnore == null)
             peersToIgnore = Collections.singleton(_context.routerHash());
         else
@@ -104,7 +107,7 @@ class FloodfillPeerSelector extends PeerSelector {
      *  List will not include our own hash.
      *  List is not sorted and not shuffled.
      */
-    List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
+    List<Hash> selectFloodfillParticipants(KBucketSet<Hash> kbuckets) {
         Set<Hash> ignore = Collections.singleton(_context.routerHash());
         return selectFloodfillParticipants(ignore, kbuckets);
     }
@@ -116,7 +119,7 @@ class FloodfillPeerSelector extends PeerSelector {
      *  List MAY INCLUDE our own hash.
      *  List is not sorted and not shuffled.
      */
-    private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
+    private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
       /*****
         if (kbuckets == null) return Collections.EMPTY_LIST;
         // TODO this is very slow - use profile getPeersByCapability('f') instead
@@ -155,7 +158,7 @@ class FloodfillPeerSelector extends PeerSelector {
      *           success newer than failure
      *  Group 3: All others
      */
-    List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
+    List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet<Hash> kbuckets) {
         Set<Hash> ignore = Collections.singleton(_context.routerHash());
         return selectFloodfillParticipants(key, maxNumRouters, ignore, kbuckets);
     }
@@ -175,7 +178,7 @@ class FloodfillPeerSelector extends PeerSelector {
      *  @param toIgnore can be null
      *  @param kbuckets now unused
      */
-    List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
+    List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
         if (toIgnore == null) {
             toIgnore = Collections.singleton(_context.routerHash());
         } else if (!toIgnore.contains(_context.routerHash())) {
@@ -193,9 +196,9 @@ class FloodfillPeerSelector extends PeerSelector {
      *  @param toIgnore can be null
      *  @param kbuckets now unused
      */
-    private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
+    private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
         List<Hash> ffs = selectFloodfillParticipants(toIgnore, kbuckets);
-        TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator(key));
+        TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
         sorted.addAll(ffs);
 
         List<Hash> rv = new ArrayList<Hash>(howMany);
@@ -339,7 +342,7 @@ class FloodfillPeerSelector extends PeerSelector {
         return Integer.valueOf(rv);
     }
 
-    private class FloodfillSelectionCollector implements SelectionCollector {
+    private class FloodfillSelectionCollector implements SelectionCollector<Hash> {
         private final TreeSet<Hash> _sorted;
         private final List<Hash>  _floodfillMatches;
         private final Hash _key;
@@ -354,7 +357,7 @@ class FloodfillPeerSelector extends PeerSelector {
          */
         public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
             _key = key;
-            _sorted = new TreeSet<Hash>(new XORComparator(key));
+            _sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
             _floodfillMatches = new ArrayList<Hash>(8);
             _toIgnore = toIgnore;
             _wanted = wanted;
@@ -475,7 +478,7 @@ class FloodfillPeerSelector extends PeerSelector {
      * @return List of Hash for the peers selected, ordered
      */
     @Override
-    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
         Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
         if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
             // return non-ff
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
index 868f0d0b880a9d2818566fe055b42a3db3e75bcb..5eee0109ffcd74544ba2cc955bf928a7cf6a8f57 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
@@ -16,6 +16,8 @@ import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
 import net.i2p.data.i2np.DatabaseLookupMessage;
 import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.XORComparator;
 import net.i2p.router.CommSystemFacade;
 import net.i2p.router.Job;
 import net.i2p.router.MessageSelector;
@@ -93,7 +95,7 @@ class IterativeSearchJob extends FloodSearchJob {
         _timeoutMs = Math.min(timeoutMs, MAX_SEARCH_TIME);
         _expiration = _timeoutMs + ctx.clock().now();
         _rkey = ctx.routingKeyGenerator().getRoutingKey(key);
-        _toTry = new TreeSet<Hash>(new XORComparator(_rkey));
+        _toTry = new TreeSet<Hash>(new XORComparator<Hash>(_rkey));
         _unheardFrom = new HashSet<Hash>(CONCURRENT_SEARCHES);
         _failedPeers = new HashSet<Hash>(TOTAL_SEARCH_LIMIT);
         _sentTime = new ConcurrentHashMap<Hash, Long>(TOTAL_SEARCH_LIMIT);
@@ -109,7 +111,7 @@ class IterativeSearchJob extends FloodSearchJob {
         }
         // pick some floodfill peers and send out the searches
         List<Hash> floodfillPeers;
-        KBucketSet ks = _facade.getKBuckets();
+        KBucketSet<Hash> ks = _facade.getKBuckets();
         if (ks != null) {
             // Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
             // but we're passing the rkey not the key, so we do it below instead in certain cases.
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucket.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucket.java
deleted file mode 100644
index b76b948edd64c251aba53f19e729a74c8fc7f99d..0000000000000000000000000000000000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucket.java
+++ /dev/null
@@ -1,83 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-/*
- * free (adj.): unencumbered; not under the control of others
- * Written by jrandom in 2003 and released into the public domain 
- * with no warranty of any kind, either expressed or implied.  
- * It probably won't make your computer catch on fire, or eat 
- * your children, but it might.  Use at your own risk.
- *
- */
-
-import java.util.Set;
-
-import net.i2p.data.Hash;
-
-/**
- * Group, without inherent ordering, a set of keys a certain distance away from
- * a local key, using XOR as the distance metric
- *
- */
-interface KBucket {
-    /** 
-     * lowest order high bit for difference keys 
-     */
-    public int getRangeBegin();
-    /**
-     * highest high bit for the difference keys
-     *
-     */
-    public int getRangeEnd();
-    /**
-     * Set the range low and high bits for difference keys
-     */
-    public void setRange(int lowOrderBitLimit, int highOrderBitLimit);
-    /**
-     * Number of keys already contained in this kbuckey
-     */
-    public int getKeyCount();
-    /**
-     * whether or not the key qualifies as part of this bucket
-     *
-     */
-    public boolean shouldContain(Hash key);
-    /**
-     * Add the peer to the bucket
-     *
-     * @return number of keys in the bucket after the addition
-     */
-    public int add(Hash key);
-    /**
-     * Remove the key from the bucket
-     * @return true if the key existed in the bucket before removing it, else false
-     */
-    public boolean remove(Hash key);
-    
-    /**
-     * Retrieve all routing table entries stored in the bucket
-     * @return set of Hash structures
-     */
-    public Set<Hash> getEntries();
-
-    /**
-     * Retrieve hashes stored in the bucket, excluding the ones specified 
-     * @return set of Hash structures
-     * @deprecated makes a copy, remove toIgnore in KBS instead
-     */
-    public Set<Hash> getEntries(Set<Hash> toIgnoreHashes);
-
-    public void getEntries(SelectionCollector collector);
-    
-    /**
-     * Fill the bucket with entries
-     * @param entries set of Hash structures
-     */
-    public void setEntries(Set<Hash> entries);
-
-    /** 
-     * Generate a random key that would go inside this bucket
-     *
-     */
-    public Hash generateRandomKey();
-    
-    public LocalHash getLocal();
-}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
deleted file mode 100644
index 5b618991887db773ba2bbf5d43bfe268f646b07c..0000000000000000000000000000000000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
+++ /dev/null
@@ -1,474 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-/*
- * free (adj.): unencumbered; not under the control of others
- * Written by jrandom in 2003 and released into the public domain
- * with no warranty of any kind, either expressed or implied.
- * It probably won't make your computer catch on fire, or eat
- * your children, but it might.  Use at your own risk.
- *
- */
-
-import java.math.BigInteger;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import net.i2p.I2PAppContext;
-import net.i2p.data.DataHelper;
-import net.i2p.data.Hash;
-import net.i2p.util.ConcurrentHashSet;
-import net.i2p.util.Log;
-import net.i2p.util.RandomSource;
-
-class KBucketImpl implements KBucket {
-    private Log _log;
-    /**
-     *  set of Hash objects for the peers in the kbucketx
-     *
-     *  jrandom switched from a HashSet to an ArrayList with this change:
-     *  2005-08-27  jrandom
-     *    * Minor logging and optimization tweaks in the router and SDK
-     *
-     *  Now we switch back to a ConcurrentHashSet and remove all the
-     *  synchronization, which may or may not be faster than
-     *  a synchronized ArrayList, with checks for existence before
-     *  adding a Hash. But the other benefit is it removes one
-     *  cause of profileMangager/netDb deadlock.
-     */
-    private final Set<Hash> _entries;
-    /** we center the kbucket set on the given hash, and derive distances from this */
-    private LocalHash _local;
-    /** include if any bits equal or higher to this bit (in big endian order) */
-    private int _begin;
-    /** include if no bits higher than this bit (inclusive) are set */
-    private int _end;
-    /** when did we last shake things up */
-    private long _lastShuffle;
-    private I2PAppContext _context;
-    
-    public KBucketImpl(I2PAppContext context, LocalHash local) {
-        _context = context;
-        _log = context.logManager().getLog(KBucketImpl.class);
-        _entries = new ConcurrentHashSet<Hash>(2); //all but the last 1 or 2 buckets will be empty
-        _lastShuffle = context.clock().now();
-        setLocal(local);
-    }
-    
-    /** for testing - use above constructor for production to get common caching */
-    public KBucketImpl(I2PAppContext context, Hash local) {
-        this(context, new LocalHash(local));
-    }
-    
-    public int getRangeBegin() { return _begin; }
-    public int getRangeEnd() { return _end; }
-    public void setRange(int lowOrderBitLimit, int highOrderBitLimit) {
-        _begin = lowOrderBitLimit;
-        _end = highOrderBitLimit;
-    }
-    public int getKeyCount() {
-        return _entries.size();
-    }
-    
-    public LocalHash getLocal() { return _local; }
-    private void setLocal(LocalHash local) {
-        _local = local; 
-        // we want to make sure we've got the cache in place before calling cachedXor
-        _local.prepareCache();
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Local hash reset to " + DataHelper.toHexString(local.getData()));
-    }
-    
-    private byte[] distanceFromLocal(Hash key) {
-        if (key == null) 
-            throw new IllegalArgumentException("Null key for distanceFromLocal?");
-        return _local.cachedXor(key);
-    }
-    
-    public boolean shouldContain(Hash key) {
-        byte distance[] = distanceFromLocal(key);
-        // rather than use a BigInteger and compare, we do it manually by 
-        // checking the bits
-        boolean tooLarge = distanceIsTooLarge(distance);
-        if (tooLarge) {
-            if (false && _log.shouldLog(Log.DEBUG))
-                _log.debug("too large [" + _begin + "-->" + _end + "] " 
-                           + "\nLow:  " + BigInteger.ZERO.setBit(_begin).toString(16)
-                           + "\nCur:  " + DataHelper.toHexString(distance)
-                           + "\nHigh: " + BigInteger.ZERO.setBit(_end).toString(16));
-            return false;
-        }
-        boolean tooSmall = distanceIsTooSmall(distance);
-        if (tooSmall) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("too small [" + _begin + "-->" + _end + "] distance: " + DataHelper.toHexString(distance));
-            return false;
-        }
-        // this bed is juuuuust right
-        return true;
-        
-        /*
-        // woohah, incredibly excessive object creation! whee!
-        BigInteger kv = new BigInteger(1, distanceFromLocal(key));
-        int lowComp = kv.compareTo(_lowerBounds);
-        int highComp = kv.compareTo(_upperBounds);
-        
-        //_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp);
-        
-        if ( (lowComp >= 0) && (highComp < 0) ) return true;
-        return false;
-        */
-    }
-    
-    private final boolean distanceIsTooLarge(byte distance[]) {
-        int upperLimitBit = Hash.HASH_LENGTH*8 - _end;
-        // It is too large if there are any bits set before the upperLimitBit
-        int upperLimitByte = upperLimitBit > 0 ? upperLimitBit / 8 : 0;
-        
-        if (upperLimitBit <= 0)
-            return false;
-        
-        for (int i = 0; i < distance.length; i++) {
-            if (i < upperLimitByte) {
-                if (distance[i] != 0x00) {
-                    // outright too large
-                    return true;
-                }
-            } else if (i == upperLimitByte) {
-                if (distance[i] == 0x00) {
-                    // no bits set through the high bit
-                    return false;
-                } else {
-                    int upperVal = 1 << (upperLimitBit % 8);
-                    if (distance[i] > upperVal) {
-                        // still too large, but close
-                        return true;
-                    } else if (distance[i] == upperVal) {
-                        // ok, it *may* equal the upper limit,
-                        // if the rest of the bytes are 0
-                        for (int j = i+1; j < distance.length; j++) {
-                            if (distance[j] != 0x00) {
-                                // nope
-                                return true;
-                            }
-                        }
-                        // w00t, the rest is made of 0x00 bytes, so it
-                        // exactly matches the upper limit.  kooky, very improbable,
-                        // but possible
-                        return false;
-                    }
-                }
-            } else if (i > upperLimitByte) {
-                // no bits set before or at the upper limit, so its
-                // definitely not too large
-                return false;
-            }
-        }
-        _log.log(Log.CRIT, "wtf, gravity broke: distance=" + DataHelper.toHexString(distance) 
-                           + ", end=" + _end, new Exception("moo"));
-        return true;
-    }
-    
-    /** 
-     * Is the distance too small?
-     *
-     */
-    private final boolean distanceIsTooSmall(byte distance[]) {
-        int beginBit = Hash.HASH_LENGTH*8 - _begin;
-        // It is too small if there are no bits set before the beginBit
-        int beginByte = beginBit > 0 ? beginBit / 8 : 0;
-        
-        if (beginByte >= distance.length) {
-            if (_begin == 0)
-                return false;
-            else
-                return true;
-        }
-        
-        for (int i = 0; i < distance.length; i++) {
-            if ( (i < beginByte) && (distance[i] != 0x00) ) {
-                return false;
-            } else {
-                if (i != beginByte) {
-                    // zero value and too early... keep going
-                    continue;
-                } else {
-                    int beginVal = 1 << (_begin % 8);
-                    if (distance[i] >= beginVal) {
-                        return false;
-                    } else {
-                        // no bits set prior to the beginVal
-                        return true;
-                    }
-                }
-            }
-        }
-        _log.log(Log.CRIT, "wtf, gravity broke!  distance=" + DataHelper.toHexString(distance) 
-                           + " begin=" + _begin
-                           + " beginBit=" + beginBit 
-                           + " beginByte=" + beginByte, new Exception("moo"));
-        return true;
-    }
-    
-    /**
-     *  @return unmodifiable view
-     */
-    public Set<Hash> getEntries() {
-        return Collections.unmodifiableSet(_entries);
-    }
-
-    /**
-     *  @deprecated makes a copy, remove toIgnore in KBS instead
-     */
-    public Set<Hash> getEntries(Set<Hash> toIgnoreHashes) {
-        Set<Hash> entries = new HashSet<Hash>(_entries);
-        entries.removeAll(toIgnoreHashes);
-        return entries;
-    }
-    
-    public void getEntries(SelectionCollector collector) {
-        for (Hash h : _entries) {
-                collector.add(h);
-        }
-    }
-    
-    public void setEntries(Set<Hash> entries) {
-        _entries.clear();
-        _entries.addAll(entries);
-    }
-    
-    /**
-     *  Todo: shuffling here is a hack and doesn't work since
-     *  we switched back to a HashSet implementation
-     */
-    public int add(Hash peer) {
-            _entries.add(peer);
-/**********
-            // Randomize the bucket every once in a while if we are floodfill, so that
-            // exploration will return better results. See FloodfillPeerSelector.add(Hash).
-            if (_lastShuffle + SHUFFLE_DELAY < _context.clock().now() &&
-                !SearchJob.onlyQueryFloodfillPeers((RouterContext)_context)) {
-                Collections.shuffle(_entries, _context.random());
-                _lastShuffle = _context.clock().now();
-            }
-***********/
-        return _entries.size();
-    }
-    
-    public boolean remove(Hash peer) {
-        return _entries.remove(peer);
-    }
-    
-    /**
-     * Generate a random key to go within this bucket
-     *
-     * WARNING - Something is seriously broken here. testRand2() fails right away.
-     * ExploreKeySelectorJob is now disabled, ExploreJob just searches for a random
-     * key instead.
-     */
-    public Hash generateRandomKey() {
-        BigInteger variance = new BigInteger((_end-_begin)-1, _context.random());
-        variance = variance.setBit(_begin);
-        //_log.debug("Random variance for " + _size + " bits: " + variance);
-        byte data[] = variance.toByteArray();
-        byte hash[] = new byte[Hash.HASH_LENGTH];
-        if (data.length <= Hash.HASH_LENGTH) {
-            System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
-        } else {
-            System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
-        }
-        Hash key = new Hash(hash);
-        data = distanceFromLocal(key);
-        hash = new byte[Hash.HASH_LENGTH];
-        if (data.length <= Hash.HASH_LENGTH) {
-            System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
-        } else {
-            System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
-        }
-        key = new Hash(hash);
-        return key;
-    }
-    
-    public Hash getRangeBeginKey() {
-        BigInteger lowerBounds = getLowerBounds();
-        if ( (_local != null) && (_local.getData() != null) ) {
-            lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData()));
-        }
-        
-        byte data[] = lowerBounds.toByteArray();
-        byte hash[] = new byte[Hash.HASH_LENGTH];
-        if (data.length <= Hash.HASH_LENGTH) {
-            System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
-        } else {
-            System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
-        }
-        Hash key = new Hash(hash);
-        return key;
-    }
-    
-    public Hash getRangeEndKey() {
-        BigInteger upperBounds = getUpperBounds();
-        if ( (_local != null) && (_local.getData() != null) ) {
-            upperBounds = upperBounds.xor(new BigInteger(1, _local.getData()));
-        }
-        byte data[] = upperBounds.toByteArray();
-        byte hash[] = new byte[Hash.HASH_LENGTH];
-        if (data.length <= Hash.HASH_LENGTH) {
-            System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
-        } else {
-            System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
-        }
-        Hash key = new Hash(hash);
-        return key;
-    }
-    
-    private BigInteger getUpperBounds() {
-        return BigInteger.ZERO.setBit(_end);
-    }
-    private BigInteger getLowerBounds() {
-        if (_begin == 0)
-            return BigInteger.ZERO;
-        else
-            return BigInteger.ZERO.setBit(_begin);
-    }
-    
-    @Override
-    public String toString() {
-        StringBuilder buf = new StringBuilder(1024);
-        buf.append("KBucketImpl: ");
-        buf.append(_entries.toString()).append("\n");
-        buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n');
-        buf.append("Local key: \n");
-        if ( (_local != null) && (_local.getData() != null) )
-            buf.append(toString(_local.getData())).append('\n');
-        else
-            buf.append("[undefined]\n");
-        buf.append("Low and high keys:\n");
-        buf.append(toString(getRangeBeginKey().getData())).append('\n');
-        buf.append(toString(getRangeEndKey().getData())).append('\n');
-        buf.append("Low and high deltas:\n");
-        buf.append(getLowerBounds().toString(2)).append('\n');
-        buf.append(getUpperBounds().toString(2)).append('\n');
-        return buf.toString();
-    }
-    
-    /**
-     * Test harness to make sure its assigning keys to the right buckets
-     *
-     * WARNING - Something is seriously broken here. testRand2() fails right away.
-     */
-    public static void main(String args[]) {
-        testRand2();
-        testRand();
-        testLimits();
-        
-        try { Thread.sleep(10000); } catch (InterruptedException ie) {}
-    }
-    
-    private static void testLimits() {
-        int low = 1;
-        int high = 3;
-        Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
-        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH);
-        bucket.setRange(low, high);
-        Hash lowerBoundKey = bucket.getRangeBeginKey();
-        Hash upperBoundKey = bucket.getRangeEndKey();
-        boolean okLow = bucket.shouldContain(lowerBoundKey);
-        boolean okHigh = bucket.shouldContain(upperBoundKey);
-        if (okLow && okHigh)
-            log.debug("Limit test ok");
-        else
-            log.error("Limit test failed!  ok low? " + okLow + " ok high? " + okHigh);
-    }
-    
-    private static void testRand() {
-        //StringBuilder buf = new StringBuilder(2048);
-        int low = 1;
-        int high = 3;
-        Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
-        LocalHash local = new LocalHash(Hash.FAKE_HASH);
-        local.prepareCache();
-        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
-        bucket.setRange(low, high);
-        //Hash lowerBoundKey = bucket.getRangeBeginKey();
-        //Hash upperBoundKey = bucket.getRangeEndKey();
-        for (int i = 0; i < 100000; i++) {
-            Hash rnd = bucket.generateRandomKey();
-            //buf.append(toString(rnd.getData())).append('\n');
-            boolean ok = bucket.shouldContain(rnd);
-            if (!ok) {
-                //byte diff[] = bucket.getLocal().cachedXor(rnd);
-                //BigInteger dv = new BigInteger(1, diff);
-                //log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) 
-                //          + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) 
-                //          + "\nBucket: \n"+bucket, new Exception("WTF"));
-                log.error("wtf, bucket doesnt want a key that it generated.  i == " + i);
-                log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData()) 
-                           + "\nVal: " + DataHelper.toHexString(rnd.getData())
-                           + "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
-                try { Thread.sleep(1000); } catch (InterruptedException e) {}
-                System.exit(0);
-            } else {
-                //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
-            }
-            //_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray()));
-        }
-        log.info("Passed 100,000 random key generations against the null hash");
-    }
-    
-    private static void testRand2() {
-        Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
-        int low = 1;
-        int high = 200;
-        byte hash[] = new byte[Hash.HASH_LENGTH];
-        RandomSource.getInstance().nextBytes(hash);
-        LocalHash local = new LocalHash(hash);
-        local.prepareCache();
-        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
-        bucket.setRange(low, high);
-        //Hash lowerBoundKey = bucket.getRangeBeginKey();
-        //Hash upperBoundKey = bucket.getRangeEndKey();
-        for (int i = 0; i < 100000; i++) {
-            Hash rnd = bucket.generateRandomKey();
-            //buf.append(toString(rnd.getData())).append('\n');
-            boolean ok = bucket.shouldContain(rnd);
-            if (!ok) {
-                //byte diff[] = bucket.getLocal().cachedXor(rnd);
-                //BigInteger dv = new BigInteger(1, diff);
-                //log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) 
-                //          + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) 
-                //          + "\nBucket: \n"+bucket, new Exception("WTF"));
-                log.error("wtf, bucket doesnt want a key that it generated.  i == " + i);
-                log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData()) 
-                           + "\nVal: " + DataHelper.toHexString(rnd.getData())
-                           + "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
-                try { Thread.sleep(1000); } catch (InterruptedException e) {}
-                System.exit(0);
-            } else {
-                //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
-            }
-        }
-        log.info("Passed 100,000 random key generations against a random hash");
-    }
-    
-    private final static String toString(byte b[]) {
-        if (true) return DataHelper.toHexString(b);
-        StringBuilder buf = new StringBuilder(b.length);
-        for (int i = 0; i < b.length; i++) {
-            buf.append(toString(b[i]));
-            buf.append(" ");
-        }
-        return buf.toString();
-    }
-    
-    private final static String toString(byte b) {
-        StringBuilder buf = new StringBuilder(8);
-        for (int i = 7; i >= 0; i--) {
-            boolean bb = (0 != (b & (1<<i)));
-            if (bb)
-                buf.append("1");
-            else
-                buf.append("0");
-        }
-        return buf.toString();
-    }
-}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketSet.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketSet.java
deleted file mode 100644
index 9b14bd9834e7739e4ee61ad1a246ad44fad557ec..0000000000000000000000000000000000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketSet.java
+++ /dev/null
@@ -1,219 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-/*
- * free (adj.): unencumbered; not under the control of others
- * Written by jrandom in 2003 and released into the public domain
- * with no warranty of any kind, either expressed or implied.
- * It probably won't make your computer catch on fire, or eat
- * your children, but it might.  Use at your own risk.
- *
- */
-
-import java.math.BigInteger;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import net.i2p.I2PAppContext;
-import net.i2p.data.DataHelper;
-import net.i2p.data.Hash;
-import net.i2p.util.Log;
-
-/**
- * In memory storage of buckets sorted by the XOR metric from the local router's
- * identity, with bucket N containing routers BASE^N through BASE^N+1 away, up through
- * 2^256 bits away (since we use SHA256).
- *
- */
-class KBucketSet {
-    private final Log _log;
-    private final I2PAppContext _context;
-    private final LocalHash _us;
-    private final KBucket _buckets[];
-    private final AtomicInteger _size = new AtomicInteger();
-    
-    public final static int BASE = 8; // must go into KEYSIZE_BITS evenly
-    public final static int KEYSIZE_BITS = Hash.HASH_LENGTH * 8;
-    public final static int NUM_BUCKETS = KEYSIZE_BITS/BASE;
-    private final static BigInteger BASE_I = new BigInteger(""+(1<<BASE));
-    public final static int BUCKET_SIZE = 500; // # values at which we start periodic trimming (500 ~= 250Kb)
-    
-    public KBucketSet(I2PAppContext context, Hash us) {
-        _us = new LocalHash(us);
-        _context = context;
-        _log = context.logManager().getLog(KBucketSet.class);
-        _buckets = createBuckets();
-        context.statManager().createRateStat("netDb.KBSGetAllTime", "Time to add all Hashes to the Collector", "NetworkDatabase", new long[] { 60*60*1000 });
-    }
-    
-    /**
-     * Return true if the peer is new to the bucket it goes in, or false if it was
-     * already in it
-     */
-    public boolean add(Hash peer) {
-        int bucket = pickBucket(peer);
-        if (bucket >= 0) {
-            int oldSize = _buckets[bucket].getKeyCount();
-            int numInBucket = _buckets[bucket].add(peer);
-            if (numInBucket != oldSize)
-                _size.incrementAndGet();
-            if (numInBucket > BUCKET_SIZE) {
-                // perhaps queue up coalesce job?  naaahh.. lets let 'er grow for now
-            }
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Peer " + peer + " added to bucket " + bucket);
-            return oldSize != numInBucket;
-        } else {
-            throw new IllegalArgumentException("Unable to pick a bucket.  wtf!");
-        }
-    }
-    
-    /**
-     * Not an exact count (due to concurrency issues) but generally correct
-     *
-     */
-    public int size() {
-        return _size.get();
-        /*
-        int size = 0;
-        for (int i = 0; i < _buckets.length; i++)
-            size += _buckets[i].getKeyCount();
-        return size;
-         */
-    }
-    
-    public boolean remove(Hash entry) {
-        int bucket = pickBucket(entry);
-        KBucket kbucket = getBucket(bucket);
-        boolean removed = kbucket.remove(entry);
-        if (removed)
-            _size.decrementAndGet();
-        return removed;
-    }
-    
-    /** @since 0.8.8 */
-    public void clear() {
-        for (int i = 0; i < _buckets.length; i++) {
-            _buckets[i].setEntries(Collections.<Hash> emptySet());
-        }
-        _size.set(0);
-        _us.clearXorCache();
-    }
-    
-    public Set<Hash> getAll() { return getAll(Collections.<Hash> emptySet()); };
-
-    public Set<Hash> getAll(Set<Hash> toIgnore) {
-        Set<Hash> all = new HashSet<Hash>(1024);
-        for (int i = 0; i < _buckets.length; i++) {
-            all.addAll(_buckets[i].getEntries());
-        }
-        all.removeAll(toIgnore);
-        return all;
-    }
-    
-    public void getAll(SelectionCollector collector) {
-        long start = _context.clock().now();
-        for (int i = 0; i < _buckets.length; i++)
-            _buckets[i].getEntries(collector);
-        _context.statManager().addRateData("netDb.KBSGetAllTime", _context.clock().now() - start, 0);
-    }
-    
-    public int pickBucket(Hash key) {
-        for (int i = 0; i < NUM_BUCKETS; i++) {
-            if (_buckets[i].shouldContain(key))
-                return i;
-        }
-        _log.error("Key does not fit in any bucket?! WTF!\nKey  : [" 
-                   + DataHelper.toHexString(key.getData()) + "]" 
-                   + "\nUs   : [" + toString(_us.getData()) + "]"
-                   + "\nDelta: ["
-                   + DataHelper.toHexString(DataHelper.xor(_us.getData(), key.getData()))
-                   + "]", new Exception("WTF"));
-        displayBuckets();
-        return -1;
-    }
-    
-    public KBucket getBucket(int bucket) { return _buckets[bucket]; }
-    
-    protected KBucket[] createBuckets() {
-        KBucket[] buckets = new KBucket[NUM_BUCKETS];
-        for (int i = 0; i < NUM_BUCKETS-1; i++) {
-            buckets[i] = createBucket(i*BASE, (i+1)*BASE);
-        }
-        buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1);
-        return buckets;
-    }
-    
-    protected KBucket createBucket(int start, int end) {
-        KBucket bucket = new KBucketImpl(_context, _us);
-        bucket.setRange(start, end);
-        _log.debug("Creating a bucket from " + start + " to " + (end));
-        return bucket;
-    }
-    
-    public void displayBuckets() {
-        _log.info(toString());
-    }
-    
-    @Override
-    public String toString() {
-        BigInteger us = new BigInteger(1, _us.getData());
-        StringBuilder buf = new StringBuilder(1024);
-        buf.append("Bucket set rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n");
-        for (int i = 0; i < NUM_BUCKETS; i++) {
-            buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n");
-            buf.append("Start:  ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n");
-            buf.append("End:    ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n");
-            buf.append("Contents:").append(_buckets[i].toString()).append("\n");
-        }
-        
-        return buf.toString();
-    }
-
-    final static String toString(byte b[]) {
-        byte val[] = new byte[Hash.HASH_LENGTH];
-        if (b.length < 32)
-            System.arraycopy(b, 0, val, Hash.HASH_LENGTH-b.length-1, b.length);
-        else
-            System.arraycopy(b, Hash.HASH_LENGTH-b.length, val, 0, val.length);
-        StringBuilder buf = new StringBuilder(KEYSIZE_BITS);
-        for (int i = 0; i < val.length; i++) {
-            for (int j = 7; j >= 0; j--) {
-                boolean bb = (0 != (val[i] & (1<<j)));
-                if (bb)
-                    buf.append("1");
-                else
-                    buf.append("0");
-            }
-            buf.append(" ");
-        }
-        //    buf.append(Integer.toBinaryString(val[i]));
-        return buf.toString();
-    }
-    
-    public static void main(String args[]) {
-        I2PAppContext context = I2PAppContext.getGlobalContext();
-        Log log = context.logManager().getLog(KBucketSet.class);
-        KBucketSet set = new KBucketSet(context, Hash.FAKE_HASH);
-        testSelf(set, log);
-        testRandom(set, 1000, context, log);
-    }
-    private static void testSelf(KBucketSet set, Log log) {
-        boolean added = set.add(Hash.FAKE_HASH);
-        if (!added) 
-            log.error("Unable to add self...");
-        else
-            log.debug("Added self");
-    }
-    private static void testRandom(KBucketSet set, int count, I2PAppContext context, Log log) {
-        for (int i = 0; i < count; i++) {
-            byte val[] = new byte[Hash.HASH_LENGTH];
-            context.random().nextBytes(val);
-            boolean added = set.add(new Hash(val));
-            if (!added)
-                log.error("Unable to add random key [" + DataHelper.toHexString(val) + "]");
-            else
-                log.debug("Added random key");
-        }
-    }
-}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
index 851e7897ce853d8e01cd93c1fb9932779e933487..153656c984107c04400d2349e5b1234ff44150f4 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
@@ -9,6 +9,8 @@ package net.i2p.router.networkdb.kademlia;
  */
 
 import java.io.IOException;
+import java.io.Writer;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
@@ -25,6 +27,9 @@ import net.i2p.data.RouterAddress;
 import net.i2p.data.RouterInfo;
 import net.i2p.data.i2np.DatabaseLookupMessage;
 import net.i2p.data.i2np.DatabaseStoreMessage;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.RejectTrimmer;
+import net.i2p.kademlia.SelectionCollector;
 import net.i2p.router.Job;
 import net.i2p.router.NetworkDatabaseFacade;
 import net.i2p.router.Router;
@@ -41,7 +46,7 @@ import net.i2p.util.Log;
  */
 public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     protected final Log _log;
-    private KBucketSet _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
+    private KBucketSet<Hash> _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
     private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
     /** where the data store is pushing the data */
     private String _dbDir;
@@ -132,7 +137,14 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
      */
     protected final static long PUBLISH_JOB_DELAY = 5*60*1000l;
 
-    private static final int MAX_EXPLORE_QUEUE = 128;
+    static final int MAX_EXPLORE_QUEUE = 128;
+
+    /**
+     *  kad K
+     *  Was 500 in old implementation but that was with B ~= -8!
+     */
+    private static final int BUCKET_SIZE = 16;
+    private static final int KAD_B = 3;
 
     public KademliaNetworkDatabaseFacade(RouterContext context) {
         _context = context;
@@ -168,7 +180,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return _reseedChecker;
     }
 
-    KBucketSet getKBuckets() { return _kb; }
+    KBucketSet<Hash> getKBuckets() { return _kb; }
     DataStore getDataStore() { return _ds; }
     
     long getLastExploreNewDate() { return _lastExploreNew; }
@@ -185,13 +197,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return Collections.unmodifiableSet(_exploreKeys);
     }
     
-    public void removeFromExploreKeys(Set<Hash> toRemove) {
+    public void removeFromExploreKeys(Collection<Hash> toRemove) {
         if (!_initialized) return;
         _exploreKeys.removeAll(toRemove);
         _context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
     }
 
-    public void queueForExploration(Set<Hash> keys) {
+    public void queueForExploration(Collection<Hash> keys) {
         if (!_initialized) return;
         for (Iterator<Hash> iter = keys.iterator(); iter.hasNext() && _exploreKeys.size() < MAX_EXPLORE_QUEUE; ) {
             _exploreKeys.add(iter.next());
@@ -240,7 +252,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         _log.info("Starting up the kademlia network database");
         RouterInfo ri = _context.router().getRouterInfo();
         String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
-        _kb = new KBucketSet(_context, ri.getIdentity().getHash());
+        _kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(),
+                                   BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
         try {
             _ds = new PersistentDataStore(_context, dbDir, this);
         } catch (IOException ioe) {
@@ -368,7 +381,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return count.size();
     }
     
-    private class CountRouters implements SelectionCollector {
+    private class CountRouters implements SelectionCollector<Hash> {
         private int _count;
         public int size() { return _count; }
         public void add(Hash entry) {
@@ -1042,4 +1055,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         }
         _context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
     }
+
+    /**
+     * Debug info, HTML formatted
+     * @since 0.9.10
+     */
+    @Override
+    public void renderStatusHTML(Writer out) throws IOException {
+        out.write(_kb.toString().replace("\n", "<br>\n"));
+    }
 }
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
index 1dc2de75ec76588a352e900a18bd0f229e1f60a5..f73eb00fef5aa41f8f86568460cf2ffa2ecf4863 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
@@ -17,6 +17,8 @@ import java.util.TreeMap;
 
 import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.SelectionCollector;
 import net.i2p.router.RouterContext;
 import net.i2p.router.util.HashDistance;
 import net.i2p.util.Log;
@@ -41,7 +43,7 @@ class PeerSelector {
      *
      * @return ordered list of Hash objects
      */
-    List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {
+    List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet<Hash> kbuckets) {
         // get the peers closest to the key
         return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
     }
@@ -54,7 +56,7 @@ class PeerSelector {
      *
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
-    List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
         //if (true)
             return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);
         
@@ -94,7 +96,7 @@ class PeerSelector {
      *
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
-    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
         if (peersToIgnore == null)
             peersToIgnore = new HashSet<Hash>(1);
         peersToIgnore.add(_context.routerHash());
@@ -109,7 +111,7 @@ class PeerSelector {
     }
     
     /** UNUSED */
-    private class MatchSelectionCollector implements SelectionCollector {
+    private class MatchSelectionCollector implements SelectionCollector<Hash> {
         private TreeMap<BigInteger, Hash> _sorted;
         private Hash _key;
         private Set<Hash> _toIgnore;
@@ -200,7 +202,7 @@ class PeerSelector {
      * @param peersToIgnore can be null
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
-    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
         // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
         // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
         // later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java
index 9a4f99c43b725819b3a8bd696e16af029216ebb8..62343ae9ed6c91623175b291fa907feea016593f 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java
@@ -10,6 +10,7 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import net.i2p.data.Hash;
+import net.i2p.kademlia.XORComparator;
 import net.i2p.router.RouterContext;
 
 /**
@@ -61,7 +62,7 @@ class SearchState {
     private Set<Hash> locked_getClosest(Set<Hash> peers, int max, Hash target) {
         if (_attemptedPeers.size() <= max)
             return new HashSet<Hash>(_attemptedPeers);
-        TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator(target));
+        TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator<Hash>(target));
         closest.addAll(_attemptedPeers);
         Set<Hash> rv = new HashSet<Hash>(max);
         int i = 0;
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SelectionCollector.java b/router/java/src/net/i2p/router/networkdb/kademlia/SelectionCollector.java
deleted file mode 100644
index 020da4de47cdd77fde29a823c203914520aae747..0000000000000000000000000000000000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/SelectionCollector.java
+++ /dev/null
@@ -1,10 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-
-import net.i2p.data.Hash;
-
-/**
- * Visit kbuckets, gathering matches
- */
-interface SelectionCollector {
-    public void add(Hash entry);
-}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
index 72f6315aef4f5d5856048505ca64003a23753fa4..d0f3c6ddc0b225eb4f168c6e9a1caab1c0a278c9 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
@@ -19,6 +19,7 @@ import net.i2p.data.RouterInfo;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DatabaseStoreMessage;
 import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.kademlia.KBucketSet;
 import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
 import net.i2p.router.OutNetMessage;
@@ -233,7 +234,7 @@ class StoreJob extends JobImpl {
 
     private List<Hash> getClosestFloodfillRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
         Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
-        KBucketSet ks = _facade.getKBuckets();
+        KBucketSet<Hash> ks = _facade.getKBuckets();
         if (ks == null) return new ArrayList<Hash>();
         return ((FloodfillPeerSelector)_peerSelector).selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
     }
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/XORComparator.java b/router/java/src/net/i2p/router/networkdb/kademlia/XORComparator.java
deleted file mode 100644
index cd734b685f80e82fb002f259487bfa2fa798ea1d..0000000000000000000000000000000000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/XORComparator.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-
-import java.util.Comparator;
-
-import net.i2p.data.Hash;
-
-/**
- * Help sort Hashes in relation to a base key using the XOR metric.
- */
-class XORComparator implements Comparator<Hash> {
-    private final byte[] _base;
-
-    /**
-     * @param target key to compare distances with
-     */
-    public XORComparator(Hash target) {
-        _base = target.getData();
-    }
-
-    /**
-     * getData() of args must be non-null
-     */
-    public int compare(Hash lhs, Hash rhs) {
-        byte lhsb[] = lhs.getData();
-        byte rhsb[] = rhs.getData();
-        for (int i = 0; i < _base.length; i++) {
-            int ld = (lhsb[i] ^ _base[i]) & 0xff;
-            int rd = (rhsb[i] ^ _base[i]) & 0xff;
-            if (ld < rd)
-                return -1;
-            if (ld > rd)
-                return 1;
-        }
-        return 0;
-    }
-}