diff --git a/.mtn-ignore b/.mtn-ignore
index 67d2a1aa35cd7d4f1c7c86b8f992ccc75de5a511..c42160e3cdfe108246d64f2015cec082b9ca9f64 100644
--- a/.mtn-ignore
+++ b/.mtn-ignore
@@ -17,10 +17,10 @@ _jsp\.java$
 \.war$
 \.zip$
 ^\.
-^build/
+^build
 ^pkg-temp/
 ~$
-/build/
+/build
 /classes/
 ^debian/copyright
 override.properties
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java b/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
new file mode 100644
index 0000000000000000000000000000000000000000..075547fbd4342611a3ecfa3ac94b9b46d07ecaa5
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
@@ -0,0 +1,75 @@
+package net.i2p.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain 
+ * with no warranty of any kind, either expressed or implied.  
+ * It probably won't make your computer catch on fire, or eat 
+ * your children, but it might.  Use at your own risk.
+ *
+ */
+
+import java.util.Set;
+
+import net.i2p.data.SimpleDataStructure;
+
+/**
+ * Group, without inherent ordering, a set of keys that are a certain distance away
+ * from a local key, using XOR as the distance metric.
+ *
+ * Refactored from net.i2p.router.networkdb.kademlia
+ */
+public interface KBucket<T extends SimpleDataStructure> {
+
+    /** 
+     * Lowest order high bit for difference keys.
+     * The lower-bounds distance of this bucket is 2**begin.
+     * If begin == 0, this is the closest bucket.
+     */
+    public int getRangeBegin();
+
+    /**
+     * Highest high bit for the difference keys.
+     * The upper-bounds distance of this bucket is (2**(end+1)) - 1.
+     * If begin == end, the bucket cannot be split further.
+     * If end == (numbits - 1), this is the furthest bucket.
+     */
+    public int getRangeEnd();
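+
+    // Illustrative example (not normative): with 160-bit keys and B = 1, a bucket
+    // with begin == 158 and end == 159 holds keys whose XOR distance d from the
+    // local key satisfies 2**158 <= d <= 2**160 - 1.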
+
+    /**
+     * Number of keys already contained in this kbucket
+     */
+    public int getKeyCount();
+
+    /**
+     * Add the peer to the bucket
+     *
+     * @return true if added
+     */
+    public boolean add(T key);
+
+    /**
+     * Remove the key from the bucket
+     * @return true if the key existed in the bucket before removing it, else false
+     */
+    public boolean remove(T key);
+    
+    /**
+     *  Update the last-changed timestamp to now.
+     */
+    public void setLastChanged();
+
+    /**
+     *  The last-changed timestamp
+     */
+    public long getLastChanged();
+
+    /**
+     * Retrieve all routing table entries stored in the bucket
+     * @return set of keys (the generic type T)
+     */
+    public Set<T> getEntries();
+
+    public void getEntries(SelectionCollector<T> collector);
+
+    public void clear();
+}
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..f73a4e0712929e7a95f112d037651cd3e3720511
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
@@ -0,0 +1,149 @@
+package net.i2p.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might.  Use at your own risk.
+ *
+ */
+
+import java.util.Collections;
+import java.util.Set;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.SimpleDataStructure;
+import net.i2p.util.ConcurrentHashSet;
+
+/**
+ *  A concurrent implementation using ConcurrentHashSet.
+ *  The max size (K) may be temporarily exceeded due to concurrency,
+ *  a pending split, or the behavior of the supplied trimmer,
+ *  as explained below.
+ *  The creator is responsible for splits.
+ *
+ *  This class has no knowledge of the DHT base used for XORing,
+ *  and thus there are no validity checks in add/remove.
+ *
+ *  The begin and end values are immutable.
+ *  All entries in this bucket will have at least one bit different
+ *  from us in the range [begin, end] inclusive.
+ *  Splits must be implemented by creating two new buckets
+ *  and discarding this one.
+ *
+ *  The keys are kept in a Set and are NOT sorted by last-seen.
+ *  Per-key last-seen-time, failures, etc. must be tracked elsewhere.
+ *
+ *  If this bucket is full (i.e. begin == end && size == max)
+ *  then add() will call KBucketTrimmer.trim() to
+ *  (possibly) remove older entries, and indicate whether
+ *  to add the new entry. If the trimmer returns true without
+ *  removing entries, this KBucket will exceed the max size.
+ *
+ *  Refactored from net.i2p.router.networkdb.kademlia
+ */
+class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
+    /**
+     *  set of Hash objects for the peers in the kbucket
+     */
+    private final Set<T> _entries;
+    /** include if any bit equal to or higher than this bit (in big endian order) */
+    private final int _begin;
+    /** include if no bits higher than this bit (inclusive) are set */
+    private final int _end;
+    private final int _max;
+    private final KBucketSet.KBucketTrimmer _trimmer;
+    /** when did we last shake things up */
+    private long _lastChanged;
+    private final I2PAppContext _context;
+    
+    /**
+     *  All entries in this bucket will have at least one bit different
+     *  from us in the range [begin, end] inclusive.
+     */
+    public KBucketImpl(I2PAppContext context, int begin, int end, int max, KBucketSet.KBucketTrimmer trimmer) {
+        if (begin > end)
+            throw new IllegalArgumentException(begin + " > " + end);
+        _context = context;
+        _entries = new ConcurrentHashSet(max + 4);
+        _begin = begin;
+        _end = end;
+        _max = max;
+        _trimmer = trimmer;
+    }
+    
+    public int getRangeBegin() { return _begin; }
+
+    public int getRangeEnd() { return _end; }
+
+    public int getKeyCount() {
+        return _entries.size();
+    }
+    
+    /**
+     *  @return an unmodifiable view; not a copy
+     */
+    public Set<T> getEntries() {
+        return Collections.unmodifiableSet(_entries);
+    }
+
+    public void getEntries(SelectionCollector<T> collector) {
+        for (T h : _entries) {
+             collector.add(h);
+        }
+    }
+    
+    public void clear() {
+        _entries.clear();
+    }
+    
+    /**
+     *  Sets last-changed if rv is true OR if the peer is already present.
+     *  Calls the trimmer if begin == end and we are full.
+     *  If begin != end then add it and caller must do bucket splitting.
+     *  @return true if added
+     */
+    public boolean add(T peer) {
+        if (_begin != _end || _entries.size() < _max ||
+            _entries.contains(peer) || _trimmer.trim(this, peer)) {
+            // do this even if already contains, to call setLastChanged()
+            boolean rv = _entries.add(peer);
+            setLastChanged();
+            return rv;
+        }
+        return false;
+    }
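+
+    // Illustrative flow (not from the original sources): when begin == end and the
+    // bucket already holds _max entries, add() of a new peer calls _trimmer.trim();
+    // the default RandomTrimmer evicts a random entry and returns true, so the new
+    // peer is then added and the bucket stays at the max size.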
+    
+    /**
+     *  @return if removed. Does NOT set lastChanged.
+     */
+    public boolean remove(T peer) {
+        boolean rv = _entries.remove(peer);
+        //if (rv)
+        //    setLastChanged();
+        return rv;
+    }
+    
+    /**
+     *  Update the last-changed timestamp to now.
+     */
+    public void setLastChanged() {
+        _lastChanged = _context.clock().now();
+    }
+
+    /**
+     *  The last-changed timestamp, which actually indicates last-added or last-seen.
+     */
+    public long getLastChanged() {
+        return _lastChanged;
+    }
+    
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(1024);
+        buf.append(_entries.size());
+        buf.append(" entries in (").append(_begin).append(',').append(_end);
+        buf.append(") : ").append(_entries.toString());
+        return buf.toString();
+    }
+}
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
new file mode 100644
index 0000000000000000000000000000000000000000..9b8f8474c1f53331bdb675bea1dfe78bb86ce7f5
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
@@ -0,0 +1,853 @@
+package net.i2p.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might.  Use at your own risk.
+ *
+ */
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.DataHelper;
+import net.i2p.data.SimpleDataStructure;
+import net.i2p.util.Log;
+
+/**
+ * In-memory storage of buckets sorted by the XOR metric from the base (us)
+ * passed in via the constructor.
+ * This starts with one bucket covering the whole key space, and
+ * may eventually be split to a max of the number of bits in the data type
+ * (160 for SHA1Hash or 256 for Hash),
+ * times 2**(B-1) for Kademlia value B.
+ *
+ * Refactored from net.i2p.router.networkdb.kademlia
+ */
+public class KBucketSet<T extends SimpleDataStructure> {
+    private final Log _log;
+    private final I2PAppContext _context;
+    private final T _us;
+
+    /**
+     * The bucket list is locked by _bucketsLock, however the individual
+     * buckets are not locked. Users may see buckets that have more than
+     * the maximum k entries, or may have adds and removes silently fail
+     * when they appear to succeed.
+     *
+     * Closest values are in bucket 0, furthest are in the last bucket.
+     */
+    private final List<KBucket> _buckets;
+    private final Range<T> _rangeCalc;
+    private final KBucketTrimmer _trimmer;
+    
+    /**
+     *  Locked for reading only when traversing all the buckets.
+     *  Locked for writing only when splitting a bucket.
+     *  Adds/removes/gets from individual buckets are not locked.
+     */
+    private final ReentrantReadWriteLock _bucketsLock = new ReentrantReadWriteLock(false);
+
+    private final int KEYSIZE_BITS;
+    private final int NUM_BUCKETS;
+    private final int BUCKET_SIZE;
+    private final int B_VALUE;
+    private final int B_FACTOR;
+    
+    /**
+     * Use the default trim strategy, which removes a random entry.
+     * @param us the local identity (typically a SHA1Hash or Hash)
+     *           The class must have a zero-argument constructor.
+     * @param max the Kademlia value "k", the max per bucket, k > 4
+     * @param b the Kademlia value "b", split buckets an extra 2**(b-1) times,
+     *           b > 0, use 1 for bittorrent, Kademlia paper recommends 5
+     */
+    public KBucketSet(I2PAppContext context, T us, int max, int b) {
+        this(context, us, max, b, new RandomTrimmer(context, max));
+    }
+
+    /**
+     * Use the supplied trim strategy.
+     */
+    public KBucketSet(I2PAppContext context, T us, int max, int b, KBucketTrimmer trimmer) {
+        _us = us;
+        _context = context;
+        _log = context.logManager().getLog(KBucketSet.class);
+        _trimmer = trimmer;
+        if (max <= 4 || b <= 0 || b > 8)
+            throw new IllegalArgumentException();
+        KEYSIZE_BITS = us.length() * 8;
+        B_VALUE = b;
+        B_FACTOR = 1 << (b - 1);
+        NUM_BUCKETS = KEYSIZE_BITS * B_FACTOR;
+        BUCKET_SIZE = max;
+        _buckets = createBuckets();
+        _rangeCalc = new Range(us, B_VALUE);
+        // this verifies the zero-argument constructor
+        makeKey(new byte[us.length()]);
+    }
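+
+    // Illustrative sizing (example values, not normative): with SHA1 keys
+    // (KEYSIZE_BITS = 160) and b = 1, B_FACTOR = 1 and NUM_BUCKETS = 160;
+    // with b = 3, B_FACTOR = 4 and NUM_BUCKETS = 640.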
+    
+    private void getReadLock() {
+        _bucketsLock.readLock().lock();
+    }
+
+    /**
+     *  Get the lock if we can. Non-blocking.
+     *  @return true if the lock was acquired
+     */
+    private boolean tryReadLock() {
+        return _bucketsLock.readLock().tryLock();
+    }
+
+    private void releaseReadLock() {
+        _bucketsLock.readLock().unlock();
+    }
+
+    /** @return true if the lock was acquired */
+    private boolean getWriteLock() {
+        try {
+            boolean rv = _bucketsLock.writeLock().tryLock(3000, TimeUnit.MILLISECONDS);
+            if ((!rv) && _log.shouldLog(Log.WARN))
+                _log.warn("no lock, size is: " + _bucketsLock.getQueueLength(), new Exception("rats"));
+            return rv;
+        } catch (InterruptedException ie) {}
+        return false;
+    }
+
+    private void releaseWriteLock() {
+        _bucketsLock.writeLock().unlock();
+    }
+
+    /**
+     * @return true if the peer is new to the bucket it goes in, or false if it was
+     *  already in it. Always returns false on an attempt to add ourselves.
+     *
+     */
+    public boolean add(T peer) {
+        KBucket bucket;
+        getReadLock();
+        try {
+            bucket = getBucket(peer);
+        } finally { releaseReadLock(); }
+        if (bucket != null) {
+            if (bucket.add(peer)) {
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("Peer " + peer + " added to bucket " + bucket);
+                if (shouldSplit(bucket)) {
+                    if (_log.shouldLog(Log.DEBUG))
+                        _log.debug("Splitting bucket " + bucket);
+                    split(bucket.getRangeBegin());
+                    //testAudit(this, _log);
+                }
+                return true;
+            } else {
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("Peer " + peer + " NOT added to bucket " + bucket);
+                return false;
+            }
+        } else {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Failed to add, probably us: " + peer);
+            return false;
+        }
+    }
+
+    /**
+     *  No lock required.
+     *  FIXME will split the closest buckets too far if B > 1 and K < 2**B
+     *  Won't ever really happen and if it does it still works.
+     */
+    private boolean shouldSplit(KBucket b) {
+        return
+               b.getRangeBegin() != b.getRangeEnd() &&
+               b.getKeyCount() > BUCKET_SIZE;
+    }
+
+    /**
+     *  Grabs the write lock.
+     *  Caller must NOT have the read lock.
+     *  The bucket should be splittable (range start != range end).
+     *  @param r the range start of the bucket to be split
+     */
+    private void split(int r) {
+        if (!getWriteLock())
+            return;
+        try {
+            locked_split(r);
+        } finally { releaseWriteLock(); }
+    }
+
+    /**
+     *  Creates two or more new buckets. The old bucket is replaced and discarded.
+     *
+     *  Caller must hold write lock
+     *  The bucket should be splittable (range start != range end).
+     *  @param r the range start of the bucket to be split
+     */
+    private void locked_split(int r) {
+        int b = pickBucket(r);
+        while (shouldSplit(_buckets.get(b))) {
+            KBucket<T> b0 = _buckets.get(b);
+            // Each bucket gets half the keyspace.
+            // When B_VALUE = 1, or the bucket is larger than B_FACTOR, then
+            // e.g. 0-159 => 0-158, 159-159
+            // When B_VALUE > 1, and the bucket is smaller than B_FACTOR, then
+            // e.g. 1020-1023 => 1020-1021, 1022-1023
+            int s1, e1, s2, e2;
+            s1 = b0.getRangeBegin();
+            e2 = b0.getRangeEnd();
+            if (B_FACTOR > 1 &&
+                (s1 & (B_FACTOR - 1)) == 0 &&
+                ((e2 + 1) & (B_FACTOR - 1)) == 0 &&
+                e2 > s1 + B_FACTOR) {
+                // The bucket is a "whole" kbucket with a range > B_FACTOR,
+                // so it should be split into two "whole" kbuckets each with
+                // a range >= B_FACTOR.
+                // Log split
+                s2 = e2 + 1 - B_FACTOR;
+            } else {
+                // The bucket is the smallest "whole" kbucket with a range == B_FACTOR,
+                // or B_VALUE > 1 and the bucket has already been split.
+                // Start or continue splitting down to a depth B_VALUE.
+                // Linear split
+                s2 = s1 + ((1 + e2 - s1) / 2);
+            }	
+            e1 = s2 - 1;
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Splitting (" + s1 + ',' + e2 + ") -> (" + s1 + ',' + e1 + ") (" + s2 + ',' + e2 + ')');
+            KBucket<T> b1 = createBucket(s1, e1);
+            KBucket<T> b2 = createBucket(s2, e2);
+            for (T key : b0.getEntries()) {
+                if (getRange(key) < s2)
+                    b1.add(key);
+                else
+                    b2.add(key);
+            }
+            _buckets.set(b, b1);
+            _buckets.add(b + 1, b2);
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Split bucket at idx " + b +
+                           ":\n" + b0 +
+                           "\ninto: " + b1 +
+                           "\nand: " + b2);
+            //if (_log.shouldLog(Log.DEBUG))
+            //    _log.debug("State is now: " + toString());
+
+            if (b2.getKeyCount() > BUCKET_SIZE) {
+                // should be rare... too hard to call _trimmer from here
+                // (and definitely not from inside the write lock)
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("All went into 2nd bucket after split");
+            }
+            // loop if all the entries went in the first bucket
+        }
+    }
+
+    /**
+     *  The current number of entries.
+     */
+    public int size() {
+        int rv = 0;
+        getReadLock();
+        try {
+            for (KBucket b : _buckets) {
+                rv += b.getKeyCount();
+            }
+        } finally { releaseReadLock(); }
+        return rv;
+    }
+    
+    public boolean remove(T entry) {
+        KBucket kbucket;
+        getReadLock();
+        try {
+            kbucket = getBucket(entry);
+        } finally { releaseReadLock(); }
+        if (kbucket == null)
+            return false;   // key is us, nothing to remove
+        boolean removed = kbucket.remove(entry);
+        return removed;
+    }
+    
+    /** @since 0.8.8 */
+    public void clear() {
+        getReadLock();
+        try {
+            for (KBucket b : _buckets) {
+                b.clear();
+            }
+        } finally { releaseReadLock(); }
+        _rangeCalc.clear();
+    }
+    
+    /**
+     *  @return a copy in a new set
+     */
+    public Set<T> getAll() {
+        Set<T> all = new HashSet(256);
+        getReadLock();
+        try {
+            for (KBucket b : _buckets) {
+                all.addAll(b.getEntries());
+            }
+        } finally { releaseReadLock(); }
+        return all;
+    }
+
+    /**
+     *  @return a copy in a new set
+     */
+    public Set<T> getAll(Set<T> toIgnore) {
+        Set<T> all = getAll();
+        all.removeAll(toIgnore);
+        return all;
+    }
+    
+    public void getAll(SelectionCollector<T> collector) {
+        getReadLock();
+        try {
+            for (KBucket b : _buckets) {
+                b.getEntries(collector);
+            }
+        } finally { releaseReadLock(); }
+    }
+    
+    /**
+     *  The keys closest to us.
+     *  Returned list will never contain us.
+     *  @return non-null, closest first
+     */
+    public List<T> getClosest(int max) {
+        return getClosest(max, Collections.EMPTY_SET);
+    }
+    
+    /**
+     *  The keys closest to us.
+     *  Returned list will never contain us.
+     *  @return non-null, closest first
+     */
+    public List<T> getClosest(int max, Collection<T> toIgnore) {
+        List<T> rv = new ArrayList(max);
+        int count = 0;
+        getReadLock();
+        try {
+            // start at first (closest) bucket
+            for (int i = 0; i < _buckets.size() && count < max; i++) {
+                Set<T> entries = _buckets.get(i).getEntries();
+                // add the whole bucket except for ignores,
+                // extras will be trimmed after sorting
+                for (T e : entries) {
+                    if (!toIgnore.contains(e)) {
+                        rv.add(e);
+                        count++;
+                    }
+                }
+            }
+        } finally { releaseReadLock(); }
+        Comparator comp = new XORComparator(_us);
+        Collections.sort(rv, comp);
+        int sz = rv.size();
+        for (int i = sz - 1; i >= max; i--) {
+            rv.remove(i);
+        }
+        return rv;
+    }
+    
+    /**
+     *  The keys closest to the key.
+     *  Returned list will never contain us.
+     *  @return non-null, closest first
+     */
+    public List<T> getClosest(T key, int max) {
+        return getClosest(key, max, Collections.EMPTY_SET);
+    }
+    
+    /**
+     *  The keys closest to the key.
+     *  Returned list will never contain us.
+     *  @return non-null, closest first
+     */
+    public List<T> getClosest(T key, int max, Collection<T> toIgnore) {
+        if (key.equals(_us))
+            return getClosest(max, toIgnore);
+        List<T> rv = new ArrayList(max);
+        int count = 0;
+        getReadLock();
+        try {
+            int start = pickBucket(key);
+            // start at closest bucket, then to the smaller (closer to us) buckets
+            for (int i = start; i >= 0 && count < max; i--) {
+                Set<T> entries = _buckets.get(i).getEntries();
+                for (T e : entries) {
+                    if (!toIgnore.contains(e)) {
+                        rv.add(e);
+                        count++;
+                    }
+                }
+            }
+            // then the farther from us buckets if necessary
+            for (int i = start + 1; i < _buckets.size() && count < max; i++) {
+                Set<T> entries = _buckets.get(i).getEntries();
+                for (T e : entries) {
+                    if (!toIgnore.contains(e)) {
+                        rv.add(e);
+                        count++;
+                    }
+                }
+            }
+        } finally { releaseReadLock(); }
+        Comparator comp = new XORComparator(key);
+        Collections.sort(rv, comp);
+        int sz = rv.size();
+        for (int i = sz - 1; i >= max; i--) {
+            rv.remove(i);
+        }
+        return rv;
+    }
+
+    /**
+     *  The bucket number (NOT the range number) that the xor of the key goes in
+     *  Caller must hold read lock
+     *  @return 0 to max-1 or -1 for us
+     */
+    private int pickBucket(T key) {
+        int range = getRange(key);
+        if (range < 0)
+            return -1;
+        int rv = pickBucket(range);
+        if (rv >= 0) {
+             return rv;
+        }
+        _log.error("Key does not fit in any bucket?! WTF!\nKey  : [" 
+                   + DataHelper.toHexString(key.getData()) + "]" 
+                   + "\nUs   : " + _us
+                   + "\nDelta: ["
+                   + DataHelper.toHexString(DataHelper.xor(_us.getData(), key.getData()))
+                   + "]", new Exception("WTF"));
+        _log.error(toString());
+        throw new IllegalStateException("pickBucket returned " + rv);
+        //return -1;
+    }
+    
+    /**
+     *  Returned list is a copy of the bucket list, closest first,
+     *  with the actual buckets (not a copy).
+     *
+     *  Primarily for testing. You shouldn't ever need to get all the buckets.
+     *  Use getClosest() or getAll() instead to get the keys.
+     *
+     *  @return non-null
+     */
+    List<KBucket<T>> getBuckets() {
+        getReadLock();
+        try {
+            return new ArrayList(_buckets);
+        } finally { releaseReadLock(); }
+    }
+
+    /**
+     *  The bucket that the xor of the key goes in
+     *  Caller must hold read lock
+     *  @return null if key is us
+     */
+    private KBucket getBucket(T key) {
+       int bucket = pickBucket(key);
+       if (bucket < 0)
+           return null;
+       return _buckets.get(bucket);
+    }
+    
+    /**
+     *  The bucket number that contains this range number
+     *  Caller must hold read lock or write lock
+     *  @return 0 to max-1 or -1 for us
+     */
+    private int pickBucket(int range) {
+        // If B is small, a linear search from back to front
+        // is most efficient since most of the keys are at the end...
+        // If B is larger, there are a lot of sub-buckets
+        // of equal size to be checked so a binary search is better
+        if (B_VALUE <= 3) {
+            for (int i = _buckets.size() - 1; i >= 0; i--) {
+                KBucket b = _buckets.get(i);
+                if (range >= b.getRangeBegin() && range <= b.getRangeEnd())
+                    return i;
+            }
+            return -1;
+        } else {
+            KBucket dummy = new DummyBucket(range);
+            return Collections.binarySearch(_buckets, dummy, new BucketComparator());
+        }
+    }
+
+    private List<KBucket> createBuckets() {
+        // just an initial size
+        List<KBucket> buckets = new ArrayList(4 * B_FACTOR);
+        buckets.add(createBucket(0, NUM_BUCKETS - 1));
+        return buckets;
+    }
+    
+    private KBucket createBucket(int start, int end) {
+        if (end - start >= B_FACTOR &&
+            (((end + 1) & B_FACTOR - 1) != 0 ||
+             (start & B_FACTOR - 1) != 0))
+            throw new IllegalArgumentException("Sub-bkt crosses K-bkt boundary: " + start + '-' + end);
+        KBucket bucket = new KBucketImpl(_context, start, end, BUCKET_SIZE, _trimmer);
+        return bucket;
+    }
+    
+    /**
+     *  The number of bits minus 1 (range number) for the xor of the key.
+     *  Package private for testing only. Others shouldn't need this.
+     *  @return 0 to max-1 or -1 for us
+     */
+    int getRange(T key) {
+        return _rangeCalc.getRange(key);
+    }
+    
+    /**
+     *  For every bucket that hasn't been updated in this long,
+     *  generate a random key that would be a member of that bucket.
+     *  The returned keys may be searched for to "refresh" the buckets.
+     *  @return non-null, closest first
+     */
+    public List<T> getExploreKeys(long age) {
+        List<T> rv = new ArrayList(_buckets.size());
+        long old = _context.clock().now() - age;
+        getReadLock();
+        try {
+            for (KBucket b : _buckets) {
+                if (b.getLastChanged() < old)
+                    rv.add(generateRandomKey(b));
+            }
+        } finally { releaseReadLock(); }
+        return rv;
+    }
+    
+    /**
+     *  Generate a random key to go within this bucket
+     *  Package private for testing only. Others shouldn't need this.
+     */
+    T generateRandomKey(KBucket bucket) {
+        int begin = bucket.getRangeBegin();
+        int end = bucket.getRangeEnd();
+        // number of fixed bits, out of B_VALUE - 1 bits
+        int fixed = 0;
+        int bsz = 1 + end - begin;
+        // compute fixed = B_VALUE - log2(bsz)
+        // e.g for B=4, B_FACTOR=8, sz 4-> fixed 1, sz 2->fixed 2, sz 1 -> fixed 3
+        while (bsz < B_FACTOR) {
+            fixed++;
+            bsz <<= 1;
+        }
+        int fixedBits = 0;
+        if (fixed > 0) {
+            // 0x01, 03, 07, 0f, ...
+            int mask = (1 << fixed) - 1;
+            // fixed bits masked from begin
+            fixedBits = (begin >> (B_VALUE - (fixed + 1))) & mask;
+        }
+        int obegin = begin;
+        int oend = end;
+        begin >>= (B_VALUE - 1);
+        end >>= (B_VALUE - 1);
+        // we need randomness for [0, begin) bits
+        BigInteger variance;
+        // 00000000rrrr
+        if (begin > 0)
+            variance = new BigInteger(begin - fixed, _context.random());
+        else
+            variance = BigInteger.ZERO;
+        // we need nonzero randomness for [begin, end] bits
+        int numNonZero = 1 + end - begin;
+        if (numNonZero == 1) {
+            // 00001000rrrr
+            variance = variance.setBit(begin);
+            // fixed bits as the 'main' bucket is split
+            // 00001fffrrrr
+            if (fixed > 0)
+                variance = variance.or(BigInteger.valueOf(fixedBits).shiftLeft(begin - fixed));
+        } else {
+            // dont span main bucket boundaries with depth > 1
+            if (fixed > 0)
+                throw new IllegalStateException("WTF " + bucket);
+            BigInteger nonz;
+            if (numNonZero <= 62) {
+                // add one to ensure nonzero
+                long nz = 1 + _context.random().nextLong((1L << numNonZero) - 1);
+                nonz = BigInteger.valueOf(nz);
+            } else {
+                // loop to ensure nonzero
+                do {
+                    nonz = new BigInteger(numNonZero, _context.random());
+                } while (nonz.equals(BigInteger.ZERO));
+            }
+            // shift left and or-in the nonzero randomness
+            if (begin > 0)
+                nonz = nonz.shiftLeft(begin);
+            // 0000nnnnrrrr
+            variance = variance.or(nonz);
+        }
+
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("SB(" + obegin + ',' + oend + ") KB(" + begin + ',' + end + ") fixed=" + fixed + " fixedBits=" + fixedBits + " numNonZ=" + numNonZero);
+        byte data[] = variance.toByteArray();
+        T key = makeKey(data);
+        byte[] hash = DataHelper.xor(key.getData(), _us.getData());
+        T rv = makeKey(hash);
+
+        // DEBUG
+        //int range = getRange(rv);
+        //if (range < obegin || range > oend) {
+        //    throw new IllegalStateException("Generate random key failed range=" + range + " for " + rv + " meant for bucket " + bucket);
+        //}
+
+        return rv;
+    }
+    
+    /**
+     *  Make a new SimpleDataStructure from the data
+     *  @param data size <= SDS length, else throws IAE
+     *              Can be 1 bigger if top byte is zero
+     */
+    private T makeKey(byte[] data) {
+        int len = _us.length();
+        int dlen = data.length;
+        if (dlen > len + 1 ||
+            (dlen == len + 1 && data[0] != 0))
+            throw new IllegalArgumentException("bad length " + dlen + " > " + len);
+        T rv;
+        try {
+            rv = (T) _us.getClass().newInstance();
+        } catch (Exception e) {
+            _log.error("fail", e);
+            throw new RuntimeException(e);
+        }
+        if (dlen == len) {
+            rv.setData(data);
+        } else {
+            byte[] ndata = new byte[len];
+            if (dlen == len + 1) {
+                // one bigger
+                System.arraycopy(data, 1, ndata, 0, len);
+            } else {
+                // smaller
+                System.arraycopy(data, 0, ndata, len - dlen, dlen);
+            }
+            rv.setData(ndata);
+        }
+        return rv;
+    }
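+
+    // Example (illustrative): BigInteger.toByteArray() may prepend a zero sign byte,
+    // so a 21-byte array for a 20-byte SHA1 key is accepted and the leading zero is
+    // dropped; shorter arrays are right-aligned (zero-padded on the left).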
+
+    private static class Range<T extends SimpleDataStructure> {
+        private final int _bValue;
+        private final BigInteger _bigUs;
+        private final Map<T, Integer> _distanceCache;
+
+        public Range(T us, int bValue) {
+            _bValue = bValue;
+            _bigUs = new BigInteger(1, us.getData());
+            _distanceCache = new LHM(256);
+        }
+
+        /** @return 0 to max-1 or -1 for us */
+        public int getRange(T key) {
+            Integer rv;
+            synchronized (_distanceCache) {
+                rv = _distanceCache.get(key);
+                if (rv == null) {
+                    // easy way when _bValue == 1
+                    //rv = Integer.valueOf(_bigUs.xor(new BigInteger(1, key.getData())).bitLength() - 1);
+                    BigInteger xor = _bigUs.xor(new BigInteger(1, key.getData()));
+                    int range = xor.bitLength() - 1;
+                    if (_bValue > 1) {
+                        int toShift = range + 1 - _bValue;
+                        int highbit = range;
+                        range <<= _bValue - 1;
+                        if (toShift >= 0) {
+                            int extra = xor.clearBit(highbit).shiftRight(toShift).intValue();
+                            range += extra;
+                            //Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketSet.class);
+                            //if (log.shouldLog(Log.DEBUG))
+                            //    log.debug("highbit " + highbit + " toshift " + toShift + " extra " + extra + " new " + range);
+                        }
+                    }
+                    rv = Integer.valueOf(range);
+                    _distanceCache.put(key, rv);
+                }
+            }
+            return rv.intValue();
+        }
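+
+        // Worked example (illustrative): for bValue = 2 and xor = 0b1101, the high
+        // bit is 3 and the next-lower bit is 1, so the range is (3 << 1) + 1 = 7.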
+
+        public void clear() {
+            synchronized (_distanceCache) {
+                _distanceCache.clear();
+            }
+        }
+    }
+
+    private static class LHM<K, V> extends LinkedHashMap<K, V> {
+        private final int _max;
+
+        public LHM(int max) {
+            super(max, 0.75f, true);
+            _max = max;
+        }
+
+        @Override
+        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
+            return size() > _max;
+        }
+    }
+
+    /**
+     *  For Collections.binarySearch.
+     *  getRangeBegin == getRangeEnd.
+     */
+    private static class DummyBucket<T extends SimpleDataStructure> implements KBucket<T> {
+        private final int r;
+
+        public DummyBucket(int range) {
+            r = range;
+        }
+
+        public int getRangeBegin() { return r; }
+        public int getRangeEnd() { return r; }
+
+        public int getKeyCount() {
+            return 0;
+        }
+
+        public Set<T> getEntries() {
+            throw new UnsupportedOperationException();
+        }
+
+        public void getEntries(SelectionCollector<T> collector) {
+            throw new UnsupportedOperationException();
+        }
+    
+        public void clear() {}
+
+        public boolean add(T peer) {
+            throw new UnsupportedOperationException();
+        }
+    
+        public boolean remove(T peer) {
+            return false;
+        }
+
+        public void setLastChanged() {}
+
+        public long getLastChanged() {
+            return 0;
+        }
+    }
+
+    /**
+     *  For Collections.binarySearch.
+     *  Returns equal for any overlap.
+     */
+    private static class BucketComparator implements Comparator<KBucket> {
+        public int compare(KBucket l, KBucket r) {
+            if (l.getRangeEnd() < r.getRangeBegin())
+                return -1;
+            if (l.getRangeBegin() > r.getRangeEnd())
+                return 1;
+            return 0;
+        }
+    }
+
+    /**
+     *  Called when a kbucket can no longer be split and is too big
+     */
+    public interface KBucketTrimmer<K extends SimpleDataStructure> {
+        /**
+         *  Called from add() just before adding the entry.
+         *  You may call getEntries() and/or remove() from here.
+         *  Do NOT call add().
+         *  To always discard the newer entry, simply return false.
+         *
+         *  @param kbucket the kbucket that is now too big
+         *  @return true to actually add the entry.
+         */
+        public boolean trim(KBucket<K> kbucket, K toAdd);
+    }
+
+    /**
+     *  Removes a random element. Not resistant to flooding.
+     */
+    public static class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
+        protected final I2PAppContext _ctx;
+        private final int _max;
+
+        public RandomTrimmer(I2PAppContext ctx, int max) {
+            _ctx = ctx;
+            _max = max;
+        }
+
+        public boolean trim(KBucket<T> kbucket, T toAdd) {
+            List<T> e = new ArrayList(kbucket.getEntries());
+            int sz = e.size();
+            // concurrency
+            if (sz < _max)
+                return true;
+            T toRemove = e.get(_ctx.random().nextInt(sz));
+            return kbucket.remove(toRemove);
+        }
+    }
+
+    /**
+     *  Removes a random element, but only if the bucket hasn't changed in 5 minutes.
+     */
+    public static class RandomIfOldTrimmer<T extends SimpleDataStructure> extends RandomTrimmer<T> {
+
+        public RandomIfOldTrimmer(I2PAppContext ctx, int max) {
+            super(ctx, max);
+        }
+
+        public boolean trim(KBucket<T> kbucket, T toAdd) {
+            if (kbucket.getLastChanged() > _ctx.clock().now() - 5*60*1000)
+                return false;
+            return super.trim(kbucket, toAdd);
+        }
+    }
+
+    /**
+     *  Removes nothing and always rejects the add. Flood resistant.
+     */
+    public static class RejectTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
+        public boolean trim(KBucket<T> kbucket, T toAdd) {
+            return false;
+        }
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(1024);
+        buf.append("Bucket set rooted on: ").append(_us.toString())
+           .append(" K= ").append(BUCKET_SIZE)
+           .append(" B= ").append(B_VALUE)
+           .append(" with ").append(size())
+           .append(" keys in ").append(_buckets.size()).append(" buckets:\n");
+        getReadLock();
+        try {
+            int len = _buckets.size();
+            for (int i = 0; i < len; i++) {
+                KBucket b = _buckets.get(i);
+                buf.append("* Bucket ").append(i).append("/").append(len).append(": ");
+                buf.append(b.toString()).append("\n");
+            }
+        } finally { releaseReadLock(); }
+        return buf.toString();
+    }
+}
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java b/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
new file mode 100644
index 0000000000000000000000000000000000000000..8d4b9972aef0f82024185e4c437a23adba7e8cc7
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
@@ -0,0 +1,10 @@
+package net.i2p.kademlia;
+
+import net.i2p.data.SimpleDataStructure;
+
+/**
+ * Visit kbuckets, gathering matches
+ */
+public interface SelectionCollector<T extends SimpleDataStructure> {
+    public void add(T entry);
+}
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java b/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
new file mode 100644
index 0000000000000000000000000000000000000000..d11823f49bf24ce9af634c8b1afd7d93daccb4c2
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
@@ -0,0 +1,27 @@
+package net.i2p.kademlia;
+
+import java.util.Comparator;
+
+import net.i2p.data.DataHelper;
+import net.i2p.data.SimpleDataStructure;
+
+/**
+ * Help sort Hashes in relation to a base key using the XOR metric
+ *
+ */
+class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
+    private final byte[] _base;
+
+    /**
+     * @param target key to compare distances with
+     */
+    public XORComparator(T target) {
+        _base = target.getData();
+    }
+
+    public int compare(T lhs, T rhs) {
+        byte lhsDelta[] = DataHelper.xor(lhs.getData(), _base);
+        byte rhsDelta[] = DataHelper.xor(rhs.getData(), _base);
+        return DataHelper.compareTo(lhsDelta, rhsDelta);
+    }
+}
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/package.html b/apps/i2psnark/java/src/net/i2p/kademlia/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..fe1a24f43b83250a19c4db6edfc10ca1b7637709
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/package.html
@@ -0,0 +1,6 @@
+<html><body><p>
+This is a major rewrite of KBucket, KBucketSet, and KBucketImpl from net.i2p.router.networkdb.kademlia.
+The classes are now generic to support SHA1, SHA256, or other key lengths.
+The long-term goal is to prove out this new implementation in i2psnark,
+then move it to core, then convert the network database to use it.
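+A minimal usage sketch follows (illustrative only; the key type and the parameters
+k=8, b=1 are assumptions, not mandated by this package):
+<pre>
+    KBucketSet&lt;SHA1Hash&gt; kbs = new KBucketSet&lt;SHA1Hash&gt;(ctx, myKey, 8, 1);
+    kbs.add(peerKey);
+    List&lt;SHA1Hash&gt; closest = kbs.getClosest(targetKey, 8);
+</pre>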
+</p></body></html>
diff --git a/apps/i2psnark/java/src/org/klomp/snark/ExtensionHandler.java b/apps/i2psnark/java/src/org/klomp/snark/ExtensionHandler.java
index 9f81e0156d0a07f7975740be680cc1e4c12f0116..c02b948fe3bcdfeb73ed7b0a315f959f1c465d27 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/ExtensionHandler.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/ExtensionHandler.java
@@ -28,6 +28,9 @@ abstract class ExtensionHandler {
     public static final int ID_PEX = 2;
     /** not ut_pex since the compact format is different */
     public static final String TYPE_PEX = "i2p_pex";
+    public static final int ID_DHT = 3;
+    /** not using the option bit since the compact format is different */
+    public static final String TYPE_DHT = "i2p_dht";
     /** Pieces * SHA1 Hash length, + 25% extra for file names, benconding overhead, etc */
     private static final int MAX_METADATA_SIZE = Storage.MAX_PIECES * 20 * 5 / 4;
     private static final int PARALLEL_REQUESTS = 3;
@@ -36,9 +39,10 @@ abstract class ExtensionHandler {
   /**
    *  @param metasize -1 if unknown
    *  @param pexAndMetadata advertise these capabilities
+   *  @param dht advertise DHT capability
    *  @return bencoded outgoing handshake message
    */
-    public static byte[] getHandshake(int metasize, boolean pexAndMetadata) {
+    public static byte[] getHandshake(int metasize, boolean pexAndMetadata, boolean dht) {
         Map<String, Object> handshake = new HashMap();
         Map<String, Integer> m = new HashMap();
         if (pexAndMetadata) {
@@ -47,6 +51,9 @@ abstract class ExtensionHandler {
             if (metasize >= 0)
                 handshake.put("metadata_size", Integer.valueOf(metasize));
         }
+        if (dht) {
+            m.put(TYPE_DHT, Integer.valueOf(ID_DHT));
+        }
         // include the map even if empty so the far-end doesn't NPE
         handshake.put("m", m);
         handshake.put("p", Integer.valueOf(6881));
@@ -65,6 +72,8 @@ abstract class ExtensionHandler {
             handleMetadata(peer, listener, bs, log);
         else if (id == ID_PEX)
             handlePEX(peer, listener, bs, log);
+        else if (id == ID_DHT)
+            handleDHT(peer, listener, bs, log);
         else if (log.shouldLog(Log.INFO))
             log.info("Unknown extension msg " + id + " from " + peer);
     }
@@ -87,6 +96,12 @@ abstract class ExtensionHandler {
                 // peer state calls peer listener calls sendPEX()
             }
 
+            if (msgmap.get(TYPE_DHT) != null) {
+                if (log.shouldLog(Log.DEBUG))
+                    log.debug("Peer supports DHT extension: " + peer);
+                // peer state calls peer listener calls sendDHT()
+            }
+
             MagnetState state = peer.getMagnetState();
 
             if (msgmap.get(TYPE_METADATA) == null) {
@@ -335,6 +350,28 @@ abstract class ExtensionHandler {
         }
     }
 
+    /**
+     * Receive the DHT port numbers
+     * @since DHT
+     */
+    private static void handleDHT(Peer peer, PeerListener listener, byte[] bs, Log log) {
+        if (log.shouldLog(Log.DEBUG))
+            log.debug("Got DHT msg from " + peer);
+        try {
+            InputStream is = new ByteArrayInputStream(bs);
+            BDecoder dec = new BDecoder(is);
+            BEValue bev = dec.bdecodeMap();
+            Map<String, BEValue> map = bev.getMap();
+            int qport = map.get("port").getInt();
+            int rport = map.get("rport").getInt();
+            listener.gotPort(peer, qport, rport);
+        } catch (Exception e) {
+            if (log.shouldLog(Log.INFO))
+                log.info("DHT msg exception from " + peer, e);
+            //peer.disconnect(false);
+        }
+    }
+
     /**
      * added.f and dropped unsupported
      * @param pList non-null
@@ -362,4 +399,22 @@ abstract class ExtensionHandler {
         }
     }
 
+    /**
+     *  Send the DHT port numbers
+     *  @since DHT
+     */
+    public static void sendDHT(Peer peer, int qport, int rport) {
+        Map<String, Object> map = new HashMap();
+        map.put("port", Integer.valueOf(qport));
+        map.put("rport", Integer.valueOf(rport));
+        byte[] payload = BEncoder.bencode(map);
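+        // Illustrative wire format (example ports): {"port": 6881, "rport": 6882}
+        // bencodes to d4:porti6881e5:rporti6882ee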
+        try {
+            int hisMsgCode = peer.getHandshakeMap().get("m").getMap().get(TYPE_DHT).getInt();
+            peer.sendExtension(hisMsgCode, payload);
+        } catch (Exception e) {
+            // NPE, no DHT caps
+            //if (log.shouldLog(Log.INFO))
+            //    log.info("DHT msg exception to " + peer, e);
+        }
+    }
 }
diff --git a/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java b/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java
index e6a527135f0c86cac8fa27db785faffaff4c30f1..d0ac03092a7d5e9f45b54e2eac14c547a2c7e78e 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java
@@ -37,7 +37,7 @@ import net.i2p.util.SimpleTimer;
 import net.i2p.util.Translate;
 
 import org.klomp.snark.dht.DHT;
-//import org.klomp.snark.dht.KRPC;
+import org.klomp.snark.dht.KRPC;
 
 /**
  * I2P specific helpers for I2PSnark
@@ -65,6 +65,7 @@ public class I2PSnarkUtil {
     private final File _tmpDir;
     private int _startupDelay;
     private boolean _shouldUseOT;
+    private boolean _shouldUseDHT;
     private boolean _areFilesPublic;
     private List<String> _openTrackers;
     private DHT _dht;
@@ -77,7 +78,7 @@ public class I2PSnarkUtil {
     public static final int DEFAULT_MAX_UP_BW = 8;  //KBps
     public static final int MAX_CONNECTIONS = 16; // per torrent
     public static final String PROP_MAX_BW = "i2cp.outboundBytesPerSecond";
-    //private static final boolean ENABLE_DHT = true;
+    public static final boolean DEFAULT_USE_DHT = false;
 
     public I2PSnarkUtil(I2PAppContext ctx) {
         _context = ctx;
@@ -94,6 +95,7 @@ public class I2PSnarkUtil {
         _shouldUseOT = DEFAULT_USE_OPENTRACKERS;
         // FIXME split if default has more than one
         _openTrackers = Collections.singletonList(DEFAULT_OPENTRACKERS);
+        _shouldUseDHT = DEFAULT_USE_DHT;
         // This is used for both announce replies and .torrent file downloads,
         // so it must be available even if not connected to I2CP.
         // so much for multiple instances
@@ -241,12 +243,14 @@ public class I2PSnarkUtil {
                 opts.setProperty("i2p.streaming.maxTotalConnsPerMinute", "8");
             if (opts.getProperty("i2p.streaming.maxConnsPerHour") == null)
                 opts.setProperty("i2p.streaming.maxConnsPerHour", "20");
+            if (opts.getProperty("i2p.streaming.enforceProtocol") == null)
+                opts.setProperty("i2p.streaming.enforceProtocol", "true");
             _manager = I2PSocketManagerFactory.createManager(_i2cpHost, _i2cpPort, opts);
             _connecting = false;
         }
         // FIXME this only instantiates krpc once, left stuck with old manager
-        //if (ENABLE_DHT && _manager != null && _dht == null)
-        //    _dht = new KRPC(_context, _manager.getSession());
+        if (_shouldUseDHT && _manager != null && _dht == null)
+            _dht = new KRPC(_context, _manager.getSession());
         return (_manager != null);
     }
     
@@ -273,7 +277,11 @@ public class I2PSnarkUtil {
     /**
      * Destroy the destination itself
      */
-    public void disconnect() {
+    public synchronized void disconnect() {
+        if (_dht != null) {
+            _dht.stop();
+            _dht = null;
+        }
         I2PSocketManager mgr = _manager;
         // FIXME this can cause race NPEs elsewhere
         _manager = null;
@@ -447,7 +455,8 @@ public class I2PSnarkUtil {
                     if (sess != null) {
                         byte[] b = Base32.decode(ip.substring(0, BASE32_HASH_LENGTH));
                         if (b != null) {
-                            Hash h = new Hash(b);
+                            //Hash h = new Hash(b);
+                            Hash h = Hash.create(b);
                             if (_log.shouldLog(Log.INFO))
                                 _log.info("Using existing session for lookup of " + ip);
                             try {
@@ -522,6 +531,22 @@ public class I2PSnarkUtil {
     public boolean shouldUseOpenTrackers() {
         return _shouldUseOT;
     }
+    
+    /** @since DHT */
+    public synchronized void setUseDHT(boolean yes) {
+        _shouldUseDHT = yes;
+        if (yes && _manager != null && _dht == null) {
+            _dht = new KRPC(_context, _manager.getSession());
+        } else if (!yes && _dht != null) {
+            _dht.stop();
+            _dht = null;
+        }
+    }
+
+    /** @since DHT */
+    public boolean shouldUseDHT() {
+        return _shouldUseDHT;
+    }
 
     /**
      *  Like DataHelper.toHexString but ensures no loss of leading zero bytes
diff --git a/apps/i2psnark/java/src/org/klomp/snark/Peer.java b/apps/i2psnark/java/src/org/klomp/snark/Peer.java
index 02fb635605a0d2e87789cbb8113a23c39b87fd41..8f33c01a728bfcb747b6b5a6ac2219a4008bed9d 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/Peer.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/Peer.java
@@ -80,7 +80,9 @@ public class Peer implements Comparable
   static final long OPTION_FAST      = 0x0000000000000004l;
   static final long OPTION_DHT       = 0x0000000000000001l;
   /** we use a different bit since the compact format is different */
+/* no, let's use an extension message
   static final long OPTION_I2P_DHT   = 0x0000000040000000l;
+*/
   static final long OPTION_AZMP      = 0x1000000000000000l;
   private long options;
 
@@ -269,15 +271,17 @@ public class Peer implements Comparable
                 _log.debug("Peer supports extensions, sending reply message");
             int metasize = metainfo != null ? metainfo.getInfoBytes().length : -1;
             boolean pexAndMetadata = metainfo == null || !metainfo.isPrivate();
-            out.sendExtension(0, ExtensionHandler.getHandshake(metasize, pexAndMetadata));
+            boolean dht = util.getDHT() != null;
+            out.sendExtension(0, ExtensionHandler.getHandshake(metasize, pexAndMetadata, dht));
         }
 
-        if ((options & OPTION_I2P_DHT) != 0 && util.getDHT() != null) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Peer supports DHT, sending PORT message");
-            int port = util.getDHT().getPort();
-            out.sendPort(port);
-        }
+        // Old DHT PORT message
+        //if ((options & OPTION_I2P_DHT) != 0 && util.getDHT() != null) {
+        //    if (_log.shouldLog(Log.DEBUG))
+        //        _log.debug("Peer supports DHT, sending PORT message");
+        //    int port = util.getDHT().getPort();
+        //    out.sendPort(port);
+        //}
 
         // Send our bitmap
         if (bitfield != null)
diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerAcceptor.java b/apps/i2psnark/java/src/org/klomp/snark/PeerAcceptor.java
index e8b123373560865d6504ea9d34e3fbbb5261b31b..ee651ed855e2fb7641dd2a8821fe6c1a240766f8 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/PeerAcceptor.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/PeerAcceptor.java
@@ -117,9 +117,8 @@ public class PeerAcceptor
         }
     } else {
         // multitorrent capable, so lets see what we can handle
-        for (Iterator iter = coordinators.iterator(); iter.hasNext(); ) {
-            PeerCoordinator cur = (PeerCoordinator)iter.next();
-
+        PeerCoordinator cur = coordinators.get(peerInfoHash);
+        if (cur != null) {
             if (DataHelper.eq(cur.getInfoHash(), peerInfoHash)) {
                 if (cur.needPeers())
                   {
diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerCheckerTask.java b/apps/i2psnark/java/src/org/klomp/snark/PeerCheckerTask.java
index ac53f38b63c50df2e95ed5f1c6b757ed553a2204..2bb9feb5f531078d218218072d6bee443e2b7f5f 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/PeerCheckerTask.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/PeerCheckerTask.java
@@ -28,6 +28,8 @@ import net.i2p.I2PAppContext;
 import net.i2p.data.DataHelper;
 import net.i2p.util.Log;
 
+import org.klomp.snark.dht.DHT;
+
 /**
  * TimerTask that checks for good/bad up/downloader. Works together
  * with the PeerCoordinator to select which Peers get (un)choked.
@@ -74,6 +76,7 @@ class PeerCheckerTask implements Runnable
         List<Peer> removed = new ArrayList();
         int uploadLimit = coordinator.allowedUploaders();
         boolean overBWLimit = coordinator.overUpBWLimit();
+        DHT dht = _util.getDHT();
         for (Peer peer : peerList) {
 
             // Remove dying peers
@@ -218,8 +221,8 @@ class PeerCheckerTask implements Runnable
             if (coordinator.getNeededLength() > 0 || !peer.isCompleted())
                 peer.keepAlive();
             // announce them to local tracker (TrackerClient does this too)
-            if (_util.getDHT() != null && (_runCount % 5) == 0) {
-                _util.getDHT().announce(coordinator.getInfoHash(), peer.getPeerID().getDestHash());
+            if (dht != null && (_runCount % 5) == 0) {
+                dht.announce(coordinator.getInfoHash(), peer.getPeerID().getDestHash());
             }
           }
 
@@ -267,8 +270,8 @@ class PeerCheckerTask implements Runnable
         }
 
         // announce ourselves to local tracker (TrackerClient does this too)
-        if (_util.getDHT() != null && (_runCount % 16) == 0) {
-            _util.getDHT().announce(coordinator.getInfoHash());
+        if (dht != null && (_runCount % 16) == 0) {
+            dht.announce(coordinator.getInfoHash());
         }
   }
 }
diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinator.java b/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinator.java
index 80bd38bd99c07488cc87e50a08987ddd490afbbd..e06657e0d3890a97842b525c2f6eca41c0cf05be 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinator.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinator.java
@@ -1287,6 +1287,7 @@ class PeerCoordinator implements PeerListener
           }
       } else if (id == ExtensionHandler.ID_HANDSHAKE) {
           sendPeers(peer);
+          sendDHT(peer);
       }
   }
 
@@ -1315,6 +1316,26 @@ class PeerCoordinator implements PeerListener
       } catch (InvalidBEncodingException ibee) {}
   }
 
+  /**
+   *  Send a DHT message to the peer, if we both support DHT.
+   *  @since DHT
+   */
+  void sendDHT(Peer peer) {
+      DHT dht = _util.getDHT();
+      if (dht == null)
+          return;
+      Map<String, BEValue> handshake = peer.getHandshakeMap();
+      if (handshake == null)
+          return;
+      BEValue bev = handshake.get("m");
+      if (bev == null)
+          return;
+      try {
+          if (bev.getMap().get(ExtensionHandler.TYPE_DHT) != null)
+              ExtensionHandler.sendDHT(peer, dht.getPort(), dht.getRPort());
+      } catch (InvalidBEncodingException ibee) {}
+  }
+
   /**
    *  Sets the storage after transition out of magnet mode
    *  Snark calls this after we call gotMetaInfo()
@@ -1332,11 +1353,13 @@ class PeerCoordinator implements PeerListener
   /**
    *  PeerListener callback
    *  Tell the DHT to ping it, this will get back the node info
+   *  @param rport must be port + 1
    *  @since 0.8.4
    */
-  public void gotPort(Peer peer, int port) {
+  public void gotPort(Peer peer, int port, int rport) {
       DHT dht = _util.getDHT();
-      if (dht != null)
+      if (dht != null &&
+          port > 0 && port < 65535 && rport == port + 1)
           dht.ping(peer.getDestination(), port);
   }
 
diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinatorSet.java b/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinatorSet.java
index de367c4d4b86dd6f2493304327cbcbab09d92767..7b1347bf7907635389249ef704752cdb8252784c 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinatorSet.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/PeerCoordinatorSet.java
@@ -1,9 +1,10 @@
 package org.klomp.snark;
 
-import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.Iterator;
-import java.util.Set;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.i2p.crypto.SHA1Hash;
 
 /**
  * Hmm, any guesses as to what this is?  Used by the multitorrent functionality
@@ -12,26 +13,28 @@ import java.util.Set;
  * from it there too)
  */
 public class PeerCoordinatorSet {
-    private final Set _coordinators;
+    private final Map<SHA1Hash, PeerCoordinator> _coordinators;
     
     public PeerCoordinatorSet() {
-        _coordinators = new HashSet();
+        _coordinators = new ConcurrentHashMap();
     }
-    
-    public Iterator iterator() {
-        synchronized (_coordinators) {
-            return new ArrayList(_coordinators).iterator();
-        }
+     
+    public Iterator<PeerCoordinator> iterator() {
+        return _coordinators.values().iterator();
     }
-    
+
     public void add(PeerCoordinator coordinator) {
-        synchronized (_coordinators) {
-            _coordinators.add(coordinator);
-        }
+        _coordinators.put(new SHA1Hash(coordinator.getInfoHash()), coordinator);
     }
+
     public void remove(PeerCoordinator coordinator) {
-        synchronized (_coordinators) {
-            _coordinators.remove(coordinator);
-        }
+        _coordinators.remove(new SHA1Hash(coordinator.getInfoHash()));
+    }
+
+    /**
+     *  @since 0.9.2
+     */
+    public PeerCoordinator get(byte[] infoHash) {
+        return _coordinators.get(new SHA1Hash(infoHash));
     }
 }
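
PeerCoordinatorSet now keys its coordinators by info hash in a ConcurrentHashMap instead of keeping them in a synchronized HashSet, which is what makes the new get(byte[]) lookup possible. A raw byte[] cannot serve as the key because arrays compare by identity, so the info hash is wrapped in SHA1Hash, which supplies value-based equals() and hashCode(). A small illustration of why such a wrapper is needed (HashKey is a hypothetical stand-in for net.i2p.crypto.SHA1Hash):

```java
import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustration: byte[] keys compare by identity, so two equal info hashes
// would occupy two different map entries; a value-based wrapper fixes that.
final class HashKey {
    private final byte[] data;

    HashKey(byte[] data) { this.data = data.clone(); }

    @Override
    public boolean equals(Object o) {
        return o instanceof HashKey && Arrays.equals(data, ((HashKey) o).data);
    }

    @Override
    public int hashCode() { return Arrays.hashCode(data); }

    public static void main(String[] args) {
        ConcurrentMap<HashKey, String> map = new ConcurrentHashMap<HashKey, String>();
        byte[] ih = new byte[] { 1, 2, 3 };
        map.put(new HashKey(ih), "coordinator");
        // An equal but distinct array still finds the same entry.
        System.out.println(map.get(new HashKey(new byte[] { 1, 2, 3 })));
    }
}
```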
diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerListener.java b/apps/i2psnark/java/src/org/klomp/snark/PeerListener.java
index f573d445528745feef1bbc54a632020d7e5a5ade..8b272bf81663f0f79e8c266250c0de3e2fcd35a1 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/PeerListener.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/PeerListener.java
@@ -190,13 +190,14 @@ interface PeerListener
   void gotExtension(Peer peer, int id, byte[] bs);
 
   /**
-   * Called when a port message is received.
+   * Called when a DHT port message is received.
    *
    * @param peer the Peer that got the message.
-   * @param port the port
+   * @param port the query port
+   * @param rport the response port
    * @since 0.8.4
    */
-  void gotPort(Peer peer, int port);
+  void gotPort(Peer peer, int port, int rport);
 
   /**
    * Called when peers are received via PEX
diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerState.java b/apps/i2psnark/java/src/org/klomp/snark/PeerState.java
index d221d978a002ee58f23bed4837840128f52af036..8433242ff35db26ced89cfab6a5910a4b11cc2a9 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/PeerState.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/PeerState.java
@@ -526,10 +526,14 @@ class PeerState implements DataLoader
           setInteresting(true);
   }
 
-  /** @since 0.8.4 */
+  /**
+   *  Unused
+   *  @since 0.8.4
+   */
   void portMessage(int port)
   {
-      listener.gotPort(peer, port);
+      // for compatibility with old DHT PORT message
+      listener.gotPort(peer, port, port + 1);
   }
 
   void unknownMessage(int type, byte[] bs)
diff --git a/apps/i2psnark/java/src/org/klomp/snark/Snark.java b/apps/i2psnark/java/src/org/klomp/snark/Snark.java
index c1f088f6e423a9c6df286b764bb9cb0d5035a576..efb491d318a8be3cfd001cce63043ed09bddad30 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/Snark.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/Snark.java
@@ -230,7 +230,7 @@ public class Snark
   private volatile boolean stopped;
   private volatile boolean starting;
   private byte[] id;
-  private byte[] infoHash;
+  private final byte[] infoHash;
   private String additionalTrackerURL;
   private final I2PSnarkUtil _util;
   private final Log _log;
@@ -321,6 +321,7 @@ public class Snark
     meta = null;
     File f = null;
     InputStream in = null;
+    byte[] x_infoHash = null;
     try
       {
         f = new File(torrent);
@@ -343,7 +344,7 @@ public class Snark
              throw new IOException("not found");
           }
         meta = new MetaInfo(in);
-        infoHash = meta.getInfoHash();
+        x_infoHash = meta.getInfoHash();
       }
     catch(IOException ioe)
       {
@@ -384,6 +385,7 @@ public class Snark
               try { in.close(); } catch (IOException ioe) {}
       }    
 
+    infoHash = x_infoHash;  // final
     if (_log.shouldLog(Log.INFO))
         _log.info(meta.toString());
     
@@ -1210,8 +1212,8 @@ public class Snark
     if (_peerCoordinatorSet == null || uploaders <= 0)
       return false;
     int totalUploaders = 0;
-    for (Iterator iter = _peerCoordinatorSet.iterator(); iter.hasNext(); ) {
-      PeerCoordinator c = (PeerCoordinator)iter.next();
+    for (Iterator<PeerCoordinator> iter = _peerCoordinatorSet.iterator(); iter.hasNext(); ) {
+      PeerCoordinator c = iter.next();
       if (!c.halted())
         totalUploaders += c.uploaders;
     }
@@ -1224,8 +1226,8 @@ public class Snark
     if (_peerCoordinatorSet == null)
       return false;
     long total = 0;
-    for (Iterator iter = _peerCoordinatorSet.iterator(); iter.hasNext(); ) {
-      PeerCoordinator c = (PeerCoordinator)iter.next();
+    for (Iterator<PeerCoordinator> iter = _peerCoordinatorSet.iterator(); iter.hasNext(); ) {
+      PeerCoordinator c = iter.next();
       if (!c.halted())
         total += c.getCurrentUploadRate();
     }
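
In Snark, infoHash becomes a blank final field, but its value is computed inside a try/catch with several exits, so the constructor assigns a temporary x_infoHash and copies it into the final field exactly once after the cleanup block. Java requires a blank final to be definitely assigned exactly once on every constructor path, which this local-then-final pattern satisfies. A hedged sketch of the same idiom with a hypothetical loader:

```java
import java.io.IOException;

// Sketch of the "assign a local, then the blank final once" idiom used for infoHash.
class FinalFieldExample {
    private final byte[] infoHash;

    FinalFieldExample(String torrent) {
        byte[] x = null;               // temporary, may stay null on failure
        try {
            x = load(torrent);         // may throw
        } catch (IOException ioe) {
            // fall through; x stays null to record the failure
        }
        infoHash = x;                  // the single assignment to the blank final field
    }

    private static byte[] load(String torrent) throws IOException {
        if (torrent == null)
            throw new IOException("not found");
        return new byte[20];           // placeholder for the real info hash bytes
    }
}
```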
diff --git a/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java b/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java
index 7fd824a9228587a233e0771020fe64b65622909a..aa04d597d94b0f157827c311eec56767ea450e91 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java
@@ -92,6 +92,7 @@ public class SnarkManager implements Snark.CompleteListener {
     private static final String PROP_USE_OPENTRACKERS = "i2psnark.useOpentrackers";
     public static final String PROP_OPENTRACKERS = "i2psnark.opentrackers";
     public static final String PROP_PRIVATETRACKERS = "i2psnark.privatetrackers";
+    private static final String PROP_USE_DHT = "i2psnark.enableDHT";
 
     public static final int MIN_UP_BW = 2;
     public static final int DEFAULT_MAX_UP_BW = 10;
@@ -290,6 +291,9 @@ public class SnarkManager implements Snark.CompleteListener {
             _config.setProperty(PROP_STARTUP_DELAY, Integer.toString(DEFAULT_STARTUP_DELAY));
         if (!_config.containsKey(PROP_THEME))
             _config.setProperty(PROP_THEME, DEFAULT_THEME);
+        // don't write a default into the config here, so we can switch the default to true later
+        //if (!_config.containsKey(PROP_USE_DHT))
+        //    _config.setProperty(PROP_USE_DHT, Boolean.toString(I2PSnarkUtil.DEFAULT_USE_DHT));
         updateConfig();
     }
     /**
@@ -380,6 +384,9 @@ public class SnarkManager implements Snark.CompleteListener {
         String useOT = _config.getProperty(PROP_USE_OPENTRACKERS);
         boolean bOT = useOT == null || Boolean.valueOf(useOT).booleanValue();
         _util.setUseOpenTrackers(bOT);
+        // careful, so we can switch default to true later
+        _util.setUseDHT(Boolean.valueOf(_config.getProperty(PROP_USE_DHT,
+                                          Boolean.toString(I2PSnarkUtil.DEFAULT_USE_DHT))).booleanValue());
         getDataDir().mkdirs();
         initTrackerMap();
     }
@@ -398,7 +405,7 @@ public class SnarkManager implements Snark.CompleteListener {
     public void updateConfig(String dataDir, boolean filesPublic, boolean autoStart, String refreshDelay,
                              String startDelay, String seedPct, String eepHost, 
                              String eepPort, String i2cpHost, String i2cpPort, String i2cpOpts,
-                             String upLimit, String upBW, boolean useOpenTrackers, String theme) {
+                             String upLimit, String upBW, boolean useOpenTrackers, boolean useDHT, String theme) {
         boolean changed = false;
         //if (eepHost != null) {
         //    // unused, we use socket eepget
@@ -582,6 +589,17 @@ public class SnarkManager implements Snark.CompleteListener {
             _util.setUseOpenTrackers(useOpenTrackers);
             changed = true;
         }
+        if (_util.shouldUseDHT() != useDHT) {
+            _config.setProperty(PROP_USE_DHT, Boolean.toString(useDHT));
+            if (useDHT)
+                addMessage(_("Enabled DHT."));
+            else
+                addMessage(_("Disabled DHT."));
+            if (_util.connected())
+                addMessage(_("DHT change requires tunnel shutdown and reopen"));
+            _util.setUseDHT(useDHT);
+            changed = true;
+        }
         if (theme != null) {
             if(!theme.equals(_config.getProperty(PROP_THEME))) {
                 _config.setProperty(PROP_THEME, theme);
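
Note how the new DHT option is handled: the default is deliberately never written into the config file, and the value is read with a fallback to I2PSnarkUtil.DEFAULT_USE_DHT, so a later change of the shipped default also reaches users who never touched the setting. A minimal sketch of that read-with-default-but-don't-persist pattern (the default value shown is an assumption, not taken from the patch):

```java
import java.util.Properties;

// Sketch: read a boolean option with a code-level default, without persisting
// the default, so flipping DEFAULT_USE_DHT later also affects existing installs.
class DhtConfigExample {
    private static final String PROP_USE_DHT = "i2psnark.enableDHT"; // as in the patch
    private static final boolean DEFAULT_USE_DHT = false;            // assumed default

    static boolean shouldUseDHT(Properties config) {
        // Only an explicit user choice ever ends up in the file; otherwise fall back.
        return Boolean.parseBoolean(
                config.getProperty(PROP_USE_DHT, Boolean.toString(DEFAULT_USE_DHT)));
    }

    static void setUseDHT(Properties config, boolean useDHT) {
        // Persist only when the user actually changes the setting.
        config.setProperty(PROP_USE_DHT, Boolean.toString(useDHT));
    }
}
```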
diff --git a/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java b/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java
index 3ba623199e345809de3dfeaf5c5ed12dfb7855a0..ccb02813405de6150da2802cf3aa56093ec46d1f 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java
@@ -43,6 +43,8 @@ import net.i2p.util.I2PAppThread;
 import net.i2p.util.Log;
 import net.i2p.util.SimpleTimer2;
 
+import org.klomp.snark.dht.DHT;
+
 /**
  * Informs metainfo tracker of events and gets new peers for peer
  * coordinator.
@@ -323,8 +325,9 @@ public class TrackerClient implements Runnable {
             }
 
             // Local DHT tracker announce
-            if (_util.getDHT() != null)
-                _util.getDHT().announce(snark.getInfoHash());
+            DHT dht = _util.getDHT();
+            if (dht != null)
+                dht.announce(snark.getInfoHash());
 
             long uploaded = coordinator.getUploaded();
             long downloaded = coordinator.getDownloaded();
@@ -372,9 +375,10 @@ public class TrackerClient implements Runnable {
                         snark.setTrackerSeenPeers(tr.seenPeers);
 
                     // pass everybody over to our tracker
-                    if (_util.getDHT() != null) {
+                    dht = _util.getDHT();
+                    if (dht != null) {
                         for (Peer peer : peers) {
-                            _util.getDHT().announce(snark.getInfoHash(), peer.getPeerID().getDestHash());
+                            dht.announce(snark.getInfoHash(), peer.getPeerID().getDestHash());
                         }
                     }
 
@@ -458,19 +462,21 @@ public class TrackerClient implements Runnable {
 
             // Get peers from DHT
             // FIXME this needs to be in its own thread
-            if (_util.getDHT() != null && (meta == null || !meta.isPrivate()) && !stop) {
+            dht = _util.getDHT();
+            if (dht != null && (meta == null || !meta.isPrivate()) && !stop) {
                 int numwant;
                 if (event.equals(STOPPED_EVENT) || !coordinator.needOutboundPeers())
                     numwant = 1;
                 else
                     numwant = _util.getMaxConnections();
-                List<Hash> hashes = _util.getDHT().getPeers(snark.getInfoHash(), numwant, 2*60*1000);
+                List<Hash> hashes = dht.getPeers(snark.getInfoHash(), numwant, 2*60*1000);
                 if (_log.shouldLog(Log.INFO))
                     _log.info("Got " + hashes + " from DHT");
                 // announce  ourselves while the token is still good
                 // FIXME this needs to be in its own thread
                 if (!stop) {
-                    int good = _util.getDHT().announce(snark.getInfoHash(), 8, 5*60*1000);
+                    // announce only to the 1 closest
+                    int good = dht.announce(snark.getInfoHash(), 1, 5*60*1000);
                     if (_log.shouldLog(Log.INFO))
                         _log.info("Sent " + good + " good announces to DHT");
                 }
@@ -547,8 +553,9 @@ public class TrackerClient implements Runnable {
    */
   private void unannounce() {
       // Local DHT tracker unannounce
-      if (_util.getDHT() != null)
-          _util.getDHT().unannounce(snark.getInfoHash());
+      DHT dht = _util.getDHT();
+      if (dht != null)
+          dht.unannounce(snark.getInfoHash());
       int i = 0;
       for (Tracker tr : trackers) {
           if (_util.connected() &&
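
TrackerClient now treats the DHT as another peer source: it caches the DHT reference, fetches peers with a bounded wait (which also collects announce tokens), and then announces while those tokens are still fresh, but only to the single closest node. A simplified sketch of one such pass against the DHT interface from this patch (the scheduling and stop handling around it are omitted):

```java
import java.util.Collections;
import java.util.List;

import net.i2p.data.Hash;

import org.klomp.snark.dht.DHT;

// Simplified sketch of one TrackerClient DHT pass: get peers first (which also
// collects announce tokens), then announce only to the single closest node
// while the token is still fresh. Timeouts mirror the values in the patch.
class DhtAnnouncePass {
    static List<Hash> runOnce(DHT dht, byte[] infoHash, int numwant) {
        if (dht == null)
            return Collections.<Hash>emptyList();        // DHT disabled
        // Iterative lookup; blocks for up to 2 minutes.
        List<Hash> peers = dht.getPeers(infoHash, numwant, 2*60*1000);
        // Announce to the 1 closest node, waiting at most 5 minutes in total.
        int good = dht.announce(infoHash, 1, 5*60*1000);
        System.out.println("Sent " + good + " good announces to DHT");
        return peers;
    }
}
```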
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/DHT.java b/apps/i2psnark/java/src/org/klomp/snark/dht/DHT.java
index 6a16e4e605d255d9946068e8d994d64f6b924104..d5642835bd29ada0c98e1e2230b5b590ad574a52 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/dht/DHT.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/DHT.java
@@ -17,10 +17,15 @@ public interface DHT {
 
 
     /**
-     *  @return The UDP port that should be included in a PORT message.
+     *  @return The UDP query port
      */
     public int getPort();
 
+    /**
+     *  @return The UDP response port
+     */
+    public int getRPort();
+
     /**
      *  Ping. We don't have a NID yet so the node is presumed
      *  to be absent from our DHT.
@@ -79,4 +84,14 @@ public interface DHT {
      *  @return the number of successful announces, not counting ourselves.
      */
     public int announce(byte[] ih, int max, long maxWait);
+
+    /**
+     * Stop everything.
+     */
+    public void stop();
+
+    /**
+     * Known nodes, not estimated total network size.
+     */
+    public int size();
 }
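
The interface now exposes both ports because the I2P flavour of BEP 5 uses a signed (repliable) datagram port for queries and a second raw datagram port, always query port + 1, for replies. On the receiving side, PeerCoordinator.gotPort() only accepts a pair that follows this convention. A tiny sketch of that sanity check, extracted into a hypothetical helper:

```java
// Sketch of the sanity check applied when a DHT port pair arrives from a peer:
// the response port must be exactly the query port + 1, and both must fit in
// the valid port range.
final class DhtPortCheck {
    private DhtPortCheck() {}

    /** @return true if (port, rport) is a usable query/response port pair */
    static boolean isValidPortPair(int port, int rport) {
        return port > 0 && port < 65535 && rport == port + 1;
    }

    public static void main(String[] args) {
        System.out.println(isValidPortPair(6881, 6882));   // true
        System.out.println(isValidPortPair(6881, 6881));   // false: not port + 1
        System.out.println(isValidPortPair(65535, 65536)); // false: out of range
    }
}
```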
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/DHTNodes.java b/apps/i2psnark/java/src/org/klomp/snark/dht/DHTNodes.java
new file mode 100644
index 0000000000000000000000000000000000000000..8c37600ec69f4ef90043771ad7b36d25fcc365dd
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/DHTNodes.java
@@ -0,0 +1,161 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.i2p.I2PAppContext;
+import net.i2p.crypto.SHA1Hash;
+import net.i2p.data.DataHelper;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.util.Log;
+import net.i2p.util.SimpleTimer2;
+
+/**
+ *  All the nodes we know about, stored as a mapping from
+ *  node ID to a Destination and Port.
+ *
+ *  And a real Kademlia routing table, which stores node IDs only.
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class DHTNodes {
+
+    private final I2PAppContext _context;
+    private long _expireTime;
+    private final Log _log;
+    private final ConcurrentHashMap<NID, NodeInfo> _nodeMap;
+    private final KBucketSet<NID> _kad;
+    private volatile boolean _isRunning;
+
+    /** stagger with other cleaners */
+    private static final long CLEAN_TIME = 237*1000;
+    private static final long MAX_EXPIRE_TIME = 60*60*1000;
+    private static final long MIN_EXPIRE_TIME = 5*60*1000;
+    private static final long DELTA_EXPIRE_TIME = 7*60*1000;
+    private static final int MAX_PEERS = 999;
+
+    public DHTNodes(I2PAppContext ctx, NID me) {
+        _context = ctx;
+        _expireTime = MAX_EXPIRE_TIME;
+        _log = _context.logManager().getLog(DHTNodes.class);
+        _nodeMap = new ConcurrentHashMap();
+        _kad = new KBucketSet(ctx, me, 8, 1);
+    }
+
+    public void start() {
+        _isRunning = true;
+        new Cleaner();
+    }
+
+    public void stop() {
+        clear();
+        _isRunning = false;
+    }
+
+    // begin ConcurrentHashMap methods
+
+    public int size() {
+        return _nodeMap.size();
+    }
+
+    public void clear() {
+        _kad.clear();
+        _nodeMap.clear();
+    }
+
+    public NodeInfo get(NID nid) {
+        return _nodeMap.get(nid);
+    }
+
+    /**
+     *  @return the old value if present, else null
+     */
+    public NodeInfo putIfAbsent(NodeInfo nInfo) {
+        _kad.add(nInfo.getNID());
+        return _nodeMap.putIfAbsent(nInfo.getNID(), nInfo);
+    }
+
+    public NodeInfo remove(NID nid) {
+        _kad.remove(nid);
+        return _nodeMap.remove(nid);
+    }
+
+    public Collection<NodeInfo> values() {
+        return _nodeMap.values();
+    }
+
+    // end ConcurrentHashMap methods
+
+    /**
+     *  DHT
+     *  @param h either a InfoHash or a NID
+     */
+    public List<NodeInfo> findClosest(SHA1Hash h, int numWant) {
+        NID key;
+        if (h instanceof NID)
+            key = (NID) h;
+        else
+            key = new NID(h.getData());
+        List<NID> keys = _kad.getClosest(key, numWant);
+        List<NodeInfo> rv = new ArrayList(keys.size());
+        for (NID nid : keys) {
+            NodeInfo ninfo = _nodeMap.get(nid);
+            if (ninfo != null)
+                rv.add(ninfo);
+        }
+        return rv;
+    }
+
+    /**
+     *  DHT - get random keys to explore
+     */
+    public List<NID> getExploreKeys() {
+        return _kad.getExploreKeys(15*60*1000);
+    }
+
+    /** Periodically expires stale nodes and adapts the expiration time */
+    private class Cleaner extends SimpleTimer2.TimedEvent {
+
+        public Cleaner() {
+            super(SimpleTimer2.getInstance(), CLEAN_TIME);
+        }
+
+        public void timeReached() {
+            if (!_isRunning)
+                return;
+            long now = _context.clock().now();
+            int peerCount = 0;
+            for (Iterator<NodeInfo> iter = DHTNodes.this.values().iterator(); iter.hasNext(); ) {
+                 NodeInfo peer = iter.next();
+                 if (peer.lastSeen() < now - _expireTime) {
+                     iter.remove();
+                     _kad.remove(peer.getNID());
+                 } else {
+                     peerCount++;
+                }
+            }
+
+            if (peerCount > MAX_PEERS)
+                _expireTime = Math.max(_expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME);
+            else
+                _expireTime = Math.min(_expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
+
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("DHT storage cleaner done, now with " +
+                         peerCount + " peers, " +
+                         DataHelper.formatDuration(_expireTime) + " expiration");
+
+            schedule(CLEAN_TIME);
+        }
+    }
+}
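
The Cleaner above adapts the node-expiration window between 5 and 60 minutes: each pass it shortens the window by 7 minutes when more than MAX_PEERS nodes survived, and lengthens it again otherwise. A worked sketch of just that adjustment, detached from the SimpleTimer2 machinery:

```java
// Sketch of the adaptive expiry used by the DHTNodes cleaner: tighten the
// window when the table is over budget, relax it back toward the maximum
// otherwise. Constants are the ones from the patch.
class AdaptiveExpiry {
    static final long MAX_EXPIRE_TIME = 60*60*1000L;
    static final long MIN_EXPIRE_TIME = 5*60*1000L;
    static final long DELTA_EXPIRE_TIME = 7*60*1000L;
    static final int MAX_PEERS = 999;

    private long expireTime = MAX_EXPIRE_TIME;

    long adjust(int peerCount) {
        if (peerCount > MAX_PEERS)
            expireTime = Math.max(expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME);
        else
            expireTime = Math.min(expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
        return expireTime;
    }

    public static void main(String[] args) {
        AdaptiveExpiry e = new AdaptiveExpiry();
        System.out.println(e.adjust(1500)); // over budget: 3180000 ms (53 min)
        System.out.println(e.adjust(1500)); // still over:  2760000 ms (46 min)
        System.out.println(e.adjust(200));  // back under:  3180000 ms (53 min)
    }
}
```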
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/DHTTracker.java b/apps/i2psnark/java/src/org/klomp/snark/dht/DHTTracker.java
new file mode 100644
index 0000000000000000000000000000000000000000..97fbcef73d997187aafc0133a7ea1666935351f9
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/DHTTracker.java
@@ -0,0 +1,143 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, relicensed to GPLv2
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.DataHelper;
+import net.i2p.data.Hash;
+import net.i2p.util.Log;
+import net.i2p.util.SimpleTimer2;
+
+/**
+ * The tracker stores peers, i.e. Dest hashes (not nodes).
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class DHTTracker {
+
+    private final I2PAppContext _context;
+    private final Torrents _torrents;
+    private long _expireTime;
+    private final Log _log;
+    private volatile boolean _isRunning;
+
+    /** stagger with other cleaners */
+    private static final long CLEAN_TIME = 199*1000;
+    /** make this longer than postman's tracker */
+    private static final long MAX_EXPIRE_TIME = 95*60*1000;
+    private static final long MIN_EXPIRE_TIME = 5*60*1000;
+    private static final long DELTA_EXPIRE_TIME = 7*60*1000;
+    private static final int MAX_PEERS = 2000;
+
+    DHTTracker(I2PAppContext ctx) {
+        _context = ctx;
+        _torrents = new Torrents();
+        _expireTime = MAX_EXPIRE_TIME;
+        _log = _context.logManager().getLog(DHTTracker.class);
+    }
+
+    public void start() {
+        _isRunning = true;
+        new Cleaner();
+    }
+
+    void stop() {
+        _torrents.clear();
+        _isRunning = false;
+    }
+
+    void announce(InfoHash ih, Hash hash) {
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Announce " + hash + " for " + ih);
+        Peers peers = _torrents.get(ih);
+        if (peers == null) {
+            peers = new Peers();
+            Peers peers2 = _torrents.putIfAbsent(ih, peers);
+            if (peers2 != null)
+                peers = peers2;
+        }
+
+        Peer peer = new Peer(hash.getData());
+        Peer peer2 = peers.putIfAbsent(peer, peer);
+        if (peer2 != null)
+            peer = peer2;
+        peer.setLastSeen(_context.clock().now());
+    }
+
+    void unannounce(InfoHash ih, Hash hash) {
+        Peers peers = _torrents.get(ih);
+        if (peers == null)
+            return;
+        Peer peer = new Peer(hash.getData());
+        peers.remove(peer);
+    }
+
+    /**
+     *  Caller's responsibility to remove himself from the list
+     *  @return list or empty list (never null)
+     */
+    List<Hash> getPeers(InfoHash ih, int max) {
+        Peers peers = _torrents.get(ih);
+        if (peers == null)
+            return Collections.EMPTY_LIST;
+
+        int size = peers.size();
+        List<Hash> rv = new ArrayList(peers.values());
+        if (max < size) {
+                Collections.shuffle(rv, _context.random());
+                rv = rv.subList(0, max);
+        }
+        return rv;
+    }
+
+    private class Cleaner extends SimpleTimer2.TimedEvent {
+
+        public Cleaner() {
+            super(SimpleTimer2.getInstance(), CLEAN_TIME);
+        }
+
+        public void timeReached() {
+            if (!_isRunning)
+                return;
+            long now = _context.clock().now();
+            int torrentCount = 0;
+            int peerCount = 0;
+            for (Iterator<Peers> iter = _torrents.values().iterator(); iter.hasNext(); ) {
+                Peers p = iter.next();
+                int recent = 0;
+                for (Iterator<Peer> iterp = p.values().iterator(); iterp.hasNext(); ) {
+                     Peer peer = iterp.next();
+                     if (peer.lastSeen() < now - _expireTime)
+                         iterp.remove();
+                     else {
+                         recent++;
+                         peerCount++;
+                     }
+                }
+                if (recent <= 0)
+                    iter.remove();
+                else
+                    torrentCount++;
+            }
+
+            if (peerCount > MAX_PEERS)
+                _expireTime = Math.max(_expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME);
+            else
+                _expireTime = Math.min(_expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
+
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("DHT tracker cleaner done, now with " +
+                         torrentCount + " torrents, " +
+                         peerCount + " peers, " +
+                         DataHelper.formatDuration(_expireTime) + " expiration");
+            schedule(CLEAN_TIME);
+        }
+    }
+}
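
DHTTracker.announce() relies on the standard ConcurrentHashMap get-or-create idiom: look the torrent up, and on a miss call putIfAbsent() and keep whichever peer set actually won the race, so two concurrent announces for the same info hash never clobber each other. The same idiom is then repeated for the Peer entry itself. A generic sketch (String keys stand in for InfoHash and the peer's Hash):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Sketch of the lock-free get-or-create idiom DHTTracker.announce() relies on:
// on a miss, putIfAbsent() either installs our new map or hands back the one a
// concurrent announcer installed first, so both threads update the same entry.
class GetOrCreateExample {
    private final ConcurrentMap<String, ConcurrentMap<String, Long>> torrents =
            new ConcurrentHashMap<String, ConcurrentMap<String, Long>>();

    void announce(String infoHash, String peer, long now) {
        ConcurrentMap<String, Long> peers = torrents.get(infoHash);
        if (peers == null) {
            ConcurrentMap<String, Long> created = new ConcurrentHashMap<String, Long>();
            ConcurrentMap<String, Long> existing = torrents.putIfAbsent(infoHash, created);
            peers = (existing != null) ? existing : created;
        }
        peers.put(peer, Long.valueOf(now));   // record the peer's last-seen time
    }
}
```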
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/InfoHash.java b/apps/i2psnark/java/src/org/klomp/snark/dht/InfoHash.java
new file mode 100644
index 0000000000000000000000000000000000000000..221d79af424f4411f66b05d2768e899b59b26e73
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/InfoHash.java
@@ -0,0 +1,19 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import net.i2p.crypto.SHA1Hash;
+
+/**
+ *  A 20-byte SHA1 info hash
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class InfoHash extends SHA1Hash {
+
+    public InfoHash(byte[] data) {
+        super(data);
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/KRPC.java b/apps/i2psnark/java/src/org/klomp/snark/dht/KRPC.java
new file mode 100644
index 0000000000000000000000000000000000000000..f892233e4c1eed1245d886b59710fc05ca675fbe
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/KRPC.java
@@ -0,0 +1,1485 @@
+package org.klomp.snark.dht;
+
+/*
+ *  GPLv2
+ */
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.i2p.I2PAppContext;
+import net.i2p.client.I2PClient;
+import net.i2p.client.I2PSession;
+import net.i2p.client.I2PSessionException;
+import net.i2p.client.I2PSessionMuxedListener;
+import net.i2p.client.datagram.I2PDatagramDissector;
+import net.i2p.client.datagram.I2PDatagramMaker;
+import net.i2p.client.datagram.I2PInvalidDatagramException;
+import net.i2p.crypto.SHA1Hash;
+import net.i2p.data.DataFormatException;
+import net.i2p.data.DataHelper;
+import net.i2p.data.Destination;
+import net.i2p.data.Hash;
+import net.i2p.data.SimpleDataStructure;
+import net.i2p.util.I2PAppThread;
+import net.i2p.util.Log;
+import net.i2p.util.SimpleTimer2;
+
+import org.klomp.snark.bencode.BDecoder;
+import org.klomp.snark.bencode.BEncoder;
+import org.klomp.snark.bencode.BEValue;
+import org.klomp.snark.bencode.InvalidBEncodingException;
+
+
+/**
+ * Standard BEP 5
+ * Mods for I2P:
+ * <pre>
+ * - The UDP port need not be pinged after receiving a PORT message.
+ *
+ * - The UDP (datagram) port listed in the compact node info is used
+ *   to receive repliable (signed) datagrams.
+ *   This is used for queries, except for announces.
+ *   We call this the "query port".
+ *   In addition to that UDP port, we use a second datagram
+ *   port equal to the signed port + 1. This is used to receive
+ *   unsigned (raw) datagrams for replies, errors, and announce queries.
+ *   We call this the "response port".
+ *
+ * - Compact peer info is 32 bytes (32 byte SHA256 Hash)
+ *   instead of 4 byte IP + 2 byte port. There is no peer port.
+ *
+ * - Compact node info is 54 bytes (20 byte SHA1 Hash + 32 byte SHA256 Hash + 2 byte port)
+ *   instead of 20 byte SHA1 Hash + 4 byte IP + 2 byte port.
+ *   Port is the query port, the response port is always the query port + 1.
+ *
+ * - The trackerless torrent dictionary "nodes" key is a list of
+ *   32 byte binary strings (SHA256 Hashes) instead of a list of lists
+ *   containing a host string and a port integer.
+ * </pre>
+ *
+ * Questions:
+ *   - nodes (in the find_node and get_peers response) is one concatenated string, not a list of strings, right?
+ *   - Node ID enforcement, keyspace rotation?
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+public class KRPC implements I2PSessionMuxedListener, DHT {
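+    /*
+     * Editor's illustration, not part of the original patch: the 54-byte
+     * compact node info described in the class javadoc, laid out by byte offset.
+     *
+     *   bytes  0..19 : 20-byte SHA1 node ID (NID)
+     *   bytes 20..51 : 32-byte SHA256 hash of the node's Destination
+     *   bytes 52..53 : 2-byte query port (response port is always query port + 1)
+     */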
+
+    private final I2PAppContext _context;
+    private final Log _log;
+
+    /** our tracker */
+    private final DHTTracker _tracker;
+    /** who we know */
+    private final DHTNodes _knownNodes;
+    /** index to sent queries awaiting reply */
+    private final ConcurrentHashMap<MsgID, ReplyWaiter> _sentQueries;
+    /** index to outgoing tokens we generated, sent in reply to a get_peers query */
+    private final ConcurrentHashMap<Token, NodeInfo> _outgoingTokens;
+    /** index to incoming opaque tokens, received in a peers or nodes reply */
+    private final ConcurrentHashMap<NID, Token> _incomingTokens;
+
+    /** hook to inject and receive datagrams */
+    private final I2PSession _session;
+    /** 20 byte random id */
+    private final byte[] _myID;
+    /** 20 byte random id */
+    private final NID _myNID;
+    /** 20 byte random id + 32 byte Hash + 2 byte port */
+    private final NodeInfo _myNodeInfo;
+    /** unsigned dgrams */
+    private final int _rPort;
+    /** signed dgrams */
+    private final int _qPort;
+    private final File _dhtFile;
+    private volatile boolean _isRunning;
+    private volatile boolean _hasBootstrapped;
+
+    /** all-zero NID used for pings */
+    public static final NID FAKE_NID = new NID(new byte[NID.HASH_LENGTH]);
+
+    /** Max number of nodes to return. BEP 5 says 8 */
+    private static final int K = 8;
+    /** Max number of peers to return. BEP 5 doesn't say. We'll use the same as I2PSnarkUtil.MAX_CONNECTIONS */
+    private static final int MAX_WANT = 16;
+
+    /** overloads error codes which start with 201 */
+    private static final int REPLY_NONE = 0;
+    private static final int REPLY_PONG = 1;
+    private static final int REPLY_PEERS = 2;
+    private static final int REPLY_NODES = 3;
+
+    public static final boolean SECURE_NID = true;
+
+    /** how long since we last heard from a node before we delete it - BEP 5 says 15 minutes */
+    private static final long MAX_NODEINFO_AGE = 60*60*1000;
+    /** how long after a token is generated before we delete it - BEP 5 says 10 minutes */
+    private static final long MAX_TOKEN_AGE = 60*60*1000;
+    private static final long MAX_INBOUND_TOKEN_AGE = MAX_TOKEN_AGE - 5*60*1000;
+    /** how long after sending a query do we keep the message ID while waiting for a reply */
+    private static final long MAX_MSGID_AGE = 2*60*1000;
+    /** default time to wait for a reply to a single query */
+    private static final long DEFAULT_QUERY_TIMEOUT = 75*1000;
+    /** stagger with other cleaners */
+    private static final long CLEAN_TIME = 63*1000;
+    private static final long EXPLORE_TIME = 877*1000;
+    private static final String DHT_FILE = "i2psnark.dht.dat";
+
+    public KRPC (I2PAppContext ctx, I2PSession session) {
+        _context = ctx;
+        _session = session;
+        _log = ctx.logManager().getLog(KRPC.class);
+        _tracker = new DHTTracker(ctx);
+
+        _sentQueries = new ConcurrentHashMap();
+        _outgoingTokens = new ConcurrentHashMap();
+        _incomingTokens = new ConcurrentHashMap();
+
+        // Construct my NodeInfo
+        // Pick ports over a big range to marginally increase security
+        // If we add a search DHT, adjust to stay out of each other's way
+        _qPort = 2555 + ctx.random().nextInt(61111);
+        _rPort = _qPort + 1;
+        if (SECURE_NID) {
+            _myNID = NodeInfo.generateNID(session.getMyDestination().calculateHash(), _qPort, _context.random());
+            _myID = _myNID.getData();
+        } else {
+            _myID = new byte[NID.HASH_LENGTH];
+            ctx.random().nextBytes(_myID);
+            _myNID = new NID(_myID);
+        }
+        _myNodeInfo = new NodeInfo(_myNID, session.getMyDestination(), _qPort);
+        _dhtFile = new File(ctx.getConfigDir(), DHT_FILE);
+        _knownNodes = new DHTNodes(ctx, _myNID);
+
+        session.addMuxedSessionListener(this, I2PSession.PROTO_DATAGRAM_RAW, _rPort);
+        session.addMuxedSessionListener(this, I2PSession.PROTO_DATAGRAM, _qPort);
+        start();
+    }
+
+    ///////////////// Public methods
+
+    /**
+     * Known nodes, not estimated total network size.
+     */
+    public int size() {
+        return _knownNodes.size();
+    }
+
+    /**
+     *  @return The UDP query port
+     */
+    public int getPort() {
+        return _qPort;
+    }
+
+    /**
+     *  @return The UDP response port
+     */
+    public int getRPort() {
+        return _rPort;
+    }
+
+    /**
+     *  Ping. We don't have a NID yet so the node is presumed
+     *  to be absent from our DHT.
+     *  Non-blocking, does not wait for pong.
+     *  If and when the pong is received the node will be inserted in our DHT.
+     */
+    public void ping(Destination dest, int port) {
+        NodeInfo nInfo = new NodeInfo(dest, port);
+        sendPing(nInfo);
+    }
+
+    /**
+     *  Bootstrapping or background thread.
+     *  Blocking!
+     *  This is almost the same as getPeers()
+     *
+     *  @param target the key we are searching for
+     *  @param maxNodes how many to contact
+     *  @param maxWait how long to wait for each to reply (not total) must be > 0
+     *  @param parallel how many outstanding at once (unimplemented, always 1)
+     */
+    private void explore(NID target, int maxNodes, long maxWait, int parallel) {
+        List<NodeInfo> nodes = _knownNodes.findClosest(target, maxNodes);
+        if (nodes.isEmpty()) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("DHT is empty, cannot explore");
+            return;
+        }
+        SortedSet<NodeInfo> toTry = new TreeSet(new NodeInfoComparator(target));
+        toTry.addAll(nodes);
+        Set<NodeInfo> tried = new HashSet();
+
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Starting explore of " + target);
+        for (int i = 0; i < maxNodes; i++) {
+            if (!_isRunning)
+                break;
+            NodeInfo nInfo;
+            try {
+                nInfo = toTry.first();
+            } catch (NoSuchElementException nsee) {
+                break;
+            }
+            toTry.remove(nInfo);
+            tried.add(nInfo);
+
+            ReplyWaiter waiter = sendFindNode(nInfo, target);
+            if (waiter == null)
+                continue;
+            synchronized(waiter) {
+                try {
+                    waiter.wait(maxWait);
+                } catch (InterruptedException ie) {}
+            }
+
+            int replyType = waiter.getReplyCode();
+            if (replyType == REPLY_NONE) {
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got no reply");
+            } else if (replyType == REPLY_NODES) {
+                 List<NodeInfo> reply = (List<NodeInfo>) waiter.getReplyObject();
+                 // It seems like we are just going to get back ourselves all the time
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got " + reply.size() + " nodes");
+                 for (NodeInfo ni : reply) {
+                     if (! (ni.equals(_myNodeInfo) || tried.contains(ni) || toTry.contains(ni)))
+                         toTry.add(ni);
+                 }
+            } else {
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got unexpected reply " + replyType + ": " + waiter.getReplyObject());
+            }
+        }
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Finished explore of " + target);
+    }
+
+    /**
+     *  Local lookup only
+     *  @param ih a 20-byte info hash
+     *  @param max max to return
+     *  @return list or empty list (never null)
+     */
+    public List<NodeInfo> findClosest(byte[] ih, int max) {
+        List<NodeInfo> nodes = _knownNodes.findClosest(new InfoHash(ih), max);
+        return nodes;
+    }
+
+    /**
+     *  Get peers for a torrent.
+     *  This is an iterative lookup in the DHT.
+     *  Blocking!
+     *  Caller should run in a thread.
+     *
+     *  @param ih the Info Hash (torrent)
+     *  @param max maximum number of peers to return
+     *  @param maxWait the maximum time to wait (ms) must be > 0
+     *  @return list or empty list (never null)
+     */
+    public List<Hash> getPeers(byte[] ih, int max, long maxWait) {
+        // check local tracker first
+        InfoHash iHash = new InfoHash(ih);
+        List<Hash> rv = _tracker.getPeers(iHash, max);
+        rv.remove(_myNodeInfo.getHash());
+        if (!rv.isEmpty())
+            return rv;  // TODO get DHT too?
+
+        // Initial set to try, will get added to as we go
+        List<NodeInfo> nodes = _knownNodes.findClosest(iHash, max);
+        SortedSet<NodeInfo> toTry = new TreeSet(new NodeInfoComparator(iHash));
+        toTry.addAll(nodes);
+        Set<NodeInfo> tried = new HashSet();
+
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Starting getPeers with " + nodes.size() + " to try");
+        for (int i = 0; i < max; i++) {
+            if (!_isRunning)
+                break;
+            NodeInfo nInfo;
+            try {
+                nInfo = toTry.first();
+            } catch (NoSuchElementException nsee) {
+                break;
+            }
+            toTry.remove(nInfo);
+            tried.add(nInfo);
+
+            ReplyWaiter waiter = sendGetPeers(nInfo, iHash);
+            if (waiter == null)
+                continue;
+            synchronized(waiter) {
+                try {
+                    waiter.wait(maxWait);
+                } catch (InterruptedException ie) {}
+            }
+
+            int replyType = waiter.getReplyCode();
+            if (replyType == REPLY_NONE) {
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got no reply");
+            } else if (replyType == REPLY_PONG) {
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got pong");
+            } else if (replyType == REPLY_PEERS) {
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got peers");
+                 List<Hash> reply = (List<Hash>) waiter.getReplyObject();
+                 if (!reply.isEmpty()) {
+                     if (_log.shouldLog(Log.INFO))
+                         _log.info("Finished get Peers, returning " + reply.size());
+                     return reply;
+                 }
+            } else if (replyType == REPLY_NODES) {
+                 List<NodeInfo> reply = (List<NodeInfo>) waiter.getReplyObject();
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got " + reply.size() + " nodes");
+                 for (NodeInfo ni : reply) {
+                     if (! (ni.equals(_myNodeInfo) || tried.contains(ni) || toTry.contains(ni)))
+                         toTry.add(ni);
+                 }
+            } else {
+                 if (_log.shouldLog(Log.INFO))
+                     _log.info("Got unexpected reply " + replyType + ": " + waiter.getReplyObject());
+            }
+        }
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Finished get Peers, fail");
+        return Collections.EMPTY_LIST;
+    }
+
+    /**
+     *  Announce to ourselves.
+     *  Non-blocking.
+     *
+     *  @param ih the Info Hash (torrent)
+     */
+    public void announce(byte[] ih) {
+        InfoHash iHash = new InfoHash(ih);
+        _tracker.announce(iHash, _myNodeInfo.getHash());
+    }
+
+    /**
+     *  Announce somebody else we know about.
+     *  Non-blocking.
+     *
+     *  @param ih the Info Hash (torrent)
+     *  @param peerHash the peer's Hash
+     */
+    public void announce(byte[] ih, byte[] peerHash) {
+        InfoHash iHash = new InfoHash(ih);
+        _tracker.announce(iHash, new Hash(peerHash));
+        // Do NOT do this, corrupts the Hash cache and the Peer ID
+        //_tracker.announce(iHash, Hash.create(peerHash));
+    }
+
+    /**
+     *  Remove reference to ourselves in the local tracker.
+     *  Use when shutting down the torrent locally.
+     *  Non-blocking.
+     *
+     *  @param ih the Info Hash (torrent)
+     */
+    public void unannounce(byte[] ih) {
+        InfoHash iHash = new InfoHash(ih);
+        _tracker.unannounce(iHash, _myNodeInfo.getHash());
+    }
+
+    /**
+     *  Announce to the closest peers in the local DHT.
+     *  This is NOT iterative - call getPeers() first to get the closest
+     *  peers into the local DHT.
+     *  Blocking unless maxWait <= 0
+     *  Caller should run in a thread.
+     *  This also automatically announces ourselves to our local tracker.
+     *  For best results do a getPeers() first so we have tokens.
+     *
+     *  @param ih the Info Hash (torrent)
+     *  @param max maximum number of peers to announce to
+     *  @param maxWait the maximum total time to wait (ms) or 0 to do all in parallel and return immediately.
+     *  @return the number of successful announces, not counting ourselves.
+     */
+    public int announce(byte[] ih, int max, long maxWait) {
+        announce(ih);
+        int rv = 0;
+        long start = _context.clock().now();
+        InfoHash iHash = new InfoHash(ih);
+        List<NodeInfo> nodes = _knownNodes.findClosest(iHash, max);
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Found " + nodes.size() + " to announce to for " + iHash);
+        for (NodeInfo nInfo : nodes) {
+            if (!_isRunning)
+                break;
+            if (announce(ih, nInfo, Math.min(maxWait, 60*1000)))
+                rv++;
+            maxWait -= _context.clock().now() - start;
+            if (maxWait < 1000)
+                break;
+        }
+        return rv;
+    }
+
+    /**
+     *  Announce to a single DHT peer.
+     *  Blocking unless maxWait <= 0
+     *  Caller should run in a thread.
+     *  For best results do a getPeers() first so we have a token.
+     *
+     *  @param ih the Info Hash (torrent)
+     *  @param nInfo the peer to announce to
+     *  @param maxWait the maximum time to wait (ms) or 0 to return immediately.
+     *  @return success
+     */
+    private boolean announce(byte[] ih, NodeInfo nInfo, long maxWait) {
+        InfoHash iHash = new InfoHash(ih);
+        // it isn't clear from BEP 5 whether a token is bound to a single infohash,
+        // so for now just bind it to the NID
+        //TokenKey tokenKey = new TokenKey(nInfo.getNID(), iHash);
+        Token token = _incomingTokens.get(nInfo.getNID());
+        if (token == null) {
+            // we have no token, have to do a getPeers first to get a token
+            if (maxWait <= 0)
+                return false;
+            if (_log.shouldLog(Log.INFO))
+                _log.info("No token for announce to " + nInfo + " sending get_peers first");
+            ReplyWaiter waiter = sendGetPeers(nInfo, iHash);
+            if (waiter == null)
+                return false;
+            long start = _context.clock().now();
+            synchronized(waiter) {
+                try {
+                    waiter.wait(maxWait);
+                } catch (InterruptedException ie) {}
+            }
+            int replyType = waiter.getReplyCode();
+            if (!(replyType == REPLY_PEERS || replyType == REPLY_NODES)) {
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Get_peers failed to " + nInfo);
+                return false;
+            }
+            // we should have a token now
+            token = _incomingTokens.get(nInfo.getNID());
+            if (token == null) {
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Huh? no token after get_peers succeeded to " + nInfo);
+                return false;
+            }
+            maxWait -= _context.clock().now() - start;
+            if (maxWait < 1000) {
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Ran out of time after get_peers succeeded to " + nInfo);
+                return false;
+            }
+        }
+
+        // send and wait on rcv msg lock unless maxWait <= 0
+        ReplyWaiter waiter = sendAnnouncePeer(nInfo, iHash, token);
+        if (waiter == null)
+            return false;
+        if (maxWait <= 0)
+            return true;
+        synchronized(waiter) {
+            try {
+                waiter.wait(maxWait);
+            } catch (InterruptedException ie) {}
+        }
+        int replyType = waiter.getReplyCode();
+        return replyType == REPLY_PONG;
+    }
+
+    /**
+     *  Loads the DHT from file.
+     *  Can't be restarted after stopping?
+     */
+    public void start() {
+        _knownNodes.start();
+        _tracker.start();
+        PersistDHT.loadDHT(this, _dhtFile);
+        // start the explore thread
+        _isRunning = true;
+        // no need to keep ref, it will eventually stop
+        new Cleaner();
+        new Explorer(5*1000);
+    }
+
+    /**
+     *  Stop everything.
+     */
+    public void stop() {
+        _isRunning = false;
+        // FIXME stop the explore thread
+        // unregister port listeners
+        _session.removeListener(I2PSession.PROTO_DATAGRAM, _qPort);
+        _session.removeListener(I2PSession.PROTO_DATAGRAM_RAW, _rPort);
+        // clear the DHT and tracker
+        _tracker.stop();
+        PersistDHT.saveDHT(_knownNodes, _dhtFile);
+        _knownNodes.stop();
+        _sentQueries.clear();
+        _outgoingTokens.clear();
+        _incomingTokens.clear();
+    }
+
+    /**
+     * Clears the tracker and DHT data.
+     * Call after saving DHT data to disk.
+     */
+    public void clear() {
+        _tracker.stop();
+        _knownNodes.clear();
+    }
+
+    ////////// All private below here /////////////////////////////////////
+
+    ///// Sending.....
+
+    // Queries.....
+    // The first 3 queries use the query port.
+    // Announces use the response port.
+
+    /**
+     *  @param nInfo who to send it to
+     *  @return null on error
+     */
+    private ReplyWaiter sendPing(NodeInfo nInfo) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending ping to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        map.put("q", "ping");
+        Map<String, Object> args = new HashMap();
+        map.put("a", args);
+        return sendQuery(nInfo, map, true);
+    }
+
+    /**
+     *  @param nInfo who to send it to
+     *  @param tID target ID we are looking for
+     *  @return null on error
+     */
+    private ReplyWaiter sendFindNode(NodeInfo nInfo, NID tID) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending find node of " + tID + " to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        map.put("q", "find_node");
+        Map<String, Object> args = new HashMap();
+        args.put("target", tID.getData());
+        map.put("a", args);
+        return sendQuery(nInfo, map, true);
+    }
+
+    /**
+     *  @param nInfo who to send it to
+     *  @return null on error
+     */
+    private ReplyWaiter sendGetPeers(NodeInfo nInfo, InfoHash ih) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending get peers of " + ih + " to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        map.put("q", "get_peers");
+        Map<String, Object> args = new HashMap();
+        args.put("info_hash", ih.getData());
+        map.put("a", args);
+        ReplyWaiter rv = sendQuery(nInfo, map, true);
+        // save the InfoHash so we can get it later
+        if (rv != null)
+            rv.setSentObject(ih);
+        return rv;
+    }
+
+    /**
+     *  @param nInfo who to send it to
+     *  @return null on error
+     */
+    private ReplyWaiter sendAnnouncePeer(NodeInfo nInfo, InfoHash ih, Token token) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending announce of " + ih + " to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        map.put("q", "announce_peer");
+        Map<String, Object> args = new HashMap();
+        args.put("info_hash", ih.getData());
+        // port ignored
+        args.put("port", Integer.valueOf(6881));
+        args.put("token", token.getData());
+        map.put("a", args);
+        // an announce need not be signed, we have a token
+        ReplyWaiter rv = sendQuery(nInfo, map, false);
+        return rv;
+    }
+
+    // Responses.....
+    // All responses use the response port.
+
+    /**
+     *  @param nInfo who to send it to
+     *  @return success
+     */
+    private boolean sendPong(NodeInfo nInfo, MsgID msgID) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending pong to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        Map<String, Object> resps = new HashMap();
+        map.put("r", resps);
+        return sendResponse(nInfo, msgID, map);
+    }
+
+    /** response to find_node (no token) */
+    private boolean sendNodes(NodeInfo nInfo, MsgID msgID, byte[] ids) {
+        return sendNodes(nInfo, msgID, null, ids);
+    }
+
+    /**
+     *  response to find_node (token is null) or get_peers (has a token)
+     *  @param nInfo who to send it to
+     *  @return success
+     */
+    private boolean sendNodes(NodeInfo nInfo, MsgID msgID, Token token, byte[] ids) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending nodes to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        Map<String, Object> resps = new HashMap();
+        map.put("r", resps);
+        if (token != null)
+            resps.put("token", token.getData());
+        resps.put("nodes", ids);
+        return sendResponse(nInfo, msgID, map);
+    }
+
+    /** @param token non-null */
+    private boolean sendPeers(NodeInfo nInfo, MsgID msgID, Token token, List<byte[]> peers) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending peers to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        Map<String, Object> resps = new HashMap();
+        map.put("r", resps);
+        resps.put("token", token.getData());
+        resps.put("values", peers);
+        return sendResponse(nInfo, msgID, map);
+    }
+
+    // All errors use the response port.
+
+    /**
+     *  Note: currently sends an empty response rather than a formatted error;
+     *  the err code is unused and msg is only logged.
+     *  @param nInfo who to send it to
+     *  @return success
+     */
+    private boolean sendError(NodeInfo nInfo, MsgID msgID, int err, String msg) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending error " + msg + " to: " + nInfo);
+        Map<String, Object> map = new HashMap();
+        Map<String, Object> resps = new HashMap();
+        map.put("r", resps);
+        return sendResponse(nInfo, msgID, map);
+    }
+
+    // Low-level send methods
+
+    // TODO sendQuery with onReply / onTimeout args
+
+    /**
+     *  Blocking if repliable and we must lookup b32
+     *  @param repliable true for all but announce
+     *  @return null on error
+     */
+    private ReplyWaiter sendQuery(NodeInfo nInfo, Map<String, Object> map, boolean repliable) {
+        if (nInfo.equals(_myNodeInfo))
+            throw new IllegalArgumentException("wtf don't send to ourselves");
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Sending query to: " + nInfo);
+        if (nInfo.getDestination() == null) {
+            NodeInfo newInfo = _knownNodes.get(nInfo.getNID());
+            if (newInfo != null && newInfo.getDestination() != null) {
+                nInfo = newInfo;
+            } else if (!repliable) {
+                // Don't lookup for announce query, we should already have it
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Dropping non-repliable query, no dest for " + nInfo);
+                return null;
+            } else {
+                // Lookup the dest for the hash
+                // TODO spin off into thread or queue? We really don't want to block here
+                if (!lookupDest(nInfo)) {
+                    if (_log.shouldLog(Log.WARN))
+                        _log.warn("Dropping repliable query, no dest for " + nInfo);
+                    timeout(nInfo);
+                    return null;
+                }
+            }
+        }
+        map.put("y", "q");
+        MsgID mID = new MsgID(_context);
+        map.put("t", mID.getData());
+        Map<String, Object> args = (Map<String, Object>) map.get("a");
+        if (args == null)
+            throw new IllegalArgumentException("no args");
+        args.put("id", _myID);
+        int port = nInfo.getPort();
+        if (!repliable)
+            port++;
+        boolean success = sendMessage(nInfo.getDestination(), port, map, repliable);
+        if (success) {
+            // save for the caller to get
+            ReplyWaiter rv = new ReplyWaiter(mID, nInfo, null, null);
+            _sentQueries.put(mID, rv);
+            return rv;
+        }
+        return null;
+    }
+
+    /**
+     *  Sends to the peer's query port + 1 (the response port).
+     *  @return success
+     */
+    private boolean sendResponse(NodeInfo nInfo, MsgID msgID, Map<String, Object> map) {
+        if (nInfo.equals(_myNodeInfo))
+            throw new IllegalArgumentException("wtf don't send to ourselves");
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Sending response to: " + nInfo);
+        if (nInfo.getDestination() == null) {
+            NodeInfo newInfo = _knownNodes.get(nInfo.getNID());
+            if (newInfo != null && newInfo.getDestination() != null) {
+                nInfo = newInfo;
+            } else {
+                // lookup b32?
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Dropping response, no dest for " + nInfo);
+                return false;
+            }
+        }
+        map.put("y", "r");
+        map.put("t", msgID.getData());
+        Map<String, Object> resps = (Map<String, Object>) map.get("r");
+        if (resps == null)
+            throw new IllegalArgumentException("no resps");
+        resps.put("id", _myID);
+        return sendMessage(nInfo.getDestination(), nInfo.getPort() + 1, map, false);
+    }
+
+    /**
+     *  Sends to the peer's query port + 1 (the response port).
+     *  @return success
+     */
+    private boolean sendError(NodeInfo nInfo, MsgID msgID, Map<String, Object> map) {
+        if (nInfo.equals(_myNodeInfo))
+            throw new IllegalArgumentException("wtf don't send to ourselves");
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Sending error to: " + nInfo);
+        if (nInfo.getDestination() == null) {
+            NodeInfo newInfo = _knownNodes.get(nInfo.getNID());
+            if (newInfo != null && newInfo.getDestination() != null) {
+                nInfo = newInfo;
+            } else {
+                // lookup b32?
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Dropping sendError, no dest for " + nInfo);
+                return false;
+            }
+        }
+        map.put("y", "e");
+        map.put("t", msgID.getData());
+        return sendMessage(nInfo.getDestination(), nInfo.getPort() + 1, map, false);
+    }
+
+    /**
+     *  Get the dest for a NodeInfo lacking it, and store it there.
+     *  Blocking.
+     *  @return success
+     */
+    private boolean lookupDest(NodeInfo nInfo) {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("looking up dest for " + nInfo);
+        try {
+            // use a short timeout for now
+            Destination dest = _session.lookupDest(nInfo.getHash(), 5*1000);
+            if (dest != null) {
+                nInfo.setDestination(dest);
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("lookup success for " + nInfo);
+                return true;
+            }
+        } catch (I2PSessionException ise) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("lookup fail", ise);
+        }
+        if (_log.shouldLog(Log.INFO))
+            _log.info("lookup fail for " + nInfo);
+        return false;
+    }
+
+    /**
+     *  Lowest-level send message call.
+     *  @param repliable true for all but announce
+     *  @return success
+     */
+    private boolean sendMessage(Destination dest, int toPort, Map<String, Object> map, boolean repliable) {
+        if (_session.isClosed()) {
+            // Don't allow DHT to open a closed session
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Not sending message, session is closed");
+            return false;
+        }
+        if (dest.calculateHash().equals(_myNodeInfo.getHash()))
+            throw new IllegalArgumentException("wtf don't send to ourselves");
+        byte[] payload = BEncoder.bencode(map);
+        if (_log.shouldLog(Log.DEBUG)) {
+            ByteArrayInputStream bais = new ByteArrayInputStream(payload);
+            try {
+                _log.debug("Sending to: " + dest.calculateHash() + ' ' + BDecoder.bdecode(bais).toString());
+            } catch (IOException ioe) {}
+        }
+
+        // Always send query port, peer will increment for unsigned replies
+        int fromPort = _qPort;
+        if (repliable) {
+            I2PDatagramMaker dgMaker = new I2PDatagramMaker(_session);
+            payload = dgMaker.makeI2PDatagram(payload);
+            if (payload == null) {
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("WTF DGM fail");
+            }
+        }
+
+        try {
+            // TODO I2CP per-packet options
+            boolean success = _session.sendMessage(dest, payload, 0, payload.length, null, null, 60*1000,
+                                                   repliable ? I2PSession.PROTO_DATAGRAM : I2PSession.PROTO_DATAGRAM_RAW,
+                                                   fromPort, toPort);
+            if (!success) {
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("WTF sendMessage fail");
+            }
+            return success;
+        } catch (I2PSessionException ise) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("sendMessage fail", ise);
+            return false;
+        }
+    }
+
+    // Reception.....
+
+    /**
+     *  @param from dest or null if it didn't come in on signed port
+     */
+    private void receiveMessage(Destination from, int fromPort, byte[] payload) {
+
+        try {
+            InputStream is = new ByteArrayInputStream(payload);
+            BDecoder dec = new BDecoder(is);
+            BEValue bev = dec.bdecodeMap();
+            Map<String, BEValue> map = bev.getMap();
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Got KRPC message " + bev.toString());
+
+            // Lazy here, just let missing Map entries throw NPEs, caught below
+
+            byte[] msgIDBytes = map.get("t").getBytes();
+            MsgID mID = new MsgID(msgIDBytes);
+            String type = map.get("y").getString();
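+            // "y" is the message type: "q" = query, "r" = response, "e" = error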
+            if (type.equals("q")) {
+                // queries must be repliable
+                String method = map.get("q").getString();
+                Map<String, BEValue> args = map.get("a").getMap();
+                receiveQuery(mID, from, fromPort, method, args);
+            } else if (type.equals("r") || type.equals("e")) {
+                // look up the ReplyWaiter for this transaction ID
+                ReplyWaiter waiter = _sentQueries.remove(mID);
+                if (waiter != null) {
+                    // TODO verify waiter NID and port?
+                    if (type.equals("r")) {
+                        Map<String, BEValue> response = map.get("r").getMap();
+                        receiveResponse(waiter, response);
+                    } else {
+                        List<BEValue> error = map.get("e").getList();
+                        receiveError(waiter, error);
+                    }
+                } else {
+                    if (_log.shouldLog(Log.WARN))
+                        _log.warn("Rcvd msg with no one waiting: " + bev.toString());
+                }
+            } else {
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Unknown msg type rcvd: " + bev.toString());
+                throw new InvalidBEncodingException("Unknown type: " + type);
+            }
+            // success
+      /***
+        } catch (InvalidBEncodingException e) {
+        } catch (IOException e) {
+        } catch (ArrayIndexOutOfBoundsException e) {
+        } catch (IllegalArgumentException e) {
+        } catch (ClassCastException e) {
+        } catch (NullPointerException e) {
+       ***/
+        } catch (Exception e) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Receive error for message", e);
+        }
+    }
+
+
+    // Queries.....
+
+    /**
+     *  Adds sender to our DHT.
+     *  @param dest may be null for announce_peer method only
+     *  @throws NPE too
+     */
+    private void receiveQuery(MsgID msgID, Destination dest, int fromPort, String method, Map<String, BEValue> args) throws InvalidBEncodingException {
+        if (dest == null && !method.equals("announce_peer")) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Received non-announce_peer query method on reply port: " + method);
+            return;
+        }
+        byte[] nid = args.get("id").getBytes();
+        NodeInfo nInfo;
+        if (dest != null) {
+            nInfo = new NodeInfo(new NID(nid), dest, fromPort);
+            nInfo = heardFrom(nInfo);
+            nInfo.setDestination(dest);
+            // ninfo.checkport ?
+        } else {
+            nInfo = null;
+        }
+
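+        // the four standard KRPC query methods: ping, find_node, get_peers, announce_peer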
+        if (method.equals("ping")) {
+            receivePing(msgID, nInfo);
+        } else if (method.equals("find_node")) {
+            byte[] tid = args.get("target").getBytes();
+            NID tID = new NID(tid);
+            receiveFindNode(msgID, nInfo, tID);
+        } else if (method.equals("get_peers")) {
+            byte[] hash = args.get("info_hash").getBytes();
+            InfoHash ih = new InfoHash(hash);
+            receiveGetPeers(msgID, nInfo, ih);
+        } else if (method.equals("announce_peer")) {
+            byte[] hash = args.get("info_hash").getBytes();
+            InfoHash ih = new InfoHash(hash);
+            // this is the "TCP" port, we don't care
+            //int port = args.get("port").getInt();
+            byte[] token = args.get("token").getBytes();
+            receiveAnnouncePeer(msgID, ih, token);
+        } else {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Unknown query method rcvd: " + method);
+        }
+    }
+
+    /**
+     *  Called for a request or response
+     *  @return old NodeInfo or nInfo if none, use this to reduce object churn
+     */
+    private NodeInfo heardFrom(NodeInfo nInfo) {
+        // try to keep ourselves out of the DHT
+        if (nInfo.equals(_myNodeInfo))
+            return _myNodeInfo;
+        NID nID = nInfo.getNID();
+        NodeInfo oldInfo = _knownNodes.get(nID);
+        if (oldInfo == null) {
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Adding node: " + nInfo);
+            oldInfo = nInfo;
+            NodeInfo nInfo2 = _knownNodes.putIfAbsent(nInfo);
+            if (nInfo2 != null)
+                oldInfo = nInfo2;
+        } else {
+            if (oldInfo.getDestination() == null && nInfo.getDestination() != null)
+                oldInfo.setDestination(nInfo.getDestination());
+        }
+        oldInfo.getNID().setLastSeen();
+        return oldInfo;
+    }
+
+    /**
+     *  Called for bootstrap or for all nodes in a receiveNodes reply.
+     *  Package private for PersistDHT.
+     *  @return non-null nodeInfo from DB if present, otherwise the nInfo parameter is returned
+     */
+    NodeInfo heardAbout(NodeInfo nInfo) {
+        // try to keep ourselves out of the DHT
+        if (nInfo.equals(_myNodeInfo))
+            return _myNodeInfo;
+        NodeInfo rv = _knownNodes.putIfAbsent(nInfo);
+        if (rv == null)
+            rv = nInfo;
+        return rv;
+    }
+
+    /**
+     *  Called when a reply times out
+     */
+    private void timeout(NodeInfo nInfo) {
+        NID nid = nInfo.getNID();
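+        // NID.timeout() counts consecutive failures; returns true once the limit is exceeded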
+        boolean remove = nid.timeout();
+        if (remove) {
+            if (_knownNodes.remove(nid) != null) {
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Removed after consecutive timeouts: " + nInfo);
+            }
+        }
+    }
+
+    /**
+     *  Handle and respond to the query
+     */
+    private void receivePing(MsgID msgID, NodeInfo nInfo) throws InvalidBEncodingException {
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Rcvd ping from: " + nInfo);
+        sendPong(nInfo, msgID);
+    }
+
+    /**
+     *  Handle and respond to the query
+     *  @param tID target ID they are looking for
+     */
+    private void receiveFindNode(MsgID msgID, NodeInfo nInfo, NID tID) throws InvalidBEncodingException {
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd find_node from: " + nInfo + " for: " + tID);
+        NodeInfo peer = _knownNodes.get(tID);
+        if (peer != null) {
+            // success, one answer
+            sendNodes(nInfo, msgID, peer.getData());
+        } else {
+            // get closest from DHT
+            List<NodeInfo> nodes = _knownNodes.findClosest(tID, K);
+            nodes.remove(nInfo);        // him
+            nodes.remove(_myNodeInfo);  // me
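+            // pack the closest nodes as concatenated 54-byte compact entries (NID + dest hash + port)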
+            byte[] nodeArray = new byte[nodes.size() * NodeInfo.LENGTH];
+            for (int i = 0; i < nodes.size(); i ++) {
+                System.arraycopy(nodes.get(i).getData(), 0, nodeArray, i * NodeInfo.LENGTH, NodeInfo.LENGTH);
+            }
+            sendNodes(nInfo, msgID, nodeArray);
+        }
+    }
+
+    /**
+     *  Handle and respond to the query
+     */
+    private void receiveGetPeers(MsgID msgID, NodeInfo nInfo, InfoHash ih) throws InvalidBEncodingException {
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd get_peers from: " + nInfo + " for: " + ih);
+        // generate and save random token
+        Token token = new Token(_context);
+        _outgoingTokens.put(token, nInfo);
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Stored new OB token: " + token + " for: " + nInfo);
+
+        List<Hash> peers = _tracker.getPeers(ih, MAX_WANT);
+        if (peers.isEmpty()) {
+            // similar to find node, but with token
+            // get closest from DHT
+            List<NodeInfo> nodes = _knownNodes.findClosest(ih, K);
+            nodes.remove(nInfo);        // him
+            nodes.remove(_myNodeInfo);  // me
+            byte[] nodeArray = new byte[nodes.size() * NodeInfo.LENGTH];
+            for (int i = 0; i < nodes.size(); i ++) {
+                System.arraycopy(nodes.get(i).getData(), 0, nodeArray, i * NodeInfo.LENGTH, NodeInfo.LENGTH);
+            }
+            sendNodes(nInfo, msgID, token, nodeArray);
+        } else {
+            List<byte[]> hashes = new ArrayList(peers.size());
+            Hash him = nInfo.getHash();
+            for (Hash peer : peers) {
+                 if (!peer.equals(him))
+                     hashes.add(peer.getData());
+            }
+            sendPeers(nInfo, msgID, token, hashes);
+        }
+    }
+
+    /**
+     *  Handle and respond to the query.
+     *  We have no node info here, it came on response port, we have to get it from the token
+     */
+    private void receiveAnnouncePeer(MsgID msgID, InfoHash ih, byte[] tok) throws InvalidBEncodingException {
+        Token token = new Token(tok);
+        NodeInfo nInfo = _outgoingTokens.get(token);
+        if (nInfo == null) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Unknown token in announce_peer: " + token);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Current known tokens: " + _outgoingTokens.keySet());
+            return;
+        }
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd announce from: " + nInfo + " for: " + ih);
+
+        _tracker.announce(ih, nInfo.getHash());
+        // the reply for an announce is the same as the reply for a ping
+        sendPong(nInfo, msgID);
+    }
+
+    // Responses.....
+
+    /**
+     *  Handle the response and alert whoever sent the query it is responding to.
+     *  Adds sender nodeinfo to our DHT.
+     *  @throws NPE, IllegalArgumentException, and others too
+     */
+    private void receiveResponse(ReplyWaiter waiter, Map<String, BEValue> response) throws InvalidBEncodingException {
+        NodeInfo nInfo = waiter.getSentTo();
+
+        BEValue nodes = response.get("nodes");
+        BEValue values = response.get("values");
+
+        // token handling - save it for later announces
+        if (nodes != null || values != null) {
+            BEValue btok = response.get("token");
+            InfoHash ih = (InfoHash) waiter.getSentObject();
+            if (btok != null && ih != null) {
+                byte[] tok = btok.getBytes();
+                Token token = new Token(_context, tok);
+                _incomingTokens.put(nInfo.getNID(), token);
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Got token: " + token + ", must be a response to get_peers");
+            } else {
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("No token and saved infohash, must be a response to find_node");
+            }
+        }
+
+        // now do the right thing
+        if (nodes != null) {
+            // find node or get peers response - concatenated NodeInfos
+            byte[] ids = nodes.getBytes();
+            List<NodeInfo> rlist = receiveNodes(nInfo, ids);
+            waiter.gotReply(REPLY_NODES, rlist);
+        } else if (values != null) {
+            // get peers response - list of Hashes
+            List<BEValue> peers = values.getList();
+            List<Hash> rlist = receivePeers(nInfo, peers);
+            waiter.gotReply(REPLY_PEERS, rlist);
+        } else {
+            // a ping response or an announce peer response
+            byte[] nid = response.get("id").getBytes();
+            receivePong(nInfo, nid);
+            waiter.gotReply(REPLY_PONG, null);
+        }
+    }
+
+    /**
+     *  Receive concatenated 54-byte NodeInfos, return as a List.
+     *  Adds all received nodeinfos to our DHT.
+     *  @throws NPE, IllegalArgumentException, and others too
+     */
+    private List<NodeInfo> receiveNodes(NodeInfo nInfo, byte[] ids) throws InvalidBEncodingException {
+        List<NodeInfo> rv = new ArrayList(ids.length / NodeInfo.LENGTH);
+        for (int off = 0; off < ids.length; off += NodeInfo.LENGTH) {
+            NodeInfo nInf = new NodeInfo(ids, off);
+            nInf = heardAbout(nInf);
+            rv.add(nInf);
+        }
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd nodes from: " + nInfo + ": " + DataHelper.toString(rv));
+        return rv;
+    }
+
+    /**
+     *  Receive 32-byte Hashes, return as a List.
+     *  @throws NPE, IllegalArgumentException, and others too
+     */
+    private List<Hash> receivePeers(NodeInfo nInfo, List<BEValue> peers) throws InvalidBEncodingException {
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd peers from: " + nInfo);
+        List<Hash> rv = new ArrayList(peers.size());
+        for (BEValue bev : peers) {
+            byte[] b = bev.getBytes();
+            //Hash h = new Hash(b);
+            Hash h = Hash.create(b);
+            rv.add(h);
+        }
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd peers from: " + nInfo + ": " + DataHelper.toString(rv));
+        return rv;
+    }
+
+    /**
+     *  If node info was previously created with the dummy NID,
+     *  replace it with the received NID.
+     */
+    private void receivePong(NodeInfo nInfo, byte[] nid) {
+        if (nInfo.getNID().equals(FAKE_NID)) {
+            NodeInfo newInfo = new NodeInfo(new NID(nid), nInfo.getHash(), nInfo.getPort());
+            Destination dest = nInfo.getDestination();
+            if (dest != null)
+                newInfo.setDestination(dest);
+            heardFrom(newInfo);
+        }
+        if (_log.shouldLog(Log.INFO))
+             _log.info("Rcvd pong from: " + nInfo);
+    }
+
+    // Errors.....
+
+    /**
+     *  @throws NPE, and others too
+     */
+    private void receiveError(ReplyWaiter waiter, List<BEValue> error) throws InvalidBEncodingException {
+        int errorCode = error.get(0).getInt();
+        String errorString = error.get(1).getString();
+        if (_log.shouldLog(Log.WARN))
+            _log.warn("Rcvd error from: " + waiter +
+                      " num: " + errorCode +
+                      " msg: " + errorString);
+        // this calls heardFrom()
+        waiter.gotReply(errorCode, errorString);
+    }
+
+    /**
+     * Callback for replies
+     */
+    private class ReplyWaiter extends SimpleTimer2.TimedEvent {
+        private final MsgID mid;
+        private final NodeInfo sentTo;
+        private final Runnable onReply;
+        private final Runnable onTimeout;
+        private int replyCode;
+        private Object sentObject;
+        private Object replyObject;
+
+        /**
+         *  Either wait on this object with a timeout, or use non-null Runnables.
+         *  Any sent data to be remembered may be stored by setSentObject().
+         *  Reply object may be in getReplyObject().
+         *  @param onReply must be fast, otherwise set to null and wait on this UNUSED
+         *  @param onTimeout must be fast, otherwise set to null and wait on this UNUSED
+         */
+        public ReplyWaiter(MsgID mID, NodeInfo nInfo, Runnable onReply, Runnable onTimeout) {
+            super(SimpleTimer2.getInstance(), DEFAULT_QUERY_TIMEOUT);
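+            // the super constructor schedules the timeout; gotReply() cancels it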
+            this.mid = mID;
+            this.sentTo = nInfo;
+            this.onReply = onReply;
+            this.onTimeout = onTimeout;
+        }
+
+        public NodeInfo getSentTo() {
+            return sentTo;
+        }
+
+        /** only used for get_peers, to save the Info Hash */
+        public void setSentObject(Object o) {
+            sentObject = o;
+        }
+
+        /** @return that stored with setSentObject() */
+        public Object getSentObject() {
+            return sentObject;
+        }
+
+        /**
+         *  Should contain null if getReplyCode is REPLY_PONG.
+         *  Should contain List<Hash> if getReplyCode is REPLY_PEERS.
+         *  Should contain List<NodeInfo> if getReplyCode is REPLY_NODES.
+         *  Should contain String if getReplyCode is > 200.
+         *  @return may be null depending on what happened. Cast to expected type.
+         */
+        public Object getReplyObject() {
+            return replyObject;
+        }
+
+        /**
+         *  If nonzero, we got a reply, and getReplyObject() may contain something.
+         *  @return code or 0 if no error
+         */
+        public int getReplyCode() {
+            return replyCode;
+        }
+
+        /**
+         *  Will notify this and run onReply.
+         *  Also removes from _sentQueries and calls heardFrom().
+         */
+        public void gotReply(int code, Object o) {
+            cancel();
+            _sentQueries.remove(mid);
+            replyObject = o;
+            replyCode = code;
+            // if it is fake, heardFrom is called by receivePong()
+            if (!sentTo.getNID().equals(FAKE_NID))
+                heardFrom(sentTo);
+            if (onReply != null)
+                onReply.run();
+            synchronized(this) {
+                this.notifyAll();
+            }
+        }
+
+        /** timer callback on timeout */
+        public void timeReached() {
+            _sentQueries.remove(mid);
+            if (onTimeout != null)
+                onTimeout.run();
+            timeout(sentTo);
+            if (_log.shouldLog(Log.INFO))
+                _log.warn("timeout waiting for reply from " + sentTo);
+            synchronized(this) {
+                this.notifyAll();
+            }
+        }
+    }
+
+    // I2PSessionMuxedListener interface ----------------
+
+    /**
+     * Instruct the client that the given session has received a message
+     *
+     * Will be called only if you register via addMuxedSessionListener().
+     * Will be called only for the proto(s) and toPort(s) you register for.
+     *
+     * @param session session to notify
+     * @param msgId message number available
+     * @param size size of the message - why it's a long and not an int is a mystery
+     * @param proto 1-254 or 0 for unspecified
+     * @param fromPort 1-65535 or 0 for unspecified
+     * @param toPort 1-65535 or 0 for unspecified
+     */
+    public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromPort, int toPort) {
+        try {
+            byte[] payload = session.receiveMessage(msgId);
+            if (toPort == _qPort) {
+                // repliable
+                I2PDatagramDissector dgDiss = new I2PDatagramDissector();
+                dgDiss.loadI2PDatagram(payload);
+                payload = dgDiss.getPayload();
+                Destination from = dgDiss.getSender();
+                receiveMessage(from, fromPort, payload);
+            } else if (toPort == _rPort) {
+                // raw
+                receiveMessage(null, fromPort, payload);
+            } else {
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("msg on bad port");
+            }
+        } catch (DataFormatException e) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("bad msg");
+        } catch (I2PInvalidDatagramException e) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("bad msg");
+        } catch (I2PSessionException e) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("bad msg");
+        }
+    }
+
+    /** for non-muxed */
+    public void messageAvailable(I2PSession session, int msgId, long size) {}
+
+    public void reportAbuse(I2PSession session, int severity) {}
+
+    public void disconnected(I2PSession session) {
+        if (_log.shouldLog(Log.WARN))
+            _log.warn("KRPC disconnected");
+    }
+
+    public void errorOccurred(I2PSession session, String message, Throwable error) {
+        if (_log.shouldLog(Log.WARN))
+            _log.warn("KRPC got error msg: ", error);
+    }
+
+    /**
+     * Cleaner-upper
+     */
+    private class Cleaner extends SimpleTimer2.TimedEvent {
+
+        public Cleaner() {
+            super(SimpleTimer2.getInstance(), CLEAN_TIME);
+        }
+
+        public void timeReached() {
+            if (!_isRunning)
+                return;
+            long now = _context.clock().now();
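+            // expire outbound tokens, inbound tokens, and nodes that are past their maximum ages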
+            for (Iterator<Token> iter = _outgoingTokens.keySet().iterator(); iter.hasNext(); ) {
+                Token tok = iter.next();
+                if (tok.lastSeen() < now - MAX_TOKEN_AGE)
+                    iter.remove();
+            }
+            for (Iterator<Token> iter = _incomingTokens.values().iterator(); iter.hasNext(); ) {
+                Token tok = iter.next();
+                if (tok.lastSeen() < now - MAX_INBOUND_TOKEN_AGE)
+                    iter.remove();
+            }
+            // TODO sent queries?
+            for (Iterator<NodeInfo> iter = _knownNodes.values().iterator(); iter.hasNext(); ) {
+                NodeInfo ni = iter.next();
+                if (ni.lastSeen() < now - MAX_NODEINFO_AGE)
+                    iter.remove();
+            }
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("KRPC cleaner done, now with " +
+                          _outgoingTokens.size() + " sent Tokens, " +
+                          _incomingTokens.size() + " rcvd Tokens, " +
+                          _knownNodes.size() + " known peers, " +
+                          _sentQueries.size() + " queries awaiting response");
+            schedule(CLEAN_TIME);
+        }
+    }
+
+    /**
+     * Fire off explorer thread
+     */
+    private class Explorer extends SimpleTimer2.TimedEvent {
+
+        public Explorer(long delay) {
+            super(SimpleTimer2.getInstance(), delay);
+        }
+
+        public void timeReached() {
+            if (!_isRunning)
+                return;
+            if (_knownNodes.size() > 0)
+                (new I2PAppThread(new ExplorerThread(), "DHT Explore", true)).start();
+            else
+                schedule(60*1000);
+        }
+    }
+
+    /**
+     * explorer thread
+     */
+    private class ExplorerThread implements Runnable {
+
+        public void run() {
+            if (!_isRunning)
+                return;
+            if (!_hasBootstrapped) {
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Bootstrap start size: " + _knownNodes.size());
+                explore(_myNID, 8, 60*1000, 1);
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Bootstrap done size: " + _knownNodes.size());
+                _hasBootstrapped = true;
+            }
+            if (!_isRunning)
+                return;
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Explore start size: " + _knownNodes.size());
+            List<NID> keys = _knownNodes.getExploreKeys();
+            for (NID nid : keys) {
+                explore(nid, 8, 60*1000, 1);
+            }
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Explore done size: " + _knownNodes.size());
+            new Explorer(EXPLORE_TIME);
+        }
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/MsgID.java b/apps/i2psnark/java/src/org/klomp/snark/dht/MsgID.java
new file mode 100644
index 0000000000000000000000000000000000000000..6d53f7c8fb1a17bee2815aa54b6908f56766020e
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/MsgID.java
@@ -0,0 +1,32 @@
+package org.klomp.snark.dht;
+/*
+ *  GPLv2
+ */
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.ByteArray;
+
+/**
+ *  Used for both incoming and outgoing message IDs
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class MsgID extends ByteArray {
+
+    private static final int MY_TOK_LEN = 8;
+
+    /** outgoing - generate a random ID */
+    public MsgID(I2PAppContext ctx) {
+        super(null);
+        byte[] data = new byte[MY_TOK_LEN];
+        ctx.random().nextBytes(data);
+        setData(data);
+        setValid(MY_TOK_LEN);
+    }
+
+    /** incoming  - save the ID (arbitrary length) */
+    public MsgID(byte[] data) {
+        super(data);
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/NID.java b/apps/i2psnark/java/src/org/klomp/snark/dht/NID.java
new file mode 100644
index 0000000000000000000000000000000000000000..f61c857d8ff42b75960b5d5af68ca40e1f578da4
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/NID.java
@@ -0,0 +1,46 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import net.i2p.crypto.SHA1Hash;
+import net.i2p.util.Clock;
+
+/**
+ *  A 20-byte peer ID, used as a Map key in lots of places.
+ *  Must be public for constructor in KBucketSet.generateRandomKey()
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+public class NID extends SHA1Hash {
+
+    private long lastSeen;
+    private int fails;
+
+    private static final int MAX_FAILS = 3;
+
+    public NID() {
+        super(null);
+    }
+
+    public NID(byte[] data) {
+        super(data);
+    }
+
+    public long lastSeen() {
+        return lastSeen;
+    }
+
+    public void setLastSeen() {
+        lastSeen = Clock.getInstance().now();
+        fails = 0;
+    }
+
+    /**
+     *  @return if more than max timeouts
+     */
+    public boolean timeout() {
+        return fails++ > MAX_FAILS;
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfo.java b/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..9ed1a2912d428486652b4703804a77217de590fa
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfo.java
@@ -0,0 +1,253 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import net.i2p.data.Base64;
+import net.i2p.data.DataFormatException;
+import net.i2p.data.DataHelper;
+import net.i2p.data.Destination;
+import net.i2p.data.Hash;
+import net.i2p.data.SimpleDataStructure;
+import net.i2p.util.RandomSource;
+
+/**
+ *  A Node ID, Hash, and port, and an optional Destination.
+ *  This is what DHTNodes remembers. The DHT tracker just stores Hashes.
+ *  getData() returns the 54 byte compact info (NID, Hash, port).
+ *
+ *  Things are a little tricky in KRPC since we exchange Hashes and don't
+ *  always have the Destination.
+ *  The compact info is immutable. The Destination may be added later.
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+
+class NodeInfo extends SimpleDataStructure {
+
+    private final NID nID;
+    private final Hash hash;
+    private Destination dest;
+    private final int port;
+
+    public static final int LENGTH = NID.HASH_LENGTH + Hash.HASH_LENGTH + 2;
+
+    /**
+     * With a fake NID used for pings
+     */
+    public NodeInfo(Destination dest, int port) {
+        super();
+        this.nID = KRPC.FAKE_NID;
+        this.dest = dest;
+        this.hash = dest.calculateHash();
+        this.port = port;
+        initialize();
+    }
+
+    /**
+     * Use this if we have the full destination
+     * @throws IllegalArgumentException
+     */
+    public NodeInfo(NID nID, Destination dest, int port) {
+        super();
+        this.nID = nID;
+        this.dest = dest;
+        this.hash = dest.calculateHash();
+        this.port = port;
+        initialize();
+        verify();
+    }
+
+    /**
+     * No Destination yet available
+     * @throws IllegalArgumentException
+     */
+    public NodeInfo(NID nID, Hash hash, int port) {
+        super();
+        this.nID = nID;
+        this.hash = hash;
+        this.port = port;
+        initialize();
+        verify();
+    }
+
+    /**
+     * No Destination yet available
+     * @param compactInfo 20 byte node ID, 32 byte destHash, 2 byte port
+     * @param offset starting at this offset in compactInfo
+     * @throws IllegalArgumentException
+     * @throws ArrayIndexOutOfBoundsException
+     */
+    public NodeInfo(byte[] compactInfo, int offset) {
+        super();
+        byte[] d = new byte[LENGTH];
+        System.arraycopy(compactInfo, offset, d, 0, LENGTH);
+        setData(d);
+        byte[] ndata = new byte[NID.HASH_LENGTH];
+        System.arraycopy(d, 0, ndata, 0, NID.HASH_LENGTH);
+        this.nID = new NID(ndata);
+        this.hash = Hash.create(d, NID.HASH_LENGTH);
+        this.port = (int) DataHelper.fromLong(d, NID.HASH_LENGTH + Hash.HASH_LENGTH, 2);
+        if (port <= 0 || port >= 65535)
+            throw new IllegalArgumentException("Bad port");
+        verify();
+    }
+
+    /**
+     * Create from persistent storage string.
+     * Format: NID:Hash:Destination:port
+     * First 3 in base 64; Destination may be empty string
+     * @throws IllegalArgumentException
+     */
+    public NodeInfo(String s) throws DataFormatException {
+        super();
+        String[] parts = s.split(":", 4);
+        if (parts.length != 4)
+            throw new DataFormatException("Bad format");
+        byte[] nid = Base64.decode(parts[0]);
+        if (nid == null)
+            throw new DataFormatException("Bad NID");
+        nID = new NID(nid);
+        byte[] h = Base64.decode(parts[1]);
+        if (h == null)
+            throw new DataFormatException("Bad hash");
+        //hash = new Hash(h);
+        hash = Hash.create(h);
+        if (parts[2].length() > 0)
+            dest = new Destination(parts[2]);
+        try {
+            port = Integer.parseInt(parts[3]);
+        } catch (NumberFormatException nfe) {
+            throw new DataFormatException("Bad port", nfe);
+        }
+        initialize();
+    }
+
+
+    /**
+     * Creates 54-byte compact info
+     * @throws IllegalArgumentException
+     */
+    private void initialize() {
+        if (port <= 0 || port >= 65535)
+            throw new IllegalArgumentException("Bad port");
+        byte[] compactInfo = new byte[LENGTH];
+        System.arraycopy(nID.getData(), 0, compactInfo, 0, NID.HASH_LENGTH);
+        System.arraycopy(hash.getData(), 0, compactInfo, NID.HASH_LENGTH, Hash.HASH_LENGTH);
+        DataHelper.toLong(compactInfo, NID.HASH_LENGTH + Hash.HASH_LENGTH, 2, port);
+        setData(compactInfo);
+    }
+
+    /**
+     * Generate a secure NID that matches the Hash and port.
+     * Rules: First 4 bytes must match Hash.
+     * Next 2 bytes must match Hash ^ port.
+     * Remaining bytes may be random.
+     *
+     * @throws IllegalArgumentException
+     */
+    public static NID generateNID(Hash h, int p, RandomSource random) {
+        byte[] n = new byte[NID.HASH_LENGTH];
+        System.arraycopy(h.getData(), 0, n, 0, 6);
+        n[4] ^= (byte) (p >> 8);
+        n[5] ^= (byte) p;
+        random.nextBytes(n, 6, NID.HASH_LENGTH - 6);
+        return new NID(n);
+    }
+
+    /**
+     * Verify the NID matches the Hash.
+     * See generateNID() for requirements.
+     * @throws IllegalArgumentException on mismatch
+     */
+    private void verify() {
+        if (!KRPC.SECURE_NID)
+            return;
+        byte[] nb = nID.getData();
+        byte[] hb = hash.getData();
+        if ((!DataHelper.eq(nb, 0, hb, 0, 4)) ||
+            ((nb[4] ^ (port >> 8)) & 0xff) != (hb[4] & 0xff) ||
+            ((nb[5] ^ port) & 0xff) != (hb[5] & 0xff))
+            throw new IllegalArgumentException("NID/Hash mismatch");
+    }
+
+    public int length() {
+        return LENGTH;
+    }
+
+    public NID getNID() {
+        return this.nID;
+    }
+
+    /** @return may be null if we don't have it */
+    public Destination getDestination() {
+        return this.dest;
+    }
+
+    public Hash getHash() {
+        return this.hash;
+    }
+
+    @Override
+    public Hash calculateHash() {
+        return this.hash;
+    }
+
+    /**
+     * This can come in later but the hash must match.
+     * @throws IllegalArgumentException if hash of dest doesn't match previous hash
+     */
+    public void setDestination(Destination dest) throws IllegalArgumentException {
+        if (this.dest != null)
+            return;
+        if (!dest.calculateHash().equals(this.hash))
+            throw new IllegalArgumentException("Hash mismatch, was: " + this.hash + " new: " + dest.calculateHash());
+        this.dest = dest;
+    }
+
+    public int getPort() {
+        return this.port;
+    }
+
+    public long lastSeen() {
+        return nID.lastSeen();
+    }
+
+    @Override
+    public int hashCode() {
+        return super.hashCode() ^ nID.hashCode() ^ port;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        try {
+            NodeInfo ni = (NodeInfo) o;
+            // assume dest matches, ignore it
+            return this.hash.equals(ni.hash) && nID.equals(ni.nID) && port == ni.port;
+        } catch (Exception e) {
+            return false;
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "NodeInfo: " + nID + ' ' + hash + " port: " + port + (dest != null ? " known dest" : " null dest");
+    }
+
+    /**
+     * To persistent storage string.
+     * Format: NID:Hash:Destination:port
+     * First 3 in base 64; Destination may be empty string
+     */
+    public String toPersistentString() {
+        StringBuilder buf = new StringBuilder(650);
+        buf.append(nID.toBase64()).append(':');
+        buf.append(hash.toBase64()).append(':');
+        if (dest != null)
+            buf.append(dest.toBase64());
+        buf.append(':').append(port);
+        return buf.toString();
+    }
+
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java b/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java
new file mode 100644
index 0000000000000000000000000000000000000000..9995dfe57947f6843c5479a179a569ad011a2226
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java
@@ -0,0 +1,31 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import java.util.Comparator;
+
+import net.i2p.crypto.SHA1Hash;
+import net.i2p.data.DataHelper;
+
+/**
+ *  Closest to an InfoHash or NID key.
+ *  Use for NodeInfos.
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class NodeInfoComparator implements Comparator<NodeInfo> {
+    private final byte[] _base;
+
+    public NodeInfoComparator(SHA1Hash h) {
+        _base = h.getData();
+    }
+
+    public int compare(NodeInfo lhs, NodeInfo rhs) {
+        byte lhsDelta[] = DataHelper.xor(lhs.getNID().getData(), _base);
+        byte rhsDelta[] = DataHelper.xor(rhs.getNID().getData(), _base);
+        return DataHelper.compareTo(lhsDelta, rhsDelta);
+    }
+
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/Peer.java b/apps/i2psnark/java/src/org/klomp/snark/dht/Peer.java
new file mode 100644
index 0000000000000000000000000000000000000000..84fc263a7d59711bcc000a875adbdc8301028225
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/Peer.java
@@ -0,0 +1,30 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import net.i2p.data.Hash;
+
+/**
+ *  A single peer for a single torrent.
+ *  This is what the DHT tracker remembers.
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class Peer extends Hash {
+
+    private long lastSeen;
+
+    public Peer(byte[] data) {
+        super(data);
+    }
+
+    public long lastSeen() {
+        return lastSeen;
+    }
+
+    public void setLastSeen(long now) {
+        lastSeen = now;
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/Peers.java b/apps/i2psnark/java/src/org/klomp/snark/dht/Peers.java
new file mode 100644
index 0000000000000000000000000000000000000000..f16d903ec6c16354dbb85c06c2cacf05c1283570
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/Peers.java
@@ -0,0 +1,21 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, modded and relicensed to GPLv2
+ */
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import net.i2p.data.Hash;
+
+/**
+ *  All the peers for a single torrent
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class Peers extends ConcurrentHashMap<Hash, Peer> {
+
+    public Peers() {
+        super();
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/PersistDHT.java b/apps/i2psnark/java/src/org/klomp/snark/dht/PersistDHT.java
new file mode 100644
index 0000000000000000000000000000000000000000..730137caa5c12ed556c6d8a0adf12230cc64cd2e
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/PersistDHT.java
@@ -0,0 +1,82 @@
+package org.klomp.snark.dht;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.DataFormatException;
+import net.i2p.util.Log;
+import net.i2p.util.SecureFileOutputStream;
+
+/**
+ *  Retrieve / Store the local DHT in a file
+ *
+ */
+abstract class PersistDHT {
+
+    private static final long MAX_AGE = 60*60*1000;
+
+    public static synchronized void loadDHT(KRPC krpc, File file) {
+        Log log = I2PAppContext.getGlobalContext().logManager().getLog(PersistDHT.class);
+        int count = 0;
+        FileInputStream in = null;
+        try {
+            in = new FileInputStream(file);
+            BufferedReader br = new BufferedReader(new InputStreamReader(in, "ISO-8859-1"));
+            String line = null;
+            while ( (line = br.readLine()) != null) {
+                if (line.startsWith("#"))
+                    continue;
+                try {
+                    krpc.heardAbout(new NodeInfo(line));
+                    count++;
+                    // TODO limit number? this will flush the router's SDS caches
+                } catch (IllegalArgumentException iae) {
+                    if (log.shouldLog(Log.WARN))
+                        log.warn("Error reading DHT entry", iae);
+                } catch (DataFormatException dfe) {
+                    if (log.shouldLog(Log.WARN))
+                        log.warn("Error reading DHT entry", dfe);
+                }
+            }
+        } catch (IOException ioe) {
+            if (log.shouldLog(Log.WARN) && file.exists())
+                log.warn("Error reading the DHT File", ioe);
+        } finally {
+            if (in != null) try { in.close(); } catch (IOException ioe) {}
+        }
+        if (log.shouldLog(Log.INFO))
+            log.info("Loaded " + count + " nodes from " + file);
+    }
+
+    public static synchronized void saveDHT(DHTNodes nodes, File file) {
+        Log log = I2PAppContext.getGlobalContext().logManager().getLog(PersistDHT.class);
+        int count = 0;
+        long maxAge = I2PAppContext.getGlobalContext().clock().now() - MAX_AGE;
+        PrintWriter out = null;
+        try {
+            out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new SecureFileOutputStream(file), "ISO-8859-1")));
+            out.println("# DHT nodes, format is NID:Hash:Destination:port");
+            for (NodeInfo ni : nodes.values()) {
+                 if (ni.lastSeen() < maxAge)
+                     continue;
+                 // DHTNodes shouldn't contain us, if that changes check here
+                 out.println(ni.toPersistentString());
+                 count++;
+            }
+        } catch (IOException ioe) {
+            if (log.shouldLog(Log.WARN))
+                log.warn("Error writing the DHT File", ioe);
+        } finally {
+            if (out != null) out.close();
+        }
+        if (log.shouldLog(Log.INFO))
+            log.info("Stored " + count + " nodes to " + file);
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/Token.java b/apps/i2psnark/java/src/org/klomp/snark/dht/Token.java
new file mode 100644
index 0000000000000000000000000000000000000000..37a43575db736f057ae135addf92d16b9adcbdcc
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/Token.java
@@ -0,0 +1,71 @@
+package org.klomp.snark.dht;
+/*
+ *  GPLv2
+ */
+
+import java.util.Date;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.ByteArray;
+import net.i2p.data.DataHelper;
+
+/**
+ *  Used for both outgoing and incoming tokens
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class Token extends ByteArray {
+
+    private static final int MY_TOK_LEN = 8;
+    private final long lastSeen;
+
+    /** outgoing - generate a random token */
+    public Token(I2PAppContext ctx) {
+        super(null);
+        byte[] data = new byte[MY_TOK_LEN];
+        ctx.random().nextBytes(data);
+        setData(data);
+        setValid(MY_TOK_LEN);
+        lastSeen = ctx.clock().now();
+    }
+
+    /** incoming  - save the token (arbitrary length) */
+    public Token(I2PAppContext ctx, byte[] data) {
+        super(data);
+        lastSeen = ctx.clock().now();
+    }
+
+    /** incoming  - for lookup only, not storage, lastSeen is 0 */
+    public Token(byte[] data) {
+        super(data);
+        lastSeen = 0;
+    }
+
+    public long lastSeen() {
+        return lastSeen;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(64);
+        buf.append("[Token: ");
+        byte[] bs = getData();
+        if (bs.length == 0) {
+            buf.append("0 bytes");
+        } else {
+            buf.append(bs.length).append(" bytes: 0x");
+            // hex encode each byte, the same way BEValue does it
+            for (int i = 0; i < bs.length; i++) {
+                int b = bs[i] & 0xff;
+                if (b < 16)
+                    buf.append('0');
+                buf.append(Integer.toHexString(b));
+            }
+        }
+        if (lastSeen > 0)
+            buf.append(" created ").append((new Date(lastSeen)).toString());
+        buf.append(']');
+        return buf.toString();
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/TokenKey.java b/apps/i2psnark/java/src/org/klomp/snark/dht/TokenKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..996d43351ea1594835bd0d1482fe1a67b258f028
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/TokenKey.java
@@ -0,0 +1,20 @@
+package org.klomp.snark.dht;
+/*
+ *  GPLv2
+ */
+
+import net.i2p.crypto.SHA1Hash;
+import net.i2p.data.DataHelper;
+
+/**
+ *  Used to index incoming Tokens
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class TokenKey extends SHA1Hash {
+
+    public TokenKey(NID nID, InfoHash ih) {
+        super(DataHelper.xor(nID.getData(), ih.getData()));
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/Torrents.java b/apps/i2psnark/java/src/org/klomp/snark/dht/Torrents.java
new file mode 100644
index 0000000000000000000000000000000000000000..304b7c9491a35e09d1225fdf92a141654e075bea
--- /dev/null
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/Torrents.java
@@ -0,0 +1,19 @@
+package org.klomp.snark.dht;
+/*
+ *  From zzzot, relicensed to GPLv2
+ */
+
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ *  All the torrents
+ *
+ * @since 0.8.4
+ * @author zzz
+ */
+class Torrents extends ConcurrentHashMap<InfoHash, Peers> {
+
+    public Torrents() {
+        super();
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java b/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java
index 21cf743569f9aca0ff1768c4d87eb80771adbbfb..3eafa3ecd83e5c0f8c8f29267411c782beacfe6d 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java
@@ -42,6 +42,7 @@ import org.klomp.snark.SnarkManager;
 import org.klomp.snark.Storage;
 import org.klomp.snark.Tracker;
 import org.klomp.snark.TrackerClient;
+import org.klomp.snark.dht.DHT;
 
 import org.mortbay.jetty.servlet.DefaultServlet;
 import org.mortbay.resource.Resource;
@@ -470,6 +471,14 @@ public class I2PSnarkServlet extends DefaultServlet {
             out.write(", ");
             out.write(DataHelper.formatSize2(stats[5]) + "B, ");
             out.write(ngettext("1 connected peer", "{0} connected peers", (int) stats[4]));
+            DHT dht = _manager.util().getDHT();
+            if (dht != null) {
+                int dhts = dht.size();
+                if (dhts > 0) {
+                    out.write(", ");
+                    out.write(ngettext("1 DHT peer", "{0} DHT peers", dhts));
+                }
+            }
             out.write("</th>\n");
             if (_manager.util().connected()) {
                 out.write("    <th align=\"right\">" + formatSize(stats[0]) + "</th>\n" +
@@ -699,11 +708,12 @@ public class I2PSnarkServlet extends DefaultServlet {
             String refreshDel = req.getParameter("refreshDelay");
             String startupDel = req.getParameter("startupDelay");
             boolean useOpenTrackers = req.getParameter("useOpenTrackers") != null;
+            boolean useDHT = req.getParameter("useDHT") != null;
             //String openTrackers = req.getParameter("openTrackers");
             String theme = req.getParameter("theme");
             _manager.updateConfig(dataDir, filesPublic, autoStart, refreshDel, startupDel,
                                   seedPct, eepHost, eepPort, i2cpHost, i2cpPort, i2cpOpts,
-                                  upLimit, upBW, useOpenTrackers, theme);
+                                  upLimit, upBW, useOpenTrackers, useDHT, theme);
         } else if ("Save2".equals(action)) {
             String taction = req.getParameter("taction");
             if (taction != null)
@@ -1492,6 +1502,7 @@ public class I2PSnarkServlet extends DefaultServlet {
         boolean autoStart = _manager.shouldAutoStart();
         boolean useOpenTrackers = _manager.util().shouldUseOpenTrackers();
         //String openTrackers = _manager.util().getOpenTrackerString();
+        boolean useDHT = _manager.util().shouldUseDHT();
         //int seedPct = 0;
        
         out.write("<form action=\"/i2psnark/configure\" method=\"POST\">\n" +
@@ -1605,6 +1616,14 @@ public class I2PSnarkServlet extends DefaultServlet {
                   + (useOpenTrackers ? "checked " : "") 
                   + "title=\"");
         out.write(_("If checked, announce torrents to open trackers as well as the tracker listed in the torrent file"));
+        out.write("\" ></td></tr>\n" +
+        
+                  "<tr><td>");
+        out.write(_("Enable DHT") + " (**BETA**)");
+        out.write(": <td><input type=\"checkbox\" class=\"optbox\" name=\"useDHT\" value=\"true\" " 
+                  + (useDHT ? "checked " : "") 
+                  + "title=\"");
+        out.write(_("If checked, use DHT"));
         out.write("\" ></td></tr>\n");
 
         //          "<tr><td>");
diff --git a/apps/ministreaming/java/src/net/i2p/client/streaming/I2PSocketManagerFactory.java b/apps/ministreaming/java/src/net/i2p/client/streaming/I2PSocketManagerFactory.java
index 113d1e7ffdc922b277c5e5236afe89b59e5538b7..5c864110a30baa2da30a81cba88b52e2fbd49ce6 100644
--- a/apps/ministreaming/java/src/net/i2p/client/streaming/I2PSocketManagerFactory.java
+++ b/apps/ministreaming/java/src/net/i2p/client/streaming/I2PSocketManagerFactory.java
@@ -6,6 +6,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.lang.reflect.Constructor;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Properties;
 
 import net.i2p.I2PAppContext;
@@ -36,7 +37,7 @@ public class I2PSocketManagerFactory {
      * @return the newly created socket manager, or null if there were errors
      */
     public static I2PSocketManager createManager() {
-        return createManager(getHost(), getPort(), System.getProperties());
+        return createManager(getHost(), getPort(), (Properties) System.getProperties().clone());
     }
     
     /**
@@ -59,7 +60,7 @@ public class I2PSocketManagerFactory {
      * @return the newly created socket manager, or null if there were errors
      */
     public static I2PSocketManager createManager(String host, int port) {
-        return createManager(host, port, System.getProperties());
+        return createManager(host, port, (Properties) System.getProperties().clone());
     }
 
     /**
@@ -95,7 +96,7 @@ public class I2PSocketManagerFactory {
      * @return the newly created socket manager, or null if there were errors
      */
     public static I2PSocketManager createManager(InputStream myPrivateKeyStream) {
-        return createManager(myPrivateKeyStream, getHost(), getPort(), System.getProperties());
+        return createManager(myPrivateKeyStream, getHost(), getPort(), (Properties) System.getProperties().clone());
     }
     
     /**
@@ -126,10 +127,11 @@ public class I2PSocketManagerFactory {
         I2PClient client = I2PClientFactory.createClient();
         if (opts == null)
             opts = new Properties();
-        for (Iterator iter = System.getProperties().keySet().iterator(); iter.hasNext(); ) {
-            String name = (String)iter.next();
+        Properties syscopy = (Properties) System.getProperties().clone();
+        for (Map.Entry e : syscopy.entrySet()) {
+            String name = (String) e.getKey();
             if (!opts.containsKey(name))
-                opts.setProperty(name, System.getProperty(name));
+                opts.setProperty(name, (String) e.getValue());
         }
         //boolean oldLib = DEFAULT_MANAGER.equals(opts.getProperty(PROP_MANAGER, DEFAULT_MANAGER));
         //if (oldLib && false) {
diff --git a/apps/routerconsole/jsp/console.jsp b/apps/routerconsole/jsp/console.jsp
index aa9168caf7777aff50b267a605441098321ceb12..b70c4170eab4835353d2e850d6024a1177851ca5 100644
--- a/apps/routerconsole/jsp/console.jsp
+++ b/apps/routerconsole/jsp/console.jsp
@@ -36,26 +36,26 @@
  <jsp:useBean class="net.i2p.router.web.ContentHelper" id="contenthelper" scope="request" />
  <div class="welcome">
   <div class="langbox"> <% /* English, then alphabetical by English name please */ %>
-    <a href="/?lang=en&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=us" title="English" alt="English"></a>
-    <a href="/?lang=ar&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=lang_ar" title="عربية" alt="عربية"></a>
-    <a href="/?lang=zh&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=cn" title="中文" alt="中文"></a>
-    <a href="/?lang=cs&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=cz" title="čeština" alt="čeština"></a>
-    <a href="/?lang=da&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=dk" title="Dansk" alt="Dansk"></a>
-    <a href="/?lang=de&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=de" title="Deutsch" alt="Deutsch"></a>
-    <a href="/?lang=ee&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=ee" title="Eesti" alt="Eesti"></a>
-    <a href="/?lang=es&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=es" title="Español" alt="Español"></a>
-    <a href="/?lang=fi&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=fi" title="Suomi" alt="Suomi"></a>
-    <a href="/?lang=fr&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=fr" title="Français" alt="Français"></a><br>
-    <a href="/?lang=el&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=gr" title="ελληνικά" alt="ελληνικά"></a>
-    <a href="/?lang=hu&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=hu" title="Magyar" alt="Magyar"></a>
-    <a href="/?lang=it&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=it" title="Italiano" alt="Italiano"></a>
-    <a href="/?lang=nl&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=nl" title="Nederlands" alt="Nederlands"></a>
-    <a href="/?lang=pl&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=pl" title="Polski" alt="Polski"></a>
-    <a href="/?lang=pt&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=pt" title="Português" alt="Português"></a>
-    <a href="/?lang=ru&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=ru" title="Русский" alt="Русский"></a>
-    <a href="/?lang=sv&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=se" title="Svenska" alt="Svenska"></a>
-    <a href="/?lang=uk&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=ua" title="Українська" alt="Українська"></a>
-    <a href="/?lang=vi&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=vn" title="Tiếng Việt" alt="Tiếng Việt"></a>
+    <a href="/console?lang=en&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=us" title="English" alt="English"></a>
+    <a href="/console?lang=ar&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=lang_ar" title="عربية" alt="عربية"></a>
+    <a href="/console?lang=zh&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=cn" title="中文" alt="中文"></a>
+    <a href="/console?lang=cs&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=cz" title="čeština" alt="čeština"></a>
+    <a href="/console?lang=da&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=dk" title="Dansk" alt="Dansk"></a>
+    <a href="/console?lang=de&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=de" title="Deutsch" alt="Deutsch"></a>
+    <a href="/console?lang=ee&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=ee" title="Eesti" alt="Eesti"></a>
+    <a href="/console?lang=es&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=es" title="Español" alt="Español"></a>
+    <a href="/console?lang=fi&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=fi" title="Suomi" alt="Suomi"></a>
+    <a href="/console?lang=fr&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=fr" title="Français" alt="Français"></a><br>
+    <a href="/console?lang=el&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=gr" title="ελληνικά" alt="ελληνικά"></a>
+    <a href="/console?lang=hu&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=hu" title="Magyar" alt="Magyar"></a>
+    <a href="/console?lang=it&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=it" title="Italiano" alt="Italiano"></a>
+    <a href="/console?lang=nl&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=nl" title="Nederlands" alt="Nederlands"></a>
+    <a href="/console?lang=pl&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=pl" title="Polski" alt="Polski"></a>
+    <a href="/console?lang=pt&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=pt" title="Português" alt="Português"></a>
+    <a href="/console?lang=ru&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=ru" title="Русский" alt="Русский"></a>
+    <a href="/console?lang=sv&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=se" title="Svenska" alt="Svenska"></a>
+    <a href="/console?lang=uk&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=ua" title="Українська" alt="Українська"></a>
+    <a href="/console?lang=vi&amp;consoleNonce=<%=consoleNonce%>"><img height="11" width="16" style="padding: 0 2px;" src="/flags.jsp?c=vn" title="Tiếng Việt" alt="Tiếng Việt"></a>
   </div>
   <a name="top"></a>
   <h2><%=intl._("Welcome to I2P")%></h2>
diff --git a/apps/sam/java/src/net/i2p/sam/SAMStreamSession.java b/apps/sam/java/src/net/i2p/sam/SAMStreamSession.java
index d43f3a576ddbcd7fd6af7ad8348c5bbf37bbcfa6..ab5d9068b751aa4ccc8692ea652a694c5eb030b4 100644
--- a/apps/sam/java/src/net/i2p/sam/SAMStreamSession.java
+++ b/apps/sam/java/src/net/i2p/sam/SAMStreamSession.java
@@ -124,8 +124,7 @@ public class SAMStreamSession {
 
         _log.debug("SAM STREAM session instantiated");
 
-        Properties allprops = new Properties();
-        allprops.putAll(System.getProperties());
+        Properties allprops = (Properties) System.getProperties().clone();
         allprops.putAll(props);
 
         String i2cpHost = allprops.getProperty(I2PClient.PROP_TCP_HOST, "127.0.0.1");
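
Rationale for this hunk (and the matching changes in SAMv3StreamSession, I2PAppContext and I2PSessionImpl below): copying the live system Properties with putAll() iterates it with a fail-fast iterator, so a concurrent System.setProperty() elsewhere can throw ConcurrentModificationException (ticket #680), whereas Hashtable.clone() takes its snapshot under the table's own lock. A minimal standalone sketch of the two patterns; the class, thread and key names are hypothetical:

    import java.util.Properties;

    // Sketch only: shows why cloning the live system Properties is preferred
    // over copying it with putAll() while another thread may be calling
    // System.setProperty().
    public class SysPropsCopyDemo {
        public static void main(String[] args) throws InterruptedException {
            // Background writer mutating the shared system-properties table
            Thread writer = new Thread(new Runnable() {
                public void run() {
                    for (int i = 0; i < 1000000; i++)
                        System.setProperty("demo.key." + (i % 64), Integer.toString(i));
                }
            });
            writer.start();

            for (int i = 0; i < 1000; i++) {
                // Pre-patch pattern: putAll() iterates the live Hashtable with a
                // fail-fast iterator and can throw ConcurrentModificationException
                // under concurrent writes (left commented out here):
                //   Properties unsafe = new Properties();
                //   unsafe.putAll(System.getProperties());

                // Post-patch pattern: Hashtable.clone() is a synchronized method,
                // so the snapshot is taken atomically and is detached from the
                // live table; local additions no longer touch System properties.
                Properties allprops = (Properties) System.getProperties().clone();
                allprops.setProperty("local.override", "value");
            }
            writer.join();
        }
    }
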
diff --git a/apps/sam/java/src/net/i2p/sam/SAMv3StreamSession.java b/apps/sam/java/src/net/i2p/sam/SAMv3StreamSession.java
index 699058613c143e885ed84a4e9e4a69f295d43d2f..159798f601c6387bf58777482d39c68c85de5e42 100644
--- a/apps/sam/java/src/net/i2p/sam/SAMv3StreamSession.java
+++ b/apps/sam/java/src/net/i2p/sam/SAMv3StreamSession.java
@@ -85,8 +85,7 @@ public class SAMv3StreamSession  extends SAMStreamSession implements SAMv3Handle
 
 	    	_log.debug("SAM STREAM session instantiated");
 
-	    	Properties allprops = new Properties();
-	    	allprops.putAll(System.getProperties());
+	        Properties allprops = (Properties) System.getProperties().clone();
 	    	allprops.putAll(rec.getProps());
 	    	
 	    	String i2cpHost = allprops.getProperty(I2PClient.PROP_TCP_HOST, "127.0.0.1");
diff --git a/build.xml b/build.xml
index d69a8f95fd877fb3a7192adc3bcd3233394bc8a3..b8c35ab6c0dbd70a5ae08ac0b196de75b67cc7e4 100644
--- a/build.xml
+++ b/build.xml
@@ -422,7 +422,7 @@
             <group title="BOB Bridge" packages="net.i2p.BOB" />
             <group title="BOB Demos" packages="net.i2p.BOB.Demos.echo.echoclient:net.i2p.BOB.Demos.echo.echoserver" />
             <group title="Desktopgui Application" packages="net.i2p.desktopgui:net.i2p.desktopgui.*" />
-            <group title="I2PSnark Application" packages="org.klomp.snark:org.klomp.snark.*" />
+            <group title="I2PSnark Application" packages="org.klomp.snark:org.klomp.snark.*:net.i2p.kademlia" />
             <group title="I2PTunnel Application" packages="net.i2p.i2ptunnel:net.i2p.i2ptunnel.*" />
             <group title="Jetty Logging" packages="net.i2p.jetty" />
             <group title="SAM Bridge" packages="net.i2p.sam" />
diff --git a/core/java/src/net/i2p/I2PAppContext.java b/core/java/src/net/i2p/I2PAppContext.java
index 313f6e445e74d09bf48a32a125b7ab02b288906d..15189f876ec6a830609f5f07063428d0e5949f09 100644
--- a/core/java/src/net/i2p/I2PAppContext.java
+++ b/core/java/src/net/i2p/I2PAppContext.java
@@ -516,7 +516,8 @@ public class I2PAppContext {
      * @return set of Strings containing the names of defined system properties
      */
     public Set getPropertyNames() { 
-        Set names = new HashSet(System.getProperties().keySet());
+        // clone to avoid ConcurrentModificationException
+        Set names = new HashSet(((Properties) System.getProperties().clone()).keySet());
         if (_overrideProps != null)
             names.addAll(_overrideProps.keySet());
         return names;
@@ -531,8 +532,8 @@ public class I2PAppContext {
      * @since 0.8.4
      */
     public Properties getProperties() { 
-        Properties rv = new Properties();
-        rv.putAll(System.getProperties());
+        // clone to avoid ConcurrentModificationException
+        Properties rv = (Properties) System.getProperties().clone();
         rv.putAll(_overrideProps);
         return rv;
     }
diff --git a/core/java/src/net/i2p/client/I2PSession.java b/core/java/src/net/i2p/client/I2PSession.java
index 0e7f6269c25affcbe62a0136c79056823b88801d..c273e378489869dd762734dce7ed30a8cdd66645 100644
--- a/core/java/src/net/i2p/client/I2PSession.java
+++ b/core/java/src/net/i2p/client/I2PSession.java
@@ -258,5 +258,16 @@ public interface I2PSession {
     public static final int PROTO_ANY = 0;
     public static final int PROTO_UNSPECIFIED = 0;
     public static final int PROTO_STREAMING = 6;
+
+    /**
+     *  Generally a signed datagram, but could
+     *  also be a raw datagram, depending on the application
+     */
     public static final int PROTO_DATAGRAM = 17;
+
+    /**
+     *  A raw (unsigned) datagram
+     *  @since 0.9.1
+     */
+    public static final int PROTO_DATAGRAM_RAW = 18;
 }
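
A note on the new constant: PROTO_DATAGRAM_RAW lets a datagram application tag unsigned traffic with its own I2CP protocol number so receivers can tell it apart from signed datagrams. A minimal sketch of choosing the protocol when sending; the sendMessage overload taking proto and port arguments is assumed to be available, and the helper class, destination and ports are hypothetical:

    import net.i2p.client.I2PSession;
    import net.i2p.client.I2PSessionException;
    import net.i2p.data.Destination;

    // Illustrative helper only; the session, destination and ports are assumed
    // to be set up elsewhere by the application.
    public class DatagramSendSketch {
        /**
         * Send a payload, marking it as a raw (unsigned) or signed datagram
         * so the receiver can demultiplex on the I2CP protocol number.
         */
        static boolean send(I2PSession session, Destination dest, byte[] payload,
                            boolean raw, int fromPort, int toPort)
                throws I2PSessionException {
            int proto = raw ? I2PSession.PROTO_DATAGRAM_RAW   // unsigned, @since 0.9.1
                            : I2PSession.PROTO_DATAGRAM;      // usually signed
            return session.sendMessage(dest, payload, proto, fromPort, toPort);
        }
    }

A receiver would then filter on the same protocol number when registering its listener, so raw and signed datagram handling stay separate.
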
diff --git a/core/java/src/net/i2p/client/I2PSessionImpl.java b/core/java/src/net/i2p/client/I2PSessionImpl.java
index ed670149f5050fd7ff0ace47cced15b54ff6260b..349f3d678289cb3f130644425d8419117b2e4e50 100644
--- a/core/java/src/net/i2p/client/I2PSessionImpl.java
+++ b/core/java/src/net/i2p/client/I2PSessionImpl.java
@@ -164,7 +164,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
         _log = context.logManager().getLog(getClass());
         _closed = true;
         if (options == null)
-            options = System.getProperties();
+            options = (Properties) System.getProperties().clone();
         loadConfig(options);
     }
 
diff --git a/history.txt b/history.txt
index eae3314f081307cc1d36e96708ee0b2647639dc3..b2a84d686eb35a9aed33b7f1e95df0c3ca88a276 100644
--- a/history.txt
+++ b/history.txt
@@ -1,3 +1,10 @@
+2012-08-06 zzz
+ * Clone System properties before iterating to avoid
+   ConcurrentModificationException (ticket #680)
+ * Console: Fix flag links on /console to return to same page
+ * i2psnark: Add support for DHT (disabled by default)
+ * jbigi: Add ARMv6 libjbigi.so for Raspberry Pi
+
 2012-08-05 zzz
  * I2PSessionImpl: One more volatile (ticket #659)
  * i2ptunnel, I2CP, EepGet: Buffer socket input streams (ticket #666)
diff --git a/installer/lib/jbigi/libjbigi-linux-armv6.so b/installer/lib/jbigi/libjbigi-linux-armv6.so
new file mode 100644
index 0000000000000000000000000000000000000000..1f9eb86cf323f3e2e96b9f276206543a708ac25f
Binary files /dev/null and b/installer/lib/jbigi/libjbigi-linux-armv6.so differ
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java
index 0725033fa600cfb5180ac82283f5194ee35b34b0..f6e6df3a5c0d10fbf1efc2f7069c342c2fe16f4e 100644
--- a/router/java/src/net/i2p/router/RouterVersion.java
+++ b/router/java/src/net/i2p/router/RouterVersion.java
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 3;
+    public final static long BUILD = 4;
 
     /** for example "-test" */
     public final static String EXTRA = "";