diff --git a/core/java/src/net/i2p/data/Hash.java b/core/java/src/net/i2p/data/Hash.java
index a35381056..721b57763 100644
--- a/core/java/src/net/i2p/data/Hash.java
+++ b/core/java/src/net/i2p/data/Hash.java
@@ -57,23 +57,31 @@ public class Hash extends DataStructureImpl {
         _base64ed = null;
     }
 
+    /**
+     * Prepare this hash's cache for xor values - very few hashes will need it,
+     * so we don't want to waste the memory, and lazy initialization would incur
+     * online overhead to verify the initialization.
+     *
+     */
+    public void prepareCache() {
+        synchronized (this) {
+            if (_xorCache == null)
+                _xorCache = new HashMap(MAX_CACHED_XOR);
+        }
+    }
+
     /**
      * Calculate the xor with the current object and the specified hash,
      * caching values where possible. Currently this keeps up to MAX_CACHED_XOR
      * (1024) entries, and uses an essentially random ejection policy. Later
-     * perhaps go for an LRU or FIFO? 
+     * perhaps go for an LRU or FIFO?
      *
+     * @throws IllegalStateException if you try to use the cache without first
+     *         preparing this object's cache via .prepareCache()
      */
-    public byte[] cachedXor(Hash key) {
-        if (_xorCache == null) {
-            // we dont want to create two of these
-            synchronized (this) {
-                if (_xorCache == null)
-                    _xorCache = new HashMap(MAX_CACHED_XOR);
-            }
-        }
-
-        // i think we can get away with this being outside the synchronized block
+    public byte[] cachedXor(Hash key) throws IllegalStateException {
+        if (_xorCache == null)
+            throw new IllegalStateException("To use the cache, you must first prepare it");
 
         byte[] distance = (byte[])_xorCache.get(key);
         if (distance == null) {
@@ -85,7 +93,8 @@ public class Hash extends DataStructureImpl {
             Set keys = new HashSet(toRemove);
             // this removes essentially random keys - we dont maintain any sort
             // of LRU or age. perhaps we should?
-            for (Iterator iter = _xorCache.keySet().iterator(); iter.hasNext(); )
+            int removed = 0;
+            for (Iterator iter = _xorCache.keySet().iterator(); iter.hasNext() && removed < toRemove; removed++)
                 keys.add(iter.next());
             for (Iterator iter = keys.iterator(); iter.hasNext(); )
                 _xorCache.remove(iter.next());
@@ -94,7 +103,7 @@ public class Hash extends DataStructureImpl {
                 _xorCache.put(key, (Object)distance);
                 cached = _xorCache.size();
             }
-            if (false && (_log.shouldLog(Log.DEBUG))) {
+            if (_log.shouldLog(Log.DEBUG)) {
                 // explicit buffer, since the compiler can't guess how long it'll be
                 StringBuffer buf = new StringBuffer(128);
                 buf.append("miss [").append(cached).append("] from ");
@@ -103,7 +112,7 @@ public class Hash extends DataStructureImpl {
                 _log.debug(buf.toString(), new Exception());
            }
         } else {
-            if (false && (_log.shouldLog(Log.DEBUG))) {
+            if (_log.shouldLog(Log.DEBUG)) {
                 // explicit buffer, since the compiler can't guess how long it'll be
                 StringBuffer buf = new StringBuffer(128);
                 buf.append("hit from ");
@@ -172,6 +181,7 @@ public class Hash extends DataStructureImpl {
 
     private static void testFill() {
         Hash local = new Hash(new byte[HASH_LENGTH]); // all zeroes
+        local.prepareCache();
         for (int i = 0; i < MAX_CACHED_XOR; i++) {
             byte t[] = new byte[HASH_LENGTH];
             for (int j = 0; j < HASH_LENGTH; j++)
@@ -184,9 +194,11 @@
                 return;
             }
         }
+        _log.debug("Fill test passed");
     }
     private static void testOverflow() {
         Hash local = new Hash(new byte[HASH_LENGTH]); // all zeroes
+        local.prepareCache();
         for (int i = 0; i < MAX_CACHED_XOR*2; i++) {
             byte t[] = new byte[HASH_LENGTH];
             for (int j = 0; j < HASH_LENGTH; j++)
@@ -207,10 +219,12 @@
                 }
             }
         }
+        _log.debug("overflow test passed");
     }
     private static void testFillCheck() {
         Set hashes = new HashSet();
         Hash local = new Hash(new byte[HASH_LENGTH]); // all zeroes
+        local.prepareCache();
         // fill 'er up
         for (int i = 0; i < MAX_CACHED_XOR; i++) {
             byte t[] = new byte[HASH_LENGTH];
@@ -248,5 +262,6 @@
                 return;
             }
         }
+        _log.debug("Fill check test passed");
     }
 }
\ No newline at end of file
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
index f88492789..5cb084a71 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
@@ -55,6 +55,8 @@ class KBucketImpl implements KBucket {
     public Hash getLocal() { return _local; }
     private void setLocal(Hash local) {
         _local = local;
+        // we want to make sure we've got the cache in place before calling cachedXor
+        _local.prepareCache();
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Local hash reset to " + (local == null ? "null" : DataHelper.toHexString(local.getData())));
     }
@@ -343,7 +345,9 @@
         int low = 1;
         int high = 3;
         Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
-        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH);
+        Hash local = Hash.FAKE_HASH;
+        local.prepareCache();
+        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
         bucket.setRange(low, high);
         Hash lowerBoundKey = bucket.getRangeBeginKey();
         Hash upperBoundKey = bucket.getRangeEndKey();
@@ -378,7 +382,9 @@
         int high = 200;
         byte hash[] = new byte[Hash.HASH_LENGTH];
         RandomSource.getInstance().nextBytes(hash);
-        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), new Hash(hash));
+        Hash local = new Hash(hash);
+        local.prepareCache();
+        KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
         bucket.setRange(low, high);
         Hash lowerBoundKey = bucket.getRangeBeginKey();
         Hash upperBoundKey = bucket.getRangeEndKey();