diff --git a/apps/streaming/java/src/net/i2p/client/streaming/ConnThrottler.java b/apps/streaming/java/src/net/i2p/client/streaming/ConnThrottler.java index 07430e7511ae44d92bc9e5b5f9de706acab04569..5383873e9cb3718a45a115f47b70facfd413a283 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/ConnThrottler.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/ConnThrottler.java @@ -27,12 +27,11 @@ class ConnThrottler { ConnThrottler(int max, int totalMax, long period) { _max = max; _totalMax = totalMax; - if (max > 0) { - SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), period); + SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), period); + if (max > 0) this.counter = new ObjectCounter(); - } else { + else this.counter = null; - } if (totalMax > 0) _currentTotal = new AtomicInteger(); else diff --git a/core/java/src/net/i2p/data/LeaseSet.java b/core/java/src/net/i2p/data/LeaseSet.java index 81de8e32679f7a6beb137a8b25c44cbb0b0e1775..05d71949d65fbf89b73d66864d7966bba9d716ec 100644 --- a/core/java/src/net/i2p/data/LeaseSet.java +++ b/core/java/src/net/i2p/data/LeaseSet.java @@ -119,9 +119,10 @@ public class LeaseSet extends DataStructureImpl { /** * If true, we received this LeaseSet by searching for it * Default false. + * @since 0.7.14 */ public boolean getReceivedAsReply() { return _receivedAsReply; } - /** set to true */ + /** set to true @since 0.7.14 */ public void setReceivedAsReply() { _receivedAsReply = true; } public void addLease(Lease lease) { diff --git a/history.txt b/history.txt index 1ad9e0812e44b81bc7041a0ba674c4fc28cbc8cf..bca327011f8218192a7032e489b9c14c6df55134 100644 --- a/history.txt +++ b/history.txt @@ -1,3 +1,17 @@ +2010-06-05 zzz + * Netdb: + - Use new receivedAsReply flag in LeaseSet to mark + those received as response to a query + - Mark which methods in FloodfillPeerSelector may return + our own hash + - Redefine selectNearest() so it may return our own hash, + so it can be used for closeness measurement + - Redefine findNearestRouters() to return Hashes + instead of RouterInfos + - Fix LeaseSet response decisions for floodfills, based + on partial keyspace and closeness measurements + - List only count of published leasesets in netdb + 2010-06-03 zzz * NewsFetcher: Delay a minimum amount at startup * Update: Fix multiple updates after manually diff --git a/router/java/src/net/i2p/router/DummyNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/DummyNetworkDatabaseFacade.java index 5c5e5a0a45336a919124697812a2dac61a3d4ee9..6031cf630e26c2a64f424c2c28ce593951e5e74e 100644 --- a/router/java/src/net/i2p/router/DummyNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/DummyNetworkDatabaseFacade.java @@ -59,5 +59,5 @@ class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade { } public Set<Hash> getAllRouters() { return new HashSet(_routers.keySet()); } - public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) { return new HashSet(_routers.values()); } + public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) { return new HashSet(_routers.values()); } } diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java index 3885ea6d5393ba4319da352a82d99920517986f9..50b9d5364cc88f5bab7ada626721217509fa6e6b 100644 --- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java @@ -30,7 +30,7 @@ public abstract class 
NetworkDatabaseFacade implements Service { * @param maxNumRouters The maximum number of routers to return * @param peersToIgnore Hash of routers not to include */ - public abstract Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore); + public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore); public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs); public abstract LeaseSet lookupLeaseSetLocally(Hash key); diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java index 098e16d499456339b4699536706ff45ebf0c4769..14408f1ed4aa00437ccef8702b43aba082dea759 100644 --- a/router/java/src/net/i2p/router/RouterVersion.java +++ b/router/java/src/net/i2p/router/RouterVersion.java @@ -18,7 +18,7 @@ public class RouterVersion { /** deprecated */ public final static String ID = "Monotone"; public final static String VERSION = CoreVersion.VERSION; - public final static long BUILD = 17; + public final static long BUILD = 18; /** for example "-test" */ public final static String EXTRA = "-rc"; diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java index 4cb5a9d2671eaffc97ae631b8751ef48a65ea98c..3166f7c70892c746a2eaaf18a3b00f48ef6f2a3d 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java @@ -41,14 +41,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { private RouterIdentity _from; private Hash _fromHash; private final static int MAX_ROUTERS_RETURNED = 3; - private final static int CLOSENESS_THRESHOLD = 10; // StoreJob.REDUNDANCY * 2 + private final static int CLOSENESS_THRESHOLD = 8; // FNDF.MAX_TO_FLOOD + 1 private final static int REPLY_TIMEOUT = 60*1000; private final static int MESSAGE_PRIORITY = 300; /** - * If a routerInfo structure isn't updated within an hour, drop it - * and search for a later version. This value should be large enough - * to deal with the Router.CLOCK_FUDGE_FACTOR. + * If a routerInfo structure isn't this recent, don't send it out. + * Equal to KNDF.ROUTER_INFO_EXPIRATION_FLOODFILL. */ public final static long EXPIRE_DELAY = 60*60*1000; @@ -85,29 +84,66 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_message.getSearchKey()); if (ls != null) { - boolean publish = getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey()); + // We have to be very careful here to decide whether or not to send out the leaseSet, + // to avoid anonymity vulnerabilities. + // As this is complex, lots of comments follow... 
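Note: condensed, the branches that follow implement a three-case decision table. A minimal sketch (identifiers as in the patch; stats, logging and error handling omitted; not the literal control flow):

    // May we answer a remote lookup for this LeaseSet?
    if (ls.getReceivedAsPublished()) {
        // stored directly to us, unsolicited: safe to answer
        sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
    } else if (shouldPublishLocal && answerAllQueries() && weAreClosest(closestHashes)) {
        // our own published LeaseSet, and the key is in our estimated keyspace
        sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
    } else {
        // looked-up, local-but-unpublished, or out of our keyspace:
        // lie, pretend we don't have it, and hand back closest-peer hashes
        sendClosest(_message.getSearchKey(), getNearestRouters(), fromKey, _message.getReplyTunnel());
    }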
+ + boolean isLocal = getContext().clientManager().isLocal(ls.getDestination()); + boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey()); - // only answer a request for a LeaseSet if it has been published + // Only answer a request for a LeaseSet if it has been published // to us, or, if its local, if we would have published to ourselves - if (publish && (answerAllQueries() || ls.getReceivedAsPublished())) { + + // answerAllQueries: We are floodfill + // getReceivedAsPublished: + // false for local + // false for received over a tunnel + // false for received in response to our lookups + // true for received in a DatabaseStoreMessage unsolicited + if (ls.getReceivedAsPublished()) { + // Answer anything that was stored to us directly + // (i.e. "received as published" - not the result of a query, or received + // over a client tunnel). + // This is probably because we are floodfill, but also perhaps we used to be floodfill, + // so we don't check the answerAllQueries() flag. + // Local leasesets are not handled here + if (_log.shouldLog(Log.INFO)) + _log.info("We have the published LS " + _message.getSearchKey().toBase64() + ", answering query"); getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0); sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel()); - } else { - Set<RouterInfo> routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(), - CLOSENESS_THRESHOLD, - _message.getDontIncludePeers()); - if (getContext().clientManager().isLocal(ls.getDestination())) { - if (publish && weAreClosest(routerInfoSet)) { - getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0); - sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel()); - } else { - getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0); - sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel()); - } + } else if (shouldPublishLocal && answerAllQueries()) { + // We are floodfill, and this is our local leaseset, and we publish it. + // Only send it out if it is in our estimated keyspace. + // For this, we do NOT use their dontInclude list as it can't be trusted + // (i.e. 
it could mess up the closeness calculation) + Set<Hash> closestHashes = getContext().netDb().findNearestRouters(_message.getSearchKey(), + CLOSENESS_THRESHOLD, null); + if (weAreClosest(closestHashes)) { + // It's in our keyspace, so give it to them + if (_log.shouldLog(Log.INFO)) + _log.info("We have local LS " + _message.getSearchKey().toBase64() + ", answering query, in our keyspace"); + getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0); + sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel()); } else { - getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1, 0); - sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel()); + // Lie, pretend we don't have it + if (_log.shouldLog(Log.INFO)) + _log.info("We have local LS " + _message.getSearchKey().toBase64() + ", NOT answering query, out of our keyspace"); + getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0); + Set<Hash> routerHashSet = getNearestRouters(); + sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel()); } + } else { + // It was not published to us (we looked it up, for example) + // or it's local and we aren't floodfill, + // or it's local and we don't publish it. + // Lie, pretend we don't have it + if (_log.shouldLog(Log.INFO)) + _log.info("We have LS " + _message.getSearchKey().toBase64() + + ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal + + " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply()); + getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1, 0); + Set<Hash> routerHashSet = getNearestRouters(); + sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel()); } } else { RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_message.getSearchKey()); @@ -134,14 +170,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel()); } } else { - // not found locally - return closest peer routerInfo structs - Set<Hash> dontInclude = _message.getDontIncludePeers(); - // Honor flag to exclude all floodfills - //if (dontInclude.contains(Hash.FAKE_HASH)) { - // This is handled in FloodfillPeerSelector - Set<RouterInfo> routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(), - MAX_ROUTERS_RETURNED, - dontInclude); + // not found locally - return closest peer hashes + Set<Hash> routerHashSet = getNearestRouters(); // ERR: see above // // Remove hidden nodes from set.. @@ -154,13 +184,32 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("We do not have key " + _message.getSearchKey().toBase64() + - " locally. sending back " + routerInfoSet.size() + " peers to " + fromKey.toBase64()); - sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel()); + " locally. sending back " + routerHashSet.size() + " peers to " + fromKey.toBase64()); + sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel()); } } } - private boolean isUnreachable(RouterInfo info) { + /** + * Closest to the message's search key, + * honoring the message's dontInclude set. + * Will not include us. + * Side effect - adds us to the message's dontInclude set. 
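Note: the keyspace test used in this hunk is a plain membership check, which is why selectNearest()/findNearestRouters() are redefined elsewhere in this patch so they may return our own hash. Roughly (names from the patch):

    // Are we among the CLOSENESS_THRESHOLD (8) routers nearest to the key?
    // The requester's dontInclude list is deliberately passed as null here,
    // since a hostile requester could use it to skew the closeness measurement.
    Set<Hash> closest = getContext().netDb().findNearestRouters(
            _message.getSearchKey(), CLOSENESS_THRESHOLD, null);
    boolean inOurKeyspace = closest.contains(getContext().routerHash());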
+ */ + private Set<Hash> getNearestRouters() { + Set<Hash> dontInclude = _message.getDontIncludePeers(); + if (dontInclude == null) + dontInclude = new HashSet(1); + dontInclude.add(getContext().routerHash()); + // Honor flag to exclude all floodfills + //if (dontInclude.contains(Hash.FAKE_HASH)) { + // This is handled in FloodfillPeerSelector + return getContext().netDb().findNearestRouters(_message.getSearchKey(), + MAX_ROUTERS_RETURNED, + dontInclude); + } + + private static boolean isUnreachable(RouterInfo info) { if (info == null) return true; String cap = info.getCapabilities(); if (cap == null) return false; @@ -171,21 +220,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { public static final boolean DEFAULT_PUBLISH_UNREACHABLE = true; private boolean publishUnreachable() { - String publish = getContext().getProperty(PROP_PUBLISH_UNREACHABLE); - if (publish != null) - return Boolean.valueOf(publish).booleanValue(); - else - return DEFAULT_PUBLISH_UNREACHABLE; + return getContext().getProperty(PROP_PUBLISH_UNREACHABLE, DEFAULT_PUBLISH_UNREACHABLE); } - private boolean weAreClosest(Set routerInfoSet) { - for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) { - RouterInfo cur = (RouterInfo)iter.next(); - if (cur.getIdentity().calculateHash().equals(getContext().routerHash())) { - return true; - } - } - return false; + private boolean weAreClosest(Set<Hash> routerHashSet) { + return routerHashSet.contains(getContext().routerHash()); } private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) { @@ -207,17 +246,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { sendMessage(msg, toPeer, replyTunnel); } - protected void sendClosest(Hash key, Set<RouterInfo> routerInfoSet, Hash toPeer, TunnelId replyTunnel) { + protected void sendClosest(Hash key, Set<Hash> routerHashes, Hash toPeer, TunnelId replyTunnel) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = " - + routerInfoSet.size() + " tunnel " + replyTunnel); + + routerHashes.size() + " tunnel " + replyTunnel); DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(getContext()); msg.setFromHash(getContext().routerHash()); msg.setSearchKey(key); - for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) { - RouterInfo peer = (RouterInfo)iter.next(); - msg.addReply(peer.getIdentity().getHash()); - if (msg.getNumReplies() >= MAX_ROUTERS_RETURNED) + int i = 0; + for (Hash h : routerHashes) { + msg.addReply(h); + if (++i >= MAX_ROUTERS_RETURNED) break; } getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupMatchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupMatchJob.java index 0c77f80c53e1fd16b844bd16cc712f9d5f708a1e..20572667e7f3c3d1ecd8d3f2bb71a99fa5ad82bd 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupMatchJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupMatchJob.java @@ -54,10 +54,22 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob { } try { DatabaseStoreMessage dsm = (DatabaseStoreMessage)message; - if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) + if (_log.shouldLog(Log.INFO)) + _log.info(_search.getJobId() + ": got a DSM for " + + dsm.getKey().toBase64()); + // This store will be duplicated by HFDSMJ + // We do it here first to make sure it is in the DB before + // 
runJob() and search.success() is called??? + // Should we just pass the DataStructure directly back to somebody? + if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { + // Since HFDSMJ wants to setReceivedAsPublished(), we have to + // set a flag saying this was really the result of a query, + // so don't do that. + dsm.getLeaseSet().setReceivedAsReply(); getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet()); - else + } else { getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo()); + } } catch (IllegalArgumentException iae) { if (_log.shouldLog(Log.WARN)) _log.warn(_search.getJobId() + ": Received an invalid store reply", iae); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java index c4e1140c2a872cf38db4a03ff61f8e69024192a5..4eb4e482540127990ba30a6f407da9b0530974a9 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java @@ -38,11 +38,13 @@ class FloodfillPeerSelector extends PeerSelector { * Pick out peers with the floodfill capacity set, returning them first, but then * after they're complete, sort via kademlia. * Puts the floodfill peers that are directly connected first in the list. + * List will not include our own hash. * + * @param peersToIgnore can be null * @return List of Hash for the peers selected */ @Override - public List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { + List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true); } @@ -50,22 +52,32 @@ class FloodfillPeerSelector extends PeerSelector { * Pick out peers with the floodfill capacity set, returning them first, but then * after they're complete, sort via kademlia. * Does not prefer the floodfill peers that are directly connected. + * List will not include our own hash. * + * @param peersToIgnore can be null * @return List of Hash for the peers selected */ @Override - public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { + List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false); } - public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) { + /** + * Pick out peers with the floodfill capacity set, returning them first, but then + * after they're complete, sort via kademlia. + * List will not include our own hash. 
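Note on the FloodOnlyLookupMatchJob hunk above: the flag goes onto the LeaseSet before the store, so that when HandleFloodfillDatabaseStoreMessageJob later processes the duplicated store of the same object, it can tell a query reply from an unsolicited publish. In miniature (as in the patch):

    if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
        dsm.getLeaseSet().setReceivedAsReply();   // mark as a query result first...
        getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet());
        // ...so HFDSMJ's "if (!ls.getReceivedAsReply())" check will NOT
        // mark this entry received-as-published
    }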
+ * + * @param peersToIgnore can be null + * @return List of Hash for the peers selected + */ + List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) { if (peersToIgnore == null) peersToIgnore = new HashSet(1); peersToIgnore.add(_context.routerHash()); FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters); if (kbuckets == null) return new ArrayList(); kbuckets.getAll(matches); - List rv = matches.get(maxNumRouters, preferConnected); + List<Hash> rv = matches.get(maxNumRouters, preferConnected); if (_log.shouldLog(Log.DEBUG)) _log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": " + rv + " (not including " + peersToIgnore + ") [allHashes.size = " @@ -74,15 +86,24 @@ class FloodfillPeerSelector extends PeerSelector { } /** - * @return all floodfills not shitlisted forever. list will not include our own hash + * @return all floodfills not shitlisted forever. + * List will not include our own hash. * List is not sorted and not shuffled. */ - public List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) { - return selectFloodfillParticipants(null, kbuckets); + List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) { + Set<Hash> ignore = new HashSet(1); + ignore.add(_context.routerHash()); + return selectFloodfillParticipants(ignore, kbuckets); } - public List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) { - if (kbuckets == null) return new ArrayList(); + /** + * @param toIgnore can be null + * @return all floodfills not shitlisted forever. + * List MAY INCLUDE our own hash. + * List is not sorted and not shuffled. + */ + private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) { + if (kbuckets == null) return Collections.EMPTY_LIST; FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, toIgnore, 0); kbuckets.getAll(matches); return matches.getFloodfillParticipants(); @@ -92,8 +113,9 @@ class FloodfillPeerSelector extends PeerSelector { * Sort the floodfills. The challenge here is to keep the good ones * at the front and the bad ones at the back. If they are all good or bad, * searches and stores won't work well. + * List will not include our own hash. 
* - * @return all floodfills not shitlisted foreverx + * @return floodfills closest to the key that are not shitlisted forever * @param key the routing key * @param maxNumRouters max to return * Sorted by closest to the key if > maxNumRouters, otherwise not @@ -104,8 +126,10 @@ class FloodfillPeerSelector extends PeerSelector { * success newer than failure * Group 3: All others */ - public List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) { - return selectFloodfillParticipants(key, maxNumRouters, null, kbuckets); + List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) { + Set<Hash> ignore = new HashSet(1); + ignore.add(_context.routerHash()); + return selectFloodfillParticipants(key, maxNumRouters, ignore, kbuckets); } /** .5 * PublishLocalRouterInfoJob.PUBLISH_DELAY */ @@ -116,7 +140,29 @@ class FloodfillPeerSelector extends PeerSelector { private static final int NO_FAIL_LOOKUP_GOOD = NO_FAIL_LOOKUP_OK * 3; private static final int MAX_GOOD_RESP_TIME = 5*1000; - public List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) { + /** + * See above for description + * List will not include our own hash + * @param toIgnore can be null + */ + List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) { + if (toIgnore == null) { + toIgnore = new HashSet(1); + toIgnore.add(_context.routerHash()); + } else if (!toIgnore.contains(_context.routerHash())) { + // copy the Set so we don't confuse StoreJob + toIgnore = new HashSet(toIgnore); + toIgnore.add(_context.routerHash()); + } + return selectFloodfillParticipantsIncludingUs(key, howMany, toIgnore, kbuckets); + } + + /** + * See above for description + * List MAY CONTAIN our own hash unless included in toIgnore + * @param toIgnore can be null + */ + private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) { List<Hash> ffs = selectFloodfillParticipants(toIgnore, kbuckets); TreeSet<Hash> sorted = new TreeSet(new XORComparator(key)); sorted.addAll(ffs); @@ -204,6 +250,11 @@ class FloodfillPeerSelector extends PeerSelector { private Set<Hash> _toIgnore; private int _matches; private int _wanted; + + /** + * Warning - may return our router hash - add to toIgnore if necessary + * @param toIgnore can be null + */ public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) { _key = key; _sorted = new TreeSet(new XORComparator(key)); @@ -225,8 +276,8 @@ class FloodfillPeerSelector extends PeerSelector { // return; if ( (_toIgnore != null) && (_toIgnore.contains(entry)) ) return; - if (entry.equals(_context.routerHash())) - return; + //if (entry.equals(_context.routerHash())) + // return; // it isn't direct, so who cares if they're shitlisted //if (_context.shitlist().isShitlisted(entry)) // return; @@ -328,12 +379,14 @@ class FloodfillPeerSelector extends PeerSelector { * Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM. * UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory * lookup, and the response should not include floodfills. 
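Note: the pattern running through FloodfillPeerSelector is that the public selectFloodfillParticipants() entry points now guarantee our own hash is excluded, while the private ...IncludingUs() worker may return it, which is exactly what the closeness measurement needs. The copy-before-mutate step from the hunk above, condensed:

    // Never mutate a caller-owned ignore set (StoreJob passes one in and reuses it)
    if (toIgnore == null) {
        toIgnore = new HashSet<Hash>(1);
    } else if (!toIgnore.contains(_context.routerHash())) {
        toIgnore = new HashSet<Hash>(toIgnore);   // defensive copy
    }
    toIgnore.add(_context.routerHash());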
+ * List MAY INCLUDE our own router - add to peersToIgnore if you don't want * * @param key the original key (NOT the routing key) + * @param peersToIgnore can be null * @return List of Hash for the peers selected, ordered */ @Override - public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { + List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { Hash rkey = _context.routingKeyGenerator().getRoutingKey(key); if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) { // return non-ff @@ -343,7 +396,7 @@ class FloodfillPeerSelector extends PeerSelector { return matches.get(maxNumRouters); } else { // return ff - return selectFloodfillParticipants(rkey, maxNumRouters, peersToIgnore, kbuckets); + return selectFloodfillParticipantsIncludingUs(rkey, maxNumRouters, peersToIgnore, kbuckets); } } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java index f8c7adf42492e52ad45f3393bf30531003cbd8ae..4e3c9c30ce377d539a505fd699df345d7aee8ea9 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java @@ -30,6 +30,12 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku super(ctx, receivedMessage, from, fromHash); } + /** + * @return are we floodfill + * We don't really answer all queries if this is true, + * since floodfills don't have the whole keyspace any more, + * see ../HDLMJ for discussion + */ @Override protected boolean answerAllQueries() { if (!FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) return false; @@ -42,7 +48,7 @@ public class HandleFloodfillDatabaseLooku * will stop bugging us. */ @Override - protected void sendClosest(Hash key, Set routerInfoSet, Hash toPeer, TunnelId replyTunnel) { + protected void sendClosest(Hash key, Set<Hash> routerInfoSet, Hash toPeer, TunnelId replyTunnel) { super.sendClosest(key, routerInfoSet, toPeer, replyTunnel); // go away, you got the wrong guy, send our RI back unsolicited diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java index 452d8a60b9cd73273a41157eb984e13f4b2a4f0f..ea53c15667726b8dcf212e1fabcbfcee5d8921b1 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java @@ -76,16 +76,43 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl { key.toBase64().substring(0, 4)); } LeaseSet ls = _message.getLeaseSet(); - // mark it as something we received, so we'll answer queries - // for it. this flag does NOT get set on entries that we + //boolean oldrar = ls.getReceivedAsReply(); + //boolean oldrap = ls.getReceivedAsPublished(); + // If this was received as a response to a query, + // FloodOnlyLookupMatchJob called setReceivedAsReply(), + // and we are seeing this only as a duplicate, + // so we don't set the receivedAsPublished() flag. + // Otherwise, mark it as something we received unsolicited, so we'll answer queries + // for it.
This flag must NOT get set on entries that we // receive in response to our own lookups. - ls.setReceivedAsPublished(true); + // See ../HDLMJ for more info + if (!ls.getReceivedAsReply()) + ls.setReceivedAsPublished(true); + //boolean rap = ls.getReceivedAsPublished(); + //if (_log.shouldLog(Log.INFO)) + // _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap); LeaseSet match = getContext().netDb().store(key, _message.getLeaseSet()); - if ( (match == null) || (match.getEarliestLeaseDate() < _message.getLeaseSet().getEarliestLeaseDate()) ) { + if (match == null) { wasNew = true; + } else if (match.getEarliestLeaseDate() < _message.getLeaseSet().getEarliestLeaseDate()) { + wasNew = true; + // If the old entry was received as published, carry the RAP flag over to the newer one + if (match.getReceivedAsPublished()) + ls.setReceivedAsPublished(true); } else { wasNew = false; - match.setReceivedAsPublished(true); + // The FloodOnlyLookupSelector goes away after the first good reply + // So on the second reply, FloodOnlyLookupMatchJob is not called to set ReceivedAsReply. + // So then we think it's an unsolicited store. + // So we should skip this. + // If the 2nd reply is newer than the first, ReceivedAsPublished will be set incorrectly, + // but that will hopefully be rare. + // A more elaborate solution would be a List of recent ReceivedAsReply LeaseSets, with receive time? + // A real unsolicited store is likely to be new - hopefully... + //if (!ls.getReceivedAsReply()) + // match.setReceivedAsPublished(true); } } catch (IllegalArgumentException iae) { invalidMessage = iae.getMessage(); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index d978f8488f43e246c0d08e5189b98be2652c5213..688b8546d286c37c7055e8fb6d1e5960f67a70c9 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -314,12 +314,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { /** * Get the routers closest to that key in response to a remote lookup + * Only used by ../HDLMJ + * Set MAY INCLUDE our own router - add to peersToIgnore if you don't want + * + * @param key the real key, NOT the routing key + * @param peersToIgnore can be null */ - public Set<RouterInfo> findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) { - if (!_initialized) return null; - return getRouters(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb)); + public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) { + if (!_initialized) return Collections.EMPTY_SET; + return new HashSet(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb)); } +/***** private Set<RouterInfo> getRouters(Collection hashes) { if (!_initialized) return null; Set rv = new HashSet(hashes.size()); @@ -337,17 +343,16 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } return rv; } +*****/ /** get the hashes for all known routers */ public Set<Hash> getAllRouters() { - if (!_initialized) return new HashSet(0); - Set keys = _ds.getKeys(); - Set rv = new HashSet(keys.size()); + if (!_initialized) return Collections.EMPTY_SET; + Set<Hash> keys = _ds.getKeys(); + Set<Hash> rv = new HashSet(keys.size()); if (_log.shouldLog(Log.DEBUG)) _log.debug("getAllRouters(): # keys in the datastore: " + keys.size()); - for
(Iterator iter = keys.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - + for (Hash key : keys) { DataStructure ds = _ds.get(key); if (ds == null) { if (_log.shouldLog(Log.INFO)) @@ -382,10 +387,27 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } } + /** + * This is only used by StatisticsManager to publish + * the count if we are floodfill. + * So to hide a clue that a popular eepsite is hosted + * on a floodfill router, only count leasesets that + * are "received as published", as of 0.7.14 + */ @Override public int getKnownLeaseSets() { if (_ds == null) return 0; - return _ds.countLeaseSets(); + //return _ds.countLeaseSets(); + Set<Hash> keys = _ds.getKeys(); + int rv = 0; + for (Hash key : keys) { + DataStructure ds = _ds.get(key); + if (ds != null && + ds instanceof LeaseSet && + ((LeaseSet)ds).getReceivedAsPublished()) + rv++; + } + return rv; } /* aparently, not used?? should be public if used elsewhere. */ @@ -622,6 +644,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { * Store the leaseSet * * @throws IllegalArgumentException if the leaseSet is not valid + * @return previous entry or null */ public LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException { if (!_initialized) return null; @@ -742,6 +765,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { * store the routerInfo * * @throws IllegalArgumentException if the routerInfo is not valid + * @return previous entry or null */ public RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException { return store(key, routerInfo, true); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java index 17441420262f10a03726dda007b98a522b5e1af1..2a6d889003b25054dfb02a6a86180ee05e3373b3 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java @@ -27,7 +27,10 @@ import net.i2p.stat.Rate; import net.i2p.stat.RateStat; import net.i2p.util.Log; -public class PeerSelector { +/** + * Mostly unused, see overrides in FloodfillPeerSelector + */ +class PeerSelector { protected Log _log; protected RouterContext _context; @@ -37,13 +40,14 @@ public class PeerSelector { } /** + * UNUSED - See FloodfillPeerSelector override * Search through the kbucket set to find the most reliable peers close to the * given key, skipping all of the ones already checked + * List will not include our own hash. * * @return ordered list of Hash objects */ - /* FIXME Exporting non-public type through public API FIXME */ - public List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API + List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) { // get the peers closest to the key return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets); } @@ -52,11 +56,11 @@ public class PeerSelector { * Ignore KBucket ordering and do the XOR explicitly per key. Runs in O(n*log(n)) * time (n=routing table size with c ~ 32 xor ops). This gets strict ordering * on closest + * List will not include our own hash. 
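Note on getKnownLeaseSets() above: publishing a raw leaseset count could hint that a popular eepsite is hosted on a particular floodfill router, so only entries carrying the received-as-published flag are counted. The trade-off is a full datastore scan where the old countLeaseSets() was a simple counter; condensed from the hunk:

    int published = 0;
    for (Hash key : _ds.getKeys()) {
        DataStructure ds = _ds.get(key);
        if (ds instanceof LeaseSet && ((LeaseSet) ds).getReceivedAsPublished())
            published++;   // local and looked-up leasesets are not advertised
    }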
* * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) */ - /* FIXME Exporting non-public type through public API FIXME */ - public List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API + List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { //if (true) return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets); @@ -88,14 +92,15 @@ public class PeerSelector { } /** + * UNUSED - See FloodfillPeerSelector override * Ignore KBucket ordering and do the XOR explicitly per key. Runs in O(n*log(n)) * time (n=routing table size with c ~ 32 xor ops). This gets strict ordering * on closest + * List will not include our own hash. * * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) */ - /* FIXME Exporting non-public type through public API FIXME */ - public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API + List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { if (peersToIgnore == null) peersToIgnore = new HashSet(1); peersToIgnore.add(_context.routerHash()); @@ -109,6 +114,7 @@ public class PeerSelector { return rv; } + /** UNUSED */ private class MatchSelectionCollector implements SelectionCollector { private TreeMap<BigInteger, Hash> _sorted; private Hash _key; @@ -132,7 +138,7 @@ public class PeerSelector { if (info.getIdentity().isHidden()) return; - BigInteger diff = getDistance(_key, entry); + BigInteger diff = HashDistance.getDistance(_key, entry); _sorted.put(diff, entry); _matches++; } @@ -189,21 +195,18 @@ public class PeerSelector { } **********/ - public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) { - // plain XOR of the key and router - byte diff[] = DataHelper.xor(routerInQuestion.getData(), targetKey.getData()); - return new BigInteger(1, diff); - } - /** + * UNUSED - See FloodfillPeerSelector override * Generic KBucket filtering to find the hashes close to a key, regardless of other considerations. * This goes through the kbuckets, starting with the key's location, moving towards us, and then away from the * key's location's bucket, selecting peers until we have numClosest. + * List MAY INCLUDE our own router - add to peersToIgnore if you don't want * + * @param key the original key (NOT the routing key) + * @param peersToIgnore can be null * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) */ - /* FIXME Exporting non-public type through public API FIXME */ - public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API + List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm. 
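Note: the helper removed here lives on as HashDistance.getDistance(); the metric is the usual Kademlia XOR distance, as the deleted code computed it:

    // Plain XOR of the two hashes, interpreted as an unsigned BigInteger
    public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) {
        byte[] diff = DataHelper.xor(routerInQuestion.getData(), targetKey.getData());
        return new BigInteger(1, diff);
    }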
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance() diff --git a/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java b/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java index 35121f59b3e9ecf04473237cfa8d84e52d44b689..10fc9537a9916775d931b57123327ec36bad5ad5 100644 --- a/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java +++ b/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java @@ -383,7 +383,10 @@ public class EventPumper implements Runnable { ServerSocketChannel servChan = (ServerSocketChannel)key.attachment(); try { SocketChannel chan = servChan.accept(); - chan.configureBlocking(false); + // don't throw an NPE if the connection is gone again + if (chan == null) + return; + chan.configureBlocking(false); if (!_transport.allowConnection()) { if (_log.shouldLog(Log.WARN)) diff --git a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java index 33e9067096b771d9ee2bb9ec1894135a0eaee37f..fcdb77ea9ea47868f0e23f65d9f4aec9ec68e171 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java +++ b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java @@ -1,6 +1,7 @@ package net.i2p.router.tunnel; import net.i2p.data.Hash; +import net.i2p.data.LeaseSet; import net.i2p.data.Payload; import net.i2p.data.TunnelId; import net.i2p.data.i2np.DataMessage; @@ -16,6 +17,7 @@ import net.i2p.router.ClientMessage; import net.i2p.router.RouterContext; import net.i2p.router.TunnelInfo; import net.i2p.router.message.GarlicMessageReceiver; +//import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; import net.i2p.util.Log; /** @@ -35,8 +37,8 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec _client = client; _log = ctx.logManager().getLog(InboundMessageDistributor.class); _receiver = new GarlicMessageReceiver(ctx, this, client); - _context.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 10*60*1000, 60*60*1000 }); - _context.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*1000, 10*60*1000, 60*60*1000 }); + _context.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 60*60*1000 }); + _context.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*60*1000 }); } public void distribute(I2NPMessage msg, Hash target) { @@ -164,11 +166,19 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec DatabaseStoreMessage dsm = (DatabaseStoreMessage)data; try { if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { - // dont tell anyone else about it if we got it through a client tunnel - // (though this is the default, but it doesn't hurt to make it explicit) - if (_client != null) - dsm.getLeaseSet().setReceivedAsPublished(false); - _context.netDb().store(dsm.getKey(), dsm.getLeaseSet()); + // If it was stored to us before, don't undo the + // receivedAsPublished flag so we will continue to respond to requests + // for the leaseset.
That is, we don't want this to change the + // RAP flag of the leaseset. + // When the keyspace rotates at midnight, and this leaseset moves out + // of our keyspace, maybe we shouldn't do this? + // Should we do this whether ff or not? + LeaseSet old = _context.netDb().store(dsm.getKey(), dsm.getLeaseSet()); + if (old != null && old.getReceivedAsPublished() + /** && ((FloodfillNetworkDatabaseFacade)_context.netDb()).floodfillEnabled() **/ ) + dsm.getLeaseSet().setReceivedAsPublished(true); + if (_log.shouldLog(Log.INFO)) + _log.info("Storing LS for: " + dsm.getKey() + " sent to: " + _client); } else { if (_client != null) { // drop it, since the data we receive shouldn't include router
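Note: taken together, the patch leaves each LeaseSet entry with two independent flags, RAP (receivedAsPublished) and RAR (receivedAsReply), set from three places. A hypothetical helper, not in the patch, collecting the transitions in one spot (parameter names invented for illustration):

    // Real logic lives in FloodOnlyLookupMatchJob (FOLMJ),
    // HandleFloodfillDatabaseStoreMessageJob (HFDSMJ) and
    // InboundMessageDistributor (IMD).
    void applyFlags(LeaseSet incoming, LeaseSet old,
                    boolean isQueryReply, boolean fromClientTunnel) {
        if (isQueryReply)
            incoming.setReceivedAsReply();           // FOLMJ: result of our lookup
        else if (!incoming.getReceivedAsReply())
            incoming.setReceivedAsPublished(true);   // HFDSMJ: unsolicited store
        if (fromClientTunnel && old != null && old.getReceivedAsPublished())
            incoming.setReceivedAsPublished(true);   // IMD: keep answering queries
    }
    // Remote lookups are then answered only for RAP entries, or for our own
    // published LeaseSet when we are floodfill and the key is in our keyspace.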