diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java index 0f0008c1365e3ed96707b12dacbd809d29aa2e93..371cf26775e4c8e0846c12937fa698708a4b5261 100644 --- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java @@ -38,10 +38,18 @@ public abstract class NetworkDatabaseFacade implements Service { public abstract LeaseSet lookupLeaseSetLocally(Hash key); public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs); public abstract RouterInfo lookupRouterInfoLocally(Hash key); - /** return the leaseSet if another leaseSet already existed at that key */ - public abstract LeaseSet store(Hash key, LeaseSet leaseSet); - /** return the routerInfo if another router already existed at that key */ - public abstract RouterInfo store(Hash key, RouterInfo routerInfo); + /** + * return the leaseSet if another leaseSet already existed at that key + * + * @throws IllegalArgumentException if the data is not valid + */ + public abstract LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException; + /** + * return the routerInfo if another router already existed at that key + * + * @throws IllegalArgumentException if the data is not valid + */ + public abstract RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException; public abstract void publish(RouterInfo localRouterInfo); public abstract void publish(LeaseSet localLeaseSet); public abstract void unpublish(LeaseSet localLeaseSet); diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java index 1fa392b4c68fea8b86a986728917d6dc33843b3d..d7fa4479fd58a43430127925b104c30b83f27bd7 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java +++ 
b/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java @@ -47,18 +47,31 @@ public class HandleDatabaseStoreMessageJob extends JobImpl { public void runJob() { if (_log.shouldLog(Log.DEBUG)) _log.debug("Handling database store message"); - + + boolean invalid = false; boolean wasNew = false; if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { - Object match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet()); - wasNew = (null == match); + try { + Object match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet()); + wasNew = (null == match); + } catch (IllegalArgumentException iae) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Not storing a leaseSet", iae); + invalid = true; + } } else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { if (_log.shouldLog(Log.INFO)) _log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of " + new Date(_message.getRouterInfo().getPublished())); - Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo()); - wasNew = (null == match); - getContext().profileManager().heardAbout(_message.getKey()); + try { + Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo()); + wasNew = (null == match); + getContext().profileManager().heardAbout(_message.getKey()); + } catch (IllegalArgumentException iae) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Not storing a routerInfo", iae); + invalid = true; + } } else { if (_log.shouldLog(Log.ERROR)) _log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType() @@ -70,9 +83,12 @@ public class HandleDatabaseStoreMessageJob extends JobImpl { if (_from != null) _fromHash = _from.getHash(); - if (_fromHash != null) - getContext().profileManager().dbStoreReceived(_fromHash, wasNew); - getContext().statManager().addRateData("netDb.storeHandled", 1, 0); + if (_fromHash != null) { + if (!invalid) { + 
getContext().profileManager().dbStoreReceived(_fromHash, wasNew); + getContext().statManager().addRateData("netDb.storeHandled", 1, 0); + } + } } private void sendAck() { @@ -86,8 +102,8 @@ public class HandleDatabaseStoreMessageJob extends JobImpl { return; } else { getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), msg, outTunnelId, - _message.getReplyGateway(), _message.getReplyTunnel(), - null, null, null, null, ACK_TIMEOUT, ACK_PRIORITY)); + _message.getReplyGateway(), _message.getReplyTunnel(), + null, null, null, null, ACK_TIMEOUT, ACK_PRIORITY)); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index 8bf79643f11eaa35d01af7547e1c37dea9581e6d..1e507a62c805f5d1d91e04cc3887c36c6cb79571 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -433,31 +433,47 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { /** I don't think it'll ever make sense to have a lease last for a full day */ private static final long MAX_LEASE_FUTURE = 24*60*60*1000; - public LeaseSet store(Hash key, LeaseSet leaseSet) { - long start = _context.clock().now(); - if (!_initialized) return null; + /** + * Determine whether this leaseSet will be accepted as valid and current + * given what we know now. + * + */ + boolean validate(Hash key, LeaseSet leaseSet) { if (!key.equals(leaseSet.getDestination().calculateHash())) { if (_log.shouldLog(Log.ERROR)) _log.error("Invalid store attempt! key does not match leaseSet.destination! key = " + key + ", leaseSet = " + leaseSet); - return null; + return false; } else if (!leaseSet.verifySignature()) { if (_log.shouldLog(Log.ERROR)) _log.error("Invalid leaseSet signature! 
leaseSet = " + leaseSet); - return null; + return false; } else if (leaseSet.getEarliestLeaseDate() <= _context.clock().now()) { if (_log.shouldLog(Log.WARN)) _log.warn("Old leaseSet! not storing it: " + leaseSet.getDestination().calculateHash().toBase64() + " expires on " + new Date(leaseSet.getEarliestLeaseDate()), new Exception("Rejecting store")); - return null; + return false; } else if (leaseSet.getEarliestLeaseDate() > _context.clock().now() + MAX_LEASE_FUTURE) { if (_log.shouldLog(Log.WARN)) _log.warn("LeaseSet to expire too far in the future: " + leaseSet.getDestination().calculateHash().toBase64() + " expires on " + new Date(leaseSet.getEarliestLeaseDate()), new Exception("Rejecting store")); - return null; + return false; } + return true; + } + + /** + * Store the leaseSet + * + * @throws IllegalArgumentException if the leaseSet is not valid + */ + public LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException { + if (!_initialized) return null; + + boolean valid = validate(key, leaseSet); + if (!valid) throw new IllegalArgumentException("LeaseSet is not valid"); LeaseSet rv = null; if (_ds.isKnown(key)) @@ -485,39 +501,54 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } } - long end = _context.clock().now(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Store leaseSet took [" + (end-start) + "ms]"); return rv; } - public RouterInfo store(Hash key, RouterInfo routerInfo) { - long start = _context.clock().now(); - if (!_initialized) return null; + /** + * Determine whether this routerInfo will be accepted as valid and current + * given what we know now. + * + */ + boolean validate(Hash key, RouterInfo routerInfo) { + long now = _context.clock().now(); + if (!key.equals(routerInfo.getIdentity().getHash())) { _log.error("Invalid store attempt! key does not match routerInfo.identity! 
key = " + key + ", router = " + routerInfo); - return null; + return false; } else if (!routerInfo.isValid()) { _log.error("Invalid routerInfo signature! forged router structure! router = " + routerInfo); - return null; + return false; } else if (!routerInfo.isCurrent(ExpireRoutersJob.EXPIRE_DELAY)) { int existing = _kb.size(); if (existing >= MIN_REMAINING_ROUTERS) { if (_log.shouldLog(Log.INFO)) _log.info("Not storing expired router for " + key.toBase64(), new Exception("Rejecting store")); - return null; + return false; } else { if (_log.shouldLog(Log.WARN)) _log.warn("Even though the peer is old, we have only " + existing + " peers left (curPeer: " + key.toBase64() + " published on " + new Date(routerInfo.getPublished())); } - } else if (routerInfo.getPublished() > start + Router.CLOCK_FUDGE_FACTOR) { + } else if (routerInfo.getPublished() > now + Router.CLOCK_FUDGE_FACTOR) { if (_log.shouldLog(Log.WARN)) _log.warn("Peer " + key.toBase64() + " published their routerInfo in the future?! 
[" + new Date(routerInfo.getPublished()) + "]", new Exception("Rejecting store")); - return null; + return false; } + return true; + } + + /** + * store the routerInfo + * + * @throws IllegalArgumentException if the routerInfo is not valid + */ + public RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException { + if (!_initialized) return null; + + boolean valid = validate(key, routerInfo); + if (!valid) throw new IllegalArgumentException("RouterInfo is not valid"); RouterInfo rv = null; if (_ds.isKnown(key)) @@ -534,9 +565,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _lastSent.put(key, new Long(0)); } _kb.add(key); - long end = _context.clock().now(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Store routerInfo took [" + (end-start) + "ms]"); return rv; } @@ -576,6 +604,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _log.info("Dropping a lease: " + dbEntry); } + if (o == null) { + boolean removed = _kb.remove(dbEntry); + // if we don't know the key, let's make sure it isn't a now-dead peer + } + _ds.remove(dbEntry); synchronized (_lastSent) { _lastSent.remove(dbEntry); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java index ff66c0981bad6fa04ffd5a77d87dfee388d730ff..baaa7a811144c6953b3175aa2692c2fdc3f85f70 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java @@ -21,6 +21,9 @@ import java.util.TreeMap; import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.router.RouterContext; +import net.i2p.router.peermanager.PeerProfile; +import net.i2p.stat.Rate; +import net.i2p.stat.RateStat; import net.i2p.util.Log; class PeerSelector { @@ -82,16 +85,33 @@ class PeerSelector { * */ private void removeFailingPeers(Set peerHashes) { - List failing = new ArrayList(16); + 
List failing = null; for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) { Hash cur = (Hash)iter.next(); if (_context.profileOrganizer().isFailing(cur)) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Peer " + cur.toBase64() + " is failing, don't include them in the peer selection"); + if (failing == null) + failing = new ArrayList(4); failing.add(cur); + } else if (_context.profileOrganizer().peerSendsBadReplies(cur)) { + if (failing == null) + failing = new ArrayList(4); + failing.add(cur); + if (_log.shouldLog(Log.WARN)) { + PeerProfile profile = _context.profileOrganizer().getProfile(cur); + if (profile != null) { + RateStat invalidReplyRateStat = profile.getDBHistory().getInvalidReplyRate(); + Rate invalidReplyRate = invalidReplyRateStat.getRate(60*60*1000l); + _log.warn("Peer " + cur.toBase64() + " sends us bad replies: current hour: " + + invalidReplyRate.getCurrentEventCount() + " and last hour: " + + invalidReplyRate.getLastEventCount() + ":\n" + invalidReplyRate.toString()); + } + } } } - peerHashes.removeAll(failing); + if (failing != null) + peerHashes.removeAll(failing); } protected BigInteger getDistance(Hash targetKey, Hash routerInQuestion) { diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java index d310e7ec1d4e667f7f2a7b97ee34c948cee35c22..d6be8bbc5d383d4123d16a6069d20cbd5906b18a 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java @@ -215,9 +215,9 @@ class PersistentDataStore extends TransientDataStore { fis = new FileInputStream(_leaseFile); LeaseSet ls = new LeaseSet(); ls.readBytes(fis); - _facade.store(ls.getDestination().calculateHash(), ls); - Object accepted = _facade.lookupLeaseSetLocally(ls.getDestination().calculateHash()); - if (accepted == null) { + try { + 
_facade.store(ls.getDestination().calculateHash(), ls); + } catch (IllegalArgumentException iae) { _log.info("Refused locally loaded leaseSet - deleting"); corrupt = true; } @@ -271,9 +271,9 @@ class PersistentDataStore extends TransientDataStore { fis = new FileInputStream(_routerFile); RouterInfo ri = new RouterInfo(); ri.readBytes(fis); - _facade.store(ri.getIdentity().getHash(), ri); - Object accepted = _facade.lookupRouterInfoLocally(ri.getIdentity().getHash()); - if (accepted == null) { + try { + _facade.store(ri.getIdentity().getHash(), ri); + } catch (IllegalArgumentException iae) { _log.info("Refused locally loaded routerInfo - deleting"); corrupt = true; } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java index 6dbd25d6689301cc8f5169bceaa1dd44cb3a4de9..45f37cfafb23d0760f4cf3a7c4e66095f60e8746 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java @@ -26,6 +26,7 @@ import net.i2p.router.TunnelInfo; import net.i2p.router.TunnelSelectionCriteria; import net.i2p.router.message.SendMessageDirectJob; import net.i2p.router.message.SendTunnelMessageJob; +import net.i2p.router.peermanager.PeerProfile; import net.i2p.util.Log; /** @@ -87,6 +88,9 @@ class SearchJob extends JobImpl { getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + 
getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); if (_log.shouldLog(Log.DEBUG)) _log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by")); } @@ -396,12 +400,22 @@ class SearchJob extends JobImpl { private final class SearchReplyJob extends JobImpl { private DatabaseSearchReplyMessage _msg; + /** + * Peer who we think sent us the reply. Note: could be spoofed! If the + * attacker knew we were searching for a particular key from a + * particular peer, they could send us some searchReply messages with + * shitty values, trying to get us to consider that peer unreliable. + * Potential fixes include either authenticated 'from' address or use a + * nonce in the search + searchReply (and check for it in the selector). 
+ * + */ private Hash _peer; private int _curIndex; private int _invalidPeers; private int _seenPeers; private int _newPeers; private int _duplicatePeers; + private int _repliesPendingVerification; private long _duration; public SearchReplyJob(DatabaseSearchReplyMessage message, Hash peer, long duration) { super(SearchJob.this.getContext()); @@ -412,24 +426,40 @@ class SearchJob extends JobImpl { _seenPeers = 0; _newPeers = 0; _duplicatePeers = 0; + _repliesPendingVerification = 0; } public String getName() { return "Process Reply for Kademlia Search"; } public void runJob() { if (_curIndex >= _msg.getNumReplies()) { - getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers, - _invalidPeers, _duplicatePeers, _duration); - if (_newPeers > 0) - newPeersFound(_newPeers); + if (_repliesPendingVerification > 0) { + // we received new references from the peer, but still + // haven't verified all of them, so lets give it more time + SearchReplyJob.this.requeue(_timeoutMs); + } else { + // either they didn't tell us anything new or we have verified + // (or failed to verify) all of them. we're done + getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers, + _invalidPeers, _duplicatePeers, _duration); + if (_newPeers > 0) + newPeersFound(_newPeers); + } } else { Hash peer = _msg.getReply(_curIndex); RouterInfo info = getContext().netDb().lookupRouterInfoLocally(peer); if (info == null) { - // hmm, perhaps don't always send a lookup for this... - // but for now, wtf, why not. we may even want to adjust it so that - // we penalize or benefit peers who send us that which we can or - // cannot lookup - getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs); + // if the peer is giving us lots of bad peer references, + // dont try to fetch them. 
+ + boolean sendsBadInfo = getContext().profileOrganizer().peerSendsBadReplies(_peer); + if (!sendsBadInfo) { + getContext().netDb().lookupRouterInfo(peer, new ReplyVerifiedJob(peer), new ReplyNotVerifiedJob(peer), _timeoutMs); + _repliesPendingVerification++; + } else { + if (_log.shouldLog(Log.WARN)) + _log.warn("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64()); + getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0); + } } if (_state.wasAttempted(peer)) { @@ -447,6 +477,38 @@ class SearchJob extends JobImpl { requeue(0); } } + + /** the peer gave us a reference to a new router, and we were able to fetch it */ + private final class ReplyVerifiedJob extends JobImpl { + private Hash _key; + public ReplyVerifiedJob(Hash key) { + super(SearchReplyJob.this.getContext()); + _key = key; + } + public String getName() { return "Search reply value verified"; } + public void runJob() { + if (_log.shouldLog(Log.INFO)) + _log.info("Peer reply from " + _peer.toBase64() + " verified: " + _key.toBase64()); + _repliesPendingVerification--; + getContext().statManager().addRateData("netDb.searchReplyValidated", 1, 0); + } + } + /** the peer gave us a reference to a new router, and we were NOT able to fetch it */ + private final class ReplyNotVerifiedJob extends JobImpl { + private Hash _key; + public ReplyNotVerifiedJob(Hash key) { + super(SearchReplyJob.this.getContext()); + _key = key; + } + public String getName() { return "Search reply value NOT verified"; } + public void runJob() { + if (_log.shouldLog(Log.INFO)) + _log.info("Peer reply from " + _peer.toBase64() + " failed verification: " + _key.toBase64()); + _repliesPendingVerification--; + _invalidPeers++; + getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1, 0); + } + } } /** @@ -534,6 +596,7 @@ class SearchJob extends JobImpl { if (_keepStats) { long time = getContext().clock().now() - _state.getWhenStarted(); 
getContext().statManager().addRateData("netDb.failedTime", time, 0); + _facade.fail(_state.getTarget()); } if (_onFailure != null) getContext().jobQueue().addJob(_onFailure); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java index a706eb995c46a8c789b520e3e7ead3851c6c580f..77922f134071fcd6e7f505f54e366567dc380c87 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java @@ -46,19 +46,27 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob { DatabaseStoreMessage msg = (DatabaseStoreMessage)_message; if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { - _facade.store(msg.getKey(), msg.getLeaseSet()); + try { + _facade.store(msg.getKey(), msg.getLeaseSet()); + getContext().profileManager().dbLookupSuccessful(_peer, timeToReply); + } catch (IllegalArgumentException iae) { + getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply); + } } else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { if (_log.shouldLog(Log.INFO)) _log.info(getJobId() + ": dbStore received on search containing router " + msg.getKey() + " with publishDate of " + new Date(msg.getRouterInfo().getPublished())); - _facade.store(msg.getKey(), msg.getRouterInfo()); + try { + _facade.store(msg.getKey(), msg.getRouterInfo()); + getContext().profileManager().dbLookupSuccessful(_peer, timeToReply); + } catch (IllegalArgumentException iae) { + getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply); + } } else { if (_log.shouldLog(Log.ERROR)) _log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType()); } - - getContext().profileManager().dbLookupSuccessful(_peer, timeToReply); } else if (_message instanceof DatabaseSearchReplyMessage) { 
_job.replyFound((DatabaseSearchReplyMessage)_message, _peer); } else { diff --git a/router/java/src/net/i2p/router/peermanager/DBHistory.java b/router/java/src/net/i2p/router/peermanager/DBHistory.java index 3ff6d2525e19ef24a514c0294ed579a7d2a3e50b..8c381c7fb70fe6a2dfb5395a4809c7a1f62616a6 100644 --- a/router/java/src/net/i2p/router/peermanager/DBHistory.java +++ b/router/java/src/net/i2p/router/peermanager/DBHistory.java @@ -18,6 +18,7 @@ public class DBHistory { private long _successfulLookups; private long _failedLookups; private RateStat _failedLookupRate; + private RateStat _invalidReplyRate; private long _lookupReplyNew; private long _lookupReplyOld; private long _lookupReplyDuplicate; @@ -34,6 +35,7 @@ public class DBHistory { _successfulLookups = 0; _failedLookups = 0; _failedLookupRate = null; + _invalidReplyRate = null; _lookupReplyNew = 0; _lookupReplyOld = 0; _lookupReplyDuplicate = 0; @@ -74,6 +76,8 @@ public class DBHistory { */ public RateStat getFailedLookupRate() { return _failedLookupRate; } + public RateStat getInvalidReplyRate() { return _invalidReplyRate; } + /** * Note that the peer was not only able to respond to the lookup, but sent us * the data we wanted! @@ -103,6 +107,10 @@ public class DBHistory { _lookupReplyOld += oldPeers; _lookupReplyInvalid += invalid; _lookupReplyDuplicate += duplicate; + + if (invalid > 0) { + _invalidReplyRate.addData(invalid, 0); + } } /** * Note that the peer sent us a lookup @@ -148,6 +156,7 @@ public class DBHistory { public void coallesceStats() { _log.debug("Coallescing stats"); _failedLookupRate.coallesceStats(); + _invalidReplyRate.coallesceStats(); } private final static String NL = System.getProperty("line.separator"); @@ -171,7 +180,7 @@ public class DBHistory { add(buf, "avgDelayBetweenLookupsReceived", _avgDelayBetweenLookupsReceived, "How long is it typically between each db lookup they send us? 
(in milliseconds)"); out.write(buf.toString().getBytes()); _failedLookupRate.store(out, "dbHistory.failedLookupRate"); - _log.debug("Writing out dbHistory.failedLookupRate"); + _invalidReplyRate.store(out, "dbHistory.invalidReplyRate"); } private void add(StringBuffer buf, String name, long val, String description) { @@ -197,12 +206,21 @@ _log.debug("Loading dbHistory.failedLookupRate"); } catch (IllegalArgumentException iae) { _log.warn("DB History failed lookup rate is corrupt, resetting", iae); + } + + try { + _invalidReplyRate.load(props, "dbHistory.invalidReplyRate", true); + } catch (IllegalArgumentException iae) { + _log.warn("DB History invalid reply rate is corrupt, resetting", iae); createRates(); } } private void createRates() { - _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + if (_failedLookupRate == null) + _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + if (_invalidReplyRate == null) + _invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistent, forged, etc) peer?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); } private final static long getLong(Properties props, String key) { diff --git a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java index 3c0a7ae8ac79843ad20685c1c86e428067e77ee3..c6f1c919e54dea7c2d8c8f54cd5a554f156e2ef1 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java +++ b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java @@ -21,6 +21,8 @@ import java.util.TreeSet; import net.i2p.data.DataHelper; import net.i2p.data.Hash; import 
net.i2p.router.RouterContext; +import net.i2p.stat.Rate; +import net.i2p.stat.RateStat; import net.i2p.util.Log; /** @@ -197,6 +199,35 @@ public class ProfileOrganizer { public boolean isWellIntegrated(Hash peer) { synchronized (_reorganizeLock) { return _wellIntegratedPeers.containsKey(peer); } } public boolean isFailing(Hash peer) { synchronized (_reorganizeLock) { return _failingPeers.containsKey(peer); } } + + /** + * if a peer sends us more than 5 replies in a searchReply that we cannot + * fetch, stop listening to them. + * + */ + private final static int MAX_BAD_REPLIES_PER_HOUR = 5; + + /** + * Does the given peer send us bad replies - either invalid store messages + * (expired, corrupt, etc) or unreachable replies (pointing towards routers + * that don't exist). + * + */ + public boolean peerSendsBadReplies(Hash peer) { + PeerProfile profile = getProfile(peer); + if (profile != null) { + RateStat invalidReplyRateStat = profile.getDBHistory().getInvalidReplyRate(); + Rate invalidReplyRate = invalidReplyRateStat.getRate(60*60*1000l); + if ( (invalidReplyRate.getCurrentTotalValue() > MAX_BAD_REPLIES_PER_HOUR) || + (invalidReplyRate.getLastTotalValue() > MAX_BAD_REPLIES_PER_HOUR) ) { + return true; + } + } + return false; + } + + + /** * Return a set of Hashes for peers that are both fast and reliable. 
If an insufficient * number of peers are both fast and reliable, fall back onto high capacity peers, and if that diff --git a/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java b/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java index 563ea38919114b23d99717d232cbca2692c54c6c..e67f2d9710b97d3c0301be02991a931bf9c33ab2 100644 --- a/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java +++ b/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java @@ -191,8 +191,15 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { byte signedData[] = new byte[decr.length - rsig.getData().length]; System.arraycopy(decr, 0, signedData, 0, signedData.length); boolean valid = _context.dsa().verifySignature(rsig, signedData, _remoteIdentity.getSigningPublicKey()); - if (valid) - _context.netDb().store(_remoteIdentity.getHash(), peer); + if (valid) { + try { + _context.netDb().store(_remoteIdentity.getHash(), peer); + } catch (IllegalArgumentException iae) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Peer gave us invalid router info", iae); + valid = false; + } + } return valid; }