From 0a93466999a12f76a37b42c1bf5bc0d9b9af6488 Mon Sep 17 00:00:00 2001
From: zzz <zzz@mail.i2p>
Date: Fri, 29 Jan 2010 13:53:14 +0000
Subject: [PATCH] - Add basic DOS prevention for lookups
 - Move flood throttle check so we don't throttle ourselves

---
 .../src/net/i2p/router/RouterContext.java     |  4 +-
 .../src/net/i2p/router/RouterDoSThrottle.java |  1 +
 .../net/i2p/router/RouterThrottleImpl.java    |  1 +
 .../networkdb/kademlia/FloodThrottler.java    |  3 +
 ...FloodfillDatabaseLookupMessageHandler.java | 30 ++++----
 .../FloodfillNetworkDatabaseFacade.java       | 28 +++++---
 ...andleFloodfillDatabaseStoreMessageJob.java | 11 ++-
 .../networkdb/kademlia/LookupThrottler.java   | 70 +++++++++++++++++++
 8 files changed, 123 insertions(+), 25 deletions(-)
 create mode 100644 router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java

diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java
index 8f1c240c35..dfa9d7c210 100644
--- a/router/java/src/net/i2p/router/RouterContext.java
+++ b/router/java/src/net/i2p/router/RouterContext.java
@@ -134,8 +134,8 @@ public class RouterContext extends I2PAppContext {
         _shitlist = new Shitlist(this);
         _blocklist = new Blocklist(this);
         _messageValidator = new MessageValidator(this);
-        //_throttle = new RouterThrottleImpl(this);
-        _throttle = new RouterDoSThrottle(this);
+        _throttle = new RouterThrottleImpl(this);
+        //_throttle = new RouterDoSThrottle(this);
         _integrationCalc = new IntegrationCalculator(this);
         _speedCalc = new SpeedCalculator(this);
         _capacityCalc = new CapacityCalculator(this);
diff --git a/router/java/src/net/i2p/router/RouterDoSThrottle.java b/router/java/src/net/i2p/router/RouterDoSThrottle.java
index 79471627a2..5f49206bb5 100644
--- a/router/java/src/net/i2p/router/RouterDoSThrottle.java
+++ b/router/java/src/net/i2p/router/RouterDoSThrottle.java
@@ -6,6 +6,7 @@ import net.i2p.data.Hash;
  * Minor extention of the router throttle to handle some DoS events and
  * throttle accordingly.
  *
+ * @deprecated unused
  */
 class RouterDoSThrottle extends RouterThrottleImpl {
     public RouterDoSThrottle(RouterContext context) {
diff --git a/router/java/src/net/i2p/router/RouterThrottleImpl.java b/router/java/src/net/i2p/router/RouterThrottleImpl.java
index 750acb0730..289f929a8e 100644
--- a/router/java/src/net/i2p/router/RouterThrottleImpl.java
+++ b/router/java/src/net/i2p/router/RouterThrottleImpl.java
@@ -72,6 +72,7 @@ class RouterThrottleImpl implements RouterThrottle {
         }
     }
 
+    /** @deprecated unused, function moved to netdb */
     public boolean acceptNetDbLookupRequest(Hash key) {
         long lag = _context.jobQueue().getMaxLag();
         if (lag > JOB_LAG_LIMIT) {
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java
index 303e8fc0ec..96b0f508d7 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java
@@ -7,6 +7,9 @@ import net.i2p.util.SimpleTimer;
 
 /**
  * Count how often we have recently flooded a key
+ * This offers basic DOS protection but is not a complete solution.
+ *
+ * @since 0.7.11
  */
 class FloodThrottler {
     private ObjectCounter<Hash> counter;
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java
index 50e5b7f80c..8ef121d060 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java
@@ -23,12 +23,15 @@ import net.i2p.util.Log;
  */
 public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder {
     private RouterContext _context;
+    private FloodfillNetworkDatabaseFacade _facade;
     private Log _log;
-    public FloodfillDatabaseLookupMessageHandler(RouterContext context) {
+
+    public FloodfillDatabaseLookupMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
         _context = context;
+        _facade = facade;
         _log = context.logManager().getLog(FloodfillDatabaseLookupMessageHandler.class);
-        _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 60*60*1000l });
+        _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 60*60*1000l });
         // following are for ../HDLMJ
         _context.statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
         _context.statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 60*60*1000l });
@@ -42,18 +45,19 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
     public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
         _context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
 
-        if (true || _context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
-            Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
-            if (false) {
-                // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
-                j.runJob();
-                return null;
-            } else {
+        DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
+        if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
+            Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash);
+            //if (false) {
+            //    // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
+            //    j.runJob();
+            //    return null;
+            //} else {
                 return j;
-            }
+            //}
         } else {
-            if (_log.shouldLog(Log.INFO))
-                _log.info("Dropping lookup request as throttled");
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Dropping lookup request for " + dlm.getSearchKey() + " (throttled), reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
             _context.statManager().addRateData("netDb.lookupsDropped", 1, 1);
             return null;
         }
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
index 09e59c12e5..8993da4334 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
@@ -12,6 +12,7 @@ import net.i2p.data.DataStructure;
 import net.i2p.data.Hash;
 import net.i2p.data.LeaseSet;
 import net.i2p.data.RouterInfo;
+import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DatabaseLookupMessage;
 import net.i2p.data.i2np.DatabaseSearchReplyMessage;
 import net.i2p.data.i2np.DatabaseStoreMessage;
@@ -38,6 +39,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     private static String _alwaysQuery;
     private final Set<Hash> _verifiesInProgress;
     private FloodThrottler _floodThrottler;
+    private LookupThrottler _lookupThrottler;
 
     public FloodfillNetworkDatabaseFacade(RouterContext context) {
         super(context);
@@ -63,11 +65,12 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     public void startup() {
         super.startup();
         _context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
+        _lookupThrottler = new LookupThrottler();
     }
 
     @Override
     protected void createHandlers() {
-        _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context));
+        _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context, this));
         _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new FloodfillDatabaseStoreMessageHandler(_context, this));
     }
 
@@ -103,6 +106,22 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         }
     }
 
+    /**
+     * Increments and tests.
+     * @since 0.7.11
+     */
+    boolean shouldThrottleFlood(Hash key) {
+        return _floodThrottler != null && _floodThrottler.shouldThrottle(key);
+    }
+
+    /**
+     * Increments and tests.
+     * @since 0.7.11
+     */
+    boolean shouldThrottleLookup(Hash from, TunnelId id) {
+        return _lookupThrottler.shouldThrottle(from, id);
+    }
+
     private static final int MAX_TO_FLOOD = 7;
 
     /**
@@ -116,13 +135,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
             key = ((LeaseSet)ds).getDestination().calculateHash();
         else
             key = ((RouterInfo)ds).getIdentity().calculateHash();
-        // DOS prevention
-        if (_floodThrottler != null && _floodThrottler.shouldThrottle(key)) {
-            if (_log.shouldLog(Log.WARN))
-                _log.warn("Too many recent stores, not flooding key: " + key);
-            _context.statManager().addRateData("netDb.floodThrottled", 1, 0);
-            return;
-        }
         Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
         FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
         List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java
index d57f3bf97d..452d8a60b9 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java
@@ -54,9 +54,9 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
         String invalidMessage = null;
         boolean wasNew = false;
         RouterInfo prevNetDb = null;
+        Hash key = _message.getKey();
         if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
             getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
-            Hash key = _message.getKey();
             if (_log.shouldLog(Log.INFO))
                 _log.info("Handling dbStore of leaseset " + _message);
             //_log.info("Handling dbStore of leasset " + key + " with expiration of "
@@ -92,7 +92,6 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             }
         } else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
             getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
-            Hash key = _message.getKey();
             if (_log.shouldLog(Log.INFO))
                 _log.info("Handling dbStore of router " + key + " with publishDate of "
                           + new Date(_message.getRouterInfo().getPublished()));
@@ -163,6 +162,14 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()) &&
             _message.getReplyToken() > 0) {
             if (wasNew) {
+                // DOS prevention
+                // Note this does not throttle the ack above
+                if (_facade.shouldThrottleFlood(key)) {
+                    if (_log.shouldLog(Log.WARN))
+                        _log.warn("Too many recent stores, not flooding key: " + key);
+                    getContext().statManager().addRateData("netDb.floodThrottled", 1, 0);
+                    return;
+                }
                 long floodBegin = System.currentTimeMillis();
                 if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
                     _facade.flood(_message.getLeaseSet());
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java b/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java
new file mode 100644
index 0000000000..b4cef3621e
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java
@@ -0,0 +1,70 @@
+package net.i2p.router.networkdb.kademlia;
+
+import net.i2p.data.Hash;
+import net.i2p.data.TunnelId;
+import net.i2p.util.ObjectCounter;
+import net.i2p.util.SimpleScheduler;
+import net.i2p.util.SimpleTimer;
+
+/**
+ * Count how often we have recently received a lookup request with
+ * the reply specified to go to a peer/TunnelId pair.
+ * This offers basic DOS protection but is not a complete solution.
+ * The reply peer/tunnel could be spoofed, for example.
+ * And a requestor could have up to 6 reply tunnels.
+ *
+ * @since 0.7.11
+ */
+class LookupThrottler {
+    private ObjectCounter<ReplyTunnel> counter;
+    /** the id of this is -1 */
+    private static final TunnelId DUMMY_ID = new TunnelId();
+    /** this seems like plenty */
+    private static final int MAX_LOOKUPS = 30;
+    private static final long CLEAN_TIME = 60*1000;
+
+    LookupThrottler() {
+        this.counter = new ObjectCounter();
+        SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+    }
+
+    /**
+     * increments before checking
+     * @param key non-null
+     * @param id null if for direct lookups
+     */
+    boolean shouldThrottle(Hash key, TunnelId id) {
+        return this.counter.increment(new ReplyTunnel(key, id)) > MAX_LOOKUPS;
+    }
+
+    private class Cleaner implements SimpleTimer.TimedEvent {
+        public void timeReached() {
+            LookupThrottler.this.counter.clear();
+        }
+    }
+
+    /** yes, we could have a two-level lookup, or just do h.tostring() + id.tostring() */
+    private static class ReplyTunnel {
+        public Hash h;
+        public TunnelId id;
+
+        ReplyTunnel(Hash h, TunnelId id) {
+            this.h = h;
+            if (id != null)
+                this.id = id;
+            else
+                this.id = DUMMY_ID;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            return this.h.equals(((ReplyTunnel)obj).h) &&
+                   this.id.equals(((ReplyTunnel)obj).id);
+        }
+
+        @Override
+        public int hashCode() {
+            return this.h.hashCode() + this.id.hashCode();
+        }
+    }
+}
-- 
GitLab
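
The new LookupThrottler boils down to one idea: count lookup requests per (reply peer, reply tunnel) destination, drop anything past a fixed limit, and wipe all counters on a fixed interval rather than maintaining a sliding window. Below is a minimal, self-contained sketch of that idea. It uses only JDK classes (ConcurrentHashMap, ScheduledExecutorService) in place of I2P's ObjectCounter and SimpleScheduler, and the class and method names are illustrative rather than taken from the I2P source; only the 30-lookup limit and the 60-second reset mirror the patch.

import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Illustrative stand-in for LookupThrottler: increment a counter per
 * (peer, tunnel) reply destination and throttle once it passes MAX_LOOKUPS.
 * Counters are wiped in bulk every CLEAN_TIME_MS, matching the patch's
 * periodic-clear approach rather than a sliding window.
 */
class LookupThrottleSketch {
    private static final int MAX_LOOKUPS = 30;            // same limit as the patch
    private static final long CLEAN_TIME_MS = 60 * 1000;  // same 60s reset as the patch

    private final ConcurrentHashMap<ReplyKey, AtomicInteger> counts = new ConcurrentHashMap<>();

    LookupThrottleSketch(ScheduledExecutorService scheduler) {
        // periodic bulk reset, standing in for I2P's SimpleScheduler event
        scheduler.scheduleAtFixedRate(counts::clear, CLEAN_TIME_MS, CLEAN_TIME_MS, TimeUnit.MILLISECONDS);
    }

    /** Increments first, then tests, so throttled requests still count against the sender. */
    boolean shouldThrottle(String peerHash, Long tunnelId) {
        ReplyKey key = new ReplyKey(peerHash, tunnelId);
        int n = counts.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
        return n > MAX_LOOKUPS;
    }

    /** (peer, tunnel) pair; a null tunnel id means a direct, non-tunnel reply. */
    private static final class ReplyKey {
        final String peer;
        final long tunnel;

        ReplyKey(String peer, Long tunnel) {
            this.peer = peer;
            this.tunnel = (tunnel != null) ? tunnel : -1L;  // -1 plays the role of DUMMY_ID
        }

        @Override
        public boolean equals(Object o) {
            if (!(o instanceof ReplyKey))
                return false;
            ReplyKey r = (ReplyKey) o;
            return peer.equals(r.peer) && tunnel == r.tunnel;
        }

        @Override
        public int hashCode() {
            return Objects.hash(peer, tunnel);
        }
    }

    public static void main(String[] args) {
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        LookupThrottleSketch throttle = new LookupThrottleSketch(ses);
        // the 31st lookup aimed at the same reply destination is dropped
        for (int i = 1; i <= 31; i++) {
            if (throttle.shouldThrottle("peerA", 42L))
                System.out.println("lookup " + i + " throttled");
        }
        ses.shutdownNow();
    }
}

Like the patch, this clears every counter at once instead of aging entries individually, which keeps the hot path to a single map increment but lets a burst that straddles a reset reach roughly twice the limit before it is throttled.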