NetDB: Drop lookups with replies going to us

Extend lookup expire time
Cleanups
This commit is contained in:
zzz
2020-12-31 08:37:04 -05:00
parent afa4b9e66d
commit 2569123055
6 changed files with 68 additions and 83 deletions

View File

@@ -1,3 +1,29 @@
2020-12-31 zzz
* NetDB:
- Drop lookups with replies going to us
- Extend lookup expire time
2020-12-30 zzz
* Ratchet: Fix N pattern for ECIES build replies
2020-12-29 zzz
* Crypto: Cache AES Ciphers
* i2ptunnel: Disable shared clients (DSA)
* NetDB: Verify RI stores for a while after starting
* Ratchet: mixHash() not required after message for N pattern
2020-12-28 zzz
* Debian: Add Java 15/16 to control
2020-12-27 zzz
* SSU: Fix restoration of window after failed message
2020-12-26 zzz
* Console:
- Move flag overrides to war
- Move initial news to jar
* Router: Move some geoip files to jars
2020-12-24 zzz
* I2CP: Fix requesting leasesets for subsessions (ticket #2458)

View File

@@ -49,12 +49,6 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {
public static final boolean USE_ECIES_FF = true;
//private static volatile long _currentLookupPeriod = 0;
//private static volatile int _currentLookupCount = 0;
// if we try to send over 50 (LOOKUP_THROTTLE_MAX) netDb lookups in 10 seconds, we're acting up
//private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
//private static final long LOOKUP_THROTTLE_MAX = 50;
/** Insanely big. Not much more than 1500 will fit in a message.
Have to prevent a huge alloc on rcv of a malicious msg though */
private static final int MAX_NUM_PEERS = 512;
@@ -98,55 +92,9 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {
/**
 * Creates a lookup message with the search type defaulted to ANY.
 * Search key, reply peer/tunnel, and don't-include list must be set
 * separately before the message is sent.
 *
 * @param context the application context, passed to the superclass
 * @param locallyCreated ignored
 */
public DatabaseLookupMessage(I2PAppContext context, boolean locallyCreated) {
super(context);
//setSearchKey(null);
//setFrom(null);
//setDontIncludePeers(null);
// This is the wrong place for this, any throttling should be in netdb
// And it doesn't throttle anyway (that would have to be in netdb), it just increments a stat
//context.statManager().createRateStat("router.throttleNetDbDoSSend", "How many netDb lookup messages we are sending during a period with a DoS detected", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
//
// only check DoS generation if we are creating the message...
//if (locallyCreated) {
// // we do this in the writeMessage so we know that we have all the data
// int dosCount = detectDoS(context);
// if (dosCount > 0) {
// if (_log.shouldLog(Log.WARN))
// _log.warn("Are we flooding the network with NetDb messages? (" + dosCount
// + " messages so far)", new Exception("Flood cause"))
// }
//}
// default: match any entry type; callers may narrow via the search-type setter
_type = Type.ANY;
}
/**
* Return number of netDb messages in this period, if flood, else 0
*
*/
/*****
private static int detectDoS(I2PAppContext context) {
int count = _currentLookupCount++;
// now let's check for DoS
long now = context.clock().now();
if (_currentLookupPeriod + LOOKUP_THROTTLE_PERIOD > now) {
// same period, check for DoS
if (count >= LOOKUP_THROTTLE_MAX) {
context.statManager().addRateData("router.throttleNetDbDoSSend", count, 0);
return count;
} else {
// no DoS, at least, not yet
return 0;
}
} else {
// on to the next period, reset counter, no DoS
// (no, I'm not worried about concurrency here)
_currentLookupPeriod = now;
_currentLookupCount = 1;
return 0;
}
}
*****/
/**
* Defines the key being searched for
*/

View File

@@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 10;
public final static long BUILD = 11;
/** for example "-test" */
public final static String EXTRA = "";

View File

@@ -65,37 +65,48 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
protected boolean answerAllQueries() { return false; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling database lookup message for " + _message.getSearchKey());
Hash fromKey = _message.getFrom();
if (_log.shouldLog(Log.DEBUG)) {
if (_message.getReplyTunnel() != null)
_log.debug("dbLookup received with replies going to " + fromKey
+ " (tunnel " + _message.getReplyTunnel() + ")");
TunnelId toTunnel = _message.getReplyTunnel();
Hash searchKey = _message.getSearchKey();
if (toTunnel == null && fromKey.equals(getContext().routerHash())) {
if (_log.shouldWarn())
// exploratory, no reply key/tag. i2pd bug?
//_log.warn("Dropping dbLookup with replies going to us:\n" + _message);
_log.warn("Dropping dbLookup for " + searchKey + " with replies going to us");
return;
}
// If we are hidden we should not get queries, log and return
if (getContext().router().isHidden()) {
if (_log.shouldLog(Log.WARN)) {
_log.warn("Uninvited dbLookup received with replies going to " + fromKey
+ " (tunnel " + _message.getReplyTunnel() + ")");
+ " (tunnel " + toTunnel + ")");
}
return;
}
// i2pd bug?
if (_message.getSearchKey().equals(Hash.FAKE_HASH)) {
if (searchKey.equals(Hash.FAKE_HASH)) {
if (_log.shouldWarn())
_log.warn("Zero lookup", new Exception());
getContext().statManager().addRateData("netDb.DLMAllZeros", 1);
return;
}
if (_log.shouldLog(Log.DEBUG)) {
if (toTunnel != null)
_log.debug("Handling database lookup message for " + searchKey +
" with replies going to " + fromKey +
" (tunnel " + toTunnel + ")");
else
_log.debug("Handling database lookup message for " + searchKey +
" with replies going to " + fromKey);
}
DatabaseLookupMessage.Type lookupType = _message.getSearchType();
// only lookup once, then cast to correct type
DatabaseEntry dbe = getContext().netDb().lookupLocally(_message.getSearchKey());
DatabaseEntry dbe = getContext().netDb().lookupLocally(searchKey);
int type = dbe != null ? dbe.getType() : -1;
if (DatabaseEntry.isLeaseSet(type) &&
(lookupType == DatabaseLookupMessage.Type.ANY || lookupType == DatabaseLookupMessage.Type.LS)) {
@@ -105,7 +116,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// As this is complex, lots of comments follow...
boolean isLocal = getContext().clientManager().isLocal(ls.getHash());
boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey());
boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(searchKey);
// Only answer a request for a LeaseSet if it has been published
// to us, or, if its local, if we would have published to ourselves
@@ -124,29 +135,29 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// so we don't check the answerAllQueries() flag.
// Local leasesets are not handled here
if (_log.shouldLog(Log.INFO))
_log.info("We have the published LS " + _message.getSearchKey() + ", answering query");
_log.info("We have the published LS " + searchKey + ", answering query");
getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1);
sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
sendData(searchKey, ls, fromKey, toTunnel);
} else if (shouldPublishLocal && answerAllQueries()) {
// We are floodfill, and this is our local leaseset, and we publish it.
// Only send it out if it is in our estimated keyspace.
// For this, we do NOT use their dontInclude list as it can't be trusted
// (i.e. it could mess up the closeness calculation)
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(_message.getSearchKey(),
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {
// It's in our keyspace, so give it to them
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + _message.getSearchKey() + ", answering query, in our keyspace");
_log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
sendData(searchKey, ls, fromKey, toTunnel);
} else {
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + _message.getSearchKey() + ", NOT answering query, out of our keyspace");
_log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// It was not published to us (we looked it up, for example)
@@ -154,12 +165,12 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + _message.getSearchKey() +
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
lookupType != DatabaseLookupMessage.Type.LS) {
@@ -169,13 +180,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not answering a query for a netDb peer who isn't reachable");
Set<Hash> us = Collections.singleton(getContext().routerHash());
sendClosest(_message.getSearchKey(), us, fromKey, _message.getReplyTunnel());
sendClosest(searchKey, us, fromKey, toTunnel);
} else {
// send that routerInfo to the _message.getFromHash peer
if (_log.shouldLog(Log.DEBUG))
_log.debug("We do have key " + _message.getSearchKey()
_log.debug("We do have key " + searchKey
+ " locally as a router info. sending to " + fromKey);
sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel());
sendData(searchKey, info, fromKey, toTunnel);
}
} else {
// expired locally - return closest peer hashes
@@ -191,17 +202,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// }
if (_log.shouldLog(Log.DEBUG))
_log.debug("Expired " + _message.getSearchKey() +
_log.debug("Expired " + searchKey +
" locally. sending back " + routerHashSet.size() + " peers to " + fromKey);
sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// not found locally - return closest peer hashes
Set<Hash> routerHashSet = getNearestRouters(lookupType);
if (_log.shouldLog(Log.DEBUG))
_log.debug("We do not have key " + _message.getSearchKey() +
_log.debug("We do not have key " + searchKey +
" locally. sending back " + routerHashSet.size() + " peers to " + fromKey);
sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
}

View File

@@ -119,8 +119,8 @@ class ExploreJob extends SearchJob {
// if (len > 0)
// msg.getDontIncludePeers().addAll(peers);
//}
available = MAX_CLOSEST - dontIncludePeers.size();
//available = MAX_CLOSEST - dontIncludePeers.size();
if (available > 0) {
// selectNearestExplicit adds our hash to the dontInclude set (3rd param) ...
// And we end up with MAX_CLOSEST+1 entries.

View File

@@ -102,7 +102,7 @@ public class IterativeSearchJob extends FloodSearchJob {
*/
private static final long SINGLE_SEARCH_TIME = 3*1000;
/** the actual expire time for a search message */
private static final long SINGLE_SEARCH_MSG_TIME = 10*1000;
private static final long SINGLE_SEARCH_MSG_TIME = 20*1000;
/**
* Use instead of CONCURRENT_SEARCHES in super() which is final.
* For now, we don't do concurrent, but we keep SINGLE_SEARCH_TIME very short,