ISJ: Reduce max search depth to reduce ff load

- from 7 to 6 if not ff
- from 7 to 3 if ff (we don't need to backtrack much, if at all, since we already know most of the ffs)
Author: zzz
Date:   2015-04-16 22:02:35 +00:00
Parent: 6c954f0b68
Commit: 3f46228f0b
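
In effect, the constructor now picks the per-search peer budget once, up front. A minimal, self-contained sketch of that selection, using the constant names and values from the diff below (SearchLimits and pickSearchLimit are illustrative names, not actual ISJ code; the rationale for the uptime guard is inferred):

    class SearchLimits {
        static final int TOTAL_SEARCH_LIMIT = 6;         // non-ff, or a young ff
        static final int TOTAL_SEARCH_LIMIT_WHEN_FF = 3; // established ff

        static int pickSearchLimit(boolean floodfillEnabled, long uptimeMs) {
            // Hypothetical helper. The 30-minute uptime guard keeps a freshly
            // started floodfill on the higher limit while its local view of
            // the floodfill set is still filling in.
            boolean establishedFF = floodfillEnabled && uptimeMs > 30*60*1000L;
            return establishedFF ? TOTAL_SEARCH_LIMIT_WHEN_FF : TOTAL_SEARCH_LIMIT;
        }
    }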

@@ -69,10 +69,14 @@ class IterativeSearchJob extends FloodSearchJob {
     private final Hash _fromLocalDest;
     /** testing */
     private static Hash _alwaysQueryHash;
+    /** Max number of peers to query */
+    private final int _totalSearchLimit;
     private static final int MAX_NON_FF = 3;
     /** Max number of peers to query */
-    private static final int TOTAL_SEARCH_LIMIT = 7;
+    private static final int TOTAL_SEARCH_LIMIT = 6;
+    /** Max number of peers to query if we are ff */
+    private static final int TOTAL_SEARCH_LIMIT_WHEN_FF = 3;
     /** TOTAL_SEARCH_LIMIT * SINGLE_SEARCH_TIME, plus some extra */
     private static final int MAX_SEARCH_TIME = 30*1000;
     /**
@@ -121,9 +125,11 @@ class IterativeSearchJob extends FloodSearchJob {
         _expiration = _timeoutMs + ctx.clock().now();
         _rkey = ctx.routingKeyGenerator().getRoutingKey(key);
         _toTry = new TreeSet<Hash>(new XORComparator<Hash>(_rkey));
+        _totalSearchLimit = (facade.floodfillEnabled() && ctx.router().getUptime() > 30*60*1000) ?
+                            TOTAL_SEARCH_LIMIT_WHEN_FF : TOTAL_SEARCH_LIMIT;
         _unheardFrom = new HashSet<Hash>(CONCURRENT_SEARCHES);
-        _failedPeers = new HashSet<Hash>(TOTAL_SEARCH_LIMIT);
-        _sentTime = new ConcurrentHashMap<Hash, Long>(TOTAL_SEARCH_LIMIT);
+        _failedPeers = new HashSet<Hash>(_totalSearchLimit);
+        _sentTime = new ConcurrentHashMap<Hash, Long>(_totalSearchLimit);
         _fromLocalDest = fromLocalDest;
         if (fromLocalDest != null && !isLease && _log.shouldLog(Log.WARN))
             _log.warn("Search for RI " + key + " down client tunnel " + fromLocalDest, new Exception());
@@ -144,9 +150,9 @@ class IterativeSearchJob extends FloodSearchJob {
         if (ks != null) {
             // Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
             // but we're passing the rkey not the key, so we do it below instead in certain cases.
-            floodfillPeers = ((FloodfillPeerSelector)_facade.getPeerSelector()).selectFloodfillParticipants(_rkey, TOTAL_SEARCH_LIMIT, ks);
+            floodfillPeers = ((FloodfillPeerSelector)_facade.getPeerSelector()).selectFloodfillParticipants(_rkey, _totalSearchLimit, ks);
         } else {
-            floodfillPeers = new ArrayList<Hash>(TOTAL_SEARCH_LIMIT);
+            floodfillPeers = new ArrayList<Hash>(_totalSearchLimit);
         }
         // For testing or local networks... we will
@@ -227,13 +233,13 @@ class IterativeSearchJob extends FloodSearchJob {
         if (pend >= MAX_CONCURRENT)
             return;
         int done = _failedPeers.size();
-        if (done >= TOTAL_SEARCH_LIMIT) {
+        if (done >= _totalSearchLimit) {
             failed();
             return;
         }
         // even if pend and todo are empty, we don't fail, as there may be more peers
         // coming via newPeerToTry()
-        if (done + pend >= TOTAL_SEARCH_LIMIT)
+        if (done + pend >= _totalSearchLimit)
             return;
         if (_alwaysQueryHash != null &&
             !_unheardFrom.contains(_alwaysQueryHash) &&
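
The two checks in the last hunk gate the retry loop: once _totalSearchLimit peers have failed, the whole search is declared failed; and while failed plus in-flight queries would already exhaust the budget, no further query is launched. A compact, self-contained sketch of that gating (Gate and retryGate are illustrative names, not actual ISJ code):

    // 'done' = peers that have failed us, 'pend' = queries still in flight.
    enum Gate { FAIL_SEARCH, WAIT, QUERY_NEXT }

    static Gate retryGate(int done, int pend, int totalSearchLimit) {
        if (done >= totalSearchLimit)
            return Gate.FAIL_SEARCH;  // budget exhausted by failures: give up
        if (done + pend >= totalSearchLimit)
            return Gate.WAIT;         // in-flight queries cover the remaining budget
        return Gate.QUERY_NEXT;       // room left: query the next-closest peer
    }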