forked from I2P_Developers/i2p.i2p
* NetDB: Reinstate ExpireRoutersJob
@@ -1,3 +1,9 @@
+2013-10-19 zzz
+ * NetDB:
+   - Reinstate ExpireRoutersJob
+   - Reduce min part. tunnels for floodfill
+   - Reduce floodfill redundancy
+
 2013-10-17 zzz
  * I2CP: Move SSL client socket code to util,
    move cert location to certificates/i2cp.
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 5;
+    public final static long BUILD = 6;
 
     /** for example "-test" */
     public final static String EXTRA = "";
@@ -11,6 +11,7 @@ package net.i2p.router.networkdb.kademlia;
 import java.util.Collections;
 import java.util.Set;
 
+import net.i2p.data.DatabaseEntry;
 import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
 import net.i2p.router.JobImpl;
@@ -25,14 +26,13 @@ import net.i2p.util.Log;
  * or other criteria to minimize netdb size, but for now we just use _facade's
  * validate(), which is a sliding expriation based on netdb size.
  *
- * @deprecated unused - see comments in KNDF
  */
class ExpireRoutersJob extends JobImpl {
     private final Log _log;
     private final KademliaNetworkDatabaseFacade _facade;
 
     /** rerun fairly often, so the fails don't queue up too many netdb searches at once */
-    private final static long RERUN_DELAY_MS = 120*1000;
+    private final static long RERUN_DELAY_MS = 5*60*1000;
 
     public ExpireRoutersJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) {
         super(ctx);
@@ -43,15 +43,9 @@ class ExpireRoutersJob extends JobImpl {
     public String getName() { return "Expire Routers Job"; }
 
     public void runJob() {
-        // this always returns an empty set (see below)
-        Set<Hash> toExpire = selectKeysToExpire();
+        int removed = expireKeys();
         if (_log.shouldLog(Log.INFO))
-            _log.info("Routers to expire (drop and try to refetch): " + toExpire);
-        for (Hash key : toExpire) {
-            _facade.fail(key);
-        }
-        _facade.queueForExploration(toExpire);
-
+            _log.info("Routers expired: " + removed);
         requeue(RERUN_DELAY_MS);
     }
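Annotation (not part of the commit): the rewritten runJob() keeps I2P's usual self-requeueing job shape — do one expiration pass, log the count, and reschedule via requeue(RERUN_DELAY_MS) rather than loop. A sketch of that shape with a plain ScheduledExecutorService standing in for the router's JobQueue; the 5-minute delay mirrors RERUN_DELAY_MS, everything else is illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RequeueSketch {
    private static final long RERUN_DELAY_MS = 5 * 60 * 1000; // mirrors the new constant
    private final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();

    void runJob() {
        int removed = 0; // ... one expiration pass would go here ...
        System.out.println("Routers expired: " + removed);
        // the analogue of requeue(RERUN_DELAY_MS): reschedule rather than loop
        exec.schedule(this::runJob, RERUN_DELAY_MS, TimeUnit.MILLISECONDS);
    }
}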
@@ -59,23 +53,32 @@ class ExpireRoutersJob extends JobImpl {
     /**
      * Run through all of the known peers and pick ones that have really old
      * routerInfo publish dates, excluding ones that we are connected to,
-     * so that they can be failed & queued for searching
+     * so that they can be failed
      *
-     * @return nothing for now
+     * @return number removed
      */
-    private Set<Hash> selectKeysToExpire() {
-        for (Hash key : _facade.getAllRouters()) {
+    private int expireKeys() {
+        Set<Hash> keys = _facade.getAllRouters();
+        keys.remove(getContext().routerHash());
+        int removed = 0;
+        for (Hash key : keys) {
             // Don't expire anybody we are connected to
             if (!getContext().commSystem().isEstablished(key)) {
-                // This does a _facade.validate() and fail() which is sufficient...
-                // no need to impose our own expiration here.
-                // One issue is this will queue a ton of floodfill queries the first time it is run
-                // after the 1h router startup grace period.
-                _facade.lookupRouterInfoLocally(key);
+                DatabaseEntry e = _facade.lookupLocallyWithoutValidation(key);
+                if (e != null &&
+                    e.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
+                    try {
+                        if (_facade.validate((RouterInfo) e) != null) {
+                            _facade.dropAfterLookupFailed(key);
+                            removed++;
+                        }
+                    } catch (IllegalArgumentException iae) {
+                        _facade.dropAfterLookupFailed(key);
+                        removed++;
+                    }
+                }
             }
         }
-
-        // let _facade do all the work for now
-        return Collections.EMPTY_SET;
+        return removed;
     }
 }
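Annotation: a minimal, self-contained sketch of the new expire-and-count flow. The NetDb interface and String keys are hypothetical stand-ins for the real facade and Hash type; only the control flow mirrors the hunk above — skip our own hash and connected peers, validate what is stored, and count a drop only when validation fails or throws:

import java.util.HashSet;
import java.util.Set;

public class ExpireSketch {

    /** Hypothetical stand-in for the facade: validate() returns a failure reason, or null if valid. */
    interface NetDb {
        Set<String> getAllRouters();
        String validate(String key);
        void dropAfterLookupFailed(String key);
    }

    static int expireKeys(NetDb db, String self, Set<String> connected) {
        Set<String> keys = new HashSet<>(db.getAllRouters());
        keys.remove(self);                        // never expire our own RouterInfo
        int removed = 0;
        for (String key : keys) {
            if (connected.contains(key))
                continue;                         // don't expire anybody we are connected to
            try {
                if (db.validate(key) != null) {   // non-null reason => stale
                    db.dropAfterLookupFailed(key);
                    removed++;
                }
            } catch (IllegalArgumentException iae) {
                db.dropAfterLookupFailed(key);    // malformed entry: drop it too
                removed++;
            }
        }
        return removed;
    }
}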
@@ -467,7 +467,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         }
         public String getName() { return "Lookup on failure of netDb peer timed out"; }
         public void runJob() {
-            dropAfterLookupFailed(_peer, _info);
+            dropAfterLookupFailed(_peer);
         }
     }
@@ -487,7 +487,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
                 // great, a legitimate update
             } else {
                 // they just sent us what we already had. kill 'em both
-                dropAfterLookupFailed(_peer, _info);
+                dropAfterLookupFailed(_peer);
             }
         }
     }
@@ -259,12 +259,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         elj.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
         _context.jobQueue().addJob(elj);
 
-        // the ExpireRoutersJob never fired since the tunnel pool manager lied
-        // and said all peers are in use (for no good reason), but this expire
-        // thing was a bit overzealous anyway, since the kbuckets are only
-        // relevent when the network is huuuuuuuuge.
-        //// expire some routers in overly full kbuckets
-        ////_context.jobQueue().addJob(new ExpireRoutersJob(_context, this));
+        //// expire some routers
+        // Don't run until after RefreshRoutersJob has run, and after validate() will return invalid for old routers.
+        Job erj = new ExpireRoutersJob(_context, this);
+        erj.getTiming().setStartAfter(_context.clock().now() + ROUTER_INFO_EXPIRATION_FLOODFILL + 10*60*1000);
+        _context.jobQueue().addJob(erj);
 
         if (!QUIET) {
             // fill the search queue with random keys in buckets that are too small
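Annotation: the reinstated job's first run is deliberately pushed past both the RefreshRoutersJob pass and the point where validate() can actually flag old RouterInfos, by computing an absolute start time. A sketch of that arithmetic; the one-hour value assumed for ROUTER_INFO_EXPIRATION_FLOODFILL is an illustration, not taken from this diff:

public class StartAfterSketch {
    static final long ROUTER_INFO_EXPIRATION_FLOODFILL = 60 * 60 * 1000L; // assumed 1h for illustration

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // the analogue of erj.getTiming().setStartAfter(...)
        long startAfter = now + ROUTER_INFO_EXPIRATION_FLOODFILL + 10 * 60 * 1000;
        long delayMs = startAfter - now; // what a scheduler would actually wait
        System.out.println("First ExpireRoutersJob pass in " + (delayMs / 60000) + " minutes");
    }
}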
@@ -434,6 +433,17 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         }
         return null;
     }
 
+    /**
+     *  Not for use without validation
+     *  @return RouterInfo, LeaseSet, or null, NOT validated
+     *  @since 0.9.9
+     */
+    DatabaseEntry lookupLocallyWithoutValidation(Hash key) {
+        if (!_initialized)
+            return null;
+        return _ds.get(key);
+    }
+
     public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
         if (!_initialized) return;
@@ -752,7 +762,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
      * @return reason why the entry is not valid, or null if it is valid
      * @since 0.9.7
      */
-    private String validate(RouterInfo routerInfo) throws IllegalArgumentException {
+    String validate(RouterInfo routerInfo) throws IllegalArgumentException {
         long now = _context.clock().now();
         boolean upLongEnough = _context.router().getUptime() > 60*60*1000;
         // Once we're over MIN_ROUTERS routers, reduce the expiration time down from the default,
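Annotation: the "sliding expiration" that validate() applies (and that expireKeys() now relies on) shortens the tolerated RouterInfo age as the local netdb grows. The constants and the linear slide below are illustrative assumptions, not I2P's actual numbers:

public class SlidingExpirationSketch {
    static final long MAX_EXPIRATION_MS = 27 * 60 * 60 * 1000L; // assumed ceiling
    static final long MIN_EXPIRATION_MS = 60 * 60 * 1000L;      // assumed floor
    static final int  MIN_ROUTERS = 90;                         // assumed start of the slide
    static final int  MAX_ROUTERS = 2000;                       // assumed end of the slide

    /** Age limit shrinks linearly from the ceiling to the floor as the netdb grows. */
    static long allowedAge(int netDbSize) {
        if (netDbSize <= MIN_ROUTERS) return MAX_EXPIRATION_MS;
        if (netDbSize >= MAX_ROUTERS) return MIN_EXPIRATION_MS;
        double f = (netDbSize - MIN_ROUTERS) / (double) (MAX_ROUTERS - MIN_ROUTERS);
        return MAX_EXPIRATION_MS - (long) (f * (MAX_EXPIRATION_MS - MIN_EXPIRATION_MS));
    }

    /** Mirrors validate()'s contract: a non-null reason means the entry is invalid. */
    static String validate(long publishedAgeMs, int netDbSize) {
        if (publishedAgeMs > allowedAge(netDbSize))
            return "Published too long ago";
        return null; // still valid
    }
}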
@@ -858,6 +868,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return rv;
     }
 
+    /**
+     *  Final remove for a leaseset.
+     *  For a router info, will look up in the network before dropping.
+     */
     public void fail(Hash dbEntry) {
         if (!_initialized) return;
         DatabaseEntry o = _ds.get(dbEntry);
@@ -885,15 +899,20 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     /** don't use directly - see F.N.D.F. override */
     protected void lookupBeforeDropping(Hash peer, RouterInfo info) {
         //bah, humbug.
-        dropAfterLookupFailed(peer, info);
+        dropAfterLookupFailed(peer);
     }
-    protected void dropAfterLookupFailed(Hash peer, RouterInfo info) {
+
+    /**
+     *  Final remove for a router info.
+     *  Do NOT use for leasesets.
+     */
+    void dropAfterLookupFailed(Hash peer) {
         _context.peerManager().removeCapabilities(peer);
         boolean removed = _kb.remove(peer);
-        if (removed) {
-            if (_log.shouldLog(Log.INFO))
-                _log.info("Removed kbucket entry for " + peer);
-        }
+        //if (removed) {
+        //    if (_log.shouldLog(Log.INFO))
+        //        _log.info("Removed kbucket entry for " + peer);
+        //}
 
         _ds.remove(peer);
     }