2006-07-26 jrandom

    * When dropping a netDb router reference, only accept newer
      references as part of the update check
    * If we have been up for a while, don't accept really old
      router references (published 2 or more days ago)
    * Drop router references once they are no longer valid, even if
      they were allowed in due to the lax restrictions on startup
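
The acceptance rule described above reduces to a simple age check. A minimal sketch follows, assuming millisecond timestamps; the one-hour and two-day thresholds come from the KademliaNetworkDatabaseFacade diff below, while the method and parameter names are only illustrative:

    // Illustrative sketch, not the literal patch: once the router has been up
    // for over an hour, stop accepting references published 2 or more days ago.
    static boolean acceptRouterReference(long uptimeMs, long publishedMs, long nowMs) {
        final long LAX_STARTUP_PERIOD = 60*60*1000L;   // first hour: lax startup rules
        final long MAX_AGE = 2*24*60*60*1000L;         // two days
        long age = nowMs - publishedMs;
        if (uptimeMs > LAX_STARTUP_PERIOD && age > MAX_AGE)
            return false;   // stale: validate() returns an error string and the reference is dropped
        return true;
    }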
Author: jrandom
Date: 2006-07-27 00:56:49 +00:00
Committed by: zzz
Parent: d4e0f27c56
Commit: ec215777ec
6 changed files with 53 additions and 13 deletions


@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.434 $ $Date: 2006-07-18 15:08:02 $";
+    public final static String ID = "$Revision: 1.435 $ $Date: 2006-07-26 01:36:30 $";
     public final static String VERSION = "0.6.1.22";
-    public final static long BUILD = 1;
+    public final static long BUILD = 2;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);


@@ -237,7 +237,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         // entry locally, firing no job if it gets a reply with an updated value (meaning
         // we shouldn't drop them but instead use the new data), or if they all time out,
         // firing the dropLookupFailedJob, which actually removes out local reference
-        search(peer, null, new DropLookupFailedJob(_context, peer, info), 10*1000, false);
+        search(peer, new DropLookupFoundJob(_context, peer, info), new DropLookupFailedJob(_context, peer, info), 10*1000, false);
     }
     private class DropLookupFailedJob extends JobImpl {
@@ -254,6 +254,26 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
             dropAfterLookupFailed(_peer, _info);
         }
     }
+    private class DropLookupFoundJob extends JobImpl {
+        private Hash _peer;
+        private RouterInfo _info;
+        public DropLookupFoundJob(RouterContext ctx, Hash peer, RouterInfo info) {
+            super(ctx);
+            _peer = peer;
+            _info = info;
+        }
+        public String getName() { return "Lookup on failure of netDb peer matched"; }
+        public void runJob() {
+            RouterInfo updated = lookupRouterInfoLocally(_peer);
+            if ( (updated != null) && (updated.getPublished() > _info.getPublished()) ) {
+                // great, a legitimate update
+            } else {
+                // they just sent us what we already had. kill 'em both
+                dropAfterLookupFailed(_peer, _info);
+            }
+        }
+    }
 }
 /**
/**


@@ -462,9 +462,22 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         if (!_initialized) return null;
         DataStructure ds = _ds.get(key);
         if (ds != null) {
-            if (ds instanceof RouterInfo)
+            if (ds instanceof RouterInfo) {
+                // more aggressive than perhaps is necessary, but makes sure we
+                // drop old references that we had accepted on startup (since
+                // startup allows some lax rules).
+                boolean valid = true;
+                try {
+                    valid = (null == validate(key, (RouterInfo)ds));
+                } catch (IllegalArgumentException iae) {
+                    valid = false;
+                }
+                if (!valid) {
+                    fail(key);
+                    return null;
+                }
                 return (RouterInfo)ds;
-            else {
+            } else {
                 //_log.debug("Looking for a router [" + key + "] but it ISN'T a RouterInfo! " + ds, new Exception("Who thought that lease was a router?"));
                 return null;
             }
@@ -677,6 +690,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
             String rv = "Peer " + key.toBase64() + " is from another network, not accepting it (id="
                         + routerInfo.getNetworkId() + ", want " + Router.NETWORK_ID + ")";
             return rv;
+        } else if ( (_context.router().getUptime() > 60*60*1000) && (routerInfo.getPublished() < now - 2*24*60*60*1000l) ) {
+            long age = _context.clock().now() - routerInfo.getPublished();
+            return "Peer " + key.toBase64() + " published " + DataHelper.formatDuration(age) + " ago";
         }
         return null;
     }


@@ -44,6 +44,8 @@ public class TransportManager implements TransportEventListener {
     public TransportManager(RouterContext context) {
         _context = context;
         _log = _context.logManager().getLog(TransportManager.class);
+        _context.statManager().createRateStat("transport.shitlistOnUnreachable", "Add a peer to the shitlist since none of the transports can reach them", "Transport", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+        _context.statManager().createRateStat("transport.noBidsYetNotAllUnreachable", "Add a peer to the shitlist since none of the transports can reach them", "Transport", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
         _transports = new ArrayList();
     }
@@ -239,10 +241,16 @@ public class TransportManager implements TransportEventListener {
             } else {
                 if (_log.shouldLog(Log.DEBUG))
                     _log.debug("Transport " + t.getStyle() + " did not produce a bid");
+                if (t.isUnreachable(peer))
+                    unreachableTransports++;
             }
         }
-        if (unreachableTransports >= _transports.size())
+        if (unreachableTransports >= _transports.size()) {
+            _context.statManager().addRateData("transport.shitlistOnUnreachable", msg.getLifetime(), msg.getLifetime());
             _context.shitlist().shitlistRouter(peer, "Unreachable on any transport");
+        } else if (rv == null) {
+            _context.statManager().addRateData("transport.noBidsYetNotAllUnreachable", unreachableTransports, msg.getLifetime());
+        }
         return rv;
     }


@@ -256,7 +256,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
             synchronized (_writeBufs) { blocks = _writeBufs.size(); }
             if (_log.shouldLog(Log.ERROR))
                 _log.error("Too backlogged for too long (" + _consecutiveBacklog + " messages for " + DataHelper.formatDuration(queueTime()) + ", sched? " + wantsWrite + ", blocks: " + blocks + ") sending to " + _remotePeer.calculateHash().toBase64());
-            _context.statManager().addRateData("ntcp.closeOnBacklog", _consecutiveBacklog, getUptime());
+            _context.statManager().addRateData("ntcp.closeOnBacklog", getUptime(), getUptime());
             close();
         }
         _context.statManager().addRateData("ntcp.dontSendOnBacklog", _consecutiveBacklog, msg.getLifetime());


@@ -62,14 +62,13 @@ public class NTCPTransport extends TransportImpl {
         _context.statManager().createRateStat("ntcp.sendBacklogTime", "How long the head of the send queue has been waiting when we fail to add a new one to the queue (period is the number of messages queued)", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.failsafeWrites", "How many times do we need to proactively add in an extra nio write to a peer at any given failsafe pass?", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.failsafeCloses", "How many times do we need to proactively close an idle connection to a peer at any given failsafe pass?", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.accept", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.attemptShitlistedPeer", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.attemptUnreachablePeer", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.closeOnBacklog", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.connectFailedIOE", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.connectFailedInvalidPort", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
-        _context.statManager().createRateStat("ntcp.connectFailedNoNTCPAddress", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
+        _context.statManager().createRateStat("ntcp.bidRejectedNoNTCPAddress", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.connectFailedTimeout", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.connectFailedTimeoutIOE", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.connectFailedUnresolved", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
@@ -113,8 +112,6 @@ public class NTCPTransport extends TransportImpl {
         _context.statManager().createRateStat("ntcp.wantsQueuedWrite", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.write", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _context.statManager().createRateStat("ntcp.writeError", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
         _establishing = new ArrayList(4);
         _conLock = new Object();
         _conByIdent = new HashMap(64);
@@ -254,12 +251,11 @@ public class NTCPTransport extends TransportImpl {
                 _log.debug("fast bid when trying to send to " + toAddress.getIdentity().calculateHash().toBase64() + " as its already established");
             return _fastBid;
         }
         RouterAddress addr = toAddress.getTargetAddress(STYLE);
         if (addr == null) {
             markUnreachable(peer);
-            _context.statManager().addRateData("ntcp.connectFailedNoNTCPAddress", 1, 0);
+            _context.statManager().addRateData("ntcp.bidRejectedNoNTCPAddress", 1, 0);
             //_context.shitlist().shitlistRouter(toAddress.getIdentity().calculateHash(), "No NTCP address", STYLE);
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("no bid when trying to send to " + toAddress.getIdentity().calculateHash().toBase64() + " as they don't have an ntcp address");