I2P Address: [http://git.idk.i2p]

Skip to content
Snippets Groups Projects
Commit 4426cc35 authored by zzz's avatar zzz
Browse files

* ExpireRoutersJob:

      - Rewrite, not enabled yet
    * TunnelManager:
      - Remove now-unused isInUse()
parent b1465b7d
No related branches found
No related tags found
No related merge requests found
...@@ -27,7 +27,6 @@ class DummyTunnelManagerFacade implements TunnelManagerFacade { ...@@ -27,7 +27,6 @@ class DummyTunnelManagerFacade implements TunnelManagerFacade {
public TunnelInfo selectInboundTunnel(Hash destination) { return null; } public TunnelInfo selectInboundTunnel(Hash destination) { return null; }
public TunnelInfo selectOutboundTunnel() { return null; } public TunnelInfo selectOutboundTunnel() { return null; }
public TunnelInfo selectOutboundTunnel(Hash destination) { return null; } public TunnelInfo selectOutboundTunnel(Hash destination) { return null; }
public boolean isInUse(Hash peer) { return false; }
public boolean isValidTunnel(Hash client, TunnelInfo tunnel) { return false; } public boolean isValidTunnel(Hash client, TunnelInfo tunnel) { return false; }
public int getParticipatingCount() { return 0; } public int getParticipatingCount() { return 0; }
public int getFreeTunnelCount() { return 0; } public int getFreeTunnelCount() { return 0; }
......
...@@ -35,12 +35,6 @@ public interface TunnelManagerFacade extends Service { ...@@ -35,12 +35,6 @@ public interface TunnelManagerFacade extends Service {
/** pick an outbound tunnel bound to the given destination */ /** pick an outbound tunnel bound to the given destination */
TunnelInfo selectOutboundTunnel(Hash destination); TunnelInfo selectOutboundTunnel(Hash destination);
/**
* True if the peer is currently part of a tunnel
*
*/
boolean isInUse(Hash peer);
/** Is a tunnel a valid member of the pool? */ /** Is a tunnel a valid member of the pool? */
public boolean isValidTunnel(Hash client, TunnelInfo tunnel); public boolean isValidTunnel(Hash client, TunnelInfo tunnel);
......
...@@ -8,6 +8,7 @@ package net.i2p.router.networkdb.kademlia; ...@@ -8,6 +8,7 @@ package net.i2p.router.networkdb.kademlia;
* *
*/ */
import java.util.Collections;
import java.util.Date; import java.util.Date;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
...@@ -20,25 +21,20 @@ import net.i2p.router.RouterContext; ...@@ -20,25 +21,20 @@ import net.i2p.router.RouterContext;
import net.i2p.util.Log; import net.i2p.util.Log;
/** /**
* Go through the routing table pick routers that are performing poorly or * Go through the routing table pick routers that are
* is out of date, but don't expire routers we're actively tunneling through. * is out of date, but don't expire routers we're actively connected to.
* If a peer is performing worse than some threshold (via profile.rankLiveliness) *
* drop it and don't ask any questions. If a peer isn't ranked really poorly, but * We could in the future use profile data, netdb total size, a Kademlia XOR distance,
* we just haven't heard from it in a while, drop it and add it to the set of * or other criteria to minimize netdb size, but for now we just use _facade's
* keys we want the netDb to explore. * validate(), which is a sliding expiration based on netdb size.
* *
*/ */
class ExpireRoutersJob extends JobImpl { class ExpireRoutersJob extends JobImpl {
private Log _log; private Log _log;
private KademliaNetworkDatabaseFacade _facade; private KademliaNetworkDatabaseFacade _facade;
/** rerun fairly often, so the fails don't queue up too many netdb searches at once */
private final static long RERUN_DELAY_MS = 120*1000; private final static long RERUN_DELAY_MS = 120*1000;
/**
* If a routerInfo structure isn't updated within an hour, drop it
* and search for a later version. This value should be large enough
* to deal with the Router.CLOCK_FUDGE_FACTOR.
*/
public final static long EXPIRE_DELAY = 60*60*1000;
public ExpireRoutersJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { public ExpireRoutersJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) {
super(ctx); super(ctx);
...@@ -62,44 +58,25 @@ class ExpireRoutersJob extends JobImpl { ...@@ -62,44 +58,25 @@ class ExpireRoutersJob extends JobImpl {
/** /**
* Run through all of the known peers and pick ones that have really old * Run through all of the known peers and pick ones that have really old
* routerInfo publish dates, excluding ones that are in use by some tunnels, * routerInfo publish dates, excluding ones that we are connected to,
* so that they can be failed & queued for searching * so that they can be failed & queued for searching
* *
* @return nothing for now
*/ */
private Set selectKeysToExpire() { private Set selectKeysToExpire() {
Set possible = getNotInUse(); for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) {
Set expiring = new HashSet(16);
for (Iterator iter = possible.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next(); Hash key = (Hash)iter.next();
RouterInfo ri = _facade.lookupRouterInfoLocally(key); // Don't expire anybody we are connected to
if (ri != null) { if (!getContext().commSystem().isEstablished(key)) {
if (!ri.isCurrent(EXPIRE_DELAY)) { // This does a _facade.validate() and fail() which is sufficient...
if (_log.shouldLog(Log.INFO)) // no need to impose our own expiration here.
_log.info("Expiring RouterInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]"); // One issue is this will queue a ton of floodfill queries the first time it is run
expiring.add(key); // after the 1h router startup grace period.
} else { RouterInfo ri = _facade.lookupRouterInfoLocally(key);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not expiring routerInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]");
}
} }
} }
return expiring; // let _facade do all the work for now
} return Collections.EMPTY_SET;
/** all peers not in use by tunnels */
private Set getNotInUse() {
Set possible = new HashSet(16);
for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
if (!getContext().tunnelManager().isInUse(peer)) {
possible.add(peer);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer is in use: " + peer.toBase64());
}
}
return possible;
} }
} }
...@@ -195,13 +195,6 @@ public class TunnelPoolManager implements TunnelManagerFacade { ...@@ -195,13 +195,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public int getParticipatingCount() { return _context.tunnelDispatcher().getParticipatingCount(); } public int getParticipatingCount() { return _context.tunnelDispatcher().getParticipatingCount(); }
public long getLastParticipatingExpiration() { return _context.tunnelDispatcher().getLastParticipatingExpiration(); } public long getLastParticipatingExpiration() { return _context.tunnelDispatcher().getLastParticipatingExpiration(); }
public boolean isInUse(Hash peer) {
// this lets peers that are in our tunnels expire (forcing us to refetch them)
// if the info is old
//!! no, dont. bad.
return true;
}
public boolean isValidTunnel(Hash client, TunnelInfo tunnel) { public boolean isValidTunnel(Hash client, TunnelInfo tunnel) {
if (tunnel.getExpiration() < _context.clock().now()) if (tunnel.getExpiration() < _context.clock().now())
return false; return false;
...@@ -571,7 +564,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { ...@@ -571,7 +564,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
if (outPool != null) { if (outPool != null) {
List pending = outPool.listPending(); List pending = outPool.listPending();
if (pending.size() > 0) if (pending.size() > 0)
out.write("In progress: " + pending.size() + " outbound<br />\n"); out.write("Build in progress: " + pending.size() + " outbound<br />\n");
live += pending.size(); live += pending.size();
} }
if (live <= 0) if (live <= 0)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment