merge of 'b5d2a1759a243f1d66cec0bc09016324eb6e0286' and 'dae0a00fe84fa6972a8c42237b2b57ec8e4d689d'
zzz
2011-02-13 21:23:16 +00:00
8 changed files with 57 additions and 31 deletions

View File

@@ -170,7 +170,7 @@ public class I2PTunnelConnectClient extends I2PTunnelHTTPClientBase implements R
             // since we are passing the stream on to I2PTunnelRunner
             line = DataHelper.readLine(in);
             if(line == null) {
-                line = ""; // prevent NPE, Is this what we need to do?
+                break;
             }
             line = line.trim();
             if (_log.shouldLog(Log.DEBUG))

View File

@@ -1,3 +1,9 @@
+2011-02-13 zzz
+ * Connect Client: Minor NPE fix cleanup
+ * JobQueue: Prevent NPE at shutdown (thanks liberty)
+ * GeoIP: Prevent startup NPE (ticket #413, thanks RN)
+ * NetDB: Prevent ExpireLeaseJob NPE (thanks sponge)
+
 2011-02-11 Mathiasdm
  * routerconsole: fixed graphs using jrobin; and headless issue
    in general: no more switches between headless and non-headless.

View File

@@ -227,10 +227,14 @@ public class JobQueue {
     }
 
     public long getMaxLag() {
+        // first job is the one that has been waiting the longest
         Job j = _readyJobs.peek();
         if (j == null) return 0;
-        // first job is the one that has been waiting the longest
-        long startAfter = j.getTiming().getStartAfter();
+        JobTiming jt = j.getTiming();
+        // PoisonJob timing is null, prevent NPE at shutdown
+        if (jt == null)
+            return 0;
+        long startAfter = jt.getStartAfter();
         return _context.clock().now() - startAfter;
     }
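Background on the null check above: per the in-line comment, the PoisonJob used to shut the queue down carries no timing, so anything that peeks at the head of the ready queue has to tolerate a null JobTiming. The sketch below illustrates the general poison-pill shutdown pattern with hypothetical names; it is not the actual I2P JobQueue/PoisonJob code.

import java.util.concurrent.LinkedBlockingQueue;

class JobQueueSketch {
    interface Timing { long getStartAfter(); }
    interface Job { Timing getTiming(); void run(); }

    /** sentinel used only to wake a runner and tell it to exit; it has no timing */
    static final Job POISON = new Job() {
        public Timing getTiming() { return null; }
        public void run() {}
    };

    private final LinkedBlockingQueue<Job> _ready = new LinkedBlockingQueue<Job>();

    void shutdown() throws InterruptedException {
        _ready.put(POISON);                  // wakes a runner blocked in take()
    }

    void runnerLoop() throws InterruptedException {
        while (true) {
            Job j = _ready.take();
            if (j.getTiming() == null)       // the poison job: stop processing
                break;
            j.run();
        }
    }

    /** same shape as the getMaxLag() fix above: the head of the queue may be the poison job */
    long maxLag(long now) {
        Job j = _ready.peek();
        if (j == null) return 0;
        Timing t = j.getTiming();
        if (t == null) return 0;             // poison job, nothing to measure against
        return now - t.getStartAfter();
    }
}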

View File

@@ -6,6 +6,7 @@ import java.util.Properties;
 import net.i2p.I2PAppContext;
 import net.i2p.data.Hash;
+import net.i2p.data.RouterInfo;
 import net.i2p.internal.InternalClientManager;
 import net.i2p.router.client.ClientManagerFacadeImpl;
 import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
@@ -170,8 +171,20 @@ public class RouterContext extends I2PAppContext {
     /** what router is this context working for? */
     public Router router() { return _router; }
-    /** convenience method for querying the router's ident */
-    public Hash routerHash() { return _router.getRouterInfo().getIdentity().getHash(); }
+
+    /**
+     *  Convenience method for getting the router hash.
+     *  Equivalent to context.router().getRouterInfo().getIdentity().getHash()
+     *  @return may be null if called very early
+     */
+    public Hash routerHash() {
+        if (_router == null)
+            return null;
+        RouterInfo ri = _router.getRouterInfo();
+        if (ri == null)
+            return null;
+        return ri.getIdentity().getHash();
+    }
+
     /**
      * How are we coordinating clients for the router?
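Since routerHash() can now return null when called very early in startup, callers have to check the return value; the GeoIP change further down in this commit does exactly that. A minimal caller-side sketch follows, with a hypothetical class and method name (not I2P code):

import net.i2p.data.Hash;
import net.i2p.router.RouterContext;

class RouterHashUserSketch {
    private final RouterContext _context;

    RouterHashUserSketch(RouterContext ctx) { _context = ctx; }

    /** @return true if we did the work, false if it is still too early in startup */
    boolean updateFromRouterHash() {
        Hash h = _context.routerHash();
        if (h == null)
            return false;   // no Router / RouterInfo yet; try again later instead of NPEing
        // ... do something with h ...
        return true;
    }
}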

View File

@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 9;
+    public final static long BUILD = 10;
     /** for example "-test" */
     public final static String EXTRA = "";

View File

@@ -9,7 +9,7 @@ package net.i2p.router.networkdb.kademlia;
  */
 
 import java.util.HashSet;
-import java.util.Iterator;
+import java.util.Map;
 import java.util.Set;
 
 import net.i2p.data.DatabaseEntry;
@@ -27,8 +27,8 @@ import net.i2p.util.Log;
  *
  */
 class ExpireLeasesJob extends JobImpl {
-    private Log _log;
-    private KademliaNetworkDatabaseFacade _facade;
+    private final Log _log;
+    private final KademliaNetworkDatabaseFacade _facade;
 
     private final static long RERUN_DELAY_MS = 1*60*1000;
@@ -39,11 +39,11 @@ class ExpireLeasesJob extends JobImpl {
     }
 
     public String getName() { return "Expire Lease Sets Job"; }
 
     public void runJob() {
-        Set toExpire = selectKeysToExpire();
+        Set<Hash> toExpire = selectKeysToExpire();
         _log.info("Leases to expire: " + toExpire);
-        for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) {
-            Hash key = (Hash)iter.next();
+        for (Hash key : toExpire) {
             _facade.fail(key);
             //_log.info("Lease " + key + " is expiring, so lets look for it again", new Exception("Expire and search"));
             //_facade.lookupLeaseSet(key, null, null, RERUN_DELAY_MS);
@@ -57,17 +57,15 @@ class ExpireLeasesJob extends JobImpl {
      * don't have any leases that haven't yet passed, even with the CLOCK_FUDGE_FACTOR)
      *
      */
-    private Set selectKeysToExpire() {
-        Set keys = _facade.getDataStore().getKeys();
-        Set toExpire = new HashSet(128);
-        for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
-            Hash key = (Hash)iter.next();
-            DatabaseEntry obj = _facade.getDataStore().get(key);
+    private Set<Hash> selectKeysToExpire() {
+        Set<Hash> toExpire = new HashSet(128);
+        for (Map.Entry<Hash, DatabaseEntry> entry : _facade.getDataStore().getMapEntries()) {
+            DatabaseEntry obj = entry.getValue();
             if (obj.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
                 LeaseSet ls = (LeaseSet)obj;
                 if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
-                    toExpire.add(key);
-                else
+                    toExpire.add(entry.getKey());
+                else if (_log.shouldLog(Log.DEBUG))
                     _log.debug("Lease " + ls.getDestination().calculateHash() + " is current, no need to expire");
             }
         }
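The rewrite above does two things: it adds generics so the raw-type casts go away, and it walks the data store's entries directly via getMapEntries() instead of fetching the key set and calling get() again for every key. A generic illustration of that entry-iteration pattern over a plain java.util.Map (not the actual DataStore API, and with made-up names):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class EntryIterationSketch {
    /** collect the keys whose expiration time has passed */
    static Set<String> staleKeys(Map<String, Long> expiry, long now) {
        Set<String> stale = new HashSet<String>();
        for (Map.Entry<String, Long> e : expiry.entrySet()) {
            if (e.getValue() <= now)       // the value is already in hand; no second map lookup
                stale.add(e.getKey());
        }
        return stale;
    }
}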

View File

@@ -9,7 +9,6 @@ package net.i2p.router.networkdb.kademlia;
  */
 
 import java.util.Collections;
-import java.util.Iterator;
 import java.util.Set;
 
 import net.i2p.data.Hash;
@@ -28,8 +27,8 @@ import net.i2p.util.Log;
  *
  */
 class ExpireRoutersJob extends JobImpl {
-    private Log _log;
-    private KademliaNetworkDatabaseFacade _facade;
+    private final Log _log;
+    private final KademliaNetworkDatabaseFacade _facade;
 
     /** rerun fairly often, so the fails don't queue up too many netdb searches at once */
     private final static long RERUN_DELAY_MS = 120*1000;
@@ -41,11 +40,13 @@ class ExpireRoutersJob extends JobImpl {
     }
 
     public String getName() { return "Expire Routers Job"; }
 
     public void runJob() {
-        Set toExpire = selectKeysToExpire();
-        _log.info("Routers to expire (drop and try to refetch): " + toExpire);
-        for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) {
-            Hash key = (Hash)iter.next();
+        // this always returns an empty set (see below)
+        Set<Hash> toExpire = selectKeysToExpire();
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Routers to expire (drop and try to refetch): " + toExpire);
+        for (Hash key : toExpire) {
             _facade.fail(key);
         }
         _facade.queueForExploration(toExpire);
@@ -61,9 +62,8 @@ class ExpireRoutersJob extends JobImpl {
      *
      *  @return nothing for now
      */
-    private Set selectKeysToExpire() {
-        for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) {
-            Hash key = (Hash)iter.next();
+    private Set<Hash> selectKeysToExpire() {
+        for (Hash key : _facade.getAllRouters()) {
             // Don't expire anybody we are connected to
             if (!getContext().commSystem().isEstablished(key)) {
                 // This does a _facade.validate() and fail() which is sufficient...

View File

@@ -16,6 +16,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import net.i2p.data.DataHelper;
+import net.i2p.data.Hash;
 import net.i2p.router.RouterContext;
 import net.i2p.util.ConcurrentHashSet;
 import net.i2p.util.Log;
@@ -251,7 +252,11 @@ class GeoIP {
      */
     private void updateOurCountry() {
         String oldCountry = _context.router().getConfigSetting(PROP_IP_COUNTRY);
-        String country = _context.commSystem().getCountry(_context.routerHash());
+        Hash ourHash = _context.routerHash();
+        // we should always have a RouterInfo by now, but we had one report of an NPE here
+        if (ourHash == null)
+            return;
+        String country = _context.commSystem().getCountry(ourHash);
         if (country != null && !country.equals(oldCountry)) {
             _context.router().setConfigSetting(PROP_IP_COUNTRY, country);
             _context.router().saveConfig();