Unverified Commit bef72946 authored by zzz

NetDB: Fix usage of dbResponseTime stat

Actually update the stat for stores in dbStoreSent();
we generally store to different floodfills than we look up from, so we need the
stat for stores as well, since we use it as the timeout in StoreJob.

Change from 1-day to 1-hour stat.
Switch to avgOrLifetimeAvg() so the rate is always valid.
Reduce the max time used for the timeout.
This allows more peers to be tried before the total timeout expires;
previously, the per-peer timeout was almost always the max.
Make sendStore() package private.
Javadocs and cleanups.
parent d0e72aca
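The core of the change is the per-peer store timeout calculation in KademliaNetworkDatabaseFacade (hunks below). Here is a minimal sketch of that logic after this commit, using only the names visible in the diff (RateStat, getAvgOrLifetimeAvg(), MAX_PER_PEER_TIMEOUT, TIMEOUT_MULTIPLIER); the class and method names of the sketch itself are hypothetical and this is not the committed code verbatim.

import net.i2p.stat.RateStat;

class PerPeerTimeoutSketch {
    // Values taken from the diff: the cap drops from 7000 to 5100 ms.
    private static final int MAX_PER_PEER_TIMEOUT = 5100;
    private static final int TIMEOUT_MULTIPLIER = 3;

    /** Per-peer store timeout: up to 3x the observed average response time, clamped to the cap. */
    static int perPeerTimeout(RateStat dbResponseTime) {
        // Read the 1-hour rate; avgOrLifetimeAvg() falls back to the lifetime
        // average when the period has no samples, so the rate is always valid.
        double responseTime = dbResponseTime.getRate(60*60*1000L).getAvgOrLifetimeAvg();
        // No data, or an outlier above the cap: use the cap.
        if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
            responseTime = MAX_PER_PEER_TIMEOUT;
        return TIMEOUT_MULTIPLIER * (int) responseTime;
    }
}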
@@ -155,8 +155,18 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
         sendStore(localRouterInfo.getIdentity().calculateHash(), localRouterInfo, null, null, PUBLISH_TIMEOUT, null);
     }

+    /**
+     * Send out a store.
+     *
+     * @param key the DatabaseEntry hash
+     * @param onSuccess may be null, always called if we are ff and ds is an RI
+     * @param onFailure may be null, ignored if we are ff and ds is an RI
+     * @param sendTimeout ignored if we are ff and ds is an RI
+     * @param toIgnore may be null, if non-null, all attempted and skipped targets will be added as of 0.9.53,
+     *                 unused if we are ff and ds is an RI
+     */
     @Override
-    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
+    void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
         // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
@@ -790,7 +790,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
             return;
         }

-        RepublishLeaseSetJob j = null;
+        RepublishLeaseSetJob j;
         synchronized (_publishingLeaseSets) {
             j = _publishingLeaseSets.get(h);
             if (j == null) {
@@ -1467,7 +1467,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * to be greater than MAX_PER_PEER_TIMEOUT * TIMEOUT_MULTIPLIER by a factor of at least
      * 3 or 4, to allow at least that many peers to be attempted for a store.
      */
-    private static final int MAX_PER_PEER_TIMEOUT = 7*1000;
+    private static final int MAX_PER_PEER_TIMEOUT = 5100;
     private static final int TIMEOUT_MULTIPLIER = 3;

     /** todo: does this need more tuning? */
@@ -1475,7 +1475,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         PeerProfile prof = _context.profileOrganizer().getProfile(peer);
         double responseTime = MAX_PER_PEER_TIMEOUT;
         if (prof != null && prof.getIsExpandedDB()) {
-            responseTime = prof.getDbResponseTime().getRate(24*60*60*1000l).getAverageValue();
+            responseTime = prof.getDbResponseTime().getRate(60*60*1000L).getAvgOrLifetimeAvg();
             // if 0 then there is no data, set to max.
             if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
                 responseTime = MAX_PER_PEER_TIMEOUT;
@@ -1485,8 +1485,17 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         return TIMEOUT_MULTIPLIER * (int)responseTime;    // give it up to 3x the average response time
     }

-    /** unused (overridden in FNDF) */
-    public abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);
+    /**
+     * See implementation in FNDF
+     *
+     * @param key the DatabaseEntry hash
+     * @param onSuccess may be null, always called if we are ff and ds is an RI
+     * @param onFailure may be null, ignored if we are ff and ds is an RI
+     * @param sendTimeout ignored if we are ff and ds is an RI
+     * @param toIgnore may be null, if non-null, all attempted and skipped targets will be added as of 0.9.53,
+     *                 unused if we are ff and ds is an RI
+     */
+    abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);

     /**
      * Increment in the negative lookup cache
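The constant change above (7*1000 to 5100 ms) matters because the total store timeout is a fixed budget: a smaller worst-case per-peer timeout lets StoreJob try more peers before that budget runs out. A back-of-the-envelope check, using a hypothetical 60-second total timeout (the actual StoreJob figure is not shown in this diff):

public class TimeoutBudgetSketch {
    public static void main(String[] args) {
        long totalTimeout = 60_000;      // hypothetical overall store timeout, in ms
        int worstBefore = 3 * 7_000;     // TIMEOUT_MULTIPLIER * old MAX_PER_PEER_TIMEOUT = 21,000 ms
        int worstAfter  = 3 * 5_100;     // TIMEOUT_MULTIPLIER * new MAX_PER_PEER_TIMEOUT = 15,300 ms
        System.out.println("worst-case peers tried before: " + totalTimeout / worstBefore);  // 2
        System.out.println("worst-case peers tried after:  " + totalTimeout / worstAfter);   // 3
    }
}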
@@ -113,7 +113,10 @@ public class ProfileManagerImpl implements ProfileManager {
      * was successfully tested with the given round trip latency
      *
      * Non-blocking. Will not update the profile if we can't get the lock.
+     *
+     * @deprecated disabled
      */
+    @Deprecated
     @SuppressWarnings("deprecation")
     public void tunnelTestSucceeded(Hash peer, long responseTimeMs) {
         if (PeerProfile.ENABLE_TUNNEL_TEST_RESPONSE_TIME) {
@@ -260,10 +263,11 @@ public class ProfileManagerImpl implements ProfileManager {
      * Note that we've confirmed a successful send of db data to the peer (though we haven't
      * necessarily requested it again from them, so they /might/ be lying)
      *
-     * This is not really interesting, since they could be lying, so we do not
-     * increment any DB stats at all. On verify, call dbStoreSuccessful().
+     * As of 0.9.53 we update the DbResponseTime.
      *
-     * @param responseTimeMs ignored
+     * This will force creation of DB stats
+     *
+     * @param responseTimeMs duration
      */
     public void dbStoreSent(Hash peer, long responseTimeMs) {
         PeerProfile data = getProfile(peer);
@@ -271,10 +275,9 @@ public class ProfileManagerImpl implements ProfileManager {
         long now = _context.clock().now();
         data.setLastHeardFrom(now);
         data.setLastSendSuccessful(now);
-        //if (!data.getIsExpandedDB())
-        //    data.expandDBProfile();
-        //DBHistory hist = data.getDBHistory();
-        //hist.storeSuccessful();
+        if (!data.getIsExpandedDB())
+            data.expandDBProfile();
+        data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
     }

     /**
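Together, the two halves close a loop: ProfileManagerImpl.dbStoreSent() now feeds each store's round-trip time into dbResponseTime, and the timeout code above reads the same stat. A standalone illustration of that usage follows; the RateStat constructor arguments and the sketch's class name are assumptions for illustration, not the PeerProfile definition.

import net.i2p.stat.RateStat;

public class DbResponseTimeStatSketch {
    public static void main(String[] args) {
        long hour = 60*60*1000L;
        // Hypothetical stat instance; PeerProfile defines the real dbResponseTime and its periods.
        RateStat dbResponseTime = new RateStat("dbResponseTime",
                "how long a peer takes to respond", "Peers", new long[] { hour });

        // What dbStoreSent() now does for each confirmed store send:
        dbResponseTime.addData(1250, 1250);
        dbResponseTime.addData(900, 900);

        // What the per-peer timeout calculation reads (see the first sketch above):
        double avg = dbResponseTime.getRate(hour).getAvgOrLifetimeAvg();
        System.out.println("dbResponseTime avg or lifetime avg: " + avg + " ms");
    }
}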