diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
index 94d20815f24745b166ed811ed088090a56b1127e..0c011070d1910a85ca67ca8f531665139dc1eb5a 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
@@ -155,8 +155,18 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         sendStore(localRouterInfo.getIdentity().calculateHash(), localRouterInfo, null, null, PUBLISH_TIMEOUT, null);
     }
 
+    /**
+     * Send out a store.
+     *
+     * @param key the DatabaseEntry hash
+     * @param onSuccess may be null, always called if we are ff and ds is an RI
+     * @param onFailure may be null, ignored if we are ff and ds is an RI
+     * @param sendTimeout ignored if we are ff and ds is an RI
+     * @param toIgnore may be null, if non-null, all attempted and skipped targets will be added as of 0.9.53,
+     *                 unused if we are ff and ds is an RI
+     */
     @Override
-    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
+    void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
         // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
index 10612b8b912a38ca7144f1ffc98c5326eeab5597..4878f3942fce281584974a751ce3724fa4f88bf9 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
@@ -790,7 +790,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
             return;
         }
 
-        RepublishLeaseSetJob j = null;
+        RepublishLeaseSetJob j;
         synchronized (_publishingLeaseSets) {
             j = _publishingLeaseSets.get(h);
             if (j == null) {
@@ -1467,7 +1467,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
      * to be greater than MAX_PER_PEER_TIMEOUT * TIMEOUT_MULTIPLIER by a factor of at least
      * 3 or 4, to allow at least that many peers to be attempted for a store.
      */
-    private static final int MAX_PER_PEER_TIMEOUT = 7*1000;
+    private static final int MAX_PER_PEER_TIMEOUT = 5100;
     private static final int TIMEOUT_MULTIPLIER = 3;
 
     /** todo: does this need more tuning? */
@@ -1475,7 +1475,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
         PeerProfile prof = _context.profileOrganizer().getProfile(peer);
         double responseTime = MAX_PER_PEER_TIMEOUT;
         if (prof != null && prof.getIsExpandedDB()) {
-            responseTime = prof.getDbResponseTime().getRate(24*60*60*1000l).getAverageValue();
+            responseTime = prof.getDbResponseTime().getRate(60*60*1000L).getAvgOrLifetimeAvg();
             // if 0 then there is no data, set to max.
             if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
                 responseTime = MAX_PER_PEER_TIMEOUT;
@@ -1485,8 +1485,17 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
         return TIMEOUT_MULTIPLIER * (int)responseTime;  // give it up to 3x the average response time
     }
 
-    /** unused (overridden in FNDF) */
-    public abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);
+    /**
+     * See implementation in FNDF
+     *
+     * @param key the DatabaseEntry hash
+     * @param onSuccess may be null, always called if we are ff and ds is an RI
+     * @param onFailure may be null, ignored if we are ff and ds is an RI
+     * @param sendTimeout ignored if we are ff and ds is an RI
+     * @param toIgnore may be null, if non-null, all attempted and skipped targets will be added as of 0.9.53,
+     *                 unused if we are ff and ds is an RI
+     */
+    abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);
 
     /**
      * Increment in the negative lookup cache
diff --git a/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java b/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java
index 761cb0b1261c11f4b17b67dfdead024d2c7514b2..056cef70d0d4647313d082fe8c94de4fb088f627 100644
--- a/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java
+++ b/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java
@@ -113,7 +113,10 @@ public class ProfileManagerImpl implements ProfileManager {
      * was successfully tested with the given round trip latency
      *
      * Non-blocking. Will not update the profile if we can't get the lock.
+     *
+     * @deprecated disabled
      */
+    @Deprecated
     @SuppressWarnings("deprecation")
     public void tunnelTestSucceeded(Hash peer, long responseTimeMs) {
         if (PeerProfile.ENABLE_TUNNEL_TEST_RESPONSE_TIME) {
@@ -260,10 +263,11 @@ public class ProfileManagerImpl implements ProfileManager {
      * Note that we've confirmed a successful send of db data to the peer (though we haven't
      * necessarily requested it again from them, so they /might/ be lying)
      *
-     * This is not really interesting, since they could be lying, so we do not
-     * increment any DB stats at all. On verify, call dbStoreSuccessful().
+     * As of 0.9.53 we update the DbResponseTime.
      *
-     * @param responseTimeMs ignored
+     * This will force creation of DB stats
+     *
+     * @param responseTimeMs duration
      */
     public void dbStoreSent(Hash peer, long responseTimeMs) {
         PeerProfile data = getProfile(peer);
@@ -271,10 +275,9 @@ public class ProfileManagerImpl implements ProfileManager {
         long now = _context.clock().now();
         data.setLastHeardFrom(now);
         data.setLastSendSuccessful(now);
-        //if (!data.getIsExpandedDB())
-        //    data.expandDBProfile();
-        //DBHistory hist = data.getDBHistory();
-        //hist.storeSuccessful();
+        if (!data.getIsExpandedDB())
+            data.expandDBProfile();
+        data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
     }
 
     /**
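
Net effect of the KademliaNetworkDatabaseFacade hunks above: the per-peer store timeout is now derived from the 1-hour DbResponseTime rate (falling back to the lifetime average when the period is empty) instead of the 24-hour rate, and the cap drops from 7000 ms to 5100 ms, so the worst case per peer is 3 * 5100 = 15300 ms rather than 3 * 7000 = 21000 ms. A minimal standalone sketch of that arithmetic follows; AvgStat and the getPeerTimeout() parameters here are hypothetical stand-ins for the real PeerProfile/RateStat types, not the I2P API.

    // Standalone sketch of the revised getPeerTimeout() arithmetic.
    // AvgStat is a hypothetical stand-in for the router's RateStat, not the I2P API.
    public class PeerTimeoutSketch {
        private static final int MAX_PER_PEER_TIMEOUT = 5100;  // was 7*1000
        private static final int TIMEOUT_MULTIPLIER = 3;

        /** Stand-in for the 1-hour DbResponseTime rate. */
        interface AvgStat {
            /** Current-period average, or the lifetime average when the period is empty. */
            double avgOrLifetimeAvg();
        }

        /** Clamp the peer's average DB response time to the cap, then allow 3x that for a store. */
        static int getPeerTimeout(AvgStat dbResponseTime, boolean expandedDB) {
            double responseTime = MAX_PER_PEER_TIMEOUT;
            if (dbResponseTime != null && expandedDB) {
                responseTime = dbResponseTime.avgOrLifetimeAvg();
                // zero or negative means no data; oversized averages are capped
                if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
                    responseTime = MAX_PER_PEER_TIMEOUT;
            }
            return TIMEOUT_MULTIPLIER * (int) responseTime;
        }

        public static void main(String[] args) {
            System.out.println(getPeerTimeout(() -> 0, true));    // no data: 15300 ms (was 21000)
            System.out.println(getPeerTimeout(() -> 800, true));  // fast peer: 2400 ms
        }
    }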
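
The ProfileManagerImpl hunks are what make the new computation meaningful: dbStoreSent() now records the measured store round trip into DbResponseTime (expanding the DB profile on first use, per the "force creation of DB stats" note), so the stat that getPeerTimeout() reads is fed by successful stores as well, rather than left untouched until a verify.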