netdb minor cleanups

Author: zzz
Date:   2015-12-23 10:59:53 +00:00
parent 8da3257856
commit 2f09389ddd
11 changed files with 34 additions and 34 deletions


@@ -220,7 +220,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
getContext().profileManager().dbLookupFailed(h);
}
_facade.complete(_key);
-getContext().statManager().addRateData("netDb.failedTime", time, 0);
+getContext().statManager().addRateData("netDb.failedTime", time);
for (Job j : _onFailed) {
getContext().jobQueue().addJob(j);
}
@@ -251,7 +251,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
}
}
_facade.complete(_key);
-getContext().statManager().addRateData("netDb.successTime", time, 0);
+getContext().statManager().addRateData("netDb.successTime", time);
for (Job j : _onFind) {
getContext().jobQueue().addJob(j);
}
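
The repeated change throughout this commit drops a trailing ", 0" argument from statManager().addRateData() calls. Presumably the call sites now target a two-argument convenience overload that defaults the rarely-used event-duration argument to 0; the overload itself is not shown in these hunks. A minimal sketch of that assumption, not the actual I2P implementation:

    // Sketch only: assumes addRateData(String, long) forwards to the
    // three-argument form with the event duration defaulted to 0.
    public class StatManagerSketch {

        /** Three-argument form; the trailing argument was almost always 0. */
        public void addRateData(String name, long data, long eventDuration) {
            System.out.println(name + " += " + data + " (duration " + eventDuration + ")");
        }

        /** Assumed convenience overload that the updated call sites target. */
        public void addRateData(String name, long data) {
            addRateData(name, data, 0);
        }

        public static void main(String[] args) {
            StatManagerSketch mgr = new StatManagerSketch();
            mgr.addRateData("netDb.failedTime", 1234); // same as (name, 1234, 0)
        }
    }

Defaulting the argument in one place keeps the many call sites shorter and removes the temptation to pass a meaningless duration.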


@@ -43,7 +43,7 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
-_context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
+_context.statManager().addRateData("netDb.lookupsReceived", 1);
DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {


@@ -257,7 +257,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
getContext().profileManager().dbLookupSuccessful(_target, delay);
if (_sentTo != null)
getContext().profileManager().dbStoreSuccessful(_sentTo);
-getContext().statManager().addRateData("netDb.floodfillVerifyOK", delay, 0);
+getContext().statManager().addRateData("netDb.floodfillVerifyOK", delay);
if (_log.shouldLog(Log.INFO))
_log.info("Verify success for " + _key);
if (_isRouterInfo)
@@ -290,7 +290,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
// though it is the real problem.
if (_target != null && !_target.equals(_sentTo))
getContext().profileManager().dbLookupFailed(_target);
-getContext().statManager().addRateData("netDb.floodfillVerifyFail", delay, 0);
+getContext().statManager().addRateData("netDb.floodfillVerifyFail", delay);
resend();
}
public void setMessage(I2NPMessage message) { _message = message; }
@@ -328,7 +328,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
getContext().profileManager().dbLookupFailed(_target);
//if (_sentTo != null)
// getContext().profileManager().dbStoreFailed(_sentTo);
-getContext().statManager().addRateData("netDb.floodfillVerifyTimeout", getContext().clock().now() - _sendTime, 0);
+getContext().statManager().addRateData("netDb.floodfillVerifyTimeout", getContext().clock().now() - _sendTime);
if (_log.shouldLog(Log.WARN))
_log.warn("Verify timed out for: " + _key);
if (_ignore.size() < MAX_PEERS_TO_TRY) {


@@ -212,7 +212,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void removeFromExploreKeys(Collection<Hash> toRemove) {
if (!_initialized) return;
_exploreKeys.removeAll(toRemove);
-_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
+_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size());
}
public void queueForExploration(Collection<Hash> keys) {
@@ -220,7 +220,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
for (Iterator<Hash> iter = keys.iterator(); iter.hasNext() && _exploreKeys.size() < MAX_EXPLORE_QUEUE; ) {
_exploreKeys.add(iter.next());
}
-_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
+_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size());
}
public synchronized void shutdown() {


@@ -45,8 +45,8 @@ class LookupThrottler {
/** yes, we could have a two-level lookup, or just do h.tostring() + id.tostring() */
private static class ReplyTunnel {
-public Hash h;
-public TunnelId id;
+public final Hash h;
+public final TunnelId id;
ReplyTunnel(Hash h, TunnelId id) {
this.h = h;
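
This hunk and several below apply one pattern: fields assigned exactly once, in the constructor, become final, and redundant default initializations (such as the "_matches = 0" removed in the next hunk) are dropped, since int fields already default to 0. Besides documenting intent, final fields carry a Java memory model guarantee: once the constructor returns, their values are visible to any thread that later obtains the object, with no extra synchronization, which is relevant for these small holder and job classes handed to the shared job queue. A minimal illustrative sketch, with hypothetical names:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Sketch: a final field is frozen when the constructor returns, so the
    // worker thread below is guaranteed to read "key" as constructed.
    public class FinalFieldSketch {

        private static class LookupJob implements Runnable {
            private final String key;  // assigned once; safely published
            private int matches;       // defaults to 0, no "= 0" needed

            LookupJob(String key) { this.key = key; }

            public void run() {
                matches++;
                System.out.println(key + " matches=" + matches);
            }
        }

        public static void main(String[] args) {
            ExecutorService queue = Executors.newSingleThreadExecutor();
            queue.execute(new LookupJob("netDb.exampleKey")); // hypothetical key
            queue.shutdown();
        }
    }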


@@ -112,15 +112,14 @@ class PeerSelector {
/** UNUSED */
private class MatchSelectionCollector implements SelectionCollector<Hash> {
-private TreeMap<BigInteger, Hash> _sorted;
-private Hash _key;
-private Set<Hash> _toIgnore;
+private final TreeMap<BigInteger, Hash> _sorted;
+private final Hash _key;
+private final Set<Hash> _toIgnore;
private int _matches;
public MatchSelectionCollector(Hash key, Set<Hash> toIgnore) {
_key = key;
_sorted = new TreeMap<BigInteger, Hash>();
_toIgnore = toIgnore;
-_matches = 0;
}
public void add(Hash entry) {
// deadlock seen here, and we don't mark profiles failing anymore


@@ -165,7 +165,7 @@ public class PersistentDataStore extends TransientDataStore {
}
private class RemoveJob extends JobImpl {
-private Hash _key;
+private final Hash _key;
public RemoveJob(Hash key) {
super(PersistentDataStore.this._context);
_key = key;


@@ -53,7 +53,7 @@ public class RepublishLeaseSetJob extends JobImpl {
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Publishing " + ls);
-getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0);
+getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1);
_facade.sendStore(_dest, ls, null, new OnRepublishFailure(getContext(), this), REPUBLISH_LEASESET_TIMEOUT, null);
_lastPublished = getContext().clock().now();
//getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
@@ -105,7 +105,7 @@ public class RepublishLeaseSetJob extends JobImpl {
/** requeue */
private static class OnRepublishFailure extends JobImpl {
-private RepublishLeaseSetJob _job;
+private final RepublishLeaseSetJob _job;
public OnRepublishFailure(RouterContext ctx, RepublishLeaseSetJob job) {
super(ctx);
_job = job;


@@ -781,10 +781,10 @@ class SearchJob extends JobImpl {
}
private static class Search {
-private Job _onFind;
-private Job _onFail;
-private long _expiration;
-private boolean _isLease;
+private final Job _onFind;
+private final Job _onFail;
+private final long _expiration;
+private final boolean _isLease;
public Search(Job onFind, Job onFail, long expiration, boolean isLease) {
_onFind = onFind;


@@ -100,7 +100,7 @@ class SearchReplyJob extends JobImpl {
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64());
-getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
+getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1);
}
}
@@ -125,14 +125,14 @@ class SearchReplyJob extends JobImpl {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64());
_repliesPendingVerification--;
-getContext().statManager().addRateData("netDb.searchReplyValidated", 1, 0);
+getContext().statManager().addRateData("netDb.searchReplyValidated", 1);
}
void replyNotVerified() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64());
_repliesPendingVerification--;
_invalidPeers++;
-getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1, 0);
+getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1);
}
}


@@ -293,7 +293,8 @@ class StoreJob extends JobImpl {
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
}
msg.setEntry(_state.getData());
-msg.setMessageExpiration(getContext().clock().now() + _timeoutMs);
+long now = getContext().clock().now();
+msg.setMessageExpiration(now + _timeoutMs);
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
// don't send it to ourselves
@@ -305,7 +306,7 @@ class StoreJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Send store timeout is " + responseTime);
-sendStore(msg, router, getContext().clock().now() + responseTime);
+sendStore(msg, router, now + responseTime);
}
/**
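
The two StoreJob hunks above cache a single clock().now() reading in a local and reuse it for both the message expiration and the reply timeout. The motivation is inferred rather than stated in the commit: two separate now() calls would let the two deadlines drift apart by however long the message construction in between takes. A self-contained sketch of the pattern, all names illustrative:

    // Sketch of the cached-timestamp pattern. Clock here is a stand-in for
    // the router clock; the two deadlines share one base reading.
    public class ClockSketch {

        interface Clock { long now(); }

        static void schedule(Clock clock, long timeoutMs, long responseTimeMs) {
            long now = clock.now();                    // single clock read
            long expiration = now + timeoutMs;         // message expiration
            // ... message construction happens here ...
            long replyDeadline = now + responseTimeMs; // same base, no skew
            System.out.println(expiration + " / " + replyDeadline);
        }

        public static void main(String[] args) {
            schedule(System::currentTimeMillis, 60_000, 10_000);
        }
    }

The saved second read of the clock is minor; keeping the two deadlines consistent is the apparent point.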
@@ -315,14 +316,14 @@ class StoreJob extends JobImpl {
*/
private void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
if (msg.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
-getContext().statManager().addRateData("netDb.storeLeaseSetSent", 1, 0);
+getContext().statManager().addRateData("netDb.storeLeaseSetSent", 1);
// if it is an encrypted leaseset...
if (getContext().keyRing().get(msg.getKey()) != null)
sendStoreThroughGarlic(msg, peer, expiration);
else
sendStoreThroughClient(msg, peer, expiration);
} else {
-getContext().statManager().addRateData("netDb.storeRouterInfoSent", 1, 0);
+getContext().statManager().addRateData("netDb.storeRouterInfoSent", 1);
sendDirect(msg, peer, expiration);
}
}
@@ -557,9 +558,9 @@ class StoreJob extends JobImpl {
*
*/
private class SendSuccessJob extends JobImpl implements ReplyJob {
-private RouterInfo _peer;
-private TunnelInfo _sendThrough;
-private int _msgSize;
+private final RouterInfo _peer;
+private final TunnelInfo _sendThrough;
+private final int _msgSize;
public SendSuccessJob(RouterContext enclosingContext, RouterInfo peer) {
this(enclosingContext, peer, null, 0);
@@ -615,8 +616,8 @@ class StoreJob extends JobImpl {
*
*/
private class FailedJob extends JobImpl {
-private RouterInfo _peer;
-private long _sendOn;
+private final RouterInfo _peer;
+private final long _sendOn;
public FailedJob(RouterContext enclosingContext, RouterInfo peer, long sendOn) {
super(enclosingContext);
@@ -635,7 +636,7 @@ class StoreJob extends JobImpl {
_state.replyTimeout(hash);
getContext().profileManager().dbStoreFailed(hash);
-getContext().statManager().addRateData("netDb.replyTimeout", getContext().clock().now() - _sendOn, 0);
+getContext().statManager().addRateData("netDb.replyTimeout", getContext().clock().now() - _sendOn);
sendNext();
}