
Commit 474909ae authored by zzz

- Don't set the DatabaseLookupMessage dont-include collection until

  it's complete, as DLM now makes a copy
- SearchState generics and cleanups
parent caada2bf
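The first change works around DatabaseLookupMessage's new copy-on-set behavior: since setDontIncludePeers() now stores a copy, anything added to the don't-include collection after the setter runs is silently lost, so the set must be built completely and handed over once at the end. A minimal sketch of the hazard, using a hypothetical Message class (not the real DLM API) whose setter copies:

import java.util.HashSet;
import java.util.Set;

class CopyOnSetDemo {
    // Hypothetical stand-in for a message whose setter makes a defensive copy
    static class Message {
        private Set<String> dontInclude = new HashSet<String>();
        void setDontInclude(Set<String> peers) {
            this.dontInclude = new HashSet<String>(peers); // setter copies
        }
        Set<String> getDontInclude() { return dontInclude; }
    }

    public static void main(String[] args) {
        Set<String> peers = new HashSet<String>();
        peers.add("peerA");

        Message broken = new Message();
        broken.setDontInclude(peers);                // old order: set first...
        peers.add("peerB");                          // ...then keep building
        System.out.println(broken.getDontInclude()); // [peerA] -- peerB lost

        Message fixed = new Message();
        peers.add("peerC");                          // new order: finish building,
        fixed.setDontInclude(peers);                 // then set once, when complete
        System.out.println(fixed.getDontInclude());  // all three peers present
    }
}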
@@ -70,6 +70,9 @@ class ExploreJob extends SearchJob {
      * and PeerSelector doesn't include the floodfill peers,
      * so we add the ff peers ourselves and then use the regular PeerSelector.
      *
+     * TODO should we encrypt this also like we do for normal lookups?
+     * Could the OBEP capture it and reply with a reference to a hostile peer?
+     *
      * @param replyTunnelId tunnel to receive replies through
      * @param replyGateway gateway for the reply tunnel
      * @param expiration when the search should stop
@@ -79,16 +82,18 @@ class ExploreJob extends SearchJob {
         DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
         msg.setSearchKey(getState().getTarget());
         msg.setFrom(replyGateway);
-        msg.setDontIncludePeers(getState().getClosestAttempted(MAX_CLOSEST));
+        // Moved below now that DLM makes a copy
+        //msg.setDontIncludePeers(getState().getClosestAttempted(MAX_CLOSEST));
+        Set<Hash> dontIncludePeers = getState().getClosestAttempted(MAX_CLOSEST);
         msg.setMessageExpiration(expiration);
         msg.setReplyTunnel(replyTunnelId);
-        int available = MAX_CLOSEST - msg.getDontIncludePeers().size();
+        int available = MAX_CLOSEST - dontIncludePeers.size();
         if (available > 0) {
             // Add a flag to say this is an exploration and we don't want floodfills in the responses.
             // Doing it this way is of course backwards-compatible.
             // Supported as of 0.7.9
-            if (msg.getDontIncludePeers().add(Hash.FAKE_HASH))
+            if (dontIncludePeers.add(Hash.FAKE_HASH))
                 available--;
         }
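Hash.FAKE_HASH (an all-zero hash) doubles as an in-band flag here: the exploring router spends one of its MAX_CLOSEST exclusion slots to tell the responder not to return floodfill routers, and old routers simply treat it as one more peer to exclude. A self-contained sketch of how a responder might honor the convention; only the convention itself (sentinel in the don't-include set means exploration, supported as of 0.7.9) comes from the comment above, the handler shape is an assumption:

import java.util.HashSet;
import java.util.Set;

class ExplorationFlagDemo {
    // Stand-in for Hash.FAKE_HASH: a well-known sentinel value
    static final String FAKE_HASH = "0000000000000000";

    // Hypothetical responder-side selection honoring the exploration flag
    static Set<String> selectReplyPeers(Set<String> candidates,
                                        Set<String> dontInclude,
                                        Set<String> floodfills) {
        Set<String> rv = new HashSet<String>(candidates);
        rv.removeAll(dontInclude);           // normal exclusion handling
        if (dontInclude.contains(FAKE_HASH))
            rv.removeAll(floodfills);        // exploration: no floodfills in reply
        return rv;
    }
}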
@@ -105,21 +110,22 @@ class ExploreJob extends SearchJob {
         //    msg.getDontIncludePeers().addAll(peers);
         //}
-        available = MAX_CLOSEST - msg.getDontIncludePeers().size();
+        available = MAX_CLOSEST - dontIncludePeers.size();
         if (available > 0) {
             // selectNearestExplicit adds our hash to the dontInclude set (3rd param) ...
             // And we end up with MAX_CLOSEST+1 entries.
             // We don't want our hash in the message's don't-include list though.
             // We're just exploring, but this could give things away, and tie our exploratory tunnels to our router,
             // so let's not put our hash in there.
-            Set dontInclude = new HashSet(msg.getDontIncludePeers());
-            List peers = _peerSelector.selectNearestExplicit(rkey, available, dontInclude, ks);
-            msg.getDontIncludePeers().addAll(peers);
+            Set<Hash> dontInclude = new HashSet(dontIncludePeers);
+            List<Hash> peers = _peerSelector.selectNearestExplicit(rkey, available, dontInclude, ks);
+            dontIncludePeers.addAll(peers);
         }
         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Peers we don't want to hear about: " + msg.getDontIncludePeers());
+            _log.debug("Peers we don't want to hear about: " + dontIncludePeers);
+        msg.setDontIncludePeers(dontIncludePeers);
         return msg;
     }
...
@@ -5,6 +5,7 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
@@ -16,14 +17,14 @@ import net.i2p.router.RouterContext;
  *
  */
 class SearchState {
-    private RouterContext _context;
-    private final HashSet _pendingPeers;
-    private HashMap _pendingPeerTimes;
-    private final HashSet _attemptedPeers;
-    private final HashSet _failedPeers;
-    private final HashSet _successfulPeers;
-    private final HashSet _repliedPeers;
-    private Hash _searchKey;
+    private final RouterContext _context;
+    private final HashSet<Hash> _pendingPeers;
+    private final Map<Hash, Long> _pendingPeerTimes;
+    private final HashSet<Hash> _attemptedPeers;
+    private final HashSet<Hash> _failedPeers;
+    private final HashSet<Hash> _successfulPeers;
+    private final HashSet<Hash> _repliedPeers;
+    private final Hash _searchKey;
     private volatile long _completed;
     private volatile long _started;
@@ -41,30 +42,30 @@ class SearchState {
     }
     public Hash getTarget() { return _searchKey; }
-    public Set getPending() {
+    public Set<Hash> getPending() {
         synchronized (_pendingPeers) {
             return (Set)_pendingPeers.clone();
         }
     }
-    public Set getAttempted() {
+    public Set<Hash> getAttempted() {
         synchronized (_attemptedPeers) {
             return (Set)_attemptedPeers.clone();
         }
     }
-    public Set getClosestAttempted(int max) {
+    public Set<Hash> getClosestAttempted(int max) {
         synchronized (_attemptedPeers) {
             return locked_getClosest(_attemptedPeers, max, _searchKey);
         }
     }
-    private Set locked_getClosest(Set peers, int max, Hash target) {
+    private Set<Hash> locked_getClosest(Set<Hash> peers, int max, Hash target) {
         if (_attemptedPeers.size() <= max)
             return new HashSet(_attemptedPeers);
         TreeSet closest = new TreeSet(new XORComparator(target));
         closest.addAll(_attemptedPeers);
-        HashSet rv = new HashSet(max);
+        Set<Hash> rv = new HashSet(max);
         int i = 0;
-        for (Iterator iter = closest.iterator(); iter.hasNext() && i < max; i++) {
+        for (Iterator<Hash> iter = closest.iterator(); iter.hasNext() && i < max; i++) {
             rv.add(iter.next());
         }
         return rv;
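locked_getClosest() is a straightforward Kademlia-style selection: sort every attempted peer by XOR distance to the target key and keep the first max of them. The TreeSet works because XOR with a fixed target is a bijection, so two distinct hashes can never tie. A self-contained sketch of the same idea over raw byte arrays; the comparator below is an illustration, not I2P's XORComparator:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;

class XorClosestDemo {
    // Order byte arrays by XOR distance to a fixed target,
    // most significant byte first (illustrative only)
    static Comparator<byte[]> xorDistanceTo(final byte[] target) {
        return new Comparator<byte[]>() {
            public int compare(byte[] a, byte[] b) {
                for (int i = 0; i < target.length; i++) {
                    int da = (a[i] ^ target[i]) & 0xff;
                    int db = (b[i] ^ target[i]) & 0xff;
                    if (da != db)
                        return da - db;
                }
                return 0; // identical keys only; XOR distances never tie otherwise
            }
        };
    }

    // Keep the max entries nearest to target, like locked_getClosest()
    static List<byte[]> closest(List<byte[]> peers, byte[] target, int max) {
        TreeSet<byte[]> sorted = new TreeSet<byte[]>(xorDistanceTo(target));
        sorted.addAll(peers);
        List<byte[]> rv = new ArrayList<byte[]>(max);
        for (byte[] p : sorted) {
            if (rv.size() >= max)
                break;
            rv.add(p);
        }
        return rv;
    }
}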
@@ -75,12 +76,12 @@ class SearchState {
             return _attemptedPeers.contains(peer);
         }
     }
-    public Set getSuccessful() {
+    public Set<Hash> getSuccessful() {
         synchronized (_successfulPeers) {
             return (Set)_successfulPeers.clone();
         }
     }
-    public Set getFailed() {
+    public Set<Hash> getFailed() {
         synchronized (_failedPeers) {
             return (Set)_failedPeers.clone();
         }
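All of these getters follow the same idiom: lock the collection just long enough to take a snapshot, then return the copy so callers can iterate without holding the lock or ever observing concurrent mutation. A generic sketch of the pattern, assuming nothing from the I2P code beyond the idiom itself:

import java.util.HashSet;
import java.util.Set;

class SnapshotDemo {
    private final HashSet<String> _peers = new HashSet<String>();

    // Defensive-copy getter: the returned set is a private snapshot,
    // safe to iterate after the lock is released
    public Set<String> getPeers() {
        synchronized (_peers) {
            return new HashSet<String>(_peers);
        }
    }

    public void add(String peer) {
        synchronized (_peers) {
            _peers.add(peer);
        }
    }
}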
@@ -94,11 +95,11 @@ class SearchState {
     public long getWhenStarted() { return _started; }
     public long getWhenCompleted() { return _completed; }
-    public void addPending(Collection pending) {
+    public void addPending(Collection<Hash> pending) {
         synchronized (_pendingPeers) {
             _pendingPeers.addAll(pending);
-            for (Iterator iter = pending.iterator(); iter.hasNext(); )
-                _pendingPeerTimes.put(iter.next(), Long.valueOf(_context.clock().now()));
+            for (Hash peer : pending)
+                _pendingPeerTimes.put(peer, Long.valueOf(_context.clock().now()));
         }
         synchronized (_attemptedPeers) {
             _attemptedPeers.addAll(pending);
@@ -129,7 +130,7 @@ class SearchState {
         long rv = -1;
         synchronized (_pendingPeers) {
             _pendingPeers.remove(peer);
-            Long when = (Long)_pendingPeerTimes.remove(peer);
+            Long when = _pendingPeerTimes.remove(peer);
             if (when != null)
                 rv = _context.clock().now() - when.longValue();
         }
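The _pendingPeerTimes map exists for per-peer latency bookkeeping: addPending() records the clock when a peer is queried, and when the reply (or failure) arrives the entry is removed and subtracted from the current time. With the map typed as Map<Hash, Long>, the (Long) casts above become unnecessary. A minimal sketch of the record/elapse pattern, with System.currentTimeMillis() standing in for _context.clock().now():

import java.util.HashMap;
import java.util.Map;

class PendingTimerDemo {
    private final Map<String, Long> _pendingTimes = new HashMap<String, Long>();

    // Record when we asked this peer
    public void markPending(String peer) {
        synchronized (_pendingTimes) {
            _pendingTimes.put(peer, Long.valueOf(System.currentTimeMillis()));
        }
    }

    // On reply: how long did the peer take? -1 if we never asked
    public long elapsed(String peer) {
        synchronized (_pendingTimes) {
            Long when = _pendingTimes.remove(peer);
            return (when != null) ? System.currentTimeMillis() - when.longValue() : -1;
        }
    }
}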
@@ -146,7 +147,7 @@ class SearchState {
         }
         synchronized (_pendingPeers) {
             _pendingPeers.remove(peer);
-            Long when = (Long)_pendingPeerTimes.remove(peer);
+            Long when = _pendingPeerTimes.remove(peer);
             if (when != null)
                 return _context.clock().now() - when.longValue();
             else
@@ -178,32 +179,28 @@ class SearchState {
         buf.append("\n\tAttempted: ");
         synchronized (_attemptedPeers) {
             buf.append(_attemptedPeers.size()).append(' ');
-            for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Hash peer : _attemptedPeers) {
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append("\n\tPending: ");
         synchronized (_pendingPeers) {
             buf.append(_pendingPeers.size()).append(' ');
-            for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Hash peer : _pendingPeers) {
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append("\n\tFailed: ");
         synchronized (_failedPeers) {
             buf.append(_failedPeers.size()).append(' ');
-            for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Hash peer : _failedPeers) {
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append("\n\tSuccessful: ");
         synchronized (_successfulPeers) {
             buf.append(_successfulPeers.size()).append(' ');
-            for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Hash peer : _successfulPeers) {
                 buf.append(peer.toBase64()).append(" ");
             }
         }
...