Router: type arguments, unused imports
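The whole commit applies the same mechanical cleanups: raw collection types get explicit type arguments (new HashSet(...) becomes new HashSet<Hash>(...)), the raw Collections.EMPTY_SET / EMPTY_LIST / EMPTY_MAP fields become the generic Collections.emptySet() / emptyList() / emptyMap() calls, and unused imports are dropped. A minimal sketch of why the parameterized form is preferred — illustration only, not a file from this tree; the class, method names, and String element type are made up stand-ins for the router's Hash-keyed collections:

    import java.util.HashSet;
    import java.util.Set;

    class RawVsTyped {
        // Raw type: compiles only with an "unchecked" warning, and the
        // compiler cannot stop a caller from inserting the wrong type.
        @SuppressWarnings({"unchecked", "rawtypes"})
        static Set copyRaw(Set peers) {
            return new HashSet(peers);
        }

        // Parameterized type: no warning, and a wrong element type is a
        // compile-time error. The type argument is spelled out on the
        // right-hand side because this code targets Java 5/6, which
        // predates the Java 7 diamond operator (new HashSet<>()).
        static Set<String> copyTyped(Set<String> peers) {
            return new HashSet<String>(peers);
        }
    }

Collections.emptySet() is preferred over the EMPTY_SET field for the same reason: the method is generic, so the result can be returned as Set<Hash> without an unchecked conversion.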
@@ -212,7 +212,7 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {
     public Set<Hash> getDontIncludePeers() {
         if (_dontIncludePeers == null)
             return null;
-        return new HashSet(_dontIncludePeers);
+        return new HashSet<Hash>(_dontIncludePeers);
     }

     /**
@@ -225,7 +225,7 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {
     public void setDontIncludePeers(Collection<Hash> peers) {
         _hasChecksum = false;
         if (peers != null)
-            _dontIncludePeers = new ArrayList(peers);
+            _dontIncludePeers = new ArrayList<Hash>(peers);
         else
             _dontIncludePeers = null;
     }
@@ -239,7 +239,7 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {
      */
     public void addDontIncludePeer(Hash peer) {
         if (_dontIncludePeers == null)
-            _dontIncludePeers = new ArrayList();
+            _dontIncludePeers = new ArrayList<Hash>();
         else if (_dontIncludePeers.contains(peer))
             return;
         _hasChecksum = false;
@@ -256,7 +256,7 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {
     public void addDontIncludePeers(Collection<Hash> peers) {
         _hasChecksum = false;
         if (_dontIncludePeers == null) {
-            _dontIncludePeers = new ArrayList(peers);
+            _dontIncludePeers = new ArrayList<Hash>(peers);
         } else {
             for (Hash peer : peers) {
                 if (!_dontIncludePeers.contains(peer))
@@ -297,7 +297,7 @@ public class DatabaseLookupMessage extends FastI2NPMessageImpl {

         if ( (numPeers < 0) || (numPeers > MAX_NUM_PEERS) )
             throw new I2NPMessageException("Invalid number of peers - " + numPeers);
-        List<Hash> peers = new ArrayList(numPeers);
+        List<Hash> peers = new ArrayList<Hash>(numPeers);
         for (int i = 0; i < numPeers; i++) {
             //byte peer[] = new byte[Hash.HASH_LENGTH];
             //System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);

@@ -33,7 +33,7 @@ public class DatabaseSearchReplyMessage extends FastI2NPMessageImpl {
         // do this in netdb if we need it
         //_context.statManager().createRateStat("netDb.searchReplyMessageSend", "How many search reply messages we send", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
         //_context.statManager().createRateStat("netDb.searchReplyMessageReceive", "How many search reply messages we receive", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
-        _peerHashes = new ArrayList(3);
+        _peerHashes = new ArrayList<Hash>(3);
     }

     /**

@@ -9,7 +9,6 @@ package net.i2p.data.i2np;
  */

 import net.i2p.I2PException;
-import net.i2p.util.Log;

 /**
  * Represent an error serializing or deserializing an APIMessage

@@ -48,7 +48,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
     //private static final boolean RAW_FULL_SIZE = false;

     /** unused */
-    private static final Map<Integer, Builder> _builders = new ConcurrentHashMap(1);
+    private static final Map<Integer, Builder> _builders = new ConcurrentHashMap<Integer, Builder>(1);

     /** @deprecated unused */
     public static final void registerBuilder(Builder builder, int type) { _builders.put(Integer.valueOf(type), builder); }

@@ -58,7 +58,7 @@ public class Banlist {
     public Banlist(RouterContext context) {
         _context = context;
         _log = context.logManager().getLog(Banlist.class);
-        _entries = new ConcurrentHashMap(16);
+        _entries = new ConcurrentHashMap<Hash, Entry>(16);
         _context.jobQueue().addJob(new Cleanup(_context));
     }

@@ -66,7 +66,7 @@ public class Banlist {
         private List<Hash> _toUnbanlist;
         public Cleanup(RouterContext ctx) {
             super(ctx);
-            _toUnbanlist = new ArrayList(4);
+            _toUnbanlist = new ArrayList<Hash>(4);
             getTiming().setStartAfter(ctx.clock().now() + BANLIST_CLEANER_START_DELAY);
         }
         public String getName() { return "Expire banned peers"; }
@@ -74,8 +74,8 @@ public class Banlist {
             _toUnbanlist.clear();
             long now = getContext().clock().now();
             try {
-                for (Iterator iter = _entries.entrySet().iterator(); iter.hasNext(); ) {
-                    Map.Entry<Hash, Entry> e = (Map.Entry) iter.next();
+                for (Iterator<Map.Entry<Hash, Entry>> iter = _entries.entrySet().iterator(); iter.hasNext(); ) {
+                    Map.Entry<Hash, Entry> e = iter.next();
                     if (e.getValue().expireOn <= now) {
                         iter.remove();
                         _toUnbanlist.add(e.getKey());
@@ -169,7 +169,7 @@ public class Banlist {
         e.causeCode = reasonCode;
         e.transports = null;
         if (transport != null) {
-            e.transports = new ConcurrentHashSet(2);
+            e.transports = new ConcurrentHashSet<String>(2);
             e.transports.add(transport);
         }

@@ -75,8 +75,8 @@ public class Blocklist {
     private int _blocklistSize;
     private final Object _lock = new Object();
     private Entry _wrapSave;
-    private final Set<Hash> _inProcess = new HashSet(4);
-    private Map<Hash, String> _peerBlocklist = new HashMap(4);
+    private final Set<Hash> _inProcess = new HashSet<Hash>(4);
+    private Map<Hash, String> _peerBlocklist = new HashMap<Hash, String>(4);

     /**
      * Limits of transient (in-memory) blocklists.
@@ -86,8 +86,8 @@ public class Blocklist {
     private static final int MAX_IPV4_SINGLES = 256;
     private static final int MAX_IPV6_SINGLES = 512;

-    private final Set<Integer> _singleIPBlocklist = new ConcurrentHashSet(4);
-    private final Map<BigInteger, Object> _singleIPv6Blocklist = new LHMCache(MAX_IPV6_SINGLES);
+    private final Set<Integer> _singleIPBlocklist = new ConcurrentHashSet<Integer>(4);
+    private final Map<BigInteger, Object> _singleIPv6Blocklist = new LHMCache<BigInteger, Object>(MAX_IPV6_SINGLES);

     private static final Object DUMMY = Integer.valueOf(0);

@@ -518,8 +518,8 @@ public class Blocklist {
     private List<byte[]> getAddresses(Hash peer) {
         RouterInfo pinfo = _context.netDb().lookupRouterInfoLocally(peer);
         if (pinfo == null)
-            return Collections.EMPTY_LIST;
-        List<byte[]> rv = new ArrayList(4);
+            return Collections.emptyList();
+        List<byte[]> rv = new ArrayList<byte[]>(4);
         // for each peer address
         for (RouterAddress pa : pinfo.getAddresses()) {
             byte[] pib = pa.getIP();
@@ -753,7 +753,7 @@ public class Blocklist {
      * Additional jobs can wait.
      * Although could this clog up the job queue runners? Yes.
      * So we also stagger these jobs.
      *
      */
     private synchronized void banlistForever(Hash peer, List<byte[]> ips) {
         String file = _context.getProperty(PROP_BLOCKLIST_FILE, BLOCKLIST_FILE_DEFAULT);
@@ -818,7 +818,7 @@ public class Blocklist {
     public void renderStatusHTML(Writer out) throws IOException {
         // move to the jsp
         //out.write("<h2>Banned IPs</h2>");
-        Set<Integer> singles = new TreeSet();
+        Set<Integer> singles = new TreeSet<Integer>();
         singles.addAll(_singleIPBlocklist);
         if (!(singles.isEmpty() && _singleIPv6Blocklist.isEmpty())) {
             out.write("<table><tr><th align=\"center\" colspan=\"2\"><b>");
@@ -846,7 +846,7 @@ public class Blocklist {
         if (!_singleIPv6Blocklist.isEmpty()) {
             List<BigInteger> s6;
             synchronized(_singleIPv6Blocklist) {
-                s6 = new ArrayList(_singleIPv6Blocklist.keySet());
+                s6 = new ArrayList<BigInteger>(_singleIPv6Blocklist.keySet());
             }
             Collections.sort(s6);
             for (BigInteger bi : s6) {

@@ -89,7 +89,7 @@ public abstract class ClientManagerFacade implements Service {
      *
      * @return set of Destination objects
      */
-    public Set<Destination> listClients() { return Collections.EMPTY_SET; }
+    public Set<Destination> listClients() { return Collections.emptySet(); }

     /**
      * Return the client's current config, or null if not connected

@@ -12,8 +12,6 @@ import java.io.IOException;
 import java.io.Writer;
 import java.util.Collections;
 import java.util.List;
-import java.util.Set;

 import net.i2p.data.Hash;
 import net.i2p.data.RouterAddress;

@@ -29,14 +27,14 @@ public abstract class CommSystemFacade implements Service {
     public void renderStatusHTML(Writer out) throws IOException { renderStatusHTML(out, null, 0); }

     /** Create the list of RouterAddress structures based on the router's config */
-    public List<RouterAddress> createAddresses() { return Collections.EMPTY_LIST; }
+    public List<RouterAddress> createAddresses() { return Collections.emptyList(); }

     public int countActivePeers() { return 0; }
     public int countActiveSendPeers() { return 0; }
     public boolean haveInboundCapacity(int pct) { return true; }
     public boolean haveOutboundCapacity(int pct) { return true; }
     public boolean haveHighOutboundCapacity() { return true; }
-    public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
+    public List getMostRecentErrorMessages() { return Collections.emptyList(); }

     /**
      * Median clock skew of connected peers in seconds, or null if we cannot answer.

@@ -36,9 +36,9 @@ public class InNetMessagePool implements Service {
     private final HandlerJobBuilder _handlerJobBuilders[];

     /** following 5 unused unless DISPATCH_DIRECT == false */
-    private final List _pendingDataMessages;
-    private final List _pendingDataMessagesFrom;
-    private final List _pendingGatewayMessages;
+    private final List<I2NPMessage> _pendingDataMessages;
+    private final List<Hash> _pendingDataMessagesFrom;
+    private final List<I2NPMessage> _pendingGatewayMessages;
     private SharedShortCircuitDataJob _shortCircuitDataJob;
     private SharedShortCircuitGatewayJob _shortCircuitGatewayJob;

@@ -56,7 +56,7 @@ public class InNetMessagePool implements Service {
      * using the jobQueue's single thread.
      *
      */
     public static final String PROP_DISPATCH_THREADED = "router.dispatchThreaded";
     public static final boolean DEFAULT_DISPATCH_THREADED = false;
     /**
      * If we aren't doing threaded dispatch for tunnel messages, should we
@@ -75,9 +75,9 @@ public class InNetMessagePool implements Service {
             _pendingDataMessagesFrom = null;
             _pendingGatewayMessages = null;
         } else {
-            _pendingDataMessages = new ArrayList(16);
-            _pendingDataMessagesFrom = new ArrayList(16);
-            _pendingGatewayMessages = new ArrayList(16);
+            _pendingDataMessages = new ArrayList<I2NPMessage>(16);
+            _pendingDataMessagesFrom = new ArrayList<Hash>(16);
+            _pendingGatewayMessages = new ArrayList<I2NPMessage>(16);
             _shortCircuitDataJob = new SharedShortCircuitDataJob(context);
             _shortCircuitGatewayJob = new SharedShortCircuitGatewayJob(context);
         }

@@ -12,7 +12,6 @@ import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.InputStream;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -27,7 +26,6 @@ import net.i2p.data.PrivateKey;
 import net.i2p.data.PublicKey;
 import net.i2p.data.SigningPrivateKey;
 import net.i2p.data.SigningPublicKey;
-import net.i2p.util.Clock;
 import net.i2p.util.Log;
 import net.i2p.util.SecureDirectory;
 import net.i2p.util.SecureFileOutputStream;
@@ -57,7 +55,7 @@ public class KeyManager {
     public KeyManager(RouterContext context) {
         _context = context;
         _log = _context.logManager().getLog(KeyManager.class);
-        _leaseSetKeys = new ConcurrentHashMap();
+        _leaseSetKeys = new ConcurrentHashMap<Hash, LeaseSetKeys>();
     }

     public void startup() {

@@ -56,7 +56,7 @@ public class MessageHistory {
         _log = context.logManager().getLog(getClass());
         _fmt = new SimpleDateFormat("yy/MM/dd.HH:mm:ss.SSS");
         _fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
-        _unwrittenEntries = new LinkedBlockingQueue();
+        _unwrittenEntries = new LinkedBlockingQueue<String>();
         _reinitializeJob = new ReinitializeJob();
         _writeJob = new WriteJob();
         _firstPass = true;

@@ -1,7 +1,6 @@
 package net.i2p.router;

 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
@@ -13,7 +12,6 @@ import net.i2p.I2PAppContext;
 import net.i2p.data.DataHelper;
 import net.i2p.data.RouterInfo;
-import net.i2p.router.Router;
 import net.i2p.util.Log;

 /**
  * Fire up multiple routers in the same VM, all with their own RouterContext

@@ -77,9 +77,9 @@ public abstract class NetworkDatabaseFacade implements Service {
     /** @deprecated moved to router console */
     public void renderStatusHTML(Writer out) throws IOException {}
     /** public for NetDbRenderer in routerconsole */
-    public Set<LeaseSet> getLeases() { return Collections.EMPTY_SET; }
+    public Set<LeaseSet> getLeases() { return Collections.emptySet(); }
     /** public for NetDbRenderer in routerconsole */
-    public Set<RouterInfo> getRouters() { return Collections.EMPTY_SET; }
+    public Set<RouterInfo> getRouters() { return Collections.emptySet(); }

     /** @since 0.9 */
     public ReseedChecker reseedChecker() { return null; };

@@ -154,7 +154,7 @@ public class OutNetMessage implements CDPQEntry {
                 return (Map<String, Long>)_timestamps.clone();
             }
         }
-        return Collections.EMPTY_MAP;
+        return Collections.emptyMap();
     }

     /** @deprecated unused */
@@ -170,8 +170,8 @@ public class OutNetMessage implements CDPQEntry {

     private void locked_initTimestamps() {
         if (_timestamps == null) {
-            _timestamps = new HashMap(8);
-            _timestampOrder = new ArrayList(8);
+            _timestamps = new HashMap<String, Long>(8);
+            _timestampOrder = new ArrayList<String>(8);
         }
     }

@@ -279,7 +279,7 @@ public class OutNetMessage implements CDPQEntry {

     public synchronized void transportFailed(String transportStyle) {
         if (_failedTransports == null)
-            _failedTransports = new HashSet(2);
+            _failedTransports = new HashSet<String>(2);
         _failedTransports.add(transportStyle);
     }

@@ -11,20 +11,16 @@ package net.i2p.router;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import java.util.TimeZone;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;

 import net.i2p.data.Certificate;
@@ -53,7 +49,6 @@ import net.i2p.util.Log;
 import net.i2p.util.OrderedProperties;
 import net.i2p.util.SecureFileOutputStream;
 import net.i2p.util.SimpleByteCache;
 import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SystemVersion;
 import net.i2p.util.Translate;

@@ -147,7 +142,7 @@ public class Router implements RouterClock.ClockShiftListener {

     public Router(String configFilename, Properties envProps) {
         _gracefulExitCode = -1;
-        _config = new ConcurrentHashMap();
+        _config = new ConcurrentHashMap<String, String>();

         if (configFilename == null) {
             if (envProps != null) {
@@ -1028,7 +1023,7 @@ public class Router implements RouterClock.ClockShiftListener {
      * @return success
      * @since 0.8.13
      */
-    public boolean saveConfig(Map toAdd, Collection<String> toRemove) {
+    public boolean saveConfig(Map<String, String> toAdd, Collection<String> toRemove) {
         synchronized(_configFileLock) {
             if (toAdd != null)
                 _config.putAll(toAdd);

@@ -57,7 +57,7 @@ public class RouterClock extends Clock {
         super(context);
         _lastStratum = WORST_STRATUM;
         _lastSlewed = System.currentTimeMillis();
-        _shiftListeners = new CopyOnWriteArraySet();
+        _shiftListeners = new CopyOnWriteArraySet<ClockShiftListener>();
         _lastShiftNanos = System.nanoTime();
         _timeStamper = new RouterTimestamper(context, this);
     }

@@ -1,6 +1,5 @@
 package net.i2p.router;

-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
@@ -69,7 +68,7 @@ public class RouterContext extends I2PAppContext {
     private volatile boolean _initialized;
     private final Object _lock1 = new Object(), _lock2 = new Object(), _lock3 = new Object();

-    private static final List<RouterContext> _contexts = new CopyOnWriteArrayList();
+    private static final List<RouterContext> _contexts = new CopyOnWriteArrayList<RouterContext>();

     /**
      * Caller MUST call initAll() after instantiation.
@@ -89,7 +88,7 @@ public class RouterContext extends I2PAppContext {
         //initAll();
         if (!_contexts.isEmpty())
             System.err.println("Warning - More than one router in this JVM");
-        _finalShutdownTasks = new CopyOnWriteArraySet();
+        _finalShutdownTasks = new CopyOnWriteArraySet<Runnable>();
         _contexts.add(this);
     }

@@ -46,8 +46,6 @@ import net.i2p.router.RouterContext;
 import net.i2p.util.ConcurrentHashSet;
 import net.i2p.util.I2PThread;
 import net.i2p.util.Log;
 import net.i2p.util.RandomSource;
 import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SimpleTimer;

 /**
@@ -119,9 +117,9 @@ class ClientConnectionRunner {
         _manager = manager;
         _socket = socket;
         // unused for fastReceive
-        _messages = new ConcurrentHashMap();
-        _alreadyProcessed = new ArrayList();
-        _acceptedPending = new ConcurrentHashSet();
+        _messages = new ConcurrentHashMap<MessageId, Payload>();
+        _alreadyProcessed = new ArrayList<MessageId>();
+        _acceptedPending = new ConcurrentHashSet<MessageId>();
         _messageId = new AtomicInteger(_context.random().nextInt());
     }

@@ -11,7 +11,6 @@ package net.i2p.router.client;
 import java.io.IOException;
 import java.io.Writer;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
@@ -78,9 +77,9 @@ class ClientManager {
         //                      "How large are messages received by the client?",
         //                      "ClientMessages",
         //                      new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        _runners = new ConcurrentHashMap();
-        _runnersByHash = new ConcurrentHashMap();
-        _pendingRunners = new HashSet();
+        _runners = new ConcurrentHashMap<Destination, ClientConnectionRunner>();
+        _runnersByHash = new ConcurrentHashMap<Hash, ClientConnectionRunner>();
+        _pendingRunners = new HashSet<ClientConnectionRunner>();
         _port = port;
         // following are for RequestLeaseSetJob
         _ctx.statManager().createRateStat("client.requestLeaseSetSuccess", "How frequently the router requests successfully a new leaseSet?", "ClientMessages", new long[] { 60*60*1000 });
@@ -124,7 +123,7 @@ class ClientManager {
         _log.info("Shutting down the ClientManager");
         if (_listener != null)
             _listener.stopListening();
-        Set<ClientConnectionRunner> runners = new HashSet();
+        Set<ClientConnectionRunner> runners = new HashSet<ClientConnectionRunner>();
         synchronized (_runners) {
             for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext();) {
                 ClientConnectionRunner runner = iter.next();
@@ -153,8 +152,8 @@ class ClientManager {
     public I2CPMessageQueue internalConnect() throws I2PSessionException {
         if (!_isStarted)
             throw new I2PSessionException("Router client manager is shut down");
-        LinkedBlockingQueue<I2CPMessage> in = new LinkedBlockingQueue(INTERNAL_QUEUE_SIZE);
-        LinkedBlockingQueue<I2CPMessage> out = new LinkedBlockingQueue(INTERNAL_QUEUE_SIZE);
+        LinkedBlockingQueue<I2CPMessage> in = new LinkedBlockingQueue<I2CPMessage>(INTERNAL_QUEUE_SIZE);
+        LinkedBlockingQueue<I2CPMessage> out = new LinkedBlockingQueue<I2CPMessage>(INTERNAL_QUEUE_SIZE);
         I2CPMessageQueue myQueue = new I2CPMessageQueueImpl(in, out);
         I2CPMessageQueue hisQueue = new I2CPMessageQueueImpl(out, in);
         ClientConnectionRunner runner = new QueuedClientConnectionRunner(_ctx, this, myQueue);
@@ -344,7 +343,7 @@ class ClientManager {
      * Unsynchronized
      */
     public Set<Destination> listClients() {
-        Set<Destination> rv = new HashSet();
+        Set<Destination> rv = new HashSet<Destination>();
         rv.addAll(_runners.keySet());
         return rv;
     }

@@ -11,7 +11,6 @@ package net.i2p.router.client;
 import java.io.IOException;
 import java.io.Writer;
 import java.util.Collections;
-import java.util.Iterator;
 import java.util.Set;

 import net.i2p.client.I2PSessionException;
@@ -239,7 +238,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade implements Inte
         if (_manager != null)
             return _manager.listClients();
         else
-            return Collections.EMPTY_SET;
+            return Collections.emptySet();
     }

     /**

@@ -28,7 +28,7 @@ class ClientWriterRunner implements Runnable {

     public ClientWriterRunner(RouterContext context, ClientConnectionRunner runner) {
         //_log = context.logManager().getLog(ClientWriterRunner.class);
-        _messagesToWrite = new LinkedBlockingQueue(QUEUE_SIZE);
+        _messagesToWrite = new LinkedBlockingQueue<I2CPMessage>(QUEUE_SIZE);
         _runner = runner;
         //_id = ++__id;
     }

@@ -6,7 +6,6 @@ import net.i2p.data.i2cp.I2CPMessageException;
 import net.i2p.internal.I2CPMessageQueue;
 import net.i2p.internal.QueuedI2CPMessageReader;
 import net.i2p.router.RouterContext;
-import net.i2p.util.Log;

 /**
  * Zero-copy in-JVM.

@@ -16,7 +16,6 @@ import net.i2p.data.i2cp.I2CPMessage;
 import net.i2p.data.i2cp.I2CPMessageException;
 import net.i2p.data.i2cp.RequestLeaseSetMessage;
 import net.i2p.data.i2cp.RequestVariableLeaseSetMessage;
-import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;

@@ -4,15 +4,11 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.Socket;
 import java.net.ServerSocket;
 import java.security.KeyStore;
 import java.security.GeneralSecurityException;
 import java.security.cert.Certificate;
 import java.security.cert.CertificateEncodingException;
 import java.util.HashMap;
 import java.util.Map;
@@ -21,9 +17,7 @@ import javax.net.ssl.SSLServerSocketFactory;
 import javax.net.ssl.SSLContext;

 import net.i2p.client.I2PClient;
 import net.i2p.crypto.CertUtil;
 import net.i2p.crypto.KeyStoreUtil;
 import net.i2p.data.Base32;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 import net.i2p.util.SecureDirectory;
@@ -94,7 +88,7 @@ class SSLClientListenerRunner extends ClientListenerRunner {
         if (success) {
             success = ks.exists();
             if (success) {
-                Map<String, String> changes = new HashMap();
+                Map<String, String> changes = new HashMap<String, String>();
                 changes.put(PROP_KEYSTORE_PASSWORD, DEFAULT_KEYSTORE_PASSWORD);
                 changes.put(PROP_KEY_PASSWORD, keyPassword);
                 _context.router().saveConfig(changes, null);

@@ -23,11 +23,11 @@ import net.i2p.router.NetworkDatabaseFacade;
 import net.i2p.router.RouterContext;

 public class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
-    private Map _routers;
+    private Map<Hash, RouterInfo> _routers;
     private RouterContext _context;

     public DummyNetworkDatabaseFacade(RouterContext ctx) {
-        _routers = Collections.synchronizedMap(new HashMap());
+        _routers = Collections.synchronizedMap(new HashMap<Hash, RouterInfo>());
         _context = ctx;
     }

@@ -61,6 +61,6 @@ public class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
         _routers.remove(dbEntry);
     }

-    public Set<Hash> getAllRouters() { return new HashSet(_routers.keySet()); }
+    public Set<Hash> getAllRouters() { return new HashSet<Hash>(_routers.keySet()); }
     public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) { return new HashSet(_routers.values()); }
 }

@@ -29,7 +29,7 @@ public class VMCommSystem extends CommSystemFacade {
     /**
      * Mapping from Hash to VMCommSystem for all routers hooked together
      */
-    private static Map _commSystemFacades = Collections.synchronizedMap(new HashMap(16));
+    private static Map<Hash, VMCommSystem> _commSystemFacades = Collections.synchronizedMap(new HashMap<Hash, VMCommSystem>(16));

     public VMCommSystem(RouterContext context) {
         _context = context;

@@ -25,7 +25,7 @@ class CloveSet {
     private long _expiration;

     public CloveSet() {
-        _cloves = new ArrayList(4);
+        _cloves = new ArrayList<GarlicClove>(4);
         _msgId = -1;
         _expiration = -1;
     }

@@ -41,7 +41,7 @@ class GarlicConfig {
     public GarlicConfig() {
         _id = -1;
         _expiration = -1;
-        _cloveConfigs = new ArrayList(4);
+        _cloveConfigs = new ArrayList<GarlicConfig>(4);
         //_replyBlockMessageId = -1;
         //_replyBlockExpiration = -1;
     }

@@ -65,7 +65,7 @@ public class GarlicMessageBuilder {
     private static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) {
         Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
         log.error("buildMessage 2 args, using router SKM", new Exception("who did it"));
-        return buildMessage(ctx, config, new SessionKey(), new HashSet(), ctx.sessionKeyManager());
+        return buildMessage(ctx, config, new SessionKey(), new HashSet<SessionTag>(), ctx.sessionKeyManager());
     }

     /**

@@ -11,7 +11,6 @@ import net.i2p.data.LeaseSet;
 import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelInfo;
 import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SimpleTimer;

 /**
@@ -43,12 +42,12 @@ public class OutboundCache {
      *
      * NOT concurrent.
      */
-    final Map<HashPair, TunnelInfo> tunnelCache = new HashMap(64);
+    final Map<HashPair, TunnelInfo> tunnelCache = new HashMap<HashPair, TunnelInfo>(64);

     /*
      * NOT concurrent.
      */
-    final Map<HashPair, TunnelInfo> backloggedTunnelCache = new HashMap(64);
+    final Map<HashPair, TunnelInfo> backloggedTunnelCache = new HashMap<HashPair, TunnelInfo>(64);

     /**
      * Returns the reply lease set if forced to do so,
@@ -68,7 +67,7 @@ public class OutboundCache {
      *
      * Concurrent.
      */
-    final Map<HashPair, LeaseSet> leaseSetCache = new ConcurrentHashMap(64);
+    final Map<HashPair, LeaseSet> leaseSetCache = new ConcurrentHashMap<HashPair, LeaseSet>(64);

     /**
      * Use the same inbound tunnel (i.e. lease) as we did for the same destination previously,
@@ -84,7 +83,7 @@ public class OutboundCache {
      *
      * Concurrent.
      */
-    final ConcurrentHashMap<HashPair, Lease> leaseCache = new ConcurrentHashMap(64);
+    final ConcurrentHashMap<HashPair, Lease> leaseCache = new ConcurrentHashMap<HashPair, Lease>(64);

     /**
      * This cache is used to ensure that we request a reply every so often.
@@ -94,7 +93,7 @@ public class OutboundCache {
      *
      * Concurrent.
      */
-    final Map<HashPair, Long> lastReplyRequestCache = new ConcurrentHashMap(64);
+    final Map<HashPair, Long> lastReplyRequestCache = new ConcurrentHashMap<HashPair, Long>(64);

     private final RouterContext _context;

@@ -4,7 +4,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;

@@ -322,7 +321,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
         }

         // get the possible leases
-        List<Lease> leases = new ArrayList(_leaseSet.getLeaseCount());
+        List<Lease> leases = new ArrayList<Lease>(_leaseSet.getLeaseCount());
         for (int i = 0; i < _leaseSet.getLeaseCount(); i++) {
             Lease lease = _leaseSet.getLease(i);
             if (lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) {
@@ -454,7 +453,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

         PublicKey key = _leaseSet.getEncryptionKey();
         SessionKey sessKey = new SessionKey();
-        Set<SessionTag> tags = new HashSet();
+        Set<SessionTag> tags = new HashSet<SessionTag>();

         LeaseSet replyLeaseSet;
         // Per-message flag == false overrides session option which is default true

@@ -9,7 +9,6 @@ package net.i2p.router.networkdb;
  */

 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;

 import net.i2p.data.DatabaseEntry;

@@ -58,7 +58,7 @@ class ExpireLeasesJob extends JobImpl {
      *
      */
     private Set<Hash> selectKeysToExpire() {
-        Set<Hash> toExpire = new HashSet(128);
+        Set<Hash> toExpire = new HashSet<Hash>(128);
         for (Map.Entry<Hash, DatabaseEntry> entry : _facade.getDataStore().getMapEntries()) {
             DatabaseEntry obj = entry.getValue();
             if (obj.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {

@@ -8,7 +8,6 @@ package net.i2p.router.networkdb.kademlia;
  *
  */

-import java.util.Collections;
 import java.util.Set;

 import net.i2p.data.DatabaseEntry;

@@ -117,7 +117,7 @@ class ExploreJob extends SearchJob {
         // We don't want our hash in the message's don't-include list though.
         // We're just exploring, but this could give things away, and tie our exploratory tunnels to our router,
         // so let's not put our hash in there.
-        Set<Hash> dontInclude = new HashSet(dontIncludePeers);
+        Set<Hash> dontInclude = new HashSet<Hash>(dontIncludePeers);
         List<Hash> peers = _peerSelector.selectNearestExplicit(rkey, available, dontInclude, ks);
         dontIncludePeers.addAll(peers);
     }

@@ -42,7 +42,7 @@ class ExploreKeySelectorJob extends JobImpl {
             requeue(30*RERUN_DELAY_MS);
             return;
         }
-        Set toExplore = selectKeysToExplore();
+        Set<Hash> toExplore = selectKeysToExplore();
         _log.info("Filling the explorer pool with: " + toExplore);
         if (toExplore != null)
             _facade.queueForExploration(toExplore);
@@ -54,16 +54,16 @@ class ExploreKeySelectorJob extends JobImpl {
      * for it, with a maximum number of keys limited by the exploration pool size
      *
      */
-    private Set selectKeysToExplore() {
-        Set alreadyQueued = _facade.getExploreKeys();
+    private Set<Hash> selectKeysToExplore() {
+        Set<Hash> alreadyQueued = _facade.getExploreKeys();
         if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null;
-        Set toExplore = new HashSet(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
+        Set<Hash> toExplore = new HashSet<Hash>(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
         for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) {
             KBucket bucket = _facade.getKBuckets().getBucket(i);
             if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
                 boolean already = false;
-                for (Iterator iter = alreadyQueued.iterator(); iter.hasNext(); ) {
-                    Hash key = (Hash)iter.next();
+                for (Iterator<Hash> iter = alreadyQueued.iterator(); iter.hasNext(); ) {
+                    Hash key = iter.next();
                     if (bucket.shouldContain(key)) {
                         already = true;
                         _log.debug("Bucket " + i + " is already queued for exploration \t" + key);

@@ -55,7 +55,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
         // these override the settings in super
         _timeoutMs = Math.min(timeoutMs, SearchJob.PER_FLOODFILL_PEER_TIMEOUT);
         _expiration = _timeoutMs + ctx.clock().now();
-        _unheardFrom = new HashSet(CONCURRENT_SEARCHES);
+        _unheardFrom = new HashSet<Hash>(CONCURRENT_SEARCHES);
         _replySelector = new FloodOnlyLookupSelector(getContext(), this);
         _onReply = new FloodOnlyLookupMatchJob(getContext(), this);
         _onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
@@ -77,7 +77,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
             // but we're passing the rkey not the key, so we do it below instead in certain cases.
             floodfillPeers = ((FloodfillPeerSelector)_facade.getPeerSelector()).selectFloodfillParticipants(rkey, MIN_FOR_NO_DSRM, ks);
         } else {
-            floodfillPeers = Collections.EMPTY_LIST;
+            floodfillPeers = Collections.emptyList();
         }

         // If we dont know enough floodfills,
@@ -91,7 +91,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
             // so this situation should be temporary
             if (_log.shouldLog(Log.WARN))
                 _log.warn("Running netDb searches against the floodfill peers, but we don't know any");
-            floodfillPeers = new ArrayList(_facade.getAllRouters());
+            floodfillPeers = new ArrayList<Hash>(_facade.getAllRouters());
             if (floodfillPeers.isEmpty()) {
                 if (_log.shouldLog(Log.ERROR))
                     _log.error("We don't know any peers at all");

@@ -43,10 +43,10 @@ public class FloodSearchJob extends JobImpl {
         _log = ctx.logManager().getLog(getClass());
         _facade = facade;
         _key = key;
-        _onFind = new CopyOnWriteArrayList();
+        _onFind = new CopyOnWriteArrayList<Job>();
         if (onFind != null)
             _onFind.add(onFind);
-        _onFailed = new CopyOnWriteArrayList();
+        _onFailed = new CopyOnWriteArrayList<Job>();
         if (onFailed != null)
             _onFailed.add(onFailed);
         int timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;

@@ -17,7 +17,7 @@ class FloodThrottler {
     private static final long CLEAN_TIME = 60*1000;

     FloodThrottler() {
-        this.counter = new ObjectCounter();
+        this.counter = new ObjectCounter<Hash>();
         SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
     }

@@ -7,7 +7,6 @@ import java.util.Map;
 import java.util.Set;

 import net.i2p.data.DatabaseEntry;
-import net.i2p.data.DataFormatException;
 import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
 import net.i2p.data.TunnelId;
@@ -47,8 +46,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad

     public FloodfillNetworkDatabaseFacade(RouterContext context) {
         super(context);
-        _activeFloodQueries = new HashMap();
-        _verifiesInProgress = new ConcurrentHashSet(8);
+        _activeFloodQueries = new HashMap<Hash, FloodSearchJob>();
+        _verifiesInProgress = new ConcurrentHashSet<Hash>(8);

         _context.statManager().createRequiredRateStat("netDb.successTime", "Time for successful lookup (ms)", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
         _context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@@ -140,7 +139,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     }

     @Override
-    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
+    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
         // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
@@ -268,7 +267,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     }

     public List<RouterInfo> getKnownRouterData() {
-        List<RouterInfo> rv = new ArrayList();
+        List<RouterInfo> rv = new ArrayList<RouterInfo>();
         DataStore ds = getDataStore();
         if (ds != null) {
             for (DatabaseEntry o : ds.getEntries()) {

@@ -88,7 +88,7 @@ class FloodfillPeerSelector extends PeerSelector {
             peersToIgnore.add(_context.routerHash());
         // TODO this is very slow
         FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters);
-        if (kbuckets == null) return new ArrayList();
+        if (kbuckets == null) return new ArrayList<Hash>();
         kbuckets.getAll(matches);
         List<Hash> rv = matches.get(maxNumRouters, preferConnected);
         if (_log.shouldLog(Log.DEBUG))
@@ -126,7 +126,7 @@ class FloodfillPeerSelector extends PeerSelector {
         return matches.getFloodfillParticipants();
         *****/
         Set<Hash> set = _context.peerManager().getPeersByCapability(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL);
-        List<Hash> rv = new ArrayList(set.size());
+        List<Hash> rv = new ArrayList<Hash>(set.size());
         for (Hash h : set) {
             if ((toIgnore != null && toIgnore.contains(h)) ||
                 _context.banlist().isBanlistedForever(h))
@@ -180,7 +180,7 @@ class FloodfillPeerSelector extends PeerSelector {
             toIgnore = Collections.singleton(_context.routerHash());
         } else if (!toIgnore.contains(_context.routerHash())) {
             // copy the Set so we don't confuse StoreJob
-            toIgnore = new HashSet(toIgnore);
+            toIgnore = new HashSet<Hash>(toIgnore);
             toIgnore.add(_context.routerHash());
         }
         return selectFloodfillParticipantsIncludingUs(key, howMany, toIgnore, kbuckets);
@@ -195,12 +195,12 @@ class FloodfillPeerSelector extends PeerSelector {
      */
     private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
         List<Hash> ffs = selectFloodfillParticipants(toIgnore, kbuckets);
-        TreeSet<Hash> sorted = new TreeSet(new XORComparator(key));
+        TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator(key));
         sorted.addAll(ffs);

-        List<Hash> rv = new ArrayList(howMany);
-        List<Hash> okff = new ArrayList(ffs.size());
-        List<Hash> badff = new ArrayList(ffs.size());
+        List<Hash> rv = new ArrayList<Hash>(howMany);
+        List<Hash> okff = new ArrayList<Hash>(ffs.size());
+        List<Hash> badff = new ArrayList<Hash>(ffs.size());
         int found = 0;
         long now = _context.clock().now();

@@ -219,7 +219,7 @@ class FloodfillPeerSelector extends PeerSelector {
         // 5 == FNDF.MAX_TO_FLOOD + 1
         int limit = Math.max(5, howMany);
         limit = Math.min(limit, ffs.size());
-        Set<Integer> maskedIPs = new HashSet(limit + 4);
+        Set<Integer> maskedIPs = new HashSet<Integer>(limit + 4);
         // split sorted list into 3 sorted lists
         for (int i = 0; found < howMany && i < limit; i++) {
             Hash entry = sorted.first();
@@ -303,7 +303,7 @@ class FloodfillPeerSelector extends PeerSelector {
      * @since 0.9.5 modified from ProfileOrganizer
      */
     private Set<Integer> maskedIPSet(Hash peer, RouterInfo pinfo, int mask) {
-        Set<Integer> rv = new HashSet(4);
+        Set<Integer> rv = new HashSet<Integer>(4);
         byte[] commIP = _context.commSystem().getIP(peer);
         if (commIP != null)
             rv.add(maskedIP(commIP, mask));
@@ -354,8 +354,8 @@ class FloodfillPeerSelector extends PeerSelector {
          */
         public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
             _key = key;
-            _sorted = new TreeSet(new XORComparator(key));
-            _floodfillMatches = new ArrayList(8);
+            _sorted = new TreeSet<Hash>(new XORComparator(key));
+            _floodfillMatches = new ArrayList<Hash>(8);
             _toIgnore = toIgnore;
             _wanted = wanted;
         }
@@ -410,15 +410,15 @@ class FloodfillPeerSelector extends PeerSelector {
          * Group 4: Non-floodfills, sorted by closest-to-the-key
          */
        public List<Hash> get(int howMany, boolean preferConnected) {
-            List<Hash> rv = new ArrayList(howMany);
-            List<Hash> badff = new ArrayList(howMany);
-            List<Hash> unconnectedff = new ArrayList(howMany);
+            List<Hash> rv = new ArrayList<Hash>(howMany);
+            List<Hash> badff = new ArrayList<Hash>(howMany);
+            List<Hash> unconnectedff = new ArrayList<Hash>(howMany);
             int found = 0;
             long now = _context.clock().now();
             // Only add in "good" floodfills here...
             // Let's say published in last 3h and no failed sends in last 30m
             // (Forever banlisted ones are excluded in add() above)
-            for (Iterator<Hash> iter = new RandomIterator(_floodfillMatches); (found < howMany) && iter.hasNext(); ) {
+            for (Iterator<Hash> iter = new RandomIterator<Hash>(_floodfillMatches); (found < howMany) && iter.hasNext(); ) {
                 Hash entry = iter.next();
                 RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
                 if (info != null && now - info.getPublished() > 3*60*60*1000) {

@@ -55,7 +55,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
         _log = ctx.logManager().getLog(getClass());
         _sentTo = sentTo;
         _facade = facade;
-        _ignore = new HashSet(MAX_PEERS_TO_TRY);
+        _ignore = new HashSet<Hash>(MAX_PEERS_TO_TRY);
         if (sentTo != null) {
             _ignore.add(_sentTo);
         }
@@ -277,7 +277,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
     private void resend() {
         DatabaseEntry ds = _facade.lookupLocally(_key);
         if (ds != null) {
-            Set<Hash> toSkip = new HashSet(2);
+            Set<Hash> toSkip = new HashSet<Hash>(2);
             if (_sentTo != null)
                 toSkip.add(_sentTo);
             if (_target != null)

@@ -57,9 +57,9 @@ class HarvesterJob extends JobImpl {
     public String getName() { return "Harvest the netDb"; }
     public void runJob() {
         if (shouldHarvest()) {
-            List peers = selectPeersToUpdate();
+            List<Hash> peers = selectPeersToUpdate();
             for (int i = 0; i < peers.size(); i++) {
-                Hash peer= (Hash)peers.get(i);
+                Hash peer= peers.get(i);
                 harvest(peer);
             }
         }
@@ -75,12 +75,12 @@ class HarvesterJob extends JobImpl {
     * Retrieve a list of hashes for peers we want to update
     *
     */
-    private List selectPeersToUpdate() {
-        Map routersByAge = new TreeMap();
-        Set peers = _facade.getAllRouters();
+    private List<Hash> selectPeersToUpdate() {
+        Map<Long, Hash> routersByAge = new TreeMap<Long, Hash>();
+        Set<Hash> peers = _facade.getAllRouters();
         long now = getContext().clock().now();
-        for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
-            Hash peer = (Hash)iter.next();
+        for (Iterator<Hash> iter = peers.iterator(); iter.hasNext(); ) {
+            Hash peer = iter.next();
             RouterInfo info = _facade.lookupRouterInfoLocally(peer);
             if (info != null) {
                 long when = info.getPublished();
@@ -95,9 +95,9 @@ class HarvesterJob extends JobImpl {
         // ok now we have the known peers sorted by date (oldest first),
         // ignoring peers that are new, so lets grab the oldest MAX_PER_RUN
         // entries
-        List rv = new ArrayList();
-        for (Iterator iter = routersByAge.values().iterator(); iter.hasNext(); ) {
-            Hash peer = (Hash)iter.next();
+        List<Hash> rv = new ArrayList<Hash>();
+        for (Iterator<Hash> iter = routersByAge.values().iterator(); iter.hasNext(); ) {
+            Hash peer = iter.next();
             rv.add(peer);
             if (rv.size() >= MAX_PER_RUN)
                 break;

@@ -25,7 +25,6 @@ import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelInfo;
 import net.i2p.router.util.RandomIterator;
 import net.i2p.util.Log;
-import net.i2p.util.VersionComparator;

 /**
  * A traditional Kademlia search that continues to search
@@ -94,10 +93,10 @@ class IterativeSearchJob extends FloodSearchJob {
         _timeoutMs = Math.min(timeoutMs, MAX_SEARCH_TIME);
         _expiration = _timeoutMs + ctx.clock().now();
         _rkey = ctx.routingKeyGenerator().getRoutingKey(key);
-        _toTry = new TreeSet(new XORComparator(_rkey));
-        _unheardFrom = new HashSet(CONCURRENT_SEARCHES);
-        _failedPeers = new HashSet(TOTAL_SEARCH_LIMIT);
-        _sentTime = new ConcurrentHashMap(TOTAL_SEARCH_LIMIT);
+        _toTry = new TreeSet<Hash>(new XORComparator(_rkey));
+        _unheardFrom = new HashSet<Hash>(CONCURRENT_SEARCHES);
+        _failedPeers = new HashSet<Hash>(TOTAL_SEARCH_LIMIT);
+        _sentTime = new ConcurrentHashMap<Hash, Long>(TOTAL_SEARCH_LIMIT);
     }

     @Override
@@ -116,7 +115,7 @@ class IterativeSearchJob extends FloodSearchJob {
             // but we're passing the rkey not the key, so we do it below instead in certain cases.
             floodfillPeers = ((FloodfillPeerSelector)_facade.getPeerSelector()).selectFloodfillParticipants(_rkey, TOTAL_SEARCH_LIMIT, ks);
         } else {
-            floodfillPeers = new ArrayList(TOTAL_SEARCH_LIMIT);
+            floodfillPeers = new ArrayList<Hash>(TOTAL_SEARCH_LIMIT);
         }

         // For testing or local networks... we will
@@ -137,14 +136,14 @@ class IterativeSearchJob extends FloodSearchJob {
             // so this situation should be temporary
             if (_log.shouldLog(Log.WARN))
                 _log.warn("Running netDb searches against the floodfill peers, but we don't know any");
-            List<Hash> all = new ArrayList(_facade.getAllRouters());
+            List<Hash> all = new ArrayList<Hash>(_facade.getAllRouters());
             if (all.isEmpty()) {
                 if (_log.shouldLog(Log.ERROR))
                     _log.error("We don't know any peers at all");
                 failed();
                 return;
             }
-            Iterator<Hash> iter = new RandomIterator(all);
+            Iterator<Hash> iter = new RandomIterator<Hash>(all);
             // Limit non-FF to 3, because we don't sort the FFs ahead of the non-FFS,
             // so once we get some FFs we want to be sure to query them
             for (int i = 0; iter.hasNext() && i < MAX_NON_FF; i++) {

@@ -49,7 +49,7 @@ class KBucketImpl implements KBucket {
     public KBucketImpl(I2PAppContext context, LocalHash local) {
         _context = context;
         _log = context.logManager().getLog(KBucketImpl.class);
-        _entries = new ConcurrentHashSet(2); //all but the last 1 or 2 buckets will be empty
+        _entries = new ConcurrentHashSet<Hash>(2); //all but the last 1 or 2 buckets will be empty
         _lastShuffle = context.clock().now();
         setLocal(local);
     }
@@ -219,8 +219,8 @@ class KBucketImpl implements KBucket {
     /**
      * @deprecated makes a copy, remove toIgnore in KBS instead
     */
-    public Set<Hash> getEntries(Set toIgnoreHashes) {
-        Set<Hash> entries = new HashSet(_entries);
+    public Set<Hash> getEntries(Set<Hash> toIgnoreHashes) {
+        Set<Hash> entries = new HashSet<Hash>(_entries);
         entries.removeAll(toIgnoreHashes);
         return entries;
     }

@@ -94,16 +94,16 @@ class KBucketSet {
     /** @since 0.8.8 */
     public void clear() {
         for (int i = 0; i < _buckets.length; i++) {
-            _buckets[i].setEntries(Collections.EMPTY_SET);
+            _buckets[i].setEntries(Collections.<Hash> emptySet());
         }
         _size.set(0);
         _us.clearXorCache();
     }

-    public Set<Hash> getAll() { return getAll(Collections.EMPTY_SET); };
+    public Set<Hash> getAll() { return getAll(Collections.<Hash> emptySet()); };

     public Set<Hash> getAll(Set<Hash> toIgnore) {
-        Set<Hash> all = new HashSet(1024);
+        Set<Hash> all = new HashSet<Hash>(1024);
         for (int i = 0; i < _buckets.length; i++) {
             all.addAll(_buckets[i].getEntries());
         }

@@ -46,7 +46,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     /** where the data store is pushing the data */
     private String _dbDir;
     // set of Hash objects that we should search on (to fill up a bucket, not to get data)
-    private final Set<Hash> _exploreKeys = new ConcurrentHashSet(64);
+    private final Set<Hash> _exploreKeys = new ConcurrentHashSet<Hash>(64);
     private boolean _initialized;
     /** Clock independent time of when we started up */
     private long _started;
@@ -138,8 +138,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         _context = context;
         _log = _context.logManager().getLog(getClass());
         _peerSelector = createPeerSelector();
-        _publishingLeaseSets = new HashMap(8);
-        _activeRequests = new HashMap(8);
+        _publishingLeaseSets = new HashMap<Hash, RepublishLeaseSetJob>(8);
+        _activeRequests = new HashMap<Hash, SearchJob>(8);
         _reseedChecker = new ReseedChecker(context);
         context.statManager().createRateStat("netDb.lookupDeferred", "how many lookups are deferred?", "NetworkDatabase", new long[] { 60*60*1000 });
         context.statManager().createRateStat("netDb.exploreKeySet", "how many keys are queued for exploration?", "NetworkDatabase", new long[] { 60*60*1000 });
@@ -181,7 +181,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     /** @return unmodifiable set */
     public Set<Hash> getExploreKeys() {
         if (!_initialized)
-            return Collections.EMPTY_SET;
+            return Collections.emptySet();
         return Collections.unmodifiableSet(_exploreKeys);
     }

@@ -323,8 +323,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
      * @param peersToIgnore can be null
      */
     public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) {
-        if (!_initialized) return Collections.EMPTY_SET;
-        return new HashSet(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb));
+        if (!_initialized) return Collections.emptySet();
+        return new HashSet<Hash>(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb));
     }

     /*****
@@ -349,9 +349,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {

     /** get the hashes for all known routers */
     public Set<Hash> getAllRouters() {
-        if (!_initialized) return Collections.EMPTY_SET;
+        if (!_initialized) return Collections.emptySet();
         Set<Map.Entry<Hash, DatabaseEntry>> entries = _ds.getMapEntries();
-        Set<Hash> rv = new HashSet(entries.size());
+        Set<Hash> rv = new HashSet<Hash>(entries.size());
         for (Map.Entry<Hash, DatabaseEntry> entry : entries) {
             if (entry.getValue().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
                 rv.add(entry.getKey());
@@ -988,10 +988,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     @Override
     public Set<LeaseSet> getLeases() {
         if (!_initialized) return null;
-        Set leases = new HashSet();
+        Set<LeaseSet> leases = new HashSet<LeaseSet>();
         for (DatabaseEntry o : getDataStore().getEntries()) {
             if (o.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
-                leases.add(o);
+                leases.add((LeaseSet)o);
         }
         return leases;
     }
@@ -1000,10 +1000,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     @Override
     public Set<RouterInfo> getRouters() {
         if (!_initialized) return null;
-        Set routers = new HashSet();
+        Set<RouterInfo> routers = new HashSet<RouterInfo>();
         for (DatabaseEntry o : getDataStore().getEntries()) {
             if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
-                routers.add(o);
+                routers.add((RouterInfo)o);
         }
         return routers;
     }
@@ -1034,7 +1034,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     }

     /** unused (overridden in FNDF) */
-    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
+    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
         if ( (ds == null) || (key == null) ) {
             if (onFailure != null)
                 _context.jobQueue().addJob(onFailure);

|
||||
public void prepareCache() {
|
||||
synchronized (this) {
|
||||
if (_xorCache == null)
|
||||
_xorCache = new HashMap(MAX_CACHED_XOR);
|
||||
_xorCache = new HashMap<Hash, byte[]>(MAX_CACHED_XOR);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,13 +70,13 @@ class LocalHash extends Hash {
|
||||
synchronized (_xorCache) {
|
||||
int toRemove = _xorCache.size() + 1 - MAX_CACHED_XOR;
|
||||
if (toRemove > 0) {
|
||||
Set keys = new HashSet(toRemove);
|
||||
Set<Hash> keys = new HashSet<Hash>(toRemove);
|
||||
// this removes essentially random keys - we dont maintain any sort
|
||||
// of LRU or age. perhaps we should?
|
||||
int removed = 0;
|
||||
for (Iterator iter = _xorCache.keySet().iterator(); iter.hasNext() && removed < toRemove; removed++)
|
||||
for (Iterator<Hash> iter = _xorCache.keySet().iterator(); iter.hasNext() && removed < toRemove; removed++)
|
||||
keys.add(iter.next());
|
||||
for (Iterator iter = keys.iterator(); iter.hasNext(); )
|
||||
for (Iterator<Hash> iter = keys.iterator(); iter.hasNext(); )
|
||||
_xorCache.remove(iter.next());
|
||||
}
|
||||
distance = DataHelper.xor(key.getData(), getData());
|
||||
|
||||
@@ -24,7 +24,7 @@ class LookupThrottler {
|
||||
private static final long CLEAN_TIME = 60*1000;
|
||||
|
||||
LookupThrottler() {
|
||||
this.counter = new ObjectCounter();
|
||||
this.counter = new ObjectCounter<ReplyTunnel>();
|
||||
SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
|
||||
}
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ public class MessageWrapper {
|
||||
if (skm == null)
|
||||
return null;
|
||||
SessionKey sentKey = new SessionKey();
|
||||
Set<SessionTag> sentTags = new HashSet();
|
||||
Set<SessionTag> sentTags = new HashSet<SessionTag>();
|
||||
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, payload, sentKey, sentTags,
|
||||
NETDB_TAGS_TO_DELIVER, NETDB_LOW_THRESHOLD, skm);
|
||||
if (msg == null)
|
||||
@@ -192,7 +192,7 @@ public class MessageWrapper {
|
||||
private static OneTimeSession generateSession(RouterContext ctx, SessionKeyManager skm) {
|
||||
SessionKey key = ctx.keyGenerator().generateSessionKey();
|
||||
SessionTag tag = new SessionTag(true);
|
||||
Set<SessionTag> tags = new RemovableSingletonSet(tag);
|
||||
Set<SessionTag> tags = new RemovableSingletonSet<SessionTag>(tag);
|
||||
skm.tagsReceived(key, tags, 2*60*1000);
|
||||
return new OneTimeSession(key, tag);
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ class NegativeLookupCache {
|
||||
private static final long CLEAN_TIME = 4*60*1000;
|
||||
|
||||
public NegativeLookupCache() {
|
||||
this.counter = new ObjectCounter();
|
||||
this.counter = new ObjectCounter<Hash>();
|
||||
SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
|
||||
}
|
||||
|
||||
|
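Note: ObjectCounter is an I2P utility class; giving it a type argument (ObjectCounter<ReplyTunnel>, ObjectCounter<Hash>) changes nothing at runtime, it only lets the compiler reject wrong key types. A minimal sketch of what such a typed counter amounts to, as a hypothetical stand-in rather than the real net.i2p.util.ObjectCounter:

import java.util.HashMap;
import java.util.Map;

class Counter<K> {
    private final Map<K, Integer> _map = new HashMap<K, Integer>();

    /** @return the new count for this key */
    synchronized int increment(K key) {
        Integer i = _map.get(key);
        int val = (i == null) ? 1 : i.intValue() + 1;
        _map.put(key, Integer.valueOf(val));
        return val;
    }

    synchronized int count(K key) {
        Integer i = _map.get(key);
        return (i == null) ? 0 : i.intValue();
    }
}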
@@ -96,11 +96,11 @@ class PeerSelector {
*/
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
if (peersToIgnore == null)
peersToIgnore = new HashSet(1);
peersToIgnore = new HashSet<Hash>(1);
peersToIgnore.add(_context.routerHash());
MatchSelectionCollector matches = new MatchSelectionCollector(key, peersToIgnore);
kbuckets.getAll(matches);
List rv = matches.get(maxNumRouters);
List<Hash> rv = matches.get(maxNumRouters);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": "
+ rv + " (not including " + peersToIgnore + ") [allHashes.size = "
@@ -116,7 +116,7 @@ class PeerSelector {
private int _matches;
public MatchSelectionCollector(Hash key, Set<Hash> toIgnore) {
_key = key;
_sorted = new TreeMap();
_sorted = new TreeMap<BigInteger, Hash>();
_toIgnore = toIgnore;
_matches = 0;
}
@@ -138,7 +138,7 @@ class PeerSelector {
}
/** get the first $howMany entries matching */
public List<Hash> get(int howMany) {
List<Hash> rv = new ArrayList(howMany);
List<Hash> rv = new ArrayList<Hash>(howMany);
for (int i = 0; i < howMany; i++) {
if (_sorted.isEmpty())
break;

@@ -196,7 +196,7 @@ class PersistentDataStore extends TransientDataStore {
private volatile boolean _quit;

public Writer() {
_keys = new ConcurrentHashMap(64);
_keys = new ConcurrentHashMap<Hash, DatabaseEntry>(64);
_waitLock = new Object();
}


@@ -49,7 +49,7 @@ class SearchJob extends JobImpl {
private final boolean _keepStats;
private Job _pendingRequeueJob;
private final PeerSelector _peerSelector;
private final List _deferredSearches;
private final List<Search> _deferredSearches;
private boolean _deferredCleared;
private long _startedOn;
private boolean _floodfillPeersExhausted;
@@ -99,7 +99,7 @@ class SearchJob extends JobImpl {
_onFailure = onFailure;
_timeoutMs = timeoutMs;
_keepStats = keepStats;
_deferredSearches = new ArrayList(0);
_deferredSearches = new ArrayList<Search>(0);
_peerSelector = facade.getPeerSelector();
_startedOn = -1;
_expiration = getContext().clock().now() + timeoutMs;
@@ -260,7 +260,7 @@ class SearchJob extends JobImpl {
return;
}
int sent = 0;
Set attempted = _state.getAttempted();
Set<Hash> attempted = _state.getAttempted();
while (sent <= 0) {
//boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
boolean onlyFloodfill = true;
@@ -271,7 +271,7 @@ class SearchJob extends JobImpl {
fail();
return;
}
List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
List<Hash> closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
if ( (closestHashes == null) || (closestHashes.isEmpty()) ) {
if (_state.getPending().isEmpty()) {
// we tried to find some peers, but there weren't any and no one else is going to answer
@@ -290,8 +290,8 @@ class SearchJob extends JobImpl {
return;
} else {
attempted.addAll(closestHashes);
for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
DatabaseEntry ds = _facade.getDataStore().get(peer);
if (ds == null) {
if (_log.shouldLog(Log.INFO))
@@ -365,7 +365,7 @@ class SearchJob extends JobImpl {
*
* @return ordered list of Hash objects
*/
private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
private List<Hash> getClosestRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
@@ -627,11 +627,11 @@ class SearchJob extends JobImpl {
_facade.sendStore(_state.getTarget(), ds, null, null, RESEND_TIMEOUT, _state.getSuccessful());
}
} else {
Set sendTo = _state.getRepliedPeers(); // _state.getFailed();
Set<Hash> sendTo = _state.getRepliedPeers(); // _state.getFailed();
sendTo.addAll(_state.getPending());
int numSent = 0;
for (Iterator iter = sendTo.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
for (Iterator<Hash> iter = sendTo.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
RouterInfo peerInfo = _facade.lookupRouterInfoLocally(peer);
if (peerInfo == null) continue;
if (resend(peerInfo, (LeaseSet)ds))
@@ -726,10 +726,10 @@ class SearchJob extends JobImpl {
}

private void handleDeferred(boolean success) {
List deferred = null;
List<Search> deferred = null;
synchronized (_deferredSearches) {
if (!_deferredSearches.isEmpty()) {
deferred = new ArrayList(_deferredSearches);
deferred = new ArrayList<Search>(_deferredSearches);
_deferredSearches.clear();
}
_deferredCleared = true;
@@ -737,7 +737,7 @@ class SearchJob extends JobImpl {
if (deferred != null) {
long now = getContext().clock().now();
for (int i = 0; i < deferred.size(); i++) {
Search cur = (Search)deferred.get(i);
Search cur = deferred.get(i);
if (cur.getExpiration() < now)
getContext().jobQueue().addJob(cur.getOnFail());
else if (success)

@@ -31,12 +31,12 @@ class SearchState {
public SearchState(RouterContext context, Hash key) {
_context = context;
_searchKey = key;
_pendingPeers = new HashSet(16);
_attemptedPeers = new HashSet(16);
_failedPeers = new HashSet(16);
_successfulPeers = new HashSet(16);
_pendingPeerTimes = new HashMap(16);
_repliedPeers = new HashSet(16);
_pendingPeers = new HashSet<Hash>(16);
_attemptedPeers = new HashSet<Hash>(16);
_failedPeers = new HashSet<Hash>(16);
_successfulPeers = new HashSet<Hash>(16);
_pendingPeerTimes = new HashMap<Hash, Long>(16);
_repliedPeers = new HashSet<Hash>(16);
_completed = -1;
_started = _context.clock().now();
}
@@ -44,12 +44,12 @@ class SearchState {
public Hash getTarget() { return _searchKey; }
public Set<Hash> getPending() {
synchronized (_pendingPeers) {
return (Set)_pendingPeers.clone();
return (Set<Hash>)_pendingPeers.clone();
}
}
public Set<Hash> getAttempted() {
synchronized (_attemptedPeers) {
return (Set)_attemptedPeers.clone();
return (Set<Hash>)_attemptedPeers.clone();
}
}
public Set<Hash> getClosestAttempted(int max) {
@@ -60,10 +60,10 @@ class SearchState {

private Set<Hash> locked_getClosest(Set<Hash> peers, int max, Hash target) {
if (_attemptedPeers.size() <= max)
return new HashSet(_attemptedPeers);
TreeSet closest = new TreeSet(new XORComparator(target));
return new HashSet<Hash>(_attemptedPeers);
TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator(target));
closest.addAll(_attemptedPeers);
Set<Hash> rv = new HashSet(max);
Set<Hash> rv = new HashSet<Hash>(max);
int i = 0;
for (Iterator<Hash> iter = closest.iterator(); iter.hasNext() && i < max; i++) {
rv.add(iter.next());
@@ -78,12 +78,12 @@ class SearchState {
}
public Set<Hash> getSuccessful() {
synchronized (_successfulPeers) {
return (Set)_successfulPeers.clone();
return (Set<Hash>)_successfulPeers.clone();
}
}
public Set<Hash> getFailed() {
synchronized (_failedPeers) {
return (Set)_failedPeers.clone();
return (Set<Hash>)_failedPeers.clone();
}
}
public boolean completed() { return _completed != -1; }
@@ -155,7 +155,7 @@ class SearchState {
}
}

public Set getRepliedPeers() { synchronized (_repliedPeers) { return (Set)_repliedPeers.clone(); } }
public Set<Hash> getRepliedPeers() { synchronized (_repliedPeers) { return (Set<Hash>)_repliedPeers.clone(); } }

public void replyTimeout(Hash peer) {
synchronized (_pendingPeers) {

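Note: the SearchState getters above swap a raw cast for a generic one, but HashSet.clone() is declared to return Object, so the cast stays unchecked either way. A sketch of both variants with JDK types; the copy constructor is the warning-free equivalent when a shallow snapshot is all that is needed:

import java.util.HashSet;
import java.util.Set;

class Snapshot {
    private final HashSet<String> _pending = new HashSet<String>();

    @SuppressWarnings("unchecked")
    Set<String> getPendingViaClone() {
        synchronized (_pending) {
            return (Set<String>) _pending.clone();   // still an unchecked cast
        }
    }

    Set<String> getPendingViaCopy() {
        synchronized (_pending) {
            return new HashSet<String>(_pending);    // no warning, same result
        }
    }
}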
@@ -133,7 +133,7 @@ class StartExplorersJob extends JobImpl {
Set<Hash> queued = _facade.getExploreKeys();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Keys waiting for exploration: " + queued.size());
Set<Hash> rv = new HashSet(num);
Set<Hash> rv = new HashSet<Hash>(num);
for (Hash key : queued) {
if (rv.size() >= num) break;
rv.add(key);

@@ -234,7 +234,7 @@ class StoreJob extends JobImpl {
private List<Hash> getClosestFloodfillRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
KBucketSet ks = _facade.getKBuckets();
if (ks == null) return new ArrayList();
if (ks == null) return new ArrayList<Hash>();
return ((FloodfillPeerSelector)_peerSelector).selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
}


@@ -12,6 +12,7 @@ import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.MessageWrapper.WrappedMessage;

/**
* Tracks the state of a StoreJob
@@ -38,16 +39,16 @@ class StoreState {
_context = ctx;
_key = key;
_data = data;
_pendingPeers = new HashSet(4);
_pendingPeerTimes = new HashMap(4);
_pendingMessages = new ConcurrentHashMap(4);
_attemptedPeers = new HashSet(8);
_pendingPeers = new HashSet<Hash>(4);
_pendingPeerTimes = new HashMap<Hash, Long>(4);
_pendingMessages = new ConcurrentHashMap<Hash, WrappedMessage>(4);
_attemptedPeers = new HashSet<Hash>(8);
if (toSkip != null) {
_attemptedPeers.addAll(toSkip);
_completeCount = toSkip.size();
}
_failedPeers = new HashSet(8);
_successfulPeers = new HashSet(4);
_failedPeers = new HashSet<Hash>(8);
_successfulPeers = new HashSet<Hash>(4);
//_successfulExploratoryPeers = new HashSet(16);
_completed = -1;
_started = _context.clock().now();

@@ -33,7 +33,7 @@ class TransientDataStore implements DataStore {
public TransientDataStore(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_data = new ConcurrentHashMap(1024);
_data = new ConcurrentHashMap<Hash, DatabaseEntry>(1024);
if (_log.shouldLog(Log.INFO))
_log.info("Data Store initialized");
}

@@ -251,7 +251,7 @@ public class Reseeder {
* @return count of routerinfos successfully fetched
*/
private int reseed(boolean echoStatus) {
List<String> URLList = new ArrayList();
List<String> URLList = new ArrayList<String>();
String URLs = _context.getProperty(PROP_RESEED_URL);
boolean defaulted = URLs == null;
boolean SSLDisable = _context.getBooleanProperty(PROP_SSL_DISABLE);
@@ -267,7 +267,7 @@ public class Reseeder {
Collections.shuffle(URLList, _context.random());
if (defaulted && !SSLDisable) {
// put the non-SSL at the end of the SSL
List<String> URLList2 = new ArrayList();
List<String> URLList2 = new ArrayList<String>();
tok = new StringTokenizer(DEFAULT_SEED_URL, " ,");
while (tok.hasMoreTokens())
URLList2.add(tok.nextToken().trim());
@@ -334,7 +334,7 @@ public class Reseeder {
String content = new String(contentRaw);
// This isn't really URLs, but Base64 hashes
// but they may include % encoding
Set<String> urls = new HashSet(1024);
Set<String> urls = new HashSet<String>(1024);
Hash ourHash = _context.routerHash();
String ourB64 = ourHash != null ? ourHash.toBase64() : null;
int cur = 0;
@@ -370,7 +370,7 @@ public class Reseeder {
return 0;
}

List<String> urlList = new ArrayList(urls);
List<String> urlList = new ArrayList<String>(urls);
Collections.shuffle(urlList, _context.random());
int fetched = 0;
int errors = 0;

@@ -73,10 +73,10 @@ class PeerManager {
_persistenceHelper = new ProfilePersistenceHelper(context);
_organizer = context.profileOrganizer();
_organizer.setUs(context.routerHash());
_capabilitiesByPeer = new ConcurrentHashMap(256);
_peersByCapability = new HashMap(TRACKED_CAPS.length());
_capabilitiesByPeer = new ConcurrentHashMap<Hash, String>(256);
_peersByCapability = new HashMap<Character, Set<Hash>>(TRACKED_CAPS.length());
for (int i = 0; i < TRACKED_CAPS.length(); i++)
_peersByCapability.put(Character.valueOf(Character.toLowerCase(TRACKED_CAPS.charAt(i))), new ConcurrentHashSet());
_peersByCapability.put(Character.valueOf(Character.toLowerCase(TRACKED_CAPS.charAt(i))), new ConcurrentHashSet<Hash>());
loadProfilesInBackground();
////_context.jobQueue().addJob(new EvaluateProfilesJob(_context));
//SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, REORGANIZE_TIME);
@@ -119,7 +119,7 @@ class PeerManager {
void clearProfiles() {
_organizer.clearProfiles();
_capabilitiesByPeer.clear();
for (Set p : _peersByCapability.values())
for (Set<Hash> p : _peersByCapability.values())
p.clear();
}

@@ -177,9 +177,9 @@ class PeerManager {
* Only used by PeerTestJob (PURPOSE_TEST)
*/
List<Hash> selectPeers(PeerSelectionCriteria criteria) {
Set<Hash> peers = new HashSet(criteria.getMinimumRequired());
Set<Hash> peers = new HashSet<Hash>(criteria.getMinimumRequired());
// not a singleton, SANFP adds to it
Set<Hash> exclude = new HashSet(1);
Set<Hash> exclude = new HashSet<Hash>(1);
exclude.add(_context.routerHash());
switch (criteria.getPurpose()) {
case PeerSelectionCriteria.PURPOSE_TEST:
@@ -221,7 +221,7 @@ class PeerManager {
}
if (_log.shouldLog(Log.INFO))
_log.info("Peers selected: " + peers);
return new ArrayList(peers);
return new ArrayList<Hash>(peers);
}

/**
@@ -300,6 +300,6 @@ class PeerManager {
Set<Hash> peers = locked_getPeers(capability);
if (peers != null)
return Collections.unmodifiableSet(peers);
return Collections.EMPTY_SET;
return Collections.emptySet();
}
}

@@ -89,7 +89,7 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
* @return non-null unmodifiable set
*/
public Set<Hash> getPeersByCapability(char capability) {
if (_manager == null) return Collections.EMPTY_SET;
if (_manager == null) return Collections.emptySet();
return _manager.getPeersByCapability(capability);
}


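Note: Collections.EMPTY_SET is a raw Set constant, so returning it from a method declared Set<Hash> trips an unchecked warning; the generic Collections.emptySet() infers its element type from the return context and hands back the same immutable singleton. Sketch:

import java.util.Collections;
import java.util.Set;

class EmptyReturn {
    static Set<String> names(boolean any) {
        if (!any)
            return Collections.emptySet();   // inferred as Set<String>
        return Collections.singleton("example");
    }
}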
@@ -73,7 +73,7 @@ public class PeerProfile {
* Only routers in these countries will use a same-country metric.
* Yes this is an arbitrary cutoff.
*/
private static final Set<String> _bigCountries = new HashSet();
private static final Set<String> _bigCountries = new HashSet<String>();

static {
String[] big = new String[] { "fr", "de", "ru", "ua", "us" };

@@ -69,12 +69,12 @@ public class PeerTestJob extends JobImpl {

public void runJob() {
if (!_keepTesting) return;
Set peers = selectPeersToTest();
Set<RouterInfo> peers = selectPeersToTest();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Testing " + peers.size() + " peers");

for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
RouterInfo peer = (RouterInfo)iter.next();
for (Iterator<RouterInfo> iter = peers.iterator(); iter.hasNext(); ) {
RouterInfo peer = iter.next();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Testing peer " + peer.getIdentity().getHash().toBase64());
testPeer(peer);
@@ -87,19 +87,19 @@ public class PeerTestJob extends JobImpl {
*
* @return set of RouterInfo structures
*/
private Set selectPeersToTest() {
private Set<RouterInfo> selectPeersToTest() {
PeerSelectionCriteria criteria = new PeerSelectionCriteria();
criteria.setMinimumRequired(getTestConcurrency());
criteria.setMaximumRequired(getTestConcurrency());
criteria.setPurpose(PeerSelectionCriteria.PURPOSE_TEST);
List peerHashes = _manager.selectPeers(criteria);
List<Hash> peerHashes = _manager.selectPeers(criteria);

if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer selection found " + peerHashes.size() + " peers");

Set peers = new HashSet(peerHashes.size());
for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
Set<RouterInfo> peers = new HashSet<RouterInfo>(peerHashes.size());
for (Iterator<Hash> iter = peerHashes.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
RouterInfo peerInfo = getContext().netDb().lookupRouterInfoLocally(peer);
if (peerInfo != null) {
peers.add(peerInfo);

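Note: once selectPeersToTest() returns Set<RouterInfo>, the explicit Iterator plus cast in runJob() could shrink further to an enhanced for loop; the commit keeps the loop shape and only types the iterator. The shorter form, sketched with String standing in for RouterInfo:

import java.util.Set;

class ForEach {
    // the loop body would call testPeer(peer)
    static int testAll(Set<String> peers) {
        int tested = 0;
        for (String peer : peers) {   // no Iterator, no cast
            System.out.println("testing " + peer);
            tested++;
        }
        return tested;
    }
}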
@@ -2,13 +2,10 @@ package net.i2p.router.peermanager;

import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -97,13 +94,13 @@ public class ProfileOrganizer {
_context = context;
_log = context.logManager().getLog(ProfileOrganizer.class);
_comp = new InverseCapacityComparator();
_fastPeers = new HashMap(32);
_highCapacityPeers = new HashMap(64);
_wellIntegratedPeers = new HashMap(128);
_notFailingPeers = new HashMap(256);
_notFailingPeersList = new ArrayList(256);
_failingPeers = new HashMap(16);
_strictCapacityOrder = new TreeSet(_comp);
_fastPeers = new HashMap<Hash, PeerProfile>(32);
_highCapacityPeers = new HashMap<Hash, PeerProfile>(64);
_wellIntegratedPeers = new HashMap<Hash, PeerProfile>(128);
_notFailingPeers = new HashMap<Hash, PeerProfile>(256);
_notFailingPeersList = new ArrayList<Hash>(256);
_failingPeers = new HashMap<Hash, PeerProfile>(16);
_strictCapacityOrder = new TreeSet<PeerProfile>(_comp);
_persistenceHelper = new ProfilePersistenceHelper(_context);

_context.statManager().createRateStat("peer.profileSortTime", "How long the reorg takes sorting peers", "Peers", new long[] { 60*60*1000 });
@@ -219,7 +216,7 @@ public class ProfileOrganizer {
return old;
}

private int count(Map m) {
private int count(Map<? extends Object, ? extends Object> m) {
getReadLock();
try {
return m.size();
@@ -567,7 +564,7 @@ public class ProfileOrganizer {
*/
private void selectActiveNotFailingPeers2(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
if (matches.size() < howMany) {
Map<Hash, PeerProfile> activePeers = new HashMap();
Map<Hash, PeerProfile> activePeers = new HashMap<Hash, PeerProfile>();
getReadLock();
try {
for (Iterator<Map.Entry<Hash, PeerProfile>> iter = _notFailingPeers.entrySet().iterator(); iter.hasNext(); ) {
@@ -604,11 +601,11 @@ public class ProfileOrganizer {
if (matches.size() < howMany) {
int orig = matches.size();
int needed = howMany - orig;
List selected = new ArrayList(needed);
List<Hash> selected = new ArrayList<Hash>(needed);
getReadLock();
try {
// use RandomIterator to avoid shuffling the whole thing
for (Iterator<Hash> iter = new RandomIterator(_notFailingPeersList); (selected.size() < needed) && iter.hasNext(); ) {
for (Iterator<Hash> iter = new RandomIterator<Hash>(_notFailingPeersList); (selected.size() < needed) && iter.hasNext(); ) {
Hash cur = iter.next();
if (matches.contains(cur) ||
(exclude != null && exclude.contains(cur))) {
@@ -666,9 +663,9 @@ public class ProfileOrganizer {
getReadLock();
try {
count = _notFailingPeers.size();
n = new ArrayList(_notFailingPeers.keySet());
n = new ArrayList<Hash>(_notFailingPeers.keySet());
} finally { releaseReadLock(); }
List<Hash> l = new ArrayList(count / 4);
List<Hash> l = new ArrayList<Hash>(count / 4);
for (Iterator<Hash> iter = n.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
if (_context.commSystem().wasUnreachable(peer))
@@ -719,7 +716,7 @@ public class ProfileOrganizer {
try {
long cutoff = _context.clock().now() - (20*1000);
int count = _notFailingPeers.size();
List<Hash> l = new ArrayList(count / 128);
List<Hash> l = new ArrayList<Hash>(count / 128);
for (Iterator<PeerProfile> iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
PeerProfile prof = iter.next();
if (prof.getTunnelHistory().getLastRejectedBandwidth() > cutoff)
@@ -736,7 +733,7 @@ public class ProfileOrganizer {
public Set<Hash> selectAllPeers() {
getReadLock();
try {
Set<Hash> allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _highCapacityPeers.size() + _fastPeers.size());
Set<Hash> allPeers = new HashSet<Hash>(_failingPeers.size() + _notFailingPeers.size() + _highCapacityPeers.size() + _fastPeers.size());
allPeers.addAll(_failingPeers.keySet());
allPeers.addAll(_notFailingPeers.keySet());
allPeers.addAll(_highCapacityPeers.keySet());
@@ -806,7 +803,7 @@ public class ProfileOrganizer {
//allPeers.addAll(_highCapacityPeers.values());
//allPeers.addAll(_fastPeers.values());

Set<PeerProfile> reordered = new TreeSet(_comp);
Set<PeerProfile> reordered = new TreeSet<PeerProfile>(_comp);
long sortStart = System.currentTimeMillis();
for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
PeerProfile prof = iter.next();
@@ -932,7 +929,7 @@ public class ProfileOrganizer {
if (_log.shouldLog(Log.INFO))
_log.info("Need to explicitly demote " + numToDemote + " peers from the fast group");
// sort by speed, slowest-first
Set<PeerProfile> sorted = new TreeSet(new SpeedComparator());
Set<PeerProfile> sorted = new TreeSet<PeerProfile>(new SpeedComparator());
sorted.addAll(_fastPeers.values());
Iterator<PeerProfile> iter = sorted.iterator();
for (int i = 0; i < numToDemote && iter.hasNext(); i++) {
@@ -1024,7 +1021,7 @@ public class ProfileOrganizer {
private void locked_calculateThresholds(Set<PeerProfile> allPeers) {
double totalCapacity = 0;
double totalIntegration = 0;
Set<PeerProfile> reordered = new TreeSet(_comp);
Set<PeerProfile> reordered = new TreeSet<PeerProfile>(_comp);
for (Iterator<PeerProfile> iter = allPeers.iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();

@@ -1217,10 +1214,10 @@ public class ProfileOrganizer {
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches, int mask) {
List<Hash> all = new ArrayList(peers.keySet());
Set<Integer> IPSet = new HashSet(8);
List<Hash> all = new ArrayList<Hash>(peers.keySet());
Set<Integer> IPSet = new HashSet<Integer>(8);
// use RandomIterator to avoid shuffling the whole thing
for (Iterator<Hash> iter = new RandomIterator(all); (matches.size() < howMany) && iter.hasNext(); ) {
for (Iterator<Hash> iter = new RandomIterator<Hash>(all); (matches.size() < howMany) && iter.hasNext(); ) {
Hash peer = iter.next();
if (toExclude != null && toExclude.contains(peer))
continue;
@@ -1262,7 +1259,7 @@ public class ProfileOrganizer {
* @return an opaque set of masked IPs for this peer
*/
private Set<Integer> maskedIPSet(Hash peer, int mask) {
Set<Integer> rv = new HashSet(4);
Set<Integer> rv = new HashSet<Integer>(4);
byte[] commIP = _context.commSystem().getIP(peer);
if (commIP != null)
rv.add(maskedIP(commIP, mask));
@@ -1299,7 +1296,7 @@ public class ProfileOrganizer {
}

/** does a contain any of the elements in b? */
private static boolean containsAny(Set a, Set b) {
private static boolean containsAny(Set<? extends Object> a, Set<? extends Object> b) {
for (Object o : b) {
if (a.contains(o))
return true;
@@ -1320,9 +1317,9 @@ public class ProfileOrganizer {
*</pre>
*/
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches, Hash randomKey, int subTierMode) {
List<Hash> all = new ArrayList(peers.keySet());
List<Hash> all = new ArrayList<Hash>(peers.keySet());
// use RandomIterator to avoid shuffling the whole thing
for (Iterator<Hash> iter = new RandomIterator(all); (matches.size() < howMany) && iter.hasNext(); ) {
for (Iterator<Hash> iter = new RandomIterator<Hash>(all); (matches.size() < howMany) && iter.hasNext(); ) {
Hash peer = iter.next();
if (toExclude != null && toExclude.contains(peer))
continue;

@@ -178,7 +178,7 @@ class ProfilePersistenceHelper {
public Set<PeerProfile> readProfiles() {
long start = _context.clock().now();
List<File> files = selectFiles();
Set<PeerProfile> profiles = new HashSet(files.size());
Set<PeerProfile> profiles = new HashSet<PeerProfile>(files.size());
for (File f : files) {
PeerProfile profile = readProfile(f);
if (profile != null)
@@ -202,7 +202,7 @@ class ProfilePersistenceHelper {
File files[] = _profileDir.listFiles(filter);
if (files != null && files.length > 0)
migrate(files);
List rv = new ArrayList(1024);
List<File> rv = new ArrayList<File>(1024);
for (int j = 0; j < B64.length(); j++) {
File subdir = new File(_profileDir, DIR_PREFIX + B64.charAt(j));
files = subdir.listFiles(filter);

@@ -151,7 +151,7 @@ public class ClientAppConfig {
try {
DataHelper.loadProps(clientApps, cfgFile);
} catch (IOException ioe) {
return Collections.EMPTY_LIST;
return Collections.emptyList();
}
return getClientApps(clientApps);
}
@@ -162,7 +162,7 @@ public class ClientAppConfig {
* @since 0.7.12
*/
private static List<ClientAppConfig> getClientApps(Properties clientApps) {
List<ClientAppConfig> rv = new ArrayList(8);
List<ClientAppConfig> rv = new ArrayList<ClientAppConfig>(8);
int i = 0;
while (true) {
String className = clientApps.getProperty(PREFIX + i + ".main");

@@ -10,7 +10,6 @@ package net.i2p.router.startup;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Properties;

@@ -36,7 +36,7 @@ public class LoadClientAppsJob extends JobImpl {
if (_loaded) return;
_loaded = true;
}
List apps = ClientAppConfig.getClientApps(getContext());
List<ClientAppConfig> apps = ClientAppConfig.getClientApps(getContext());
if (apps.isEmpty()) {
_log.error("Warning - No client apps or router console configured - we are just a router");
System.err.println("Warning - No client apps or router console configured - we are just a router");
@@ -106,7 +106,7 @@ public class LoadClientAppsJob extends JobImpl {
* @return non-null, 0-length if args is null
*/
public static String[] parseArgs(String args) {
List<String> argList = new ArrayList(4);
List<String> argList = new ArrayList<String>(4);
if (args != null) {
char data[] = args.toCharArray();
StringBuilder buf = new StringBuilder(32);

@@ -15,7 +15,6 @@ import java.io.InputStream;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

import net.i2p.crypto.KeyGenerator;
import net.i2p.data.DataFormatException;
import net.i2p.data.PrivateKey;
import net.i2p.data.PublicKey;

@@ -35,8 +35,8 @@ public class RouterAppManager implements ClientAppManager {
public RouterAppManager(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(RouterAppManager.class);
_clients = new ConcurrentHashMap(16);
_registered = new ConcurrentHashMap(8);
_clients = new ConcurrentHashMap<ClientApp, String[]>(16);
_registered = new ConcurrentHashMap<String, ClientApp>(8);
ctx.addShutdownTask(new Shutdown());
}

@@ -175,7 +175,7 @@ public class RouterAppManager implements ClientAppManager {
* @since 0.9.6
*/
public synchronized void shutdown() {
Set<ClientApp> apps = new HashSet(_clients.keySet());
Set<ClientApp> apps = new HashSet<ClientApp>(_clients.keySet());
for (ClientApp app : apps) {
ClientAppState state = app.getState();
if (state == RUNNING || state == STARTING) {
@@ -214,7 +214,7 @@ public class RouterAppManager implements ClientAppManager {
* @since 0.9.6
*/
private void toString1(StringBuilder buf) {
List<String> list = new ArrayList(_clients.size());
List<String> list = new ArrayList<String>(_clients.size());
for (Map.Entry<ClientApp, String[]> entry : _clients.entrySet()) {
ClientApp key = entry.getKey();
String[] val = entry.getValue();
@@ -231,7 +231,7 @@ public class RouterAppManager implements ClientAppManager {
* @since 0.9.6
*/
private void toString2(StringBuilder buf) {
List<String> list = new ArrayList(_registered.size());
List<String> list = new ArrayList<String>(_registered.size());
for (Map.Entry<String, ClientApp> entry : _registered.entrySet()) {
String key = entry.getKey();
ClientApp val = entry.getValue();

@@ -1,7 +1,5 @@
package net.i2p.router.tasks;

import java.io.File;

import net.i2p.data.DataHelper;
import net.i2p.router.Job;
import net.i2p.router.CommSystemFacade;
@@ -10,7 +8,6 @@ import net.i2p.router.RouterContext;
import net.i2p.router.util.EventLog;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.ShellCommand;
import net.i2p.util.Log;
import net.i2p.util.SystemVersion;


@@ -65,7 +65,7 @@ class NtpClient {
public static long currentTime(String serverNames[]) {
if (serverNames == null)
throw new IllegalArgumentException("No NTP servers specified");
ArrayList names = new ArrayList(serverNames.length);
ArrayList<String> names = new ArrayList<String>(serverNames.length);
for (int i = 0; i < serverNames.length; i++)
names.add(serverNames[i]);
Collections.shuffle(names);
@@ -87,7 +87,7 @@ class NtpClient {
public static long[] currentTimeAndStratum(String serverNames[]) {
if (serverNames == null)
throw new IllegalArgumentException("No NTP servers specified");
ArrayList names = new ArrayList(serverNames.length);
ArrayList<String> names = new ArrayList<String>(serverNames.length);
for (int i = 0; i < serverNames.length; i++)
names.add(serverNames[i]);
Collections.shuffle(names);

@@ -61,8 +61,8 @@ public class RouterTimestamper extends Timestamper {
public RouterTimestamper(I2PAppContext ctx, UpdateListener lsnr, boolean daemon) {
super();
// moved here to prevent problems with synchronized statements.
_servers = new ArrayList(3);
_listeners = new CopyOnWriteArrayList();
_servers = new ArrayList<String>(3);
_listeners = new CopyOnWriteArrayList<UpdateListener>();
_context = ctx;
_daemon = daemon;
// DO NOT initialize _log here, stack overflow via LogManager init loop
@@ -178,7 +178,7 @@ public class RouterTimestamper extends Timestamper {
lastFailed = !queryTime(_servers.toArray(new String[_servers.size()]));
} catch (IllegalArgumentException iae) {
if ( (!_initialized) && (_log.shouldLog(Log.ERROR)) ) {
List<String> all = new ArrayList();
List<String> all = new ArrayList<String>();
if (_priorityServers != null)
all.addAll(_priorityServers);
all.addAll(_servers);
@@ -299,7 +299,7 @@ public class RouterTimestamper extends Timestamper {
country = country.toLowerCase(Locale.US);
}
if (country != null && country.length() > 0) {
_priorityServers = new ArrayList(3);
_priorityServers = new ArrayList<String>(3);
for (int i = 0; i < 3; i++)
_priorityServers.add(i + "." + country + ".pool.ntp.org");
} else {

@@ -56,7 +56,7 @@ abstract class BadCountries {
/* Vietnam */ "VN",
/* Yemen */ "YE"
};
_countries = new HashSet(Arrays.asList(c));
_countries = new HashSet<String>(Arrays.asList(c));
}

/** @param country non-null, two letter code, case-independent */

@@ -12,12 +12,9 @@ import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Vector;

import net.i2p.data.Hash;
@@ -30,7 +27,6 @@ import net.i2p.router.transport.udp.UDPTransport;
import net.i2p.router.util.EventLog;
import net.i2p.util.Addresses;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.SimpleTimer2;
import net.i2p.util.Translate;
@@ -191,7 +187,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
// No, don't do this, it makes it almost impossible to build inbound tunnels
//if (_context.router().isHidden())
// return Collections.EMPTY_SET;
List<RouterAddress> addresses = new ArrayList(_manager.getAddresses());
List<RouterAddress> addresses = new ArrayList<RouterAddress>(_manager.getAddresses());
if (_log.shouldLog(Log.INFO))
_log.info("Creating addresses: " + addresses, new Exception("creator"));
return addresses;

@@ -91,8 +91,8 @@ public class FIFOBandwidthLimiter {
_context.statManager().createRateStat("bwLimiter.pendingInboundRequests", "How many inbound requests are ahead of the current one (ignoring ones with 0)?", "BandwidthLimiter", new long[] { 5*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.outboundDelayedTime", "How long it takes to honor an outbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 5*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 5*60*1000l, 60*60*1000l });
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_pendingInboundRequests = new ArrayList<Request>(16);
_pendingOutboundRequests = new ArrayList<Request>(16);
_lastTotalSent = _totalAllocatedOutboundBytes.get();
_lastTotalReceived = _totalAllocatedInboundBytes.get();
_lastStatsUpdated = now();
@@ -796,7 +796,7 @@ public class FIFOBandwidthLimiter {
* @param priority 0 for now
*/
public SimpleRequest(int bytes, int priority) {
satisfiedBuffer = new ArrayList(1);
satisfiedBuffer = new ArrayList<Request>(1);
_total = bytes;
_priority = priority;
// following two are temp until switch to PBQ

@@ -7,6 +7,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;

import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.router.transport.FIFOBandwidthLimiter.Request;
import net.i2p.util.Log;

/**
@@ -92,7 +93,7 @@ public class FIFOBandwidthRefiller implements Runnable {
public void run() {
// bootstrap 'em with nothing
_lastRefillTime = _limiter.now();
List<FIFOBandwidthLimiter.Request> buffer = new ArrayList(2);
List<FIFOBandwidthLimiter.Request> buffer = new ArrayList<Request>(2);
while (_isRunning) {
long now = _limiter.now();
if (now >= _lastCheckConfigTime + _configCheckPeriodMs) {

@@ -8,9 +8,7 @@ import java.io.BufferedReader;
import java.io.IOException;
import java.io.File;
import java.io.FileInputStream;
import java.net.InetAddress;
import java.io.InputStreamReader;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
@@ -62,12 +60,12 @@ class GeoIP {
public GeoIP(RouterContext context) {
_context = context;
_log = context.logManager().getLog(GeoIP.class);
_codeToName = new ConcurrentHashMap(512);
_codeCache = new ConcurrentHashMap(512);
_IPToCountry = new ConcurrentHashMap();
_pendingSearch = new ConcurrentHashSet();
_pendingIPv6Search = new ConcurrentHashSet();
_notFound = new ConcurrentHashSet();
_codeToName = new ConcurrentHashMap<String, String>(512);
_codeCache = new ConcurrentHashMap<String, String>(512);
_IPToCountry = new ConcurrentHashMap<Long, String>();
_pendingSearch = new ConcurrentHashSet<Long>();
_pendingIPv6Search = new ConcurrentHashSet<Long>();
_notFound = new ConcurrentHashSet<Long>();
_lock = new AtomicBoolean();
readCountryFile();
}

@@ -154,7 +154,7 @@ class GeoIPv6 {
*/
private static boolean compressGeoIPv6CSVFiles(List<File> inFiles, File outFile) {
boolean DEBUG = false;
List<V6Entry> entries = new ArrayList(20000);
List<V6Entry> entries = new ArrayList<V6Entry>(20000);
for (File geoFile : inFiles) {
int count = 0;
InputStream in = null;
@@ -342,7 +342,7 @@ class GeoIPv6 {
System.err.println("Usage: GeoIPv6 infile1.csv [infile2.csv...] outfile.dat.gz");
System.exit(1);
}
List<File> infiles = new ArrayList();
List<File> infiles = new ArrayList<File>();
for (int i = 0; i < args.length - 1; i++) {
infiles.add(new File(args[i]));
}
@@ -353,6 +353,6 @@ class GeoIPv6 {
System.exit(1);
}
// readback for testing
readGeoIPFile(outfile, new Long[] { Long.MAX_VALUE }, Collections.EMPTY_MAP, new Log(GeoIPv6.class));
readGeoIPFile(outfile, new Long[] { Long.MAX_VALUE }, Collections.<String, String> emptyMap(), new Log(GeoIPv6.class));
}
}

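Note: the GeoIPv6 readback fix needs the explicit type witness because, before Java 8 target typing, a bare Collections.emptyMap() in argument position is inferred as Map<Object, Object> and fails to compile against a Map<String, String> parameter. Sketch:

import java.util.Collections;
import java.util.Map;

class Witness {
    static int size(Map<String, String> m) {
        return m.size();
    }

    public static void main(String[] args) {
        // size(Collections.emptyMap()) does not compile on Java 5-7;
        // the witness pins both type parameters explicitly.
        System.out.println(size(Collections.<String, String>emptyMap()));
    }
}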
@@ -51,9 +51,9 @@ public class OutboundMessageRegistry {
public OutboundMessageRegistry(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(OutboundMessageRegistry.class);
_selectors = new ArrayList(64);
_selectorToMessage = new HashMap(64);
_activeMessages = new ConcurrentHashSet(64);
_selectors = new ArrayList<MessageSelector>(64);
_selectorToMessage = new HashMap<MessageSelector, Object>(64);
_activeMessages = new ConcurrentHashSet<OutNetMessage>(64);
_cleanupTask = new CleanupTask();
}

@@ -108,10 +108,10 @@ public class OutboundMessageRegistry {
MessageSelector sel = _selectors.get(i);
boolean isMatch = sel.isMatch(message);
if (isMatch) {
if (matchedSelectors == null) matchedSelectors = new ArrayList(1);
if (matchedSelectors == null) matchedSelectors = new ArrayList<MessageSelector>(1);
matchedSelectors.add(sel);
if (!sel.continueMatching()) {
if (removedSelectors == null) removedSelectors = new ArrayList(1);
if (removedSelectors == null) removedSelectors = new ArrayList<MessageSelector>(1);
removedSelectors.add(sel);
//iter.remove();
_selectors.remove(i);
@@ -123,11 +123,11 @@ public class OutboundMessageRegistry {

List<OutNetMessage> rv = null;
if (matchedSelectors != null) {
rv = new ArrayList(matchedSelectors.size());
rv = new ArrayList<OutNetMessage>(matchedSelectors.size());
for (MessageSelector sel : matchedSelectors) {
boolean removed = false;
OutNetMessage msg = null;
List msgs = null;
List<OutNetMessage> msgs = null;
synchronized (_selectorToMessage) {
Object o = null;
if ( (removedSelectors != null) && (removedSelectors.contains(sel)) ) {
@@ -154,7 +154,7 @@ public class OutboundMessageRegistry {
}
}
} else {
rv = Collections.EMPTY_LIST;
rv = Collections.emptyList();
}

return rv;
@@ -202,11 +202,11 @@ public class OutboundMessageRegistry {
synchronized (_selectorToMessage) {
Object oldMsg = _selectorToMessage.put(sel, msg);
if (oldMsg != null) {
List multi = null;
List<OutNetMessage> multi = null;
if (oldMsg instanceof OutNetMessage) {
//multi = Collections.synchronizedList(new ArrayList(4));
multi = new ArrayList(4);
multi.add(oldMsg);
multi = new ArrayList<OutNetMessage>(4);
multi.add((OutNetMessage)oldMsg);
multi.add(msg);
_selectorToMessage.put(sel, multi);
} else if (oldMsg instanceof List) {
@@ -261,7 +261,7 @@ public class OutboundMessageRegistry {

public void timeReached() {
long now = _context.clock().now();
List<MessageSelector> removing = new ArrayList(8);
List<MessageSelector> removing = new ArrayList<MessageSelector>(8);
synchronized (_selectors) {
// CME?
//for (Iterator<MessageSelector> iter = _selectors.iterator(); iter.hasNext(); ) {

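Note: _selectorToMessage stays Map<MessageSelector, Object> because a value is either a single OutNetMessage or a List of them; generics cannot express that union, so the instanceof dispatch and one unchecked cast survive the cleanup. A hedged sketch of the one-or-many idiom as a hypothetical helper, not the registry's actual API:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class OneOrMany<K, V> {
    // value is either a V or a List<V>
    private final Map<K, Object> _map = new HashMap<K, Object>();

    @SuppressWarnings("unchecked")
    synchronized void put(K key, V val) {
        Object old = _map.put(key, val);
        if (old == null)
            return;                       // first value for this key
        if (old instanceof List) {
            ((List<V>) old).add(val);     // grow the existing list
            _map.put(key, old);
        } else {
            List<V> multi = new ArrayList<V>(4);
            multi.add((V) old);
            multi.add(val);
            _map.put(key, multi);
        }
    }
}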
@@ -23,7 +23,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -43,7 +42,6 @@ import net.i2p.router.RouterContext;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.LHMCache;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.SystemVersion;
import net.i2p.util.Translate;
@@ -75,7 +73,7 @@ public abstract class TransportImpl implements Transport {
long max = 4096;
// 1024 nominal for 128 MB
int size = (int) Math.max(min, Math.min(max, 1 + (maxMemory / (128*1024))));
_IPMap = new LHMCache(size);
_IPMap = new LHMCache<Hash, byte[]>(size);
}

/**
@@ -95,14 +93,14 @@ public abstract class TransportImpl implements Transport {
//_context.statManager().createRateStat("transport.sendProcessingTime." + getStyle(), "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l });
_context.statManager().createRateStat("transport.expiredOnQueueLifetime", "How long a message that expires on our outbound queue is processed", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );

_currentAddresses = new CopyOnWriteArrayList();
_currentAddresses = new CopyOnWriteArrayList<RouterAddress>();
if (getStyle().equals("NTCP"))
_sendPool = new ArrayBlockingQueue(8);
_sendPool = new ArrayBlockingQueue<OutNetMessage>(8);
else
_sendPool = null;
_unreachableEntries = new HashMap(32);
_wasUnreachableEntries = new HashMap(32);
_localAddresses = new ConcurrentHashSet(4);
_unreachableEntries = new HashMap<Hash, Long>(32);
_wasUnreachableEntries = new HashMap<Hash, Long>(32);
_localAddresses = new ConcurrentHashSet<InetAddress>(4);
_context.simpleScheduler().addPeriodicEvent(new CleanupUnreachable(), 2 * UNREACHABLE_PERIOD, UNREACHABLE_PERIOD / 2);
}

@@ -179,7 +177,7 @@ public abstract class TransportImpl implements Transport {
*/
public Vector getClockSkews() { return new Vector(); }

public List<String> getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
public List<String> getMostRecentErrorMessages() { return Collections.emptyList(); }

/**
* Nonblocking call to pull the next outbound message
@@ -564,7 +562,7 @@ public abstract class TransportImpl implements Transport {
* @since IPv6
*/
protected Collection<InetAddress> getSavedLocalAddresses() {
List<InetAddress> rv = new ArrayList(_localAddresses);
List<InetAddress> rv = new ArrayList<InetAddress>(_localAddresses);
_localAddresses.clear();
return rv;
}

@@ -13,9 +13,7 @@ import java.io.Writer;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -31,6 +29,7 @@ import net.i2p.router.CommSystemFacade;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import static net.i2p.router.transport.Transport.AddressSource.*;
import net.i2p.router.transport.TransportManager.Port;
import net.i2p.router.transport.crypto.DHSessionKeyBuilder;
import net.i2p.router.transport.ntcp.NTCPTransport;
import net.i2p.router.transport.udp.UDPTransport;
@@ -66,7 +65,7 @@ public class TransportManager implements TransportEventListener {
_context.statManager().createRateStat("transport.bidFailSelf", "Could not attempt to bid on message, as it targeted ourselves", "Transport", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("transport.bidFailNoTransports", "Could not attempt to bid on message, as none of the transports could attempt it", "Transport", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("transport.bidFailAllTransports", "Could not attempt to bid on message, as all of the transports had failed", "Transport", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_transports = new ConcurrentHashMap(2);
_transports = new ConcurrentHashMap<String, Transport>(2);
if (_context.getBooleanPropertyDefaultTrue(PROP_ENABLE_UPNP))
_upnpManager = new UPnPManager(context, this);
else
@@ -168,7 +167,7 @@ public class TransportManager implements TransportEventListener {
_log.debug("Starting up the transport manager");
// Let's do this in a predictable order to make testing easier
// Start NTCP first so it can get notified from SSU
List<Transport> tps = new ArrayList();
List<Transport> tps = new ArrayList<Transport>();
Transport tp = getTransport(NTCPTransport.STYLE);
if (tp != null)
tps.add(tp);
@@ -363,7 +362,7 @@ public class TransportManager implements TransportEventListener {
* This forces a rebuild
*/
public List<RouterAddress> getAddresses() {
List<RouterAddress> rv = new ArrayList(4);
List<RouterAddress> rv = new ArrayList<RouterAddress>(4);
// do this first since SSU may force a NTCP change
for (Transport t : _transports.values())
t.updateAddress();
@@ -406,7 +405,7 @@ public class TransportManager implements TransportEventListener {
* which we will pass along to UPnP
*/
private Set<Port> getPorts() {
Set<Port> rv = new HashSet(4);
Set<Port> rv = new HashSet<Port>(4);
for (Transport t : _transports.values()) {
int port = t.getRequestedPort();
// Use UDP port for NTCP too - see comment in NTCPTransport.getRequestedPort() for why this is here
@@ -435,7 +434,7 @@ public class TransportManager implements TransportEventListener {
if (_context.router().getRouterInfo().equals(msg.getTarget()))
throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?");

List<TransportBid> rv = new ArrayList(_transports.size());
List<TransportBid> rv = new ArrayList<TransportBid>(_transports.size());
Set failedTransports = msg.getFailedTransports();
for (Transport t : _transports.values()) {
if (failedTransports.contains(t.getStyle())) {
@@ -535,7 +534,7 @@ public class TransportManager implements TransportEventListener {
}

public List<String> getMostRecentErrorMessages() {
List<String> rv = new ArrayList(16);
List<String> rv = new ArrayList<String>(16);
for (Transport t : _transports.values()) {
rv.addAll(t.getMostRecentErrorMessages());
}
@@ -543,7 +542,7 @@ public class TransportManager implements TransportEventListener {
}

public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
TreeMap<String, Transport> transports = new TreeMap();
TreeMap<String, Transport> transports = new TreeMap<String, Transport>();
for (Transport t : _transports.values()) {
transports.put(t.getStyle(), t);
}

@@ -266,9 +266,9 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
}

private void registerPortMappings() {
Set ports;
Set<ForwardPort> ports;
synchronized(lock) {
ports = new HashSet(portsForwarded);
ports = new HashSet<ForwardPort>(portsForwarded);
}
if (ports.isEmpty())
return;
@@ -326,9 +326,9 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
}

public void unregisterPortMappings() {
Set ports;
Set<ForwardPort> ports;
synchronized(lock) {
ports = new HashSet(portsForwarded);
ports = new HashSet<ForwardPort>(portsForwarded);
}
if (ports.isEmpty())
return;
@@ -918,7 +918,7 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
ForwardPortStatus fps = new ForwardPortStatus(ForwardPortStatus.DEFINITE_FAILURE,
"UPnP device does not support port forwarding",
port.portNumber);
Map map = Collections.singletonMap(port, fps);
Map<ForwardPort, ForwardPortStatus> map = Collections.singletonMap(port, fps);
forwardCallback.portForwardStatus(map);
}
return;
@@ -949,7 +949,7 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
} else {
fps = new ForwardPortStatus(ForwardPortStatus.PROBABLE_FAILURE, "UPnP port forwarding apparently failed", port.portNumber);
}
Map map = Collections.singletonMap(port, fps);
Map<ForwardPort, ForwardPortStatus> map = Collections.singletonMap(port, fps);
try {
forwardCallback.portForwardStatus(map);
} catch (Exception e) {

@@ -6,7 +6,6 @@ package net.i2p.router.transport;

import java.net.InetAddress;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

@@ -122,7 +121,7 @@ class UPnPManager {
return;
//}

Set<ForwardPort> forwards = new HashSet(ports.size());
Set<ForwardPort> forwards = new HashSet<ForwardPort>(ports.size());
for (TransportManager.Port entry : ports) {
String style = entry.style;
int port = entry.port;

@@ -17,7 +17,6 @@ import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.I2PAppContext;
import net.i2p.I2PException;
import net.i2p.crypto.CryptoConstants;
import net.i2p.crypto.KeyGenerator;
import net.i2p.crypto.SHA256Generator;
import net.i2p.data.ByteArray;
//import net.i2p.data.DataHelper;
@@ -470,7 +469,7 @@ public class DHSessionKeyBuilder {
if (_log.shouldLog(Log.DEBUG))
_log.debug("DH Precalc (minimum: " + _minSize + " max: " + _maxSize + ", delay: "
+ _calcDelay + ")");
_builders = new LinkedBlockingQueue(_maxSize);
_builders = new LinkedBlockingQueue<DHSessionKeyBuilder>(_maxSize);
setPriority(Thread.MIN_PRIORITY);
}


@@ -6,7 +6,6 @@ import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.Set;
@@ -30,7 +29,7 @@ import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.router.util.CoDelPriorityBlockingQueue;
import net.i2p.router.transport.FIFOBandwidthLimiter.Request;
import net.i2p.router.util.PriBlockingQueue;
import net.i2p.util.ByteCache;
import net.i2p.util.ConcurrentHashSet;
@@ -170,12 +169,12 @@ class NTCPConnection {
_transport = transport;
_remAddr = null;
_chan = chan;
_readBufs = new ConcurrentLinkedQueue();
_writeBufs = new ConcurrentLinkedQueue();
_bwInRequests = new ConcurrentHashSet(2);
_bwOutRequests = new ConcurrentHashSet(8);
_readBufs = new ConcurrentLinkedQueue<ByteBuffer>();
_writeBufs = new ConcurrentLinkedQueue<ByteBuffer>();
_bwInRequests = new ConcurrentHashSet<Request>(2);
_bwOutRequests = new ConcurrentHashSet<Request>(8);
//_outbound = new CoDelPriorityBlockingQueue(ctx, "NTCP-Connection", 32);
_outbound = new PriBlockingQueue(ctx, "NTCP-Connection", 32);
_outbound = new PriBlockingQueue<OutNetMessage>(ctx, "NTCP-Connection", 32);
_isInbound = true;
_decryptBlockBuf = new byte[BLOCK_SIZE];
_curReadState = new ReadState();
@@ -198,12 +197,12 @@ class NTCPConnection {
_transport = transport;
_remotePeer = remotePeer;
_remAddr = remAddr;
_readBufs = new ConcurrentLinkedQueue();
_writeBufs = new ConcurrentLinkedQueue();
_bwInRequests = new ConcurrentHashSet(2);
_bwOutRequests = new ConcurrentHashSet(8);
_readBufs = new ConcurrentLinkedQueue<ByteBuffer>();
_writeBufs = new ConcurrentLinkedQueue<ByteBuffer>();
_bwInRequests = new ConcurrentHashSet<Request>(2);
_bwOutRequests = new ConcurrentHashSet<Request>(8);
//_outbound = new CoDelPriorityBlockingQueue(ctx, "NTCP-Connection", 32);
_outbound = new PriBlockingQueue(ctx, "NTCP-Connection", 32);
_outbound = new PriBlockingQueue<OutNetMessage>(ctx, "NTCP-Connection", 32);
_isInbound = false;
_establishState = new EstablishState(ctx, transport, this);
_decryptBlockBuf = new byte[BLOCK_SIZE];
@@ -381,7 +380,7 @@ class NTCPConnection {
EventPumper.releaseBuf(bb);
}

List<OutNetMessage> pending = new ArrayList();
List<OutNetMessage> pending = new ArrayList<OutNetMessage>();
//_outbound.drainAllTo(pending);
_outbound.drainTo(pending);
for (OutNetMessage msg : pending) {
@@ -869,7 +868,7 @@ class NTCPConnection {
NUM_PREP_BUFS = (int) Math.max(MIN_BUFS, Math.min(MAX_BUFS, 1 + (maxMemory / (16*1024*1024))));
}

private final static LinkedBlockingQueue<PrepBuffer> _bufs = new LinkedBlockingQueue(NUM_PREP_BUFS);
private final static LinkedBlockingQueue<PrepBuffer> _bufs = new LinkedBlockingQueue<PrepBuffer>(NUM_PREP_BUFS);

/**
* 32KB each
@@ -1330,7 +1329,7 @@ class NTCPConnection {
/**
* FIXME static queue mixes handlers from different contexts in multirouter JVM
*/
private final static LinkedBlockingQueue<I2NPMessageHandler> _i2npHandlers = new LinkedBlockingQueue(MAX_HANDLERS);
private final static LinkedBlockingQueue<I2NPMessageHandler> _i2npHandlers = new LinkedBlockingQueue<I2NPMessageHandler>(MAX_HANDLERS);

private final static I2NPMessageHandler acquireHandler(RouterContext ctx) {
I2NPMessageHandler rv = _i2npHandlers.poll();

||||
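The close path in the -381 hunk drains _outbound into a local list before failing each message. PriBlockingQueue is I2P-specific, but the idiom is standard BlockingQueue.drainTo(): move everything out atomically, then process outside the queue. A minimal sketch with illustrative names:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.LinkedBlockingQueue;

    public class DrainDemo {
        public static void main(String[] args) {
            LinkedBlockingQueue<String> outbound = new LinkedBlockingQueue<String>();
            outbound.add("msg-1");
            outbound.add("msg-2");

            List<String> pending = new ArrayList<String>();
            outbound.drainTo(pending); // atomically moves all queued elements

            for (String msg : pending) {
                // e.g. notify the failure callback for each undelivered message
                System.out.println("failing " + msg);
            }
        }
    }
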
@@ -12,7 +12,6 @@ import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -31,7 +30,6 @@ import net.i2p.data.RouterInfo;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.CommSystemFacadeImpl;
import net.i2p.router.transport.Transport;
import static net.i2p.router.transport.Transport.AddressSource.*;
import net.i2p.router.transport.TransportBid;
@@ -39,7 +37,6 @@ import net.i2p.router.transport.TransportImpl;
import net.i2p.router.transport.TransportUtil;
import static net.i2p.router.transport.TransportUtil.IPv6Config.*;
import net.i2p.router.transport.crypto.DHSessionKeyBuilder;
import net.i2p.router.transport.udp.UDPTransport;
import net.i2p.util.Addresses;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
@@ -170,10 +167,10 @@ public class NTCPTransport extends TransportImpl {
_context.statManager().createRateStat("ntcp.wantsQueuedWrite", "", "ntcp", RATES);
//_context.statManager().createRateStat("ntcp.write", "", "ntcp", RATES);
_context.statManager().createRateStat("ntcp.writeError", "", "ntcp", RATES);
_endpoints = new HashSet(4);
_establishing = new ConcurrentHashSet(16);
_endpoints = new HashSet<InetSocketAddress>(4);
_establishing = new ConcurrentHashSet<NTCPConnection>(16);
_conLock = new Object();
_conByIdent = new ConcurrentHashMap(64);
_conByIdent = new ConcurrentHashMap<Hash, NTCPConnection>(64);

_finisher = new NTCPSendFinisher(ctx, this);

@@ -472,7 +469,7 @@ public class NTCPTransport extends TransportImpl {
*/
@Override
public Vector<Long> getClockSkews() {
Vector<Long> skews = new Vector();
Vector<Long> skews = new Vector<Long>();

for (NTCPConnection con : _conByIdent.values()) {
if (con.isEstablished())
@@ -1088,7 +1085,7 @@ public class NTCPTransport extends TransportImpl {
_finisher.stop();
List<NTCPConnection> cons;
synchronized (_conLock) {
cons = new ArrayList(_conByIdent.values());
cons = new ArrayList<NTCPConnection>(_conByIdent.values());
_conByIdent.clear();
}
for (NTCPConnection con : cons) {
@@ -1105,7 +1102,7 @@ public class NTCPTransport extends TransportImpl {

@Override
public void renderStatusHTML(java.io.Writer out, String urlBase, int sortFlags) throws IOException {
TreeSet<NTCPConnection> peers = new TreeSet(getComparator(sortFlags));
TreeSet<NTCPConnection> peers = new TreeSet<NTCPConnection>(getComparator(sortFlags));
peers.addAll(_conByIdent.values());

long offsetTotal = 0;
@@ -1220,8 +1217,8 @@ public class NTCPTransport extends TransportImpl {
synchronized (_rateFmt) { return _rateFmt.format(rate); }
}

private Comparator getComparator(int sortFlags) {
Comparator rv = null;
private Comparator<NTCPConnection> getComparator(int sortFlags) {
Comparator<NTCPConnection> rv = null;
switch (Math.abs(sortFlags)) {
default:
rv = AlphaComparator.instance();

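Parameterizing getComparator() as Comparator<NTCPConnection> means the TreeSet in renderStatusHTML() is built without unchecked warnings and the comparator body needs no casts from Object. A minimal sketch of the typed-comparator-plus-TreeSet idiom (Conn is an illustrative stand-in, not an I2P class):

    import java.util.Comparator;
    import java.util.TreeSet;

    public class SortedViewDemo {
        static class Conn {
            final String name; final long uptime;
            Conn(String name, long uptime) { this.name = name; this.uptime = uptime; }
        }

        public static void main(String[] args) {
            Comparator<Conn> byUptime = new Comparator<Conn>() {
                public int compare(Conn a, Conn b) {
                    // Typed parameters: no cast from Object, unlike a raw Comparator.
                    return a.uptime < b.uptime ? -1 : (a.uptime > b.uptime ? 1 : 0);
                }
            };
            TreeSet<Conn> peers = new TreeSet<Conn>(byUptime);
            peers.add(new Conn("alice", 120));
            peers.add(new Conn("bob", 30));
            for (Conn c : peers)              // iterates in sorted order
                System.out.println(c.name + " " + c.uptime);
        }
    }
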
@@ -30,10 +30,10 @@ class Reader {
public Reader(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_pendingConnections = new LinkedHashSet(16);
_runners = new ArrayList(8);
_liveReads = new HashSet(8);
_readAfterLive = new HashSet(8);
_pendingConnections = new LinkedHashSet<NTCPConnection>(16);
_runners = new ArrayList<Runner>(8);
_liveReads = new HashSet<NTCPConnection>(8);
_readAfterLive = new HashSet<NTCPConnection>(8);
}

public synchronized void startReading(int numReaders) {

@@ -26,10 +26,10 @@ class Writer {

public Writer(RouterContext ctx) {
_log = ctx.logManager().getLog(getClass());
_pendingConnections = new LinkedHashSet(16);
_runners = new ArrayList(5);
_liveWrites = new HashSet(5);
_writeAfterLive = new HashSet(5);
_pendingConnections = new LinkedHashSet<NTCPConnection>(16);
_runners = new ArrayList<Runner>(5);
_liveWrites = new HashSet<NTCPConnection>(5);
_writeAfterLive = new HashSet<NTCPConnection>(5);
}

public synchronized void startWriting(int numWriters) {

@@ -34,7 +34,7 @@ class ACKSender implements Runnable {
_context = ctx;
_log = ctx.logManager().getLog(ACKSender.class);
_transport = transport;
_peersToACK = new LinkedBlockingQueue();
_peersToACK = new LinkedBlockingQueue<PeerState>();
_builder = new PacketBuilder(_context, transport);
_alive = true;
_context.statManager().createRateStat("udp.sendACKCount", "how many ack messages were sent to a peer", "udp", UDPTransport.RATES);
@@ -86,7 +86,7 @@ class ACKSender implements Runnable {
public void run() {

// we use a Set to strip out dups that come in on the Queue
Set<PeerState> notYet = new HashSet();
Set<PeerState> notYet = new HashSet<PeerState>();
while (_alive) {
PeerState peer = null;
long now = 0;

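The comment in the run() hunk explains the notYet set: a peer may be enqueued on _peersToACK several times before the sender thread gets to it, and collecting into a Set collapses those duplicates. A minimal sketch with String stand-ins for PeerState:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.LinkedBlockingQueue;

    public class DedupeDemo {
        public static void main(String[] args) {
            LinkedBlockingQueue<String> peersToAck = new LinkedBlockingQueue<String>();
            peersToAck.add("peerA");
            peersToAck.add("peerB");
            peersToAck.add("peerA"); // duplicate enqueue

            Set<String> notYet = new HashSet<String>();
            peersToAck.drainTo(notYet); // Set semantics collapse the duplicate

            System.out.println(notYet.size()); // 2, each peer ACKed once
        }
    }
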
@@ -9,8 +9,6 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterIdentity;
@@ -127,12 +125,12 @@ class EstablishmentManager {
_log = ctx.logManager().getLog(EstablishmentManager.class);
_transport = transport;
_builder = new PacketBuilder(ctx, transport);
_inboundStates = new ConcurrentHashMap();
_outboundStates = new ConcurrentHashMap();
_queuedOutbound = new ConcurrentHashMap();
_liveIntroductions = new ConcurrentHashMap();
_outboundByClaimedAddress = new ConcurrentHashMap();
_outboundByHash = new ConcurrentHashMap();
_inboundStates = new ConcurrentHashMap<RemoteHostId, InboundEstablishState>();
_outboundStates = new ConcurrentHashMap<RemoteHostId, OutboundEstablishState>();
_queuedOutbound = new ConcurrentHashMap<RemoteHostId, List<OutNetMessage>>();
_liveIntroductions = new ConcurrentHashMap<Long, OutboundEstablishState>();
_outboundByClaimedAddress = new ConcurrentHashMap<RemoteHostId, OutboundEstablishState>();
_outboundByHash = new ConcurrentHashMap<Hash, OutboundEstablishState>();
_activityLock = new Object();
DEFAULT_MAX_CONCURRENT_ESTABLISH = Math.max(DEFAULT_LOW_MAX_CONCURRENT_ESTABLISH,
Math.min(DEFAULT_HIGH_MAX_CONCURRENT_ESTABLISH,
@@ -312,7 +310,7 @@ class EstablishmentManager {
if (_queuedOutbound.size() >= MAX_QUEUED_OUTBOUND && !_queuedOutbound.containsKey(to)) {
rejected = true;
} else {
List<OutNetMessage> newQueued = new ArrayList(MAX_QUEUED_PER_PEER);
List<OutNetMessage> newQueued = new ArrayList<OutNetMessage>(MAX_QUEUED_PER_PEER);
List<OutNetMessage> queued = _queuedOutbound.putIfAbsent(to, newQueued);
if (queued == null) {
queued = newQueued;
@@ -622,7 +620,7 @@ class EstablishmentManager {
}
RemoteHostId to = entry.getKey();
List<OutNetMessage> allQueued = entry.getValue();
List<OutNetMessage> queued = new ArrayList();
List<OutNetMessage> queued = new ArrayList<OutNetMessage>();
long now = _context.clock().now();
synchronized (allQueued) {
for (OutNetMessage msg : allQueued) {

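The newQueued/putIfAbsent sequence in the -312 hunk is the standard lock-free insert idiom for ConcurrentHashMap: optimistically build the value, try to install it, and adopt the winner if another thread raced ahead. A minimal sketch with illustrative types:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;

    public class PutIfAbsentDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, List<String>> queuedOutbound =
                    new ConcurrentHashMap<String, List<String>>();

            List<String> newQueued = new ArrayList<String>(8);
            // putIfAbsent returns null if our list was installed,
            // or the existing list if another thread got there first.
            List<String> queued = queuedOutbound.putIfAbsent("host:port", newQueued);
            if (queued == null)
                queued = newQueued; // we won the race

            synchronized (queued) { // the list itself still needs its own lock
                queued.add("message");
            }
            System.out.println(queuedOutbound);
        }
    }
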
@@ -16,7 +16,7 @@ class IPThrottler {

public IPThrottler(int max, long time) {
_max = max;
_counter = new ObjectCounter();
_counter = new ObjectCounter<Integer>();
SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), time);
}

@@ -98,7 +98,7 @@ class InboundEstablishState {
_currentState = InboundState.IB_STATE_UNKNOWN;
_establishBegin = ctx.clock().now();
_keyBuilder = dh;
_queuedMessages = new LinkedBlockingQueue();
_queuedMessages = new LinkedBlockingQueue<OutNetMessage>();
}

public synchronized InboundState getState() { return _currentState; }

@@ -11,12 +11,10 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.TransportImpl;
import net.i2p.util.Addresses;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
@@ -104,9 +102,9 @@ class IntroductionManager {
_log = ctx.logManager().getLog(IntroductionManager.class);
_transport = transport;
_builder = new PacketBuilder(ctx, transport);
_outbound = new ConcurrentHashMap(MAX_OUTBOUND);
_inbound = new ConcurrentHashSet(MAX_INBOUND);
_recentHolePunches = new HashSet(16);
_outbound = new ConcurrentHashMap<Long, PeerState>(MAX_OUTBOUND);
_inbound = new ConcurrentHashSet<PeerState>(MAX_INBOUND);
_recentHolePunches = new HashSet<InetAddress>(16);
ctx.statManager().createRateStat("udp.receiveRelayIntro", "How often we get a relayed request for us to talk to someone?", "udp", UDPTransport.RATES);
ctx.statManager().createRateStat("udp.receiveRelayRequest", "How often we receive a good request to relay to someone else?", "udp", UDPTransport.RATES);
ctx.statManager().createRateStat("udp.receiveRelayRequestBadTag", "Received relay requests with bad/expired tag", "udp", UDPTransport.RATES);
@@ -169,7 +167,7 @@ class IntroductionManager {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Picking inbound out of " + _inbound.size());
if (_inbound.isEmpty()) return 0;
List<PeerState> peers = new ArrayList(_inbound);
List<PeerState> peers = new ArrayList<PeerState>(_inbound);
int sz = peers.size();
start = start % sz;
int found = 0;

@@ -54,7 +54,7 @@ class MessageReceiver {
_threadCount = Math.max(MIN_THREADS, Math.min(MAX_THREADS, ctx.bandwidthLimiter().getInboundKBytesPerSecond() / 20));
qsize = (int) Math.max(MIN_QUEUE_SIZE, Math.min(MAX_QUEUE_SIZE, maxMemory / (2*1024*1024)));
}
_completeMessages = new CoDelBlockingQueue(ctx, "UDP-MessageReceiver", qsize);
_completeMessages = new CoDelBlockingQueue<InboundMessageState>(ctx, "UDP-MessageReceiver", qsize);

// the runners run forever, no need to have a cache
//_cache = ByteCache.getInstance(64, I2NPMessage.MAX_SIZE);

@@ -1,6 +1,5 @@
package net.i2p.router.transport.udp;

import java.net.InetAddress;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;

@@ -116,7 +115,7 @@ class OutboundEstablishState {
_remoteHostId = remoteHostId;
_remotePeer = remotePeer;
_introKey = introKey;
_queuedMessages = new LinkedBlockingQueue();
_queuedMessages = new LinkedBlockingQueue<OutNetMessage>();
_establishBegin = ctx.clock().now();
_remoteAddress = addr;
_introductionNonce = -1;

@@ -7,7 +7,6 @@ import java.util.Set;

import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.ConcurrentHashSet;
@@ -68,7 +67,7 @@ class OutboundMessageFragments {
_log = ctx.logManager().getLog(OutboundMessageFragments.class);
_transport = transport;
// _throttle = throttle;
_activePeers = new ConcurrentHashSet(256);
_activePeers = new ConcurrentHashSet<PeerState>(256);
_builder = new PacketBuilder(ctx, transport);
_alive = true;
// _allowExcess = false;
@@ -363,12 +362,12 @@ class OutboundMessageFragments {
List<Long> msgIds = peer.getCurrentFullACKs();
int newFullAckCount = msgIds.size();
msgIds.addAll(peer.getCurrentResendACKs());
List<ACKBitfield> partialACKBitfields = new ArrayList();
List<ACKBitfield> partialACKBitfields = new ArrayList<ACKBitfield>();
peer.fetchPartialACKs(partialACKBitfields);
int piggybackedPartialACK = partialACKBitfields.size();
// getCurrentFullACKs() already makes a copy, do we need to copy again?
// YES because buildPacket() now removes them (maybe)
List<Long> remaining = new ArrayList(msgIds);
List<Long> remaining = new ArrayList<Long>(msgIds);
int sparseCount = 0;
UDPPacket rv[] = new UDPPacket[fragments]; //sparse
for (int i = 0; i < fragments; i++) {

@@ -11,7 +11,6 @@ import java.util.List;

import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
@@ -443,7 +442,7 @@ class PacketBuilder {
* It doesn't generate a reply, but that's ok.
*/
public UDPPacket buildPing(PeerState peer) {
return buildACK(peer, Collections.EMPTY_LIST);
return buildACK(peer, Collections.<ACKBitfield> emptyList());
}

/**
@@ -1082,7 +1081,7 @@ class PacketBuilder {
public List<UDPPacket> buildRelayRequest(UDPTransport transport, OutboundEstablishState state, SessionKey ourIntroKey) {
UDPAddress addr = state.getRemoteAddress();
int count = addr.getIntroducerCount();
List<UDPPacket> rv = new ArrayList(count);
List<UDPPacket> rv = new ArrayList<UDPPacket>(count);
for (int i = 0; i < count; i++) {
InetAddress iaddr = addr.getIntroducerHost(i);
int iport = addr.getIntroducerPort(i);
@@ -1095,7 +1094,7 @@ class PacketBuilder {
iaddr.getAddress().length != 4 ||
(!_transport.isValid(iaddr.getAddress())) ||
(Arrays.equals(iaddr.getAddress(), _transport.getExternalIP()) && !_transport.allowLocal())) {
if (_log.shouldLog(_log.WARN))
if (_log.shouldLog(Log.WARN))
_log.warn("Cannot build a relay request to " + state.getRemoteIdentity().calculateHash()
+ ", as their UDP address is invalid: addr=" + addr + " index=" + i);
// TODO implement some sort of introducer banlist

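The buildPing() change differs slightly from the rest of the commit: Collections.EMPTY_LIST is a raw constant, while Collections.emptyList() is generic, and under the pre-Java-8 inference rules a call passed straight into a typed parameter needs an explicit type witness such as <ACKBitfield>. A minimal sketch, where countAcks stands in for the real buildACK:

    import java.util.Collections;
    import java.util.List;

    public class TypeWitnessDemo {
        // Illustrative stand-in for buildACK(PeerState, List<ACKBitfield>).
        static int countAcks(List<String> acks) { return acks.size(); }

        public static void main(String[] args) {
            // Raw constant: compiles only with an unchecked warning,
            // since EMPTY_LIST has the raw type List.
            //int n1 = countAcks(Collections.EMPTY_LIST);

            // Generic method with an explicit type witness: before Java 8,
            // inference does not use the argument's target type, so the
            // witness is required when passing the result directly.
            int n2 = countAcks(Collections.<String>emptyList());
            System.out.println(n2); // 0
        }
    }
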
@@ -56,11 +56,11 @@ class PacketHandler {
_inbound = inbound;
_testManager = testManager;
_introManager = introManager;
_failCache = new LHMCache(24);
_failCache = new LHMCache<RemoteHostId, Object>(24);

long maxMemory = SystemVersion.getMaxMemory();
int qsize = (int) Math.max(MIN_QUEUE_SIZE, Math.min(MAX_QUEUE_SIZE, maxMemory / (2*1024*1024)));
_inboundQueue = new CoDelBlockingQueue(ctx, "UDP-Receiver", qsize);
_inboundQueue = new CoDelBlockingQueue<UDPPacket>(ctx, "UDP-Receiver", qsize);
int num_handlers;
if (maxMemory < 32*1024*1024)
num_handlers = 1;

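LHMCache, used for the 24-entry _failCache above, is I2P's small LinkedHashMap-based cache; assuming it follows the usual removeEldestEntry pattern, a bounded LRU cache of that shape looks like this (a sketch, not the actual LHMCache source):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class LruCacheDemo {
        static class LruCache<K, V> extends LinkedHashMap<K, V> {
            private final int max;
            LruCache(int max) {
                super(16, 0.75f, true); // true = access-order iteration
                this.max = max;
            }
            @Override
            protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                return size() > max; // evict least-recently-used past capacity
            }
        }

        public static void main(String[] args) {
            LruCache<String, Object> failCache = new LruCache<String, Object>(2);
            failCache.put("1.2.3.4:8887", new Object());
            failCache.put("5.6.7.8:8887", new Object());
            failCache.put("9.9.9.9:8887", new Object()); // evicts the eldest
            System.out.println(failCache.keySet()); // two most recent entries
        }
    }
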
Some files were not shown because too many files have changed in this diff.