forked from I2P_Developers/i2p.i2p
Compare commits
26 Commits
i2p_0_6_1_ ... i2p_0_6_1_
| Author | SHA1 | Date |
|---|---|---|
|  | 031636e607 |  |
|  | b5c0d77c69 |  |
|  | d489caa88c |  |
|  | 2a24029acf |  |
|  | c5aab8c750 |  |
|  | 343748111a |  |
|  | c5ddfabfe9 |  |
|  | 1ef33906ed |  |
|  | f3849a22ad |  |
|  | b03ff21d3b |  |
|  | 52094b10c9 |  |
|  | fc927efaa3 |  |
|  | 65dc803fb7 |  |
|  | 349adf6690 |  |
|  | 2c843fd818 |  |
|  | 863b511cde |  |
|  | c417e7c237 |  |
|  | 1822c0d7d8 |  |
|  | 94c1c32b51 |  |
|  | deb35f4af4 |  |
|  | 883150f943 |  |
|  | 717d1b97b2 |  |
|  | e62135eacc |  |
|  | 2c6d953359 |  |
|  | 2b79e2df3f |  |
|  | fab6e421b8 |  |
Makefile.gcj (11 lines changed)
@@ -21,11 +21,12 @@ NATIVE_DIR=native
 # router.jar: full I2P router
 # jbigi.jar: collection of native optimized GMP routines for crypto
 JAR_BASE=i2p.jar mstreaming.jar streaming.jar
-JAR_CLIENTS=i2ptunnel.jar sam.jar i2psnark.jar
+JAR_CLIENTS=i2ptunnel.jar sam.jar
 JAR_ROUTER=router.jar
 JAR_JBIGI=jbigi.jar
 JAR_XML=xml-apis.jar resolver.jar xercesImpl.jar
 JAR_CONSOLE=\
+        i2psnark.jar \
         javax.servlet.jar \
         commons-el.jar \
         commons-logging.jar \

@@ -79,15 +80,15 @@ native_clean:
 native_shared: libi2p.so
         @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2p_dsa --main=net.i2p.crypto.DSAEngine
         @echo "* i2p_dsa is a simple test app with the DSA engine and Fortuna PRNG to make sure crypto is working"
-        @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/prng --main=gnu.crypto.prng.Fortuna
+        @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/prng --main=gnu.crypto.prng.FortunaStandalone
         @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2ptunnel --main=net.i2p.i2ptunnel.I2PTunnel
         @echo "* i2ptunnel is mihi's I2PTunnel CLI"
         @echo "  run it as ./i2ptunnel -cli to avoid awt complaints"
         @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2ptunnelctl --main=net.i2p.i2ptunnel.TunnelControllerGroup
         @echo "* i2ptunnelctl is a controller for I2PTunnel, reading i2ptunnel.config"
         @echo "  and launching the appropriate proxies"
-        @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2psnark --main=org.klomp.snark.Snark
-        @echo "* i2psnark is an anonymous bittorrent client"
+        #@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2psnark --main=org.klomp.snark.Snark
+        #@echo "* i2psnark is an anonymous bittorrent client"
         @cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2prouter --main=net.i2p.router.Router
         @echo "* i2prouter is the main I2P router"
         @echo "  it can be used, and while the router console won't load,"

@@ -95,6 +96,6 @@ native_shared: libi2p.so

 libi2p.so:
         @echo "* Building libi2p.so"
-        @(cd build ; ${GCJ} ${OPTIMIZE} -fPIC -fjni -shared -o ../${NATIVE_DIR}/libi2p.so ${LIBI2P_JARS} ; cd .. )
+        @(cd build ; time ${GCJ} ${OPTIMIZE} -fPIC -fjni -shared -o ../${NATIVE_DIR}/libi2p.so ${LIBI2P_JARS} ; cd .. )
         @ls -l ${NATIVE_DIR}/libi2p.so
         @echo "* libi2p.so built"
@@ -10,6 +10,7 @@ import net.i2p.client.streaming.I2PSocket;
 import net.i2p.client.streaming.I2PSocketManager;
 import net.i2p.client.streaming.I2PSocketManagerFactory;
 import net.i2p.util.Log;
+import net.i2p.util.SimpleTimer;

 import java.io.*;
 import java.util.*;

@@ -31,6 +32,7 @@ public class I2PSnarkUtil {
     private Map _opts;
     private I2PSocketManager _manager;
     private boolean _configured;
+    private Set _shitlist;

     private I2PSnarkUtil() {
         _context = I2PAppContext.getGlobalContext();

@@ -38,6 +40,7 @@ public class I2PSnarkUtil {
         _opts = new HashMap();
         setProxy("127.0.0.1", 4444);
         setI2CPConfig("127.0.0.1", 7654, null);
+        _shitlist = new HashSet(64);
         _configured = false;
     }

@@ -110,18 +113,36 @@ public class I2PSnarkUtil {
     public void disconnect() {
         I2PSocketManager mgr = _manager;
         _manager = null;
+        _shitlist.clear();
         mgr.destroySocketManager();
     }

     /** connect to the given destination */
     I2PSocket connect(PeerID peer) throws IOException {
+        Hash dest = peer.getAddress().calculateHash();
+        synchronized (_shitlist) {
+            if (_shitlist.contains(dest))
+                throw new IOException("Not trying to contact " + dest.toBase64() + ", as they are shitlisted");
+        }
         try {
-            return _manager.connect(peer.getAddress());
+            I2PSocket rv = _manager.connect(peer.getAddress());
+            if (rv != null) synchronized (_shitlist) { _shitlist.remove(dest); }
+            return rv;
         } catch (I2PException ie) {
+            synchronized (_shitlist) {
+                _shitlist.add(dest);
+            }
+            SimpleTimer.getInstance().addEvent(new Unshitlist(dest), 10*60*1000);
             throw new IOException("Unable to reach the peer " + peer + ": " + ie.getMessage());
         }
     }
+
+    private class Unshitlist implements SimpleTimer.TimedEvent {
+        private Hash _dest;
+        public Unshitlist(Hash dest) { _dest = dest; }
+        public void timeReached() { synchronized (_shitlist) { _shitlist.remove(_dest); } }
+    }

     /**
      * fetch the given URL, returning the file it is stored in, or null on error
      */
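The hunk above introduces a temporary ban list: peers that fail to connect are shitlisted, and a timed Unshitlist event removes them again after ten minutes. A minimal, self-contained sketch of the same pattern, using java.util.Timer in place of I2P's SimpleTimer (the class and method names below are illustrative only, not from the I2P codebase):

```java
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;

// Illustrative stand-in for the _shitlist/Unshitlist pair above:
// keys are banned on failure and automatically unbanned later.
public class TempBanList<K> {
    private final Set<K> banned = new HashSet<K>();
    private final Timer timer = new Timer(true); // daemon timer thread

    public synchronized boolean isBanned(K key) { return banned.contains(key); }

    /** Ban key now; lift the ban automatically after banMillis. */
    public synchronized void ban(final K key, long banMillis) {
        banned.add(key);
        timer.schedule(new TimerTask() {
            public void run() {
                synchronized (TempBanList.this) { banned.remove(key); }
            }
        }, banMillis);
    }

    /** Lift a ban early, e.g. after a successful connection. */
    public synchronized void unban(K key) { banned.remove(key); }
}
```

Usage mirrors the diff: `ban(dest, 10*60*1000)` on an I2PException, `unban(dest)` on a successful connect.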
@@ -633,11 +633,11 @@ public class Snark
   boolean allocating = false;
   public void storageCreateFile(Storage storage, String name, long length)
   {
-    if (allocating)
-      System.out.println(); // Done with last file.
+    //if (allocating)
+    //  System.out.println(); // Done with last file.

-    System.out.print("Creating file '" + name
-                     + "' of length " + length + ": ");
+    //System.out.print("Creating file '" + name
+    //                 + "' of length " + length + ": ");
     allocating = true;
   }

@@ -647,10 +647,10 @@ public class Snark
   public void storageAllocated(Storage storage, long length)
   {
     allocating = true;
-    System.out.print(".");
+    //System.out.print(".");
     allocated += length;
-    if (allocated == meta.getTotalLength())
-      System.out.println(); // We have all the disk space we need.
+    //if (allocated == meta.getTotalLength())
+    //  System.out.println(); // We have all the disk space we need.
   }

   boolean allChecked = false;

@@ -664,26 +664,21 @@ public class Snark
         // Use the MetaInfo from the storage since our own might not
         // yet be setup correctly.
         MetaInfo meta = storage.getMetaInfo();
-        if (meta != null)
-          System.out.print("Checking existing "
-                           + meta.getPieces()
-                           + " pieces: ");
+        //if (meta != null)
+        //  System.out.print("Checking existing "
+        //                   + meta.getPieces()
+        //                   + " pieces: ");
         checking = true;
       }
-    if (checking)
-      if (checked)
-        System.out.print("+");
-      else
-        System.out.print("-");
-    else
+    if (!checking)
       Snark.debug("Got " + (checked ? "" : "BAD ") + "piece: " + num,
                   Snark.INFO);
   }

   public void storageAllChecked(Storage storage)
   {
-    if (checking)
-      System.out.println();
+    //if (checking)
+    //  System.out.println();

     allChecked = true;
     checking = false;

@@ -693,7 +688,7 @@ public class Snark
   {
     Snark.debug("Completely received " + torrent, Snark.INFO);
     //storage.close();
-    System.out.println("Completely received: " + torrent);
+    //System.out.println("Completely received: " + torrent);
     if (completeListener != null)
       completeListener.torrentComplete(this);
   }
@@ -464,7 +464,7 @@ public class SnarkManager implements Snark.CompleteListener {

     private static final String DEFAULT_TRACKERS[] = {
         "Postman's tracker", "http://YRgrgTLGnbTq2aZOZDJQ~o6Uk5k6TK-OZtx0St9pb0G-5EGYURZioxqYG8AQt~LgyyI~NCj6aYWpPO-150RcEvsfgXLR~CxkkZcVpgt6pns8SRc3Bi-QSAkXpJtloapRGcQfzTtwllokbdC-aMGpeDOjYLd8b5V9Im8wdCHYy7LRFxhEtGb~RL55DA8aYOgEXcTpr6RPPywbV~Qf3q5UK55el6Kex-6VCxreUnPEe4hmTAbqZNR7Fm0hpCiHKGoToRcygafpFqDw5frLXToYiqs9d4liyVB-BcOb0ihORbo0nS3CLmAwZGvdAP8BZ7cIYE3Z9IU9D1G8JCMxWarfKX1pix~6pIA-sp1gKlL1HhYhPMxwyxvuSqx34o3BqU7vdTYwWiLpGM~zU1~j9rHL7x60pVuYaXcFQDR4-QVy26b6Pt6BlAZoFmHhPcAuWfu-SFhjyZYsqzmEmHeYdAwa~HojSbofg0TMUgESRXMw6YThK1KXWeeJVeztGTz25sL8AAAA.i2p/announce.php"
-        , "Orion's tracker", "http://gKik1lMlRmuroXVGTZ~7v4Vez3L3ZSpddrGZBrxVriosCQf7iHu6CIk8t15BKsj~P0JJpxrofeuxtm7SCUAJEr0AIYSYw8XOmp35UfcRPQWyb1LsxUkMT4WqxAT3s1ClIICWlBu5An~q-Mm0VFlrYLIPBWlUFnfPR7jZ9uP5ZMSzTKSMYUWao3ejiykr~mtEmyls6g-ZbgKZawa9II4zjOy-hdxHgP-eXMDseFsrym4Gpxvy~3Fv9TuiSqhpgm~UeTo5YBfxn6~TahKtE~~sdCiSydqmKBhxAQ7uT9lda7xt96SS09OYMsIWxLeQUWhns-C~FjJPp1D~IuTrUpAFcVEGVL-BRMmdWbfOJEcWPZ~CBCQSO~VkuN1ebvIOr9JBerFMZSxZtFl8JwcrjCIBxeKPBmfh~xYh16BJm1BBBmN1fp2DKmZ2jBNkAmnUbjQOqWvUcehrykWk5lZbE7bjJMDFH48v3SXwRuDBiHZmSbsTY6zhGY~GkMQHNGxPMMSIAAAA.i2p/bt"
+        , "Orion's tracker", "http://gKik1lMlRmuroXVGTZ~7v4Vez3L3ZSpddrGZBrxVriosCQf7iHu6CIk8t15BKsj~P0JJpxrofeuxtm7SCUAJEr0AIYSYw8XOmp35UfcRPQWyb1LsxUkMT4WqxAT3s1ClIICWlBu5An~q-Mm0VFlrYLIPBWlUFnfPR7jZ9uP5ZMSzTKSMYUWao3ejiykr~mtEmyls6g-ZbgKZawa9II4zjOy-hdxHgP-eXMDseFsrym4Gpxvy~3Fv9TuiSqhpgm~UeTo5YBfxn6~TahKtE~~sdCiSydqmKBhxAQ7uT9lda7xt96SS09OYMsIWxLeQUWhns-C~FjJPp1D~IuTrUpAFcVEGVL-BRMmdWbfOJEcWPZ~CBCQSO~VkuN1ebvIOr9JBerFMZSxZtFl8JwcrjCIBxeKPBmfh~xYh16BJm1BBBmN1fp2DKmZ2jBNkAmnUbjQOqWvUcehrykWk5lZbE7bjJMDFH48v3SXwRuDBiHZmSbsTY6zhGY~GkMQHNGxPMMSIAAAA.i2p/bt/announce.php"
 //      , "The freak's tracker", "http://mHKva9x24E5Ygfey2llR1KyQHv5f8hhMpDMwJDg1U-hABpJ2NrQJd6azirdfaR0OKt4jDlmP2o4Qx0H598~AteyD~RJU~xcWYdcOE0dmJ2e9Y8-HY51ie0B1yD9FtIV72ZI-V3TzFDcs6nkdX9b81DwrAwwFzx0EfNvK1GLVWl59Ow85muoRTBA1q8SsZImxdyZ-TApTVlMYIQbdI4iQRwU9OmmtefrCe~ZOf4UBS9-KvNIqUL0XeBSqm0OU1jq-D10Ykg6KfqvuPnBYT1BYHFDQJXW5DdPKwcaQE4MtAdSGmj1epDoaEBUa9btQlFsM2l9Cyn1hzxqNWXELmx8dRlomQLlV4b586dRzW~fLlOPIGC13ntPXogvYvHVyEyptXkv890jC7DZNHyxZd5cyrKC36r9huKvhQAmNABT2Y~pOGwVrb~RpPwT0tBuPZ3lHYhBFYmD8y~AOhhNHKMLzea1rfwTvovBMByDdFps54gMN1mX4MbCGT4w70vIopS9yAAAA.i2p/bytemonsoon/announce.php"
     };
@@ -5,6 +5,7 @@ import java.net.Socket;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
+import java.lang.IndexOutOfBoundsException;

 import net.i2p.I2PAppContext;
 import net.i2p.client.streaming.I2PSocket;

@@ -277,9 +278,14 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable

         if(field[0].charAt(0)==':')
             idx++;

-        command = field[idx++];
+        try { command = field[idx++]; }
+        catch (IndexOutOfBoundsException ioobe) // wtf, server sent borked command?
+        {
+            _log.warn("Dropping defective message: index out of bounds while extracting command.");
+            return null;
+        }

         idx++; //skip victim

         // Allow numerical responses
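The try/catch above hardens message parsing: a line consisting of a prefix alone (for example `:irc.example.i2p`, a hypothetical server name) splits into a single field, so once the prefix advances idx, `field[idx++]` would step past the end of the array; the catch turns that into a warning and a dropped message instead of an unhandled exception in the tunnel thread.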
BIN apps/jrobin/jrobin-1.4.0.jar (new file): binary file not shown.
@@ -25,6 +25,7 @@
                 <pathelement location="../../systray/java/build/systray.jar" />
                 <pathelement location="../../systray/java/lib/systray4j.jar" />
                 <pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" /> <!-- we dont care if we're not on win32 -->
+                <pathelement location="../../jrobin/jrobin-1.4.0.jar" />
             </classpath>
         </javac>
     </target>

@@ -34,6 +35,12 @@
                 <attribute name="Class-Path" value="i2p.jar router.jar" />
             </manifest>
         </jar>
+
+        <delete dir="./tmpextract" />
+        <unjar src="../../jrobin/jrobin-1.4.0.jar" dest="./tmpextract" />
+        <jar destfile="./build/routerconsole.jar" basedir="./tmpextract" update="true" />
+        <delete dir="./tmpextract" />
+
         <ant target="war" />
     </target>
     <target name="war" depends="precompilejsp">

@@ -60,6 +67,7 @@
             <pathelement location="../../systray/java/lib/systray4j.jar" />
             <pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" />
+            <pathelement location="build/routerconsole.jar" />
             <pathelement location="build/" />
             <pathelement location="../../../router/java/build/router.jar" />
             <pathelement location="../../../core/java/build/i2p.jar" />
         </classpath>

@@ -86,6 +94,7 @@
             <pathelement location="../../systray/java/lib/systray4j.jar" />
             <pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" />
+            <pathelement location="build/routerconsole.jar" />
             <pathelement location="build" />
             <pathelement location="../../../router/java/build/router.jar" />
             <pathelement location="../../../core/java/build/i2p.jar" />
         </classpath>
@@ -30,7 +30,6 @@ import net.i2p.router.web.ConfigServiceHandler.UpdateWrapperManagerAndRekeyTask;
  */
 public class ConfigNetHandler extends FormHandler {
     private String _hostname;
-    private boolean _guessRequested;
     private boolean _reseedRequested;
     private boolean _saveRequested;
     private boolean _recheckReachabilityRequested;

@@ -52,9 +51,7 @@ public class ConfigNetHandler extends FormHandler {
     private boolean _ratesOnly;

     protected void processForm() {
-        if (_guessRequested) {
-            guessHostname();
-        } else if (_reseedRequested) {
+        if (_reseedRequested) {
             reseed();
         } else if (_saveRequested || ( (_action != null) && ("Save changes".equals(_action)) )) {
             saveChanges();

@@ -65,7 +62,6 @@ public class ConfigNetHandler extends FormHandler {
         }
     }

-    public void setGuesshost(String moo) { _guessRequested = true; }
     public void setReseed(String moo) { _reseedRequested = true; }
     public void setSave(String moo) { _saveRequested = true; }
     public void setEnabletimesync(String moo) { _timeSyncEnabled = true; }

@@ -110,37 +106,7 @@ public class ConfigNetHandler extends FormHandler {
         _sharePct = (pct != null ? pct.trim() : null);
     }

-    private static final String IP_PREFIX = "<h1>Your IP is ";
-    private static final String IP_SUFFIX = " <br></h1>";
-    private void guessHostname() {
-        BufferedReader reader = null;
-        try {
-            URL url = new URL("http://www.whatismyip.com/");
-            URLConnection con = url.openConnection();
-            con.connect();
-            reader = new BufferedReader(new InputStreamReader(con.getInputStream()));
-            String line = null;
-            while ( (line = reader.readLine()) != null) {
-                if (line.startsWith(IP_PREFIX)) {
-                    int end = line.indexOf(IP_SUFFIX);
-                    if (end == -1) {
-                        addFormError("Unable to guess the host (BAD_SUFFIX)");
-                        return;
-                    }
-                    String ip = line.substring(IP_PREFIX.length(), end);
-                    addFormNotice("Host guess: " + ip);
-                    return;
-                }
-            }
-            addFormError("Unable to guess the host (NO_PREFIX)");
-        } catch (IOException ioe) {
-            addFormError("Unable to guess the host (IO_ERROR)");
-            _context.logManager().getLog(ConfigNetHandler.class).error("Unable to guess the host", ioe);
-        } finally {
-            if (reader != null) try { reader.close(); } catch (IOException ioe) {}
-        }
-    }
-
     private static final String DEFAULT_SEED_URL = ReseedHandler.DEFAULT_SEED_URL;
     /**
      * Reseed has been requested, so lets go ahead and do it.  Fetch all of
apps/routerconsole/java/src/net/i2p/router/web/GraphHelper.java (new file, 104 lines)
@@ -0,0 +1,104 @@
package net.i2p.router.web;

import java.io.IOException;
import java.io.Writer;
import java.util.*;

import net.i2p.data.DataHelper;
import net.i2p.stat.Rate;
import net.i2p.router.RouterContext;

public class GraphHelper {
    private RouterContext _context;
    private Writer _out;
    private int _periodCount;
    private boolean _showEvents;
    private int _width;
    private int _height;
    private int _refreshDelaySeconds;
    /**
     * Configure this bean to query a particular router context
     *
     * @param contextId begging few characters of the routerHash, or null to pick
     *                  the first one we come across.
     */
    public void setContextId(String contextId) {
        try {
            _context = ContextHelper.getContext(contextId);
        } catch (Throwable t) {
            t.printStackTrace();
        }
    }

    public GraphHelper() {
        _periodCount = 60; // SummaryListener.PERIODS;
        _showEvents = false;
        _width = 250;
        _height = 100;
        _refreshDelaySeconds = 60;
    }

    public void setOut(Writer out) { _out = out; }
    public void setPeriodCount(String str) {
        try { _periodCount = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
    }
    public void setShowEvents(boolean b) { _showEvents = b; }
    public void setHeight(String str) {
        try { _height = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
    }
    public void setWidth(String str) {
        try { _width = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
    }
    public void setRefreshDelay(String str) {
        try { _refreshDelaySeconds = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
    }

    public String getImages() {
        try {
            List listeners = StatSummarizer.instance().getListeners();
            for (int i = 0; i < listeners.size(); i++) {
                SummaryListener lsnr = (SummaryListener)listeners.get(i);
                Rate r = lsnr.getRate();
                String title = r.getRateStat().getName() + " for " + DataHelper.formatDuration(_periodCount * r.getPeriod());
                _out.write("<img src=\"viewstat.jsp?stat=" + r.getRateStat().getName()
                           + "&showEvents=" + _showEvents
                           + "&period=" + r.getPeriod()
                           + "&periodCount=" + _periodCount
                           + "&width=" + _width
                           + "&height=" + _height
                           + "\" title=\"" + title + "\" />\n");
            }
            if (_refreshDelaySeconds > 0)
                _out.write("<meta http-equiv=\"refresh\" content=\"" + _refreshDelaySeconds + "\" />\n");

        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
        return "";
    }
    public String getForm() {
        try {
            _out.write("<form action=\"graphs.jsp\" method=\"GET\">");
            _out.write("Periods: <input size=\"3\" type=\"text\" name=\"periodCount\" value=\"" + _periodCount + "\" /><br />\n");
            _out.write("Plot averages: <input type=\"radio\" name=\"showEvents\" value=\"false\" " + (_showEvents ? "" : "checked=\"true\" ") + " /> ");
            _out.write("or plot events: <input type=\"radio\" name=\"showEvents\" value=\"true\" "+ (_showEvents ? "checked=\"true\" " : "") + " /><br />\n");
            _out.write("Image sizes: width: <input size=\"4\" type=\"text\" name=\"width\" value=\"" + _width
                       + "\" /> pixels, height: <input size=\"4\" type=\"text\" name=\"height\" value=\"" + _height
                       + "\" /><br />\n");
            _out.write("Refresh delay: <select name=\"refreshDelay\"><option value=\"60\">1 minute</option><option value=\"120\">2 minutes</option><option value=\"300\">5 minutes</option><option value=\"600\">10 minutes</option><option value=\"-1\">Never</option></select><br />\n");
            _out.write("<input type=\"submit\" value=\"Redraw\" />");
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
        return "";
    }
    public String getPeerSummary() {
        try {
            _context.commSystem().renderStatusHTML(_out);
            _context.bandwidthLimiter().renderStatusHTML(_out);
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
        return "";
    }
}
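With the defaults above (60 periods, 250x100 images), getImages() writes one tag per tracked rate of roughly this shape, where the stat name and period are drawn from the summarized-rate list in StatSummarizer below: `<img src="viewstat.jsp?stat=bw.sendRate&showEvents=false&period=60000&periodCount=60&width=250&height=100" title="bw.sendRate for ..." />`. The form it emits posts back to graphs.jsp, which re-binds these bean properties through `<jsp:setProperty name="graphHelper" property="*" />`.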
@@ -25,6 +25,7 @@ public class RouterConsoleRunner {

     static {
         System.setProperty("org.mortbay.http.Version.paranoid", "true");
+        System.setProperty("java.awt.headless", "true");
     }

     public RouterConsoleRunner(String args[]) {

@@ -95,6 +96,10 @@ public class RouterConsoleRunner {
         I2PThread t = new I2PThread(fetcher, "NewsFetcher");
         t.setDaemon(true);
         t.start();
+
+        I2PThread st = new I2PThread(new StatSummarizer(), "StatSummarizer");
+        st.setDaemon(true);
+        st.start();
     }

     private void initialize(WebApplicationContext context) {
StatSummarizer.java (new file, 154 lines)
@@ -0,0 +1,154 @@
package net.i2p.router.web;

import java.io.*;
import java.util.*;

import net.i2p.stat.*;
import net.i2p.router.*;

/**
 *
 */
public class StatSummarizer implements Runnable {
    private RouterContext _context;
    /** list of SummaryListener instances */
    private List _listeners;
    private static StatSummarizer _instance;

    public StatSummarizer() {
        _context = (RouterContext)RouterContext.listContexts().get(0); // fuck it, only summarize one per jvm
        _listeners = new ArrayList(16);
        _instance = this;
    }

    public static StatSummarizer instance() { return _instance; }

    public void run() {
        String specs = "";
        while (_context.router().isAlive()) {
            specs = adjustDatabases(specs);
            try { Thread.sleep(60*1000); } catch (InterruptedException ie) {}
        }
    }

    /** list of SummaryListener instances */
    List getListeners() { return _listeners; }

    private static final String DEFAULT_DATABASES = "bw.sendRate.60000" +
                                                    ",bw.recvRate.60000" +
                                                    ",tunnel.testSuccessTime.60000" +
                                                    ",udp.outboundActiveCount.60000" +
                                                    ",udp.receivePacketSize.60000" +
                                                    ",udp.receivePacketSkew.60000" +
                                                    ",udp.sendConfirmTime.60000" +
                                                    ",udp.sendPacketSize.60000" +
                                                    ",router.activePeers.60000" +
                                                    ",router.activeSendPeers.60000" +
                                                    ",tunnel.acceptLoad.60000" +
                                                    ",client.sendAckTime.60000" +
                                                    ",client.dispatchNoACK.60000" +
                                                    ",transport.sendMessageFailureLifetime.60000" +
                                                    ",transport.sendProcessingTime.60000";

    private String adjustDatabases(String oldSpecs) {
        String spec = _context.getProperty("stat.summaries", DEFAULT_DATABASES);
        if ( ( (spec == null) && (oldSpecs == null) ) ||
             ( (spec != null) && (oldSpecs != null) && (oldSpecs.equals(spec))) )
            return oldSpecs;

        List old = parseSpecs(oldSpecs);
        List newSpecs = parseSpecs(spec);

        // remove old ones
        for (int i = 0; i < old.size(); i++) {
            Rate r = (Rate)old.get(i);
            if (!newSpecs.contains(r))
                removeDb(r);
        }
        // add new ones
        StringBuffer buf = new StringBuffer();
        for (int i = 0; i < newSpecs.size(); i++) {
            Rate r = (Rate)newSpecs.get(i);
            if (!old.contains(r))
                addDb(r);
            buf.append(r.getRateStat().getName()).append(".").append(r.getPeriod());
            if (i + 1 < newSpecs.size())
                buf.append(',');
        }
        return buf.toString();
    }

    private void removeDb(Rate r) {
        for (int i = 0; i < _listeners.size(); i++) {
            SummaryListener lsnr = (SummaryListener)_listeners.get(i);
            if (lsnr.getRate().equals(r)) {
                _listeners.remove(i);
                lsnr.stopListening();
                return;
            }
        }
    }
    private void addDb(Rate r) {
        SummaryListener lsnr = new SummaryListener(r);
        _listeners.add(lsnr);
        lsnr.startListening();
        //System.out.println("Start listening for " + r.getRateStat().getName() + ": " + r.getPeriod());
    }
    public boolean renderPng(Rate rate, OutputStream out) throws IOException {
        return renderPng(rate, out, -1, -1, false, false, false, false, -1, true);
    }
    public boolean renderPng(Rate rate, OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
        for (int i = 0; i < _listeners.size(); i++) {
            SummaryListener lsnr = (SummaryListener)_listeners.get(i);
            if (lsnr.getRate().equals(rate)) {
                lsnr.renderPng(out, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
                return true;
            }
        }
        return false;
    }
    public boolean renderPng(OutputStream out, String templateFilename) throws IOException {
        SummaryRenderer.render(_context, out, templateFilename);
        return true;
    }
    public boolean getXML(Rate rate, OutputStream out) throws IOException {
        for (int i = 0; i < _listeners.size(); i++) {
            SummaryListener lsnr = (SummaryListener)_listeners.get(i);
            if (lsnr.getRate().equals(rate)) {
                lsnr.getData().exportXml(out);
                out.write(("<!-- Rate: " + lsnr.getRate().getRateStat().getName() + " for period " + lsnr.getRate().getPeriod() + " -->\n").getBytes());
                out.write(("<!-- Average data soure name: " + lsnr.getName() + " event count data source name: " + lsnr.getEventName() + " -->\n").getBytes());
                return true;
            }
        }
        return false;
    }

    /**
     * @param specs statName.period,statName.period,statName.period
     * @return list of Rate objects
     */
    private List parseSpecs(String specs) {
        StringTokenizer tok = new StringTokenizer(specs, ",");
        List rv = new ArrayList();
        while (tok.hasMoreTokens()) {
            String spec = tok.nextToken();
            int split = spec.lastIndexOf('.');
            if ( (split <= 0) || (split + 1 >= spec.length()) )
                continue;
            String name = spec.substring(0, split);
            String per = spec.substring(split+1);
            long period = -1;
            try {
                period = Long.parseLong(per);
                RateStat rs = _context.statManager().getRate(name);
                if (rs != null) {
                    Rate r = rs.getRate(period);
                    if (r != null)
                        rv.add(r);
                }
            } catch (NumberFormatException nfe) {}
        }
        return rv;
    }
}
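The summarized set is re-read once a minute from the stat.summaries router property (falling back to DEFAULT_DATABASES), as a comma-separated list of statName.period entries with the period in milliseconds. For example, an override such as `stat.summaries=bw.sendRate.60000,router.activePeers.60000` (an illustrative value, not a shipped default) would graph only those two rates.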
SummaryListener.java (new file, 250 lines)
@@ -0,0 +1,250 @@
package net.i2p.router.web;

import java.io.*;

import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.stat.RateSummaryListener;
import net.i2p.util.Log;

import org.jrobin.core.RrdDb;
import org.jrobin.core.RrdDef;
import org.jrobin.core.RrdBackendFactory;
import org.jrobin.core.RrdMemoryBackendFactory;
import org.jrobin.core.Sample;

import java.awt.Color;
import org.jrobin.graph.RrdGraph;
import org.jrobin.graph.RrdGraphDef;
import org.jrobin.graph.RrdGraphDefTemplate;
import org.jrobin.core.RrdException;

class SummaryListener implements RateSummaryListener {
    private I2PAppContext _context;
    private Log _log;
    private Rate _rate;
    private String _name;
    private String _eventName;
    private RrdDb _db;
    private Sample _sample;
    private RrdMemoryBackendFactory _factory;
    private SummaryRenderer _renderer;

    static final int PERIODS = 1440;

    static {
        try {
            RrdBackendFactory.setDefaultFactory("MEMORY");
        } catch (RrdException re) {
            re.printStackTrace();
        }
    }

    public SummaryListener(Rate r) {
        _context = I2PAppContext.getGlobalContext();
        _rate = r;
        _log = _context.logManager().getLog(SummaryListener.class);
    }

    public void add(double totalValue, long eventCount, double totalEventTime, long period) {
        long now = now();
        long when = now / 1000;
        //System.out.println("add to " + getRate().getRateStat().getName() + " on " + System.currentTimeMillis() + " / " + now + " / " + when);
        if (_db != null) {
            // add one value to the db (the average value for the period)
            try {
                _sample.setTime(when);
                double val = eventCount > 0 ? (totalValue / (double)eventCount) : 0d;
                _sample.setValue(_name, val);
                _sample.setValue(_eventName, eventCount);
                //_sample.setValue(0, val);
                //_sample.setValue(1, eventCount);
                _sample.update();
                //String names[] = _sample.getDsNames();
                //System.out.println("Add " + val + " over " + eventCount + " for " + _name
                //                   + " [" + names[0] + ", " + names[1] + "]");
            } catch (IOException ioe) {
                _log.error("Error adding", ioe);
            } catch (RrdException re) {
                _log.error("Error adding", re);
            }
        }
    }

    /**
     * JRobin can only deal with 20 character data source names, so we need to create a unique,
     * munged version from the user/developer-visible name.
     *
     */
    private static String createName(I2PAppContext ctx, String wanted) {
        return ctx.sha().calculateHash(DataHelper.getUTF8(wanted)).toBase64().substring(0,20);
    }

    public Rate getRate() { return _rate; }
    public void startListening() {
        RateStat rs = _rate.getRateStat();
        long period = _rate.getPeriod();
        String baseName = rs.getName() + "." + period;
        _name = createName(_context, baseName);
        _eventName = createName(_context, baseName + ".events");
        try {
            RrdDef def = new RrdDef(_name, now()/1000, period/1000);
            // for info on the heartbeat, xff, steps, etc, see the rrdcreate man page, aka
            // http://www.jrobin.org/support/man/rrdcreate.html
            long heartbeat = period*10/1000;
            def.addDatasource(_name, "GAUGE", heartbeat, Double.NaN, Double.NaN);
            def.addDatasource(_eventName, "GAUGE", heartbeat, 0, Double.NaN);
            double xff = 0.9;
            int steps = 1;
            int rows = PERIODS;
            def.addArchive("AVERAGE", xff, steps, rows);
            _factory = (RrdMemoryBackendFactory)RrdBackendFactory.getDefaultFactory();
            _db = new RrdDb(def, _factory);
            _sample = _db.createSample();
            _renderer = new SummaryRenderer(_context, this);
            _rate.setSummaryListener(this);
        } catch (RrdException re) {
            _log.error("Error starting", re);
        } catch (IOException ioe) {
            _log.error("Error starting", ioe);
        }
    }
    public void stopListening() {
        if (_db == null) return;
        try {
            _db.close();
        } catch (IOException ioe) {
            _log.error("Error closing", ioe);
        }
        _rate.setSummaryListener(null);
        _factory.delete(_db.getPath());
        _db = null;
    }
    public void renderPng(OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
        _renderer.render(out, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
    }
    public void renderPng(OutputStream out) throws IOException { _renderer.render(out); }

    String getName() { return _name; }
    String getEventName() { return _eventName; }
    RrdDb getData() { return _db; }
    long now() { return _context.clock().now(); }

    public boolean equals(Object obj) {
        return ((obj instanceof SummaryListener) && ((SummaryListener)obj)._rate.equals(_rate));
    }
    public int hashCode() { return _rate.hashCode(); }
}

class SummaryRenderer {
    private Log _log;
    private SummaryListener _listener;
    public SummaryRenderer(I2PAppContext ctx, SummaryListener lsnr) {
        _log = ctx.logManager().getLog(SummaryRenderer.class);
        _listener = lsnr;
    }

    /**
     * Render the stats as determined by the specified JRobin xml config,
     * but note that this doesn't work on stock jvms, as it requires
     * DOM level 3 load and store support.  Perhaps we can bundle that, or
     * specify who can get it from where, etc.
     *
     */
    public static void render(I2PAppContext ctx, OutputStream out, String filename) throws IOException {
        long end = ctx.clock().now();
        long start = end - 60*1000*SummaryListener.PERIODS;
        long begin = System.currentTimeMillis();
        try {
            RrdGraphDefTemplate template = new RrdGraphDefTemplate(filename);
            RrdGraphDef def = template.getRrdGraphDef();
            def.setTimePeriod(start/1000, end/1000); // ignore the periods in the template
            RrdGraph graph = new RrdGraph(def);
            byte img[] = graph.getPNGBytes();
            out.write(img);
        } catch (RrdException re) {
            //_log.error("Error rendering " + filename, re);
            throw new IOException("Error plotting: " + re.getMessage());
        } catch (IOException ioe) {
            //_log.error("Error rendering " + filename, ioe);
            throw ioe;
        }
    }
    public void render(OutputStream out) throws IOException { render(out, -1, -1, false, false, false, false, -1, true); }
    public void render(OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
        long end = _listener.now();
        if (periodCount <= 0) periodCount = SummaryListener.PERIODS;
        if (periodCount > SummaryListener.PERIODS)
            periodCount = SummaryListener.PERIODS;
        long start = end - _listener.getRate().getPeriod()*periodCount;
        long begin = System.currentTimeMillis();
        try {
            RrdGraphDef def = new RrdGraphDef();
            def.setTimePeriod(start/1000, end/1000);
            String title = _listener.getRate().getRateStat().getName() + " averaged for "
                           + DataHelper.formatDuration(_listener.getRate().getPeriod());
            if (!hideTitle)
                def.setTitle(title);
            String path = _listener.getData().getPath();
            String dsNames[] = _listener.getData().getDsNames();
            String plotName = null;
            String descr = null;
            if (showEvents) {
                // include the average event count on the plot
                plotName = dsNames[1];
                descr = "Events per period";
            } else {
                // include the average value
                plotName = dsNames[0];
                descr = _listener.getRate().getRateStat().getDescription();
            }
            def.datasource(plotName, path, plotName, "AVERAGE", "MEMORY");
            def.area(plotName, Color.BLUE, descr + "@r");
            if (!hideLegend) {
                def.gprint(plotName, "AVERAGE", "average: @2@s");
                def.gprint(plotName, "MAX", " max: @2@s@r");
            }
            if (!showCredit)
                def.setShowSignature(false);
            /*
            // these four lines set up a graph plotting both values and events on the same chart
            // (but with the same coordinates, so the values may look pretty skewed)
                def.datasource(dsNames[0], path, dsNames[0], "AVERAGE", "MEMORY");
                def.datasource(dsNames[1], path, dsNames[1], "AVERAGE", "MEMORY");
                def.area(dsNames[0], Color.BLUE, _listener.getRate().getRateStat().getDescription());
                def.line(dsNames[1], Color.RED, "Events per period");
            */
            if (hideLegend)
                def.setShowLegend(false);
            if (hideGrid) {
                def.setGridX(false);
                def.setGridY(false);
            }
            //System.out.println("rendering: path=" + path + " dsNames[0]=" + dsNames[0] + " dsNames[1]=" + dsNames[1] + " lsnr.getName=" + _listener.getName());
            def.setAntiAliasing(false);
            //System.out.println("Rendering: \n" + def.exportXmlTemplate());
            //System.out.println("*****************\nData: \n" + _listener.getData().dump());
            RrdGraph graph = new RrdGraph(def);
            //System.out.println("Graph created");
            byte data[] = null;
            if ( (width <= 0) || (height <= 0) )
                data = graph.getPNGBytes();
            else
                data = graph.getPNGBytes(width, height);
            long timeToPlot = System.currentTimeMillis() - begin;
            out.write(data);
            //File t = File.createTempFile("jrobinData", ".xml");
            //_listener.getData().dumpXml(new FileOutputStream(t));
            //System.out.println("plotted: " + (data != null ? data.length : 0) + " bytes in " + timeToPlot
            //                   ); // + ", data written to " + t.getAbsolutePath());
        } catch (RrdException re) {
            _log.error("Error rendering", re);
            throw new IOException("Error plotting: " + re.getMessage());
        } catch (IOException ioe) {
            _log.error("Error rendering", ioe);
            throw ioe;
        }
    }
}
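Sizing note: each listener keeps PERIODS = 1440 rows at one row per period, so with the default 60000 ms (one-minute) rates the in-memory RRD covers 1440 x 60 s = 24 hours per stat, and the heartbeat of period*10/1000 = 600 s tolerates roughly ten minutes between samples before the archive records gaps.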
@@ -43,7 +43,8 @@
  A negative rate means a default limit of 16KBytes per second.</i><br />
  Bandwidth share percentage:
  <jsp:getProperty name="nethelper" property="sharePercentageBox" /><br />
- Sharing a higher percentage will improve your anonymity and help the network
+ Sharing a higher percentage will improve your anonymity and help the network<br />
  <input type="submit" name="save" value="Save changes" /> <input type="reset" value="Cancel" /><br />
  <hr />
  <b>Enable load testing: </b>
  <input type="checkbox" name="enableloadtesting" value="true" <jsp:getProperty name="nethelper" property="enableLoadTesting" /> />
apps/routerconsole/jsp/graphs.jsp (new file, 23 lines)
@@ -0,0 +1,23 @@
<%@page contentType="text/html"%>
<%@page pageEncoding="UTF-8"%>
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html><head>
<title>I2P Router Console - graphs</title>
<link rel="stylesheet" href="default.css" type="text/css" />
</head><body>

<%@include file="nav.jsp" %>
<%@include file="summary.jsp" %>

<div class="main" id="main">
 <jsp:useBean class="net.i2p.router.web.GraphHelper" id="graphHelper" scope="request" />
 <jsp:setProperty name="graphHelper" property="*" />
 <jsp:setProperty name="graphHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
 <jsp:setProperty name="graphHelper" property="out" value="<%=out%>" />
 <jsp:getProperty name="graphHelper" property="images" />
 <jsp:getProperty name="graphHelper" property="form" />
</div>

</body>
</html>
@@ -33,6 +33,7 @@
         <a href="netdb.jsp">NetDB</a> |
         <a href="logs.jsp">Logs</a> |
         <a href="jobs.jsp">Jobs</a> |
+        <a href="graphs.jsp">Graphs</a> |
         <a href="oldstats.jsp">Stats</a> |
         <a href="oldconsole.jsp">Internals</a>
 <% } %>
apps/routerconsole/jsp/viewstat.jsp (new file, 53 lines)
@@ -0,0 +1,53 @@
<%
boolean rendered = false;
String templateFile = request.getParameter("template");
if (templateFile != null) {
    java.io.OutputStream cout = response.getOutputStream();
    response.setContentType("image/png");
    rendered = net.i2p.router.web.StatSummarizer.instance().renderPng(cout, templateFile);
}
net.i2p.stat.Rate rate = null;
String stat = request.getParameter("stat");
String period = request.getParameter("period");
net.i2p.stat.RateStat rs = net.i2p.I2PAppContext.getGlobalContext().statManager().getRate(stat);
if ( !rendered && (rs != null)) {
    long per = -1;
    try {
        per = Long.parseLong(period);
        rate = rs.getRate(per);
        if (rate != null) {
            java.io.OutputStream cout = response.getOutputStream();
            String format = request.getParameter("format");
            if ("xml".equals(format)) {
                response.setContentType("text/xml");
                rendered = net.i2p.router.web.StatSummarizer.instance().getXML(rate, cout);
            } else {
                response.setContentType("image/png");
                int width = -1;
                int height = -1;
                int periodCount = -1;
                String str = request.getParameter("width");
                if (str != null) try { width = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
                str = request.getParameter("height");
                if (str != null) try { height = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
                str = request.getParameter("periodCount");
                if (str != null) try { periodCount = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
                boolean hideLegend = Boolean.valueOf(""+request.getParameter("hideLegend")).booleanValue();
                boolean hideGrid = Boolean.valueOf(""+request.getParameter("hideGrid")).booleanValue();
                boolean hideTitle = Boolean.valueOf(""+request.getParameter("hideTitle")).booleanValue();
                boolean showEvents = Boolean.valueOf(""+request.getParameter("showEvents")).booleanValue();
                boolean showCredit = true;
                if (request.getParameter("showCredit") != null)
                    showCredit = Boolean.valueOf(""+request.getParameter("showCredit")).booleanValue();
                rendered = net.i2p.router.web.StatSummarizer.instance().renderPng(rate, cout, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
            }
            if (rendered)
                cout.close();
            //System.out.println("Rendered period " + per + " for the stat " + stat + "? " + rendered);
        }
    } catch (NumberFormatException nfe) {}
}
if (!rendered) {
    response.sendError(404, "That stat is not available");
}
%>
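viewstat.jsp thus serves three request shapes: a whole-template render when template= is supplied, an XML dump of the RRD when format=xml, and a PNG otherwise. Illustrative requests (parameter values are examples only): `viewstat.jsp?stat=bw.recvRate&period=60000&periodCount=120&width=500&height=200` for a custom-sized plot, or `viewstat.jsp?stat=bw.recvRate&period=60000&format=xml` for the underlying data.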
@@ -210,6 +210,11 @@ public class Connection {
             }
         }
         if (packet != null) {
+            if (packet.isFlagSet(Packet.FLAG_RESET)) {
+                // sendReset takes care to prevent too-frequent RSET transmissions
+                sendReset();
+                return;
+            }
             ResendPacketEvent evt = (ResendPacketEvent)packet.getResendEvent();
             if (evt != null) {
                 boolean sent = evt.retransmit(false);

@@ -240,9 +245,11 @@ public class Connection {
             _disconnectScheduledOn = _context.clock().now();
             SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
         }
+        long now = _context.clock().now();
+        if (_resetSentOn + 10*1000 > now) return; // don't send resets too fast
         _resetSent = true;
-        if (_resetSentOn <= 0)
-            _resetSentOn = _context.clock().now();
+        _resetSentOn = now;
         if ( (_remotePeer == null) || (_sendStreamId <= 0) ) return;
         PacketLocal reply = new PacketLocal(_context, _remotePeer);
         reply.setFlag(Packet.FLAG_RESET);
@@ -6,6 +6,7 @@ import java.text.*;
 import net.i2p.I2PAppContext;
 import net.i2p.data.*;
 import net.i2p.syndie.data.*;
+import net.i2p.util.FileUtil;
 import net.i2p.util.Log;

 /**

@@ -211,7 +212,13 @@ public class Archive {
         if (!entryDir.exists())
             entryDir.mkdirs();

-        boolean ok = _extractor.extract(entryFile, entryDir, null, info);
+        boolean ok = true;
+        try {
+            ok = _extractor.extract(entryFile, entryDir, null, info);
+        } catch (IOException ioe) {
+            ok = false;
+            _log.error("Error extracting " + entryFile.getPath() + ", deleting it", ioe);
+        }
         if (!ok) {
             File files[] = entryDir.listFiles();
             for (int i = 0; i < files.length; i++)

@@ -326,6 +333,15 @@ public class Archive {
         return rv;
     }

+    public synchronized void delete(Hash blog) {
+        if (blog == null) return;
+        File blogDir = new File(_rootDir, blog.toBase64());
+        boolean deleted = FileUtil.rmdir(blogDir, false);
+        File cacheDir = new File(_cacheDir, blog.toBase64());
+        deleted = FileUtil.rmdir(cacheDir, false) && deleted;
+        _log.info("Deleted blog " + blog.toBase64() + " completely? " + deleted);
+    }
+
     public boolean storeEntry(EntryContainer container) {
         if (container == null) return false;
         BlogURI uri = container.getURI();
@@ -1060,4 +1060,49 @@ public class BlogManager {
             return true;
         return false;
     }
+
+    public boolean isBanned(Hash blog) {
+        if (blog == null) return false;
+        String str = blog.toBase64();
+        String banned = System.getProperty("syndie.bannedBlogs", "");
+        return (banned.indexOf(str) >= 0);
+    }
+
+    public String[] getBannedBlogs() {
+        List blogs = new ArrayList();
+        String str = System.getProperty("syndie.bannedBlogs", "");
+        StringTokenizer tok = new StringTokenizer(str, ",");
+        while (tok.hasMoreTokens()) {
+            String blog = tok.nextToken();
+            try {
+                Hash h = new Hash();
+                h.fromBase64(blog);
+                blogs.add(blog); // the base64 string, but verified
+            } catch (DataFormatException dfe) {
+                // ignored
+            }
+        }
+        String rv[] = new String[blogs.size()];
+        for (int i = 0; i < blogs.size(); i++)
+            rv[i] = (String)blogs.get(i);
+        return rv;
+    }
+
+    /**
+     * Delete the blog from the archive completely, and ban them from ever being added again
+     */
+    public void purgeAndBan(Hash blog) {
+        String banned[] = getBannedBlogs();
+        StringBuffer buf = new StringBuffer();
+        String str = blog.toBase64();
+        buf.append(str);
+        for (int i = 0; banned != null && i < banned.length; i++) {
+            if (!banned[i].equals(str))
+                buf.append(",").append(banned[i]);
+        }
+        System.setProperty("syndie.bannedBlogs", buf.toString());
+        writeConfig();
+        _archive.delete(blog);
+        _archive.regenerateIndex();
+    }
 }
@@ -59,9 +59,9 @@ public class EntryExtractor {
     }

     public void extract(EntryContainer entry, File entryDir) throws IOException {
-        extractEntry(entry, entryDir);
         extractHeaders(entry, entryDir);
         extractMeta(entry, entryDir);
+        extractEntry(entry, entryDir);
         Attachment attachments[] = entry.getAttachments();
         if (attachments != null) {
             for (int i = 0; i < attachments.length; i++) {

@@ -97,10 +97,14 @@ public class EntryExtractor {
         }
     }
     private void extractEntry(EntryContainer entry, File entryDir) throws IOException {
+        Entry e = entry.getEntry();
+        if (e == null) throw new IOException("Entry is null");
+        String text = e.getText();
+        if (text == null) throw new IOException("Entry text is null");
         FileOutputStream out = null;
         try {
             out = new FileOutputStream(new File(entryDir, ENTRY));
-            out.write(DataHelper.getUTF8(entry.getEntry().getText()));
+            out.write(DataHelper.getUTF8(text));
         } finally {
             out.close();
         }
@@ -60,7 +60,7 @@ public class EntryContainer {
         this();
         _entryURI = uri;
         if ( (smlData == null) || (smlData.length <= 0) )
-            _entryData = new Entry(null);
+            _entryData = new Entry(""); //null);
         else
             _entryData = new Entry(DataHelper.getUTF8(smlData));
         setHeader(HEADER_BLOGKEY, Base64.encode(uri.getKeyHash().getData()));

@@ -277,7 +277,7 @@ public class EntryContainer {
         }

         if (_entryData == null)
-            _entryData = new Entry(null);
+            _entryData = new Entry(""); //null);

         _attachments = new Attachment[attachments.size()];
@@ -46,6 +46,7 @@ public class AddressesServlet extends BaseServlet {
     public static final String ACTION_DELETE_BLOG = "Delete author";
     public static final String ACTION_UPDATE_BLOG = "Update author";
     public static final String ACTION_ADD_BLOG = "Add author";
+    public static final String ACTION_PURGE_AND_BAN_BLOG = "Purge and ban author";

     public static final String ACTION_DELETE_ARCHIVE = "Delete archive";
     public static final String ACTION_UPDATE_ARCHIVE = "Update archive";

@@ -128,6 +129,8 @@ public class AddressesServlet extends BaseServlet {
         if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) {
             out.write("Ignored? <input type=\"checkbox\" name=\"" + PARAM_IGNORE
                       + "\" checked=\"true\" value=\"true\" title=\"If true, their threads are hidden\" /> ");
+            if (BlogManager.instance().authorizeRemote(user))
+                out.write("<input type=\"submit\" name=\"" + PARAM_ACTION + "\" value=\"" + ACTION_PURGE_AND_BAN_BLOG + "\" /> ");
         } else {
             out.write("Ignored? <input type=\"checkbox\" name=\"" + PARAM_IGNORE
                       + "\" value=\"true\" title=\"If true, their threads are hidden\" /> ");
@@ -64,13 +64,13 @@ public abstract class BaseServlet extends HttpServlet {
      * key=value& of params that need to be tacked onto an http request that updates data, to
      * prevent spoofing
      */
-    protected static String getAuthActionParams() { return PARAM_AUTH_ACTION + '=' + _authNonce + '&'; }
+    protected static String getAuthActionParams() { return PARAM_AUTH_ACTION + '=' + _authNonce + "&amp;"; }
     /**
      * key=value& of params that need to be tacked onto an http request that updates data, to
      * prevent spoofing
      */
     public static void addAuthActionParams(StringBuffer buf) {
-        buf.append(PARAM_AUTH_ACTION).append('=').append(_authNonce).append('&');
+        buf.append(PARAM_AUTH_ACTION).append('=').append(_authNonce).append("&amp;");
     }

     public void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {

@@ -329,6 +329,34 @@ public abstract class BaseServlet extends HttpServlet {
                     (AddressesServlet.ACTION_UPDATE_OTHER.equals(action)) ||
                     (AddressesServlet.ACTION_UPDATE_PEER.equals(action)) ) {
             return updateAddress(user, req);
+        } else if (AddressesServlet.ACTION_PURGE_AND_BAN_BLOG.equals(action)) {
+            String name = req.getParameter(AddressesServlet.PARAM_NAME);
+            PetName pn = user.getPetNameDB().getByName(name);
+            if (pn != null) {
+                boolean purged = false;
+                if (BlogManager.instance().authorizeRemote(user)) {
+                    Hash h = null;
+                    BlogURI uri = new BlogURI(pn.getLocation());
+                    if (uri.getKeyHash() != null) {
+                        h = uri.getKeyHash();
+                    }
+                    if (h == null) {
+                        byte b[] = Base64.decode(pn.getLocation());
+                        if ( (b != null) && (b.length == Hash.HASH_LENGTH) )
+                            h = new Hash(b);
+                    }
+                    if (h != null) {
+                        BlogManager.instance().purgeAndBan(h);
+                        purged = true;
+                    }
+                }
+                if (purged) // force a new thread index
+                    return true;
+                else
+                    return false;
+            } else {
+                return false;
+            }
         } else if ( (AddressesServlet.ACTION_DELETE_ARCHIVE.equals(action)) ||
                     (AddressesServlet.ACTION_DELETE_BLOG.equals(action)) ||
                     (AddressesServlet.ACTION_DELETE_EEPSITE.equals(action)) ||

@@ -866,22 +894,22 @@ public abstract class BaseServlet extends HttpServlet {
             ThreadNode child = node.getChild(0);
             buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=');
             buf.append(child.getEntry().getKeyHash().toBase64()).append('/');
-            buf.append(child.getEntry().getEntryId()).append('&');
+            buf.append(child.getEntry().getEntryId()).append("&amp;");
         }

         if (!empty(viewPost))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
         else if (!empty(viewThread))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");

         if (!empty(offset))
-            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");

         if (!empty(tags))
-            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");

         if (!empty(author))
-            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&amp;");

         return buf.toString();
     }

@@ -901,21 +929,21 @@ public abstract class BaseServlet extends HttpServlet {
         // collapse node == let the node be visible
         buf.append('?').append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=');
         buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
-        buf.append(node.getEntry().getEntryId()).append('&');
+        buf.append(node.getEntry().getEntryId()).append("&amp;");

         if (!empty(viewPost))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
         else if (!empty(viewThread))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");

         if (!empty(offset))
-            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");

         if (!empty(tags))
-            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");

         if (!empty(author))
-            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&amp;");

         return buf.toString();
     }

@@ -939,23 +967,23 @@ public abstract class BaseServlet extends HttpServlet {
         buf.append(uri);
         buf.append('?');
         if (!empty(visible))
-            buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append('&');
-        buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_LOCATION).append('=').append(author.toBase64()).append('&');
-        buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_NAME).append('=').append(group).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append("&amp;");
+        buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_LOCATION).append('=').append(author.toBase64()).append("&amp;");
+        buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_NAME).append('=').append(group).append("&amp;");

         if (!empty(viewPost))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
         else if (!empty(viewThread))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");

         if (!empty(offset))
-            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");

         if (!empty(tags))
-            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");

         if (!empty(filteredAuthor))
-            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append("&amp;");

         addAuthActionParams(buf);
         return buf.toString();

@@ -966,23 +994,23 @@ public abstract class BaseServlet extends HttpServlet {
         buf.append(uri);
         buf.append('?');
         if (!empty(visible))
-            buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append('&');
-        buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP_NAME).append('=').append(name).append('&');
-        buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP).append('=').append(group).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append("&amp;");
+        buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP_NAME).append('=').append(name).append("&amp;");
+        buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP).append('=').append(group).append("&amp;");

         if (!empty(viewPost))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
         else if (!empty(viewThread))
-            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");

         if (!empty(offset))
-            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");

         if (!empty(tags))
-            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");

         if (!empty(filteredAuthor))
-            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append("&amp;");

         addAuthActionParams(buf);
         return buf.toString();

@@ -1024,24 +1052,23 @@ public abstract class BaseServlet extends HttpServlet {
         }
         buf.append('?').append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=');
         buf.append(expandTo.getKeyHash().toBase64()).append('/');
-        buf.append(expandTo.getEntryId()).append('&');
+        buf.append(expandTo.getEntryId()).append("&amp;");

         buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=');
         buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
-        buf.append(node.getEntry().getEntryId()).append('&');
+        buf.append(node.getEntry().getEntryId()).append("&amp;");

         if (!empty(offset))
-            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");

         if (!empty(tags))
-            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");

         if (!empty(author)) {
-            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append('&');
+            buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&amp;");
             if (authorOnly)
-                buf.append(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR).append("=true&");
+                buf.append(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR).append("=true&amp;");
         }

         buf.append("#").append(node.getEntry().toString());
|
||||
return buf.toString();
|
||||
}
|
||||
|
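These hunks all make the same change: every query-string separator written into the page moves from the raw character '&' to the HTML entity "&amp;", so the generated links validate as HTML (the 2006-03-05 changelog entry below credits this round of Syndie HTML fixes). A standalone sketch of the pattern; the class and helper names here are illustrative, not Syndie's:

public class UrlParamSketch {
    // Append one name=value pair using the HTML-escaped separator.
    static void appendParam(StringBuilder buf, String name, String value) {
        // In HTML output a raw '&' should be written as "&amp;";
        // browsers decode it back to '&' before making the request.
        buf.append(name).append('=').append(value).append("&amp;");
    }

    public static void main(String[] args) {
        StringBuilder buf = new StringBuilder("threads.jsp?");
        appendParam(buf, "offset", "10");
        appendParam(buf, "tags", "i2p");
        System.out.println(buf); // threads.jsp?offset=10&amp;tags=i2p&amp;
    }
}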
@@ -62,6 +62,8 @@ public class RemoteArchiveBean {
}

private boolean ignoreBlog(User user, Hash blog) {
if (BlogManager.instance().isBanned(blog))
return true;
PetNameDB db = user.getPetNameDB();
PetName pn = db.getByLocation(blog.toBase64());
return ( (pn != null) && (pn.isMember("Ignore")) );

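This new ignoreBlog() check is what makes the "purge and ban" feature stick: a banned author, or one whose petname carries the "Ignore" group, is filtered before any remote fetch happens. A self-contained sketch of that rule, with stand-in collections instead of the real User/PetNameDB API:

import java.util.*;

public class IgnoreFilterSketch {
    static Set<String> banned = new HashSet<>(Arrays.asList("blogA"));
    static Map<String, List<String>> petnameGroups = new HashMap<>();

    static boolean ignoreBlog(String blogHash) {
        if (banned.contains(blogHash))
            return true;                       // "purge and ban" list
        List<String> groups = petnameGroups.get(blogHash);
        return groups != null && groups.contains("Ignore");
    }

    public static void main(String[] args) {
        petnameGroups.put("blogB", Arrays.asList("Ignore"));
        System.out.println(ignoreBlog("blogA")); // true  (banned)
        System.out.println(ignoreBlog("blogB")); // true  (ignored via petname)
        System.out.println(ignoreBlog("blogC")); // false (syndicated normally)
    }
}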
@@ -85,4 +85,360 @@ td.s_detail_summDetail {
td.s_summary_summ {
font-size: 0.8em;
background-color: #DDDDFF;
}

/* following are doubtful salmon's contributions */
body {
margin : 0px;
padding : 0px;
width: 99%;
font-family : Arial, sans-serif, Helvetica;
background-color : #FFF;
color : black;
font-size : 100%;

/* we've avoided Tantek Hacks so far,
** but we can't avoid using the non-w3c method of
** box rendering (and therefore one of mozilla's
** proprietary -moz properties, which hopefully they'll
** drop soon).
*/
-moz-box-sizing : border-box;
box-sizing : border-box;
}
a:link{color:#007}
a:visited{color:#606}
a:hover{color:#720}
a:active{color:#900}

select {
min-width: 1.5em;
}
.overallTable {
border-spacing: 0px;
border-collapse: collapse;
float: left;
}
.topNav {
background-color: #BBB;
}
.topNav_user {
text-align: left;
float: left;
display: inline;
}
.topNav_admin {
text-align: right;
float: right;
margin: 0 5px 0 0;
display: inline;
}
.controlBar {
margin: 0em;
padding: 0em;
/* border: medium solid #DDF; */
background-color: #EEF;
color: inherit;
font-size: small;
clear: left; /* fixes a bug in Opera */
}
.controlBarRight {
text-align: right;
}
.threadEven {
background-color: #FFF;
white-space: nowrap;
}
.threadOdd {
background-color: #FFC;
white-space: nowrap;
}
.threadLeft {
text-align: left;
align: left;
}
.threadNav {
background-color: #EEF;
border: medium solid #CCF;
}
.threadNavRight {
text-align: right;
float: right;
background-color: #EEF;
}
.rightOffset {
float: right;
margin: 0 5px 0 0;
display: inline;
}
.threadInfoLeft {
float: left;
margin: 5px 0px 0 0;
display: inline;
}
.threadInfoRight {
float: right;
margin: 0 5px 0 0;
display: inline;
}
.postMeta {
border-top: 1px solid black;
background-color: #FFB;
}
.postMetaSubject {
text-align: left;
font-size: large;
}
.postMetaLink {
text-align: right;
}
.postDetails {
background-color: #FFC;
}
.postReply {
background-color: #CCF;
}
.postReplyText {
background-color: #CCF;
}
.postReplyOptions {
background-color: #CCF;
}
.syndieBlogTopNav {
padding: 0.5em;
width: 98%;
border: medium solid #CCF;
background-color: #EEF;
font-size: small;
}
.syndieBlogTopNavUser {
text-align: left;
}
.syndieBlogTopNavAdmin {
text-align: right;
}
.syndieBlogHeader {
width: 100%;
font-size: 1.4em;
background-color: #000;
text-align: Left;
float: Left;
}
.syndieBlogHeader a {
color: #FFF;
padding: 4px;
}
.syndieBlogHeader a:hover {
color:#88F;
padding: 4px;
}
.syndieBlogLogo {
float: left;
display: inline;
}
.syndieBlogLinks {
width: 20%;
float: left;
}
.syndieBlogLinkGroup {
font-size: 0.8em;
background-color: #DDD;
border: 1px solid black;
margin: 5px;
padding: 2px;
}
.syndieBlogLinkGroup ul {
list-style: none;
}
.syndieBlogLinkGroup li {
}
.syndieBlogLinkGroupName {
font-weight: bold;
width: 100%;
border-bottom: 1px dashed black;
display: block;
}
.syndieBlogPostInfoGroup {
font-size: 0.8em;
background-color: #FFEA9F;
border: 1px solid black;
margin: 5px;
padding: 2px;
}
.syndieBlogPostInfoGroup ol {
list-style: none;
}
.syndieBlogPostInfoGroup li {
}
.syndieBlogPostInfoGroup li a {
display: block;
}
.syndieBlogPostInfoGroupName {
font-weight: bold;
width: 100%;
border-bottom: 1px dashed black;
display: block;
}
.syndieBlogMeta {
text-align: left;
font-size: 0.8em;
background-color: #DDD;
border: 1px solid black;
margin: 5px;
padding: 2px;
}
.syndieBlogBody {
width: 80%;
float: left;
}
.syndieBlogPost {
border: 1px solid black;
margin-top: 5px;
margin-right: 5px;
}
.syndieBlogPostHeader {
background-color: #FFB;
padding: 2px;
}
.syndieBlogPostSubject {
font-weight: bold;
}
.syndieBlogPostFrom {
text-align: right;
}
.syndieBlogPostSummary {
background-color: #FFF;
padding: 2px;
}
.syndieBlogPostDetails {
background-color: #FFC;
padding: 2px;
}
.syndieBlogNav {
text-align: center;
}
.syndieBlogComments {
border: none;
margin-top: 5px;
margin-left: 0px;
float: left;
}
.syndieBlogComments ul {
list-style: none;
margin-left: 10px;
}
.syndieBlogCommentInfoGroup {
font-size: 0.8em;
margin-right: 5px;
}
.syndieBlogCommentInfoGroup ol {
list-style: none;
}
.syndieBlogCommentInfoGroup li {
}
.syndieBlogCommentInfoGroup li a {
display: block;
}
.syndieBlogCommentInfoGroupName {
font-size: 0.8em;
font-weight: bold;
}

.syndieBlogFavorites {
float: left;
margin: 5px 0px 0 0;
display: inline;
}
.syndieBlogList {
float: right;
margin: 5px 0px 0 0;
display: inline;
}
.b_topnavUser {
text-align: right;
background-color: #CCD;
}
.b_topnavHome {
background-color: #CCD;
color: #000;
width: 50px;
text-align: left;
}
.b_topnav {
background-color: #CCD;
}
.b_content {
}
.s_summary_overall {
}
.s_detail_overall {
}
.s_detail_subject {
font-size: 0.8em;
text-align: left;
background-color: #CCF;
}
.s_detail_quote {
margin-left: 1em;
border: 1px solid #DBDBDB;
background-color: #E0E0E0;
}
.s_detail_italic {
font-style: italic;
}
.s_detail_bold {
font-style: normal;
font-weight: bold;
}
.s_detail_underline {
font-style: normal;
text-decoration: underline;
}
.s_detail_meta {
font-size: 0.8em;
text-align: right;
background-color: #CCF;
}

.s_summary_subject {
font-size: 0.8em;
text-align: left;
background-color: #CCF;
}
.s_summary_meta {
font-size: 0.8em;
text-align: right;
background-color: #CCF;
}
.s_summary_quote {
margin-left: 1em;
border: 1px solid #DBDBDB;
background-color: #E0E0E0;
}
.s_summary_italic {
font-style: italic;
}
.s_summary_bold {
font-style: normal;
font-weight: bold;
}
.s_summary_underline {
font-style: normal;
text-decoration: underline;
}
.s_summary_summDetail {
font-size: 0.8em;
}
.s_detail_summDetail {
}
.s_detail_summDetailBlog {
}
.s_detail_summDetailBlogLink {
}
td.s_detail_summDetail {
background-color: #CCF;
}
td.s_summary_summ { width: 80%;
font-size: 0.8em;
background-color: #CCF;
}
@@ -169,7 +169,14 @@ public class SysTray implements SysTrayMenuListener {
_itemOpenConsole.addSysTrayMenuListener(this);
// _sysTrayMenu.addItem(_itemShutdown);
// _sysTrayMenu.addSeparator();
_sysTrayMenu.addItem(_itemSelectBrowser);
// hide it, as there have been reports of b0rked behavior on some JVMs.
// specifically, that on XP & sun1.5.0.1, a user launching i2p w/out the
// service wrapper would create netDb/, peerProfiles/, and other files
// underneath each directory browsed to - as if the router's "." directory
// is changing whenever the itemSelectBrowser's JFileChooser changed
// directories. This has not been reproduced or confirmed yet, but is
// pretty scary, and this function isn't too necessary.
//_sysTrayMenu.addItem(_itemSelectBrowser);
_sysTrayMenu.addItem(_itemOpenConsole);
refreshDisplay();
}

@@ -1,7 +1,7 @@
package gnu.crypto.hash;

// ----------------------------------------------------------------------------
// $Id: Sha256.java,v 1.2 2005/10/06 04:24:14 rsdio Exp $
// $Id: Sha256Standalone.java,v 1.1 2006/02/26 16:30:59 jrandom Exp $
//
// Copyright (C) 2003 Free Software Foundation, Inc.
//
@@ -59,7 +59,7 @@ package gnu.crypto.hash;
* renamed from Sha256 to avoid conflicts with JVMs using gnu-crypto as their JCE
* provider.
*
* @version $Revision: 1.2 $
* @version $Revision: 1.1 $
*/
public class Sha256Standalone extends BaseHash {
// Constants and variables
@@ -127,10 +127,12 @@ public class Sha256Standalone extends BaseHash {
// Class methods
// -------------------------------------------------------------------------

/*
public static final int[] G(int hh0, int hh1, int hh2, int hh3, int hh4,
int hh5, int hh6, int hh7, byte[] in, int offset) {
return sha(hh0, hh1, hh2, hh3, hh4, hh5, hh6, hh7, in, offset);
}
*/

// Instance methods
// -------------------------------------------------------------------------
@@ -143,17 +145,19 @@ public class Sha256Standalone extends BaseHash {

// Implementation of concrete methods in BaseHash --------------------------

private int transformResult[] = new int[8];
protected void transform(byte[] in, int offset) {
int[] result = sha(h0, h1, h2, h3, h4, h5, h6, h7, in, offset);
//int[] result = sha(h0, h1, h2, h3, h4, h5, h6, h7, in, offset);
sha(h0, h1, h2, h3, h4, h5, h6, h7, in, offset, transformResult);

h0 = result[0];
h1 = result[1];
h2 = result[2];
h3 = result[3];
h4 = result[4];
h5 = result[5];
h6 = result[6];
h7 = result[7];
h0 = transformResult[0];
h1 = transformResult[1];
h2 = transformResult[2];
h3 = transformResult[3];
h4 = transformResult[4];
h5 = transformResult[5];
h6 = transformResult[6];
h7 = transformResult[7];
}

protected byte[] padBuffer() {
@@ -218,8 +222,8 @@ public class Sha256Standalone extends BaseHash {

// SHA specific methods ----------------------------------------------------

private static final synchronized int[]
sha(int hh0, int hh1, int hh2, int hh3, int hh4, int hh5, int hh6, int hh7, byte[] in, int offset) {
private static final synchronized void
sha(int hh0, int hh1, int hh2, int hh3, int hh4, int hh5, int hh6, int hh7, byte[] in, int offset, int out[]) {
int A = hh0;
int B = hh1;
int C = hh2;
@@ -255,8 +259,18 @@ public class Sha256Standalone extends BaseHash {
A = T + T2;
}

/*
return new int[] {
hh0 + A, hh1 + B, hh2 + C, hh3 + D, hh4 + E, hh5 + F, hh6 + G, hh7 + H
};
*/
out[0] = hh0 + A;
out[1] = hh1 + B;
out[2] = hh2 + C;
out[3] = hh3 + D;
out[4] = hh4 + E;
out[5] = hh5 + F;
out[6] = hh6 + G;
out[7] = hh7 + H;
}
}

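The Sha256Standalone change above swaps a returned int[8] for a caller-supplied output array, so the compression function allocates nothing per 64-byte block; that matters because the Fortuna PRNG hashes continuously. A minimal standalone sketch of the buffer-reuse pattern (the compress() body is a stand-in, not the real SHA-256 rounds):

public class BufferReuseSketch {
    private final int[] state = new int[8];
    private final int[] scratch = new int[8];   // reused for every block

    void transform(byte[] in, int offset) {
        // old shape: int[] result = compress(...) allocated a fresh array per block
        compress(state, in, offset, scratch);   // new shape: fill the scratch buffer
        System.arraycopy(scratch, 0, state, 0, 8);
    }

    // Stand-in for the real SHA-256 round function.
    private static void compress(int[] h, byte[] in, int off, int[] out) {
        for (int i = 0; i < 8; i++)
            out[i] = h[i] + in[off];
    }

    public static void main(String[] args) {
        new BufferReuseSketch().transform(new byte[64], 0); // one block, zero garbage
    }
}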
@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.54 $ $Date: 2006/02/21 10:20:17 $";
public final static String VERSION = "0.6.1.12";
public final static String ID = "$Revision: 1.55 $ $Date: 2006/02/27 14:05:41 $";
public final static String VERSION = "0.6.1.13";

public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

@@ -26,6 +26,8 @@ public class Rate {
private volatile double _lifetimeTotalValue;
private volatile long _lifetimeEventCount;
private volatile long _lifetimeTotalEventTime;
private RateSummaryListener _summaryListener;
private RateStat _stat;

private volatile long _lastCoalesceDate;
private long _creationDate;
@@ -108,6 +110,9 @@ public class Rate {
public long getPeriod() {
return _period;
}

public RateStat getRateStat() { return _stat; }
public void setRateStat(RateStat rs) { _stat = rs; }

/**
*
@@ -175,12 +180,14 @@ public class Rate {
}
}

/** 2s is plenty of slack to deal with slow coalescing (across many stats) */
private static final int SLACK = 2000;
public void coalesce() {
long now = now();
synchronized (_lock) {
long measuredPeriod = now - _lastCoalesceDate;
if (measuredPeriod < _period) {
// no need to coalesce
if (measuredPeriod < _period - SLACK) {
// no need to coalesce (assuming we only try to do so once per minute)
return;
}

@@ -189,7 +196,7 @@ public class Rate {
// how much were we off by? (so that we can sample down the measured values)
double periodFactor = measuredPeriod / (double)_period;
_lastTotalValue = _currentTotalValue / periodFactor;
_lastEventCount = (long) (_currentEventCount / periodFactor);
_lastEventCount = (long) ( (_currentEventCount + periodFactor - 1) / periodFactor);
_lastTotalEventTime = (long) (_currentTotalEventTime / periodFactor);
_lastCoalesceDate = now;

@@ -203,8 +210,13 @@ public class Rate {
_currentEventCount = 0;
_currentTotalEventTime = 0;
}
if (_summaryListener != null)
_summaryListener.add(_lastTotalValue, _lastEventCount, _lastTotalEventTime, _period);
}

public void setSummaryListener(RateSummaryListener listener) { _summaryListener = listener; }
public RateSummaryListener getSummaryListener() { return _summaryListener; }

/** what was the average value across the events in the last period? */
public double getAverageValue() {
if ((_lastTotalValue != 0) && (_lastEventCount > 0))

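The subtle part of the coalesce hunk is the event-count line: plain truncating division can make a single event disappear whenever the measured period overshoots the nominal one, while the new expression rounds up. A quick check with one second of overshoot on a 60s period:

public class CeilingDivSketch {
    public static void main(String[] args) {
        double periodFactor = 61000 / 60000.0;  // ~1.0167: measured 61s against a 60s period
        long events = 1;
        long old = (long) (events / periodFactor);                          // 0: the event vanishes
        long fixed = (long) ((events + periodFactor - 1) / periodFactor);   // 1: rounded up
        System.out.println(old + " vs " + fixed);
    }
}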
@@ -27,8 +27,10 @@ public class RateStat {
|
||||
_description = description;
|
||||
_groupName = group;
|
||||
_rates = new Rate[periods.length];
|
||||
for (int i = 0; i < periods.length; i++)
|
||||
for (int i = 0; i < periods.length; i++) {
|
||||
_rates[i] = new Rate(periods[i]);
|
||||
_rates[i].setRateStat(this);
|
||||
}
|
||||
}
|
||||
public void setStatLog(StatLog sl) { _statLog = sl; }
|
||||
|
||||
@@ -159,6 +161,7 @@ public class RateStat {
|
||||
_rates[i].load(props, curPrefix, treatAsCurrent);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
_rates[i] = new Rate(period);
|
||||
_rates[i].setRateStat(this);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Rate for " + prefix + " is corrupt, reinitializing that period");
|
||||
}
|
||||
|
||||
14
core/java/src/net/i2p/stat/RateSummaryListener.java
Normal file
14
core/java/src/net/i2p/stat/RateSummaryListener.java
Normal file
@@ -0,0 +1,14 @@
|
||||
package net.i2p.stat;
|
||||
|
||||
/**
|
||||
* Receive the state of the rate when its coallesced
|
||||
*/
|
||||
public interface RateSummaryListener {
|
||||
/**
|
||||
* @param totalValue sum of all event values in the most recent period
|
||||
* @param eventCount how many events occurred
|
||||
* @param totalEventTime how long the events were running for
|
||||
* @param period how long this period is
|
||||
*/
|
||||
void add(double totalValue, long eventCount, double totalEventTime, long period);
|
||||
}
|
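A minimal listener sketch to show how the new interface plugs in. Only RateSummaryListener.add() and the setSummaryListener()/setRateStat() accessors come from the diffs above; the printing accumulator is a stand-in for the jrobin-backed one the changelog below describes:

import net.i2p.stat.RateSummaryListener;

public class PrintingSummaryListener implements RateSummaryListener {
    public void add(double totalValue, long eventCount, double totalEventTime, long period) {
        double avg = (eventCount > 0) ? totalValue / eventCount : 0d;
        // fires once per coalesce, i.e. once per period
        System.out.println(period + "ms period: " + eventCount + " events, avg " + avg);
    }
}

Wiring it up uses the accessor the Rate diff adds: rate.setSummaryListener(new PrintingSummaryListener()); after that, every coalesce pushes one summarized sample to the listener instead of the console having to poll raw rates.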
114
history.txt
114
history.txt
@@ -1,4 +1,116 @@
$Id: history.txt,v 1.421 2006/02/26 16:30:58 jrandom Exp $
$Id: history.txt,v 1.439 2006/03/25 18:50:51 jrandom Exp $

* 2006-03-26 0.6.1.13 released

2006-03-25 jrandom
* Added a simple purge and ban of syndie authors, shown as the
"Purge and ban" button on the addressbook for authors that are already
on the ignore list. All of their entries and metadata are deleted from
the archive, and they are transparently filtered from any remote
syndication (so no user on the syndie instance will pull any new posts
from them)
* More strict tunnel join throttling when congested

2006-03-24 jrandom
* Try to desync tunnel building near startup (thanks Complication!)
* If we are highly congested, fall back on only querying the floodfill
netDb peers, and only storing to those peers too
* Cleaned up the floodfill-only queries

2006-03-21 jrandom
* Avoid a very strange (unconfirmed) bug that people using the systray's
browser picker dialog could cause by disabling the GUI-based browser
picker.
* Cut down on subsequent streaming lib reset packets transmitted
* Use a larger MTU more often
* Allow netDb searches to query shitlisted peers, as the queries are
indirect.
* Add an option to disable non-floodfill netDb searches (non-floodfill
searches are used by default, but can be disabled by adding
netDb.floodfillOnly=true to the advanced config)

2006-03-20 jrandom
* Fix to allow for some slack when coalescing stats
* Work around some oddball errors

2006-03-18 jrandom
* Added a new graphs.jsp page to show all of the stats being harvested

2006-03-18 jrandom
* Made the netDb search load limitations a little less stringent
* Add support for specifying the number of periods to be plotted on the
graphs - e.g. to plot only the last hour of a stat that is averaged at
the 60 second period, add &periodCount=60

2006-03-17 jrandom
* Add support for graphing the event count as well as the average stat
value (done by adding &showEvents=true to the URL). Also supports
hiding the legend (&hideLegend=true), the grid (&hideGrid=true), and
the title (&hideTitle=true).
* Removed an unnecessary arbitrary filter on the profile organizer so we
can pick high capacity and fast peers more appropriately

2006-03-16 jrandom
* Integrate basic hooks for jrobin (http://jrobin.org) into the router
console. Selected stats can be harvested automatically and fed into
in-memory RRD databases, and those databases can be served up either as
PNG images or as RRDtool compatible XML dumps (see oldstats.jsp for
details). A base set of stats are harvested by default, but an
alternate list can be specified by setting the 'stat.summaries' list on
the advanced config. For instance:
stat.summaries=bw.recvRate.60000,bw.sendRate.60000
* HTML tweaking for the general config page (thanks void!)
* Odd NPE fix (thanks Complication!)

2006-03-15 Complication
* Trim out an old, inactive IP second-guessing method
(thanks for spotting, Anonymous!)

2006-03-15 jrandom
* Further stat cleanup
* Keep track of how many peers we are actively trying to communicate with,
beyond those who are just trying to communicate with us.
* Further router tunnel participation throttle revisions to avoid spurious
rejections
* Rate stat display cleanup (thanks ripple!)
* Don't even try to send messages that have been queued too long

2006-03-05 zzz
* Remove the +++--- from the logs on i2psnark startup

2006-03-05 jrandom
* HTML fixes in Syndie to work better with opera (thanks shaklen!)
* Give netDb lookups to floodfill peers more time, as they are much more
likely to succeed (thereby cutting down on the unnecessary netDb
searches outside the floodfill set)
* Fix to the SSU IP detection code so we won't use introducers when we
don't need them (thanks Complication!)
* Add a brief shitlist to i2psnark so it doesn't keep on trying to reach
peers given to it
* Don't let netDb searches wander across too many peers
* Don't use the 1s bandwidth usage in the tunnel participation throttle,
as it's too volatile to have much meaning.
* Don't bork if a Syndie post is missing an entry.sml

2006-03-05 Complication
* Reduce exposed statistical information,
to make build and uptime tracking more expensive

2006-03-04 Complication
* Fix the announce URL of orion's tracker in Snark sources

2006-03-03 Complication
* Explicit check for an index out of bounds exception while parsing
an inbound IRC command (implicit check was there already)

2006-03-01 jrandom
* More aggressive tunnel throttling as we approach our bandwidth limit,
and throttle based off periods wider than 1 second.
* Included Doubtful Salmon's syndie stylings (thanks!)

2006-02-27 zzz
* Update error page templates to add \r, Connection: close, and
Proxy-connection: close to headers.

* 2006-02-27 0.6.1.12 released

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/02/21 10:20:21 $">
<i2p.release version="0.6.1.12" date="2006/02/27" minVersion="0.6"
<i2p.news date="$Date: 2006/02/27 14:05:41 $">
<i2p.release version="0.6.1.13" date="2006/03/26" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"

@@ -4,7 +4,7 @@

<info>
<appname>i2p</appname>
<appversion>0.6.1.12</appversion>
<appversion>0.6.1.13</appversion>
<authors>
<author name="I2P" email="support@i2p.net"/>
</authors>

@@ -1,7 +1,9 @@
HTTP/1.1 409 Conflict
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache

HTTP/1.1 409 Conflict
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close

<html><head>
<title>Destination key conflict</title>
<style type='text/css'>

@@ -1,7 +1,9 @@
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache

HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close

<html><head>
<title>Eepsite not reachable</title>
<style type='text/css'>

@@ -1,7 +1,9 @@
HTTP/1.1 400 Destination Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache

HTTP/1.1 400 Destination Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close

<html><head>
<title>Invalid eepsite destination</title>
<style type='text/css'>

@@ -1,7 +1,9 @@
HTTP/1.1 404 Domain Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache

HTTP/1.1 404 Domain Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close

<html><head>
<title>Eepsite unknown</title>
<style type='text/css'>

@@ -1,7 +1,9 @@
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache

HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close

<html><head>
<title>Outproxy Not Found</title>
<style type='text/css'>

10
news.xml
10
news.xml
@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/02/21 22:34:02 $">
<i2p.release version="0.6.1.12" date="2006/02/27" minVersion="0.6"
<i2p.news date="$Date: 2006/03/21 21:15:13 $">
<i2p.release version="0.6.1.13" date="2006/03/26" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -15,9 +15,9 @@
with some useful SSU and peer selection fixes - please upgrade as soon as possible.
<br>
•
2006-02-21:
<a href="http://dev.i2p/pipermail/i2p/2006-February/001264.html">status notes</a>
2006-03-21:
<a href="http://dev.i2p/pipermail/i2p/2006-March/001271.html">status notes</a>
and
<a href="http://www.i2p/meeting169">meeting log</a>
<a href="http://www.i2p/meeting173">meeting log</a>
<br>
</i2p.news>

@@ -29,6 +29,8 @@ public abstract class CommSystemFacade implements Service {
public Set createAddresses() { return new HashSet(); }

public int countActivePeers() { return 0; }
public int countActiveSendPeers() { return 0; }
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }

/**

@@ -483,10 +483,6 @@ public class LoadTestManager {
// length == #hops+1 (as it includes the creator)
if (cfg.getLength() < 2)
return false;
// only load test the client tunnels
// XXX why?
////if (cfg.getTunnel().getDestination() == null)
////    return false;
_active.add(cfg);
return true;
} else {
@@ -496,18 +492,26 @@ public class LoadTestManager {

private boolean bandwidthOverloaded() {
int msgLoadBps = CONCURRENT_MESSAGES
* 5 // message size
* 5 * 1024 // message size
/ 10; // 10 seconds before timeout & retransmission
msgLoadBps *= 2; // buffer
if (_context.bandwidthLimiter().getSendBps()/1024d + (double)msgLoadBps >= _context.bandwidthLimiter().getOutboundKBytesPerSecond())
int curBps = getBps();
if ((curBps + msgLoadBps)/1024 >= _context.bandwidthLimiter().getOutboundKBytesPerSecond())
return true;
if (_context.bandwidthLimiter().getReceiveBps()/1024d + (double)msgLoadBps >= _context.bandwidthLimiter().getInboundKBytesPerSecond())
if ((curBps + msgLoadBps)/1024 >= _context.bandwidthLimiter().getInboundKBytesPerSecond())
return true;
if (_context.throttle().getMessageDelay() > 1000)
return true;
return false;
}

private int getBps() {
int used1s = RouterThrottleImpl.get1sRate(_context);
int used1m = RouterThrottleImpl.get1mRate(_context);
int used5m = RouterThrottleImpl.get5mRate(_context);
return Math.max(used1s, Math.max(used1m, used5m));
}

private class CreatedJob extends JobImpl {
private LoadTestTunnelConfig _cfg;
public CreatedJob(RouterContext ctx, LoadTestTunnelConfig cfg) {

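The corrected load estimate is easiest to see with numbers. CONCURRENT_MESSAGES is defined elsewhere in LoadTestManager, so the value below is only an assumption; the formula itself is from the hunk:

public class LoadEstimateSketch {
    public static void main(String[] args) {
        int concurrentMessages = 50;                          // assumed value of CONCURRENT_MESSAGES
        int msgLoadBps = concurrentMessages * 5 * 1024 / 10;  // 25600 Bps: 5KB messages, 10s window
        msgLoadBps *= 2;                                      // 51200 Bps with the safety buffer
        int oldEstimate = concurrentMessages * 5 / 10 * 2;    // the dropped "* 5" version: 50 Bps
        System.out.println(msgLoadBps + " vs " + oldEstimate);
    }
}

Against KBps-scale limits the old 50 Bps figure was noise, so bandwidthOverloaded() effectively never tripped on message load; the new estimate also compares against measured usage via getBps() rather than the instantaneous limiter reading.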
@@ -245,7 +245,8 @@ public class Router {
_context.tunnelDispatcher().startup();
_context.inNetMessagePool().startup();
startupQueue();
_context.jobQueue().addJob(new CoalesceStatsJob(_context));
//_context.jobQueue().addJob(new CoalesceStatsJob(_context));
SimpleTimer.getInstance().addEvent(new CoalesceStatsEvent(_context), 0);
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob(_context));
warmupCrypto();
_sessionKeyPersistenceHelper.startup();
@@ -1011,17 +1012,36 @@ public class Router {
* coalesce the stats framework every minute
*
*/
class CoalesceStatsJob extends JobImpl {
public CoalesceStatsJob(RouterContext ctx) {
super(ctx);
class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
private RouterContext _ctx;
public CoalesceStatsEvent(RouterContext ctx) {
_ctx = ctx;
ctx.statManager().createRateStat("bw.receiveBps", "How fast we receive data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendBps", "How fast we send data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendRate", "Low level bandwidth send rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("bw.recvRate", "Low level bandwidth receive rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.activeSendPeers", "How many peers we've sent to this minute", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
}
public String getName() { return "Coalesce stats"; }
public void runJob() {
private RouterContext getContext() { return _ctx; }
public void timeReached() {
int active = getContext().commSystem().countActivePeers();
getContext().statManager().addRateData("router.activePeers", active, 60*1000);

int activeSend = getContext().commSystem().countActiveSendPeers();
getContext().statManager().addRateData("router.activeSendPeers", activeSend, 60*1000);

int fast = getContext().profileOrganizer().countFastPeers();
getContext().statManager().addRateData("router.fastPeers", fast, 60*1000);

int highCap = getContext().profileOrganizer().countHighCapacityPeers();
getContext().statManager().addRateData("router.highCapacityPeers", highCap, 60*1000);

getContext().statManager().addRateData("bw.sendRate", (long)getContext().bandwidthLimiter().getSendBps(), 0);
getContext().statManager().addRateData("bw.recvRate", (long)getContext().bandwidthLimiter().getReceiveBps(), 0);

getContext().statManager().coalesceStats();

RateStat receiveRate = getContext().statManager().getRate("transport.receiveMessageSize");
@@ -1043,17 +1063,8 @@ class CoalesceStatsJob extends JobImpl {
getContext().statManager().addRateData("bw.sendBps", (long)KBps, 60*1000);
}
}

int active = getContext().commSystem().countActivePeers();
getContext().statManager().addRateData("router.activePeers", active, 60*1000);

int fast = getContext().profileOrganizer().countFastPeers();
getContext().statManager().addRateData("router.fastPeers", fast, 60*1000);

int highCap = getContext().profileOrganizer().countHighCapacityPeers();
getContext().statManager().addRateData("router.highCapacityPeers", highCap, 60*1000);

requeue(60*1000);

SimpleTimer.getInstance().addEvent(this, 60*1000);
}
}


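The startup hunk above replaces a job-queue job with a SimpleTimer event that re-arms itself, which keeps the once-a-minute stat coalescing off the contended router job queue. A sketch of that pattern, using only the addEvent()/timeReached() calls visible in the diff; the heartbeat class itself is illustrative:

import net.i2p.util.SimpleTimer;

public class HeartbeatSketch implements SimpleTimer.TimedEvent {
    public void timeReached() {
        // periodic work goes here (CoalesceStatsEvent coalesces the stats)
        SimpleTimer.getInstance().addEvent(this, 60*1000);  // re-arm for the next minute
    }

    public static void main(String[] args) {
        // fire immediately, exactly as Router.startup() schedules CoalesceStatsEvent
        SimpleTimer.getInstance().addEvent(new HeartbeatSketch(), 0);
    }
}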
@@ -88,36 +88,8 @@ class RouterThrottleImpl implements RouterThrottle {
}

long lag = _context.jobQueue().getMaxLag();
/*
RateStat rs = _context.statManager().getRate("router.throttleNetworkCause");
Rate r = null;
if (rs != null)
r = rs.getRate(60*1000);
long throttleEvents = (r != null ? r.getCurrentEventCount() + r.getLastEventCount() : 0);
if (throttleEvents > THROTTLE_EVENT_LIMIT) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refusing tunnel request with the job lag of " + lag
+ " since there have been " + throttleEvents
+ " throttle events in the last 15 minutes or so");
_context.statManager().addRateData("router.throttleTunnelCause", lag, lag);
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
*/

RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
Rate r = null;
/*
if (rs != null)
r = rs.getRate(1*60*1000);
double processTime = (r != null ? r.getAverageValue() : 0);
if (processTime > 2000) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refusing tunnel request with the job lag of " + lag
+ " since the 10 minute message processing time is too slow (" + processTime + ")");
_context.statManager().addRateData("router.throttleTunnelProcessingTime10m", (long)processTime, (long)processTime);
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
*/
if (rs != null)
r = rs.getRate(60*1000);
double processTime = (r != null ? r.getAverageValue() : 0);
@@ -129,27 +101,6 @@ class RouterThrottleImpl implements RouterThrottle {
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}

/*
rs = _context.statManager().getRate("transport.sendMessageFailureLifetime");
r = null;
if (rs != null)
r = rs.getRate(60*1000);
double failCount = (r != null ? r.getCurrentEventCount() + r.getLastEventCount() : 0);
if (failCount > 100) {
long periods = r.getLifetimePeriods();
long maxFailCount = r.getExtremeEventCount();
if ( (periods > 0) && (maxFailCount > 100) ) {
if (_context.random().nextInt((int)maxFailCount) <= failCount) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refusing tunnel request with the job lag of " + lag
+ " since the 1 minute message failure count is too high (" + failCount + "/" + maxFailCount + ")");
_context.statManager().addRateData("router.throttleTunnelFailCount1m", (long)failCount, (long)maxFailCount);
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
}
}
*/

int numTunnels = _context.tunnelManager().getParticipatingCount();

if (numTunnels > getMinThrottleTunnels()) {
@@ -244,9 +195,7 @@ class RouterThrottleImpl implements RouterThrottle {
double messagesPerTunnel = (r != null ? r.getAverageValue() : 0d);
if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
int participatingTunnels = (r != null ? (int) (r.getLastEventCount() + r.getCurrentEventCount()) : 0);
if (participatingTunnels <= 0)
participatingTunnels = _context.tunnelManager().getParticipatingCount();
int participatingTunnels = _context.tunnelManager().getParticipatingCount();
double bytesAllocated = messagesPerTunnel * participatingTunnels * 1024;

if (!allowTunnel(bytesAllocated, numTunnels)) {
@@ -261,8 +210,35 @@ class RouterThrottleImpl implements RouterThrottle {
+ " tunnels with lag of " + lag + ")");
return TUNNEL_ACCEPT;
}

static int get1sRate(RouterContext ctx) {
return (int)Math.max(ctx.bandwidthLimiter().getSendBps(), ctx.bandwidthLimiter().getReceiveBps());
}
static int get1mRate(RouterContext ctx) {
int send = 0;
RateStat rs = ctx.statManager().getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(1*60*1000).getAverageValue();
int recv = 0;
rs = ctx.statManager().getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(1*60*1000).getAverageValue();
return Math.max(send, recv);
}
static int get5mRate(RouterContext ctx) {
int send = 0;
RateStat rs = ctx.statManager().getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(5*60*1000).getAverageValue();
int recv = 0;
rs = ctx.statManager().getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(5*60*1000).getAverageValue();
return Math.max(send, recv);
}

private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 600; // 1KBps
private static final int MIN_AVAILABLE_BPS = 4*1024; // always leave at least 4KBps free when allowing

/**
* with bytesAllocated already accounted for across the numTunnels existing
@@ -272,34 +248,52 @@ class RouterThrottleImpl implements RouterThrottle {
*/
private boolean allowTunnel(double bytesAllocated, int numTunnels) {
int maxKBps = Math.min(_context.bandwidthLimiter().getOutboundKBytesPerSecond(), _context.bandwidthLimiter().getInboundKBytesPerSecond());
int used = (int)Math.max(_context.bandwidthLimiter().getSendBps(), _context.bandwidthLimiter().getReceiveBps());
int used1s = 0; //get1sRate(_context); // don't throttle on the 1s rate, it's too volatile
int used1m = get1mRate(_context);
int used5m = 0; //get5mRate(_context); // don't throttle on the 5m rate, as that'd hide available bandwidth
int used = Math.max(Math.max(used1s, used1m), used5m);
int availBps = (int)(((maxKBps*1024) - used) * getSharePercentage());

_context.statManager().addRateData("router.throttleTunnelBytesUsed", used, maxKBps);
_context.statManager().addRateData("router.throttleTunnelBytesAllowed", availBps, (long)bytesAllocated);

if (maxKBps <= 8) {
// let's be more conservative for dialup users and assume 1KBps per tunnel
return ( (numTunnels + 1)*1024 < availBps);
/*
if (availBps <= 8*1024) {
// let's be more conservative for people near their limit and assume 1KBps per tunnel
boolean rv = ( (numTunnels + 1)*1024 < availBps);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Nearly full router (" + availBps + ") with " + numTunnels + " tunnels, allow a new request? " + rv);
return rv;
}
*/

double growthFactor = ((double)(numTunnels+1))/(double)numTunnels;
double toAllocate = (numTunnels > 0 ? bytesAllocated * growthFactor : 0);

double allocatedKBps = toAllocate / (10 * 60 * 1024);
double pctFull = allocatedKBps / availBps;
double allocatedBps = toAllocate / (10 * 60);
double pctFull = allocatedBps / availBps;

if ( (pctFull < 1.0) && (pctFull >= 0.0) ) { // (_context.random().nextInt(100) > 100 * pctFull) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Probabalistically allowing the tunnel w/ " + pctFull + " of our " + availBps
+ "Bps/" + allocatedKBps + "KBps allocated through " + numTunnels + " tunnels");
_log.debug("Allowing the tunnel w/ " + pctFull + " of our " + availBps
+ "Bps/" + allocatedBps + "KBps allocated through " + numTunnels + " tunnels");
return true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Rejecting the tunnel w/ " + pctFull + " of our " + availBps
+ "Bps allowed (" + toAllocate + "bytes / " + allocatedKBps
+ "KBps) through " + numTunnels + " tunnels");
return false;
double probAllow = availBps / (allocatedBps + availBps);
boolean allow = (availBps > MIN_AVAILABLE_BPS) && (_context.random().nextDouble() <= probAllow);
if (allow) {
if (_log.shouldLog(Log.INFO))
_log.info("Probabalistically allowing the tunnel w/ " + (pctFull*100d) + "% of our " + availBps
+ "Bps allowed (" + toAllocate + "bytes / " + allocatedBps
+ "Bps) through " + numTunnels + " tunnels");
return true;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting the tunnel w/ " + (pctFull*100d) + "% of our " + availBps
+ "Bps allowed (" + toAllocate + "bytes / " + allocatedBps
+ "Bps) through " + numTunnels + " tunnels");
return false;
}
}
}

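The rewritten allowTunnel() stops treating pctFull < 1.0 as a hard yes and instead accepts with probability availBps / (allocatedBps + availBps), never allowing at all below the 4KBps MIN_AVAILABLE_BPS floor. A worked example with hypothetical numbers:

public class ProbAllowSketch {
    public static void main(String[] args) {
        double availBps = 32 * 1024;        // headroom left after current usage
        double allocatedBps = 16 * 1024;    // what the participating tunnels would claim
        double probAllow = availBps / (allocatedBps + availBps);
        System.out.println(probAllow);      // ~0.67: scarcer headroom, more rejects
    }
}

Compared with the old hard cutoff, requests now start being refused gradually before the router saturates, rather than all at once after it does.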
@@ -309,7 +303,7 @@ class RouterThrottleImpl implements RouterThrottle {
*
*/
private double getSharePercentage() {
String pct = _context.getProperty(PROP_BANDWIDTH_SHARE_PERCENTAGE, "0.8");
String pct = _context.getProperty(PROP_BANDWIDTH_SHARE_PERCENTAGE);
if (pct != null) {
try {
double d = Double.parseDouble(pct);

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.362 $ $Date: 2006/02/26 16:30:57 $";
public final static String VERSION = "0.6.1.12";
public final static String ID = "$Revision: 1.380 $ $Date: 2006/03/25 18:50:48 $";
public final static String VERSION = "0.6.1.13";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);

@@ -95,9 +95,11 @@ public class StatisticsManager implements Service {
public Properties publishStatistics() {
Properties stats = new Properties();
stats.setProperty("router.version", RouterVersion.VERSION);
stats.setProperty("router.id", RouterVersion.ID);
stats.setProperty("coreVersion", CoreVersion.VERSION);
stats.setProperty("core.id", CoreVersion.ID);

// No longer expose, to make build tracking more expensive
// stats.setProperty("router.id", RouterVersion.ID);
// stats.setProperty("core.id", CoreVersion.ID);

if (_includePeerRankings) {
if (false)
@@ -147,7 +149,12 @@ public class StatisticsManager implements Service {
includeRate("udp.congestionOccurred", stats, new long[] { 10*60*1000 });
//includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 });
//includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));

// Round smaller uptimes to 1 hour, to frustrate uptime tracking
long publishedUptime = _context.router().getUptime();
if (publishedUptime < 60*60*1000) publishedUptime = 60*60*1000;

stats.setProperty("stat_uptime", DataHelper.formatDuration(publishedUptime));
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");

includeRate("tunnel.buildRequestTime", stats, new long[] { 60*1000, 10*60*1000 });
@@ -280,8 +287,14 @@ public class StatisticsManager implements Service {

private String getPeriod(Rate rate) { return DataHelper.formatDuration(rate.getPeriod()); }

private final String num(double num) { synchronized (_fmt) { return _fmt.format(num); } }
private final String pct(double num) { synchronized (_pct) { return _pct.format(num); } }
private final String num(double num) {
if (num < 0) num = 0;
synchronized (_fmt) { return _fmt.format(num); }
}
private final String pct(double num) {
if (num < 0) num = 0;
synchronized (_pct) { return _pct.format(num); }
}

public void renderStatusHTML(Writer out) { }
}

@@ -180,6 +180,17 @@ public class StatsGenerator {
buf.append(num(curRate.getExtremeEventCount()));
buf.append(")");
}
if (curRate.getSummaryListener() != null) {
buf.append(" <a href=\"viewstat.jsp?stat=").append(name);
buf.append("&period=").append(periods[i]);
buf.append("\" title=\"Render summarized data\">render</a>");
buf.append(" <a href=\"viewstat.jsp?stat=").append(name);
buf.append("&period=").append(periods[i]).append("&showEvents=true\" title=\"Render summarized event counts\">events</a>");
buf.append(" (as <a href=\"viewstat.jsp?stat=").append(name);
buf.append("&period=").append(periods[i]);
buf.append("&format=xml\" title=\"Dump stat history as XML\">XML</a>");
buf.append(" in a format <a href=\"http://people.ee.ethz.ch/~oetiker/webtools/rrdtool\">RRDTool</a> understands)");
}
buf.append("</li>");
if (i + 1 == periods.length) {
// last one, so let's display the strict average

@@ -122,8 +122,10 @@ public class GarlicMessageBuilder {
msg.setMessageExpiration(config.getExpiration());

long timeFromNow = config.getExpiration() - ctx.clock().now();
if (timeFromNow < 1*1000)
if (timeFromNow < 1*1000) {
log.error("Building a message expiring in " + timeFromNow + "ms: " + config, new Exception("created by"));
return null;
}

if (log.shouldLog(Log.WARN))
log.warn("CloveSet size for message " + msg.getUniqueId() + " is " + cloveSet.length

@@ -16,6 +16,7 @@ import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.Payload;
import net.i2p.data.i2cp.MessageId;

import net.i2p.data.i2np.DataMessage;
@@ -107,7 +108,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
@@ -118,7 +119,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoACK", "How often we send a client message without asking for an ACK?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
_clientMessage = msg;
_clientMessageId = msg.getMessageId();
@@ -313,6 +314,10 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*/
private void send() {
if (_finished) return;
if (getContext().clock().now() >= _overallExpiration) {
dieFatal();
return;
}
boolean wantACK = true;
int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey());
if (existingTags > 30)
@@ -330,11 +335,16 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (wantACK)
_inTunnel = selectInboundTunnel();

buildClove();
boolean ok = (_clientMessage != null) && buildClove();
if (!ok) {
dieFatal();
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Clove built to " + _toString);
long msgExpiration = _overallExpiration; // getContext().clock().now() + OVERALL_TIMEOUT_MS_DEFAULT;
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
_overallExpiration, key,
msgExpiration, key,
_clove, _from.calculateHash(),
_to, _inTunnel,
sessKey, tags,
@@ -344,7 +354,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
// (should we always fail for this? or should we send it anyway, even if
// we don't receive the reply? hmm...)
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Unable to create the garlic message (no tunnels left) to " + _toString);
_log.warn(getJobId() + ": Unable to create the garlic message (no tunnels left or too lagged) to " + _toString);
getContext().statManager().addRateData("client.dispatchNoTunnels", getContext().clock().now() - _start, 0);
dieFatal();
return;
@@ -470,7 +480,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
}

/** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
private void buildClove() {
private boolean buildClove() {
PayloadGarlicConfig clove = new PayloadGarlicConfig();

DeliveryInstructions instructions = new DeliveryInstructions();
@@ -487,7 +497,13 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
clove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));

DataMessage msg = new DataMessage(getContext());
msg.setData(_clientMessage.getPayload().getEncryptedData());
Payload p = _clientMessage.getPayload();
if (p == null)
return false;
byte d[] = p.getEncryptedData();
if (d == null)
return false;
msg.setData(d);
msg.setMessageExpiration(clove.getExpiration());

clove.setPayload(msg);
@@ -499,6 +515,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Built payload clove with id " + clove.getId());
return true;
}

/**
|
||||
|
||||
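The change above turns buildClove() into a boolean so that send() can abort cleanly through dieFatal() when the client payload has already been released, instead of letting a NullPointerException escape mid-send. A minimal sketch of the same fail-fast null-chain guard, with hypothetical stand-in types (not the I2P classes themselves):

class CloveGuardSketch {
    // Returns null as soon as any link in the message -> payload -> data
    // chain is missing, mirroring the early "return false" paths above.
    static byte[] extractData(Message msg) {
        if (msg == null) return null;
        Payload p = msg.getPayload();
        if (p == null) return null;        // client already cancelled or expired
        return p.getEncryptedData();       // may itself be null; caller re-checks
    }
    interface Message { Payload getPayload(); }
    interface Payload { byte[] getEncryptedData(); }
}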
@@ -0,0 +1,244 @@
package net.i2p.router.networkdb.kademlia;

import java.util.*;
import net.i2p.router.*;
import net.i2p.data.Hash;
import net.i2p.data.i2np.*;
import net.i2p.util.Log;

/**
* Try sending a search to some floodfill peers, failing completely if we don't get
* a match from one of those peers, with no fallback to the kademlia search
*
*/
class FloodOnlySearchJob extends FloodSearchJob {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private List _onFind;
private List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
private long _created;

private List _out;
private MessageSelector _replySelector;
private ReplyJob _onReply;
private Job _onTimeout;
public FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx, facade, key, onFind, onFailed, timeoutMs, isLease);
_log = ctx.logManager().getLog(FloodOnlySearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
_timeoutMs = Math.min(timeoutMs, SearchJob.PER_FLOODFILL_PEER_TIMEOUT);
_expiration = _timeoutMs + ctx.clock().now();
_origExpiration = _timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
_out = new ArrayList(2);
_replySelector = new FloodOnlyLookupSelector(getContext(), this);
_onReply = new FloodOnlyLookupMatchJob(getContext(), this);
_onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
_created = System.currentTimeMillis();
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
if (floodfillPeers == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Running netDb searches against the floodfill peers, but we don't know any");
failed();
return;
}
OutNetMessage out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
_out.add(out);

for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;

DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
failed();
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);

if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}

if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, fail
failed();
}
}
public String getName() { return "NetDb flood search (phase 1)"; }

Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }

void failed() {
synchronized (this) {
if (_dead) return;
_dead = true;
}
for (int i = 0; i < _out.size(); i++) {
OutNetMessage out = (OutNetMessage)_out.get(i);
getContext().messageRegistry().unregisterPending(out);
}
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining + " remaining after " + (System.currentTimeMillis()-_created));
_facade.complete(_key);
getContext().statManager().addRateData("netDb.failedTime", System.currentTimeMillis()-_created, System.currentTimeMillis()-_created);
synchronized (_onFailed) {
for (int i = 0; i < _onFailed.size(); i++) {
Job j = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(j);
}
}
}
void success() {
synchronized (this) {
if (_dead) return;
_dead = true;
}
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_facade.complete(_key);
getContext().statManager().addRateData("netDb.successTime", System.currentTimeMillis()-_created, System.currentTimeMillis()-_created);
synchronized (_onFind) {
while (_onFind.size() > 0)
getContext().jobQueue().addJob((Job)_onFind.remove(0));
}
}
}
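FloodOnlySearchJob above fans a lookup out to at most CONCURRENT_SEARCHES floodfill peers, succeeds on the first matching store, and fails outright once the last outstanding lookup dies, with no kademlia fallback. A compressed sketch of that control flow, using plain JDK types as stand-ins for the router classes (all names below are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class FloodOnlySketch {
    static final int CONCURRENT_SEARCHES = 2;
    final AtomicInteger remaining = new AtomicInteger(0);

    void run(List<String> floodfillPeers, String self) {
        for (String peer : floodfillPeers) {
            if (remaining.get() >= CONCURRENT_SEARCHES) break;
            if (peer.equals(self)) continue;          // never query ourselves
            sendLookup(peer);
            remaining.incrementAndGet();
        }
        if (remaining.get() <= 0) fail();             // no usable peers: fail immediately
    }
    void onReplyTimeout() { if (remaining.decrementAndGet() <= 0) fail(); }
    void sendLookup(String peer) { /* dispatch one lookup message to this peer */ }
    void fail() { /* fire the onFailed jobs; deliberately no kademlia fallback */ }
}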
class FloodOnlyLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
private Log _log;
public FloodOnlyLookupTimeoutJob(RouterContext ctx, FloodOnlySearchJob job) {
super(ctx);
_search = job;
_log = ctx.logManager().getLog(getClass());
}
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": search timed out");
_search.failed();
}
public String getName() { return "NetDb flood search (phase 1) timeout"; }
}

class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodOnlySearchJob _search;
public FloodOnlyLookupMatchJob(RouterContext ctx, FloodOnlySearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": search match and found locally");
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": got a DatabaseSearchReply when we were looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
_search.failed();
}
}
public String getName() { return "NetDb flood search (phase 1) match"; }
public void setMessage(I2NPMessage message) {
if (message instanceof DatabaseSearchReplyMessage) {
// a dsrm is only passed in when there are no more lookups remaining
_search.failed();
return;
}
try {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet());
else
getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo());
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN))
_log.warn(_search.getJobId() + ": Received an invalid store reply", iae);
}
}
}

class FloodOnlyLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodOnlySearchJob _search;
private boolean _matchFound;
private Log _log;
public FloodOnlyLookupSelector(RouterContext ctx, FloodOnlySearchJob search) {
_context = ctx;
_search = search;
_log = ctx.logManager().getLog(getClass());
_matchFound = false;
}
public boolean continueMatching() {
return _search.getLookupsRemaining() > 0 && !_matchFound && _context.clock().now() < getExpiration();
}
public long getExpiration() { return (_matchFound ? -1 : _search.getExpiration()); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
_matchFound = true;
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
return true; // ok, no more left, so time to fail
else
return false;
}
}
return false;
}
}
@@ -14,9 +14,23 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
public static final char CAPACITY_FLOODFILL = 'f';
private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
private static final String DEFAULT_FLOODFILL_PARTICIPANT = "false";
private Map _activeFloodQueries;

public FloodfillNetworkDatabaseFacade(RouterContext context) {
super(context);
_activeFloodQueries = new HashMap();

_context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedAttemptedPeers", "How many peers we sent a search to when the search fails", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchMessageCount", "Overall number of messages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
}

protected void createHandlers() {
@@ -106,4 +120,271 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
else
return false;
}

/**
* Begin a kademlia style search for the key specified, which can take up to timeoutMs and
* will fire the appropriate jobs on success or timeout (or if the kademlia search completes
* without any match)
*
*/
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
//if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
boolean isNew = true;
FloodSearchJob searchJob = null;
synchronized (_activeFloodQueries) {
searchJob = (FloodSearchJob)_activeFloodQueries.get(key);
if (searchJob == null) {
if (SearchJob.onlyQueryFloodfillPeers(_context)) {
searchJob = new FloodOnlySearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease);
} else {
searchJob = new FloodSearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease);
}
_activeFloodQueries.put(key, searchJob);
isNew = true;
}
}

if (isNew) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("this is the first search for that key, fire off the FloodSearchJob");
_context.jobQueue().addJob(searchJob);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Deferring flood search for " + key.toBase64() + " with " + onFindJob);
searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
_context.statManager().addRateData("netDb.lookupLeaseSetDeferred", 1, searchJob.getExpiration()-_context.clock().now());
}
return null;
}
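The search() method above keeps at most one active flood query per key in _activeFloodQueries and defers any concurrent caller onto the existing job via addDeferred(). The same single-flight pattern in isolation (a sketch under assumed names, not the facade's actual API):

import java.util.HashMap;
import java.util.Map;

class SingleFlight<K, J> {
    private final Map<K, J> active = new HashMap<K, J>();

    /** @return the job the caller should start, or null if an existing query absorbed it */
    J startOrDefer(K key, J newJob) {
        synchronized (active) {
            J existing = active.get(key);
            if (existing != null) return null;   // caller attaches callbacks to 'existing' instead
            active.put(key, newJob);
            return newJob;
        }
    }

    /** mirrors complete(Hash): clear the slot when the query succeeds or fails */
    void complete(K key) {
        synchronized (active) { active.remove(key); }
    }
}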
/**
* Ok, the initial set of searches to the floodfill peers timed out, lets fall back on the
* wider kademlia-style searches
*/
void searchFull(Hash key, List onFind, List onFailed, long timeoutMs, boolean isLease) {
synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }

Job find = null;
if ( (onFind != null) && (onFind.size() > 0) )
find = (Job)onFind.remove(0);
Job fail = null;
if ( (onFailed != null) && (onFailed.size() > 0) )
fail = (Job)onFailed.remove(0);
SearchJob job = super.search(key, find, fail, timeoutMs, isLease);
if (job != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Floodfill search timed out for " + key.toBase64() + ", falling back on normal search (#"
+ job.getJobId() + ") with " + timeoutMs + " remaining");
long expiration = timeoutMs + _context.clock().now();
while ( (onFind != null) && (onFind.size() > 0) )
job.addDeferred((Job)onFind.remove(0), null, expiration, isLease);
while ( (onFailed != null) && (onFailed.size() > 0) )
job.addDeferred(null, (Job)onFailed.remove(0), expiration, isLease);
}
}
void complete(Hash key) {
synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }
}

/** list of the Hashes of currently known floodfill peers */
List getFloodfillPeers() {
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
return sel.selectFloodfillParticipants(getKBuckets());
}
}

/**
* Try sending a search to some floodfill peers, but if we don't get a successful
* match within half the allowed lookup time, give up and start querying through
* the normal (kademlia) channels. This should cut down on spurious lookups caused
* by simple delays in responses from floodfill peers
*
*/
class FloodSearchJob extends JobImpl {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private List _onFind;
private List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx);
_log = ctx.logManager().getLog(FloodSearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
int timeout = -1;
timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
if (timeout < FLOOD_SEARCH_TIME_MIN)
timeout = FLOOD_SEARCH_TIME_MIN;
_timeoutMs = timeout;
_expiration = timeout + ctx.clock().now();
_origExpiration = timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);

for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;

DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
while (_onFailed.size() > 0) {
Job job = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(job);
}
getContext().messageRegistry().unregisterPending(out);
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);

if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}

if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, go to the normal ones
getContext().messageRegistry().unregisterPending(out);
_facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
}
}
public String getName() { return "NetDb search (phase 1)"; }

Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }

void failed() {
if (_dead) return;
_dead = true;
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
for (int i = 0; i < _onFailed.size(); i++) {
Job j = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(j);
}
}
}
void success() {
if (_dead) return;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
while (_onFind.size() > 0)
getContext().jobQueue().addJob((Job)_onFind.remove(0));
}
}
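Per the class comment above, FloodSearchJob budgets timeoutMs / FLOOD_SEARCH_TIME_FACTOR for the flood phase and, on failure, hands whatever remains of the original expiration to searchFull() for the kademlia fallback. Worked through with concrete numbers (assumed here purely for illustration):

class FloodTimingSketch {
    static final int FLOOD_SEARCH_TIME_FACTOR = 2;

    public static void main(String[] args) {
        long timeoutMs = 60 * 1000L;                              // total lookup budget
        long floodPhase = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;   // 30s reserved for floodfills
        long start = System.currentTimeMillis();
        long origExpiration = start + timeoutMs;
        // suppose the flood phase actually fails 35 seconds in:
        long now = start + 35 * 1000L;
        long remaining = origExpiration - now;                    // 25s left for the kademlia phase
        System.out.println("flood=" + floodPhase + "ms, fallback=" + remaining + "ms");
    }
}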
class FloodLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_search = job;
}
public void runJob() {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
_search.failed();
}
public String getName() { return "NetDb search (phase 1) timeout"; }
}

class FloodLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodSearchJob _search;
public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(FloodLookupMatchJob.class);
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
if (remaining <= 0)
_search.failed();
}
}
public String getName() { return "NetDb search (phase 1) match"; }
public void setMessage(I2NPMessage message) {}
}

class FloodLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodSearchJob _search;
public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
_context = ctx;
_search = search;
}
public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
public long getExpiration() { return _search.getExpiration(); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
return true;
}
}
return false;
}
}
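The selectors above keep matching only while lookups remain outstanding. In the flood-only variant a DatabaseStoreMessage for the right key always matches, while a DatabaseSearchReplyMessage only matches once it is the last reply still expected, which the match job then treats as failure. The decision rule reduced to a self-contained sketch (types are stand-ins):

class SelectorSketch {
    enum Reply { STORE, SEARCH_REPLY }
    int lookupsRemaining = 2;   // e.g. two concurrent floodfill lookups in flight

    boolean isMatch(Reply type, String searchKey, String replyKey) {
        if (!searchKey.equals(replyKey)) return false;
        lookupsRemaining--;
        if (type == Reply.STORE) return true;   // a real answer always matches
        return lookupsRemaining <= 0;           // last "ask someone else" reply => give up
    }
}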
@@ -74,16 +74,17 @@ class FloodfillPeerSelector extends PeerSelector {
return;
if (entry.equals(_context.routerHash()))
return;
if (_context.shitlist().isShitlisted(entry))
return;
// it isn't direct, so who cares if they're shitlisted
//if (_context.shitlist().isShitlisted(entry))
// return;
RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
if (info == null)
return;
//if (info == null)
// return;

if (FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
if (info != null && FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
_floodfillMatches.add(entry);
} else {
if ( (_wanted > _matches) && (_key != null) ) {
if ( (!SearchJob.onlyQueryFloodfillPeers(_context)) && (_wanted > _matches) && (_key != null) ) {
BigInteger diff = getDistance(_key, entry);
_sorted.put(diff, entry);
}

@@ -694,10 +694,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
if (err != null)
throw new IllegalArgumentException("Invalid store attempt - " + err);

if (_log.shouldLog(Log.INFO))
_log.info("RouterInfo " + key.toBase64() + " is stored with "
+ routerInfo.getOptions().size() + " options on "
+ new Date(routerInfo.getPublished()));
if (_log.shouldLog(Log.DEBUG))
_log.debug("RouterInfo " + key.toBase64() + " is stored with "
+ routerInfo.getOptions().size() + " options on "
+ new Date(routerInfo.getPublished()));

_context.peerManager().setCapabilities(key, routerInfo.getCapabilities());
_ds.put(key, routerInfo);
@@ -799,8 +799,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* without any match)
*
*/
void search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
if (!_initialized) return;
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
if (!_initialized) return null;
boolean isNew = true;
SearchJob searchJob = null;
synchronized (_activeRequests) {
@@ -823,6 +823,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
int deferred = searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
_context.statManager().addRateData("netDb.lookupLeaseSetDeferred", deferred, searchJob.getExpiration()-_context.clock().now());
}
return searchJob;
}

private Set getLeases() {
@@ -851,8 +852,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}

/** smallest allowed period */
private static final int MIN_PER_PEER_TIMEOUT = 3*1000;
private static final int MAX_PER_PEER_TIMEOUT = 5*1000;
private static final int MIN_PER_PEER_TIMEOUT = 5*1000;
private static final int MAX_PER_PEER_TIMEOUT = 10*1000;

public int getPeerTimeout(Hash peer) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);

@@ -51,6 +51,8 @@ class SearchJob extends JobImpl {
private List _deferredSearches;
private boolean _deferredCleared;
private long _startedOn;
private boolean _floodfillPeersExhausted;
private int _floodfillSearchesOutstanding;

private static final int SEARCH_BREDTH = 3; // 10 peers at a time
private static final int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
@@ -98,18 +100,9 @@ class SearchJob extends JobImpl {
_deferredCleared = false;
_peerSelector = facade.getPeerSelector();
_startedOn = -1;
_floodfillPeersExhausted = false;
_floodfillSearchesOutstanding = 0;
_expiration = getContext().clock().now() + timeoutMs;
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of messages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });

getContext().statManager().addRateData("netDb.searchCount", 1, 0);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
@@ -128,11 +121,48 @@ class SearchJob extends JobImpl {
public long getExpiration() { return _expiration; }
public long getTimeoutMs() { return _timeoutMs; }

private static final boolean DEFAULT_FLOODFILL_ONLY = false;

static boolean onlyQueryFloodfillPeers(RouterContext ctx) {
if (isCongested(ctx))
return true;
return Boolean.valueOf(ctx.getProperty("netDb.floodfillOnly", DEFAULT_FLOODFILL_ONLY + "")).booleanValue();
}

static boolean isCongested(RouterContext ctx) {
float availableSend = ctx.bandwidthLimiter().getOutboundKBytesPerSecond()*1024 - ctx.bandwidthLimiter().getSendBps();
float availableRecv = ctx.bandwidthLimiter().getInboundKBytesPerSecond()*1024 - ctx.bandwidthLimiter().getReceiveBps();
// 6KBps is an arbitrary limit, but a wider search should be able to operate
// in that range without a problem
return ( (availableSend < 6*1024) || (availableRecv < 6*1024) );
}

static final int PER_FLOODFILL_PEER_TIMEOUT = 10*1000;
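isCongested() above compares the configured bandwidth limits against the measured rates and treats roughly 6KBps of remaining send or receive headroom as the floor below which wider (non-floodfill) searches are skipped. With assumed example figures:

class CongestionSketch {
    public static void main(String[] args) {
        float outLimitBps = 16 * 1024;                 // configured outbound limit, bytes/sec
        float sendBps = 12 * 1024;                     // measured send rate, bytes/sec
        float availableSend = outLimitBps - sendBps;   // 4096 bytes/sec of headroom
        boolean congested = availableSend < 6 * 1024;  // under the ~6KBps floor -> true
        System.out.println("available=" + availableSend + " congested=" + congested);
    }
}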
protected int getPerPeerTimeoutMs(Hash peer) {
int timeout = 0;
if (_floodfillPeersExhausted && _floodfillSearchesOutstanding <= 0)
timeout = _facade.getPeerTimeout(peer);
else
timeout = PER_FLOODFILL_PEER_TIMEOUT;
long now = getContext().clock().now();

if (now + timeout > _expiration)
return (int)(_expiration - now);
else
return timeout;
}

/**
* Let each peer take up to the average successful search RTT
*
*/
protected int getPerPeerTimeoutMs() {
if (_floodfillPeersExhausted && _floodfillSearchesOutstanding <= 0)
return PER_PEER_TIMEOUT;
else
return PER_FLOODFILL_PEER_TIMEOUT;
/*
if (true)
return PER_PEER_TIMEOUT;
int rv = -1;
@@ -145,8 +175,11 @@ class SearchJob extends JobImpl {
return PER_PEER_TIMEOUT;
else
return rv + 1025; // tunnel delay
*/
}

private static int MAX_PEERS_QUERIED = 40;

/**
* Send the next search, or stop if its completed
*/
|
||||
_log.info(getJobId() + ": Key search expired");
|
||||
_state.complete(true);
|
||||
fail();
|
||||
} else if (_state.getAttempted().size() > MAX_PEERS_QUERIED) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getJobId() + ": Too many peers quried");
|
||||
_state.complete(true);
|
||||
fail();
|
||||
} else {
|
||||
//_log.debug("Continuing search");
|
||||
continueSearch();
|
||||
@@ -211,6 +249,14 @@ class SearchJob extends JobImpl {
|
||||
int sent = 0;
|
||||
Set attempted = _state.getAttempted();
|
||||
while (sent <= 0) {
|
||||
boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
|
||||
if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().size() <= 0) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn(getJobId() + ": no non-floodfill peers left, and no more pending. Searched: "
|
||||
+ _state.getAttempted().size() + " failed: " + _state.getFailed().size());
|
||||
fail();
|
||||
return;
|
||||
}
|
||||
List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
|
||||
if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
|
||||
if (_state.getPending().size() <= 0) {
|
||||
@@ -243,8 +289,14 @@ class SearchJob extends JobImpl {
|
||||
+ peer + " : " + (ds == null ? "null" : ds.getClass().getName()));
|
||||
_state.replyTimeout(peer);
|
||||
} else {
|
||||
if (((RouterInfo)ds).isHidden() ||
|
||||
getContext().shitlist().isShitlisted(peer)) {
|
||||
RouterInfo ri = (RouterInfo)ds;
|
||||
if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
|
||||
_floodfillPeersExhausted = true;
|
||||
if (onlyFloodfill)
|
||||
continue;
|
||||
}
|
||||
if (ri.isHidden()) {// || // allow querying shitlisted, since its indirect
|
||||
//getContext().shitlist().isShitlisted(peer)) {
|
||||
// dont bother
|
||||
} else {
|
||||
_state.addPending(peer);
|
||||
@@ -319,12 +371,13 @@ class SearchJob extends JobImpl {
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getJobId() + ": Send search to " + router.getIdentity().getHash().toBase64()
|
||||
+ " for " + _state.getTarget().toBase64());
|
||||
+ " for " + _state.getTarget().toBase64()
|
||||
+ " w/ timeout " + getPerPeerTimeoutMs(router.getIdentity().calculateHash()));
|
||||
}
|
||||
|
||||
getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0);
|
||||
|
||||
if (_isLease || false) // moo
|
||||
if (_isLease || true) // always send searches out tunnels
|
||||
sendLeaseSearch(router);
|
||||
else
|
||||
sendRouterSearch(router);
|
||||
@@ -355,7 +408,7 @@ class SearchJob extends JobImpl {
|
||||
// return;
|
||||
//}
|
||||
|
||||
int timeout = _facade.getPeerTimeout(router.getIdentity().getHash());
|
||||
int timeout = getPerPeerTimeoutMs(router.getIdentity().getHash());
|
||||
long expiration = getContext().clock().now() + timeout;
|
||||
|
||||
DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration);
|
||||
@@ -379,6 +432,8 @@ class SearchJob extends JobImpl {
|
||||
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade,
|
||||
this, outTunnel, inTunnel);
|
||||
|
||||
if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
|
||||
_floodfillSearchesOutstanding++;
|
||||
getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router), timeout);
|
||||
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, router.getIdentity().getHash());
|
||||
}
|
||||
@@ -398,6 +453,8 @@ class SearchJob extends JobImpl {
|
||||
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
|
||||
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, router.getIdentity().getHash(),
|
||||
reply, new FailedJob(getContext(), router), sel, timeout, SEARCH_PRIORITY);
|
||||
if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
|
||||
_floodfillSearchesOutstanding++;
|
||||
j.runJob();
|
||||
//getContext().jobQueue().addJob(j);
|
||||
}
|
||||
@@ -475,6 +532,7 @@ class SearchJob extends JobImpl {
|
||||
*/
|
||||
protected class FailedJob extends JobImpl {
|
||||
private Hash _peer;
|
||||
private boolean _isFloodfill;
|
||||
private boolean _penalizePeer;
|
||||
private long _sentOn;
|
||||
public FailedJob(RouterContext enclosingContext, RouterInfo peer) {
|
||||
@@ -490,8 +548,11 @@ class SearchJob extends JobImpl {
|
||||
_penalizePeer = penalizePeer;
|
||||
_peer = peer.getIdentity().getHash();
|
||||
_sentOn = enclosingContext.clock().now();
|
||||
_isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
|
||||
}
|
||||
public void runJob() {
|
||||
if (_isFloodfill)
|
||||
_floodfillSearchesOutstanding--;
|
||||
if (_state.completed()) return;
|
||||
_state.replyTimeout(_peer);
|
||||
if (_penalizePeer) {
|
||||
@@ -622,8 +683,11 @@ class SearchJob extends JobImpl {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": State of failed search: " + _state);
|
||||
|
||||
long time = getContext().clock().now() - _state.getWhenStarted();
|
||||
int attempted = _state.getAttempted().size();
|
||||
getContext().statManager().addRateData("netDb.failedAttemptedPeers", attempted, time);
|
||||
|
||||
if (_keepStats) {
|
||||
long time = getContext().clock().now() - _state.getWhenStarted();
|
||||
getContext().statManager().addRateData("netDb.failedTime", time, 0);
|
||||
//_facade.fail(_state.getTarget());
|
||||
}
|
||||
@@ -711,6 +775,7 @@ class SearchJob extends JobImpl {
|
||||
boolean wasAttempted(Hash peer) { return _state.wasAttempted(peer); }
|
||||
long timeoutMs() { return _timeoutMs; }
|
||||
boolean add(Hash peer) { return _facade.getKBuckets().add(peer); }
|
||||
void decrementOutstandingFloodfillSearches() { _floodfillSearchesOutstanding--; }
|
||||
}
|
||||
|
||||
class SearchReplyJob extends JobImpl {
|
||||
|
||||
@@ -26,6 +26,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
|
||||
private SearchJob _job;
|
||||
private TunnelInfo _outTunnel;
|
||||
private TunnelInfo _replyTunnel;
|
||||
private boolean _isFloodfillPeer;
|
||||
private long _sentOn;
|
||||
|
||||
public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer,
|
||||
@@ -39,6 +40,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(SearchUpdateReplyFoundJob.class);
|
||||
_peer = peer.getIdentity().getHash();
|
||||
_isFloodfillPeer = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
|
||||
_state = state;
|
||||
_facade = facade;
|
||||
_job = job;
|
||||
@@ -49,6 +51,9 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
|
||||
|
||||
public String getName() { return "Update Reply Found for Kademlia Search"; }
|
||||
public void runJob() {
|
||||
if (_isFloodfillPeer)
|
||||
_job.decrementOutstandingFloodfillSearches();
|
||||
|
||||
I2NPMessage message = _message;
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getJobId() + ": Reply from " + _peer.toBase64()
|
||||
|
||||
@@ -29,7 +29,7 @@ class StartExplorersJob extends JobImpl {
|
||||
/** don't explore more than 1 bucket at a time */
|
||||
private static final int MAX_PER_RUN = 1;
|
||||
/** dont explore the network more often than once every minute */
|
||||
private static final int MIN_RERUN_DELAY_MS = 60*1000;
|
||||
private static final int MIN_RERUN_DELAY_MS = 5*60*1000;
|
||||
/** explore the network at least once every thirty minutes */
|
||||
private static final int MAX_RERUN_DELAY_MS = 30*60*1000;
|
||||
|
||||
|
||||
@@ -92,6 +92,8 @@ class StoreJob extends JobImpl {
|
||||
private boolean isExpired() {
|
||||
return getContext().clock().now() >= _expiration;
|
||||
}
|
||||
|
||||
private static final int MAX_PEERS_SENT = 10;
|
||||
|
||||
/**
|
||||
* send the key to the next batch of peers
|
||||
@@ -105,6 +107,9 @@ class StoreJob extends JobImpl {
|
||||
if (isExpired()) {
|
||||
_state.complete(true);
|
||||
fail();
|
||||
} else if (_state.getAttempted().size() > MAX_PEERS_SENT) {
|
||||
_state.complete(true);
|
||||
fail();
|
||||
} else {
|
||||
//if (_log.shouldLog(Log.INFO))
|
||||
// _log.info(getJobId() + ": Sending: " + _state);
|

@@ -482,9 +482,10 @@ public class ProfileOrganizer {

placeTime = System.currentTimeMillis()-placeStart;

if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
if (_log.shouldLog(Log.INFO))
_log.info("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
+ ", capacity: " + _thresholdCapacityValue + ", speed: " + _thresholdSpeedValue + "]");
if (_log.shouldLog(Log.DEBUG)) {
StringBuffer buf = new StringBuffer(512);
for (Iterator iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
PeerProfile prof = (PeerProfile)iter.next();
@@ -522,7 +523,7 @@ public class ProfileOrganizer {
if ( (!_fastPeers.containsKey(cur.getPeer())) && (!cur.getIsFailing()) ) {
if (!isSelectable(cur.getPeer())) {
// skip peers we dont have in the netDb
if (_log.shouldLog(Log.INFO))
if (_log.shouldLog(Log.INFO))
_log.info("skip unknown peer from fast promotion: " + cur.getPeer().toBase64());
continue;
}
@@ -611,8 +612,9 @@ public class ProfileOrganizer {
continue;

// dont bother trying to make sense of things below the baseline
if (profile.getCapacityValue() <= CapacityCalculator.GROWTH_FACTOR)
continue;
// otoh, keep them in the threshold calculation, so we can adapt
////if (profile.getCapacityValue() <= CapacityCalculator.GROWTH_FACTOR)
//// continue;

totalCapacity += profile.getCapacityValue();
totalIntegration += profile.getIntegrationValue();

@@ -54,6 +54,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}

public int countActivePeers() { return (_manager == null ? 0 : _manager.countActivePeers()); }
public int countActiveSendPeers() { return (_manager == null ? 0 : _manager.countActiveSendPeers()); }

public List getBids(OutNetMessage msg) {
return _manager.getBids(msg);

@@ -47,7 +47,6 @@ public class FIFOBandwidthLimiter {
private long _lastTotalSent;
private long _lastTotalReceived;
private long _lastStatsUpdated;
private long _lastRateUpdated;
private float _sendBps;
private float _recvBps;
@@ -65,8 +64,8 @@ public class FIFOBandwidthLimiter {
_context.statManager().createRateStat("bwLimiter.pendingInboundRequests", "How many inbound requests are ahead of the current one (ignoring ones with 0)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.outboundDelayedTime", "How long it takes to honor an outbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.sendRate", "Low level bandwidth send rate, averaged every minute", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.recvRate", "Low level bandwidth receive rate, averaged every minute", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.sendBps1s", "How fast we are transmitting for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps1s", "How fast we are receiving for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_lastTotalSent = _totalAllocatedOutboundBytes;
@@ -74,7 +73,6 @@ public class FIFOBandwidthLimiter {
_sendBps = 0;
_recvBps = 0;
_lastStatsUpdated = now();
_lastRateUpdated = _lastStatsUpdated;
_refiller = new FIFOBandwidthRefiller(_context, this);
I2PThread t = new I2PThread(_refiller);
t.setName("BWRefiller" + (++__id));
@@ -295,11 +293,6 @@ public class FIFOBandwidthLimiter {
_context.statManager().getStatLog().addData("bw", "bw.recvBps1s", (long)_recvBps, recv);
}
}
if (60*1000 + _lastRateUpdated <= now) {
_lastRateUpdated = now;
_context.statManager().addRateData("bw.sendRate", (long)_sendBps, 0);
_context.statManager().addRateData("bw.recvRate", (long)_recvBps, 0);
}
}

/**

@@ -38,6 +38,7 @@ public interface Transport {
public String getStyle();

public int countActivePeers();
public int countActiveSendPeers();
public List getMostRecentErrorMessages();

public void renderStatusHTML(Writer out) throws IOException;

@@ -64,6 +64,10 @@ public abstract class TransportImpl implements Transport {
*
*/
public int countActivePeers() { return 0; }
/**
* How many peers are we actively sending messages to (this minute)
*/
public int countActiveSendPeers() { return 0; }

public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**

@@ -120,6 +120,14 @@ public class TransportManager implements TransportEventListener {
return peers;
}

public int countActiveSendPeers() {
int peers = 0;
for (int i = 0; i < _transports.size(); i++) {
peers += ((Transport)_transports.get(i)).countActiveSendPeers();
}
return peers;
}

public short getReachabilityStatus() {
if (_transports.size() <= 0) return CommSystemFacade.STATUS_UNKNOWN;
short status[] = new short[_transports.size()];

@@ -118,10 +118,16 @@ public class MessageReceiver {
int size = message.getCompleteSize();
if (_log.shouldLog(Log.INFO))
_log.info("Full message received (" + message.getMessageId() + ") after " + message.getLifetime());
I2NPMessage msg = readMessage(buf, message, handler);
long afterRead = System.currentTimeMillis();
if (msg != null)
_transport.messageReceived(msg, null, message.getFrom(), message.getLifetime(), size);
long afterRead = -1;
try {
I2NPMessage msg = readMessage(buf, message, handler);
afterRead = System.currentTimeMillis();
if (msg != null)
_transport.messageReceived(msg, null, message.getFrom(), message.getLifetime(), size);
} catch (RuntimeException re) {
_log.error("b0rked receiving a message.. wazza huzza hmm?", re);
continue;
}
message = null;
long after = System.currentTimeMillis();
if (afterRead - before > 100)

@@ -117,7 +117,8 @@ public class OutboundEstablishState {

public void addMessage(OutNetMessage msg) {
synchronized (_queuedMessages) {
_queuedMessages.add(msg);
if (!_queuedMessages.contains(msg))
_queuedMessages.add(msg);
}
}
public OutNetMessage getNextQueuedMessage() {

@@ -61,7 +61,7 @@ public class OutboundMessageFragments {
_context.statManager().createRateStat("udp.sendConfirmVolley", "How many times did fragments need to be sent before ACK", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendFailed", "How many sends a failed message was pushed", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendAggressiveFailed", "How many volleys was a packet sent before we gave up", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundActiveCount", "How many messages are in the active pool when a new one is added", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundActiveCount", "How many messages are in the active pool", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendRejected", "What volley are we on when the peer was throttled (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.partialACKReceived", "How many fragments were partially ACKed (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendSparse", "How many fragments were partially ACKed and hence not resent (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
@@ -160,7 +160,7 @@ public class OutboundMessageFragments {
}
_activePeers.notifyAll();
}
msg.timestamp("made active along with: " + active);
//msg.timestamp("made active along with: " + active);
_context.statManager().addRateData("udp.outboundActiveCount", active, 0);
} else {
if (_log.shouldLog(Log.WARN))

@@ -254,9 +254,9 @@ public class PeerState {
_mtuReceive = _mtu;
_mtuLastChecked = -1;
_lastACKSend = -1;
_rtt = 1000;
_rto = MIN_RTO;
_rtt = _rto/2;
_rttDeviation = _rtt;
_rto = MAX_RTO;
_messagesReceived = 0;
_messagesSent = 0;
_packetsTransmitted = 0;
@@ -874,7 +874,7 @@ public class PeerState {
double retransPct = 0;
if (_packetsTransmitted > 10) {
retransPct = (double)_packetsRetransmitted/(double)_packetsTransmitted;
boolean wantLarge = retransPct < .25d; // heuristic to allow fairly lossy links to use large MTUs
boolean wantLarge = retransPct < .50d; // heuristic to allow fairly lossy links to use large MTUs
if (wantLarge && _mtu != LARGE_MTU) {
if (_context.random().nextLong(_mtuDecreases) <= 0) {
_mtu = LARGE_MTU;
@@ -943,10 +943,12 @@ public class PeerState {
else
_consecutiveSmall = 0;

if ( (_consecutiveSmall < 50) && (_packetsReceived > 50) )
_mtuReceive = LARGE_MTU;
else
_mtuReceive = MIN_MTU;
if (_packetsReceived > 50) {
if (_consecutiveSmall < 50)
_mtuReceive = LARGE_MTU;
else
_mtuReceive = MIN_MTU;
}
}

/**

@@ -317,7 +317,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority

if (explicitSpecified)
return;


boolean fixedPort = getIsPortFixed();
boolean updated = false;
boolean fireTest = false;
@@ -328,7 +328,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
+ RemoteHostId.toString(ourIP) + ". Lets throw tomatoes at them");
_context.shitlist().shitlistRouter(from, "They said we had an invalid IP");
return;
} else if (inboundRecent) {
} else if (inboundRecent && _externalListenPort > 0 && _externalListenHost != null) {
// use OS clock since its an ordering thing, not a time thing
if (_log.shouldLog(Log.INFO))
_log.info("Ignoring IP address suggestion, since we have received an inbound con recently");
@@ -761,9 +761,15 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
} else {
boolean rv = (_externalListenHost == null) || (_externalListenPort <= 0);
if (!rv) {
RouterAddress addr = _externalAddress;
UDPAddress ua = new UDPAddress(addr);
if (ua.getIntroducerCount() > 0)
rv = true; // status == ok and we don't actually need introducers, so rebuild
}
if (_log.shouldLog(Log.INFO)) {
if (rv) {
_log.info("Need to initialize our direct SSU info");
_log.info("Need to initialize our direct SSU info (" + _externalListenHost + ":" + _externalListenPort + ")");
} else {
RouterAddress addr = _externalAddress;
UDPAddress ua = new UDPAddress(addr);
@@ -1158,6 +1164,22 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
return active;
}

public int countActiveSendPeers() {
long now = _context.clock().now();
int active = 0;
int inactive = 0;
synchronized (_peersByIdent) {
for (Iterator iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = (PeerState)iter.next();
if (now-peer.getLastSendFullyTime() > 1*60*1000)
inactive++;
else
active++;
}
}
return active;
}

private static class AlphaComparator implements Comparator {
private static final AlphaComparator _instance = new AlphaComparator();
public static final AlphaComparator instance() { return _instance; }
||||
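The new countActiveSendPeers() is a straight scan: a peer counts as active if it fully completed a send within the last minute. The inactive counter is computed but does not affect the return value. A hedged generics-era rendering of the same loop, as a fragment (the map and clock wiring are assumptions; getLastSendFullyTime() is the accessor used in the diff above):

    // Sketch: count peers whose last fully-completed send is under a minute old.
    static int countActiveSendPeers(java.util.Map<?, PeerState> peersByIdent, long now) {
        int active = 0;
        synchronized (peersByIdent) {
            for (PeerState peer : peersByIdent.values()) {
                if (now - peer.getLastSendFullyTime() <= 60*1000)
                    active++;
            }
        }
        return active;
    }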
@@ -92,6 +92,10 @@ public class FragmentHandler {
            }
        } catch (ArrayIndexOutOfBoundsException aioobe) {
            _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
        } catch (NullPointerException npe) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Corrupt fragment received: offset = " + offset, npe);
            _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
        } catch (RuntimeException e) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Corrupt fragment received: offset = " + offset, e);
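The new clause mirrors the RuntimeException handler just below it: a NullPointerException while processing a fragment is treated as one more corruption signature, logged with the offending offset, and counted under tunnel.corruptMessage rather than escaping the handler.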
@@ -53,7 +53,7 @@ public class TunnelDispatcher implements Service {
        _leaveJob = new LeaveTunnel(ctx);
        ctx.statManager().createRateStat("tunnel.participatingTunnels",
                                         "How many tunnels are we participating in?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchOutboundPeer",
                                         "How many messages we send out a tunnel targetting a peer?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
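This is the first of several identical stat tweaks in this change set: a 60*1000 ms (one-minute) period is prepended to a rate's averaging windows, so the console can show short-term movement alongside the existing 10-minute to 24-hour aggregates. The pattern, with the array exactly as it appears in the diff:

    // before: the shortest averaging window is 10 minutes
    new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }
    // after: a one-minute window is added in front
    new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }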
@@ -42,7 +42,7 @@ class BuildHandler {
        _context.statManager().createRateStat("tunnel.rejectTimeout", "How often we reject a tunnel because we can't find the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });

        _context.statManager().createRateStat("tunnel.rejectOverloaded", "How long we had to wait before processing the request (when it was rejected)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.acceptLoad", "How long we had to wait before processing the request (when it was accepted)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.acceptLoad", "Delay before processing the accepted request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.dropLoad", "How long we had to wait before finally giving up on an inbound request (period is queue count)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.handleRemaining", "How many pending inbound requests were left on the queue after one pass?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
@@ -307,7 +307,11 @@ class BuildHandler {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Request " + _state.msg.getUniqueId() + " handled with a successful deferred lookup for the next peer " + _nextPeer.toBase64());

            handleReq(getContext().netDb().lookupRouterInfoLocally(_nextPeer), _state, _req, _nextPeer);
            RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(_nextPeer);
            if (ri != null)
                handleReq(ri, _state, _req, _nextPeer);
            else
                _log.error("Deferred successfully, but we couldnt find " + _nextPeer.toBase64() + "?");
        }
    }

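Previously the result of lookupRouterInfoLocally() was passed straight into handleReq(), so a lookup job that completed without actually yielding a RouterInfo would hand a null into the request handler. The rewrite only calls handleReq() with a non-null RouterInfo and logs the inconsistent case instead.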
@@ -16,6 +16,7 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
    private TestJob _testJob;
    private Job _expireJob;
    private TunnelInfo _pairedTunnel;
    private boolean _live;

    /** Creates a new instance of PooledTunnelCreatorConfig */
@@ -25,12 +26,19 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
    public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound, Hash destination) {
        super(ctx, length, isInbound, destination);
        _pool = null;
        _live = false;
    }

    public void testSuccessful(int ms) {
        if (_testJob != null)
            _testJob.testSuccessful(ms);
        super.testSuccessful(ms);

        // once a tunnel has been built and we know it works, lets skew ourselves a bit so we
        // aren't as cyclic
        if ( (_context.router().getUptime() < 10*60*1000) && (!_live) )
            setExpiration(getExpiration() - _context.random().nextInt(5*60*1000));
        _live = true;
    }

    /**
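The skew added in testSuccessful() shortens the life of tunnels built during the router's first ten minutes by a random amount of up to five minutes, so tunnels created together at startup don't all expire and rebuild in lockstep; the _live flag ensures each tunnel is skewed at most once. The arithmetic, as a sketch in which uptimeMs, expirationMs, live, and random stand in for the router state:

    // Sketch: de-synchronize rebuild cycles for early-uptime tunnels.
    if (uptimeMs < 10*60*1000 && !live)
        expirationMs -= random.nextInt(5*60*1000); // pull in by 0..5 minutes, uniform
    live = true;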
@@ -42,19 +42,19 @@ class TestJob extends JobImpl {
            _log.error("Invalid tunnel test configuration: no pool for " + cfg, new Exception("origin"));
        getTiming().setStartAfter(getDelay() + ctx.clock().now());
        ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    }
    public String getName() { return "Test tunnel"; }
    public void runJob() {
@@ -129,6 +129,11 @@ class TestJob extends JobImpl {
                                     getContext().keyManager().getPublicKey(),
                                     encryptKey, encryptTag);

        if (msg == null) {
            // overloaded / unknown peers / etc
            scheduleRetest();
            return;
        }
        Set encryptTags = new HashSet(1);
        encryptTags.add(encryptTag);
        getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags);
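Building the test message can legitimately fail (the in-code comment cites overload and unknown peers), in which case msg comes back null; rather than letting the send path throw, the job now simply schedules another test and returns.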
@@ -189,8 +194,6 @@ class TestJob extends JobImpl {
    private int getTestPeriod() { return 20*1000; }
    private void scheduleRetest() { scheduleRetest(false); }
    private void scheduleRetest(boolean asap) {
        _outTunnel = null;
        _replyTunnel = null;
        if (asap) {
            requeue(getContext().random().nextInt(TEST_DELAY));
        } else {
@@ -58,10 +58,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {

        ctx.statManager().createRateStat("tunnel.testSuccessTime",
                                         "How long do successful tunnel tests take?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.participatingTunnels",
                                         "How many tunnels are we participating in?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    }

    /** pick an inbound tunnel not bound to a particular destination */