forked from I2P_Developers/i2p.i2p
Compare commits
46 Commits
i2p_0_6_1_ ... i2p_0_6_1_

Commits in this range (SHA1, in the order listed on the compare page):
f2078e1523, f2fb87c88b, fcbea19478, 92f25bd4fa, 85c2c11217, de1ca4aea4, a0f865fb99, 2c3fea5605,
ba1d88b5c9, 2ad715c668, 5f17557e54, 2ad5a6f907, 0920462060, 870e94e184, 6b0d507644, 70cf9e4ca7,
2a3974c71d, 46ac9292e8, 4307097472, ed3fdaf4f1, 378a9a8f5c, 4ef6180455, d4970e23c0, 0c9f165016,
be3a899ecb, 7a6a749004, 17271ee3f0, 99bcfa90df, eb36e993c1, e5eca5fa45, 8cba2f4236, 40d5ed31ac,
181275fe35, 23d8c01ce7, de83944486, 90cd7ff23a, 8d0a9b4ccd, 230d4cd23f, e9b6fcc0a4, 8fcb871409,
83bef43fd5, b4fc6ca31b, ab3f1b708d, c76402a160, a50c73aa5e, 5aa66795d2
@@ -228,36 +228,6 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
String outmsg = outboundFilter(inmsg);
if(outmsg!=null)
{
if (outmsg.indexOf("PING") >= 0) {
// Most clients just send a PING and are happy with any old PONG. Others,
// like BitchX, actually expect certain behavior. It sends two different pings:
// "PING :irc.freshcoffee.i2p" and "PING 1234567890 127.0.0.1" (where the IP is the proxy)
// the PONG to the former seems to be "PONG 127.0.0.1", while the PONG to the later is
// ":irc.freshcoffee.i2p PONG irc.freshcoffe.i2p :1234567890".
// We don't want to send them our proxy's IP address, so we need to rewrite the PING
// sent to the server, but when we get a PONG back, use what we expected, rather than
// what they sent.
//
// Yuck.
StringTokenizer tok = new StringTokenizer(inmsg, " ");
int tokens = tok.countTokens();
if ( (tokens <= 2) || (tokens == 3) ) { // "PING nonce" or "PING nonce serverIP"
tok.nextToken(); // skip
//_expectedPong = "PONG 127.0.0.1 :" + tok.nextToken();
_expectedPong = "PONG " + tok.nextToken();
} else {
// if it isn't one of those two, we will filter out all PONGs, which means
// the client will fail. whee!
if (_log.shouldLog(Log.ERROR))
_log.error("IRC client sent a PING we don't understand (\"" + inmsg + "\"), so we're filtering it");
_expectedPong = null;
}
if (_log.shouldLog(Log.WARN)) {
_log.warn("outbound rewritten PING: "+outmsg + ", waiting for [" + _expectedPong + "]");
_log.warn(" - outbound was: "+inmsg);
}
}

if(!inmsg.equals(outmsg)) {
if (_log.shouldLog(Log.WARN)) {
_log.warn("outbound FILTERED: "+outmsg);
@@ -377,7 +347,7 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
return null;
}

public static String outboundFilter(String s) {
public String outboundFilter(String s) {

String field[]=s.split(" ",3);
String command;
@@ -415,18 +385,38 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
command = field[0].toUpperCase();

if ("PING".equals(command)) {
// e.g. "PING", "PING nonce", or "PING nonce serverIP"
if (field.length == 1) {
return "PING";
} else if (field.length == 2) {
return "PING " + field[1];
} else if (field.length == 3) {
return "PING " + field[1];
// Most clients just send a PING and are happy with any old PONG. Others,
// like BitchX, actually expect certain behavior. It sends two different pings:
// "PING :irc.freshcoffee.i2p" and "PING 1234567890 127.0.0.1" (where the IP is the proxy)
// the PONG to the former seems to be "PONG 127.0.0.1", while the PONG to the later is
// ":irc.freshcoffee.i2p PONG irc.freshcoffe.i2p :1234567890".
// We don't want to send them our proxy's IP address, so we need to rewrite the PING
// sent to the server, but when we get a PONG back, use what we expected, rather than
// what they sent.
//
// Yuck.

String rv = null;
if (field.length == 1) { // PING
rv = "PING";
_expectedPong = "PONG 127.0.0.1";
} else if (field.length == 2) { // PING nonce
rv = "PING " + field[1];
_expectedPong = "PONG " + field[1];
} else if (field.length == 3) { // PING nonce serverLocation
rv = "PING " + field[1];
_expectedPong = "PONG " + field[1];
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("IRC client sent a PING we don't understand, filtering it (\"" + s + "\")");
return null;
rv = null;
_expectedPong = null;
}

if (_log.shouldLog(Log.WARN))
_log.warn("sending ping " + rv + ", waiting for " + _expectedPong + " orig was [" + s + "]");

return rv;
}
if ("PONG".equals(command))
return "PONG 127.0.0.1"; // no way to know what the ircd to i2ptunnel server con is, so localhost works
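Note: the hunk above moves the PING handling into outboundFilter() and tracks the PONG the client will expect. A minimal standalone sketch of that idea (not the I2PTunnelIRCClient code itself; class and field names here are illustrative):

```java
// Sketch, assuming a single client connection: rewrite outbound PINGs so the proxy's
// address never reaches the server, and remember which PONG to hand back to the client.
import java.util.StringTokenizer;

class IrcPingRewriter {
    private String expectedPong; // PONG line to substitute when the server answers

    /** Returns the rewritten PING to send to the server, or null to filter the line. */
    String rewritePing(String inbound) {
        StringTokenizer tok = new StringTokenizer(inbound, " ");
        int tokens = tok.countTokens();
        if (tokens == 1) {                       // "PING"
            expectedPong = "PONG 127.0.0.1";
            return "PING";
        } else if (tokens == 2 || tokens == 3) { // "PING nonce" or "PING nonce serverIP"
            tok.nextToken();                     // skip the PING keyword
            String nonce = tok.nextToken();
            expectedPong = "PONG " + nonce;
            return "PING " + nonce;              // drop the proxy IP entirely
        }
        expectedPong = null;                     // unknown form: filter it
        return null;
    }

    /** When any PONG arrives from the server, give the client the line it expects. */
    String rewritePong(String fromServer) {
        return expectedPong;                     // null means "filter this PONG"
    }
}
```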
@@ -55,9 +55,17 @@ public class GraphHelper {

public String getImages() {
try {
_out.write("<img src=\"viewstat.jsp?stat=bw.combined"
+ "&periodCount=" + _periodCount
+ "&width=" + _width
+ "&height=" + _height
+ "\" title=\"Combined bandwidth graph\" />\n");

List listeners = StatSummarizer.instance().getListeners();
for (int i = 0; i < listeners.size(); i++) {
SummaryListener lsnr = (SummaryListener)listeners.get(i);
TreeSet ordered = new TreeSet(new AlphaComparator());
ordered.addAll(listeners);
for (Iterator iter = ordered.iterator(); iter.hasNext(); ) {
SummaryListener lsnr = (SummaryListener)iter.next();
Rate r = lsnr.getRate();
String title = r.getRateStat().getName() + " for " + DataHelper.formatDuration(_periodCount * r.getPeriod());
_out.write("<img src=\"viewstat.jsp?stat=" + r.getRateStat().getName()
@@ -102,3 +110,13 @@ public class GraphHelper {
return "";
}
}

class AlphaComparator implements Comparator {
public int compare(Object lhs, Object rhs) {
SummaryListener l = (SummaryListener)lhs;
SummaryListener r = (SummaryListener)rhs;
String lName = l.getRate().getRateStat().getName() + "." + l.getRate().getPeriod();
String rName = r.getRate().getRateStat().getName() + "." + r.getRate().getPeriod();
return lName.compareTo(rName);
}
}
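Note: the AlphaComparator above keys each listener by "statName.period" so the graph images always render in a stable order. A self-contained illustration of that ordering, using a hypothetical stand-in for SummaryListener:

```java
// Sketch only: Listener is a placeholder type, not the real SummaryListener.
import java.util.Comparator;
import java.util.TreeSet;

public class GraphOrderDemo {
    static class Listener {
        final String statName;
        final long period;
        Listener(String statName, long period) { this.statName = statName; this.period = period; }
        String key() { return statName + "." + period; }
    }

    public static void main(String[] args) {
        TreeSet<Listener> ordered = new TreeSet<>(Comparator.comparing(Listener::key));
        ordered.add(new Listener("tunnel.acceptLoad", 60000));
        ordered.add(new Listener("bw.sendRate", 60000));
        ordered.add(new Listener("client.sendAckTime", 60000));
        // Iterates in alphabetical "name.period" order:
        // bw.sendRate.60000, client.sendAckTime.60000, tunnel.acceptLoad.60000
        for (Listener l : ordered) System.out.println(l.key());
    }
}
```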
@@ -8,6 +8,8 @@ import net.i2p.router.RouterContext;
public class PeerHelper {
private RouterContext _context;
private Writer _out;
private int _sortFlags;
private String _urlBase;
/**
* Configure this bean to query a particular router context
*
@@ -25,10 +27,22 @@ public class PeerHelper {
public PeerHelper() {}

public void setOut(Writer out) { _out = out; }
public void setSort(String flags) {
if (flags != null) {
try {
_sortFlags = Integer.parseInt(flags);
} catch (NumberFormatException nfe) {
_sortFlags = 0;
}
} else {
_sortFlags = 0;
}
}
public void setUrlBase(String base) { _urlBase = base; }

public String getPeerSummary() {
try {
_context.commSystem().renderStatusHTML(_out);
_context.commSystem().renderStatusHTML(_out, _urlBase, _sortFlags);
_context.bandwidthLimiter().renderStatusHTML(_out);
} catch (IOException ioe) {
ioe.printStackTrace();
@@ -5,18 +5,27 @@ import java.util.*;

import net.i2p.stat.*;
import net.i2p.router.*;
import net.i2p.util.Log;

import java.awt.Color;
import org.jrobin.graph.RrdGraph;
import org.jrobin.graph.RrdGraphDef;
import org.jrobin.graph.RrdGraphDefTemplate;
import org.jrobin.core.RrdException;

/**
*
*/
public class StatSummarizer implements Runnable {
private RouterContext _context;
private Log _log;
/** list of SummaryListener instances */
private List _listeners;
private static StatSummarizer _instance;

public StatSummarizer() {
_context = (RouterContext)RouterContext.listContexts().get(0); // fuck it, only summarize one per jvm
_log = _context.logManager().getLog(getClass());
_listeners = new ArrayList(16);
_instance = this;
}
@@ -45,6 +54,10 @@ public class StatSummarizer implements Runnable {
",router.activePeers.60000" +
",router.activeSendPeers.60000" +
",tunnel.acceptLoad.60000" +
",tunnel.dropLoadProactive.60000" +
",tunnel.buildExploratorySuccess.60000" +
",tunnel.buildExploratoryReject.60000" +
",tunnel.buildExploratoryExpire.60000" +
",client.sendAckTime.60000" +
",client.dispatchNoACK.60000" +
",transport.sendMessageFailureLifetime.60000" +
@@ -124,6 +137,69 @@ public class StatSummarizer implements Runnable {
return false;
}

public boolean renderRatePng(OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
long end = _context.clock().now();
if (periodCount <= 0) periodCount = SummaryListener.PERIODS;
if (periodCount > SummaryListener.PERIODS)
periodCount = SummaryListener.PERIODS;
long period = 60*1000;
long start = end - period*periodCount;
long begin = System.currentTimeMillis();
try {
RrdGraphDef def = new RrdGraphDef();
def.setTimePeriod(start/1000, end/1000);
String title = "Bandwidth usage";
if (!hideTitle)
def.setTitle(title);
String sendName = SummaryListener.createName(_context, "bw.sendRate.60000");
String recvName = SummaryListener.createName(_context, "bw.recvRate.60000");
def.datasource(sendName, sendName, sendName, "AVERAGE", "MEMORY");
def.datasource(recvName, recvName, recvName, "AVERAGE", "MEMORY");
def.area(sendName, Color.BLUE, "Outbound bytes/second");
//def.line(sendName, Color.BLUE, "Outbound bytes/second", 3);
//def.line(recvName, Color.RED, "Inbound bytes/second@r", 3);
def.area(recvName, Color.RED, "Inbound bytes/second@r");
if (!hideLegend) {
def.gprint(sendName, "AVERAGE", "outbound average: @2@sbytes/second");
def.gprint(sendName, "MAX", " max: @2@sbytes/second@r");
def.gprint(recvName, "AVERAGE", "inbound average: @2bytes/second@s");
def.gprint(recvName, "MAX", " max: @2@sbytes/second@r");
}
if (!showCredit)
def.setShowSignature(false);
if (hideLegend)
def.setShowLegend(false);
if (hideGrid) {
def.setGridX(false);
def.setGridY(false);
}
//System.out.println("rendering: path=" + path + " dsNames[0]=" + dsNames[0] + " dsNames[1]=" + dsNames[1] + " lsnr.getName=" + _listener.getName());
def.setAntiAliasing(false);
//System.out.println("Rendering: \n" + def.exportXmlTemplate());
//System.out.println("*****************\nData: \n" + _listener.getData().dump());
RrdGraph graph = new RrdGraph(def);
//System.out.println("Graph created");
byte data[] = null;
if ( (width <= 0) || (height <= 0) )
data = graph.getPNGBytes();
else
data = graph.getPNGBytes(width, height);
long timeToPlot = System.currentTimeMillis() - begin;
out.write(data);
//File t = File.createTempFile("jrobinData", ".xml");
//_listener.getData().dumpXml(new FileOutputStream(t));
//System.out.println("plotted: " + (data != null ? data.length : 0) + " bytes in " + timeToPlot
// ); // + ", data written to " + t.getAbsolutePath());
return true;
} catch (RrdException re) {
_log.error("Error rendering", re);
throw new IOException("Error plotting: " + re.getMessage());
} catch (IOException ioe) {
_log.error("Error rendering", ioe);
throw ioe;
}
}

/**
* @param specs statName.period,statName.period,statName.period
* @return list of Rate objects
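Note: the javadoc above documents a "statName.period,statName.period,..." spec string. A hypothetical sketch of how such a string can be split into (name, period) pairs; the real StatSummarizer then resolves each pair against its registered RateStats:

```java
// Sketch only: parsing logic for the documented spec format, nothing I2P-specific.
import java.util.ArrayList;
import java.util.List;

public class SpecParser {
    /** Parses "bw.sendRate.60000,tunnel.acceptLoad.60000" into (name, period) pairs. */
    static List<String[]> parse(String specs) {
        List<String[]> out = new ArrayList<>();
        for (String spec : specs.split(",")) {
            int dot = spec.lastIndexOf('.');           // the period is everything after the last dot
            if (dot <= 0 || dot == spec.length() - 1)
                continue;                              // malformed entry: skip it
            String name = spec.substring(0, dot);
            String period = spec.substring(dot + 1);
            try {
                Long.parseLong(period);                // the period must be numeric (milliseconds)
                out.add(new String[] { name, period });
            } catch (NumberFormatException nfe) {
                // ignore entries with a non-numeric period
            }
        }
        return out;
    }
}
```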
@@ -213,11 +213,11 @@ public class SummaryHelper {
}

/**
* How fast we have been receiving data over the last minute (pretty printed
* How fast we have been receiving data over the last second (pretty printed
* string with 2 decimal places representing the KBps)
*
*/
public String getInboundMinuteKBps() {
public String getInboundSecondKBps() {
if (_context == null)
return "0.0";
double kbps = _context.bandwidthLimiter().getReceiveBps()/1024d;
@@ -225,11 +225,11 @@ public class SummaryHelper {
return fmt.format(kbps);
}
/**
* How fast we have been sending data over the last minute (pretty printed
* How fast we have been sending data over the last second (pretty printed
* string with 2 decimal places representing the KBps)
*
*/
public String getOutboundMinuteKBps() {
public String getOutboundSecondKBps() {
if (_context == null)
return "0.0";
double kbps = _context.bandwidthLimiter().getSendBps()/1024d;
@@ -493,6 +493,13 @@ public class SummaryHelper {
return _context.throttle().getTunnelLag() + "ms";
}

public String getInboundBacklog() {
if (_context == null)
return "0";

return String.valueOf(_context.tunnelManager().getInboundBuildQueueSize());
}

public boolean updateAvailable() {
return NewsFetcher.getInstance(_context).updateAvailable();
}
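Note: the renamed getters above turn a bytes-per-second reading into the "KBps with 2 decimal places" string the console shows. A minimal sketch of just that formatting step, using java.text.DecimalFormat (the real helper reads the rate from the bandwidth limiter):

```java
// Sketch only: formatting, not the SummaryHelper bean itself.
import java.text.DecimalFormat;

public class KBpsFormat {
    static String format(double bytesPerSecond) {
        double kbps = bytesPerSecond / 1024d;        // bytes/sec -> kilobytes/sec
        return new DecimalFormat("###,##0.00").format(kbps);
    }

    public static void main(String[] args) {
        System.out.println(format(48500));           // prints 47.36
    }
}
```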
@@ -78,7 +78,7 @@ class SummaryListener implements RateSummaryListener {
* munged version from the user/developer-visible name.
*
*/
private static String createName(I2PAppContext ctx, String wanted) {
static String createName(I2PAppContext ctx, String wanted) {
return ctx.sha().calculateHash(DataHelper.getUTF8(wanted)).toBase64().substring(0,20);
}

@@ -153,7 +153,7 @@ class SummaryRenderer {
* specify who can get it from where, etc.
*
*/
public static void render(I2PAppContext ctx, OutputStream out, String filename) throws IOException {
public static synchronized void render(I2PAppContext ctx, OutputStream out, String filename) throws IOException {
long end = ctx.clock().now();
long start = end - 60*1000*SummaryListener.PERIODS;
long begin = System.currentTimeMillis();
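Note: createName() above derives a short datasource name by hashing the human-readable stat name and keeping a prefix of its Base64 form. A conceptual sketch using the JDK's SHA-256 and URL-safe Base64; I2P's own sha() and Base64 variant differ in detail, so the output will not match byte for byte:

```java
// Sketch only: illustrates the "hash, encode, truncate" munging, not the I2P classes.
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

public class StatNameMunger {
    static String createName(String wanted) throws NoSuchAlgorithmException {
        byte[] hash = MessageDigest.getInstance("SHA-256")
                                   .digest(wanted.getBytes(StandardCharsets.UTF_8));
        String b64 = Base64.getUrlEncoder().withoutPadding().encodeToString(hash);
        return b64.substring(0, 20);   // short, unique enough, safe as an RRD datasource name
    }
}
```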
@@ -14,6 +14,8 @@
<jsp:useBean class="net.i2p.router.web.PeerHelper" id="peerHelper" scope="request" />
<jsp:setProperty name="peerHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
<jsp:setProperty name="peerHelper" property="out" value="<%=out%>" />
<jsp:setProperty name="peerHelper" property="urlBase" value="peers.jsp" />
<jsp:setProperty name="peerHelper" property="sort" value="<%=request.getParameter("sort") != null ? request.getParameter("sort") : ""%>" />
<jsp:getProperty name="peerHelper" property="peerSummary" />
</div>
@@ -39,7 +39,7 @@
<b>Active:</b> <jsp:getProperty name="helper" property="activePeers" />/<jsp:getProperty name="helper" property="activeProfiles" /><br />
<b>Fast:</b> <jsp:getProperty name="helper" property="fastPeers" /><br />
<b>High capacity:</b> <jsp:getProperty name="helper" property="highCapacityPeers" /><br />
<b>Well integrated:</b> <jsp:getProperty name="helper" property="wellIntegratedPeers" /><br />
<!-- <b>Well integrated:</b> <jsp:getProperty name="helper" property="wellIntegratedPeers" /><br /> -->
<b>Failing:</b> <jsp:getProperty name="helper" property="failingPeers" /><br />
<!-- <b>Shitlisted:</b> <jsp:getProperty name="helper" property="shitlistedPeers" /><br /> -->
<b>Known:</b> <jsp:getProperty name="helper" property="allPeers" /><br /><%
@@ -65,7 +65,7 @@
%><hr />

<u><b><a href="config.jsp" title="Configure the bandwidth limits">Bandwidth in/out</a></b></u><br />
<b>1s:</b> <jsp:getProperty name="helper" property="inboundMinuteKBps" />/<jsp:getProperty name="helper" property="outboundMinuteKBps" />KBps<br />
<b>1s:</b> <jsp:getProperty name="helper" property="inboundSecondKBps" />/<jsp:getProperty name="helper" property="outboundSecondKBps" />KBps<br />
<b>5m:</b> <jsp:getProperty name="helper" property="inboundFiveMinuteKBps" />/<jsp:getProperty name="helper" property="outboundFiveMinuteKBps" />KBps<br />
<b>Total:</b> <jsp:getProperty name="helper" property="inboundLifetimeKBps" />/<jsp:getProperty name="helper" property="outboundLifetimeKBps" />KBps<br />
<b>Used:</b> <jsp:getProperty name="helper" property="inboundTransferred" />/<jsp:getProperty name="helper" property="outboundTransferred" /><br />
@@ -83,6 +83,7 @@
<b>Job lag:</b> <jsp:getProperty name="helper" property="jobLag" /><br />
<b>Message delay:</b> <jsp:getProperty name="helper" property="messageDelay" /><br />
<b>Tunnel lag:</b> <jsp:getProperty name="helper" property="tunnelLag" /><br />
<b>Handle backlog:</b> <jsp:getProperty name="helper" property="inboundBacklog" /><br />
<hr />

</div>
@@ -9,18 +9,25 @@ if (templateFile != null) {
net.i2p.stat.Rate rate = null;
String stat = request.getParameter("stat");
String period = request.getParameter("period");
boolean fakeBw = (stat != null && ("bw.combined".equals(stat)));
net.i2p.stat.RateStat rs = net.i2p.I2PAppContext.getGlobalContext().statManager().getRate(stat);
if ( !rendered && (rs != null)) {
if ( !rendered && ((rs != null) || fakeBw) ) {
long per = -1;
try {
per = Long.parseLong(period);
rate = rs.getRate(per);
if (rate != null) {
if (fakeBw)
per = 60*1000;
else
per = Long.parseLong(period);
if (!fakeBw)
rate = rs.getRate(per);
if ( (rate != null) || (fakeBw) ) {
java.io.OutputStream cout = response.getOutputStream();
String format = request.getParameter("format");
if ("xml".equals(format)) {
response.setContentType("text/xml");
rendered = net.i2p.router.web.StatSummarizer.instance().getXML(rate, cout);
if (!fakeBw) {
response.setContentType("text/xml");
rendered = net.i2p.router.web.StatSummarizer.instance().getXML(rate, cout);
}
} else {
response.setContentType("image/png");
int width = -1;
@@ -39,7 +46,10 @@ if ( !rendered && (rs != null)) {
boolean showCredit = true;
if (request.getParameter("showCredit") != null)
showCredit = Boolean.valueOf(""+request.getParameter("showCredit")).booleanValue();
rendered = net.i2p.router.web.StatSummarizer.instance().renderPng(rate, cout, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
if (fakeBw)
rendered = net.i2p.router.web.StatSummarizer.instance().renderRatePng(cout, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
else
rendered = net.i2p.router.web.StatSummarizer.instance().renderPng(rate, cout, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
}
if (rendered)
cout.close();
@@ -1066,7 +1066,7 @@ public class BlogManager {
}

public boolean isBanned(Hash blog) {
if (blog == null) return false;
if ( (blog == null) || (blog.getData() == null) || (blog.getData().length <= 0) ) return false;
String str = blog.toBase64();
String banned = System.getProperty("syndie.bannedBlogs", "");
return (banned.indexOf(str) >= 0);
@@ -163,8 +163,9 @@ public class ArchiveIndex {
/** list of unique blogs locally known (set of Hash) */
public Set getUniqueBlogs() {
Set rv = new HashSet();
for (int i = 0; i < _blogs.size(); i++)
for (int i = 0; i < _blogs.size(); i++) {
rv.add(getBlog(i));
}
return rv;
}
public List getReplies(BlogURI uri) {
@@ -367,7 +368,10 @@ public class ArchiveIndex {
return;
tok.nextToken();
String keyStr = tok.nextToken();
Hash keyHash = new Hash(Base64.decode(keyStr));
byte k[] = Base64.decode(keyStr);
if ( (k == null) || (k.length != Hash.HASH_LENGTH) )
return; // ignore bad hashes
Hash keyHash = new Hash(k);
String whenStr = tok.nextToken();
long when = getIndexDate(whenStr);
String tag = tok.nextToken();
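Note: the change above stops constructing a Hash directly from whatever Base64 the index line contained and first checks that the decoded key has the expected length. An illustrative standalone version of that defensive decode (plain JDK Base64 here; I2P ships its own Base64 class):

```java
// Sketch only: validates a Base64-encoded hash before wrapping it, mirroring the diff's intent.
import java.util.Base64;

public class HashKeyCheck {
    static final int HASH_LENGTH = 32;   // SHA-256 output size, matching Hash.HASH_LENGTH

    /** Returns the decoded key, or null if the string is not a valid hash. */
    static byte[] decodeKey(String keyStr) {
        byte[] k;
        try {
            k = Base64.getDecoder().decode(keyStr);
        } catch (IllegalArgumentException iae) {
            return null;                 // not Base64 at all
        }
        return (k.length == HASH_LENGTH) ? k : null;  // ignore bad hashes
    }
}
```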
@@ -641,6 +641,8 @@ public class RemoteArchiveBean {
int newBlogs = 0;
for (Iterator iter = remoteBlogs.iterator(); iter.hasNext(); ) {
Hash blog = (Hash)iter.next();
if ( (blog == null) || (blog.getData() == null) || (blog.getData().length <= 0) )
continue;
if (ignoreBlog(user, blog))
continue;
if (!localBlogs.contains(blog)) {
@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.56 $ $Date: 2006/03/26 18:23:49 $";
public final static String VERSION = "0.6.1.14";
public final static String ID = "$Revision: 1.62 $ $Date: 2006-05-18 17:31:09 $";
public final static String VERSION = "0.6.1.20";

public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);
@@ -29,6 +29,8 @@ public class BufferedStatLog implements StatLog {
private String _lastFilters;
private BufferedWriter _out;
private String _outFile;
/** short circuit for adding data, set to true if some filters are set, false if its empty (so we can skip the sync) */
private volatile boolean _filtersSpecified;

private static final int BUFFER_SIZE = 1024;
private static final boolean DISABLE_LOGGING = false;
@@ -44,6 +46,7 @@ public class BufferedStatLog implements StatLog {
_lastWrite = _events.length-1;
_statFilters = new ArrayList(10);
_flushFrequency = 500;
_filtersSpecified = false;
I2PThread writer = new I2PThread(new StatLogWriter(), "StatLogWriter");
writer.setDaemon(true);
writer.start();
@@ -51,6 +54,7 @@ public class BufferedStatLog implements StatLog {

public void addData(String scope, String stat, long value, long duration) {
if (DISABLE_LOGGING) return;
if (!shouldLog(stat)) return;
synchronized (_events) {
_events[_eventNext].init(scope, stat, value, duration);
_eventNext = (_eventNext + 1) % _events.length;
@@ -72,6 +76,7 @@ public class BufferedStatLog implements StatLog {
}

private boolean shouldLog(String stat) {
if (!_filtersSpecified) return false;
synchronized (_statFilters) {
return _statFilters.contains(stat) || _statFilters.contains("*");
}
@@ -88,11 +93,18 @@ public class BufferedStatLog implements StatLog {
_statFilters.clear();
while (tok.hasMoreTokens())
_statFilters.add(tok.nextToken().trim());
if (_statFilters.size() > 0)
_filtersSpecified = true;
else
_filtersSpecified = false;
}
}
_lastFilters = val;
} else {
synchronized (_statFilters) { _statFilters.clear(); }
synchronized (_statFilters) {
_statFilters.clear();
_filtersSpecified = false;
}
}

String filename = _context.getProperty(StatManager.PROP_STAT_FILE);
@@ -146,7 +158,7 @@ public class BufferedStatLog implements StatLog {
updateFilters();
int cur = start;
while (cur != end) {
if (shouldLog(_events[cur].getStat())) {
//if (shouldLog(_events[cur].getStat())) {
String when = null;
synchronized (_fmt) {
when = _fmt.format(new Date(_events[cur].getTime()));
@@ -164,7 +176,7 @@ public class BufferedStatLog implements StatLog {
_out.write(" ");
_out.write(Long.toString(_events[cur].getDuration()));
_out.write("\n");
}
//}
cur = (cur + 1) % _events.length;
}
_out.flush();
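Note: the BufferedStatLog change is the "short circuit a highly congested part of the stat logging" item from the changelog: a volatile flag lets the hot addData() path skip synchronization entirely when no filters are configured. A sketch of that pattern with illustrative names (not the BufferedStatLog fields themselves):

```java
// Sketch: cheap volatile pre-check in front of a synchronized filter list.
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

public class FilteredLog {
    private final List<String> filters = new ArrayList<>();
    private volatile boolean filtersSpecified = false;

    void setFilters(String csv) {
        synchronized (filters) {
            filters.clear();
            if (csv != null) {
                StringTokenizer tok = new StringTokenizer(csv, ",");
                while (tok.hasMoreTokens())
                    filters.add(tok.nextToken().trim());
            }
            filtersSpecified = !filters.isEmpty();   // published via the volatile write
        }
    }

    boolean shouldLog(String stat) {
        if (!filtersSpecified) return false;         // fast path: no lock taken
        synchronized (filters) {
            return filters.contains(stat) || filters.contains("*");
        }
    }
}
```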
@@ -188,6 +188,8 @@ public class Rate {
long measuredPeriod = now - _lastCoalesceDate;
if (measuredPeriod < _period - SLACK) {
// no need to coalesce (assuming we only try to do so once per minute)
if (_log.shouldLog(Log.WARN))
_log.warn("not coalescing, measuredPeriod = " + measuredPeriod + " period = " + _period);
return;
}

@@ -431,6 +433,7 @@ public class Rate {

public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != Rate.class)) return false;
if (obj == this) return true;
Rate r = (Rate) obj;
return _period == r.getPeriod() && _creationDate == r.getCreationDate() &&
//_lastCoalesceDate == r.getLastCoalesceDate() &&
@@ -76,15 +76,33 @@ public class FortunaRandomSource extends RandomSource implements EntropyHarveste
if (n<=0)
throw new IllegalArgumentException("n must be positive");

if ((n & -n) == n) // i.e., n is a power of 2
return (int)((n * (long)nextBits(31)) >> 31);
////
// this shortcut from sun's docs neither works nor is necessary.
//
//if ((n & -n) == n) {
// // i.e., n is a power of 2
// return (int)((n * (long)nextBits(31)) >> 31);
//}

int bits, val;
do {
bits = nextBits(31);
val = bits % n;
} while(bits - val + (n-1) < 0);
return val;
int numBits = 0;
int remaining = n;
int rv = 0;
while (remaining > 0) {
remaining >>= 1;
rv += nextBits(8) << numBits*8;
numBits++;
}
if (rv < 0)
rv += n;
return rv % n;

//int bits, val;
//do {
// bits = nextBits(31);
// val = bits % n;
//} while(bits - val + (n-1) < 0);
//
//return val;
}

/**
@@ -157,11 +175,16 @@ public class FortunaRandomSource extends RandomSource implements EntropyHarveste
* through 2^numBits-1
*/
protected synchronized int nextBits(int numBits) {
int rv = 0;
long rv = 0;
int bytes = (numBits + 7) / 8;
for (int i = 0; i < bytes; i++)
rv += ((_fortuna.nextByte() & 0xFF) << i*8);
return rv;
//rv >>>= (64-numBits);
if (rv < 0)
rv = 0 - rv;
int off = 8*bytes - numBits;
rv >>>= off;
return (int)rv;
}

public EntropyHarvester harvester() { return this; }
@@ -175,4 +198,19 @@ public class FortunaRandomSource extends RandomSource implements EntropyHarveste
public synchronized void feedEntropy(String source, byte[] data, int offset, int len) {
_fortuna.addRandomBytes(data, offset, len);
}

public static void main(String args[]) {
try {
RandomSource rand = I2PAppContext.getGlobalContext().random();
java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
java.util.zip.GZIPOutputStream gos = new java.util.zip.GZIPOutputStream(baos);
for (int i = 0; i < 1024*1024; i++) {
int c = rand.nextInt(256);
gos.write((byte)c);
}
gos.finish();
byte compressed[] = baos.toByteArray();
System.out.println("Compressed size of 1MB: " + compressed.length);
} catch (Exception e) { e.printStackTrace(); }
}
}
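Note: this is the "PRNG bugfix" hunk from the changelog; it reworks how nextInt(n) maps raw Fortuna bits into [0, n). For contrast, a minimal rejection-sampling version of the same mapping, which is the approach java.util.Random documents; it is written against the JDK's SecureRandom as a stand-in, not against the Fortuna implementation in the diff:

```java
// Sketch: unbiased mapping of 31 random bits into [0, n) via rejection sampling.
import java.security.SecureRandom;

public class UniformInt {
    private static final SecureRandom RND = new SecureRandom();

    /** Uniform value in [0, n), n > 0, with no modulo bias. */
    static int nextInt(int n) {
        if (n <= 0) throw new IllegalArgumentException("n must be positive");
        int bits, val;
        do {
            bits = RND.nextInt() >>> 1;          // 31 random bits, always non-negative
            val = bits % n;
        } while (bits - val + (n - 1) < 0);      // reject values from the biased tail
        return val;
    }
}
```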
history.txt
@@ -1,4 +1,178 @@
$Id: history.txt,v 1.445 2006/04/04 23:40:04 jrandom Exp $
$Id: history.txt,v 1.479 2006-05-18 17:31:08 jrandom Exp $

* 2006-06-04 0.6.1.20 released

2006-06-04 jrandom
* Reduce the SSU ack frequency
* Tweaked the tunnel rejection settings to reject less aggressively

2006-05-31 jrandom
* Only send netDb searches to the floodfill peers for the time being
* Add some proof of concept filters for tunnel participation. By default,
it will skip peers with an advertised bandwith of less than 32KBps or
an advertised uptime of less than 2 hours. If this is sufficient, a
safer implementation of these filters will be implemented.

* 2006-05-18 0.6.1.19 released

2006-05-18 jrandom
* Made the SSU ACKs less frequent when possible

2006-05-17 Complication
* Fix some oversights in my previous changes:
adjust some loglevels, make a few statements less wasteful,
make one comparison less confusing and more likely to log unexpected values

2006-05-17 jrandom
* Make the peer page sortable
* SSU modifications to cut down on unnecessary connection failures

2006-05-16 jrandom
* Further shitlist randomizations
* Adjust the stats monitored for detecting cpu overload when dropping new
tunnel requests

2006-05-15 jrandom
* Add a load dependent throttle on the pending inbound tunnel request
backlog
* Increased the tunnel test failure slack before killing a tunnel

2006-05-13 Complication
* Separate growth factors for tunnel count and tunnel test time
* Reduce growth factors, so probabalistic throttle would activate
* Square probAccept values to decelerate stronger when far from average
* Create a bandwidth stat with approximately 15-second half life
* Make allowTunnel() check the 1-second bandwidth for overload
before doing allowance calculations using 15-second bandwidth
* Tweak the overload detector in BuildExecutor to be more sensitive
for rising edges, add ability to initiate tunnel drops
* Add a function to seek and drop the highest-rate participating tunnel,
keeping a fixed+random grace period between such drops.
It doesn't seem very effective, so disabled by default
("router.dropTunnelsOnOverload=true" to enable)

2006-05-11 jrandom
* PRNG bugfix (thanks cervantes and Complication!)

* 2006-05-09 0.6.1.18 released

2006-05-09 jrandom
* Further tunnel creation timeout revamp

2006-05-07 Complication
* Fix problem whereby repeated calls to allowed() would make
the 1-tunnel exception permit more than one concurrent build

2006-05-06 jrandom
* Readjust the tunnel creation timeouts to reject less but fail earlier,
while tracking the extended timeout events.

2006-05-04 jrandom
* Short circuit a highly congested part of the stat logging unless its
required (may or may not help with a synchronization issue reported by
andreas)

2006-05-03 Complication
* Allow a single build attempt to proceed despite 1-minute overload
only if the 1-second rate shows enough spare bandwidth
(e.g. overload has already eased)

2006-05-02 Complication
* Correct a misnamed property in SummaryHelper.java
to avoid confusion
* Make the maximum allowance of our own concurrent
tunnel builds slightly adaptive: one concurrent build per 6 KB/s
within the fixed range 2..10
* While overloaded, try to avoid completely choking our own build attempts,
instead prefer limiting them to 1

2006-05-01 jrandom
* Adjust the tunnel build timeouts to cut down on expirations, and
increased the SSU connection establishment retransmission rate to
something less glacial.
* For the first 5 minutes of uptime, be less aggressive with tunnel
exploration, opting for more reliable peers to start with.

2006-05-01 jrandom
* Fix for a netDb lookup race (thanks cervantes!)

2006-04-27 jrandom
* Avoid a race in the message reply registry (thanks cervantes!)

2006-04-27 jrandom
* Fixed the tunnel expiration desync code (thanks Complication!)

* 2006-04-23 0.6.1.17 released

2006-04-19 jrandom
* Adjust how we pick high capacity peers to allow the inclusion of fast
peers (the previous filter assumed an old usage pattern)
* New set of stats to help track per-packet-type bandwidth usage better
* Cut out the proactive tail drop from the SSU transport, for now
* Reduce the frequency of tunnel build attempts while we're saturated
* Don't drop tunnel requests as easily - prefer to explicitly reject them

* 2006-04-15 0.6.1.16 released

2006-04-15 jrandom
* Adjust the proactive tunnel request dropping so we will reject what we
can instead of dropping so much (but still dropping if we get too far
overloaded)

2006-04-14 jrandom
* 0 isn't very random
* Adjust the tunnel drop to be more reasonable

2006-04-14 jrandom
* -28.00230115311259 is not between 0 and 1 in any universe I know.
* Made the bw-related tunnel join throttle much simpler

2006-04-14 jrandom
* Make some more stats graphable, and allow some internal tweaking on the
tunnel pairing for creation and testing.

* 2006-04-13 0.6.1.15 released

2006-04-12 jrandom
* Added a further failsafe against trying to queue up too many messages to
a peer.

2006-04-12 jrandom
* Watch out for failed syndie index fetches (thanks bar!)

2006-04-11 jrandom
* Throttling improvements on SSU - throttle all transmissions to a peer
when we are retransmitting, not just retransmissions. Also, if
we're already retransmitting to a peer, probabalistically tail drop new
messages targetting that peer, based on the estimated wait time before
transmission.
* Fixed the rounding error in the inbound tunnel drop probability.

2006-04-10 jrandom
* Include a combined send/receive graph (good idea cervantes!)
* Proactively drop inbound tunnel requests probabalistically as the
estimated queue time approaches our limit, rather than letting them all
through up to that limit.

2006-04-08 jrandom
* Stat summarization fix (removing the occational holes in the jrobin
graphs)

2006-04-08 jrandom
* Process inbound tunnel requests more efficiently
* Proactively drop inbound tunnel requests if the queue before we'd
process it in is too long (dynamically adjusted by cpu load)
* Adjust the tunnel rejection throttle to reject requeusts when we have to
proactively drop too many requests.
* Display the number of pending inbound tunnel join requests on the router
console (as the "handle backlog")
* Include a few more stats in the default set of graphs

2006-04-06 jrandom
* Fix for a bug in the new irc ping/pong filter (thanks Complication!)

2006-04-06 jrandom
* Fixed a typo in the reply cleanup code

* 2006-04-05 0.6.1.14 released
@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/03/26 18:23:54 $">
<i2p.release version="0.6.1.14" date="2006/04/05" minVersion="0.6"
<i2p.news date="$Date: 2006-05-18 17:31:08 $">
<i2p.release version="0.6.1.20" date="2006/05/18" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -4,7 +4,7 @@

<info>
<appname>i2p</appname>
<appversion>0.6.1.14</appversion>
<appversion>0.6.1.20</appversion>
<authors>
<author name="I2P" email="support@i2p.net"/>
</authors>
news.xml
@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/04/04 22:06:00 $">
<i2p.release version="0.6.1.14" date="2006/04/05" minVersion="0.6"
<i2p.news date="$Date: 2006-05-30 22:19:24 $">
<i2p.release version="0.6.1.20" date="2006/05/18" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -10,13 +10,13 @@
anonlogs="http://i2p/Nf3ab-ZFkmI-LyMt7GjgT-jfvZ3zKDl0L96pmGQXF1B82W2Bfjf0n7~288vafocjFLnQnVcmZd~-p0-Oolfo9aW2Rm-AhyqxnxyLlPBqGxsJBXjPhm1JBT4Ia8FB-VXt0BuY0fMKdAfWwN61-tj4zIcQWRxv3DFquwEf035K~Ra4SWOqiuJgTRJu7~o~DzHVljVgWIzwf8Z84cz0X33pv-mdG~~y0Bsc2qJVnYwjjR178YMcRSmNE0FVMcs6f17c6zqhMw-11qjKpY~EJfHYCx4lBWF37CD0obbWqTNUIbL~78vxqZRT3dgAgnLixog9nqTO-0Rh~NpVUZnoUi7fNR~awW5U3Cf7rU7nNEKKobLue78hjvRcWn7upHUF45QqTDuaM3yZa7OsjbcH-I909DOub2Q0Dno6vIwuA7yrysccN1sbnkwZbKlf4T6~iDdhaSLJd97QCyPOlbyUfYy9QLNExlRqKgNVJcMJRrIual~Lb1CLbnzt0uvobM57UpqSAAAA/meeting141"
publiclogs="http://www.i2p.net/meeting141" />
•
2006-04-05: 0.6.1.14 released with important tunnel reliability, ssu, and peer
selection fixes.
<br>
2006-05-18: 0.6.1.19 <a href="http://dev.i2p/pipermail/i2p/2006-May/001290.html">released</a>
with PRNG bugfixes, congestion handling and SSU improvements.
<br />
•
2006-04-04:
<a href="http://dev.i2p/pipermail/i2p/2006-April/001275.html">status notes</a>
2006-05-30:
<a href="http://dev.i2p/pipermail/i2p/2006-May/001291.html">status notes</a>
and
<a href="http://www.i2p/meeting175">meeting log</a>
<br>
<a href="http://www.i2p/meeting181">meeting log</a>
<br />
</i2p.news>
@@ -333,7 +333,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
}

protected void verifyUnwritten() {
if (_written) throw new RuntimeException("Already written");
if (_written) throw new IllegalStateException("Already written");
}
protected void written() { _written = true; }
protected void read() { _read = true; }
@@ -23,7 +23,8 @@ import java.util.Set;
public abstract class CommSystemFacade implements Service {
public abstract void processMessage(OutNetMessage msg);

public void renderStatusHTML(Writer out) throws IOException { }
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException { }
public void renderStatusHTML(Writer out) throws IOException { renderStatusHTML(out, null, 0); }

/** Create the set of RouterAddress structures based on the router's config */
public Set createAddresses() { return new HashSet(); }
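Note: the CommSystemFacade hunk is a common compatibility pattern: add a richer overload (here, sortable/linkable status output for the peer page) and keep the old signature as a delegating default. A generic sketch of that pattern with illustrative names:

```java
// Sketch: old entry point delegates to the new, richer one with neutral defaults.
import java.io.IOException;
import java.io.Writer;

public abstract class StatusRenderer {
    /** New, richer entry point: subclasses override this one. */
    public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException { }

    /** Old entry point kept for compatibility; existing callers keep working unchanged. */
    public void renderStatusHTML(Writer out) throws IOException {
        renderStatusHTML(out, null, 0);
    }
}
```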
@@ -26,7 +26,7 @@ class JobQueueRunner implements Runnable {
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_state = 1;
}
@@ -506,9 +506,9 @@ public class LoadTestManager {
}

private int getBps() {
int used1s = RouterThrottleImpl.get1sRate(_context);
int used1m = RouterThrottleImpl.get1mRate(_context);
int used5m = RouterThrottleImpl.get5mRate(_context);
int used1s = _context.router().get1sRate();
int used1m = _context.router().get1mRate();
int used5m = _context.router().get5mRate();
return Math.max(used1s, Math.max(used1m, used5m));
}
@@ -35,8 +35,10 @@ import net.i2p.router.message.GarlicMessageHandler;
//import net.i2p.router.message.TunnelMessageHandler;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.startup.StartupJob;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.stat.StatManager;
import net.i2p.util.FileUtil;
import net.i2p.util.I2PThread;
import net.i2p.util.SimpleTimer;
@@ -1029,6 +1031,98 @@ public class Router {
t.start();
return true;
}

private static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";

/**
* What fraction of the bandwidth specified in our bandwidth limits should
* we allow to be consumed by participating tunnels?
*
*/
public double getSharePercentage() {
RouterContext ctx = _context;
if (ctx == null) return 0;
String pct = ctx.getProperty(PROP_BANDWIDTH_SHARE_PERCENTAGE);
if (pct != null) {
try {
double d = Double.parseDouble(pct);
if (d > 1)
return d/100d; // *cough* sometimes its 80 instead of .8 (!stab jrandom)
else
return d;
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.INFO))
_log.info("Unable to get the share percentage");
}
}
return 0.8;
}

public int get1sRate() { return get1sRate(false); }
public int get1sRate(boolean outboundOnly) {
RouterContext ctx = _context;
if (ctx != null) {
FIFOBandwidthLimiter bw = ctx.bandwidthLimiter();
if (bw != null) {
int out = (int)bw.getSendBps();
if (outboundOnly)
return out;
return (int)Math.max(out, bw.getReceiveBps());
}
}
return 0;
}

public int get15sRate() { return get15sRate(false); }
public int get15sRate(boolean outboundOnly) {
RouterContext ctx = _context;
if (ctx != null) {
FIFOBandwidthLimiter bw = ctx.bandwidthLimiter();
if (bw != null) {
int out = (int)bw.getSendBps15s();
if (outboundOnly)
return out;
return (int)Math.max(out, bw.getReceiveBps15s());
}
}
return 0;
}

public int get1mRate() { return get1mRate(false); }
public int get1mRate(boolean outboundOnly) {
int send = 0;
RouterContext ctx = _context;
if (ctx == null)
return 0;
StatManager mgr = ctx.statManager();
if (mgr == null)
return 0;
RateStat rs = mgr.getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(1*60*1000).getAverageValue();
if (outboundOnly)
return send;
int recv = 0;
rs = mgr.getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(1*60*1000).getAverageValue();
return Math.max(send, recv);
}
public int get5mRate() { return get5mRate(false); }
public int get5mRate(boolean outboundOnly) {
int send = 0;
RateStat rs = _context.statManager().getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(5*60*1000).getAverageValue();
if (outboundOnly)
return send;
int recv = 0;
rs = _context.statManager().getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(5*60*1000).getAverageValue();
return Math.max(send, recv);
}

}

/**
@@ -1087,7 +1181,7 @@ class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
}
}

SimpleTimer.getInstance().addEvent(this, 60*1000);
SimpleTimer.getInstance().addEvent(this, 20*1000);
}
}
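Note: getSharePercentage() above tolerates both ways users write the property ("80" as a percentage or "0.8" as a fraction). A standalone sketch of that normalization with a plain Properties lookup instead of the RouterContext:

```java
// Sketch only: same normalization rule, no I2P types involved.
import java.util.Properties;

public class SharePercentage {
    static final String PROP = "router.sharePercentage";
    static final double DEFAULT_SHARE = 0.8;

    static double getSharePercentage(Properties config) {
        String pct = config.getProperty(PROP);
        if (pct == null) return DEFAULT_SHARE;
        try {
            double d = Double.parseDouble(pct);
            return (d > 1) ? d / 100d : d;   // "80" -> 0.8, "0.8" -> 0.8
        } catch (NumberFormatException nfe) {
            return DEFAULT_SHARE;            // unparsable value: fall back to the default
        }
    }
}
```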
@@ -32,8 +32,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
|
||||
private static final String PROP_MAX_TUNNELS = "router.maxParticipatingTunnels";
|
||||
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
|
||||
private static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";
|
||||
|
||||
|
||||
/** tunnel acceptance */
|
||||
public static final int TUNNEL_ACCEPT = 0;
|
||||
|
||||
@@ -53,6 +52,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_context.statManager().createRateStat("router.throttleTunnelBytesAllowed", "How many bytes are allowed to be sent when we get a tunnel request (period is how many are currently allocated)?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelBytesUsed", "Used Bps at request (period = max KBps)?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelFailCount1m", "How many messages failed to be sent in the last 2 minutes when we throttle based on a spike in failures (period = 10 minute average failure count)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
|
||||
_context.statManager().createRateStat("router.throttleTunnelQueueOverload", "How many pending tunnel request messages have we received when we reject them due to overload (period = time to process each)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
|
||||
}
|
||||
|
||||
public boolean acceptNetworkMessage() {
|
||||
@@ -104,8 +104,8 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
int numTunnels = _context.tunnelManager().getParticipatingCount();
|
||||
|
||||
if (numTunnels > getMinThrottleTunnels()) {
|
||||
double growthFactor = getTunnelGrowthFactor();
|
||||
Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(60*60*1000);
|
||||
double tunnelGrowthFactor = getTunnelGrowthFactor();
|
||||
Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(10*60*1000);
|
||||
if (avgTunnels != null) {
|
||||
double avg = 0;
|
||||
if (avgTunnels.getLastEventCount() > 0)
|
||||
@@ -115,9 +115,10 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
int min = getMinThrottleTunnels();
|
||||
if (avg < min)
|
||||
avg = min;
|
||||
if ( (avg > 0) && (avg*growthFactor < numTunnels) ) {
|
||||
if ( (avg > 0) && (avg*tunnelGrowthFactor < numTunnels) ) {
|
||||
// we're accelerating, lets try not to take on too much too fast
|
||||
double probAccept = (avg*growthFactor) / numTunnels;
|
||||
double probAccept = (avg*tunnelGrowthFactor) / numTunnels;
|
||||
probAccept = probAccept * probAccept; // square the decelerator for tunnel counts
|
||||
int v = _context.random().nextInt(100);
|
||||
if (v < probAccept*100) {
|
||||
// ok
|
||||
@@ -133,40 +134,46 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
}
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Accepting tunnel request, since the average is " + avg
|
||||
_log.info("Accepting tunnel request, since the tunnel count average is " + avg
|
||||
+ " and we only have " + numTunnels + ")");
|
||||
}
|
||||
}
|
||||
|
||||
Rate tunnelTestTime10m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(10*60*1000);
|
||||
Rate tunnelTestTime60m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(60*60*1000);
|
||||
if ( (tunnelTestTime10m != null) && (tunnelTestTime60m != null) && (tunnelTestTime10m.getLastEventCount() > 0) ) {
|
||||
double avg10m = tunnelTestTime10m.getAverageValue();
|
||||
double avg60m = 0;
|
||||
if (tunnelTestTime60m.getLastEventCount() > 0)
|
||||
avg60m = tunnelTestTime60m.getAverageValue();
|
||||
else
|
||||
avg60m = tunnelTestTime60m.getLifetimeAverageValue();
|
||||
|
||||
if (avg60m < 2000)
|
||||
avg60m = 2000; // minimum before complaining
|
||||
|
||||
if ( (avg60m > 0) && (avg10m > avg60m * growthFactor) ) {
|
||||
double probAccept = (avg60m*growthFactor)/avg10m;
|
||||
int v = _context.random().nextInt(100);
|
||||
if (v < probAccept*100) {
|
||||
// ok
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Probabalistically accept tunnel request (p=" + probAccept
|
||||
+ " v=" + v + " test time avg 10m=" + avg10m + " 60m=" + avg60m + ")");
|
||||
} else {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Probabalistically refusing tunnel request (test time avg 10m=" + avg10m
|
||||
+ " 60m=" + avg60m + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg10m-avg60m), 0);
|
||||
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
}
|
||||
}
|
||||
|
||||
double tunnelTestTimeGrowthFactor = getTunnelTestTimeGrowthFactor();
|
||||
Rate tunnelTestTime1m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(1*60*1000);
|
||||
Rate tunnelTestTime10m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(10*60*1000);
|
||||
if ( (tunnelTestTime1m != null) && (tunnelTestTime10m != null) && (tunnelTestTime1m.getLastEventCount() > 0) ) {
|
||||
double avg1m = tunnelTestTime1m.getAverageValue();
|
||||
double avg10m = 0;
|
||||
if (tunnelTestTime10m.getLastEventCount() > 0)
|
||||
avg10m = tunnelTestTime10m.getAverageValue();
|
||||
else
|
||||
avg10m = tunnelTestTime10m.getLifetimeAverageValue();
|
||||
|
||||
if (avg10m < 2000)
|
||||
avg10m = 2000; // minimum before complaining
|
||||
|
||||
if ( (avg10m > 0) && (avg1m > avg10m * tunnelTestTimeGrowthFactor) ) {
|
||||
double probAccept = (avg10m*tunnelTestTimeGrowthFactor)/avg1m;
|
||||
probAccept = probAccept * probAccept; // square the decelerator for test times
|
||||
int v = _context.random().nextInt(100);
|
||||
if (v < probAccept*100) {
|
||||
// ok
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Probabalistically accept tunnel request (p=" + probAccept
|
||||
+ " v=" + v + " test time avg 1m=" + avg1m + " 10m=" + avg10m + ")");
|
||||
} else {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Probabalistically refusing tunnel request (test time avg 1m=" + avg1m
|
||||
+ " 10m=" + avg10m + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg1m-avg10m), 0);
|
||||
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
}
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Accepting tunnel request, since 60m test time average is " + avg10m
|
||||
+ " and past 1m only has " + avg1m + ")");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -190,54 +197,56 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
// of another tunnel?
|
||||
rs = _context.statManager().getRate("tunnel.participatingMessageCount");
|
||||
r = null;
|
||||
if (rs != null)
|
||||
double messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
|
||||
if (rs != null) {
|
||||
r = rs.getRate(10*60*1000);
|
||||
double messagesPerTunnel = (r != null ? r.getAverageValue() : 0d);
|
||||
if (r != null) {
|
||||
if (r.getLastEventCount() > 0)
|
||||
messagesPerTunnel = r.getAverageValue();
|
||||
else
|
||||
messagesPerTunnel = r.getLifetimeAverageValue();
|
||||
}
|
||||
}
|
||||
if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
|
||||
messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
|
||||
int participatingTunnels = _context.tunnelManager().getParticipatingCount();
|
||||
double bytesAllocated = messagesPerTunnel * participatingTunnels * 1024;
|
||||
|
||||
double bytesAllocated = messagesPerTunnel * numTunnels * net.i2p.router.tunnel.TrivialPreprocessor.PREPROCESSED_SIZE;
|
||||
|
||||
if (!allowTunnel(bytesAllocated, numTunnels)) {
|
||||
_context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0);
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
|
||||
int queuedRequests = _context.tunnelManager().getInboundBuildQueueSize();
|
||||
int timePerRequest = 1000;
|
||||
rs = _context.statManager().getRate("tunnel.decryptRequestTime");
|
||||
if (rs != null) {
|
||||
r = rs.getRate(60*1000);
|
||||
if (r.getLastEventCount() > 0)
|
||||
timePerRequest = (int)r.getAverageValue();
|
||||
else
|
||||
timePerRequest = (int)rs.getLifetimeAverageValue();
|
||||
}
|
||||
float pctFull = (queuedRequests * timePerRequest) / (10*1000f);
|
||||
float pReject = 1 - ((1-pctFull) * (1-pctFull));
|
||||
if ( (pctFull >= 1) || (pReject >= _context.random().nextFloat()) ) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Rejecting a new tunnel request because we have too many pending requests (" + queuedRequests
|
||||
+ " at " + timePerRequest + "ms each, %full = " + pctFull);
|
||||
_context.statManager().addRateData("router.throttleTunnelQueueOverload", queuedRequests, timePerRequest);
|
||||
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
}
|
||||
|
||||
// ok, all is well, let 'er in
_context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, 60*10*1000);

if (_log.shouldLog(Log.DEBUG))
_log.debug("Accepting a new tunnel request (now allocating " + bytesAllocated + " bytes across " + numTunnels
+ " tunnels with lag of " + lag + ")");
return TUNNEL_ACCEPT;
}

static int get1sRate(RouterContext ctx) {
return (int)Math.max(ctx.bandwidthLimiter().getSendBps(), ctx.bandwidthLimiter().getReceiveBps());
}
static int get1mRate(RouterContext ctx) {
int send = 0;
RateStat rs = ctx.statManager().getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(1*60*1000).getAverageValue();
int recv = 0;
rs = ctx.statManager().getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(1*60*1000).getAverageValue();
return Math.max(send, recv);
}
static int get5mRate(RouterContext ctx) {
int send = 0;
RateStat rs = ctx.statManager().getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(5*60*1000).getAverageValue();
int recv = 0;
rs = ctx.statManager().getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(5*60*1000).getAverageValue();
return Math.max(send, recv);
}

private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 600; // 1KBps
private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 60; // .1KBps
private static final int MIN_AVAILABLE_BPS = 4*1024; // always leave at least 4KBps free when allowing

/**
@@ -248,15 +257,48 @@ class RouterThrottleImpl implements RouterThrottle {
*/
private boolean allowTunnel(double bytesAllocated, int numTunnels) {
int maxKBps = Math.min(_context.bandwidthLimiter().getOutboundKBytesPerSecond(), _context.bandwidthLimiter().getInboundKBytesPerSecond());
int used1s = get1sRate(_context); // dont throttle on the 1s rate, its too volatile
int used1m = get1mRate(_context);
int used5m = 0; //get5mRate(_context); // don't throttle on the 5m rate, as that'd hide available bandwidth
int used = Math.max(Math.max(used1s, used1m), used5m);
int availBps = (int)(((maxKBps*1024) - used) * getSharePercentage());
int used1s = _context.router().get1sRate(); // dont throttle on the 1s rate, its too volatile
int used15s = _context.router().get15sRate();
int used1m = _context.router().get1mRate(); // dont throttle on the 1m rate, its too slow
int used = Math.min(used15s,used1s);

double share = _context.router().getSharePercentage();
int availBps = (int)(((maxKBps*1024)*share) - used); //(int)(((maxKBps*1024) - used) * getSharePercentage());

// Write stats before making decisions
_context.statManager().addRateData("router.throttleTunnelBytesUsed", used, maxKBps);
_context.statManager().addRateData("router.throttleTunnelBytesAllowed", availBps, (long)bytesAllocated);

if (used1m > (maxKBps*1024)) {
if (_log.shouldLog(Log.WARN)) _log.warn("Reject tunnel, 1m rate (" + used1m + ") indicates overload.");
return false;
}

if (true) {
// ok, ignore any predictions of 'bytesAllocated', since that makes poorly
// grounded conclusions about future use (or even the bursty use). Instead,
// simply say "do we have the bw to handle a new request"?
float maxBps = maxKBps * 1024f;
float pctFull = (maxBps - availBps) / (maxBps);
double probReject = Math.pow(pctFull, 16); // steep curve
double rand = _context.random().nextFloat();
boolean reject = (availBps < MIN_AVAILABLE_BPS) || (rand <= probReject);
if (reject && _log.shouldLog(Log.WARN))
_log.warn("reject = " + reject + " avail/maxK/used " + availBps + "/" + maxKBps + "/"
+ used + " pReject = " + probReject + " pFull = " + pctFull + " numTunnels = " + numTunnels
+ "rand = " + rand + " est = " + bytesAllocated + " share = " + (float)share);
else if (_log.shouldLog(Log.DEBUG))
_log.debug("reject = " + reject + " avail/maxK/used " + availBps + "/" + maxKBps + "/"
+ used + " pReject = " + probReject + " pFull = " + pctFull + " numTunnels = " + numTunnels
+ "rand = " + rand + " est = " + bytesAllocated + " share = " + (float)share);
if (reject) {
return false;
} else {
return true;
}
}
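The replacement logic above drops the per-tunnel byte prediction and simply asks whether there is bandwidth to spare right now: it computes how full the shared portion of the configured limit already is and rejects with probability pctFull^16, a very steep curve that only bites close to saturation, plus a hard floor on available bytes per second. A standalone sketch of that decision; the 4 KBps floor mirrors MIN_AVAILABLE_BPS in the hunk, the rest of the names are illustrative:

    import java.util.Random;

    public class BandwidthGate {
        private static final Random RANDOM = new Random();
        private static final int MIN_AVAILABLE_BPS = 4 * 1024;   // same floor as the hunk

        /**
         * @param maxKBps  configured bandwidth limit (KBytes/sec)
         * @param usedBps  bytes/sec currently in use
         * @param share    fraction of the limit we are willing to share (0..1)
         */
        static boolean allow(int maxKBps, int usedBps, double share) {
            float maxBps = maxKBps * 1024f;
            int availBps = (int) ((maxBps * share) - usedBps);
            if (availBps < MIN_AVAILABLE_BPS)
                return false;                                    // hard floor
            float pctFull = (maxBps - availBps) / maxBps;
            double probReject = Math.pow(pctFull, 16);           // steep curve: negligible until near-full
            return RANDOM.nextFloat() > probReject;
        }

        public static void main(String[] args) {
            // 128 KBps limit, 80% share, 40 KBps in use -> almost always allowed
            System.out.println(allow(128, 40 * 1024, 0.8));
        }
    }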
/*
if (availBps <= 8*1024) {
// lets be more conservative for people near their limit and assume 1KBps per tunnel
@@ -280,7 +322,7 @@ class RouterThrottleImpl implements RouterThrottle {
return true;
} else {
double probAllow = availBps / (allocatedBps + availBps);
boolean allow = (availBps > MIN_AVAILABLE_BPS) && (_context.random().nextDouble() <= probAllow);
boolean allow = (availBps > MIN_AVAILABLE_BPS) && (_context.random().nextFloat() <= probAllow);
if (allow) {
if (_log.shouldLog(Log.INFO))
_log.info("Probabalistically allowing the tunnel w/ " + (pctFull*100d) + "% of our " + availBps
@@ -297,28 +339,6 @@ class RouterThrottleImpl implements RouterThrottle {
}
}

/**
* What fraction of the bandwidth specified in our bandwidth limits should
* we allow to be consumed by participating tunnels?
*
*/
private double getSharePercentage() {
String pct = _context.getProperty(PROP_BANDWIDTH_SHARE_PERCENTAGE);
if (pct != null) {
try {
double d = Double.parseDouble(pct);
if (d > 1)
return d/100d; // *cough* sometimes its 80 instead of .8 (!stab jrandom)
else
return d;
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.INFO))
_log.info("Unable to get the share percentage");
}
}
return 0.8;
}
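The getSharePercentage() being removed above tolerates the share being configured either as a fraction (0.8) or as a whole percentage (80), and falls back to 0.8 on missing or bad input. The same defensive parse in isolation; the property key and the Properties source here are only for illustration:

    import java.util.Properties;

    public class SharePercentage {
        static double parseShare(Properties props) {
            String pct = props.getProperty("router.sharePercentage");   // illustrative key
            if (pct != null) {
                try {
                    double d = Double.parseDouble(pct);
                    return (d > 1) ? d / 100d : d;   // "80" and "0.8" both mean 80%
                } catch (NumberFormatException nfe) {
                    // fall through to the default below
                }
            }
            return 0.8;                              // default share
        }

        public static void main(String[] args) {
            Properties p = new Properties();
            p.setProperty("router.sharePercentage", "80");
            System.out.println(parseShare(p));        // 0.8
        }
    }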
/** dont ever probabalistically throttle tunnels if we have less than this many */
private int getMinThrottleTunnels() {
try {
@@ -330,9 +350,17 @@ class RouterThrottleImpl implements RouterThrottle {

private double getTunnelGrowthFactor() {
try {
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "3.0"));
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "1.3"));
} catch (NumberFormatException nfe) {
return 3.0;
return 1.3;
}
}

private double getTunnelTestTimeGrowthFactor() {
try {
return Double.parseDouble(_context.getProperty("router.tunnelTestTimeGrowthFactor", "1.3"));
} catch (NumberFormatException nfe) {
return 1.3;
}
}

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.385 $ $Date: 2006/04/04 23:40:10 $";
public final static String VERSION = "0.6.1.14";
public final static String ID = "$Revision: 1.419 $ $Date: 2006-05-18 17:31:10 $";
public final static String VERSION = "0.6.1.20";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);

@@ -53,6 +53,7 @@ class RouterWatchdog implements Runnable {
private void dumpStatus() {
if (_log.shouldLog(Log.ERROR)) {
Job cur = _context.jobQueue().getLastJob();
/*
if (cur != null)
_log.error("Most recent job: " + cur);
_log.error("Last job began: "
@@ -61,6 +62,7 @@ class RouterWatchdog implements Runnable {
_log.error("Last job ended: "
+ DataHelper.formatDuration(_context.clock().now()-_context.jobQueue().getLastJobEnd())
+ " ago");
*/
_log.error("Ready and waiting jobs: " + _context.jobQueue().getReadyCount());
_log.error("Job lag: " + _context.jobQueue().getMaxLag());
_log.error("Participating tunnel count: " + _context.tunnelManager().getParticipatingCount());

@@ -65,8 +65,10 @@ public class Shitlist {

long period = SHITLIST_DURATION_MS + _context.random().nextLong(SHITLIST_DURATION_MS);
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
if (prof != null)
if (prof != null) {
period = SHITLIST_DURATION_MS << prof.incrementShitlists();
period += _context.random().nextLong(period);
}

if (period > 60*60*1000)
period = 60*60*1000;
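The Shitlist change above scales the ban period exponentially with how often a peer has already been shitlisted (the base duration shifted left by the strike count), adds random jitter, and caps the result at one hour. A small sketch of that backoff-with-cap, independent of the router classes; the one-hour cap follows the hunk, the base duration here is an assumption:

    import java.util.Random;

    public class BackoffPeriod {
        private static final Random RANDOM = new Random();
        private static final long BASE_MS = 4 * 60 * 1000;   // assumed base duration (illustrative)
        private static final long CAP_MS = 60 * 60 * 1000;   // one-hour cap, as in the hunk

        /** Exponential backoff with jitter: base << strikes, plus up to 100% random jitter, capped. */
        static long nextPeriod(int strikes) {
            long period = BASE_MS << Math.min(strikes, 8);    // clamp the shift to avoid overflow
            period += (long) (RANDOM.nextDouble() * period);  // jitter in [0, period)
            return Math.min(period, CAP_MS);
        }

        public static void main(String[] args) {
            for (int s = 0; s <= 4; s++)
                System.out.println("strikes=" + s + " -> " + nextPeriod(s) + " ms");
        }
    }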
@@ -56,6 +56,9 @@ public interface TunnelManagerFacade extends Service {
/** When does the last tunnel we are participating in expire? */
public long getLastParticipatingExpiration();

/** count how many inbound tunnel requests we have received but not yet processed */
public int getInboundBuildQueueSize();

/**
* the client connected (or updated their settings), so make sure we have
* the tunnels for them, and whenever necessary, ask them to authorize
@@ -97,6 +100,7 @@ class DummyTunnelManagerFacade implements TunnelManagerFacade {
public void setOutboundSettings(TunnelPoolSettings settings) {}
public void setInboundSettings(Hash client, TunnelPoolSettings settings) {}
public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {}
public int getInboundBuildQueueSize() { return 0; }

public void renderStatusHTML(Writer out) throws IOException {}
public void restart() {}

@@ -123,7 +123,8 @@ public class GarlicMessageBuilder {

long timeFromNow = config.getExpiration() - ctx.clock().now();
if (timeFromNow < 1*1000) {
log.error("Building a message expiring in " + timeFromNow + "ms: " + config, new Exception("created by"));
if (log.shouldLog(Log.WARN))
log.warn("Building a message expiring in " + timeFromNow + "ms: " + config, new Exception("created by"));
return null;
}

@@ -113,7 +113,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look fora remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
@@ -120,6 +120,22 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
else
return false;
}

public List getKnownRouterData() {
List rv = new ArrayList();
DataStore ds = getDataStore();
if (ds != null) {
Set keys = ds.getKeys();
if (keys != null) {
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Object o = getDataStore().get((Hash)iter.next());
if (o instanceof RouterInfo)
rv.add(o);
}
}
}
return rv;
}

/**
* Begin a kademlia style search for the key specified, which can take up to timeoutMs and
@@ -166,21 +182,44 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }

Job find = null;
if ( (onFind != null) && (onFind.size() > 0) )
find = (Job)onFind.remove(0);
Job fail = null;
if ( (onFailed != null) && (onFailed.size() > 0) )
fail = (Job)onFailed.remove(0);
if (onFind != null) {
synchronized (onFind) {
if (onFind.size() > 0)
find = (Job)onFind.remove(0);
}
}
if (onFailed != null) {
synchronized (onFailed) {
if (onFailed.size() > 0)
fail = (Job)onFailed.remove(0);
}
}
SearchJob job = super.search(key, find, fail, timeoutMs, isLease);
if (job != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Floodfill search timed out for " + key.toBase64() + ", falling back on normal search (#"
+ job.getJobId() + ") with " + timeoutMs + " remaining");
long expiration = timeoutMs + _context.clock().now();
while ( (onFind != null) && (onFind.size() > 0) )
job.addDeferred((Job)onFind.remove(0), null, expiration, isLease);
while ( (onFailed != null) && (onFailed.size() > 0) )
job.addDeferred(null, (Job)onFailed.remove(0), expiration, isLease);
List removed = null;
if (onFind != null) {
synchronized (onFind) {
removed = new ArrayList(onFind);
onFind.clear();
}
for (int i = 0; i < removed.size(); i++)
job.addDeferred((Job)removed.get(i), null, expiration, isLease);
removed = null;
}
if (onFailed != null) {
synchronized (onFailed) {
removed = new ArrayList(onFailed);
onFailed.clear();
}
for (int i = 0; i < removed.size(); i++)
job.addDeferred(null, (Job)removed.get(i), expiration, isLease);
removed = null;
}
}
}
void complete(Hash key) {
@@ -263,10 +302,13 @@ class FloodSearchJob extends JobImpl {
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
while (_onFailed.size() > 0) {
Job job = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(job);
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
@@ -304,10 +346,13 @@ class FloodSearchJob extends JobImpl {
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
for (int i = 0; i < _onFailed.size(); i++) {
Job j = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(j);
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
@@ -316,8 +361,13 @@ class FloodSearchJob extends JobImpl {
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
while (_onFind.size() > 0)
getContext().jobQueue().addJob((Job)_onFind.remove(0));
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
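The rewritten FloodSearchJob callbacks all use the same pattern: instead of popping jobs off a shared list one at a time (which races with concurrent adders), they copy the list and clear it inside one synchronized block, then run the copied jobs outside the lock. A generic sketch of that copy-and-clear drain, with a plain Runnable standing in for the router's Job type:

    import java.util.ArrayList;
    import java.util.List;

    public class DrainExample {
        private final List<Runnable> callbacks = new ArrayList<Runnable>();

        public void add(Runnable r) {
            synchronized (callbacks) {
                callbacks.add(r);
            }
        }

        /** Snapshot-and-clear under the lock, then fire the callbacks outside it. */
        public void fireAll() {
            List<Runnable> removed;
            synchronized (callbacks) {
                removed = new ArrayList<Runnable>(callbacks);
                callbacks.clear();
            }
            for (Runnable r : removed)
                r.run();                 // no lock held while user code runs
        }

        public static void main(String[] args) {
            DrainExample d = new DrainExample();
            d.add(new Runnable() {
                public void run() { System.out.println("callback fired"); }
            });
            d.fireAll();
        }
    }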
@@ -87,6 +87,8 @@ class FloodfillPeerSelector extends PeerSelector {
if ( (!SearchJob.onlyQueryFloodfillPeers(_context)) && (_wanted > _matches) && (_key != null) ) {
BigInteger diff = getDistance(_key, entry);
_sorted.put(diff, entry);
} else {
return;
}
}
_matches++;

@@ -121,7 +121,7 @@ class SearchJob extends JobImpl {
public long getExpiration() { return _expiration; }
public long getTimeoutMs() { return _timeoutMs; }

private static final boolean DEFAULT_FLOODFILL_ONLY = false;
private static final boolean DEFAULT_FLOODFILL_ONLY = true;

static boolean onlyQueryFloodfillPeers(RouterContext ctx) {
if (isCongested(ctx))

@@ -15,9 +15,13 @@ import java.util.*;
import net.i2p.data.Hash;
import net.i2p.router.PeerSelectionCriteria;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.SimpleTimer;
import net.i2p.util.Log;

import net.i2p.data.RouterInfo;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;

/**
* Manage the current state of the statistics
*
@@ -204,12 +208,27 @@ class PeerManager {
return null;
}
public List getPeersByCapability(char capability) {
synchronized (_capabilitiesByPeer) {
List peers = locked_getPeers(capability);
if (peers != null)
return new ArrayList(peers);
if (false) {
synchronized (_capabilitiesByPeer) {
List peers = locked_getPeers(capability);
if (peers != null)
return new ArrayList(peers);
}
return null;
} else {
FloodfillNetworkDatabaseFacade f = (FloodfillNetworkDatabaseFacade)_context.netDb();
List routerInfos = f.getKnownRouterData();
List rv = new ArrayList();
for (Iterator iter = routerInfos.iterator(); iter.hasNext(); ) {
RouterInfo ri = (RouterInfo)iter.next();
String caps = ri.getCapabilities();
if (caps.indexOf(capability) >= 0)
rv.add(ri.getIdentity().calculateHash());
}
if (_log.shouldLog(Log.WARN))
_log.warn("Peers with capacity " + capability + ": " + rv.size());
return rv;
}
return null;
}
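The new getPeersByCapability() path above bypasses the local capability index and instead walks every known RouterInfo, keeping the hashes of routers whose capabilities string contains the requested flag. A simplified standalone version of that filter; the Map stands in for the netDb's data store and all names here are illustrative:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CapabilityScan {
        /** Map of router hash (hex string here) to its capabilities string, e.g. "fOR". */
        static List<String> peersWithCapability(Map<String, String> knownRouters, char capability) {
            List<String> rv = new ArrayList<String>();
            for (Map.Entry<String, String> e : knownRouters.entrySet()) {
                String caps = e.getValue();
                if (caps != null && caps.indexOf(capability) >= 0)
                    rv.add(e.getKey());            // collect the identity, not the full record
            }
            return rv;
        }

        public static void main(String[] args) {
            Map<String, String> routers = new HashMap<String, String>();
            routers.put("abcd", "fO");             // e.g. floodfill + high bandwidth
            routers.put("ef01", "R");
            System.out.println(peersWithCapability(routers, 'f'));   // [abcd]
        }
    }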
public void renderStatusHTML(Writer out) throws IOException {

@@ -251,10 +251,12 @@ public class ProfileOrganizer {
// we only use selectHighCapacityPeers when we are selecting for PURPOSE_TEST
// or we are falling back due to _fastPeers being too small, so we can always
// exclude the fast peers
/*
if (exclude == null)
exclude = new HashSet(_fastPeers.keySet());
else
exclude.addAll(_fastPeers.keySet());
*/
locked_selectPeers(_highCapacityPeers, howMany, exclude, matches);
}
if (matches.size() < howMany) {
@@ -809,6 +811,8 @@ public class ProfileOrganizer {
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + peer.toBase64() + " is locally known, allowing its use");
// perhaps check to see if they are active?

return true;
}
} else {

@@ -83,8 +83,8 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}
public void recheckReachability() { _manager.recheckReachability(); }

public void renderStatusHTML(Writer out) throws IOException {
_manager.renderStatusHTML(out);
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
_manager.renderStatusHTML(out, urlBase, sortFlags);
}

public Set createAddresses() {

@@ -49,6 +49,8 @@ public class FIFOBandwidthLimiter {
private long _lastStatsUpdated;
private float _sendBps;
private float _recvBps;
private float _sendBps15s;
private float _recvBps15s;

private static int __id = 0;

@@ -66,6 +68,8 @@ public class FIFOBandwidthLimiter {
_context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.sendBps1s", "How fast we are transmitting for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps1s", "How fast we are receiving for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.sendBps15s", "How fast we are transmitting for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps15s", "How fast we are receiving for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_lastTotalSent = _totalAllocatedOutboundBytes;
@@ -97,6 +101,8 @@ public class FIFOBandwidthLimiter {
public void setOutboundUnlimited(boolean isUnlimited) { _outboundUnlimited = isUnlimited; }
public float getSendBps() { return _sendBps; }
public float getReceiveBps() { return _recvBps; }
public float getSendBps15s() { return _sendBps15s; }
public float getReceiveBps15s() { return _recvBps15s; }

public int getOutboundKBytesPerSecond() { return _refiller.getOutboundKBytesPerSecond(); }
public int getInboundKBytesPerSecond() { return _refiller.getInboundKBytesPerSecond(); }
@@ -270,14 +276,16 @@ public class FIFOBandwidthLimiter {
private void updateStats() {
long now = now();
long time = now - _lastStatsUpdated;
// If at least one second has passed
if (time >= 1000) {
long totS = _totalAllocatedOutboundBytes;
long totR = _totalAllocatedInboundBytes;
long sent = totS - _lastTotalSent;
long recv = totR - _lastTotalReceived;
long sent = totS - _lastTotalSent; // How much we sent meanwhile
long recv = totR - _lastTotalReceived; // How much we received meanwhile
_lastTotalSent = totS;
_lastTotalReceived = totR;
_lastStatsUpdated = now;

if (_sendBps <= 0)
_sendBps = ((float)sent*1000f)/(float)time;
else
@@ -286,12 +294,33 @@ public class FIFOBandwidthLimiter {
_recvBps = ((float)recv*1000f)/(float)time;
else
_recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/(float)time;

if (_log.shouldLog(Log.WARN)) {
if (_log.shouldLog(Log.INFO))
_log.info("BW: time = " + time + " sent: " + sent + " recv: " + recv);
_context.statManager().getStatLog().addData("bw", "bw.sendBps1s", (long)_sendBps, sent);
_context.statManager().getStatLog().addData("bw", "bw.recvBps1s", (long)_recvBps, recv);
}

// Maintain an approximate average with a 15-second halflife
// Weights (0.955 and 0.045) are tuned so that transition between two values (e.g. 0..10)
// would reach their midpoint (e.g. 5) in 15s
if (_sendBps15s <= 0)
_sendBps15s = ((float)sent*15*1000f)/(float)time;
else
_sendBps15s = (0.955f)*_sendBps15s + (0.045f)*((float)sent*1000f)/(float)time;

if (_recvBps15s <= 0)
_recvBps15s = ((float)recv*15*1000f)/(float)time;
else
_recvBps15s = (0.955f)*_recvBps15s + (0.045f)*((float)recv*1000)/(float)time;

if (_log.shouldLog(Log.WARN)) {
if (_log.shouldLog(Log.INFO))
_log.info("BW15: time = " + time + " sent: " + sent + " recv: " + recv);
_context.statManager().getStatLog().addData("bw", "bw.sendBps15s", (long)_sendBps15s, sent);
_context.statManager().getStatLog().addData("bw", "bw.recvBps15s", (long)_recvBps15s, recv);
}
}
}
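The updateStats() additions keep two smoothed rates per direction: the existing fast one (0.9/0.1 weights) and a slower one whose 0.955/0.045 weights are tuned, per the comment in the hunk, so a step change covers half its distance in roughly 15 seconds of one-second samples (0.955^15 is about 0.5). A standalone sketch of that pair of exponential moving averages; the weights come from the hunk, the class and its seeding behaviour are illustrative:

    public class SmoothedRate {
        private float fast;   // short-horizon EWMA, weights 0.9 / 0.1
        private float slow;   // ~15s-halflife EWMA, weights 0.955 / 0.045

        /** Feed one sample: bytes moved over elapsedMs milliseconds. */
        void update(long bytes, long elapsedMs) {
            float bps = (bytes * 1000f) / elapsedMs;
            fast = (fast <= 0) ? bps : 0.9f * fast + 0.1f * bps;
            slow = (slow <= 0) ? bps : 0.955f * slow + 0.045f * bps;
        }

        public static void main(String[] args) {
            SmoothedRate r = new SmoothedRate();
            for (int i = 0; i < 30; i++) r.update(2 * 1024, 1000);    // settle near 2 KBps
            for (int i = 0; i < 15; i++) r.update(10 * 1024, 1000);   // step up to 10 KBps
            // after 15 one-second samples the slow average sits roughly midway (~6 KBps)
            System.out.println("fast=" + r.fast + " slow=" + r.slow);
        }
    }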
@@ -151,7 +151,8 @@ public class OutboundMessageRegistry {
if (oldMsg != null) {
List multi = null;
if (oldMsg instanceof OutNetMessage) {
multi = Collections.synchronizedList(new ArrayList(4));
//multi = Collections.synchronizedList(new ArrayList(4));
multi = new ArrayList(4);
multi.add(oldMsg);
multi.add(msg);
_selectorToMessage.put(sel, multi);
@@ -193,21 +194,20 @@ public class OutboundMessageRegistry {
public void renderStatusHTML(Writer out) throws IOException {}

private class CleanupTask implements SimpleTimer.TimedEvent {
private List _removing;
private long _nextExpire;
public CleanupTask() {
_removing = new ArrayList(4);
_nextExpire = -1;
}
public void timeReached() {
long now = _context.clock().now();
List removing = new ArrayList(1);
synchronized (_selectors) {
for (int i = 0; i < _selectors.size(); i++) {
MessageSelector sel = (MessageSelector)_selectors.get(i);
if (sel == null) continue;
long expiration = sel.getExpiration();
if (expiration <= now) {
_removing.add(sel);
removing.add(sel);
_selectors.remove(i);
i--;
} else if (expiration < _nextExpire || _nextExpire < now) {
@@ -215,17 +215,19 @@ public class OutboundMessageRegistry {
}
}
}
if (_removing.size() > 0) {
for (int i = 0; i < _removing.size(); i++) {
MessageSelector sel = (MessageSelector)_removing.get(i);
if (removing.size() > 0) {
for (int i = 0; i < removing.size(); i++) {
MessageSelector sel = (MessageSelector)removing.get(i);
OutNetMessage msg = null;
List msgs = null;
synchronized (_selectorToMessage) {
Object o = _selectorToMessage.remove(sel);
if (o instanceof OutNetMessage)
if (o instanceof OutNetMessage) {
msg = (OutNetMessage)o;
else if (o instanceof List)
msgs = new ArrayList((List)o);
} else if (o instanceof List) {
//msgs = new ArrayList((List)o);
msgs = (List)o;
}
}
if (msg != null) {
synchronized (_activeMessages) {
@@ -239,14 +241,13 @@ public class OutboundMessageRegistry {
_activeMessages.removeAll(msgs);
}
for (int j = 0; j < msgs.size(); j++) {
msg = (OutNetMessage)msgs.get(i);
msg = (OutNetMessage)msgs.get(j);
Job fail = msg.getOnFailedReplyJob();
if (fail != null)
_context.jobQueue().addJob(fail);
}
}
}
_removing.clear();
}

if (_nextExpire <= now)
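The CleanupTask fix above stops reusing the shared _removing field across timer runs (and corrects the msgs.get(i) vs msgs.get(j) indexing slip) by collecting expired selectors into a list local to each invocation, so concurrent or back-to-back sweeps never see each other's leftovers. A minimal sketch of that shape, with a plain list of expiring entries standing in for the registry's selectors:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public class ExpiringRegistry {
        static class Entry {
            final String name;
            final long expiration;
            Entry(String name, long expiration) { this.name = name; this.expiration = expiration; }
        }

        private final List<Entry> entries = new ArrayList<Entry>();

        public void add(Entry e) {
            synchronized (entries) { entries.add(e); }
        }

        /** Each sweep uses its own local 'removing' list; expiry work runs outside the lock. */
        public void sweep(long now) {
            List<Entry> removing = new ArrayList<Entry>();
            synchronized (entries) {
                for (Iterator<Entry> it = entries.iterator(); it.hasNext(); ) {
                    Entry e = it.next();
                    if (e.expiration <= now) {
                        removing.add(e);
                        it.remove();
                    }
                }
            }
            for (Entry e : removing)
                System.out.println("expired: " + e.name);
        }

        public static void main(String[] args) {
            ExpiringRegistry r = new ExpiringRegistry();
            r.add(new Entry("a", 100));
            r.add(new Entry("b", 5000));
            r.sweep(1000);                        // expires only "a"
        }
    }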
@@ -41,7 +41,7 @@ public interface Transport {
public int countActiveSendPeers();
public List getMostRecentErrorMessages();

public void renderStatusHTML(Writer out) throws IOException;
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException;
public short getReachabilityStatus();
public void recheckReachability();
}

@@ -203,6 +203,8 @@ public abstract class TransportImpl implements Transport {
+ msg.getMessageType() + " message with selector " + selector, new Exception("fail cause"));
if (msg.getOnFailedSendJob() != null)
_context.jobQueue().addJob(msg.getOnFailedSendJob());
if (msg.getOnFailedReplyJob() != null)
_context.jobQueue().addJob(msg.getOnFailedReplyJob());
if (selector != null)
_context.messageRegistry().unregisterPending(msg);
log = true;
@@ -361,6 +363,7 @@ public abstract class TransportImpl implements Transport {
public void setListener(TransportEventListener listener) { _listener = listener; }
/** Make this stuff pretty (only used in the old console) */
public void renderStatusHTML(Writer out) throws IOException {}
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException { renderStatusHTML(out); }

public RouterContext getContext() { return _context; }
public short getReachabilityStatus() { return CommSystemFacade.STATUS_UNKNOWN; }

@@ -244,7 +244,7 @@ public class TransportManager implements TransportEventListener {
return rv;
}

public void renderStatusHTML(Writer out) throws IOException {
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
TreeMap transports = new TreeMap();
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
@@ -252,7 +252,7 @@ public class TransportManager implements TransportEventListener {
}
for (Iterator iter = transports.values().iterator(); iter.hasNext(); ) {
Transport t= (Transport)iter.next();
t.renderStatusHTML(out);
t.renderStatusHTML(out, urlBase, sortFlags);
}
StringBuffer buf = new StringBuffer(4*1024);
buf.append("Listening on: <br /><pre>\n");

@@ -22,7 +22,7 @@ public class ACKSender implements Runnable {
private boolean _alive;

/** how frequently do we want to send ACKs to a peer? */
static final int ACK_FREQUENCY = 100;
static final int ACK_FREQUENCY = 500;

public ACKSender(RouterContext ctx, UDPTransport transport) {
_context = ctx;
@@ -60,6 +60,16 @@ public class ACKSender implements Runnable {
}
}

private long ackFrequency(long timeSinceACK, long rtt) {
// if we are actively pumping lots of data to them, we can depend upon
// the unsentACKThreshold to figure out when to send an ACK instead of
// using the timer, so we can set the timeout/frequency higher
if (timeSinceACK < 2*1000)
return Math.max(rtt/2, 500);
else
return ACK_FREQUENCY;
}
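ackFrequency() above relaxes the ACK timer when data has flowed to the peer recently, on the reasoning that the unsent-ACK threshold will trigger an ACK anyway; only an otherwise idle link falls back to the fixed 500 ms cadence. The same decision as a small pure function; the constants follow the hunk, the surrounding class is illustrative:

    public class AckTimer {
        static final int ACK_FREQUENCY = 500;   // ms, idle-link cadence from the hunk

        /**
         * @param timeSinceAckMs  how long since we last sent this peer an ACK
         * @param rttMs           current round-trip-time estimate
         * @return how long we may wait before the next timer-driven ACK
         */
        static long ackFrequency(long timeSinceAckMs, long rttMs) {
            if (timeSinceAckMs < 2 * 1000)
                return Math.max(rttMs / 2, 500);   // busy link: the unsent-ACK threshold will fire anyway
            return ACK_FREQUENCY;                  // idle link: ACK promptly
        }

        public static void main(String[] args) {
            System.out.println(ackFrequency(800, 2400));   // busy, slow link -> 1200 ms
            System.out.println(ackFrequency(5000, 2400));  // idle link       -> 500 ms
        }
    }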
public void run() {
while (_alive) {
PeerState peer = null;
@@ -70,7 +80,7 @@ public class ACKSender implements Runnable {
for (int i = 0; i < _peersToACK.size(); i++) {
PeerState cur = (PeerState)_peersToACK.get(i);
long wanted = cur.getWantedACKSendSince();
long delta = wanted + ACK_FREQUENCY - now;
long delta = wanted + ackFrequency(now-cur.getLastACKSend(), cur.getRTT()) - now;
if ( ( (wanted > 0) && (delta < 0) ) || (cur.unsentACKThresholdReached()) ) {
_peersToACK.remove(i);
peer = cur;

@@ -62,10 +62,10 @@ public class EstablishmentManager {
_queuedOutbound = new HashMap(32);
_liveIntroductions = new HashMap(32);
_activityLock = new Object();
_context.statManager().createRateStat("udp.inboundEstablishTime", "How long it takes for a new inbound session to be established", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishTime", "How long it takes for a new outbound session to be established", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.inboundEstablishFailedState", "What state a failed inbound establishment request fails in", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishFailedState", "What state a failed outbound establishment request fails in", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.inboundEstablishTime", "How long it takes for a new inbound session to be established", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishTime", "How long it takes for a new outbound session to be established", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.inboundEstablishFailedState", "What state a failed inbound establishment request fails in", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishFailedState", "What state a failed outbound establishment request fails in", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendIntroRelayRequest", "How often we send a relay request to reach a peer", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendIntroRelayTimeout", "How often a relay request times out before getting a response (due to the target or intro peer being offline)", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.receiveIntroRelayResponse", "How long it took to receive a relay response", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
@@ -576,9 +576,9 @@ public class EstablishmentManager {
return;
}
_transport.send(_builder.buildSessionCreatedPacket(state, _transport.getExternalPort(), _transport.getIntroKey()));
// if they haven't advanced to sending us confirmed packets in 5s,
// if they haven't advanced to sending us confirmed packets in 1s,
// repeat
state.setNextSendTime(now + 5*1000);
state.setNextSendTime(now + 1000);
}

private void sendRequest(OutboundEstablishState state) {
@@ -943,7 +943,7 @@ public class EstablishmentManager {

Hash peer = outboundState.getRemoteIdentity().calculateHash();
_context.shitlist().shitlistRouter(peer, err);
_transport.dropPeer(peer);
_transport.dropPeer(peer, false, err);
//_context.profileManager().commErrorOccurred(peer);
} else {
while (true) {
@@ -988,15 +988,15 @@ public class EstablishmentManager {

long delay = nextSendTime - now;
if ( (nextSendTime == -1) || (delay > 0) ) {
if (delay > 5000)
delay = 5000;
if (delay > 1000)
delay = 1000;
boolean interrupted = false;
try {
synchronized (_activityLock) {
if (_activity > 0)
return;
if (nextSendTime == -1)
_activityLock.wait(5000);
_activityLock.wait(1000);
else
_activityLock.wait(delay);
}

@@ -360,9 +360,9 @@ public class OutboundEstablishState {
/** note that we just sent the SessionConfirmed packet */
public synchronized void confirmedPacketsSent() {
_lastSend = _context.clock().now();
_nextSend = _lastSend + 5*1000;
_nextSend = _lastSend + 1000;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send confirm packets, nextSend = 5s");
_log.debug("Send confirm packets, nextSend = 1s");
if ( (_currentState == STATE_UNKNOWN) ||
(_currentState == STATE_REQUEST_SENT) ||
(_currentState == STATE_CREATED_RECEIVED) )
@@ -371,15 +371,15 @@ public class OutboundEstablishState {
/** note that we just sent the SessionRequest packet */
public synchronized void requestSent() {
_lastSend = _context.clock().now();
_nextSend = _lastSend + 5*1000;
_nextSend = _lastSend + 1000;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send a request packet, nextSend = 5s");
_log.debug("Send a request packet, nextSend = 1s");
if (_currentState == STATE_UNKNOWN)
_currentState = STATE_REQUEST_SENT;
}
public synchronized void introSent() {
_lastSend = _context.clock().now();
_nextSend = _lastSend + 5*1000;
_nextSend = _lastSend + 1000;
if (_currentState == STATE_UNKNOWN)
_currentState = STATE_PENDING_INTRO;
}

@@ -61,7 +61,7 @@ public class OutboundMessageFragments {
_context.statManager().createRateStat("udp.sendConfirmVolley", "How many times did fragments need to be sent before ACK", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendFailed", "How many sends a failed message was pushed", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendAggressiveFailed", "How many volleys was a packet sent before we gave up", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundActiveCount", "How many messages are in the active pool", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundActiveCount", "How many messages are in the peer's active pool", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendRejected", "What volley are we on when the peer was throttled (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.partialACKReceived", "How many fragments were partially ACKed (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendSparse", "How many fragments were partially ACKed and hence not resent (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
@@ -402,6 +402,7 @@ public class PacketBuilder {
authenticate(packet, ourIntroKey, ourIntroKey, iv);
setTo(packet, to, state.getSentPort());
_ivCache.release(iv);
packet.setMessageType(53);
return packet;
}

@@ -465,6 +466,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, state.getIntroKey(), state.getIntroKey());
setTo(packet, to, state.getSentPort());
packet.setMessageType(52);
return packet;
}

@@ -571,6 +573,7 @@ public class PacketBuilder {
}

setTo(packet, to, state.getSentPort());
packet.setMessageType(51);
return packet;
}

@@ -623,6 +626,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, toCipherKey, toMACKey);
setTo(packet, toIP, toPort);
packet.setMessageType(50);
return packet;
}

@@ -667,6 +671,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, aliceIntroKey, aliceIntroKey);
setTo(packet, aliceIP, alicePort);
packet.setMessageType(49);
return packet;
}

@@ -713,6 +718,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, charlieCipherKey, charlieMACKey);
setTo(packet, charlieIP, charliePort);
packet.setMessageType(48);
return packet;
}

@@ -757,6 +763,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, bobCipherKey, bobMACKey);
setTo(packet, bobIP, bobPort);
packet.setMessageType(47);
return packet;
}

@@ -854,6 +861,7 @@ public class PacketBuilder {
if (encrypt)
authenticate(packet, new SessionKey(introKey), new SessionKey(introKey));
setTo(packet, introHost, introPort);
packet.setMessageType(46);
return packet;
}

@@ -903,6 +911,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, charlie.getCurrentCipherKey(), charlie.getCurrentMACKey());
setTo(packet, charlie.getRemoteIPAddress(), charlie.getRemotePort());
packet.setMessageType(45);
return packet;
}

@@ -963,6 +972,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, aliceIntroKey, aliceIntroKey);
setTo(packet, aliceAddr, alice.getPort());
packet.setMessageType(44);
return packet;
}

@@ -994,6 +1004,8 @@ public class PacketBuilder {
// its just for hole punching
packet.getPacket().setLength(0);
setTo(packet, to, port);

packet.setMessageType(43);
return packet;
}
@@ -68,6 +68,17 @@ public class PacketHandler {
_context.statManager().createRateStat("udp.packetVerifyTimeSlow", "How long it takes the PacketHandler to verify a data packet after dequeueing when its slow (period is dequeue time)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetValidateMultipleCount", "How many times we validate a packet, if done more than once (period = afterValidate-enqueue)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetNoValidationLifetime", "How long packets that are never validated are around for", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.sessionRequest", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.sessionConfirmed", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.sessionCreated", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.dataKnown", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.dataKnownAck", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.dataUnknown", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.dataUnknownAck", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.test", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.relayRequest", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.relayIntro", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.receivePacketSize.relayResponse", "Packet size of the given inbound packet type (period is the packet's lifetime)", "udp", new long[] { 60*1000, 10*60*1000 });
}

public void startup() {
@@ -440,14 +451,17 @@ public class PacketHandler {
case UDPPacket.PAYLOAD_TYPE_SESSION_REQUEST:
_state = 47;
_establisher.receiveSessionRequest(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.sessionRequest", packet.getPacket().getLength(), packet.getLifetime());
break;
case UDPPacket.PAYLOAD_TYPE_SESSION_CONFIRMED:
_state = 48;
_establisher.receiveSessionConfirmed(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.sessionConfirmed", packet.getPacket().getLength(), packet.getLifetime());
break;
case UDPPacket.PAYLOAD_TYPE_SESSION_CREATED:
_state = 49;
_establisher.receiveSessionCreated(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.sessionCreated", packet.getPacket().getLength(), packet.getLifetime());
break;
case UDPPacket.PAYLOAD_TYPE_DATA:
_state = 50;
@@ -472,6 +486,14 @@ public class PacketHandler {
}
packet.beforeReceiveFragments();
_inbound.receiveData(state, dr);
_context.statManager().addRateData("udp.receivePacketSize.dataKnown", packet.getPacket().getLength(), packet.getLifetime());
if (dr.readFragmentCount() <= 0)
_context.statManager().addRateData("udp.receivePacketSize.dataKnownAck", packet.getPacket().getLength(), packet.getLifetime());
} else {
_context.statManager().addRateData("udp.receivePacketSize.dataUnknown", packet.getPacket().getLength(), packet.getLifetime());
UDPPacketReader.DataReader dr = reader.getDataReader();
if (dr.readFragmentCount() <= 0)
_context.statManager().addRateData("udp.receivePacketSize.dataUnknownAck", packet.getPacket().getLength(), packet.getLifetime());
}
break;
case UDPPacket.PAYLOAD_TYPE_TEST:
@@ -479,21 +501,25 @@ public class PacketHandler {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received test packet: " + reader + " from " + from);
_testManager.receiveTest(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.test", packet.getPacket().getLength(), packet.getLifetime());
break;
case UDPPacket.PAYLOAD_TYPE_RELAY_REQUEST:
if (_log.shouldLog(Log.INFO))
_log.info("Received relay request packet: " + reader + " from " + from);
_introManager.receiveRelayRequest(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.relayRequest", packet.getPacket().getLength(), packet.getLifetime());
break;
case UDPPacket.PAYLOAD_TYPE_RELAY_INTRO:
if (_log.shouldLog(Log.INFO))
_log.info("Received relay intro packet: " + reader + " from " + from);
_introManager.receiveRelayIntro(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.relayIntro", packet.getPacket().getLength(), packet.getLifetime());
break;
case UDPPacket.PAYLOAD_TYPE_RELAY_RESPONSE:
if (_log.shouldLog(Log.INFO))
_log.info("Received relay response packet: " + reader + " from " + from);
_establisher.receiveRelayResponse(from, reader);
_context.statManager().addRateData("udp.receivePacketSize.relayResponse", packet.getPacket().getLength(), packet.getLifetime());
break;
default:
_state = 52;
@@ -10,19 +10,19 @@ import java.util.Map;
import java.net.InetAddress;
import java.net.UnknownHostException;

import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Job;

/**
* Contain all of the state about a UDP connection to a peer.
*
*/
public class PeerState {
private I2PAppContext _context;
private RouterContext _context;
private Log _log;
/**
* The peer are we talking to. This should be set as soon as this
@@ -216,7 +216,7 @@ public class PeerState {
/** override the default MTU */
private static final String PROP_DEFAULT_MTU = "i2np.udp.mtu";

public PeerState(I2PAppContext ctx, UDPTransport transport) {
public PeerState(RouterContext ctx, UDPTransport transport) {
_context = ctx;
_log = ctx.logManager().getLog(PeerState.class);
_transport = transport;
@@ -278,6 +278,8 @@ public class PeerState {
_context.statManager().createRateStat("udp.rejectConcurrentActive", "How many messages are currently being sent to the peer when we reject it (period is how many concurrent packets we allow)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.allowConcurrentActive", "How many messages are currently being sent to the peer when we accept it (period is how many concurrent packets we allow)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.rejectConcurrentSequence", "How many consecutive concurrency rejections have we had when we stop rejecting (period is how many concurrent packets we are on)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.queueDropSize", "How many messages were queued up when it was considered full, causing a tail drop?", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.queueAllowTotalLifetime", "When a peer is retransmitting and we probabalistically allow a new message, what is the sum of the pending message lifetimes? (period is the new message's lifetime)?", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
}

private int getDefaultMTU() {
@@ -445,13 +447,13 @@ public class PeerState {
if (_concurrentMessagesActive < 0)
_concurrentMessagesActive = 0;

long now = _context.clock().now()/(10*1000);
if (_lastFailedSendPeriod >= now) {
// ignore... too fast
} else {
_lastFailedSendPeriod = now;
//long now = _context.clock().now()/(10*1000);
//if (_lastFailedSendPeriod >= now) {
// // ignore... too fast
//} else {
// _lastFailedSendPeriod = now;
_consecutiveFailedSends++;
}
//}
return _consecutiveFailedSends;
}
public long getInactivityTime() {
@@ -874,7 +876,7 @@ public class PeerState {
double retransPct = 0;
if (_packetsTransmitted > 10) {
retransPct = (double)_packetsRetransmitted/(double)_packetsTransmitted;
boolean wantLarge = retransPct < .50d; // heuristic to allow fairly lossy links to use large MTUs
boolean wantLarge = retransPct < .30d; // heuristic to allow fairly lossy links to use large MTUs
if (wantLarge && _mtu != LARGE_MTU) {
if (_context.random().nextLong(_mtuDecreases) <= 0) {
_mtu = LARGE_MTU;
@@ -997,7 +999,6 @@ public class PeerState {
}

public RemoteHostId getRemoteHostId() { return _remoteHostId; }

public int add(OutboundMessageState state) {
if (_dead) {
@@ -1009,10 +1010,54 @@ public class PeerState {
_log.debug("Adding to " + _remotePeer.toBase64() + ": " + state.getMessageId());
List msgs = _outboundMessages;
if (msgs == null) return 0;
int rv = 0;
boolean fail = false;
synchronized (msgs) {
msgs.add(state);
return msgs.size();
rv = msgs.size() + 1;
if (rv > 32) {
// 32 queued messages? to *one* peer? nuh uh.
fail = true;
rv--;
} else if (_retransmitter != null) {
long lifetime = _retransmitter.getLifetime();
long totalLifetime = lifetime;
for (int i = 1; i < msgs.size(); i++) { // skip the first, as thats the retransmitter
OutboundMessageState cur = (OutboundMessageState)msgs.get(i);
totalLifetime += cur.getLifetime();
}
long remaining = -1;
OutNetMessage omsg = state.getMessage();
if (omsg != null)
remaining = omsg.getExpiration() - _context.clock().now();
else
remaining = 10*1000 - state.getLifetime();

if (remaining <= 0)
remaining = 1; // total lifetime will exceed it anyway, guaranteeing failure
float pDrop = totalLifetime / (float)remaining;
pDrop = pDrop * pDrop * pDrop;
if (false && (pDrop >= _context.random().nextFloat())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Proactively tail dropping for " + _remotePeer.toBase64() + " (messages=" + msgs.size()
+ " headLifetime=" + lifetime + " totalLifetime=" + totalLifetime + " curLifetime=" + state.getLifetime()
+ " remaining=" + remaining + " pDrop=" + pDrop + ")");
_context.statManager().addRateData("udp.queueDropSize", msgs.size(), totalLifetime);
fail = true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Probabalistically allowing for " + _remotePeer.toBase64() + " (messages=" + msgs.size()
+ " headLifetime=" + lifetime + " totalLifetime=" + totalLifetime + " curLifetime=" + state.getLifetime()
+ " remaining=" + remaining + " pDrop=" + pDrop + ")");
_context.statManager().addRateData("udp.queueAllowTotalLifetime", totalLifetime, lifetime);
msgs.add(state);
}
} else {
msgs.add(state);
}
}
if (fail)
_transport.failed(state, false);
return rv;
}
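The reworked PeerState.add() above caps the per-peer queue at 32 messages and sketches a proactive tail drop, currently disabled behind if (false && ...), whose probability is the ratio of the queued messages' summed lifetime to the new message's remaining time, cubed so only badly backed-up queues drop aggressively. A standalone sketch of that drop decision; the queue cap and the cubing follow the hunk, the rest is illustrative:

    import java.util.Random;

    public class TailDrop {
        private static final Random RANDOM = new Random();
        private static final int MAX_QUEUED = 32;   // hard cap from the hunk

        /**
         * @param queued              messages already queued for this peer
         * @param totalQueuedLifeMs   summed age of those queued messages
         * @param remainingMs         time the new message has before it expires
         */
        static boolean shouldDrop(int queued, long totalQueuedLifeMs, long remainingMs) {
            if (queued >= MAX_QUEUED)
                return true;                        // queue is simply full
            if (remainingMs <= 0)
                remainingMs = 1;                    // already (nearly) expired: drop almost surely
            float pDrop = totalQueuedLifeMs / (float) remainingMs;
            pDrop = pDrop * pDrop * pDrop;          // cube: lenient until the backlog dwarfs the deadline
            return pDrop >= RANDOM.nextFloat();
        }

        public static void main(String[] args) {
            System.out.println(shouldDrop(4, 800, 8000));    // backlog is 10% of the deadline -> ~0.1% drop chance
            System.out.println(shouldDrop(20, 30000, 5000)); // backlog is 6x the deadline -> certain drop
        }
    }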
/** drop all outbound messages */
public void dropOutbound() {
@@ -1202,7 +1247,7 @@ public class PeerState {
* mind bw/cwin throttle, etc)
*
*/
private static final boolean THROTTLE_INITIAL_SEND = false;
private static final boolean THROTTLE_INITIAL_SEND = true;

private static final int SSU_HEADER_SIZE = 46;
static final int UDP_HEADER_SIZE = 8;
@@ -1516,6 +1561,7 @@ public class PeerState {
buf.append(" lifetime: ").append(now-_keyEstablishedTime);
buf.append(" cwin: ").append(_sendWindowBytes);
buf.append(" acwin: ").append(_sendWindowBytesRemaining);
buf.append(" consecFail: ").append(_consecutiveFailedSends);
buf.append(" recv OK/Dup: ").append(_packetsReceived).append('/').append(_packetsReceivedDuplicate);
buf.append(" send OK/Dup: ").append(_packetsTransmitted).append('/').append(_packetsRetransmitted);
return buf.toString();

@@ -55,7 +55,21 @@ public class UDPSender {
_context.statManager().createRateStat("udp.sendPacketSize.18", "tunnel data message size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.19", "tunnel gateway message size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.20", "data message size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.21", "tunnel build", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.22", "tunnel build reply", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.20", "data message size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.42", "ack-only packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.43", "hole punch packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.44", "relay response packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.45", "relay intro packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.46", "relay request packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.47", "peer test charlie to bob packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.48", "peer test bob to charlie packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.49", "peer test to alice packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.50", "peer test from alice packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.51", "session confirmed packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.52", "session request packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.53", "session created packet size", "udp", new long[] { 60*1000, 5*60*1000, 30*60*1000 });
}

public void startup() {
@@ -211,7 +225,7 @@ public class UDPSender {
//_log.debug("Sending packet: (size="+size + "/"+size2 +")\nraw: " + Base64.encode(packet.getPacket().getData(), 0, size));
}

//_context.statManager().addRateData("udp.sendPacketSize." + packet.getMessageType(), size, packet.getFragmentCount());
_context.statManager().addRateData("udp.sendPacketSize." + packet.getMessageType(), size, packet.getFragmentCount());

//packet.getPacket().setLength(size);
try {

@@ -153,8 +153,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_needsRebuild = true;

_context.statManager().createRateStat("udp.alreadyConnected", "What is the lifetime of a reestablished session", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.droppedPeer", "How long ago did we receive from a dropped peer (duration == session lifetime", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.droppedPeerInactive", "How long ago did we receive from a dropped peer (duration == session lifetime)", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.droppedPeer", "How long ago did we receive from a dropped peer (duration == session lifetime", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.droppedPeerInactive", "How long ago did we receive from a dropped peer (duration == session lifetime)", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.statusOK", "How many times the peer test returned OK", "udp", new long[] { 5*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.statusDifferent", "How many times the peer test returned different IP/ports", "udp", new long[] { 5*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.statusReject", "How many times the peer test returned reject unsolicited", "udp", new long[] { 5*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
@@ -163,6 +163,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_context.statManager().createRateStat("udp.addressUpdated", "How many times we adjust our own reachable IP address", "udp", new long[] { 1*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.proactiveReestablish", "How long a session was idle for when we proactively reestablished it", "udp", new long[] { 1*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("udp.dropPeerDroplist", "How many peers currently have their packets dropped outright when a new peer is added to the list?", "udp", new long[] { 1*60*1000, 20*60*1000 });
|
||||
_context.statManager().createRateStat("udp.dropPeerConsecutiveFailures", "How many consecutive failed sends to a peer did we attempt before giving up and reestablishing a new session (lifetime is inactivity perood)", "udp", new long[] { 1*60*1000, 10*60*1000 });
|
||||
__instance = this;
|
||||
}
|
||||
|
||||
@@ -601,7 +602,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
}
|
||||
}
|
||||
_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network");
|
||||
dropPeer(peerHash);
|
||||
dropPeer(peerHash, false, "wrong network");
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping the peer " + peerHash.toBase64() + " because they are in the wrong net");
|
||||
return;
|
||||
@@ -636,22 +637,28 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
|
||||
public boolean isInDropList(RemoteHostId peer) { synchronized (_dropList) { return _dropList.contains(peer); } }
|
||||
|
||||
void dropPeer(Hash peer) {
|
||||
void dropPeer(Hash peer, boolean shouldShitlist, String why) {
|
||||
PeerState state = getPeerState(peer);
|
||||
if (state != null)
|
||||
dropPeer(state, false);
|
||||
dropPeer(state, shouldShitlist, why);
|
||||
}
|
||||
private void dropPeer(PeerState peer, boolean shouldShitlist) {
|
||||
private void dropPeer(PeerState peer, boolean shouldShitlist, String why) {
|
||||
if (_log.shouldLog(Log.WARN)) {
|
||||
long now = _context.clock().now();
|
||||
StringBuffer buf = new StringBuffer(4096);
|
||||
long timeSinceSend = now - peer.getLastSendTime();
|
||||
long timeSinceRecv = now - peer.getLastReceiveTime();
|
||||
long timeSinceAck = now - peer.getLastACKSend();
|
||||
long timeSinceSendOK = now - peer.getLastSendFullyTime();
|
||||
int consec = peer.getConsecutiveFailedSends();
|
||||
buf.append("Dropping remote peer: ").append(peer.toString()).append(" shitlist? ").append(shouldShitlist);
|
||||
buf.append(" lifetime: ").append(now - peer.getKeyEstablishedTime());
|
||||
buf.append(" time since send/recv/ack: ").append(timeSinceSend).append(" / ");
|
||||
buf.append(" time since send/fully/recv/ack: ").append(timeSinceSend).append(" / ");
|
||||
buf.append(timeSinceSendOK).append(" / ");
|
||||
buf.append(timeSinceRecv).append(" / ").append(timeSinceAck);
|
||||
buf.append(" consec failures: ").append(consec);
|
||||
if (why != null)
|
||||
buf.append(" cause: ").append(why);
|
||||
/*
|
||||
buf.append("Existing peers: \n");
|
||||
synchronized (_peersByIdent) {
|
||||
@@ -694,14 +701,10 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
if (peer.getRemotePeer() != null) {
|
||||
dropPeerCapacities(peer);
|
||||
|
||||
if (shouldShitlist) {
|
||||
long now = _context.clock().now();
|
||||
_context.statManager().addRateData("udp.droppedPeer", now - peer.getLastReceiveTime(), now - peer.getKeyEstablishedTime());
|
||||
if (shouldShitlist)
|
||||
_context.shitlist().shitlistRouter(peer.getRemotePeer(), "dropped after too many retries");
|
||||
} else {
|
||||
long now = _context.clock().now();
|
||||
_context.statManager().addRateData("udp.droppedPeerInactive", now - peer.getLastReceiveTime(), now - peer.getKeyEstablishedTime());
|
||||
}
|
||||
long now = _context.clock().now();
|
||||
_context.statManager().addRateData("udp.droppedPeer", now - peer.getLastReceiveTime(), now - peer.getKeyEstablishedTime());
|
||||
synchronized (_peersByIdent) {
|
||||
altByIdent = (PeerState)_peersByIdent.remove(peer.getRemotePeer());
|
||||
}
|
||||
@@ -725,8 +728,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
rebuildExternalAddress();
|
||||
|
||||
// deal with races to make sure we drop the peers fully
|
||||
if ( (altByIdent != null) && (peer != altByIdent) ) dropPeer(altByIdent, shouldShitlist);
|
||||
if ( (altByHost != null) && (peer != altByHost) ) dropPeer(altByHost, shouldShitlist);
|
||||
if ( (altByIdent != null) && (peer != altByIdent) ) dropPeer(altByIdent, shouldShitlist, "recurse");
|
||||
if ( (altByHost != null) && (peer != altByHost) ) dropPeer(altByHost, shouldShitlist, "recurse");
|
||||
}
|
||||
|
||||
private boolean needsRebuild() {
|
||||
@@ -842,7 +845,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
return (pref != null) && "true".equals(pref);
|
||||
}
|
||||
|
||||
private static final int MAX_IDLE_TIME = 60*1000;
|
||||
private static final int MAX_IDLE_TIME = 5*60*1000;
|
||||
|
||||
public String getStyle() { return STYLE; }
|
||||
public void send(OutNetMessage msg) {
|
||||
@@ -866,7 +869,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
(peer.getConsecutiveFailedSends() > 0) &&
|
||||
(inboundActive <= 0)) {
|
||||
// peer is waaaay idle, drop the con and queue it up as a new con
|
||||
dropPeer(peer, false);
|
||||
dropPeer(peer, false, "proactive reconnection");
|
||||
msg.timestamp("peer is really idle, dropping con and reestablishing");
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Proactive reestablish to " + to.toBase64());
|
||||
@@ -1085,10 +1088,16 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
_log.warn("Consecutive failure #" + consecutive
|
||||
+ " on " + msg.toString()
|
||||
+ " to " + msg.getPeer());
|
||||
if ( (consecutive > MAX_CONSECUTIVE_FAILED) && (msg.getPeer().getInactivityTime() > DROP_INACTIVITY_TIME))
|
||||
dropPeer(msg.getPeer(), false);
|
||||
else if (consecutive > 2 * MAX_CONSECUTIVE_FAILED) // they're sending us data, but we cant reply?
|
||||
dropPeer(msg.getPeer(), false);
|
||||
if ( (_context.clock().now() - msg.getPeer().getLastSendFullyTime() <= 60*1000) || (consecutive < MAX_CONSECUTIVE_FAILED) ) {
|
||||
// ok, a few conseutive failures, but we /are/ getting through to them
|
||||
} else {
|
||||
_context.statManager().addRateData("udp.dropPeerConsecutiveFailures", consecutive, msg.getPeer().getInactivityTime());
|
||||
dropPeer(msg.getPeer(), false, "too many failures");
|
||||
}
|
||||
//if ( (consecutive > MAX_CONSECUTIVE_FAILED) && (msg.getPeer().getInactivityTime() > DROP_INACTIVITY_TIME))
|
||||
// dropPeer(msg.getPeer(), false);
|
||||
//else if (consecutive > 2 * MAX_CONSECUTIVE_FAILED) // they're sending us data, but we cant reply?
|
||||
// dropPeer(msg.getPeer(), false);
|
||||
}
|
||||
noteSend(msg, false);
|
||||
if (m != null)
|
||||
@@ -1181,21 +1190,6 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
return active;
|
||||
}
|
||||
|
||||
private static class AlphaComparator implements Comparator {
|
||||
private static final AlphaComparator _instance = new AlphaComparator();
|
||||
public static final AlphaComparator instance() { return _instance; }
|
||||
|
||||
public int compare(Object lhs, Object rhs) {
|
||||
if ( (lhs == null) || (rhs == null) || !(lhs instanceof PeerState) || !(rhs instanceof PeerState))
|
||||
throw new IllegalArgumentException("rhs = " + rhs + " lhs = " + lhs);
|
||||
PeerState l = (PeerState)lhs;
|
||||
PeerState r = (PeerState)rhs;
|
||||
// base64 retains binary ordering
|
||||
return DataHelper.compareTo(l.getRemotePeer().getData(), r.getRemotePeer().getData());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static UDPTransport __instance;
|
||||
/** **internal, do not use** */
|
||||
public static final UDPTransport _instance() { return __instance; }
|
||||
@@ -1216,8 +1210,302 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
return peers;
|
||||
}
|
||||
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
TreeSet peers = new TreeSet(AlphaComparator.instance());
|
||||
private static final int FLAG_ALPHA = 0;
|
||||
private static final int FLAG_IDLE_IN = 1;
|
||||
private static final int FLAG_IDLE_OUT = 2;
|
||||
private static final int FLAG_RATE_IN = 3;
|
||||
private static final int FLAG_RATE_OUT = 4;
|
||||
private static final int FLAG_SKEW = 5;
|
||||
private static final int FLAG_CWND= 6;
|
||||
private static final int FLAG_SSTHRESH = 7;
|
||||
private static final int FLAG_RTT = 8;
|
||||
private static final int FLAG_DEV = 9;
|
||||
private static final int FLAG_RTO = 10;
|
||||
private static final int FLAG_MTU = 11;
|
||||
private static final int FLAG_SEND = 12;
|
||||
private static final int FLAG_RECV = 13;
|
||||
private static final int FLAG_RESEND = 14;
|
||||
private static final int FLAG_DUP = 15;
|
||||
private static final int FLAG_UPTIME = 16;
|
||||
|
||||
private Comparator getComparator(int sortFlags) {
|
||||
Comparator rv = null;
|
||||
switch (Math.abs(sortFlags)) {
|
||||
case FLAG_IDLE_IN:
|
||||
rv = IdleInComparator.instance();
|
||||
break;
|
||||
case FLAG_IDLE_OUT:
|
||||
rv = IdleOutComparator.instance();
|
||||
break;
|
||||
case FLAG_RATE_IN:
|
||||
rv = RateInComparator.instance();
|
||||
break;
|
||||
case FLAG_RATE_OUT:
|
||||
rv = RateOutComparator.instance();
|
||||
break;
|
||||
case FLAG_UPTIME:
|
||||
rv = UptimeComparator.instance();
|
||||
break;
|
||||
case FLAG_SKEW:
|
||||
rv = SkewComparator.instance();
|
||||
break;
|
||||
case FLAG_CWND:
|
||||
rv = CwndComparator.instance();
|
||||
break;
|
||||
case FLAG_SSTHRESH:
|
||||
rv = SsthreshComparator.instance();
|
||||
break;
|
||||
case FLAG_RTT:
|
||||
rv = RTTComparator.instance();
|
||||
break;
|
||||
case FLAG_DEV:
|
||||
rv = DevComparator.instance();
|
||||
break;
|
||||
case FLAG_RTO:
|
||||
rv = RTOComparator.instance();
|
||||
break;
|
||||
case FLAG_MTU:
|
||||
rv = MTUComparator.instance();
|
||||
break;
|
||||
case FLAG_SEND:
|
||||
rv = SendCountComparator.instance();
|
||||
break;
|
||||
case FLAG_RECV:
|
||||
rv = RecvCountComparator.instance();
|
||||
break;
|
||||
case FLAG_RESEND:
|
||||
rv = ResendComparator.instance();
|
||||
break;
|
||||
case FLAG_DUP:
|
||||
rv = DupComparator.instance();
|
||||
break;
|
||||
case FLAG_ALPHA:
|
||||
default:
|
||||
rv = AlphaComparator.instance();
|
||||
break;
|
||||
}
|
||||
if (sortFlags < 0)
|
||||
rv = new InverseComparator(rv);
|
||||
return rv;
|
||||
}
|
||||
private static class AlphaComparator extends PeerComparator {
|
||||
private static final AlphaComparator _instance = new AlphaComparator();
|
||||
public static final AlphaComparator instance() { return _instance; }
|
||||
}
|
||||
private static class IdleInComparator extends PeerComparator {
|
||||
private static final IdleInComparator _instance = new IdleInComparator();
|
||||
public static final IdleInComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getLastReceiveTime() - r.getLastReceiveTime();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class IdleOutComparator extends PeerComparator {
|
||||
private static final IdleOutComparator _instance = new IdleOutComparator();
|
||||
public static final IdleOutComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getLastSendTime() - r.getLastSendTime();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class RateInComparator extends PeerComparator {
|
||||
private static final RateInComparator _instance = new RateInComparator();
|
||||
public static final RateInComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getReceiveBps() - r.getReceiveBps();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class RateOutComparator extends PeerComparator {
|
||||
private static final RateOutComparator _instance = new RateOutComparator();
|
||||
public static final RateOutComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getSendBps() - r.getSendBps();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class UptimeComparator extends PeerComparator {
|
||||
private static final UptimeComparator _instance = new UptimeComparator();
|
||||
public static final UptimeComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getKeyEstablishedTime() - r.getKeyEstablishedTime();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class SkewComparator extends PeerComparator {
|
||||
private static final SkewComparator _instance = new SkewComparator();
|
||||
public static final SkewComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = Math.abs(l.getClockSkew()) - Math.abs(r.getClockSkew());
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class CwndComparator extends PeerComparator {
|
||||
private static final CwndComparator _instance = new CwndComparator();
|
||||
public static final CwndComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getSendWindowBytes() - r.getSendWindowBytes();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class SsthreshComparator extends PeerComparator {
|
||||
private static final SsthreshComparator _instance = new SsthreshComparator();
|
||||
public static final SsthreshComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getSlowStartThreshold() - r.getSlowStartThreshold();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class RTTComparator extends PeerComparator {
|
||||
private static final RTTComparator _instance = new RTTComparator();
|
||||
public static final RTTComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getRTT() - r.getRTT();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class DevComparator extends PeerComparator {
|
||||
private static final DevComparator _instance = new DevComparator();
|
||||
public static final DevComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getRTTDeviation() - r.getRTTDeviation();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class RTOComparator extends PeerComparator {
|
||||
private static final RTOComparator _instance = new RTOComparator();
|
||||
public static final RTOComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getRTO() - r.getRTO();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class MTUComparator extends PeerComparator {
|
||||
private static final MTUComparator _instance = new MTUComparator();
|
||||
public static final MTUComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getMTU() - r.getMTU();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class SendCountComparator extends PeerComparator {
|
||||
private static final SendCountComparator _instance = new SendCountComparator();
|
||||
public static final SendCountComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getPacketsTransmitted() - r.getPacketsTransmitted();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class RecvCountComparator extends PeerComparator {
|
||||
private static final RecvCountComparator _instance = new RecvCountComparator();
|
||||
public static final RecvCountComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getPacketsReceived() - r.getPacketsReceived();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class ResendComparator extends PeerComparator {
|
||||
private static final ResendComparator _instance = new ResendComparator();
|
||||
public static final ResendComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getPacketsRetransmitted() - r.getPacketsRetransmitted();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
private static class DupComparator extends PeerComparator {
|
||||
private static final DupComparator _instance = new DupComparator();
|
||||
public static final DupComparator instance() { return _instance; }
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
long rv = l.getPacketsReceivedDuplicate() - r.getPacketsReceivedDuplicate();
|
||||
if (rv == 0) // fallback on alpha
|
||||
return super.compare(l, r);
|
||||
else
|
||||
return (int)rv;
|
||||
}
|
||||
}
|
||||
|
||||
private static class PeerComparator implements Comparator {
|
||||
public int compare(Object lhs, Object rhs) {
|
||||
if ( (lhs == null) || (rhs == null) || !(lhs instanceof PeerState) || !(rhs instanceof PeerState))
|
||||
throw new IllegalArgumentException("rhs = " + rhs + " lhs = " + lhs);
|
||||
return compare((PeerState)lhs, (PeerState)rhs);
|
||||
}
|
||||
protected int compare(PeerState l, PeerState r) {
|
||||
// base64 retains binary ordering
|
||||
return DataHelper.compareTo(l.getRemotePeer().getData(), r.getRemotePeer().getData());
|
||||
}
|
||||
}
|
||||
private static class InverseComparator implements Comparator {
|
||||
private Comparator _comp;
|
||||
public InverseComparator(Comparator comp) { _comp = comp; }
|
||||
public int compare(Object lhs, Object rhs) {
|
||||
return -1 * _comp.compare(lhs, rhs);
|
||||
}
|
||||
}
|
||||
|
||||
private void appendSortLinks(StringBuffer buf, String urlBase, int sortFlags, String descr, int ascending) {
|
||||
if (sortFlags == ascending) {
|
||||
buf.append(" <a href=\"").append(urlBase).append("?sort=").append(0-ascending);
|
||||
buf.append("\" title=\"").append(descr).append("\">V</a><b>^</b> ");
|
||||
} else if (sortFlags == 0 - ascending) {
|
||||
buf.append(" <b>V</b><a href=\"").append(urlBase).append("?sort=").append(ascending);
|
||||
buf.append("\" title=\"").append(descr).append("\">^</a> ");
|
||||
} else {
|
||||
buf.append(" <a href=\"").append(urlBase).append("?sort=").append(0-ascending);
|
||||
buf.append("\" title=\"").append(descr).append("\">V</a><a href=\"").append(urlBase).append("?sort=").append(ascending);
|
||||
buf.append("\" title=\"").append(descr).append("\">^</a> ");
|
||||
}
|
||||
}
|
||||
|
||||
//public void renderStatusHTML(Writer out) throws IOException { renderStatusHTML(out, 0); }
|
||||
public void renderStatusHTML(Writer out, int sortFlags) throws IOException {}
|
||||
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
|
||||
TreeSet peers = new TreeSet(getComparator(sortFlags));
|
||||
synchronized (_peersByIdent) {
|
||||
peers.addAll(_peersByIdent.values());
|
||||
}
|
||||
@@ -1238,13 +1526,50 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
buf.append("<b id=\"udpcon\">UDP connections: ").append(peers.size()).append("</b><br />\n");
|
||||
buf.append("<table border=\"1\">\n");
|
||||
buf.append(" <tr><td><b><a href=\"#def.peer\">peer</a></b></td><td><b><a href=\"#def.idle\">idle</a></b></td>");
|
||||
buf.append(" <td><b><a href=\"#def.rate\">in/out</a></b></td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.up\">up</a></b></td><td><b><a href=\"#def.skew\">skew</a></b></td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.cwnd\">cwnd</a></b></td><td><b><a href=\"#def.ssthresh\">ssthresh</a></b></td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.rtt\">rtt</a></b></td><td><b><a href=\"#def.dev\">dev</a></b></td><td><b><a href=\"#def.rto\">rto</a></b></td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.mtu\">mtu</a></b></td><td><b><a href=\"#def.send\">send</a></b></td><td><b><a href=\"#def.recv\">recv</a></b></td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.resent\">resent</a></b></td><td><b><a href=\"#def.dupRecv\">dupRecv</a></b></td>\n");
|
||||
buf.append(" <tr><td><b><a href=\"#def.peer\">peer</a></b>");
|
||||
if (sortFlags == FLAG_ALPHA)
|
||||
buf.append(" V ");
|
||||
else
|
||||
buf.append(" <a href=\"").append(urlBase).append("?sort=0\">V</a> ");
|
||||
buf.append("</td><td><b><a href=\"#def.idle\">idle</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by idle inbound", FLAG_IDLE_IN);
|
||||
buf.append("/");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by idle outbound", FLAG_IDLE_OUT);
|
||||
buf.append("</td>");
|
||||
buf.append(" <td><b><a href=\"#def.rate\">in/out</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by inbound rate", FLAG_RATE_IN);
|
||||
buf.append("/");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by outbound rate", FLAG_RATE_OUT);
|
||||
buf.append("</td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.up\">up</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by connection uptime", FLAG_UPTIME);
|
||||
buf.append("</td><td><b><a href=\"#def.skew\">skew</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by clock skew", FLAG_SKEW);
|
||||
buf.append("</td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.cwnd\">cwnd</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by congestion window", FLAG_CWND);
|
||||
buf.append("</td><td><b><a href=\"#def.ssthresh\">ssthresh</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by slow start threshold", FLAG_SSTHRESH);
|
||||
buf.append("</td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.rtt\">rtt</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by round trip time", FLAG_RTT);
|
||||
buf.append("</td><td><b><a href=\"#def.dev\">dev</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by round trip time deviation", FLAG_DEV);
|
||||
buf.append("</td><td><b><a href=\"#def.rto\">rto</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by retransmission timeout", FLAG_RTO);
|
||||
buf.append("</td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.mtu\">mtu</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by maximum transmit unit", FLAG_MTU);
|
||||
buf.append("</td><td><b><a href=\"#def.send\">send</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets sent", FLAG_SEND);
|
||||
buf.append("</td><td><b><a href=\"#def.recv\">recv</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets received", FLAG_RECV);
|
||||
buf.append("</td>\n");
|
||||
buf.append(" <td><b><a href=\"#def.resent\">resent</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets retransmitted", FLAG_RESEND);
|
||||
buf.append("</td><td><b><a href=\"#def.dupRecv\">dupRecv</a></b>");
|
||||
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets received more than once", FLAG_DUP);
|
||||
buf.append("</td>\n");
|
||||
buf.append(" </tr>\n");
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
@@ -1513,7 +1838,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
public String toString() { return "UDP bid @ " + getLatencyMs(); }
|
||||
}
|
||||
|
||||
private static final int EXPIRE_TIMEOUT = 10*60*1000;
|
||||
private static final int EXPIRE_TIMEOUT = 30*60*1000;
|
||||
|
||||
private class ExpirePeerEvent implements SimpleTimer.TimedEvent {
|
||||
private List _expirePeers;
|
||||
@@ -1539,7 +1864,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < _expireBuffer.size(); i++)
|
||||
dropPeer((PeerState)_expireBuffer.get(i), false);
|
||||
dropPeer((PeerState)_expireBuffer.get(i), false, "idle too long");
|
||||
_expireBuffer.clear();
|
||||
|
||||
if (_alive)
|
||||
@@ -1673,6 +1998,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
|
||||
}
|
||||
if (_alive) {
|
||||
long delay = _context.random().nextInt(2*TEST_FREQUENCY);
|
||||
if (delay <= 0)
|
||||
throw new RuntimeException("wtf, delay is " + delay);
|
||||
SimpleTimer.getInstance().addEvent(PeerTestEvent.this, delay);
|
||||
}
|
||||
}
|
||||
|
@@ -23,6 +23,7 @@ public class HopConfig {
|
||||
private SessionKey _ivKey;
|
||||
private SessionKey _replyKey;
|
||||
private ByteArray _replyIV;
|
||||
private long _creation;
|
||||
private long _expiration;
|
||||
private Map _options;
|
||||
private long _messagesProcessed;
|
||||
@@ -37,8 +38,10 @@ public class HopConfig {
|
||||
_sendTo = null;
|
||||
_layerKey = null;
|
||||
_ivKey = null;
|
||||
_creation = -1;
|
||||
_expiration = -1;
|
||||
_options = null;
|
||||
_messagesProcessed = 0;
|
||||
}
|
||||
|
||||
/** what tunnel ID are we receiving on? */
|
||||
@@ -94,6 +97,10 @@ public class HopConfig {
|
||||
/** when does this tunnel expire (in ms since the epoch)? */
|
||||
public long getExpiration() { return _expiration; }
|
||||
public void setExpiration(long when) { _expiration = when; }
|
||||
|
||||
/** when was this tunnel created (in ms since the epoch)? */
|
||||
public long getCreation() { return _creation; }
|
||||
public void setCreation(long when) { _creation = when; }
|
||||
|
||||
/**
|
||||
* what are the configuration options for this tunnel (if any). keys to
|
||||
|
@@ -22,7 +22,7 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
|
||||
protected I2PAppContext _context;
|
||||
private Log _log;
|
||||
|
||||
static final int PREPROCESSED_SIZE = 1024;
|
||||
public static final int PREPROCESSED_SIZE = 1024;
|
||||
protected static final int IV_SIZE = HopProcessor.IV_LENGTH;
|
||||
protected static final ByteCache _dataCache = ByteCache.getInstance(512, PREPROCESSED_SIZE);
|
||||
protected static final ByteCache _ivCache = ByteCache.getInstance(128, IV_SIZE);
|
||||
|
@@ -137,7 +137,7 @@ public class TunnelCreatorConfig implements TunnelInfo {
|
||||
}
|
||||
|
||||
|
||||
private static final int MAX_CONSECUTIVE_TEST_FAILURES = 2;
|
||||
private static final int MAX_CONSECUTIVE_TEST_FAILURES = 3;
|
||||
|
||||
/**
|
||||
* The tunnel failed, so stop using it
|
||||
|
@@ -38,6 +38,8 @@ public class TunnelDispatcher implements Service {
|
||||
private long _lastParticipatingExpiration;
|
||||
private BloomFilterIVValidator _validator;
|
||||
private LeaveTunnel _leaveJob;
|
||||
/** what is the date/time we last deliberately dropped a tunnel? **/
|
||||
private long _lastDropTime;
|
||||
|
||||
/** Creates a new instance of TunnelDispatcher */
|
||||
public TunnelDispatcher(RouterContext ctx) {
|
||||
@@ -49,6 +51,7 @@ public class TunnelDispatcher implements Service {
|
||||
_inboundGateways = new HashMap();
|
||||
_participatingConfig = new HashMap();
|
||||
_lastParticipatingExpiration = 0;
|
||||
_lastDropTime = 0;
|
||||
_validator = null;
|
||||
_leaveJob = new LeaveTunnel(ctx);
|
||||
ctx.statManager().createRateStat("tunnel.participatingTunnels",
|
||||
@@ -104,7 +107,7 @@ public class TunnelDispatcher implements Service {
|
||||
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
ctx.statManager().createRateStat("tunnel.participatingMessageCount",
|
||||
"How many messages are sent through a participating tunnel?", "Tunnels",
|
||||
new long[] { 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
new long[] { 60*1000l, 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
ctx.statManager().createRateStat("tunnel.ownedMessageCount",
|
||||
"How many messages are sent through a tunnel we created (period == failures)?", "Tunnels",
|
||||
new long[] { 60*1000l, 10*60*1000l, 60*60*1000l });
|
||||
@@ -527,6 +530,70 @@ public class TunnelDispatcher implements Service {
|
||||
return new ArrayList(_participatingConfig.values());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private static final int DROP_BASE_INTERVAL = 40 * 1000;
|
||||
private static final int DROP_RANDOM_BOOST = 10 * 1000;
|
||||
|
||||
/**
|
||||
* If a router is too overloaded to build its own tunnels,
|
||||
* the build executor may call this.
|
||||
*/
|
||||
|
||||
public void dropBiggestParticipating() {
|
||||
|
||||
List partTunnels = listParticipatingTunnels();
|
||||
if ((partTunnels == null) || (partTunnels.size() == 0)) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Not dropping tunnel, since partTunnels was null or had 0 items!");
|
||||
return;
|
||||
}
|
||||
|
||||
long periodWithoutDrop = _context.clock().now() - _lastDropTime;
|
||||
if (periodWithoutDrop < DROP_BASE_INTERVAL) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Not dropping tunnel, since last drop was " + periodWithoutDrop + " ms ago!");
|
||||
return;
|
||||
}
|
||||
|
||||
HopConfig biggest = null;
|
||||
HopConfig current = null;
|
||||
|
||||
long biggestMessages = 0;
|
||||
long biggestAge = -1;
|
||||
double biggestRate = 0;
|
||||
|
||||
for (int i=0; i<partTunnels.size(); i++) {
|
||||
|
||||
current = (HopConfig)partTunnels.get(i);
|
||||
|
||||
long currentMessages = current.getProcessedMessagesCount();
|
||||
long currentAge = (_context.clock().now() - current.getCreation());
|
||||
double currentRate = ((double) currentMessages / (currentAge / 1000));
|
||||
|
||||
// Determine if this is the biggest, but don't include tunnels
|
||||
// with less than 20 messages (unpredictable rates)
|
||||
if ((currentMessages > 20) && ((biggest == null) || (currentRate > biggestRate))) {
|
||||
// Update our profile of the biggest
|
||||
biggest = current;
|
||||
biggestMessages = currentMessages;
|
||||
biggestAge = currentAge;
|
||||
biggestRate = currentRate;
|
||||
}
|
||||
}
|
||||
|
||||
if (biggest == null) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Not dropping tunnel, since no suitable tunnel was found.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping tunnel with " + biggestRate + " messages/s and " + biggestMessages +
|
||||
" messages, last drop was " + (periodWithoutDrop / 1000) + " s ago.");
|
||||
remove(biggest);
|
||||
_lastDropTime = _context.clock().now() + _context.random().nextInt(DROP_RANDOM_BOOST);
|
||||
}
|
||||
|
||||
public void startup() {
|
||||
// NB: 256 == assume max rate (size adjusted to handle 256 messages per second)
|
||||
|
@@ -41,6 +41,7 @@ class BuildExecutor implements Runnable {
|
||||
_context.statManager().createRateStat("tunnel.buildClientReject", "How often a client tunnel is rejected", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.buildRequestTime", "How long it takes to build a tunnel request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.buildRequestZeroHopTime", "How long it takes to build a zero hop tunnel", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.pendingRemaining", "How many inbound requests are pending after a pass (period is how long the pass takes)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_repoll = false;
|
||||
_handler = new BuildHandler(ctx, this);
|
||||
}
|
||||
@@ -51,7 +52,12 @@ class BuildExecutor implements Runnable {
|
||||
buf = new StringBuffer(128);
|
||||
buf.append("Allowed: ");
|
||||
}
|
||||
int allowed = 20;
|
||||
|
||||
int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
|
||||
int allowed = maxKBps / 6; // Max. 1 concurrent build per 6 KB/s outbound
|
||||
if (allowed < 2) allowed = 2; // Never choke below 2 builds (but congestion may)
|
||||
if (allowed > 10) allowed = 10; // Never go beyond 10, that is uncharted territory (old limit was 5)
|
||||
|
||||
String prop = _context.getProperty("router.tunnelConcurrentBuilds");
|
||||
if (prop != null)
|
||||
try { allowed = Integer.valueOf(prop).intValue(); } catch (NumberFormatException nfe) {}
|
||||
@@ -91,6 +97,8 @@ class BuildExecutor implements Runnable {
|
||||
_context.statManager().addRateData("tunnel.buildExploratoryExpire", 1, 0);
|
||||
else
|
||||
_context.statManager().addRateData("tunnel.buildClientExpire", 1, 0);
|
||||
for (int j = 0; j < cfg.getLength(); j++)
|
||||
didNotReply(cfg.getReplyMessageId(), cfg.getPeer(j));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,22 +114,94 @@ class BuildExecutor implements Runnable {
|
||||
_context.statManager().addRateData("tunnel.concurrentBuildsLagged", concurrent, lag);
|
||||
return 0; // if we have a job heavily blocking our jobqueue, ssllloowww dddooowwwnnn
|
||||
}
|
||||
//if (isOverloaded())
|
||||
// return 0;
|
||||
|
||||
// Trim the number of allowed tunnels for overload,
|
||||
// initiate a tunnel drop on severe overload
|
||||
allowed = trimForOverload(allowed,concurrent);
|
||||
|
||||
return allowed;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Estimated cost of tunnel build attempt, bytes
|
||||
private static final int BUILD_BANDWIDTH_ESTIMATE_BYTES = 5*1024;
|
||||
|
||||
/**
|
||||
* Don't even try to build tunnels if we're saturated
|
||||
*/
|
||||
private int trimForOverload(int allowed, int concurrent) {
|
||||
|
||||
// dont include the inbound rates when throttling tunnel building, since
|
||||
// that'd expose a pretty trivial attack.
|
||||
int used1s = _context.router().get1sRate(true); // Avoid reliance on the 1s rate, too volatile
|
||||
int used15s = _context.router().get15sRate(true);
|
||||
int used1m = _context.router().get1mRate(true); // Avoid reliance on the 1m rate, too slow
|
||||
|
||||
int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
|
||||
int maxBps = maxKBps * 1024;
|
||||
int overBuildLimit = maxBps - BUILD_BANDWIDTH_ESTIMATE_BYTES; // Beyond this, refrain from building
|
||||
int nearBuildLimit = maxBps - (2*BUILD_BANDWIDTH_ESTIMATE_BYTES); // Beyond this, consider it close
|
||||
|
||||
// Detect any fresh overload which could set back tunnel building
|
||||
if (Math.max(used1s,used15s) > overBuildLimit) {
|
||||
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Overloaded, trouble building tunnels (maxKBps=" + maxKBps +
|
||||
", 1s=" + used1s + ", 15s=" + used15s + ", 1m=" + used1m + ")");
|
||||
|
||||
// Detect serious overload
|
||||
if (((used1s > maxBps) && (used1s > used15s) && (used15s > nearBuildLimit)) ||
|
||||
((used1s > maxBps) && (used15s > overBuildLimit)) ||
|
||||
((used1s > overBuildLimit) && (used15s > overBuildLimit))) {
|
||||
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Serious overload, allow building 0.");
|
||||
|
||||
// If so configured, drop biggest participating tunnel
|
||||
if (Boolean.valueOf(_context.getProperty("router.dropTunnelsOnOverload","false")).booleanValue() == true) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Requesting drop of biggest participating tunnel.");
|
||||
_context.tunnelDispatcher().dropBiggestParticipating();
|
||||
}
|
||||
return(0);
|
||||
} else {
|
||||
// Mild overload, check if we already build tunnels
|
||||
if (concurrent == 0) {
|
||||
// We aren't building, allow 1
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Mild overload, allow building 1.");
|
||||
return(1);
|
||||
} else {
|
||||
// Already building, allow 0
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Mild overload but already building " + concurrent + ", so allow 0.");
|
||||
return(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
// No overload, allow as requested
|
||||
return(allowed);
|
||||
}
|
||||
|
||||
|
||||
public void run() {
|
||||
_isRunning = true;
|
||||
List wanted = new ArrayList(8);
|
||||
List pools = new ArrayList(8);
|
||||
|
||||
boolean pendingRemaining = false;
|
||||
int pendingRemaining = 0;
|
||||
|
||||
long loopBegin = 0;
|
||||
long beforeHandleInboundReplies = 0;
|
||||
long afterHandleInboundReplies = 0;
|
||||
long afterBuildZeroHop = 0;
|
||||
long afterBuildReal = 0;
|
||||
long afterHandleInbound = 0;
|
||||
|
||||
while (!_manager.isShutdown()){
|
||||
loopBegin = System.currentTimeMillis();
|
||||
try {
|
||||
_repoll = pendingRemaining; // resets repoll to false unless there are inbound requeusts pending
|
||||
_repoll = pendingRemaining > 0; // resets repoll to false unless there are inbound requeusts pending
|
||||
_manager.listPools(pools);
|
||||
for (int i = 0; i < pools.size(); i++) {
|
||||
TunnelPool pool = (TunnelPool)pools.get(i);
|
||||
@@ -130,7 +210,9 @@ class BuildExecutor implements Runnable {
|
||||
wanted.add(pool);
|
||||
}
|
||||
|
||||
beforeHandleInboundReplies = System.currentTimeMillis();
|
||||
_handler.handleInboundReplies();
|
||||
afterHandleInboundReplies = System.currentTimeMillis();
|
||||
|
||||
// allowed() also expires timed out requests (for new style requests)
|
||||
int allowed = allowed();
|
||||
@@ -140,17 +222,22 @@ class BuildExecutor implements Runnable {
|
||||
|
||||
// zero hop ones can run inline
|
||||
allowed = buildZeroHopTunnels(wanted, allowed);
|
||||
afterBuildZeroHop = System.currentTimeMillis();
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Zero hops built, Allowed: " + allowed + " wanted: " + wanted);
|
||||
|
||||
int realBuilt = 0;
|
||||
TunnelManagerFacade mgr = _context.tunnelManager();
|
||||
if ( (mgr == null) || (mgr.selectInboundTunnel() == null) || (mgr.selectOutboundTunnel() == null) ) {
|
||||
// we don't have either inbound or outbound tunnels, so don't bother trying to build
|
||||
// non-zero-hop tunnels
|
||||
synchronized (_currentlyBuilding) {
|
||||
if (!_repoll)
|
||||
_currentlyBuilding.wait(5*1000+_context.random().nextInt(5*1000));
|
||||
if (!_repoll) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("No tunnel to build with (allowed=" + allowed + ", wanted=" + wanted.size() + ", pending=" + pendingRemaining + "), wait for a while");
|
||||
_currentlyBuilding.wait(1*1000+_context.random().nextInt(1*1000));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if ( (allowed > 0) && (wanted.size() > 0) ) {
|
||||
@@ -173,6 +260,7 @@ class BuildExecutor implements Runnable {
|
||||
_currentlyBuilding.add(cfg);
|
||||
}
|
||||
buildTunnel(pool, cfg);
|
||||
realBuilt++;
|
||||
// 0hops are taken care of above, these are nonstandard 0hops
|
||||
//if (cfg.getLength() <= 1)
|
||||
// i--; //0hop, we can keep going, as there's no worry about throttling
|
||||
@@ -184,13 +272,13 @@ class BuildExecutor implements Runnable {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Nothin' doin, wait for a while");
|
||||
try {
|
||||
synchronized (_currentlyBuilding) {
|
||||
if (!_repoll) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Nothin' doin (allowed=" + allowed + ", wanted=" + wanted.size() + ", pending=" + pendingRemaining + "), wait for a while");
|
||||
//if (allowed <= 0)
|
||||
_currentlyBuilding.wait(_context.random().nextInt(5*1000));
|
||||
_currentlyBuilding.wait(_context.random().nextInt(1*1000));
|
||||
//else // wanted <= 0
|
||||
// _currentlyBuilding.wait(_context.random().nextInt(30*1000));
|
||||
}
|
||||
@@ -201,8 +289,23 @@ class BuildExecutor implements Runnable {
|
||||
}
|
||||
}
|
||||
|
||||
afterBuildReal = System.currentTimeMillis();
|
||||
|
||||
pendingRemaining = _handler.handleInboundRequests();
|
||||
afterHandleInbound = System.currentTimeMillis();
|
||||
|
||||
if (pendingRemaining > 0)
|
||||
_context.statManager().addRateData("tunnel.pendingRemaining", pendingRemaining, afterHandleInbound-afterBuildReal);
|
||||
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("build loop complete, tot=" + (afterHandleInbound-loopBegin) +
|
||||
" inReply=" + (afterHandleInboundReplies-beforeHandleInboundReplies) +
|
||||
" zeroHop=" + (afterBuildZeroHop-afterHandleInboundReplies) +
|
||||
" real=" + (afterBuildReal-afterBuildZeroHop) +
|
||||
" in=" + (afterHandleInbound-afterBuildReal) +
|
||||
" built=" + realBuilt +
|
||||
" pending=" + pendingRemaining);
|
||||
|
||||
wanted.clear();
|
||||
pools.clear();
|
||||
} catch (Exception e) {
|
||||
@@ -274,6 +377,7 @@ class BuildExecutor implements Runnable {
|
||||
_currentlyBuilding.remove(cfg);
|
||||
_currentlyBuilding.notifyAll();
|
||||
}
|
||||
|
||||
long expireBefore = _context.clock().now() + 10*60*1000 - BuildRequestor.REQUEST_TIMEOUT;
|
||||
if (cfg.getExpiration() <= expireBefore) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
@@ -299,5 +403,11 @@ class BuildExecutor implements Runnable {
|
||||
}
|
||||
}
|
||||
|
||||
private void didNotReply(long tunnel, Hash peer) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(tunnel + ": Peer " + peer.toBase64() + " did not reply to the tunnel join request");
|
||||
}
|
||||
|
||||
List locked_getCurrentlyBuilding() { return _currentlyBuilding; }
|
||||
public int getInboundBuildQueueSize() { return _handler.getInboundBuildQueueSize(); }
|
||||
}
|
||||
|
@@ -6,6 +6,8 @@ import net.i2p.data.i2np.*;
|
||||
import net.i2p.router.*;
|
||||
import net.i2p.router.tunnel.*;
|
||||
import net.i2p.router.peermanager.TunnelHistory;
|
||||
import net.i2p.stat.Rate;
|
||||
import net.i2p.stat.RateStat;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@@ -46,7 +48,10 @@ class BuildHandler {
|
||||
_context.statManager().createRateStat("tunnel.dropLoad", "How long we had to wait before finally giving up on an inbound request (period is queue count)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.dropLoadDelay", "How long we had to wait before finally giving up on an inbound request?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.dropLoadBacklog", "How many requests were pending when they were so lagged that we had to drop a new inbound request??", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.dropLoadProactive", "What the estimated queue time was when we dropped an inbound request (period is num pending)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.dropLoadProactiveAbort", "How often we would have proactively dropped a request, but allowed it through?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.handleRemaining", "How many pending inbound requests were left on the queue after one pass?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.buildReplyTooSlow", "How often a tunnel build reply came back after we had given up waiting for it?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
|
||||
|
||||
_context.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("tunnel.receiveRejectionTransient", "How often we are rejected due to transient overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
@@ -64,10 +69,10 @@ class BuildHandler {
|
||||
private static final int NEXT_HOP_LOOKUP_TIMEOUT = 5*1000;
|
||||
|
||||
/**
|
||||
* Blocking call to handle a few of the pending inbound requests, returning true if
|
||||
* there are remaining requeusts we skipped over
|
||||
* Blocking call to handle a few of the pending inbound requests, returning how many
|
||||
* requests remain after this pass
|
||||
*/
|
||||
boolean handleInboundRequests() {
|
||||
int handleInboundRequests() {
|
||||
int dropExpired = 0;
|
||||
List handled = null;
|
||||
synchronized (_inboundBuildMessages) {
|
||||
@@ -81,7 +86,7 @@ class BuildHandler {
|
||||
handled.add(_inboundBuildMessages.remove(_inboundBuildMessages.size()-1));
|
||||
} else {
|
||||
// drop any expired messages
|
||||
long dropBefore = System.currentTimeMillis() - BuildRequestor.REQUEST_TIMEOUT;
|
||||
long dropBefore = System.currentTimeMillis() - (BuildRequestor.REQUEST_TIMEOUT*3);
|
||||
do {
|
||||
BuildMessageState state = (BuildMessageState)_inboundBuildMessages.get(0);
|
||||
if (state.recvTime <= dropBefore) {
|
||||
@@ -98,7 +103,7 @@ class BuildHandler {
|
||||
|
||||
// now pull off the oldest requests first (we're doing a tail-drop
|
||||
// when adding)
|
||||
for (int i = 0; i < toHandle; i++)
|
||||
for (int i = 0; i < toHandle && _inboundBuildMessages.size() > 0; i++)
|
||||
handled.add(_inboundBuildMessages.remove(0));
|
||||
}
|
||||
}
|
||||
@@ -139,7 +144,7 @@ class BuildHandler {
|
||||
int remaining = _inboundBuildMessages.size();
|
||||
if (remaining > 0)
|
||||
_context.statManager().addRateData("tunnel.handleRemaining", remaining, 0);
|
||||
return remaining > 0;
|
||||
return remaining;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -189,6 +194,7 @@ class BuildHandler {
|
||||
_log.warn("The reply " + replyMessageId + " did not match any pending tunnels");
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Pending tunnels: " + buf.toString());
|
||||
_context.statManager().addRateData("tunnel.buildReplyTooSlow", 1, 0);
|
||||
} else {
|
||||
handleReply(state.msg, cfg, System.currentTimeMillis()-state.recvTime);
|
||||
}
|
||||
@@ -209,8 +215,8 @@ class BuildHandler {
|
||||
Hash peer = cfg.getPeer(i);
|
||||
int record = order.indexOf(new Integer(i));
|
||||
int howBad = statuses[record];
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(msg.getUniqueId() + ": Peer " + peer.toBase64() + " replied with status " + howBad);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(msg.getUniqueId() + ": Peer " + peer.toBase64() + " replied with status " + howBad);
|
||||
|
||||
if (howBad == 0) {
|
||||
// w3wt
|
||||
@@ -272,7 +278,7 @@ class BuildHandler {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
|
||||
|
||||
if (timeSinceReceived > BuildRequestor.REQUEST_TIMEOUT) {
|
||||
if (timeSinceReceived > (BuildRequestor.REQUEST_TIMEOUT*3)) {
|
||||
// don't even bother, since we are so overloaded locally
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
|
||||
@@ -364,6 +370,30 @@ class BuildHandler {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If we are dropping lots of requests before even trying to handle them,
|
||||
* I suppose you could call us "overloaded"
|
||||
*/
|
||||
private final static int MAX_PROACTIVE_DROPS = 240;
|
||||
|
||||
private int countProactiveDrops() {
|
||||
int dropped = 0;
|
||||
dropped += countEvents("tunnel.dropLoadProactive", 60*1000);
|
||||
dropped += countEvents("tunnel.dropLoad", 60*1000);
|
||||
dropped += countEvents("tunnel.dropLoadBacklog", 60*1000);
|
||||
dropped += countEvents("tunnel.dropLoadDelay", 60*1000);
|
||||
return dropped;
|
||||
}
|
||||
private int countEvents(String stat, long period) {
|
||||
RateStat rs = _context.statManager().getRate(stat);
|
||||
if (rs != null) {
|
||||
Rate r = rs.getRate(period);
|
||||
if (r != null)
|
||||
return (int)r.getCurrentEventCount();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
private void handleReq(RouterInfo nextPeerInfo, BuildMessageState state, BuildRequestRecord req, Hash nextPeer) {
|
||||
long ourId = req.readReceiveTunnelId();
|
||||
long nextId = req.readNextTunnelId();
|
||||
@@ -384,22 +414,31 @@ class BuildHandler {
|
||||
//if ( (response == 0) && (_context.random().nextInt(50) <= 1) )
|
||||
// response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
|
||||
int proactiveDrops = countProactiveDrops();
|
||||
long recvDelay = System.currentTimeMillis()-state.recvTime;
|
||||
if ( (response == 0) && (recvDelay > BuildRequestor.REQUEST_TIMEOUT/2) ) {
|
||||
_context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, recvDelay);
|
||||
response = TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
} else if (response == 0) {
|
||||
_context.statManager().addRateData("tunnel.acceptLoad", recvDelay, recvDelay);
|
||||
if (response == 0) {
|
||||
float pDrop = recvDelay / (BuildRequestor.REQUEST_TIMEOUT*3);
|
||||
pDrop = (float)Math.pow(pDrop, 16);
|
||||
if (_context.random().nextFloat() < pDrop) { // || (proactiveDrops > MAX_PROACTIVE_DROPS) ) ) {
|
||||
_context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, proactiveDrops);
|
||||
if (true || (proactiveDrops < MAX_PROACTIVE_DROPS*2))
|
||||
response = TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
else
|
||||
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
} else {
|
||||
_context.statManager().addRateData("tunnel.acceptLoad", recvDelay, recvDelay);
|
||||
}
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Responding to " + state.msg.getUniqueId() + "/" + ourId
|
||||
+ " after " + recvDelay + " with " + response
|
||||
+ " after " + recvDelay + "/" + proactiveDrops + " with " + response
|
||||
+ " from " + (state.fromHash != null ? state.fromHash.toBase64() :
|
||||
state.from != null ? state.from.calculateHash().toBase64() : "tunnel"));
|
||||
|
||||
if (response == 0) {
|
||||
HopConfig cfg = new HopConfig();
|
||||
cfg.setCreation(_context.clock().now());
|
||||
cfg.setExpiration(_context.clock().now() + 10*60*1000);
|
||||
cfg.setIVKey(req.readIVKey());
|
||||
cfg.setLayerKey(req.readLayerKey());
|
||||
@@ -505,6 +544,14 @@ class BuildHandler {
|
||||
}
|
||||
}
|
||||
|
||||
public int getInboundBuildQueueSize() {
|
||||
synchronized (_inboundBuildMessages) {
|
||||
return _inboundBuildMessages.size();
|
||||
}
|
||||
}
|
||||
|
||||
/** um, this is bad. don't set this. */
|
||||
private static final boolean DROP_ALL_REQUESTS = false;
|
||||
private static final boolean HANDLE_REPLIES_INLINE = true;
|
||||
|
||||
private class TunnelBuildMessageHandlerJobBuilder implements HandlerJobBuilder {
|
||||
@@ -544,29 +591,36 @@ class BuildHandler {
                _exec.repoll();
            }
        } else {
            if (_exec.wasRecentlyBuilding(reqId)) {
            if (DROP_ALL_REQUESTS || _exec.wasRecentlyBuilding(reqId)) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Dropping the reply " + reqId + ", as we used to be building that");
            } else {
                synchronized (_inboundBuildMessages) {
                    boolean removed = false;
                    int dropped = 0;
                    while (_inboundBuildMessages.size() > 0) {
                        BuildMessageState cur = (BuildMessageState)_inboundBuildMessages.get(_inboundBuildMessages.size()-1);
                    for (int i = 0; i < _inboundBuildMessages.size(); i++) {
                        BuildMessageState cur = (BuildMessageState)_inboundBuildMessages.get(i);
                        long age = System.currentTimeMillis() - cur.recvTime;
                        if (age >= BuildRequestor.REQUEST_TIMEOUT) {
                            _inboundBuildMessages.remove(_inboundBuildMessages.size()-1);
                        if (age >= BuildRequestor.REQUEST_TIMEOUT*3) {
                            _inboundBuildMessages.remove(i);
                            i--;
                            dropped++;
                            _context.statManager().addRateData("tunnel.dropLoad", age, _inboundBuildMessages.size());
                        } else {
                            break;
                        }
                    }
                    if (dropped > 0) {
                        // if the queue is backlogged, stop adding new messages
                        _context.statManager().addRateData("tunnel.dropLoadBacklog", _inboundBuildMessages.size(), _inboundBuildMessages.size());
                    } else {
                        _inboundBuildMessages.add(new BuildMessageState(receivedMessage, from, fromHash));
                        int queueTime = estimateQueueTime(_inboundBuildMessages.size());
                        float pDrop = queueTime/((float)BuildRequestor.REQUEST_TIMEOUT*3);
                        pDrop = (float)Math.pow(pDrop, 16); // steeeep
                        float f = _context.random().nextFloat();
                        if ( (pDrop > f) && (allowProactiveDrop()) ) {
                            _context.statManager().addRateData("tunnel.dropLoadProactive", queueTime, _inboundBuildMessages.size());
                        } else {
                            _inboundBuildMessages.add(new BuildMessageState(receivedMessage, from, fromHash));
                        }
                    }
                }
                _exec.repoll();
@@ -576,6 +630,38 @@ class BuildHandler {
        }
    }

    private boolean allowProactiveDrop() {
        String allow = _context.getProperty("router.allowProactiveDrop", "true");
        boolean rv = false;
        if ( (allow == null) || (Boolean.valueOf(allow).booleanValue()) )
            rv = true;
        if (!rv)
            _context.statManager().addRateData("tunnel.dropLoadProactiveAbort", 1, 0);
        return rv;
    }

    private int estimateQueueTime(int numPendingMessages) {
        int decryptTime = 200;
        RateStat rs = _context.statManager().getRate("tunnel.decryptRequestTime");
        if (rs != null) {
            Rate r = rs.getRate(60*1000);
            double avg = 0;
            if (r != null)
                avg = r.getAverageValue();
            if (avg > 0) {
                decryptTime = (int)avg;
            } else {
                avg = rs.getLifetimeAverageValue();
                if (avg > 0)
                    decryptTime = (int)avg;
            }
        }
        float estimatedQueueTime = numPendingMessages * decryptTime;
        estimatedQueueTime *= 1.2f; // lets leave some cpu to spare, 'eh?
        return (int)estimatedQueueTime;
    }


    private class TunnelBuildReplyMessageHandlerJobBuilder implements HandlerJobBuilder {
        public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
            if (_log.shouldLog(Log.DEBUG))
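Taken together, the two hunks above define the proactive-drop behavior: the expected queue wait is estimated from the recent tunnel.decryptRequestTime average (falling back to 200ms per pending message), and that estimate becomes a drop probability that stays near zero until the backlog approaches three request timeouts, then climbs steeply. A minimal standalone sketch of that curve follows; the 10s timeout and 200ms fallback come from the diff, while the class name, the plain java.util.Random, and the sample queue depths are illustrative only.

// Sketch only: mirrors the pDrop curve from the BuildHandler hunks above.
import java.util.Random;

public class ProactiveDropSketch {
    static final int REQUEST_TIMEOUT = 10*1000;   // BuildRequestor.REQUEST_TIMEOUT after this change
    static final int FALLBACK_DECRYPT_MS = 200;   // used when no tunnel.decryptRequestTime rate exists

    static int estimateQueueTime(int numPendingMessages, double avgDecryptMs) {
        double decryptTime = (avgDecryptMs > 0 ? avgDecryptMs : FALLBACK_DECRYPT_MS);
        return (int)(numPendingMessages * decryptTime * 1.2); // leave some CPU headroom, as the diff does
    }

    static boolean shouldDropProactively(int queueTimeMs, Random rnd) {
        float pDrop = queueTimeMs / ((float)REQUEST_TIMEOUT * 3);
        pDrop = (float)Math.pow(pDrop, 16); // steep: negligible until the backlog nears 30s
        return pDrop > rnd.nextFloat();
    }

    public static void main(String[] args) {
        Random rnd = new Random();
        for (int pending : new int[] { 10, 50, 100, 150 }) {
            int q = estimateQueueTime(pending, 200);
            System.out.println(pending + " pending -> ~" + q + "ms queued, drop? "
                               + shouldDropProactively(q, rnd));
        }
    }
}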
@@ -21,9 +21,16 @@ class BuildRequestor {
        for (int i = 0; i < BuildMessageGenerator.ORDER.length; i++)
            ORDER.add(new Integer(i));
    }
    private static final boolean USE_PAIRED_CLIENT_TUNNELS = true;
    private static final int PRIORITY = 500;
    static final int REQUEST_TIMEOUT = 20*1000;
    static final int REQUEST_TIMEOUT = 10*1000;

    private static boolean usePairedTunnels(RouterContext ctx) {
        String val = ctx.getProperty("router.usePairedTunnels");
        if ( (val == null) || (Boolean.valueOf(val).booleanValue()) )
            return true;
        else
            return false;
    }

    /** new style requests need to fill in the tunnel IDs before hand */
    public static void prepare(RouterContext ctx, PooledTunnelCreatorConfig cfg) {
@@ -58,7 +65,7 @@ class BuildRequestor {
        cfg.setTunnelPool(pool);

        TunnelInfo pairedTunnel = null;
        if (pool.getSettings().isExploratory() || !USE_PAIRED_CLIENT_TUNNELS) {
        if (pool.getSettings().isExploratory() || !usePairedTunnels(ctx)) {
            if (pool.getSettings().isInbound())
                pairedTunnel = ctx.tunnelManager().selectOutboundTunnel();
            else
@@ -72,7 +79,7 @@ class BuildRequestor {
        if (pairedTunnel == null) {
            if (log.shouldLog(Log.WARN))
                log.warn("Couldn't find a paired tunnel for " + cfg + ", fall back on exploratory tunnels for pairing");
            if (!pool.getSettings().isExploratory() && USE_PAIRED_CLIENT_TUNNELS)
            if (!pool.getSettings().isExploratory() && usePairedTunnels(ctx))
                if (pool.getSettings().isInbound())
                    pairedTunnel = ctx.tunnelManager().selectOutboundTunnel();
                else
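The BuildRequestor change swaps the USE_PAIRED_CLIENT_TUNNELS constant for a runtime check of the router.usePairedTunnels property, defaulting to true when unset. A small sketch of that default-true pattern, using java.util.Properties in place of RouterContext (only the property name is taken from the diff; everything else is illustrative):

// Sketch: same default-true lookup as usePairedTunnels(ctx) above, against plain Properties.
import java.util.Properties;

public class PairedTunnelToggleSketch {
    static boolean usePairedTunnels(Properties props) {
        String val = props.getProperty("router.usePairedTunnels");
        return (val == null) || Boolean.valueOf(val).booleanValue();
    }

    public static void main(String[] args) {
        Properties p = new Properties();
        System.out.println(usePairedTunnels(p));           // true: unset falls back to the default
        p.setProperty("router.usePairedTunnels", "false");
        System.out.println(usePairedTunnels(p));           // false: explicitly disabled
    }
}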
@@ -9,13 +9,21 @@ class ExpireJob extends JobImpl {
    private TunnelPool _pool;
    private TunnelCreatorConfig _cfg;
    private boolean _leaseUpdated;
    private long _dropAfter;
    public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
        super(ctx);
        _pool = pool;
        _cfg = cfg;
        _leaseUpdated = false;
        // give 'em some extra time before dropping 'em
        getTiming().setStartAfter(cfg.getExpiration()); // + Router.CLOCK_FUDGE_FACTOR);
        // we act as if this tunnel expires a random skew before it actually does
        // so we rebuild out of sync. otoh, we will honor tunnel messages on it
        // up through the full lifetime of the tunnel, plus a clock skew, since
        // others may be sending to the published lease expirations
        long expire = cfg.getExpiration();
        _dropAfter = expire + Router.CLOCK_FUDGE_FACTOR;
        expire -= ctx.random().nextLong(5*60*1000);
        cfg.setExpiration(expire);
        getTiming().setStartAfter(expire);
    }
    public String getName() {
        if (_pool.getSettings().isExploratory()) {
@@ -42,7 +50,8 @@ class ExpireJob extends JobImpl {
            _pool.removeTunnel(_cfg);
            _leaseUpdated = true;
            _pool.refreshLeaseSet();
            requeue(Router.CLOCK_FUDGE_FACTOR);
            long timeToDrop = _dropAfter - getContext().clock().now();
            requeue(timeToDrop);
        } else {
            // already removed/refreshed, but now lets make it
            // so we dont even honor the tunnel anymore
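The reworked ExpireJob keeps two deadlines instead of one: the pool stops selecting the tunnel up to five minutes before its published expiration, so rebuilds drift out of sync, while traffic on the tunnel is still honored until the published expiration plus the router's clock fudge factor. A rough sketch of that arithmetic; the one-minute constant stands in for Router.CLOCK_FUDGE_FACTOR and is only an assumption, as are the class name and the ten-minute example lifetime.

// Sketch of the ExpireJob timing math above, outside the router's Job framework.
import java.util.Random;

public class ExpireTimingSketch {
    // Placeholder for Router.CLOCK_FUDGE_FACTOR; the real constant may differ.
    static final long CLOCK_FUDGE_FACTOR = 60*1000L;

    public static void main(String[] args) {
        Random rnd = new Random();
        long publishedExpiration = System.currentTimeMillis() + 10*60*1000L; // example lifetime

        // keep honoring tunnel messages until the published expiration plus the fudge factor
        long dropAfter = publishedExpiration + CLOCK_FUDGE_FACTOR;
        // stop selecting the tunnel a random 0-5 minutes early so rebuilds spread out
        long stopUsing = publishedExpiration - rnd.nextInt(5*60*1000);

        System.out.println("stop selecting at " + stopUsing);
        System.out.println("drop entirely at  " + dropAfter);
    }
}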
@@ -55,7 +55,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
        if (Boolean.valueOf(ctx.getProperty("router.exploreHighCapacity", "false")).booleanValue())
            return true;
        // no need to explore too wildly at first
        if (ctx.router().getUptime() <= 10*1000)
        if (ctx.router().getUptime() <= 5*60*1000)
            return true;
        // ok, if we aren't explicitly asking for it, we should try to pick peers
        // randomly from the 'not failing' pool. However, if we are having a
@@ -118,6 +118,7 @@ public class HandleTunnelCreateMessageJob extends JobImpl {

        HopConfig cfg = new HopConfig();
        long expiration = _request.getDurationSeconds()*1000 + getContext().clock().now();
        cfg.setCreation(getContext().clock().now());
        cfg.setExpiration(expiration);
        cfg.setIVKey(_request.getIVKey());
        cfg.setLayerKey(_request.getLayerKey());
@@ -33,11 +33,6 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
        if (_testJob != null)
            _testJob.testSuccessful(ms);
        super.testSuccessful(ms);

        // once a tunnel has been built and we know it works, lets skew ourselves a bit so we
        // aren't as cyclic
        if ( (_context.router().getUptime() < 10*60*1000) && (!_live) )
            setExpiration(getExpiration() - _context.random().nextInt(5*60*1000));
        _live = true;
    }

@@ -191,7 +191,7 @@ class TestJob extends JobImpl {
    /** randomized time we should wait before testing */
    private int getDelay() { return TEST_DELAY + getContext().random().nextInt(TEST_DELAY); }
    /** how long we allow tests to run for before failing them */
    private int getTestPeriod() { return 20*1000; }
    private int getTestPeriod() { return 15*1000; }
    private void scheduleRetest() { scheduleRetest(false); }
    private void scheduleRetest(boolean asap) {
        if (asap) {
@@ -2,11 +2,11 @@ package net.i2p.router.tunnel.pool;

import java.util.*;
import net.i2p.I2PAppContext;
import net.i2p.data.DataFormatException;
import net.i2p.data.Hash;
import net.i2p.data.*;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;

/**
@@ -153,6 +153,103 @@ abstract class TunnelPeerSelector {
            if (caps == null) return new HashSet(0);
            HashSet rv = new HashSet(caps);
            return rv;
        } else if (filterSlow(ctx, isInbound, isExploratory)) {
            Log log = ctx.logManager().getLog(TunnelPeerSelector.class);
            String excludeCaps = ctx.getProperty("router.excludePeerCaps",
                                                 String.valueOf(Router.CAPABILITY_BW16) +
                                                 String.valueOf(Router.CAPABILITY_BW32));
            Set peers = new HashSet();
            if (excludeCaps != null) {
                char excl[] = excludeCaps.toCharArray();
                FloodfillNetworkDatabaseFacade fac = (FloodfillNetworkDatabaseFacade)ctx.netDb();
                List known = fac.getKnownRouterData();
                if (known != null) {
                    for (int i = 0; i < known.size(); i++) {
                        RouterInfo peer = (RouterInfo)known.get(i);
                        String cap = peer.getCapabilities();
                        if (cap == null) {
                            peers.add(peer.getIdentity().calculateHash());
                            continue;
                        }
                        for (int j = 0; j < excl.length; j++) {
                            if (cap.indexOf(excl[j]) >= 0) {
                                peers.add(peer.getIdentity().calculateHash());
                                continue;
                            }
                        }
                        int maxLen = 0;
                        if (cap.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) >= 0)
                            maxLen++;
                        if (cap.indexOf(Router.CAPABILITY_REACHABLE) >= 0)
                            maxLen++;
                        if (cap.indexOf(Router.CAPABILITY_UNREACHABLE) >= 0)
                            maxLen++;
                        if (cap.length() <= maxLen)
                            peers.add(peer.getIdentity().calculateHash());
                        // otherwise, it contains flags we aren't trying to focus on,
                        // so don't exclude it based on published capacity

                        if (filterUptime(ctx, isInbound, isExploratory)) {
                            Properties opts = peer.getOptions();
                            if (opts != null) {
                                String val = opts.getProperty("stat_uptime");
                                long uptimeMs = 0;
                                if (val != null) {
                                    long factor = 1;
                                    if (val.endsWith("ms")) {
                                        factor = 1;
                                        val = val.substring(0, val.length()-2);
                                    } else if (val.endsWith("s")) {
                                        factor = 1000l;
                                        val = val.substring(0, val.length()-1);
                                    } else if (val.endsWith("m")) {
                                        factor = 60*1000l;
                                        val = val.substring(0, val.length()-1);
                                    } else if (val.endsWith("h")) {
                                        factor = 60*60*1000l;
                                        val = val.substring(0, val.length()-1);
                                    } else if (val.endsWith("d")) {
                                        factor = 24*60*60*1000l;
                                        val = val.substring(0, val.length()-1);
                                    }
                                    try { uptimeMs = Long.parseLong(val); } catch (NumberFormatException nfe) {}
                                    uptimeMs *= factor;
                                } else {
                                    // not publishing an uptime, so exclude it
                                    peers.add(peer.getIdentity().calculateHash());
                                    continue;
                                }

                                long infoAge = ctx.clock().now() - peer.getPublished();
                                if (infoAge < 0) {
                                    infoAge = 0;
                                } else if (infoAge > 24*60*60*1000) {
                                    peers.add(peer.getIdentity().calculateHash());
                                    continue;
                                } else {
                                    if (infoAge + uptimeMs < 4*60*60*1000) {
                                        // up for less than 4 hours, so exclude it
                                        peers.add(peer.getIdentity().calculateHash());
                                    }
                                }
                            } else {
                                // not publishing stats, so exclude it
                                peers.add(peer.getIdentity().calculateHash());
                                continue;
                            }
                        }
                    }
                }
                /*
                for (int i = 0; i < excludeCaps.length(); i++) {
                    List matches = ctx.peerManager().getPeersByCapability(excludeCaps.charAt(i));
                    if (log.shouldLog(Log.INFO))
                        log.info("Filtering out " + matches.size() + " peers with capability " + excludeCaps.charAt(i));
                    peers.addAll(matches);
                }
                */
            }
            return peers;
        } else {
            return new HashSet(1);
        }
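The uptime filter above depends on two published values: the peer's stat_uptime option, a number with a unit suffix, and the age of its RouterInfo; a peer stays eligible only when the two together reach roughly four hours and the RouterInfo is under a day old. A self-contained sketch of the suffix parsing and that acceptance rule; the class name and the sample inputs are illustrative rather than anything from the router.

// Sketch of the stat_uptime handling in the TunnelPeerSelector hunk above. Standalone; no netDb types.
public class UptimeFilterSketch {
    static long parseUptimeMs(String val) {
        long factor = 1;
        if (val.endsWith("ms")) { factor = 1; val = val.substring(0, val.length()-2); }
        else if (val.endsWith("s")) { factor = 1000L; val = val.substring(0, val.length()-1); }
        else if (val.endsWith("m")) { factor = 60*1000L; val = val.substring(0, val.length()-1); }
        else if (val.endsWith("h")) { factor = 60*60*1000L; val = val.substring(0, val.length()-1); }
        else if (val.endsWith("d")) { factor = 24*60*60*1000L; val = val.substring(0, val.length()-1); }
        long uptime = 0;
        try { uptime = Long.parseLong(val); } catch (NumberFormatException nfe) {}
        return uptime * factor;
    }

    static boolean excludeForUptime(long infoAgeMs, long uptimeMs) {
        if (infoAgeMs > 24*60*60*1000L) return true;     // RouterInfo too old to trust
        return infoAgeMs + uptimeMs < 4*60*60*1000L;     // up for less than about 4 hours
    }

    public static void main(String[] args) {
        // published 1h ago, claims 90 minutes of uptime: 2.5h < 4h, so excluded
        System.out.println(excludeForUptime(60*60*1000L, parseUptimeMs("90m")));
        // published 1h ago, claims 6 hours of uptime: kept
        System.out.println(excludeForUptime(60*60*1000L, parseUptimeMs("6h")));
    }
}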
@@ -162,6 +259,7 @@ abstract class TunnelPeerSelector {
    private static final String PROP_OUTBOUND_CLIENT_EXCLUDE_UNREACHABLE = "router.outboundClientExcludeUnreachable";
    private static final String PROP_INBOUND_EXPLORATORY_EXCLUDE_UNREACHABLE = "router.inboundExploratoryExcludeUnreachable";
    private static final String PROP_INBOUND_CLIENT_EXCLUDE_UNREACHABLE = "router.inboundClientExcludeUnreachable";

    private static final boolean DEFAULT_OUTBOUND_EXPLORATORY_EXCLUDE_UNREACHABLE = false;
    private static final boolean DEFAULT_OUTBOUND_CLIENT_EXCLUDE_UNREACHABLE = false;
    private static final boolean DEFAULT_INBOUND_EXPLORATORY_EXCLUDE_UNREACHABLE = false;
@@ -186,4 +284,56 @@ abstract class TunnelPeerSelector {
        //System.err.println("Filter unreachable? " + rv + " (inbound? " + isInbound + ", exploratory? " + isExploratory);
        return rv;
    }


    private static final String PROP_OUTBOUND_EXPLORATORY_EXCLUDE_SLOW = "router.outboundExploratoryExcludeSlow";
    private static final String PROP_OUTBOUND_CLIENT_EXCLUDE_SLOW = "router.outboundClientExcludeSlow";
    private static final String PROP_INBOUND_EXPLORATORY_EXCLUDE_SLOW = "router.inboundExploratoryExcludeSlow";
    private static final String PROP_INBOUND_CLIENT_EXCLUDE_SLOW = "router.inboundClientExcludeSlow";

    protected boolean filterSlow(RouterContext ctx, boolean isInbound, boolean isExploratory) {
        boolean def = true;
        String val = null;

        if (isExploratory)
            if (isInbound)
                val = ctx.getProperty(PROP_INBOUND_EXPLORATORY_EXCLUDE_SLOW);
            else
                val = ctx.getProperty(PROP_OUTBOUND_EXPLORATORY_EXCLUDE_SLOW);
        else
            if (isInbound)
                val = ctx.getProperty(PROP_INBOUND_CLIENT_EXCLUDE_SLOW);
            else
                val = ctx.getProperty(PROP_OUTBOUND_CLIENT_EXCLUDE_SLOW);

        boolean rv = (val != null ? Boolean.valueOf(val).booleanValue() : def);
        //System.err.println("Filter unreachable? " + rv + " (inbound? " + isInbound + ", exploratory? " + isExploratory);
        return rv;
    }

    private static final String PROP_OUTBOUND_EXPLORATORY_EXCLUDE_UPTIME = "router.outboundExploratoryExcludeUptime";
    private static final String PROP_OUTBOUND_CLIENT_EXCLUDE_UPTIME = "router.outboundClientExcludeUptime";
    private static final String PROP_INBOUND_EXPLORATORY_EXCLUDE_UPTIME = "router.inboundExploratoryExcludeUptime";
    private static final String PROP_INBOUND_CLIENT_EXCLUDE_UPTIME = "router.inboundClientExcludeUptime";

    /** do we want to skip peers who haven't been up for long? */
    protected boolean filterUptime(RouterContext ctx, boolean isInbound, boolean isExploratory) {
        boolean def = true;
        String val = null;

        if (isExploratory)
            if (isInbound)
                val = ctx.getProperty(PROP_INBOUND_EXPLORATORY_EXCLUDE_UPTIME);
            else
                val = ctx.getProperty(PROP_OUTBOUND_EXPLORATORY_EXCLUDE_UPTIME);
        else
            if (isInbound)
                val = ctx.getProperty(PROP_INBOUND_CLIENT_EXCLUDE_UPTIME);
            else
                val = ctx.getProperty(PROP_OUTBOUND_CLIENT_EXCLUDE_UPTIME);

        boolean rv = (val != null ? Boolean.valueOf(val).booleanValue() : def);
        //System.err.println("Filter unreachable? " + rv + " (inbound? " + isInbound + ", exploratory? " + isExploratory);
        return rv;
    }
}

@@ -607,6 +607,7 @@ public class TunnelPool {
            int j = peers.size() - 1 - i;
            cfg.setPeer(j, (Hash)peers.get(i));
            HopConfig hop = cfg.getConfig(j);
            hop.setCreation(_context.clock().now());
            hop.setExpiration(expiration);
            hop.setIVKey(_context.keyGenerator().generateSessionKey());
            hop.setLayerKey(_context.keyGenerator().generateSessionKey());
@@ -8,10 +8,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.*;
import net.i2p.data.i2np.*;
import net.i2p.stat.RateStat;
import net.i2p.router.*;
@@ -390,6 +387,9 @@ public class TunnelPoolManager implements TunnelManagerFacade {
    void tunnelFailed() { _executor.repoll(); }
    BuildExecutor getExecutor() { return _executor; }
    boolean isShutdown() { return _isShutdown; }

    public int getInboundBuildQueueSize() { return _executor.getInboundBuildQueueSize(); }


    public void renderStatusHTML(Writer out) throws IOException {
        out.write("<h2><a name=\"exploratory\">Exploratory tunnels</a> (<a href=\"/configtunnels.jsp#exploratory\">config</a>):</h2>\n");
@@ -495,11 +495,12 @@ public class TunnelPoolManager implements TunnelManagerFacade {
            out.write("<td align=right>" + info.getProcessedMessagesCount() + "KB</td>\n");
            for (int j = 0; j < info.getLength(); j++) {
                Hash peer = info.getPeer(j);
                String cap = getCapacity(peer);
                TunnelId id = (info.isInbound() ? info.getReceiveTunnelId(j) : info.getSendTunnelId(j));
                if (_context.routerHash().equals(peer))
                    out.write("<td><i>" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + "</i></td>");
                    out.write("<td><i>" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + "</i>" + cap + "</td>");
                else
                    out.write("<td>" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + "</td>");
                    out.write("<td>" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + cap + "</td>");
            }
            out.write("</tr>\n");

@@ -526,4 +527,26 @@ public class TunnelPoolManager implements TunnelManagerFacade {
            out.write("<b>No tunnels, waiting for the grace period to end</b><br />\n");
        out.write("Lifetime bandwidth usage: " + processedIn + "KB in, " + processedOut + "KB out<br />");
    }

    private String getCapacity(Hash peer) {
        RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
        if (info != null) {
            String caps = info.getCapabilities();
            if (caps.indexOf(Router.CAPABILITY_BW16) >= 0) {
                return "[<16 ]";
            } else if (caps.indexOf(Router.CAPABILITY_BW32) >= 0) {
                return "[<32 ]";
            } else if (caps.indexOf(Router.CAPABILITY_BW64) >= 0) {
                return "[<64 ]";
            } else if (caps.indexOf(Router.CAPABILITY_BW128) >= 0) {
                return "<b>[<128]</b>";
            } else if (caps.indexOf(Router.CAPABILITY_BW256) >= 0) {
                return "<b>[>128]</b>";
            } else {
                return "[ ]";
            }
        } else {
            return "[ ]";
        }
    }
}