Compare commits

...

13 Commits

Author SHA1 Message Date
jrandom
265d5e306e * 2005-02-23 0.5.0.1 released 2005-02-23 05:00:52 +00:00
jrandom
10ed058c2e 2005-02-22 jrandom
* Reworked the tunnel (re)building process to remove the tokens and
      provide cleaner controls on the tunnels built.
    * Fixed situations where the timestamper wanted to test more servers than
      were provided (thanks Tracker!)
    * Get rid of the dead SAM sessions by using the streaming lib's callbacks
      (thanks Tracker!)
2005-02-23 04:20:28 +00:00
jrandom
8a21f0efec 2005-02-22 jrandom
* Temporary workaround for the I2CP disconnect bug (have the streaminglib
      try to automatically reconnect on accept()/connect(..)).
    * Loop check for expired lease republishing (just in case)
2005-02-22 23:13:00 +00:00
jrandom
b8291ac5a4 2005-02-22 jrandom
* Temporary workaround for the I2CP disconnect bug (have the streaminglib
      try to automatically reconnect on accept()/connect(..)).
    * Loop check for expired lease republishing (just in case)
2005-02-22 22:58:21 +00:00
jrandom
c17433cb93 2005-02-22 jrandom
* Adjusted (and fixed...) the timestamper change detection
    * Deal with a rare reordering bug at the beginning of a stream (so we
      don't drop it unnecessarily)
    * Cleaned up some dropped message handling in the router
    * Reduced job queue churn when dealing with a large number of tunnels by
      sharing an expiration job
    * Keep a separate list of the most recent CRIT messages (shown on the
      logs.jsp).  This way they don't get buried among any other messages.
    * For clarity, display the tunnel variance config as "Randomization" on
      the web console.
    * If lease republishing fails (boo! hiss!) try it again
    * Actually fix the negative jobLag in the right place (this time)
    * Allow reseeding when there are less than 10 known peer references
    * Lots of logging updates.
2005-02-22 07:07:29 +00:00
jrandom
35fe7f8203 2005-02-20 jrandom
* Allow the streaming lib resend frequency to drop down to 20s as the
      minimum, so that up to 2 retries can get sent on an http request.
    * Add further limits to failsafe tunnels.
    * Keep exploratory and client tunnel testing and building stats separate.
    * Only use the 60s period for throttling tunnel requests due to transient
      network overload.
    * Rebuild tunnels earlier (1-3m before expiration, by default)
    * Cache the next hop's routerInfo for participating tunnels so that the
      tunnel participation doesn't depend on the netDb.
    * Fixed a long standing bug in the streaming lib where we wouldn't always
      unchoke messages when the window size grows.
    * Make sure the window size never reaches 0 (duh)
2005-02-21 19:08:01 +00:00
jrandom
21f13dba43 2005-02-20 jrandom
* Allow the streaming lib resend frequency to drop down to 20s as the
      minimum, so that up to 2 retries can get sent on an http request.
    * Add further limits to failsafe tunnels.
    * Keep exploratory and client tunnel testing and building stats separate.
    * Only use the 60s period for throttling tunnel requests due to transient
      network overload.
    * Rebuild tunnels earlier (1-3m before expiration, by default)
    * Cache the next hop's routerInfo for participating tunnels so that the
      tunnel participation doesn't depend on the netDb.
    * Fixed a long standing bug in the streaming lib where we wouldn't always
      unchoke messages when the window size grows.
    * Make sure the window size never reaches 0 (duh)
2005-02-21 18:02:14 +00:00
duck
0db239a3fe added irc.postman.i2p 2005-02-21 03:13:40 +00:00
jrandom
4745d61f9b added subrosa.i2p 2005-02-21 02:55:12 +00:00
jrandom
b9a4c3ba52 *cough* 2005-02-20 11:09:05 +00:00
jrandom
cbf6a70a1a 2005-02-20 jrandom
* Only build failsafe tunnels if we need them
    * Properly implement the selectNotFailingPeers so that we get a random
      selection of peers, rather than using the strictOrdering (thanks dm!)
    * Don't include too many "don't tell me about" peer references in the
      lookup message - only send the 10 peer references closest to the target.
2005-02-20 09:12:43 +00:00
jrandom
7d4e093b58 2005-02-19 jrandom
* Only build new extra tunnels on failure if we don't have enough
    * Fix a fencepost in the tunnel building so that e.g. a variance of
      2 means +/- 2, not +/- 1 (thanks dm!)
    * Avoid an NPE on client disconnect
    * Never select a shitlisted peer to participate in a tunnel
    * Have netDb store messages timeout after 10s, not the full 60s (duh)
    * Keep session tags around for a little longer, just in case (grr)
    * Cleaned up some closing event issues on the streaming lib
    * Stop bundling the jetty 5.1.2 and updated wrapper.config in the update
      so that 0.4.* users will need to do a clean install, but we don't need
      to shove an additional 2MB in each update to those already on 0.5.
    * Imported the susimail css (oops, thanks susi!)
2005-02-19 23:20:56 +00:00
jrandom
d27feabcb3 clear the old precompiled .java files (thanks duck!) 2005-02-18 16:56:46 +00:00
64 changed files with 806 additions and 321 deletions

View File

@@ -31,6 +31,9 @@
</war>
</target>
<target name="precompilejsp">
<delete dir="../jsp/WEB-INF/" />
<delete file="../jsp/web-fragment.xml" />
<delete file="../jsp/web-out.xml" />
<mkdir dir="../jsp/WEB-INF/" />
<mkdir dir="../jsp/WEB-INF/classes" />
<!-- there are various jspc ant tasks, but they all seem a bit flakey -->

View File

@@ -216,7 +216,7 @@ public class I2PTunnelRunner extends I2PThread implements I2PSocket.SocketErrorL
this.out = out;
_toI2P = toI2P;
direction = (toI2P ? "toI2P" : "fromI2P");
_cache = ByteCache.getInstance(256, NETWORK_BUFFER_SIZE);
_cache = ByteCache.getInstance(16, NETWORK_BUFFER_SIZE);
setName("StreamForwarder " + _runnerId + "." + (++__forwarderId));
start();
}
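
The change above shrinks the per-forwarder buffer cache from 256 cached buffers to 16. For readers unfamiliar with the class, here is a minimal sketch of a keyed, bounded buffer cache of this shape (hypothetical; the real net.i2p.util.ByteCache may differ in detail):

import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;

public class SimpleByteCache {
    // one cache instance per buffer size
    private static final Map<Integer, SimpleByteCache> INSTANCES = new HashMap<Integer, SimpleByteCache>();
    private final LinkedList<byte[]> _free = new LinkedList<byte[]>();
    private final int _maxCached;  // 16 after this change, 256 before
    private final int _bufSize;    // e.g. NETWORK_BUFFER_SIZE

    private SimpleByteCache(int maxCached, int bufSize) {
        _maxCached = maxCached;
        _bufSize = bufSize;
    }

    public static synchronized SimpleByteCache getInstance(int maxCached, int bufSize) {
        SimpleByteCache cache = INSTANCES.get(bufSize);
        if (cache == null) {
            cache = new SimpleByteCache(maxCached, bufSize);
            INSTANCES.put(bufSize, cache);
        }
        return cache;
    }

    /** Reuse an idle buffer if one is cached, else allocate a fresh one. */
    public synchronized byte[] acquire() {
        byte[] buf = _free.poll();
        return (buf != null) ? buf : new byte[_bufSize];
    }

    /** Return a buffer; beyond _maxCached idle buffers, let the GC have it. */
    public synchronized void release(byte[] buf) {
        if (buf.length == _bufSize && _free.size() < _maxCached)
            _free.add(buf);
    }
}

With 16 instead of 256, each stream forwarder caps its idle buffer memory at 16 x NETWORK_BUFFER_SIZE rather than 256 x.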

View File

@@ -49,9 +49,11 @@ public class ConfigTunnelsHelper {
TunnelPoolSettings in = _context.tunnelManager().getInboundSettings(dest.calculateHash());
TunnelPoolSettings out = _context.tunnelManager().getOutboundSettings(dest.calculateHash());
String name = (in != null ? in.getDestinationNickname() : null);
if ( (in == null) || (out == null) ) continue;
String name = in.getDestinationNickname();
if (name == null)
name = (out != null ? out.getDestinationNickname() : null);
name = out.getDestinationNickname();
if (name == null)
name = dest.calculateHash().toBase64().substring(0,6);
@@ -110,7 +112,7 @@ public class ConfigTunnelsHelper {
buf.append("</tr>\n");
// tunnel depth variance
buf.append("<tr><td>Variance</td>\n");
buf.append("<tr><td>Randomization</td>\n");
buf.append("<td><select name=\"").append(index).append(".varianceInbound\">\n");
buf.append("<option value=\"0\" ");
if (in.getLengthVariance() == 0) buf.append(" selected=\"true\" ");

View File

@@ -41,6 +41,22 @@ public class LogsHelper {
return buf.toString();
}
public String getCriticalLogs() {
List msgs = _context.logManager().getBuffer().getMostRecentCriticalMessages();
StringBuffer buf = new StringBuffer(16*1024);
buf.append("<ul>");
buf.append("<code>\n");
for (int i = msgs.size(); i > 0; i--) {
String msg = (String)msgs.get(i - 1);
buf.append("<li>");
buf.append(msg);
buf.append("</li>\n");
}
buf.append("</code></ul>\n");
return buf.toString();
}
public String getServiceLogs() {
String str = FileUtil.readTextFile("wrapper.log", 500, false);
if (str == null)

View File

@@ -3,8 +3,12 @@ package net.i2p.router.web;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.text.DateFormat;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.Locale;
import java.util.Set;
import net.i2p.data.DataHelper;
@@ -70,7 +74,32 @@ public class SummaryHelper {
else
return DataHelper.formatDuration(router.getUptime());
}
private static final DateFormat _fmt = new SimpleDateFormat("HH:mm:ss", Locale.UK);
public String getTime() {
if (_context == null) return "";
String now = null;
synchronized (_fmt) {
now = _fmt.format(new Date(_context.clock().now()));
}
long ms = _context.clock().getOffset();
if (ms < 60 * 1000) {
return now + " (" + (ms / 1000) + "s)";
} else if (ms < 60 * 60 * 1000) {
return now + " (" + (ms / (60 * 1000)) + "m)";
} else if (ms < 24 * 60 * 60 * 1000) {
return now + " (" + (ms / (60 * 60 * 1000)) + "h)";
} else {
return now + " (" + (ms / (24 * 60 * 60 * 1000)) + "d)";
}
}
public boolean allowReseed() {
return (_context.netDb().getKnownRouters() < 10);
}
/**
* Retrieve amount of used memory.

View File

@@ -19,6 +19,9 @@
<h4>Connection logs:</h4><a name="connectionlogs"> </a>
<jsp:getProperty name="logsHelper" property="connectionLogs" />
<hr />
<h4>Critical logs:</h4><a name="criticallogs"> </a>
<jsp:getProperty name="logsHelper" property="criticalLogs" />
<hr />
<h4>Service logs:</h4><a name="servicelogs"> </a>
<jsp:getProperty name="logsHelper" property="serviceLogs" />
</div>

View File

@@ -10,6 +10,7 @@
<b>Ident:</b> <jsp:getProperty name="helper" property="ident" /><br />
<b>Version:</b> <jsp:getProperty name="helper" property="version" /><br />
<b>Uptime:</b> <jsp:getProperty name="helper" property="uptime" /><br />
<b>Now:</b> <jsp:getProperty name="helper" property="time" /><br />
<b>Memory:</b> <jsp:getProperty name="helper" property="memory" /><br />
<hr />
@@ -23,7 +24,7 @@
if (helper.getActivePeers() <= 0) {
%><b><a href="config.jsp">check your NAT/firewall</a></b><br /><%
}
if (helper.getActiveProfiles() <= 10) { // 10 is the min fallback
if (helper.allowReseed()) {
if ("true".equals(System.getProperty("net.i2p.router.web.ReseedHandler.reseedInProgress", "false"))) {
out.print(" <i>reseeding</i>");
} else {

View File

@@ -132,6 +132,8 @@ public class SAMStreamSession {
if (socketMgr == null) {
throw new SAMException("Error creating I2PSocketManager");
}
socketMgr.addDisconnectListener(new DisconnectListener());
forceFlush = Boolean.valueOf(allprops.getProperty(PROP_FORCE_FLUSH, DEFAULT_FORCE_FLUSH)).booleanValue();
@@ -156,6 +158,12 @@ public class SAMStreamSession {
t.start();
}
}
private class DisconnectListener implements I2PSocketManager.DisconnectListener {
public void sessionDisconnected() {
close();
}
}
/**
* Get the SAM STREAM session Destination.

View File

@@ -57,7 +57,7 @@ public class Connection {
private I2PSocketFull _socket;
/** set to an error cause if the connection could not be established */
private String _connectionError;
private boolean _disconnectScheduled;
private long _disconnectScheduledOn;
private long _lastReceivedOn;
private ActivityTimer _activityTimer;
/** window size when we last saw congestion */
@@ -76,7 +76,7 @@ public class Connection {
private long _lifetimeDupMessageReceived;
public static final long MAX_RESEND_DELAY = 60*1000;
public static final long MIN_RESEND_DELAY = 30*1000;
public static final long MIN_RESEND_DELAY = 20*1000;
/** wait up to 5 minutes after disconnection so we can ack/close packets */
public static int DISCONNECT_TIMEOUT = 5*60*1000;
@@ -113,7 +113,7 @@ public class Connection {
_connectionManager = manager;
_resetReceived = false;
_connected = true;
_disconnectScheduled = false;
_disconnectScheduledOn = -1;
_lastReceivedOn = -1;
_activityTimer = new ActivityTimer();
_ackSinceCongestion = true;
@@ -181,6 +181,11 @@ public class Connection {
}
}
}
void windowAdjusted() {
synchronized (_outboundPackets) {
_outboundPackets.notifyAll();
}
}
void ackImmediately() {
_receiver.send(null, 0, 0);
@@ -191,6 +196,10 @@ public class Connection {
*
*/
void sendReset() {
if (_disconnectScheduledOn < 0) {
_disconnectScheduledOn = _context.clock().now();
SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
}
_resetSent = true;
if (_resetSentOn <= 0)
_resetSentOn = _context.clock().now();
@@ -382,6 +391,10 @@ public class Connection {
}
void resetReceived() {
if (_disconnectScheduledOn < 0) {
_disconnectScheduledOn = _context.clock().now();
SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
}
_resetReceived = true;
MessageOutputStream mos = _outputStream;
MessageInputStream mis = _inputStream;
@@ -398,6 +411,7 @@ public class Connection {
public boolean getHardDisconnected() { return _hardDisconnected; }
public boolean getResetSent() { return _resetSent; }
public long getResetSentOn() { return _resetSentOn; }
public long getDisconnectScheduledOn() { return _disconnectScheduledOn; }
void disconnect(boolean cleanDisconnect) {
disconnect(cleanDisconnect, true);
@@ -424,8 +438,8 @@ public class Connection {
killOutstandingPackets();
}
if (removeFromConMgr) {
if (!_disconnectScheduled) {
_disconnectScheduled = true;
if (_disconnectScheduledOn < 0) {
_disconnectScheduledOn = _context.clock().now();
SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
}
}
@@ -445,8 +459,8 @@ public class Connection {
SimpleTimer.getInstance().removeEvent(_activityTimer);
_activityTimer = null;
if (!_disconnectScheduled) {
_disconnectScheduled = true;
if (_disconnectScheduledOn < 0) {
_disconnectScheduledOn = _context.clock().now();
if (_log.shouldLog(Log.INFO))
_log.info("Connection disconnect complete from dead, drop the con "
@@ -576,7 +590,13 @@ public class Connection {
public long getAckedPackets() { return _ackedPackets; }
public long getCreatedOn() { return _createdOn; }
public long getCloseSentOn() { return _closeSentOn; }
public void setCloseSentOn(long when) { _closeSentOn = when; }
public void setCloseSentOn(long when) {
_closeSentOn = when;
if (_disconnectScheduledOn < 0) {
_disconnectScheduledOn = _context.clock().now();
SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
}
}
public long getCloseReceivedOn() { return _closeReceivedOn; }
public void setCloseReceivedOn(long when) { _closeReceivedOn = when; }
@@ -851,6 +871,7 @@ public class Connection {
+ ") for " + Connection.this.toString());
getOptions().setWindowSize(newWindowSize);
windowAdjusted();
}
}
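
The _disconnectScheduled to _disconnectScheduledOn change above replaces a boolean flag with a timestamp: -1 means "not yet scheduled", any other value records when the one-shot disconnect event was queued, which SchedulerDead later uses to measure elapsed time. A minimal sketch of the idiom, with java.util.Timer standing in for I2P's SimpleTimer:

import java.util.Timer;
import java.util.TimerTask;

public class DisconnectScheduling {
    private long _disconnectScheduledOn = -1;   // -1 = not scheduled yet
    private final Timer _timer = new Timer(true);

    /** Called from sendReset(), resetReceived(), disconnect(), setCloseSentOn()... */
    public synchronized void scheduleDisconnect(final Runnable disconnectEvent,
                                                long timeoutMs) {
        if (_disconnectScheduledOn < 0) {            // first caller wins; later calls are no-ops
            _disconnectScheduledOn = System.currentTimeMillis();
            _timer.schedule(new TimerTask() {
                public void run() { disconnectEvent.run(); }
            }, timeoutMs);
        }
    }

    /** Schedulers can now ask both "was it scheduled?" and "when?" */
    public synchronized long getDisconnectScheduledOn() {
        return _disconnectScheduledOn;
    }
}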

View File

@@ -160,6 +160,8 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
public void setWindowSize(int numMsgs) {
if (numMsgs > _maxWindowSize)
numMsgs = _maxWindowSize;
else if (numMsgs <= 0)
numMsgs = 1;
_windowSize = numMsgs;
}

View File

@@ -224,6 +224,9 @@ public class ConnectionPacketHandler {
newWindowSize += 1;
}
}
if (newWindowSize <= 0)
newWindowSize = 1;
if (_log.shouldLog(Log.DEBUG))
_log.debug("New window size " + newWindowSize + "/" + oldWindow + " congestionSeenAt: "
@@ -233,6 +236,7 @@ public class ConnectionPacketHandler {
con.setCongestionWindowEnd(newWindowSize + lowest);
}
con.windowAdjusted();
return congested;
}
@@ -255,11 +259,15 @@ public class ConnectionPacketHandler {
con.setRemotePeer(packet.getOptionalFrom());
return true;
} else {
// neither RST nor SYN and we dont have the stream id yet? nuh uh
if (_log.shouldLog(Log.WARN))
_log.warn("Packet without RST or SYN where we dont know stream ID: "
+ packet);
return false;
// neither RST nor SYN and we dont have the stream id yet?
if (packet.getSequenceNum() <= 2) {
return true;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Packet without RST or SYN where we dont know stream ID: "
+ packet);
return false;
}
}
} else {
if (!DataHelper.eq(con.getSendStreamId(), packet.getReceiveStreamId())) {
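
The windowAdjusted() plumbing above is the "unchoke" fix from the changelog: writers block while the congestion window is full, and before this change nothing woke them when the window grew. A minimal sketch of the wait/notify shape (names assumed, not the actual streaming lib API):

public class WindowGate {
    private final Object _lock = new Object();
    private int _inFlight = 0;
    private int _windowSize = 1;

    /** Packet handler side: recompute the window, then wake blocked writers. */
    public void windowAdjusted(int newWindowSize) {
        synchronized (_lock) {
            _windowSize = Math.max(1, newWindowSize);  // never let it reach 0
            _lock.notifyAll();                         // the previously missing unchoke
        }
    }

    /** Writer side: wait for room in the window before sending a packet. */
    public void acquireSendSlot() throws InterruptedException {
        synchronized (_lock) {
            while (_inFlight >= _windowSize)
                _lock.wait();
            _inFlight++;
        }
    }

    public void packetAcked() {
        synchronized (_lock) {
            _inFlight--;
            _lock.notifyAll();
        }
    }
}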

View File

@@ -105,7 +105,7 @@ public class I2PSocketManagerFull implements I2PSocketManager {
}
public I2PSocket receiveSocket() throws I2PException {
if (_session.isClosed()) throw new I2PException("Session closed");
verifySession();
Connection con = _connectionManager.getConnectionHandler().accept(-1);
if (_log.shouldLog(Log.DEBUG))
_log.debug("receiveSocket() called: " + con);
@@ -149,6 +149,12 @@ public class I2PSocketManagerFull implements I2PSocketManager {
return _serverSocket;
}
private void verifySession() throws I2PException {
if (!_connectionManager.getSession().isClosed())
return;
_connectionManager.getSession().connect();
}
/**
* Create a new connected socket (block until the socket is created)
*
@@ -160,8 +166,7 @@ public class I2PSocketManagerFull implements I2PSocketManager {
*/
public I2PSocket connect(Destination peer, I2PSocketOptions options)
throws I2PException, NoRouteToHostException {
if (_connectionManager.getSession().isClosed())
throw new I2PException("Session is closed");
verifySession();
if (options == null)
options = _defaultOptions;
ConnectionOptions opts = null;
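
Both receiveSocket() and connect() now funnel through verifySession(), so a dropped I2CP session is transparently reopened instead of surfacing as "Session closed". The shape of the guard, with the session type simplified to an interface (an illustration, not the actual I2CP API):

interface ReconnectableSession {
    boolean isClosed();
    void connect() throws Exception;   // re-establishes the I2CP connection
}

class SocketManagerSketch {
    private final ReconnectableSession _session;

    SocketManagerSketch(ReconnectableSession session) { _session = session; }

    private void verifySession() throws Exception {
        if (_session.isClosed())
            _session.connect();        // was: throw "Session is closed"
    }

    public void acceptSocket() throws Exception {
        verifySession();               // shared guard on both entry points
        // ... block on the connection handler as before
    }

    public void connectSocket() throws Exception {
        verifySession();
        // ... establish the outbound stream as before
    }
}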

View File

@@ -92,7 +92,9 @@ public class MessageHandler implements I2PSessionListener {
*/
public void errorOccurred(I2PSession session, String message, Throwable error) {
if (_log.shouldLog(Log.ERROR))
_log.error("error occurred: " + message, error);
_log.error("error occurred: " + message + "- " + error.getMessage());
if (_log.shouldLog(Log.WARN))
_log.warn("cause", error);
//_manager.disconnectAllHard();
}

View File

@@ -137,28 +137,31 @@ public class PacketHandler {
if (_log.shouldLog(Log.WARN))
_log.warn("Received forged reset for " + con, ie);
}
} else if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
} else {
if ( (con.getSendStreamId() == null) ||
(DataHelper.eq(con.getSendStreamId(), packet.getReceiveStreamId())) ) {
byte oldId[] = con.getSendStreamId();
// con fully established, w00t
con.setSendStreamId(packet.getReceiveStreamId());
if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) // con fully established, w00t
con.setSendStreamId(packet.getReceiveStreamId());
try {
con.getPacketHandler().receivePacket(packet, con);
} catch (I2PException ie) {
if (_log.shouldLog(Log.WARN))
_log.warn("Received forged syn for " + con, ie);
_log.warn("Received forged packet for " + con + ": " + packet, ie);
con.setSendStreamId(oldId);
}
} else {
} else if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Receive a syn packet with the wrong IDs, sending reset: " + packet);
sendReset(packet);
} else {
if (!con.getResetSent()) {
// someone is sending us a packet on the wrong stream
if (_log.shouldLog(Log.WARN))
_log.warn("Received a packet on the wrong stream: " + packet + " connection: " + con);
}
}
} else {
// someone is sending us a packet on the wrong stream
if (_log.shouldLog(Log.WARN))
_log.warn("Received a packet on the wrong stream: " + packet + " connection: " + con);
}
}
}

View File

@@ -38,7 +38,7 @@ class SchedulerConnectedBulk extends SchedulerImpl {
public boolean accept(Connection con) {
boolean ok = (con != null) &&
(con.getAckedPackets() > 0) &&
(con.getHighestAckedThrough() >= 0) &&
(con.getOptions().getProfile() == ConnectionOptions.PROFILE_BULK) &&
(!con.getResetReceived()) &&
( (con.getCloseSentOn() <= 0) || (con.getCloseReceivedOn() <= 0) );

View File

@@ -39,7 +39,7 @@ class SchedulerConnecting extends SchedulerImpl {
boolean notYetConnected = (con.getIsConnected()) &&
//(con.getSendStreamId() == null) && // not null on recv
(con.getLastSendId() >= 0) &&
(con.getAckedPackets() <= 0) &&
(con.getHighestAckedThrough() < 0) &&
(!con.getResetReceived());
return notYetConnected;
}

View File

@@ -32,19 +32,13 @@ class SchedulerDead extends SchedulerImpl {
public boolean accept(Connection con) {
if (con == null) return false;
long timeSinceClose = _context.clock().now() - con.getCloseSentOn();
if (con.getResetSent())
timeSinceClose = _context.clock().now() - con.getResetSentOn();
boolean nothingLeftToDo = (con.getCloseSentOn() > 0) &&
(con.getCloseReceivedOn() > 0) &&
(con.getUnackedPacketsReceived() <= 0) &&
(con.getUnackedPacketsSent() <= 0) &&
(con.getResetSent()) &&
long timeSinceClose = _context.clock().now() - con.getDisconnectScheduledOn();
boolean nothingLeftToDo = (con.getDisconnectScheduledOn() > 0) &&
(timeSinceClose >= Connection.DISCONNECT_TIMEOUT);
boolean timedOut = (con.getOptions().getConnectTimeout() < con.getLifetime()) &&
con.getSendStreamId() == null &&
con.getLifetime() >= Connection.DISCONNECT_TIMEOUT;
return con.getResetReceived() || nothingLeftToDo || timedOut;
return nothingLeftToDo || timedOut;
}
public void eventOccurred(Connection con) {

View File

@@ -38,7 +38,7 @@ class SchedulerHardDisconnected extends SchedulerImpl {
timeSinceClose = _context.clock().now() - con.getResetSentOn();
boolean ok = (con.getHardDisconnected() || con.getResetSent()) &&
(timeSinceClose < Connection.DISCONNECT_TIMEOUT);
return ok;
return ok || con.getResetReceived();
}
public void eventOccurred(Connection con) {

apps/susimail/src/css.css Normal file
View File

@@ -0,0 +1,96 @@
body {
background-color:white;
}
li {
font-family:Verdana,Tahoma,Arial,Helvetica;
color:black;
line-height:12pt;
font-size:10pt;
margin-left:5mm;
margin-right:5mm;
}
p {
font-family:Verdana,Tahoma,Arial,Helvetica;
color:black;
line-height:12pt;
margin-left:5mm;
margin-right:5mm;
font-size:10pt;
}
p.hl {
font-size:12pt;
letter-spacing:2pt;
line-height:18pt;
font-weight:bold;
}
p.text {
margin-left:10mm;
margin-right:10mm;
}
p.error {
color:#ff0000;
}
p.info {
color:#327BBF;
}
span.coloured {
color:#327BBF;
}
p.footer {
margin-left:10mm;
margin-right:10mm;
font-size:8pt;
line-height:10pt;
}
p.mailbody {
font-family:Courier-Fixed;
margin-left:1cm;
margin-right:1cm;
}
a {
color:#327BBF;
text-decoration:none;
}
a:hover {
text-decoration:underline;
}
td {
font-family:Verdana,Tahoma,Arial,Helvetica;
color:black;
line-height:12pt;
margin-left:5mm;
margin-right:5mm;
font-size:10pt;
}
tr.list0 {
background-color:#e0e0e0;
}
tr.list1 {
background-color:#ffffff;
}
table.noborder {
margin-left:0mm;
margin-top:0mm;
margin-right:0mm;
}
pre {
font-family:Courier-Fixed;
margin-left:1cm;
margin-right:1cm;
}

View File

@@ -250,15 +250,19 @@
<copy file="build/routerconsole.jar" todir="pkg-temp/lib/" />
<!-- for the i2p 0.5 release, push jetty 5.2.1 -->
<!--
<copy file="build/jasper-compiler.jar" todir="pkg-temp/lib/" />
<copy file="build/jasper-runtime.jar" todir="pkg-temp/lib/" />
<copy file="build/commons-logging.jar" todir="pkg-temp/lib/" />
<copy file="build/commons-el.jar" todir="pkg-temp/lib/" />
<copy file="build/org.mortbay.jetty.jar" todir="pkg-temp/lib/" />
<copy file="build/javax.servlet.jar" todir="pkg-temp/lib/" />
-->
<!-- requires commons-* to be added to the classpath (boo, hiss) -->
<!--
<copy file="installer/resources/wrapper.config" todir="pkg-temp/" />
<touch file="pkg-temp/wrapper.config.updated" />
-->
<copy file="build/i2ptunnel.war" todir="pkg-temp/webapps/" />
<copy file="build/routerconsole.war" todir="pkg-temp/webapps/" />

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.27 $ $Date: 2005/02/16 17:23:50 $";
public final static String VERSION = "0.5";
public final static String ID = "$Revision: 1.28 $ $Date: 2005/02/17 17:57:53 $";
public final static String VERSION = "0.5.0.1";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -225,6 +225,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
*/
public void connect() throws I2PSessionException {
_closed = false;
_availabilityNotifier.stopNotifying();
I2PThread notifier = new I2PThread(_availabilityNotifier);
notifier.setName("Notifier " + _myDestination.calculateHash().toBase64().substring(0,4));
notifier.setDaemon(true);
@@ -504,7 +505,11 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
* Pass off the error to the listener
*/
void propogateError(String msg, Throwable error) {
if (_log.shouldLog(Log.ERROR)) _log.error(getPrefix() + "Error occurred: " + msg, error);
if (_log.shouldLog(Log.ERROR))
_log.error(getPrefix() + "Error occurred: " + msg + " - " + error.getMessage());
if (_log.shouldLog(Log.WARN))
_log.warn(getPrefix() + " cause", error);
if (_sessionListener != null) _sessionListener.errorOccurred(this, msg, error);
}

View File

@@ -57,8 +57,8 @@ class MessagePayloadMessageHandler extends HandlerImpl {
Payload payload = msg.getPayload();
byte[] data = _context.elGamalAESEngine().decrypt(payload.getEncryptedData(), session.getDecryptionKey());
if (data == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the payload");
if (_log.shouldLog(Log.WARN))
_log.warn("Error decrypting the payload");
throw new DataFormatException("Unable to decrypt the payload");
}
payload.setUnencryptedData(data);

View File

@@ -53,7 +53,7 @@ class TransientSessionKeyManager extends SessionKeyManager {
* can cause failed decrypts)
*
*/
public final static long SESSION_LIFETIME_MAX_MS = SESSION_TAG_DURATION_MS + 2 * 60 * 1000;
public final static long SESSION_LIFETIME_MAX_MS = SESSION_TAG_DURATION_MS + 5 * 60 * 1000;
public final static int MAX_INBOUND_SESSION_TAGS = 500 * 1000; // this will consume at most a few MB
/**

View File

@@ -99,7 +99,7 @@ public class BufferedStatLog implements StatLog {
if (_out != null) try { _out.close(); } catch (IOException ioe) {}
_outFile = filename;
try {
_out = new BufferedWriter(new FileWriter(_outFile, true));
_out = new BufferedWriter(new FileWriter(_outFile, true), 32*1024);
} catch (IOException ioe) { ioe.printStackTrace(); }
}
}
@@ -147,12 +147,16 @@ public class BufferedStatLog implements StatLog {
_out.write(when);
_out.write(" ");
if (_events[cur].getScope() == null)
_out.write("noScope ");
_out.write("noScope");
else
_out.write(_events[cur].getScope() + " ");
_out.write(_events[cur].getStat()+" ");
_out.write(_events[cur].getValue()+" ");
_out.write(_events[cur].getDuration()+"\n");
_out.write(_events[cur].getScope());
_out.write(" ");
_out.write(_events[cur].getStat());
_out.write(" ");
_out.write(Long.toString(_events[cur].getValue()));
_out.write(" ");
_out.write(Long.toString(_events[cur].getDuration()));
_out.write("\n");
}
cur = (cur + 1) % _events.length;
}

View File

@@ -26,10 +26,10 @@ public class Timestamper implements Runnable {
private boolean _initialized;
private static final int DEFAULT_QUERY_FREQUENCY = 5*60*1000;
private static final String DEFAULT_SERVER_LIST = "pool.ntp.org, pool.ntp.org";
private static final String DEFAULT_SERVER_LIST = "pool.ntp.org, pool.ntp.org, pool.ntp.org";
private static final boolean DEFAULT_DISABLED = true;
/** how many times do we have to query if we are changing the clock? */
private static final int DEFAULT_CONCURRING_SERVERS = 2;
private static final int DEFAULT_CONCURRING_SERVERS = 3;
public static final String PROP_QUERY_FREQUENCY = "time.queryFrequencyMs";
public static final String PROP_SERVER_LIST = "time.sntpServerList";
@@ -109,7 +109,7 @@ public class Timestamper implements Runnable {
if (_log.shouldLog(Log.INFO))
_log.info("Starting up timestamper");
boolean alreadyBitched = false;
boolean lastFailed = false;
try {
while (true) {
updateConfig();
@@ -123,14 +123,16 @@ public class Timestamper implements Runnable {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Querying servers " + _servers);
try {
queryTime(serverList);
lastFailed = !queryTime(serverList);
} catch (IllegalArgumentException iae) {
if (!alreadyBitched)
if (!lastFailed)
_log.log(Log.CRIT, "Unable to reach any of the NTP servers - network disconnected?");
alreadyBitched = true;
lastFailed = true;
}
}
long sleepTime = _context.random().nextInt(_queryFrequency) + _queryFrequency;
if (lastFailed)
sleepTime = 30*1000;
try { Thread.sleep(sleepTime); } catch (InterruptedException ie) {}
}
} catch (Throwable t) {
@@ -138,39 +140,64 @@ public class Timestamper implements Runnable {
}
}
private void queryTime(String serverList[]) throws IllegalArgumentException {
/**
* True if the time was queried successfully, false if it couldn't be
*/
private boolean queryTime(String serverList[]) throws IllegalArgumentException {
long found[] = new long[_concurringServers];
long localTime = -1;
long now = -1;
long expectedDelta = 0;
for (int i = 0; i < _concurringServers; i++) {
try { Thread.sleep(10*1000); } catch (InterruptedException ie) {}
localTime = _context.clock().now();
now = NtpClient.currentTime(serverList);
long delta = now - localTime;
found[i] = delta;
if (i == 0) {
if (Math.abs(delta) < MAX_VARIANCE) {
if (_log.shouldLog(Log.INFO))
_log.info("a single SNTP query was within the tolerance (" + delta + "ms)");
return;
return true;
} else {
// outside the tolerance, lets iterate across the concurring queries
expectedDelta = delta;
}
} else {
if (Math.abs(delta - expectedDelta) > MAX_VARIANCE) {
if (_log.shouldLog(Log.ERROR))
_log.error("SNTP client variance exceeded at query " + i + ". expected = " + expectedDelta + ", found = " + delta);
return;
if (_log.shouldLog(Log.ERROR)) {
StringBuffer err = new StringBuffer(96);
err.append("SNTP client variance exceeded at query ").append(i);
err.append(". expected = ");
err.append(expectedDelta);
err.append(", found = ");
err.append(delta);
err.append(" all deltas: ");
for (int j = 0; j < found.length; j++)
err.append(found[j]).append(' ');
_log.error(err.toString());
}
return false;
}
}
}
stampTime(now);
if (_log.shouldLog(Log.DEBUG)) {
StringBuffer buf = new StringBuffer(64);
buf.append("Deltas: ");
for (int i = 0; i < found.length; i++)
buf.append(found[i]).append(' ');
_log.debug(buf.toString());
}
return true;
}
/**
* Send an HTTP request to a given URL specifying the current time
*/
private void stampTime(long now) {
long before = _context.clock().now();
synchronized (_listeners) {
for (int i = 0; i < _listeners.size(); i++) {
UpdateListener lsnr = (UpdateListener)_listeners.get(i);
@@ -178,7 +205,7 @@ public class Timestamper implements Runnable {
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Stamped the time as " + now);
_log.debug("Stamped the time as " + now + " (delta=" + (now-before) + ")");
}
/**
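
queryTime() now reports success as a boolean, and the run loop uses it to shorten the sleep after a failed round: a quick 30s retry instead of a full randomized interval. The backoff decision in isolation (a sketch with assumed names):

import java.util.Random;

class NtpBackoff {
    /** Normal case: sleep frequency..2*frequency; after a failure, retry in 30s. */
    static long nextSleep(boolean lastFailed, int queryFrequencyMs, Random rnd) {
        if (lastFailed)
            return 30 * 1000;
        return queryFrequencyMs + rnd.nextInt(queryFrequencyMs);
    }
}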

View File

@@ -12,10 +12,12 @@ import net.i2p.I2PAppContext;
public class LogConsoleBuffer {
private I2PAppContext _context;
private List _buffer;
private List _critBuffer;
public LogConsoleBuffer(I2PAppContext context) {
_context = context;
_buffer = new ArrayList();
_critBuffer = new ArrayList();
}
void add(String msg) {
@@ -26,6 +28,14 @@ public class LogConsoleBuffer {
_buffer.add(msg);
}
}
void addCritical(String msg) {
int lim = _context.logManager().getConsoleBufferSize();
synchronized (_critBuffer) {
while (_critBuffer.size() >= lim)
_critBuffer.remove(0);
_critBuffer.add(msg);
}
}
/**
* Retrieve the currently buffered messages, earlier values were generated...
@@ -38,4 +48,15 @@ public class LogConsoleBuffer {
return new ArrayList(_buffer);
}
}
/**
* Retrieve the currently buffered critical messages, earlier values were generated...
* earlier. All values are strings with no formatting (as they are written
* in the logs)
*
*/
public List getMostRecentCriticalMessages() {
synchronized (_critBuffer) {
return new ArrayList(_critBuffer);
}
}
}
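
addCritical() is the same capped-FIFO idiom as the main console buffer: evict from the front once the limit is hit, and hand out snapshot copies so readers never touch the live list. The idiom on its own, with a Deque standing in for the raw List (a sketch, not the I2P class):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

class CappedBuffer {
    private final Deque<String> _buf = new ArrayDeque<String>();
    private final int _limit;

    CappedBuffer(int limit) { _limit = limit; }

    synchronized void add(String msg) {
        while (_buf.size() >= _limit)
            _buf.removeFirst();          // drop the oldest entry
        _buf.addLast(msg);
    }

    /** Snapshot copy, like getMostRecentCriticalMessages(). */
    synchronized List<String> mostRecent() {
        return new ArrayList<String>(_buf);
    }
}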

View File

@@ -101,6 +101,8 @@ class LogWriter implements Runnable {
// we always add to the console buffer, but only sometimes write to stdout
_manager.getBuffer().add(val);
if (rec.getPriority() >= Log.CRIT)
_manager.getBuffer().addCritical(val);
if (_manager.getDisplayOnScreenLevel() <= rec.getPriority()) {
if (_manager.displayOnScreen()) {
System.out.print(val);

View File

@@ -1,4 +1,70 @@
$Id: history.txt,v 1.146 2005/02/17 17:57:53 jrandom Exp $
$Id: history.txt,v 1.153 2005/02/22 23:20:29 jrandom Exp $
* 2005-02-23 0.5.0.1 released
2005-02-22 jrandom
* Reworked the tunnel (re)building process to remove the tokens and
provide cleaner controls on the tunnels built.
* Fixed situations where the timestamper wanted to test more servers than
were provided (thanks Tracker!)
* Get rid of the dead SAM sessions by using the streaming lib's callbacks
(thanks Tracker!)
2005-02-22 jrandom
* Temporary workaround for the I2CP disconnect bug (have the streaminglib
try to automatically reconnect on accept()/connect(..)).
* Loop check for expired lease republishing (just in case)
2005-02-22 jrandom
* Adjusted (and fixed...) the timestamper change detection
* Deal with a rare reordering bug at the beginning of a stream (so we
don't drop it unnecessarily)
* Cleaned up some dropped message handling in the router
* Reduced job queue churn when dealing with a large number of tunnels by
sharing an expiration job
* Keep a separate list of the most recent CRIT messages (shown on the
logs.jsp). This way they don't get buried among any other messages.
* For clarity, display the tunnel variance config as "Randomization" on
the web console.
* If lease republishing fails (boo! hiss!) try it again
* Actually fix the negative jobLag in the right place (this time)
* Allow reseeding when there are less than 10 known peer references
* Lots of logging updates.
2005-02-20 jrandom
* Allow the streaming lib resend frequency to drop down to 20s as the
minimum, so that up to 2 retries can get sent on an http request.
* Add further limits to failsafe tunnels.
* Keep exploratory and client tunnel testing and building stats separate.
* Only use the 60s period for throttling tunnel requests due to transient
network overload.
* Rebuild tunnels earlier (1-3m before expiration, by default)
* Cache the next hop's routerInfo for participating tunnels so that the
tunnel participation doesn't depend on the netDb.
* Fixed a long standing bug in the streaming lib where we wouldn't always
unchoke messages when the window size grows.
* Make sure the window size never reaches 0 (duh)
2005-02-20 jrandom
* Only build failsafe tunnels if we need them
* Properly implement the selectNotFailingPeers so that we get a random
selection of peers, rather than using the strictOrdering (thanks dm!)
* Don't include too many "don't tell me about" peer references in the
lookup message - only send the 10 peer references closest to the target.
2005-02-19 jrandom
* Only build new extra tunnels on failure if we don't have enough
* Fix a fencepost in the tunnel building so that e.g. a variance of
2 means +/- 2, not +/- 1 (thanks dm!)
* Avoid an NPE on client disconnect
* Never select a shitlisted peer to participate in a tunnel
* Have netDb store messages timeout after 10s, not the full 60s (duh)
* Keep session tags around for a little longer, just in case (grr)
* Cleaned up some closing event issues on the streaming lib
* Stop bundling the jetty 5.1.2 and updated wrapper.config in the update
so that 0.4.* users will need to do a clean install, but we don't need
to shove an additional 2MB in each update to those already on 0.5.
* Imported the susimail css (oops, thanks susi!)
* 2005-02-18 0.5 released

View File

@@ -1,6 +1,8 @@
; TC's hosts.txt guaranteed freshness
; $Id: hosts.txt,v 1.124 2005/02/18 01:23:29 jrandom Exp $
; $Id: hosts.txt,v 1.126 2005/02/20 21:55:12 jrandom Exp $
; changelog:
; (1.148) added irc.postman.i2p
; (1.147) added subrosa.i2p
; (1.146) added moxonom.i2p
; (1.145) added sex0r.i2p flock.i2p cneal.i2p www.nntp.i2p wallsgetbombed.i2p
; thedarkside.i2p legion.i2p manveru.i2p books.manveru.i2p bt.i2p
@@ -332,4 +334,5 @@ manveru.i2p=JqF1WsB~pgmGgXWuAL~yuTRrQYK8WpemusXASZ5RJuXbxtr0eV-GENTNnhQSRsgFx~Cv
books.manveru.i2p=qJpeR~kf1GVSlXtkm8BUnUIThLlmK5XsInMf4Zs9kOSebJs53tu8JOztk5WRCOExZ0MDojJUuWcMuC1~9zSvIMMokEILOM91D53og9hY2zLSFD4wqEjzHFvsmr7Vwp8EYTkab9AziqV5Pe5-rycf1BXTeTKyDjc5KSGJu23DHHXuaHrEZbJ9gKmyloe5mKUYRH~zJ7zGreL5isTWBNj596NWlXJA6XSrhP2HWX7HfMgzv-3sO0hZcfWVTb1NDhBdjIaJbsBdo2LCVaMlxqdLkAKMFnoAlN1YMwejvCab5nFFIqtecdTsXIbnKV0eo4t3F61cKaJur~tINRCVT8iek1aTTddR1rsPYdEHOiAG87qdQh0ny8la3m55ThD0Bf0aNqzyIytJnP9JHmU320FtrpZNKJ7-BMvtkOwBGcjUBdpVzcQGHo5cNkszahMm4NPbuQn3XtTZZYIVe52LLHi9oJC3mwGl8-FiRfXt3brvmkH6vZc~Z~vWUV0w1SmyFUGuAAAA
bt.i2p=Ei0~5AINGp-RegMS79AWJOgPRi2hCBAml92pFZTVTEZxOaaEHyOA0Ef6d43e1p0-Fx0ERR--YLdUBdTxAR1P3BYqZnDNDpegm5ywwp6mq27D~m5HWFljGAn33tqq6XUMO2e3iem5lPcY4RQ~-bILo9NHPLDFgd-i0ng9p11efyHeTN4uEIOKIj4esL0l-3CoNmODCM2Cw4VsTeNOvWnUAY7cOQg4OWQeKWcSV~K2YNn5~le6WGLTGwMsxKlXJRbnoSdGjEyD2tErtBNMzDGUYuYT9570oB0zuBskW8-3RBA8y5WT0tM3YshLpA9vSSgJT4k1S~mJGQdqQ3LNKA3F-n~F6mQ-l8y6yX5z9GNql505-aHs35WGBgN9xdAFDMZrb-dYtytGWHDblQGexuHy7zdbs8Pf0GOE1Mm1oKLm7iLZZEcJV4eh0dwsTMwPjKADbJVQCwn1YIIWJ-jUyACWrKyDEqR8rdGco2qX~PMi5CRW66sJyVrgcoHAz92d58FmAAAA
moxonom.i2p=hJU2KuBAv8QJh3jgoigFHE89h-FgTVQ1aHwRND2lqpkeLkVkC9A~7hCpYz2KAbWfKANImDzWMZ4jQvL80yfd~ZD7buqEZ60qA85l16YRZ-3JdWIiyQmyS5mNPxhCD3QZRyqoz56XJm0IyLc4PYcVdBAByRu19zjVZQeC-e4fMNRCNparVAm0lsdkNuLI3vwbrcxGNYoS1KQRJxGJAylngmL1DO7gu1FIBkxv~Ge1Q7m1pPgVluksmwxEaQDtbnZWS1nlcApEZuI0hXSfpmr1pTjS8oLSk3~j1G9q-TZ0PCSH5j8bofs5vWBOrdYw3-V3JfuZvrQkLWjlcCM6uHQ8VpAeAlDBpqLBZ5ixOquaO7bPqyjxD9oV4mYxcQusYPrHsbO15XQep9Y77r1aKYSo1SWnxGBtQqDtAorMHC8gBnLQwQeLT3Sk0mryp1PCxydR~m7xB2GWHxeIUQLNUOAf0LXnaOswVeZPImK0M04a87IGcd5yfJiKwJRO4rSdh4v~AAAA
subrosa.i2p=ndXKjhUrTaZYPX6EFeUsM~grnrsH-sFqFA74fe6qP-kTnpwxh29Vx6UPbxy~kTnhFEOmrJ7I1btDs8L4FgXhFgMCSVp2EcyoloLJJWUGaUrrBNEcvmCZOjtzS6X3KA7I~ZKfNY~Edumdp~FQoiRTl6gbuScdk2EwEYalUOGHGHTp74hF5ktHlt3HTkQCZVfA8avbkti0JHeWEy5bwQIW3hrfAFb6tRXFtkmCnK6xqFT~~qMw9vaaXDCqp5O1fUIY-8L4B2Bz52EK~tRrb1yTIQzAhYy0lT2RjmA0dH9mfywaEikOdyNBttf1xo0g7bTw4k44KTLaEVj4WakHMvsSWw~HH7vNDlLFvbWY-Fd3TJEuQSrAt43cXYTMM7x8ZkmXTNP3Z-bH6o-NuizpLPue0uWYTvVym66VWXysF6BrB~BV1lQ~U~PzNlvqxQ9GvAAc5LS7iSw-tuAleP151sS7GmFW~BF~SOYKQVR0nXKRBmIpkGvFCBMqVKXFJ2OWTW2-AAAA
irc.postman.i2p=d38SuD-ayF9gprdrl2uJM-FfFBor8qmBVQDxZt8SL7VZPUukrkzvxeaCE8riF5~2g1EgOA1nSfrQ4mt5NhuyL8MxGf6xByDng2lVCZ5Ks2InFVdaCEVg0BCfzdpztUGfO81oT7902zoE260P-qDuiSih5KMEouSWr7RPfP75ivoKfnRhVFcZiI36mRUfgOjKSGloeVLdkGIYh3AVYZ4MqQSWP-MXcY0xr1cD~G1ars2vfTNUjwErPirsyktbES0xXDtgr-Q7EF1vYo3cbPttFTyG6LUSEHvcHErfov5hAy~3lYkhgqMqVFyeKDEyb4ZkoSse1q2TpKxzmDAC95H4DQoxB6onvGjpgYQgwK3ZREf-0UVYY8eVHbnN~jTWLnIOdHfEbSMqN5OMYMeLvLc~SX-qKGNExTaA6Zl80QOkTF~Hh3lAONXUhJgPtzuV2igtXyV~BuhFuA8M2nu~HwLsgv5qPN8wInudXpzhhp~hG36tZ90OGDMSA-XXRMzwhHgCAAAA

View File

@@ -243,7 +243,9 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
buf.append("\n\tSearch Key: ").append(getSearchKey());
buf.append("\n\tFrom: ").append(getFrom());
buf.append("\n\tReply Tunnel: ").append(getReplyTunnel());
buf.append("\n\tDont Include Peers: ").append(getDontIncludePeers());
buf.append("\n\tDont Include Peers: ");
if (_dontIncludePeers != null)
buf.append(_dontIncludePeers.size());
buf.append("]");
return buf.toString();
}

View File

@@ -20,6 +20,7 @@ import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
@@ -35,7 +36,7 @@ import net.i2p.util.Log;
public class InNetMessagePool implements Service {
private Log _log;
private RouterContext _context;
private Map _handlerJobBuilders;
private HandlerJobBuilder _handlerJobBuilders[];
private List _pendingDataMessages;
private List _pendingDataMessagesFrom;
private List _pendingGatewayMessages;
@@ -57,7 +58,7 @@ public class InNetMessagePool implements Service {
public InNetMessagePool(RouterContext context) {
_context = context;
_handlerJobBuilders = new HashMap();
_handlerJobBuilders = new HandlerJobBuilder[20];
_pendingDataMessages = new ArrayList(16);
_pendingDataMessagesFrom = new ArrayList(16);
_pendingGatewayMessages = new ArrayList(16);
@@ -75,11 +76,15 @@ public class InNetMessagePool implements Service {
}
public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) {
return (HandlerJobBuilder)_handlerJobBuilders.put(new Integer(i2npMessageType), builder);
HandlerJobBuilder old = _handlerJobBuilders[i2npMessageType];
_handlerJobBuilders[i2npMessageType] = builder;
return old;
}
public HandlerJobBuilder unregisterHandlerJobBuilder(int i2npMessageType) {
return (HandlerJobBuilder)_handlerJobBuilders.remove(new Integer(i2npMessageType));
HandlerJobBuilder old = _handlerJobBuilders[i2npMessageType];
_handlerJobBuilders[i2npMessageType] = null;
return old;
}
/**
@@ -132,7 +137,7 @@ public class InNetMessagePool implements Service {
shortCircuitTunnelData(messageBody, fromRouterHash);
allowMatches = false;
} else {
HandlerJobBuilder builder = (HandlerJobBuilder)_handlerJobBuilders.get(new Integer(type));
HandlerJobBuilder builder = _handlerJobBuilders[type];
if (_log.shouldLog(Log.DEBUG))
_log.debug("Add message to the inNetMessage pool - builder: " + builder
@@ -190,12 +195,14 @@ public class InNetMessagePool implements Service {
if (_log.shouldLog(Log.INFO))
_log.info("Dropping slow db lookup response: " + messageBody);
_context.statManager().addRateData("inNetPool.droppedDbLookupResponseMessage", 1, 0);
} else if (type == DatabaseLookupMessage.MESSAGE_TYPE) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Dropping netDb lookup due to throttling");
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Message " + messageBody + " expiring on "
+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
+ " was not handled by a HandlerJobBuilder - DROPPING: "
+ messageBody, new Exception("DROPPED MESSAGE"));
if (_log.shouldLog(Log.WARN))
_log.warn("Message expiring on "
+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
+ " was not handled by a HandlerJobBuilder - DROPPING: " + messageBody);
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
}
} else {
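
Swapping the HashMap for a HandlerJobBuilder[20] works because I2NP message types are small, dense integers; the array lookup avoids allocating an Integer key on every received message. The registry in isolation (interface body elided, array size taken from the diff):

interface HandlerJobBuilder { /* builds a job to handle one message type */ }

class HandlerRegistry {
    private final HandlerJobBuilder[] _builders = new HandlerJobBuilder[20];

    HandlerJobBuilder register(int i2npMessageType, HandlerJobBuilder builder) {
        HandlerJobBuilder old = _builders[i2npMessageType];
        _builders[i2npMessageType] = builder;
        return old;                         // same contract as Map.put()
    }

    HandlerJobBuilder lookup(int i2npMessageType) {
        return _builders[i2npMessageType];  // was: map.get(new Integer(type))
    }
}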

View File

@@ -92,9 +92,12 @@ class JobQueueRunner implements Runnable {
_state = 13;
long diff = _context.clock().now() - beforeUpdate;
long lag = doStart - origStartAfter;
if (lag < 0) lag = 0;
_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
_context.statManager().addRateData("jobQueue.jobRun", duration, duration);
_context.statManager().addRateData("jobQueue.jobLag", doStart - origStartAfter, 0);
_context.statManager().addRateData("jobQueue.jobLag", lag, 0);
_context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);
if (duration > 1000) {

View File

@@ -57,6 +57,8 @@ public abstract class NetworkDatabaseFacade implements Service {
public abstract void publish(LeaseSet localLeaseSet);
public abstract void unpublish(LeaseSet localLeaseSet);
public abstract void fail(Hash dbEntry);
public int getKnownRouters() { return 0; }
}

View File

@@ -24,11 +24,11 @@ class RouterThrottleImpl implements RouterThrottle {
private static int JOB_LAG_LIMIT = 2000;
/**
* Arbitrary hard limit - if we throttle our network connection this many
* times in the previous 10-20 minute period, don't accept requests to
* times in the previous 2 minute period, don't accept requests to
* participate in tunnels.
*
*/
private static int THROTTLE_EVENT_LIMIT = 300;
private static int THROTTLE_EVENT_LIMIT = 30;
private static final String PROP_MAX_TUNNELS = "router.maxParticipatingTunnels";
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
@@ -81,7 +81,7 @@ class RouterThrottleImpl implements RouterThrottle {
RateStat rs = _context.statManager().getRate("router.throttleNetworkCause");
Rate r = null;
if (rs != null)
r = rs.getRate(10*60*1000);
r = rs.getRate(60*1000);
long throttleEvents = (r != null ? r.getCurrentEventCount() + r.getLastEventCount() : 0);
if (throttleEvents > THROTTLE_EVENT_LIMIT) {
if (_log.shouldLog(Log.DEBUG))
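
The throttle check sums getCurrentEventCount() and getLastEventCount() on the 60s rate, so the window it inspects spans the previous one to two minutes; tightening the period from 10 minutes and the limit from 300 to 30 makes the router back off much sooner. A self-contained sketch of that two-bucket count (assumed names, not the RateStat API):

class ThrottleWindow {
    static final int THROTTLE_EVENT_LIMIT = 30;   // was 300
    static final long PERIOD_MS = 60 * 1000;      // was 10*60*1000

    private long _periodStart = 0;
    private long _current = 0;   // events in the running period
    private long _last = 0;      // events in the completed previous period

    synchronized void throttleEvent(long now) { roll(now); _current++; }

    /** Refuse tunnel requests when the 1-2 minute window is too hot. */
    synchronized boolean acceptTunnelRequest(long now) {
        roll(now);
        return (_current + _last) <= THROTTLE_EVENT_LIMIT;
    }

    private void roll(long now) {
        long elapsed = now - _periodStart;
        if (elapsed >= PERIOD_MS) {
            _last = (elapsed >= 2 * PERIOD_MS) ? 0 : _current;
            _current = 0;
            _periodStart = now - (elapsed % PERIOD_MS);
        }
    }
}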

View File

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.141 $ $Date: 2005/02/17 12:59:28 $";
public final static String VERSION = "0.5";
public final static String ID = "$Revision: 1.148 $ $Date: 2005/02/22 23:20:29 $";
public final static String VERSION = "0.5.0.1";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);

View File

@@ -227,7 +227,10 @@ public class ClientConnectionRunner {
}
void disconnectClient(String reason) {
_log.error("Disconnecting the client: " + reason);
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Disconnecting the client ("
+ _config.getDestination().calculateHash().toBase64().substring(0,4)
+ ": " + reason);
DisconnectMessage msg = new DisconnectMessage();
msg.setReason(reason);
try {

View File

@@ -223,7 +223,7 @@ public class ClientManager {
}
}
private static final int REQUEST_LEASESET_TIMEOUT = 20*1000;
private static final int REQUEST_LEASESET_TIMEOUT = 120*1000;
public void requestLeaseSet(Hash dest, LeaseSet ls) {
ClientConnectionRunner runner = getRunner(dest);
if (runner != null) {

View File

@@ -41,6 +41,9 @@ class RequestLeaseSetJob extends JobImpl {
_expiration = expiration;
_onCreate = onCreate;
_onFail = onFail;
ctx.statManager().createRateStat("client.requestLeaseSetSuccess", "How frequently the router requests successfully a new leaseSet?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetTimeout", "How frequently the router requests a new leaseSet but gets no reply?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetDropped", "How frequently the router requests a new leaseSet but the client drops?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
}
public String getName() { return "Request Lease Set"; }
@@ -80,6 +83,7 @@ class RequestLeaseSetJob extends JobImpl {
getContext().jobQueue().addJob(new CheckLeaseRequestStatus(getContext(), state));
return;
} catch (I2CPMessageException ime) {
getContext().statManager().addRateData("client.requestLeaseSetDropped", 1, 0);
_log.error("Error sending I2CP message requesting the lease set", ime);
state.setIsSuccessful(false);
_runner.setLeaseRequest(null);
@@ -107,9 +111,13 @@ class RequestLeaseSetJob extends JobImpl {
if (_runner.isDead()) return;
if (_req.getIsSuccessful()) {
// we didn't fail
RequestLeaseSetJob.CheckLeaseRequestStatus.this.getContext().statManager().addRateData("client.requestLeaseSetSuccess", 1, 0);
return;
} else {
_log.error("Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ")");
RequestLeaseSetJob.CheckLeaseRequestStatus.this.getContext().statManager().addRateData("client.requestLeaseSetTimeout", 1, 0);
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ") for "
+ _runner.getConfig().getDestination().calculateHash().toBase64());
_runner.disconnectClient("Took too long to request leaseSet");
if (_req.getOnFailed() != null)
RequestLeaseSetJob.this.getContext().jobQueue().addJob(_req.getOnFailed());

View File

@@ -76,6 +76,9 @@ public class GarlicMessageReceiver {
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
+ "]");
if (_log.shouldLog(Log.WARN))
_log.warn("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
+ "]", new Exception("Decrypt garlic failed"));
_context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
_context.messageHistory().messageProcessingError(message.getUniqueId(),
@@ -105,6 +108,9 @@ public class GarlicMessageReceiver {
String howLongAgo = DataHelper.formatDuration(_context.clock().now()-clove.getExpiration().getTime());
if (_log.shouldLog(Log.ERROR))
_log.error("Clove is NOT valid: id=" + clove.getCloveId()
+ " expiration " + howLongAgo + " ago");
if (_log.shouldLog(Log.WARN))
_log.warn("Clove is NOT valid: id=" + clove.getCloveId()
+ " expiration " + howLongAgo + " ago", new Exception("Invalid within..."));
_context.messageHistory().messageProcessingError(clove.getCloveId(),
clove.getData().getClass().getName(),

View File

@@ -71,19 +71,18 @@ class ExploreJob extends SearchJob {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway.getIdentity().getHash());
msg.setDontIncludePeers(getState().getAttempted());
msg.setDontIncludePeers(getState().getClosestAttempted(MAX_CLOSEST));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(replyTunnelId);
Set attempted = getState().getAttempted();
List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), NUM_CLOSEST_TO_IGNORE, attempted, getFacade().getKBuckets());
Set toSkip = new HashSet(64);
toSkip.addAll(attempted);
toSkip.addAll(peers);
msg.setDontIncludePeers(toSkip);
int available = MAX_CLOSEST - msg.getDontIncludePeers().size();
if (available > 0) {
List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), available, msg.getDontIncludePeers(), getFacade().getKBuckets());
msg.getDontIncludePeers().addAll(peers);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peers we don't want to hear about: " + toSkip);
_log.debug("Peers we don't want to hear about: " + msg.getDontIncludePeers());
return msg;
}

View File

@@ -60,6 +60,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private boolean _initialized;
/** Clock independent time of when we started up */
private long _started;
private int _knownRouters;
private StartExplorersJob _exploreJob;
private HarvesterJob _harvestJob;
/** when was the last time an exploration found something new? */
@@ -130,6 +131,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_peerSelector = new PeerSelector(_context);
_publishingLeaseSets = new HashSet(8);
_lastExploreNew = 0;
_knownRouters = 0;
_activeRequests = new HashMap(8);
_enforceNetId = DEFAULT_ENFORCE_NETID;
}
@@ -359,6 +361,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return rv;
}
public int getKnownRouters() { return _knownRouters; }
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
if (!_initialized) return;
LeaseSet ls = lookupLeaseSetLocally(key);
@@ -639,6 +643,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
+ routerInfo.getOptions().size() + " options on "
+ new Date(routerInfo.getPublished()));
_knownRouters++;
_ds.put(key, routerInfo);
synchronized (_lastSent) {
if (!_lastSent.containsKey(key))
@@ -699,6 +704,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
synchronized (_passiveSendKeys) {
_passiveSendKeys.remove(dbEntry);
}
if (isRouterInfo)
_knownRouters--;
}
public void unpublish(LeaseSet localLeaseSet) {

View File

@@ -22,7 +22,8 @@ import net.i2p.util.Log;
*/
public class RepublishLeaseSetJob extends JobImpl {
private Log _log;
private final static long REPUBLISH_LEASESET_DELAY = 60*1000; // 5 mins
private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000; // 5 mins
private final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
private Hash _dest;
private KademliaNetworkDatabaseFacade _facade;
@@ -43,7 +44,7 @@ public class RepublishLeaseSetJob extends JobImpl {
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_DELAY));
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
}
} else {
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
@@ -73,8 +74,11 @@ public class RepublishLeaseSetJob extends JobImpl {
public OnFailure(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet failed"; }
public void runJob() {
if (_log.shouldLog(Log.ERROR))
_log.error("FAILED publishing of the leaseSet for " + _dest.toBase64());
if (_log.shouldLog(Log.WARN))
_log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64());
LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
if ( (ls != null) && (ls.isCurrent(0)) )
getContext().jobQueue().addJob(new RepublishLeaseSetJob(getContext(), _facade, _dest));
}
}
}

View File

@@ -53,6 +53,8 @@ class SearchJob extends JobImpl {
private static final int SEARCH_BREDTH = 3; // 3 peers at a time
private static final int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
/** only send the 10 closest "dont tell me about" refs */
static final int MAX_CLOSEST = 10;
/**
* How long will we give each peer to reply to our search?
@@ -371,7 +373,7 @@ class SearchJob extends JobImpl {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(_state.getTarget());
msg.setFrom(replyGateway.getIdentity().getHash());
msg.setDontIncludePeers(_state.getAttempted());
msg.setDontIncludePeers(_state.getClosestAttempted(MAX_CLOSEST));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(replyTunnelId);
return msg;
@@ -386,7 +388,7 @@ class SearchJob extends JobImpl {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(_state.getTarget());
msg.setFrom(getContext().routerHash());
msg.setDontIncludePeers(_state.getAttempted());
msg.setDontIncludePeers(_state.getClosestAttempted(MAX_CLOSEST));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(null);
return msg;

View File

@@ -6,7 +6,9 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
@@ -48,6 +50,25 @@ class SearchState {
return (Set)_attemptedPeers.clone();
}
}
public Set getClosestAttempted(int max) {
synchronized (_attemptedPeers) {
return locked_getClosest(_attemptedPeers, max, _searchKey);
}
}
private Set locked_getClosest(Set peers, int max, Hash target) {
if (_attemptedPeers.size() <= max)
return new HashSet(_attemptedPeers);
TreeSet closest = new TreeSet(new XORComparator(target));
closest.addAll(_attemptedPeers);
HashSet rv = new HashSet(max);
int i = 0;
for (Iterator iter = closest.iterator(); iter.hasNext() && i < max; i++) {
rv.add(iter.next());
}
return rv;
}
public boolean wasAttempted(Hash peer) {
synchronized (_attemptedPeers) {
return _attemptedPeers.contains(peer);

View File

@@ -37,14 +37,15 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
public String getName() { return "Update Reply Found for Kademlia Search"; }
public void runJob() {
I2NPMessage message = _message;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Reply from " + _peer.toBase64()
+ " with message " + _message.getClass().getName());
+ " with message " + message.getClass().getName());
if (_message instanceof DatabaseStoreMessage) {
if (message instanceof DatabaseStoreMessage) {
long timeToReply = _state.dataFound(_peer);
DatabaseStoreMessage msg = (DatabaseStoreMessage)_message;
DatabaseStoreMessage msg = (DatabaseStoreMessage)message;
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
try {
_facade.store(msg.getKey(), msg.getLeaseSet());
@@ -71,11 +72,11 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType());
}
} else if (_message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)_message, _peer);
} else if (message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)message, _peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": WTF, reply job matched a strange message: " + _message);
_log.error(getJobId() + ": WTF, reply job matched a strange message: " + message);
return;
}

View File

@@ -199,7 +199,7 @@ class StoreJob extends JobImpl {
// _log.debug(getJobId() + ": Send store to " + router.getIdentity().getHash().toBase64());
}
sendStore(msg, router, _expiration);
sendStore(msg, router, getContext().clock().now() + STORE_TIMEOUT_MS);
}
private void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
@@ -315,7 +315,7 @@ class StoreJob extends JobImpl {
sendNext();
}
public String getName() { return "Kademlia Store Failed"; }
public String getName() { return "Kademlia Store Peer Failed"; }
}
/**

View File

@@ -0,0 +1,30 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Comparator;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
/**
* Help sort Hashes in relation to a base key using the XOR metric
*
*/
class XORComparator implements Comparator {
private Hash _base;
/**
* @param target key to compare distances with
*/
public XORComparator(Hash target) {
_base = target;
}
public int compare(Object lhs, Object rhs) {
if (lhs == null) throw new NullPointerException("LHS is null");
if (rhs == null) throw new NullPointerException("RHS is null");
if ( (lhs instanceof Hash) && (rhs instanceof Hash) ) {
byte lhsDelta[] = DataHelper.xor(((Hash)lhs).getData(), _base.getData());
byte rhsDelta[] = DataHelper.xor(((Hash)rhs).getData(), _base.getData());
return DataHelper.compareTo(lhsDelta, rhsDelta);
} else {
throw new ClassCastException(lhs.getClass().getName() + " / " + rhs.getClass().getName());
}
}
}

View File
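XORComparator above sorts by XOR distance from a base key, so the base key itself (distance zero) always sorts first and closer keys precede farther ones. A rough standalone equivalent over raw byte arrays, assuming DataHelper.compareTo treats them as unsigned big-endian values (hypothetical class, fixed-length inputs):

import java.util.*;

public class ByteXORComparator implements Comparator<byte[]> {
    private final byte[] _base;

    public ByteXORComparator(byte[] base) { _base = base; }

    public int compare(byte[] lhs, byte[] rhs) {
        // compare (lhs ^ base) with (rhs ^ base) as unsigned big-endian numbers
        for (int i = 0; i < _base.length; i++) {
            int l = (lhs[i] ^ _base[i]) & 0xff;
            int r = (rhs[i] ^ _base[i]) & 0xff;
            if (l != r)
                return l - r;
        }
        return 0;
    }

    public static void main(String[] args) {
        byte[] target = { 0x0F };
        TreeSet<byte[]> set = new TreeSet<byte[]>(new ByteXORComparator(target));
        set.add(new byte[] { 0x70 }); // distance 0x7F
        set.add(new byte[] { 0x0C }); // distance 0x03
        set.add(new byte[] { 0x0F }); // distance 0x00
        for (byte[] b : set)
            System.out.printf("%02x%n", b[0]); // prints 0f, 0c, 70
    }
}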

@@ -44,6 +44,8 @@ public class ProfileOrganizer {
private Map _wellIntegratedPeers;
/** H(routerIdentity) to PeerProfile for all peers that are not failing horribly */
private Map _notFailingPeers;
/** H(routerIdentity), containing elements in _notFailingPeers */
private List _notFailingPeersList;
/** H(routerIdentity) to PeerProfile for all peers that ARE failing horribly (but that we haven't dropped reference to yet) */
private Map _failingPeers;
/** who are we? */
@@ -91,7 +93,8 @@ public class ProfileOrganizer {
_fastPeers = new HashMap(16);
_highCapacityPeers = new HashMap(16);
_wellIntegratedPeers = new HashMap(16);
_notFailingPeers = new HashMap(16);
_notFailingPeers = new HashMap(64);
_notFailingPeersList = new ArrayList(64);
_failingPeers = new HashMap(16);
_strictCapacityOrder = new TreeSet(_comp);
_thresholdSpeedValue = 0.0d;
@@ -285,8 +288,20 @@ public class ProfileOrganizer {
*
*/
public void selectNotFailingPeers(int howMany, Set exclude, Set matches) {
selectNotFailingPeers(howMany, exclude, matches, false);
}
/**
* Return a set of Hashes for peers that are not failing, preferring ones that
* we are already talking with
*
* @param howMany how many peers to find
* @param exclude what peers to skip (may be null)
* @param matches set to store the matches in
* @param onlyNotFailing if true, don't include any high capacity peers
*/
public void selectNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing) {
if (matches.size() < howMany)
selectActiveNotFailingPeers(howMany, exclude, matches);
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing);
return;
}
/**
@@ -294,6 +309,7 @@ public class ProfileOrganizer {
* talking with.
*
*/
/*
private void selectActiveNotFailingPeers(int howMany, Set exclude, Set matches) {
if (true) {
selectAllNotFailingPeers(howMany, exclude, matches);
@@ -319,30 +335,39 @@ public class ProfileOrganizer {
selectAllNotFailingPeers(howMany, exclude, matches);
return;
}
*/
/**
* Return a set of Hashes for peers that are not failing.
*
*/
private void selectAllNotFailingPeers(int howMany, Set exclude, Set matches) {
private void selectAllNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing) {
if (matches.size() < howMany) {
int orig = matches.size();
int needed = howMany - orig;
int start = 0;
List selected = new ArrayList(needed);
synchronized (_reorganizeLock) {
for (Iterator iter = _strictCapacityOrder.iterator(); selected.size() < needed && iter.hasNext(); ) {
PeerProfile prof = (PeerProfile)iter.next();
if (matches.contains(prof.getPeer()) ||
(exclude != null && exclude.contains(prof.getPeer())) ||
_failingPeers.containsKey(prof.getPeer()) ) {
// we randomize the whole list when rebuilding it, but randomizing
// the entire list on each peer selection is a bit crazy
start = _context.random().nextInt(_notFailingPeersList.size());
for (int i = 0; i < _notFailingPeersList.size() && selected.size() < needed; i++) {
int curIndex = (i+start) % _notFailingPeersList.size();
Hash cur = (Hash)_notFailingPeersList.get(curIndex);
if (matches.contains(cur) ||
(exclude != null && exclude.contains(cur))) {
continue;
} else if (onlyNotFailing && _highCapacityPeers.containsKey(cur)) {
// we don't want the good peers, just random ones
continue;
} else {
if (isOk(prof.getPeer()))
selected.add(prof.getPeer());
if (isOk(cur))
selected.add(cur);
}
}
}
if (_log.shouldLog(Log.INFO))
_log.info("Selecting all not failing found " + (matches.size()-orig) + " new peers: " + selected);
_log.info("Selecting all not failing (strict? " + onlyNotFailing + " start=" + start
+ ") found " + selected.size() + " new peers: " + selected);
matches.addAll(selected);
}
if (matches.size() < howMany) {
@@ -408,6 +433,7 @@ public class ProfileOrganizer {
_fastPeers.clear();
_highCapacityPeers.clear();
_notFailingPeers.clear();
_notFailingPeersList.clear();
_wellIntegratedPeers.clear();
for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) {
@@ -417,7 +443,9 @@ public class ProfileOrganizer {
locked_unfailAsNecessary();
locked_promoteFastAsNecessary();
Collections.shuffle(_notFailingPeersList, _context.random());
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
+ ", capacity: " + _thresholdCapacityValue + ", speed: " + _thresholdSpeedValue + "]");
@@ -654,12 +682,11 @@ public class ProfileOrganizer {
/** called after locking the reorganizeLock */
private PeerProfile locked_getProfile(Hash peer) {
if (_notFailingPeers.containsKey(peer))
return (PeerProfile)_notFailingPeers.get(peer);
else if (_failingPeers.containsKey(peer))
return (PeerProfile)_failingPeers.get(peer);
else
return null;
PeerProfile cur = (PeerProfile)_notFailingPeers.get(peer);
if (cur != null)
return cur;
cur = (PeerProfile)_failingPeers.get(peer);
return cur;
}
/**
@@ -690,6 +717,9 @@ public class ProfileOrganizer {
// the CLI shouldn't depend upon the netDb
if (netDb == null) return true;
if (_context.router() == null) return true;
if ( (_context.shitlist() != null) && (_context.shitlist().isShitlisted(peer)) )
return false; // never select a shitlisted peer
if (null != netDb.lookupRouterInfoLocally(peer)) {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + peer.toBase64() + " is locally known, allowing its use");
@@ -714,6 +744,7 @@ public class ProfileOrganizer {
_highCapacityPeers.remove(profile.getPeer());
_wellIntegratedPeers.remove(profile.getPeer());
_notFailingPeers.remove(profile.getPeer());
_notFailingPeersList.remove(profile.getPeer());
} else {
_failingPeers.remove(profile.getPeer());
_fastPeers.remove(profile.getPeer());
@@ -721,6 +752,7 @@ public class ProfileOrganizer {
_wellIntegratedPeers.remove(profile.getPeer());
_notFailingPeers.put(profile.getPeer(), profile);
_notFailingPeersList.add(profile.getPeer());
if (_thresholdCapacityValue <= profile.getCapacityValue()) {
_highCapacityPeers.put(profile.getPeer(), profile);
if (_log.shouldLog(Log.DEBUG))

View File
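The ProfileOrganizer changes keep a flat _notFailingPeersList that is shuffled once per reorganize; each selection then scans the list circularly from a random start index, which is far cheaper than re-randomizing the whole set on every query (the comment in the hunk says as much). A minimal sketch of that scan, with hypothetical standalone names:

import java.util.*;

public class RandomStartScan {
    /** pick up to "needed" items, scanning circularly from a random offset */
    static <T> List<T> select(List<T> shuffled, int needed, Set<T> exclude, Random rng) {
        List<T> selected = new ArrayList<T>(needed);
        if (shuffled.isEmpty())
            return selected;
        int start = rng.nextInt(shuffled.size());
        for (int i = 0; i < shuffled.size() && selected.size() < needed; i++) {
            T cur = shuffled.get((i + start) % shuffled.size());
            if (!exclude.contains(cur))
                selected.add(cur);
        }
        return selected;
    }

    public static void main(String[] args) {
        List<String> peers = new ArrayList<String>(Arrays.asList("a", "b", "c", "d"));
        Collections.shuffle(peers, new Random()); // once per reorganize, not per query
        System.out.println(select(peers, 2, Collections.singleton("c"), new Random()));
    }
}

The randomness budget moves to reorganization time: Collections.shuffle runs once per pass, and each query pays only for one nextInt plus a linear walk.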

@@ -363,8 +363,8 @@ public class FragmentHandler {
}
if (removed && !_msg.getReleased()) {
noteFailure(_msg.getMessageId());
if (_log.shouldLog(Log.ERROR))
_log.error("Dropped failed fragmented message: " + _msg);
if (_log.shouldLog(Log.WARN))
_log.warn("Dropped failed fragmented message: " + _msg);
_context.statManager().addRateData("tunnel.fragmentedDropped", _msg.getFragmentCount(), _msg.getLifetime());
_msg.failed();
} else {

View File

@@ -80,6 +80,11 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
// ok, they want us to send it remotely, but that'd bust our anonymity,
// so we send it out a tunnel first
TunnelInfo out = _context.tunnelManager().selectOutboundTunnel(_client);
if (out == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("no outbound tunnel to send the client message for " + _client + ": " + msg);
return;
}
if (_log.shouldLog(Log.INFO))
_log.info("distributing inbound tunnel message back out " + out
+ " targetting " + target.toBase64().substring(0,4));

View File

@@ -17,10 +17,13 @@ class OutboundReceiver implements TunnelGateway.Receiver {
private RouterContext _context;
private Log _log;
private TunnelCreatorConfig _config;
private RouterInfo _nextHopCache;
public OutboundReceiver(RouterContext ctx, TunnelCreatorConfig cfg) {
_context = ctx;
_log = ctx.logManager().getLog(OutboundReceiver.class);
_config = cfg;
_nextHopCache = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
}
public void receiveEncrypted(byte encrypted[]) {
@@ -30,8 +33,11 @@ class OutboundReceiver implements TunnelGateway.Receiver {
if (_log.shouldLog(Log.DEBUG))
_log.debug("received encrypted, sending out " + _config + ": " + msg);
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
RouterInfo ri = _nextHopCache;
if (ri == null)
ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
if (ri != null) {
_nextHopCache = ri;
send(msg, ri);
} else {
if (_log.shouldLog(Log.DEBUG))
@@ -65,8 +71,10 @@ class OutboundReceiver implements TunnelGateway.Receiver {
if (_log.shouldLog(Log.DEBUG))
_log.debug("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
+ " successful? " + (ri != null));
if (ri != null)
if (ri != null) {
_nextHopCache = ri;
send(_msg, ri);
}
}
}

View File
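OutboundReceiver (and TunnelParticipant further down) now applies the same pattern: remember the last next-hop RouterInfo that resolved, fall back to the netDb lookup only when the cache is empty, and refresh the cache on every successful lookup. That takes the netDb out of the per-message fast path for participating tunnels, at the cost of possibly routing on a slightly stale RouterInfo. A generic sketch of the idiom (hypothetical names, assuming a nullable lookup):

public class NextHopCache<K, V> {
    public interface Lookup<K, V> { V lookup(K key); }

    private final Lookup<K, V> _db;
    private volatile V _cached; // possibly stale, but good enough to route on

    public NextHopCache(Lookup<K, V> db, K key) {
        _db = db;
        _cached = db.lookup(key); // prime eagerly; stays null if not yet known
    }

    /** the cached value if present, else a fresh lookup (cached on success) */
    public V get(K key) {
        V v = _cached;
        if (v == null) {
            v = _db.lookup(key);
            if (v != null)
                _cached = v;
        }
        return v;
    }
}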

@@ -38,6 +38,7 @@ public class TunnelDispatcher implements Service {
/** what is the date/time on which the last non-locally-created tunnel expires? */
private long _lastParticipatingExpiration;
private BloomFilterIVValidator _validator;
private LeaveTunnel _leaveJob;
/** Creates a new instance of TunnelDispatcher */
public TunnelDispatcher(RouterContext ctx) {
@@ -50,6 +51,7 @@ public class TunnelDispatcher implements Service {
_participatingConfig = new HashMap();
_lastParticipatingExpiration = 0;
_validator = null;
_leaveJob = new LeaveTunnel(ctx);
ctx.statManager().createRateStat("tunnel.participatingTunnels",
"How many tunnels are we participating in?", "Tunnels",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
@@ -176,7 +178,7 @@ public class TunnelDispatcher implements Service {
_context.statManager().addRateData("tunnel.joinParticipant", 1, 0);
if (cfg.getExpiration() > _lastParticipatingExpiration)
_lastParticipatingExpiration = cfg.getExpiration();
_context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
_leaveJob.add(cfg);
}
/**
* We are the outbound endpoint in this tunnel, and did not create it
@@ -200,7 +202,7 @@ public class TunnelDispatcher implements Service {
if (cfg.getExpiration() > _lastParticipatingExpiration)
_lastParticipatingExpiration = cfg.getExpiration();
_context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
_leaveJob.add(cfg);
}
/**
@@ -228,7 +230,7 @@ public class TunnelDispatcher implements Service {
if (cfg.getExpiration() > _lastParticipatingExpiration)
_lastParticipatingExpiration = cfg.getExpiration();
_context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
_leaveJob.add(cfg);
}
public int getParticipatingCount() {
@@ -336,10 +338,11 @@ public class TunnelDispatcher implements Service {
_context.statManager().addRateData("tunnel.dispatchEndpoint", 1, 0);
} else {
_context.messageHistory().droppedTunnelDataMessageUnknown(msg.getUniqueId(), msg.getTunnelId().getTunnelId());
if (_log.shouldLog(Log.ERROR))
_log.error("no matching participant/endpoint for id=" + msg.getTunnelId().getTunnelId()
+ ": existing = " + _participants.keySet()
+ " / " + _outboundEndpoints.keySet());
int level = (_context.router().getUptime() > 10*60*1000 ? Log.ERROR : Log.WARN);
if (_log.shouldLog(level))
_log.log(level, "no matching participant/endpoint for id=" + msg.getTunnelId().getTunnelId()
+ " expiring in " + DataHelper.formatDuration(msg.getMessageExpiration()-_context.clock().now())
+ ": existing = " + _participants.size() + " / " + _outboundEndpoints.size());
}
}
@@ -374,8 +377,9 @@ public class TunnelDispatcher implements Service {
_context.statManager().addRateData("tunnel.dispatchInbound", 1, 0);
} else {
_context.messageHistory().droppedTunnelGatewayMessageUnknown(msg.getUniqueId(), msg.getTunnelId().getTunnelId());
if (_log.shouldLog(Log.ERROR))
_log.error("no matching tunnel for id=" + msg.getTunnelId().getTunnelId()
int level = (_context.router().getUptime() > 10*60*1000 ? Log.ERROR : Log.WARN);
if (_log.shouldLog(level))
_log.log(level, "no matching tunnel for id=" + msg.getTunnelId().getTunnelId()
+ ": gateway message expiring in "
+ DataHelper.formatDuration(msg.getMessageExpiration()-_context.clock().now())
+ "/"
@@ -383,7 +387,7 @@ public class TunnelDispatcher implements Service {
+ " messageId " + msg.getUniqueId()
+ "/" + msg.getMessage().getUniqueId()
+ " messageType: " + msg.getMessage().getClass().getName()
+ " existing = " + _inboundGateways.keySet());
+ " existing = " + _inboundGateways.size());
}
long dispatchTime = _context.clock().now() - before;
@@ -423,7 +427,7 @@ public class TunnelDispatcher implements Service {
if (_log.shouldLog(Log.DEBUG))
_log.debug("dispatch outbound through " + outboundTunnel.getTunnelId()
+ ": " + msg);
if (msg.getMessageExpiration() < before) {
if (msg.getMessageExpiration() < before - Router.CLOCK_FUDGE_FACTOR) {
if (_log.shouldLog(Log.ERROR))
_log.error("why are you sending a tunnel message that expired "
+ (before-msg.getMessageExpiration()) + "ms ago? "
@@ -438,9 +442,10 @@ public class TunnelDispatcher implements Service {
} else {
_context.messageHistory().droppedTunnelGatewayMessageUnknown(msg.getUniqueId(), outboundTunnel.getTunnelId());
if (_log.shouldLog(Log.ERROR))
_log.error("no matching outbound tunnel for id=" + outboundTunnel
+ ": existing = " + _outboundGateways.keySet());
int level = (_context.router().getUptime() > 10*60*1000 ? Log.ERROR : Log.WARN);
if (_log.shouldLog(level))
_log.log(level, "no matching outbound tunnel for id=" + outboundTunnel
+ ": existing = " + _outboundGateways.size());
}
long dispatchTime = _context.clock().now() - before;
@@ -473,16 +478,59 @@ public class TunnelDispatcher implements Service {
public void renderStatusHTML(Writer out) throws IOException {}
private class LeaveTunnel extends JobImpl {
private HopConfig _config;
private List _configs;
private List _times;
public LeaveTunnel(RouterContext ctx, HopConfig config) {
public LeaveTunnel(RouterContext ctx) {
super(ctx);
_config = config;
getTiming().setStartAfter(config.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR);
_configs = new ArrayList(128);
_times = new ArrayList(128);
}
public void add(HopConfig cfg) {
Long dropTime = new Long(cfg.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR);
synchronized (LeaveTunnel.this) {
_configs.add(cfg);
_times.add(dropTime);
}
long oldAfter = getTiming().getStartAfter();
if (oldAfter < getContext().clock().now()) {
getTiming().setStartAfter(dropTime.longValue());
getContext().jobQueue().addJob(LeaveTunnel.this);
} else if (oldAfter >= dropTime.longValue()) {
getTiming().setStartAfter(dropTime.longValue());
} else {
// already scheduled for the future, and before this expiration
}
}
public String getName() { return "Leave participant"; }
public void runJob() {
remove(_config);
HopConfig cur = null;
Long nextTime = null;
long now = getContext().clock().now();
synchronized (LeaveTunnel.this) {
if (_configs.size() <= 0)
return;
nextTime = (Long)_times.get(0);
if (nextTime.longValue() <= now) {
cur = (HopConfig)_configs.remove(0);
_times.remove(0);
if (_times.size() > 0)
nextTime = (Long)_times.get(0);
else
nextTime = null;
}
}
if (cur != null)
remove(cur);
if (nextTime != null) {
getTiming().setStartAfter(nextTime.longValue());
getContext().jobQueue().addJob(LeaveTunnel.this);
}
}
}
}

View File
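Two things happen in the TunnelDispatcher hunk: unknown-tunnel drops log at WARN rather than ERROR during the first ten minutes of uptime (they are expected while the router resyncs), and the per-tunnel LeaveTunnel jobs collapse into one shared job holding parallel FIFO lists of configs and drop times. add() only reschedules when the job is idle or the new expiration is sooner; runJob() pops at most one expired entry, removes it, and re-arms itself for the next. A compact sketch of that self-rescheduling drain, using java.util.Timer in place of the router's job queue (hypothetical class):

import java.util.*;

public class BatchedExpirer<T> {
    private final LinkedList<T> _items = new LinkedList<T>();
    private final LinkedList<Long> _times = new LinkedList<Long>();
    private final Timer _timer = new Timer(true);

    public synchronized void add(T item, long dropTime) {
        _items.add(item);
        _times.add(dropTime);
        if (_times.size() == 1) // idle: arm for this entry
            schedule(dropTime);
        // entries arrive in roughly increasing expiration order, so a run
        // already scheduled for an earlier entry will re-arm for later ones
    }

    private void schedule(long when) {
        long delay = Math.max(0, when - System.currentTimeMillis());
        _timer.schedule(new TimerTask() { public void run() { expire(); } }, delay);
    }

    void expire() {
        T cur = null;
        Long next = null;
        synchronized (this) {
            if (_times.isEmpty()) return;
            if (_times.getFirst().longValue() <= System.currentTimeMillis()) {
                cur = _items.removeFirst();
                _times.removeFirst();
            }
            if (!_times.isEmpty())
                next = _times.getFirst();
        }
        if (cur != null)
            onExpire(cur); // the real job calls TunnelDispatcher.remove(cfg)
        if (next != null)
            schedule(next.longValue());
    }

    protected void onExpire(T item) { System.out.println("dropped " + item); }
}

One job and one pair of lists replace a queued job per participating tunnel, which is where the job-queue churn reduction comes from.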

@@ -27,6 +27,7 @@ public class TunnelParticipant {
private InboundEndpointProcessor _inboundEndpointProcessor;
private InboundMessageDistributor _inboundDistributor;
private FragmentHandler _handler;
private RouterInfo _nextHopCache;
public TunnelParticipant(RouterContext ctx, HopConfig config, HopProcessor processor) {
this(ctx, config, processor, null);
@@ -44,6 +45,10 @@ public class TunnelParticipant {
_inboundEndpointProcessor = inEndProc;
if (inEndProc != null)
_inboundDistributor = new InboundMessageDistributor(ctx, inEndProc.getDestination());
if ( (_config != null) && (_config.getSendTo() != null) ) {
_nextHopCache = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
}
}
public void dispatch(TunnelDataMessage msg, Hash recvFrom) {
@@ -62,7 +67,9 @@ public class TunnelParticipant {
if ( (_config != null) && (_config.getSendTo() != null) ) {
_config.incrementProcessedMessages();
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
RouterInfo ri = _nextHopCache;
if (ri == null)
ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
if (ri != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send off to nextHop directly (" + _config.getSendTo().toBase64().substring(0,4)
@@ -115,6 +122,7 @@ public class TunnelParticipant {
public void runJob() {
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
if (ri != null) {
_nextHopCache = ri;
send(_config, _msg, ri);
} else {
if (_log.shouldLog(Log.ERROR))
@@ -134,6 +142,7 @@ public class TunnelParticipant {
public void runJob() {
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
if (ri != null) {
_nextHopCache = ri;
if (_log.shouldLog(Log.ERROR))
_log.error("Lookup the nextHop (" + _config.getSendTo().toBase64().substring(0,4)
+ " failed, but we found it!! where do we go for " + _config + "? msg dropped: " + _msg);

View File

@@ -8,13 +8,11 @@ import net.i2p.router.tunnel.TunnelCreatorConfig;
class ExpireJob extends JobImpl {
private TunnelPool _pool;
private TunnelCreatorConfig _cfg;
private Object _buildToken;
private boolean _leaseUpdated;
public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool, Object buildToken) {
public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
super(ctx);
_pool = pool;
_cfg = cfg;
_buildToken = buildToken;
_leaseUpdated = false;
// give 'em some extra time before dropping 'em
getTiming().setStartAfter(cfg.getExpiration()); // + Router.CLOCK_FUDGE_FACTOR);

View File

@@ -18,7 +18,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
if (length < 0)
return null;
HashSet matches = new HashSet(length);
ctx.profileOrganizer().selectNotFailingPeers(length, null, matches);
ctx.profileOrganizer().selectNotFailingPeers(length, null, matches, true);
matches.remove(ctx.routerHash());
ArrayList rv = new ArrayList(matches);

View File

@@ -13,17 +13,13 @@ import net.i2p.util.Log;
class OnCreatedJob extends JobImpl {
private Log _log;
private TunnelPool _pool;
private Object _buildToken;
private PooledTunnelCreatorConfig _cfg;
private boolean _fake;
public OnCreatedJob(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, boolean fake, Object buildToken) {
public OnCreatedJob(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg) {
super(ctx);
_log = ctx.logManager().getLog(OnCreatedJob.class);
_pool = pool;
_cfg = cfg;
_fake = fake;
_buildToken = buildToken;
}
public String getName() { return "Tunnel built"; }
public void runJob() {
@@ -34,17 +30,16 @@ class OnCreatedJob extends JobImpl {
getContext().tunnelDispatcher().joinOutbound(_cfg);
}
_pool.addTunnel(_cfg);
TestJob testJob = (_cfg.getLength() > 1 ? new TestJob(getContext(), _cfg, _pool, _buildToken) : null);
RebuildJob rebuildJob = (_fake ? null : new RebuildJob(getContext(), _cfg, _pool, _buildToken));
ExpireJob expireJob = new ExpireJob(getContext(), _cfg, _pool, _buildToken);
TestJob testJob = (_cfg.getLength() > 1 ? new TestJob(getContext(), _cfg, _pool) : null);
RebuildJob rebuildJob = new RebuildJob(getContext(), _cfg, _pool);
ExpireJob expireJob = new ExpireJob(getContext(), _cfg, _pool);
_cfg.setTunnelPool(_pool);
_cfg.setTestJob(testJob);
_cfg.setRebuildJob(rebuildJob);
_cfg.setExpireJob(expireJob);
if (_cfg.getLength() > 1) // no need to test 0 hop tunnels
getContext().jobQueue().addJob(testJob);
if (!_fake) // if we built a 0 hop tunnel in response to a failure, don't rebuild
getContext().jobQueue().addJob(rebuildJob);
getContext().jobQueue().addJob(rebuildJob); // always try to rebuild (ignored if too many)
getContext().jobQueue().addJob(expireJob);
}
}

View File

@@ -11,20 +11,18 @@ import net.i2p.router.tunnel.TunnelCreatorConfig;
*/
class RebuildJob extends JobImpl {
private TunnelPool _pool;
private Object _buildToken;
private TunnelCreatorConfig _cfg;
public RebuildJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool, Object buildToken) {
public RebuildJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
super(ctx);
_pool = pool;
_cfg = cfg;
_buildToken = buildToken;
long rebuildOn = cfg.getExpiration() - pool.getSettings().getRebuildPeriod();
rebuildOn -= ctx.random().nextInt(pool.getSettings().getRebuildPeriod());
rebuildOn -= ctx.random().nextInt(pool.getSettings().getRebuildPeriod()*2);
getTiming().setStartAfter(rebuildOn);
}
public String getName() { return "Rebuild tunnel"; }
public void runJob() {
_pool.getBuilder().buildTunnel(getContext(), _pool, _buildToken);
_pool.getBuilder().buildTunnel(getContext(), _pool);
}
}

View File
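The RebuildJob timing change doubles the random component: start one rebuild period before expiration, minus a further random 0..2 periods. With an assumed default period of 60s, that schedules the rebuild 1 to 3 minutes ahead of expiration. The arithmetic in isolation (hypothetical standalone form):

import java.util.Random;

public class RebuildTime {
    /** start between 1x and 3x the rebuild period before expiration */
    static long rebuildOn(long expiration, int rebuildPeriodMs, Random rng) {
        return expiration - rebuildPeriodMs - rng.nextInt(rebuildPeriodMs * 2);
    }

    public static void main(String[] args) {
        long expiration = System.currentTimeMillis() + 10 * 60 * 1000;
        long on = rebuildOn(expiration, 60 * 1000, new Random());
        System.out.println((expiration - on) / 1000 + "s early"); // 60..179s
    }
}

The jitter spreads rebuilds out so a pool's tunnels do not all come up for replacement in the same instant.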

@@ -43,11 +43,12 @@ public class RequestTunnelJob extends JobImpl {
private TunnelCreatorConfig _config;
private long _lastSendTime;
private boolean _isFake;
private boolean _isExploratory;
static final int HOP_REQUEST_TIMEOUT = 30*1000;
private static final int LOOKUP_TIMEOUT = 10*1000;
public RequestTunnelJob(RouterContext ctx, TunnelCreatorConfig cfg, Job onCreated, Job onFailed, int hop, boolean isFake) {
public RequestTunnelJob(RouterContext ctx, TunnelCreatorConfig cfg, Job onCreated, Job onFailed, int hop, boolean isFake, boolean isExploratory) {
super(ctx);
_log = ctx.logManager().getLog(RequestTunnelJob.class);
_config = cfg;
@@ -58,13 +59,16 @@ public class RequestTunnelJob extends JobImpl {
_lookups = 0;
_lastSendTime = 0;
_isFake = isFake;
_isExploratory = isExploratory;
ctx.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.receiveRejectionTransient", "How often we are rejected due to transient overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.receiveRejectionBandwidth", "How often we are rejected due to bandwidth overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.receiveRejectionCritical", "How often we are rejected due to critical failure?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildFailure", "How often we fail to build a tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildSuccess", "How often we succeed building a tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildFailure", "How often we fail to build a non-exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildExploratoryFailure", "How often we fail to build an exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildSuccess", "How often we succeed building a non-exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildExploratorySuccess", "How often we succeed building an exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_log.shouldLog(Log.DEBUG))
_log.debug("Requesting hop " + hop + " in " + cfg);
@@ -108,7 +112,7 @@ public class RequestTunnelJob extends JobImpl {
+ _currentConfig.getReceiveTunnel() + ": " + _config);
// inbound tunnel with more than just ourselves
RequestTunnelJob req = new RequestTunnelJob(getContext(), _config, _onCreated,
_onFailed, _currentHop - 1, _isFake);
_onFailed, _currentHop - 1, _isFake, _isExploratory);
if (_isFake)
req.runJob();
else
@@ -257,19 +261,25 @@ public class RequestTunnelJob extends JobImpl {
_log.info("tunnel building failed: " + _config + " at hop " + _currentHop);
if (_onFailed != null)
getContext().jobQueue().addJob(_onFailed);
getContext().statManager().addRateData("tunnel.buildFailure", 1, 0);
if (_isExploratory)
getContext().statManager().addRateData("tunnel.buildExploratoryFailure", 1, 0);
else
getContext().statManager().addRateData("tunnel.buildFailure", 1, 0);
}
private void peerSuccess() {
getContext().profileManager().tunnelJoined(_currentPeer.getIdentity().calculateHash(),
getContext().clock().now() - _lastSendTime);
if (_currentHop > 0) {
RequestTunnelJob j = new RequestTunnelJob(getContext(), _config, _onCreated, _onFailed, _currentHop - 1, _isFake);
RequestTunnelJob j = new RequestTunnelJob(getContext(), _config, _onCreated, _onFailed, _currentHop - 1, _isFake, _isExploratory);
getContext().jobQueue().addJob(j);
} else {
if (_onCreated != null)
getContext().jobQueue().addJob(_onCreated);
getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0);
if (_isExploratory)
getContext().statManager().addRateData("tunnel.buildExploratorySuccess", 1, 0);
else
getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0);
}
}

View File

@@ -24,22 +24,22 @@ import net.i2p.util.Log;
class TestJob extends JobImpl {
private Log _log;
private TunnelPool _pool;
private Object _buildToken;
private PooledTunnelCreatorConfig _cfg;
private boolean _found;
/** base to randomize the test delay on */
private static final int TEST_DELAY = 60*1000;
public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool, Object buildToken) {
public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) {
super(ctx);
_log = ctx.logManager().getLog(TestJob.class);
_pool = pool;
_cfg = cfg;
_buildToken = buildToken;
getTiming().setStartAfter(getDelay() + ctx.clock().now());
ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels",
@@ -132,7 +132,10 @@ class TestJob extends JobImpl {
}
private void testFailed(long timeToFail) {
getContext().statManager().addRateData("tunnel.testFailedTime", timeToFail, timeToFail);
if (_pool.getSettings().isExploratory())
getContext().statManager().addRateData("tunnel.testExploratoryFailedTime", timeToFail, timeToFail);
else
getContext().statManager().addRateData("tunnel.testFailedTime", timeToFail, timeToFail);
_cfg.tunnelFailed();
if (_log.shouldLog(Log.WARN))
_log.warn("Tunnel test failed in " + timeToFail + "ms: " + _cfg);

View File

@@ -27,50 +27,42 @@ public class TunnelBuilder {
* jobs are built. This call does not block.
*
*/
public void buildTunnel(RouterContext ctx, TunnelPool pool, Object poolToken) {
buildTunnel(ctx, pool, false, poolToken);
public void buildTunnel(RouterContext ctx, TunnelPool pool) {
buildTunnel(ctx, pool, false);
}
public void buildTunnel(RouterContext ctx, TunnelPool pool, boolean fake, Object poolToken) {
if (!pool.keepBuilding(poolToken))
return;
public void buildTunnel(RouterContext ctx, TunnelPool pool, boolean zeroHop) {
// this is probably overkill (ya think?)
pool.refreshSettings();
PooledTunnelCreatorConfig cfg = configTunnel(ctx, pool, fake);
if ( (cfg == null) && (!fake) ) {
RetryJob j = new RetryJob(ctx, pool, poolToken);
PooledTunnelCreatorConfig cfg = configTunnel(ctx, pool, zeroHop);
if (cfg == null) {
RetryJob j = new RetryJob(ctx, pool);
j.getTiming().setStartAfter(ctx.clock().now() + ctx.random().nextInt(30*1000));
ctx.jobQueue().addJob(j);
return;
}
OnCreatedJob onCreated = new OnCreatedJob(ctx, pool, cfg, fake, poolToken);
RetryJob onFailed= (fake ? null : new RetryJob(ctx, pool, poolToken));
OnCreatedJob onCreated = new OnCreatedJob(ctx, pool, cfg);
RetryJob onFailed= (zeroHop ? null : new RetryJob(ctx, pool));
// queue up a job to request the endpoint to join the tunnel, which then
// requeues up another job for earlier hops, etc, until it reaches the
// gateway. after the gateway is confirmed, onCreated is fired
RequestTunnelJob req = new RequestTunnelJob(ctx, cfg, onCreated, onFailed, cfg.getLength()-1, fake);
if (fake) // lets get it done inline, as we /need/ it asap
RequestTunnelJob req = new RequestTunnelJob(ctx, cfg, onCreated, onFailed, cfg.getLength()-1, zeroHop, pool.getSettings().isExploratory());
if (zeroHop || (cfg.getLength() <= 1) ) // lets get it done inline, as we /need/ it asap
req.runJob();
else
ctx.jobQueue().addJob(req);
}
private PooledTunnelCreatorConfig configTunnel(RouterContext ctx, TunnelPool pool, boolean fake) {
private PooledTunnelCreatorConfig configTunnel(RouterContext ctx, TunnelPool pool, boolean zeroHop) {
Log log = ctx.logManager().getLog(TunnelBuilder.class);
TunnelPoolSettings settings = pool.getSettings();
long expiration = ctx.clock().now() + settings.getDuration();
List peers = null;
long failures = countFailures(ctx);
boolean failing = (failures > 5) && (pool.getSettings().getAllowZeroHop());
boolean failsafe = false;
if (failing && (ctx.random().nextInt(100) < failures) )
failsafe = true;
if (fake || failsafe) {
if (zeroHop) {
peers = new ArrayList(1);
peers.add(ctx.routerHash());
if ( (failsafe) && (log.shouldLog(Log.WARN)) )
if (log.shouldLog(Log.WARN))
log.warn("Building failsafe tunnel for " + pool);
} else {
peers = pool.getSelector().selectPeers(ctx, settings);
@@ -80,10 +72,10 @@ public class TunnelBuilder {
// the pool is refusing 0 hop tunnels
if (peers == null) {
if (log.shouldLog(Log.ERROR))
log.error("No peers to put in the new tunnel! selectPeers returned null! boo, hiss! fake=" + fake);
log.error("No peers to put in the new tunnel! selectPeers returned null! boo, hiss! fake=" + zeroHop);
} else {
if (log.shouldLog(Log.ERROR))
log.error("No peers to put in the new tunnel! selectPeers returned an empty list?! fake=" + fake);
log.error("No peers to put in the new tunnel! selectPeers returned an empty list?! fake=" + zeroHop);
}
return null;
}
@@ -108,36 +100,20 @@ public class TunnelBuilder {
return cfg;
}
private long countFailures(RouterContext ctx) {
RateStat rs = ctx.statManager().getRate("tunnel.testFailedTime");
if (rs == null)
return 0;
Rate r = rs.getRate(10*60*1000);
if (r == null)
return 0;
else
return r.getCurrentEventCount();
}
/**
* If the building fails, try, try again.
*
*/
private class RetryJob extends JobImpl {
private TunnelPool _pool;
private Object _buildToken;
public RetryJob(RouterContext ctx, TunnelPool pool, Object buildToken) {
public RetryJob(RouterContext ctx, TunnelPool pool) {
super(ctx);
_pool = pool;
_buildToken = buildToken;
}
public String getName() { return "tunnel create failed"; }
public String getName() { return "Tunnel create failed"; }
public void runJob() {
// yikes, nothing left, lets get some backup (if we're allowed)
if ( (_pool.selectTunnel() == null) && (_pool.getSettings().getAllowZeroHop()) )
_pool.buildFake();
buildTunnel(getContext(), _pool, _buildToken);
_pool.refreshBuilders();
}
}
}

View File
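TunnelBuilder drops the token bookkeeping and the probabilistic failsafe path: the caller now states explicitly whether it wants a zero-hop tunnel, a failed configTunnel() always yields a RetryJob delayed by a random interval of up to 30s, and zero-hop or single-hop requests run inline because the pool needs them immediately. A sketch of the retry-with-jitter idiom (hypothetical names; java.util.Timer stands in for the router's job queue):

import java.util.Random;
import java.util.Timer;
import java.util.TimerTask;

public class RetryWithJitter {
    interface Task { boolean attempt(); }

    private static final Timer TIMER = new Timer(true);

    /** try once; on failure, try again after a random 0..maxDelayMs pause */
    static void runWithRetry(final Task task, final int maxDelayMs, final Random rng) {
        if (task.attempt())
            return;
        int delay = rng.nextInt(maxDelayMs); // jitter: failures don't retry in lockstep
        TIMER.schedule(new TimerTask() {
            public void run() { runWithRetry(task, maxDelayMs, rng); }
        }, delay);
    }

    public static void main(String[] args) throws InterruptedException {
        final Random rng = new Random();
        runWithRetry(new Task() {
            public boolean attempt() { return rng.nextInt(4) == 0; }
        }, 500, rng);
        Thread.sleep(5000); // let the daemon timer finish the demo
    }
}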

@@ -26,10 +26,14 @@ abstract class TunnelPeerSelector {
if (settings.getLengthVariance() != 0) {
int skew = settings.getLengthVariance();
if (skew > 0)
length += ctx.random().nextInt(skew);
length += ctx.random().nextInt(skew+1);
else {
skew = 0 - skew;
length += ctx.random().nextInt(2*skew) - skew;
skew = 1 - skew;
int off = ctx.random().nextInt(skew);
if (ctx.random().nextBoolean())
length += off;
else
length -= off;
}
if (length < 0)
length = 0;

View File
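The variance fix above is an off-by-one on java.util.Random.nextInt(n), whose result is exclusive of n: the old nextInt(skew) could never add the full configured variance. Positive variance now uses nextInt(skew+1); negative variance picks an offset of at most |skew| and applies it in a random direction. A quick check of the new bounds (standalone, assuming java.util.Random semantics):

import java.util.Random;

public class VarianceDemo {
    static int applyVariance(int length, int variance, Random rng) {
        if (variance > 0) {
            length += rng.nextInt(variance + 1);      // 0..variance inclusive
        } else if (variance < 0) {
            int off = rng.nextInt(1 - variance);      // 0..|variance| inclusive
            length += rng.nextBoolean() ? off : -off; // skew either direction
        }
        return Math.max(0, length);
    }

    public static void main(String[] args) {
        Random rng = new Random();
        // with length=2, variance=-1 this prints values from {1, 2, 3}
        for (int i = 0; i < 5; i++)
            System.out.println(applyVariance(2, -1, rng));
    }
}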

@@ -30,14 +30,6 @@ public class TunnelPool {
private boolean _alive;
private long _lifetimeProcessed;
/**
* list of pool tokens (Object) passed around during building/rebuilding/etc.
* if/when the token is removed from this list, that sequence of building/rebuilding/etc
* should cease (though others may continue).
*
*/
private List _tokens;
public TunnelPool(RouterContext ctx, TunnelPoolManager mgr, TunnelPoolSettings settings, TunnelPeerSelector sel, TunnelBuilder builder) {
_context = ctx;
_log = ctx.logManager().getLog(TunnelPool.class);
@@ -46,7 +38,6 @@ public class TunnelPool {
_tunnels = new ArrayList(settings.getLength() + settings.getBackupQuantity());
_peerSelector = sel;
_builder = builder;
_tokens = new ArrayList(settings.getBackupQuantity() + settings.getQuantity());
_alive = false;
_lifetimeProcessed = 0;
refreshSettings();
@@ -70,75 +61,39 @@ public class TunnelPool {
}
public void shutdown() {
_alive = false;
synchronized (_tokens) { _tokens.clear(); }
}
private int countUsableTunnels() {
int valid = 0;
synchronized (_tunnels) {
for (int i = 0; i < _tunnels.size(); i++) {
TunnelInfo info = (TunnelInfo)_tunnels.get(i);
if (info.getExpiration() > _context.clock().now() + 3*_settings.getRebuildPeriod())
valid++;
}
}
return valid;
}
private int refreshBuilders() {
/**
* Fire up as many buildTunnel tasks as necessary, returning how many
* were added
*
*/
int refreshBuilders() {
if (!_alive) return 0;
// only start up new build tasks if we need more of 'em
int target = _settings.getQuantity() + _settings.getBackupQuantity();
int oldTokenCount = 0;
List newTokens = null;
synchronized (_tokens) {
oldTokenCount = _tokens.size();
while (_tokens.size() > target)
_tokens.remove(0);
if (_tokens.size() < target) {
int wanted = target - _tokens.size();
newTokens = new ArrayList(wanted);
for (int i = 0; i < wanted; i++) {
Object token = new Object();
newTokens.add(token);
_tokens.add(token);
}
}
}
int usableTunnels = countUsableTunnels();
if (newTokens != null) {
if (_log.shouldLog(Log.INFO))
_log.info(toString() + ": refreshing builders, previously had " + oldTokenCount
if ( (target > usableTunnels) && (_log.shouldLog(Log.INFO)) )
_log.info(toString() + ": refreshing builders, previously had " + usableTunnels
+ ", want a total of " + target + ", creating "
+ newTokens.size() + " new ones.");
+ (target-usableTunnels) + " new ones.");
for (int i = 0; i < newTokens.size(); i++) {
Object token = newTokens.get(i);
if (_log.shouldLog(Log.DEBUG))
_log.debug(toString() + ": Building a tunnel with the token " + token);
_builder.buildTunnel(_context, this, token);
}
return newTokens.size();
} else {
return 0;
}
for (int i = usableTunnels; i < target; i++)
_builder.buildTunnel(_context, this);
return (target > usableTunnels ? target-usableTunnels : 0);
}
/** do we still need this sequence of build/rebuild/etc to continue? */
public boolean keepBuilding(Object token) {
boolean connected = true;
boolean rv = false;
int remaining = 0;
int wanted = _settings.getQuantity() + _settings.getBackupQuantity();
if ( (_settings.getDestination() != null) && (!_context.clientManager().isLocal(_settings.getDestination())) )
connected = false;
synchronized (_tokens) {
if (!connected) {
// client disconnected, so stop rebuilding this series
_tokens.remove(token);
rv = false;
} else {
rv = _tokens.contains(token);
}
remaining = _tokens.size();
}
if (remaining <= 0) {
_manager.removeTunnels(_settings.getDestination());
}
if (!rv) {
if (_log.shouldLog(Log.INFO))
_log.info(toString() + ": keepBuilding does NOT want building to continue (want "
+ wanted + ", have " + remaining);
}
return rv;
}
void refreshSettings() {
@@ -183,7 +138,7 @@ public class TunnelPool {
}
if (_alive && _settings.getAllowZeroHop())
buildFake();
buildFallback();
if (allowRecurseOnFail)
return selectTunnel(false);
else
@@ -217,6 +172,8 @@ public class TunnelPool {
}
}
int getTunnelCount() { synchronized (_tunnels) { return _tunnels.size(); } }
public TunnelBuilder getBuilder() { return _builder; }
public TunnelPoolSettings getSettings() { return _settings; }
public void setSettings(TunnelPoolSettings settings) {
@@ -247,6 +204,8 @@ public class TunnelPool {
if (ls != null)
_context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
refreshBuilders();
}
public void removeTunnel(TunnelInfo info) {
@@ -271,15 +230,23 @@ public class TunnelPool {
_log.warn(toString() + ": unable to build a new leaseSet on removal (" + remaining
+ " remaining), request a new tunnel");
if (_settings.getAllowZeroHop())
buildFake();
buildFallback();
}
}
boolean connected = true;
if ( (_settings.getDestination() != null) && (!_context.clientManager().isLocal(_settings.getDestination())) )
connected = false;
if ( (getTunnelCount() <= 0) && (!connected) ) {
_manager.removeTunnels(_settings.getDestination());
return;
}
refreshBuilders();
}
public void tunnelFailed(PooledTunnelCreatorConfig cfg) {
if (_log.shouldLog(Log.WARN))
_log.warn(toString() + ": Tunnel failed: " + cfg, new Exception("failure cause"));
_log.warn(toString() + ": Tunnel failed: " + cfg);
int remaining = 0;
LeaseSet ls = null;
synchronized (_tunnels) {
@@ -298,7 +265,8 @@ public class TunnelPool {
if (_log.shouldLog(Log.WARN))
_log.warn(toString() + ": unable to build a new leaseSet on failure (" + remaining
+ " remaining), request a new tunnel");
buildFake(false);
if (remaining < _settings.getBackupQuantity() + _settings.getQuantity())
buildFallback();
}
}
refreshBuilders();
@@ -320,24 +288,25 @@ public class TunnelPool {
if (_log.shouldLog(Log.WARN))
_log.warn(toString() + ": unable to build a new leaseSet on expire (" + remaining
+ " remaining), request a new tunnel");
if (_settings.getAllowZeroHop())
buildFake();
if ( (remaining < _settings.getBackupQuantity() + _settings.getQuantity())
&& (_settings.getAllowZeroHop()) )
buildFallback();
}
}
}
void buildFake() { buildFake(true); }
void buildFake(boolean zeroHop) {
void buildFallback() {
int quantity = _settings.getBackupQuantity() + _settings.getQuantity();
int usable = countUsableTunnels();
if (usable >= quantity) return;
if (_log.shouldLog(Log.INFO))
_log.info(toString() + ": building a fake tunnel (allow zeroHop? " + zeroHop + ")");
Object tempToken = new Object();
synchronized (_tokens) {
_tokens.add(tempToken);
}
_builder.buildTunnel(_context, this, zeroHop, tempToken);
synchronized (_tokens) {
_tokens.remove(tempToken);
}
_log.info(toString() + ": building a fallback tunnel (usable: " + usable + " needed: " + quantity + ")");
if ( (usable == 0) && (_settings.getAllowZeroHop()) )
_builder.buildTunnel(_context, this, true);
else
_builder.buildTunnel(_context, this);
refreshBuilders();
}
/**

View File
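With the tokens and keepBuilding() gone, refreshBuilders() reduces to a reconciliation loop: count the usable tunnels (those expiring no sooner than three rebuild periods out), compare against quantity plus backup quantity, and fire one buildTunnel() per missing slot. The core arithmetic (hypothetical standalone form):

public class BuilderMath {
    /** how many build tasks to start so quantity+backup tunnels stay usable */
    static int buildsNeeded(int quantity, int backup, int usable) {
        int target = quantity + backup;
        return (target > usable) ? target - usable : 0;
    }

    public static void main(String[] args) {
        System.out.println(buildsNeeded(2, 1, 1)); // 2 builds to reach 3 usable
    }
}

Because the count is recomputed from live tunnel state on every call (from addTunnel, removeTunnel, tunnelFailed, and buildFallback), there is no token list to leak or to fall out of sync with reality.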

@@ -26,12 +26,14 @@ import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelManagerFacade;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.util.Log;
/**
*
*/
public class TunnelPoolManager implements TunnelManagerFacade {
private RouterContext _context;
private Log _log;
/** Hash (destination) to TunnelPool */
private Map _clientInboundPools;
/** Hash (destination) to TunnelPool */
@@ -41,6 +43,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public TunnelPoolManager(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(TunnelPoolManager.class);
HandlerJobBuilder builder = new HandleTunnelCreateMessageJob.Builder(ctx);
ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, builder);
@@ -63,7 +66,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public TunnelInfo selectInboundTunnel() {
TunnelInfo info = _inboundExploratory.selectTunnel();
if (info == null) {
_inboundExploratory.buildFake();
_inboundExploratory.buildFallback();
// still can be null, but probably not
info = _inboundExploratory.selectTunnel();
}
@@ -80,6 +83,9 @@ public class TunnelPoolManager implements TunnelManagerFacade {
if (pool != null) {
return pool.selectTunnel();
}
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "wtf, want the inbound tunnel for " + destination.calculateHash().toBase64() +
" but there isn't a pool?");
return null;
}
@@ -87,7 +93,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public TunnelInfo selectOutboundTunnel() {
TunnelInfo info = _outboundExploratory.selectTunnel();
if (info == null) {
_outboundExploratory.buildFake();
_outboundExploratory.buildFallback();
// still can be null, but probably not
info = _outboundExploratory.selectTunnel();
}
@@ -224,6 +230,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public void removeTunnels(Hash destination) {
if (destination == null) return;
if (_context.clientManager().isLocal(destination)) {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "wtf, why are you removing the pool for " + destination.toBase64(), new Exception("i did it"));
}
TunnelPool inbound = null;
TunnelPool outbound = null;
synchronized (_clientInboundPools) {
@@ -268,7 +278,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
public String getName() { return "Bootstrap tunnel pool"; }
public void runJob() {
_pool.buildFake(false);
_pool.buildFallback();
}
}