forked from I2P_Developers/i2p.i2p
Compare commits: i2p-1.9.0...programdat
35 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 1c67f06ffc |  |
|  | 0ce816acd1 |  |
|  | 2fd01c8390 |  |
|  | 16eb1577f9 |  |
|  | ecd58a833c |  |
|  | b533ccaabd |  |
|  | 0f560139f3 |  |
|  | b610b7a695 |  |
|  | 26f882edd5 |  |
|  | f00e020f2b |  |
|  | edd0dd0b0d |  |
|  | 031fa45eb8 |  |
|  | a2eee5a673 |  |
|  | 45c160f27a |  |
|  | 7ba59b4338 |  |
|  | 554b17fe9a |  |
|  | 64632eed4a |  |
|  | 49299f3f28 |  |
|  | 00774590b0 |  |
|  | 52b640b582 |  |
|  | 911e69e3ae |  |
|  | d809d6653d |  |
|  | 8df81fc0a1 |  |
|  | ed76829562 |  |
|  | 4f4044c3f0 |  |
|  | 3a4bfc9c07 |  |
|  | b25c207e9a |  |
|  | fcae43547b |  |
|  | b34b0cc399 |  |
|  | f4875d12fa |  |
|  | 2f06e9bebf |  |
|  | 9b6dde008d |  |
|  | 6ddaa72a86 |  |
|  | d064de1913 |  |
|  | 97013f8874 |  |
@@ -3,10 +3,15 @@ package net.i2p.router.web;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.atomic.AtomicBoolean;

import net.i2p.I2PAppContext;
import net.i2p.app.ClientAppManager;
import net.i2p.app.NotificationService;
import net.i2p.router.RouterContext;
import net.i2p.router.util.EventLog;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer2;
import net.i2p.util.SystemVersion;

/**
* Periodic check
@@ -15,16 +20,17 @@ import net.i2p.util.SimpleTimer2;
* In routerconsole because java.lang.management is
* not available in Android.
*
* @since 0.9.55
* @since 0.9.55, public since 0.9.56
*/
class DeadlockDetector extends SimpleTimer2.TimedEvent {
public class DeadlockDetector extends SimpleTimer2.TimedEvent {

private final I2PAppContext _context;
private final RouterContext _context;
private final Log _log;
private static final String PROP_INTERVAL = "router.deadlockDetectIntervalHours";
private static final long DEFAULT_INTERVAL = 24;
private static final long DEFAULT_INTERVAL = SystemVersion.isSlow() ? 12 : 4;
private static final AtomicBoolean _isDeadlocked = new AtomicBoolean();

public DeadlockDetector(I2PAppContext ctx) {
public DeadlockDetector(RouterContext ctx) {
super(ctx.simpleTimer2());
_context = ctx;
_log = _context.logManager().getLog(DeadlockDetector.class);
@@ -41,6 +47,7 @@ class DeadlockDetector extends SimpleTimer2.TimedEvent {
public void timeReached() {
long start = System.currentTimeMillis();
boolean detected = detect();
// only reschedule if not detected
if (!detected) {
long time = System.currentTimeMillis() - start;
if (_log.shouldDebug())
@@ -55,7 +62,9 @@ class DeadlockDetector extends SimpleTimer2.TimedEvent {
return detect(_context);
}

public static boolean detect(I2PAppContext ctx) {
public static boolean detect(RouterContext ctx) {
if (_isDeadlocked.get())
return true;
try {
ThreadMXBean mxb = ManagementFactory.getThreadMXBean();
long[] ids = mxb.findDeadlockedThreads();
@@ -72,7 +81,8 @@ class DeadlockDetector extends SimpleTimer2.TimedEvent {
infos = mxb.getThreadInfo(ids, Integer.MAX_VALUE);
}
StringBuilder buf = new StringBuilder(2048);
buf.append("Deadlock detected, please report\n\n");
String msg1 = Messages.getString("Deadlock detected", ctx) + " - " + Messages.getString("Please report", ctx);
buf.append(msg1).append("\n\n");
for (int i = 0; i < infos.length; i++) {
ThreadInfo info = infos[i];
if (info == null)
@@ -86,9 +96,20 @@ class DeadlockDetector extends SimpleTimer2.TimedEvent {
}
buf.append('\n');
}
buf.append("\nAfter reporting, please restart your router!\n");
String msg2 = Messages.getString("After reporting, please restart your router", ctx);
buf.append('\n').append(msg2).append('\n');
Log log = ctx.logManager().getLog(DeadlockDetector.class);
log.log(Log.CRIT, buf.toString());
ctx.router().eventLog().addEvent(EventLog.DEADLOCK, infos.length + " threads");
_isDeadlocked.set(true);
ClientAppManager cmgr = ctx.clientAppManager();
if (cmgr != null) {
NotificationService ns = (NotificationService) cmgr.getRegisteredApp("desktopgui");
if (ns != null) {
ns.notify("Router", null, Log.CRIT, Messages.getString("Router", ctx),
msg1 + '\n' + msg2, null);
}
}
} catch (Throwable t) {
// class not found, unsupportedoperation, ...
Log log = ctx.logManager().getLog(DeadlockDetector.class);
@@ -98,6 +119,15 @@ class DeadlockDetector extends SimpleTimer2.TimedEvent {
return true;
}

/**
* Return the results of the last test. Does not run a new test.
*
* @since 0.9.56
*/
public static boolean isDeadlocked() {
return _isDeadlocked.get();
}

/*
public static void main(String[] args) {
final Object o1 = new Object();
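The DeadlockDetector changes above are built on the standard java.lang.management API, and the check interval comes from the router.deadlockDetectIntervalHours property (its default drops from 24 hours to 4, or 12 on slow hardware, per the hunk above). A minimal, self-contained sketch of the same ThreadMXBean technique follows; the class name and output format here are illustrative only, not part of the patch:

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Standalone sketch of the check DeadlockDetector.detect() performs.
public class DeadlockCheckSketch {

    /** @return true if the JVM reports any deadlocked threads */
    public static boolean isDeadlocked() {
        ThreadMXBean mxb = ManagementFactory.getThreadMXBean();
        long[] ids = mxb.findDeadlockedThreads();      // null when no deadlock exists
        if (ids == null)
            return false;
        // Integer.MAX_VALUE requests full stack depth, as in the patch above
        ThreadInfo[] infos = mxb.getThreadInfo(ids, Integer.MAX_VALUE);
        for (ThreadInfo info : infos) {
            if (info != null)
                System.err.println(info);              // one report per blocked thread
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println("Deadlocked? " + isDeadlocked());
    }
}
```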
@@ -36,6 +36,7 @@ public class EventLogHelper extends FormHandler {
EventLog.CLOCK_SHIFT, _x("Clock shifted"),
EventLog.CRASHED, _x("Crashed"),
EventLog.CRITICAL, _x("Critical error"),
EventLog.DEADLOCK, _x("Deadlock detected"),
EventLog.INSTALLED, _x("Installed new version"),
EventLog.INSTALL_FAILED, _x("Install failed"),
EventLog.NETWORK, _x("Network error"),

@@ -121,24 +121,27 @@ class NetDbRenderer {
if (h != null && h.length == Hash.HASH_LENGTH) {
Hash hash = new Hash(h);
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(hash);
boolean banned = false;
if (ri == null) {
// remote lookup
LookupWaiter lw = new LookupWaiter();
_context.netDb().lookupRouterInfo(hash, lw, lw, 8*1000);
// just wait right here in the middle of the rendering, sure
synchronized(lw) {
try { lw.wait(9*1000); } catch (InterruptedException ie) {}
banned = _context.banlist().isBanlisted(hash);
if (!banned) {
// remote lookup
LookupWaiter lw = new LookupWaiter();
_context.netDb().lookupRouterInfo(hash, lw, lw, 8*1000);
// just wait right here in the middle of the rendering, sure
synchronized(lw) {
try { lw.wait(9*1000); } catch (InterruptedException ie) {}
}
ri = _context.netDb().lookupRouterInfoLocally(hash);
}
ri = _context.netDb().lookupRouterInfoLocally(hash);
}
if (ri != null) {
renderRouterInfo(buf, ri, false, true);
} else {
buf.append("<div class=\"netdbnotfound\">");
buf.append(_t("Router")).append(' ');
if (routerPrefix != null)
buf.append(routerPrefix);
buf.append(' ').append(_t("not found in network database"));
buf.append(routerPrefix);
buf.append(' ').append(banned ? "is banned" : _t("not found in network database"));
buf.append("</div>");
}
} else {

@@ -26,6 +26,7 @@ import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.networkdb.reseed.ReseedChecker;
import net.i2p.router.transport.TransportUtil;
import net.i2p.router.web.CSSHelper;
import net.i2p.router.web.DeadlockDetector;
import net.i2p.router.web.HelperBase;
import net.i2p.router.web.NewsHelper;
import net.i2p.router.web.WebAppStarter;
@@ -951,6 +952,15 @@ public class SummaryHelper extends HelperBase {
.append("</a></h4>");
}

if (DeadlockDetector.isDeadlocked()) {
buf.append("<div class=\"sb_notice\"><b>")
.append(_t("Deadlock detected"))
.append(" - <a href=\"/logs\">")
.append(_t("Please report"))
.append("</a> - ").append(_t("After reporting, please restart your router"))
.append("</b></div>");
}

ReseedChecker checker = _context.netDb().reseedChecker();
String status = checker.getStatus();
if (status.length() > 0) {

@@ -77,6 +77,13 @@
<h3 id="addrtitle"><%=intl._t("Address book")%>: <%=intl._t(book.getBook())%></h3>
<h4 id="storagepath"><%=intl._t("Storage")%>: ${book.displayName}</h4>

<%
// This is what does the form processing.
// We need to do this before any notEmpty test and before loadBookMessages() which displays the entry count.
// Messages will be displayed below.
String formMessages = book.getMessages();
%>

${book.loadBookMessages}

<% if (book.getBook().equals("private")) { %>
@@ -128,15 +135,18 @@ ${book.loadBookMessages}
</div>
</form>
<% } /* book.getEntries().length() > 0 */ %>
</c:if><% /* book.notEmpty */ %>

</div>
</div><% /* headline */ %>

<div id="messages">${book.messages}<%
<% /* need this whether book is empty or not to display the form messages */ %>
<div id="messages"><%=formMessages%><%
if (importMessages != null) {
%><%=importMessages%><%
}
%></div>

<c:if test="${book.notEmpty}">
<div id="filter">
<c:if test="${book.hasFilter}">
<span><%=intl._t("Current filter")%>: <b>${book.filter}</b>
@@ -274,7 +284,7 @@ ${book.loadBookMessages}
<input type="hidden" name="begin" value="0">
<input type="hidden" name="end" value="49">
<div id="add">
<h3 id="addnewaddr" ><%=intl._t("Add new destination")%></h3>
<h3 id="addnewaddr" class="unexpanded"><%=intl._t("Add new destination")%></h3>
<table id="addnewaddrtable">
<tr>
<td><b><%=intl._t("Hostname")%></b></td>
@@ -287,10 +297,12 @@ ${book.loadBookMessages}
</table>
<p class="buttons" id="addnewaddrbutton">
<input class="cancel" type="reset" value="<%=intl._t("Cancel")%>" >
<c:if test="${book.notEmpty}">
<input class="accept" type="submit" name="action" value="<%=intl._t("Replace")%>" >
<% if (!book.getBook().equals("published")) { %>
<input class="add" type="submit" name="action" value="<%=intl._t("Add Alternate")%>" >
<% } %>
</c:if><% /* book.notEmpty */ %>
<input class="add" type="submit" name="action" value="<%=intl._t("Add")%>" >
</p>
</div>
@@ -303,7 +315,7 @@ ${book.loadBookMessages}
<input type="hidden" name="begin" value="0">
<input type="hidden" name="end" value="49">
<div id="import">
<h3 id="importhosts"><%=intl._t("Import from hosts.txt file")%></h3>
<h3 id="importhosts" class="unexpanded"><%=intl._t("Import from hosts.txt file")%></h3>
<table id="importhostsform">
<tr>
<td><b><%=intl._t("File")%></b></td>

@@ -576,9 +576,17 @@ tr.list0 {
}

.expanded {
background: url(/themes/console/light/images/dropdown_active.png) #f8f8ff !important;
background: url(/themes/console/dark/images/dropdown_active.png) !important;
background-repeat: no-repeat !important;
background-position: .5%, 15% !important;
padding-left: 2% !important;
}

.unexpanded {
background: url(/themes/console/dark/images/dropdown.png) !important;
background-repeat: no-repeat !important;
background-position: .5%, 15% !important;
padding-left: 2% !important;
}

#addrtitle.expanded {

@@ -9,11 +9,19 @@ package net.i2p.util;
*
*/

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.zip.GZIPOutputStream;

import net.i2p.data.DataHelper;

/**
* File-based log writer thread that pulls log records from the LogManager,
@@ -93,12 +101,31 @@ class FileLogWriter extends LogWriter {
* @since 0.9.19 renamed from closeFile()
*/
protected void closeWriter() {
closeWriter(_currentFile, false);
}

/**
* Gzip the closed file
*
* @param threadGzipper if true, spin off a thread
* @since 0.9.55
*/
private void closeWriter(File currentFile, boolean threadGzipper) {
Writer out = _currentOut;
if (out != null) {
try {
out.close();
} catch (IOException ioe) {}
}
if (_manager.shouldGzip() && currentFile != null && currentFile.length() >= _manager.getMinGzipSize()) {
Thread gzipper = new Gzipper(currentFile);
if (threadGzipper) {
gzipper.setPriority(Thread.MIN_PRIORITY);
gzipper.start(); // rotate
} else {
gzipper.run(); // shutdown
}
}
}

/**
@@ -107,6 +134,7 @@ class FileLogWriter extends LogWriter {
* Caller must synch
*/
private void rotateFile() {
File old = _currentFile;
File f = getNextFile();
_currentFile = f;
_numBytesInCurrentFile = 0;
@@ -125,7 +153,9 @@ class FileLogWriter extends LogWriter {
//System.exit(0);
}
}
closeWriter();
closeWriter(old, true);
if (_manager.shouldGzip())
(new File(f.getPath() + ".gz")).delete();
try {
_currentOut = new BufferedWriter(new OutputStreamWriter(new SecureFileOutputStream(f), "UTF-8"));
} catch (IOException ioe) {
@@ -180,7 +210,8 @@ class FileLogWriter extends LogWriter {
f = new File(base, replace(pattern, i));
else
f = new File(replace(pattern, i));
if (!f.exists()) {
// check for file or file.gz
if (!f.exists() && !(_manager.shouldGzip() && (new File(f.getPath() + ".gz").exists()))) {
_rotationNum = i;
return f;
}
@@ -197,7 +228,18 @@ class FileLogWriter extends LogWriter {
if (oldest == null) {
oldest = f;
} else {
if (f.lastModified() < oldest.lastModified()) {
// set file or file.gz for last mod check
File ff, oo;
if (!_manager.shouldGzip() || f.exists())
ff = f;
else
ff = new File(f.getPath() + ".gz");
if (!_manager.shouldGzip() || oldest.exists())
oo = oldest;
else
oo = new File(oldest.getPath() + ".gz");

if (ff.lastModified() < oo.lastModified()) {
_rotationNum = i;
oldest = f;
}
@@ -218,4 +260,34 @@ class FileLogWriter extends LogWriter {
}
return buf.toString();
}

/**
* @since 0.9.55
*/
private static class Gzipper extends I2PAppThread {
private final File _f;

public Gzipper(File f) {
super("Log file compressor");
_f = f;
}

public void run() {
File to = new File(_f.getPath() + ".gz");
InputStream in = null;
OutputStream out = null;
try {
in = new BufferedInputStream(new FileInputStream(_f));
out = new BufferedOutputStream(new GZIPOutputStream(new SecureFileOutputStream(to)));
DataHelper.copy(in, out);
} catch (IOException ioe) {
System.out.println("Error compressing log file " + _f);
} finally {
if (in != null) try { in.close(); } catch (IOException ioe) {}
if (out != null) try { out.close(); } catch (IOException ioe) {}
to.setLastModified(_f.lastModified());
_f.delete();
}
}
}
}

@@ -38,6 +38,8 @@ import net.i2p.data.DataHelper;
* This also fires off a LogWriter thread that pulls pending records off and
* writes them where appropriate.
*
* As of 0.9.41, this class may be overridden via I2PAppContext.setLogManager()
*
*/
public class LogManager implements Flushable {
public final static String CONFIG_LOCATION_PROP = "loggerConfigLocation";
@@ -65,6 +67,10 @@ public class LogManager implements Flushable {
private static final String PROP_DUP = "logger.dropDuplicates";
/** @since 0.9.18 */
private static final String PROP_FLUSH = "logger.flushInterval";
/** @since 0.9.56 */
private static final String PROP_GZIP = "logger.gzip";
/** @since 0.9.56 */
private static final String PROP_MIN_GZIP_SIZE = "logger.minGzipSize";
public final static String PROP_RECORD_PREFIX = "logger.record.";

public final static String DEFAULT_FORMAT = DATE + " " + PRIORITY + " [" + THREAD + "] " + CLASS + ": " + MESSAGE;
@@ -79,6 +85,9 @@ public class LogManager implements Flushable {
public final static String DEFAULT_DEFAULTLEVEL = Log.STR_ERROR;
public final static String DEFAULT_ONSCREENLEVEL = Log.STR_CRIT;
private static final int MIN_FILESIZE_LIMIT = 16*1024;
private final static boolean DEFAULT_GZIP = false;
private static final int DEFAULT_MIN_GZIP_SIZE = 64*1024;


private final I2PAppContext _context;
private final Log _log;
@@ -133,6 +142,8 @@ public class LogManager implements Flushable {
private final AtomicLong _droppedRecords = new AtomicLong();
// in seconds
private int _flushInterval = (int) (LogWriter.FLUSH_INTERVAL / 1000);
private boolean _gzip;
private long _minGzipSize;

private boolean _alreadyNoticedMissingConfig;

@@ -452,6 +463,17 @@ public class LogManager implements Flushable {
String str = config.getProperty(PROP_DUP);
_dropDuplicates = str == null || Boolean.parseBoolean(str);

str = config.getProperty(PROP_GZIP);
_gzip = str != null ? Boolean.parseBoolean(str) : DEFAULT_GZIP;
if (_gzip) {
_minGzipSize = DEFAULT_MIN_GZIP_SIZE;
try {
str = config.getProperty(PROP_MIN_GZIP_SIZE);
if (str != null)
_minGzipSize = Long.parseLong(str);
} catch (NumberFormatException nfe) {}
}

//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Log set to use the base log file as " + _baseLogfilename);

@@ -673,6 +695,20 @@ public class LogManager implements Flushable {
return _rotationLimit;
}

/**
* @since 0.9.56
*/
boolean shouldGzip() {
return _gzip;
}

/**
* @since 0.9.56
*/
long getMinGzipSize() {
return _gzip ? _minGzipSize : Long.MAX_VALUE;
}

/** @return success */
public synchronized boolean saveConfig() {
Properties props = createConfig();
@@ -712,6 +748,8 @@ public class LogManager implements Flushable {
rv.setProperty(PROP_DISPLAYONSCREENLEVEL, Log.toLevelString(_onScreenLimit));
rv.setProperty(PROP_CONSOLEBUFFERSIZE, Integer.toString(_consoleBufferSize));
rv.setProperty(PROP_FLUSH, Integer.toString(_flushInterval));
rv.setProperty(PROP_GZIP, Boolean.toString(_gzip));
rv.setProperty(PROP_MIN_GZIP_SIZE, Long.toString(_minGzipSize));

for (LogLimit lim : _limits) {
rv.setProperty(PROP_RECORD_PREFIX + lim.getRootName(), Log.toLevelString(lim.getLimit()));
@@ -812,16 +850,10 @@ public class LogManager implements Flushable {
_consoleBuffer.clear();
}

private static final AtomicInteger __id = new AtomicInteger();

private class ShutdownHook extends I2PAppThread {
private final int _id;
public ShutdownHook() {
_id = __id.incrementAndGet();
}
@Override
public void run() {
setName("Log " + _id + " shutdown ");
setName("Log shutdown");
shutdown();
}
}
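The FileLogWriter and LogManager changes above add opt-in gzip compression of rotated router logs, keyed off the two new logger properties in the diff (PROP_GZIP and PROP_MIN_GZIP_SIZE). A minimal sketch of enabling it via the logger configuration; the property names come straight from the hunks above, the threshold shown is simply the 64*1024 default, and the exact place these are set (logger.config vs. the console logging page) is an assumption about the install:

```
# Compress each rotated log file (off by default per DEFAULT_GZIP)
logger.gzip=true
# Only compress files of at least this many bytes (default 64*1024)
logger.minGzipSize=65536
```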
|
||||
@@ -34,6 +34,14 @@ public class ObjectCounter<K> implements Serializable {
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a high value
|
||||
* @since 0.9.56
|
||||
*/
|
||||
public void max(K h) {
|
||||
map.put(h, new AtomicInteger(Integer.MAX_VALUE / 2));
|
||||
}
|
||||
|
||||
/**
|
||||
* @return current count
|
||||
*/
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
i2p (1.9.0-1~bionic+1) bionic; urgency=medium
|
||||
|
||||
* New upstream version 1.9.0
|
||||
|
||||
-- zzz on i2p (key signing) <zzz@i2pmail.org> Tue, 23 Aug 2022 12:12:12 +0000
|
||||
|
||||
i2p (1.8.0-1~bionic+1) bionic; urgency=medium
|
||||
|
||||
* New upstream version 1.8.0
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
i2p (1.9.0-1ubuntu1) focal; urgency=medium
|
||||
|
||||
* New upstream version 1.9.0
|
||||
|
||||
-- zzz on i2p (key signing) <zzz@i2pmail.org> Tue, 23 Aug 2022 12:12:12 +0000
|
||||
|
||||
i2p (1.8.0-1ubuntu1) focal; urgency=medium
|
||||
|
||||
* New upstream version 1.8.0
|
||||
|
||||
6
debian/changelog
vendored
6
debian/changelog
vendored
@@ -1,3 +1,9 @@
|
||||
i2p (1.9.0-1ubuntu1) focal; urgency=medium
|
||||
|
||||
* New upstream version 1.9.0
|
||||
|
||||
-- zzz on i2p (key signing) <zzz@i2pmail.org> Tue, 23 Aug 2022 12:12:12 +0000
|
||||
|
||||
i2p (1.8.0-1ubuntu1) focal; urgency=medium
|
||||
|
||||
* New upstream version 1.8.0
|
||||
|
||||
2
debian/control
vendored
2
debian/control
vendored
@@ -79,7 +79,7 @@ Architecture: all
|
||||
Section: net
|
||||
Priority: optional
|
||||
Depends: ${misc:Depends}, ${java:Depends}, ${shlibs:Depends},
|
||||
openjdk-18-jre-headless | openjdk-17-jre-headless | openjdk-16-jre-headless | openjdk-15-jre-headless | openjdk-11-jre-headless | default-jre-headless | java18-runtime-headless | java17-runtime-headless | java16-runtime-headless | java15-runtime-headless | java11-runtime-headless,
|
||||
openjdk-18-jre-headless | openjdk-17-jre-headless | openjdk-11-jre-headless | default-jre-headless | java18-runtime-headless | java17-runtime-headless | java11-runtime-headless,
|
||||
geoip-database,
|
||||
gettext-base,
|
||||
libgetopt-java,
|
||||
|
||||
33
history.txt
33
history.txt
@@ -1,3 +1,36 @@
|
||||
2022-09-06 zzz
|
||||
* NetDB: Fix reseeding when clock is skewed
|
||||
* SSU2: Don't publish or connect if our MTU becomes too small
|
||||
|
||||
2022-09-04 zzz
|
||||
* SusiDNS: Fix adding to empty address book
|
||||
|
||||
2022-09-03 zzz
|
||||
* NetDB: Query connected peers for their RI directly
|
||||
* UPnP: Fix opening IPv6 ports
|
||||
|
||||
2022-09-01 zzz
|
||||
* Router: Ensure database store message is processed before reply job (Gitlab #364)
|
||||
|
||||
2022-08-29 zzz
|
||||
* SSU2: Implement ack-immediate flag
|
||||
|
||||
2022-08-28 zzz
|
||||
* Console:
|
||||
- Add notification and summary bar info on deadlock
|
||||
- Linkify router hash even if not in netdb
|
||||
* Util: Add option to gzip router logs
|
||||
|
||||
2022-08-25 zzz
|
||||
* Router: Fix deadlock via rebuildRouterAddress() and UDPTransport
|
||||
* SSU2:
|
||||
- Implement path challenge and connection migration
|
||||
- Fix packets exceeding MTU by up to 3 bytes
|
||||
- Immediately fail session request containing zero token
|
||||
|
||||
2022-08-23 zzz
|
||||
* Router: Add deadlocks to event log
|
||||
|
||||
2022-08-22 1.9.0 released
|
||||
|
||||
2022-08-10 zzz
|
||||
|
||||
@@ -137,3 +137,4 @@ Sybil:2400;8500;1302;824;a150;95;144;951
|
||||
Sybil:2400;8500;1302;825;150;95;147;89
|
||||
Sybil:2400;8500;1302;828;a150;95;153;2202
|
||||
Tunnels:JbifzqZZqeTXtxK6KDqNUPWaW-phKqeS~tfJT82SIYI=
|
||||
Tunnels:QPUV1bW6arN2zp3gTBMvOEvgSuKbXUqk2oqHkb~UoSw=
|
||||
|
||||
@@ -31,10 +31,13 @@
|
||||
- Trial Debian build: Run 'ant debcheckpatch' and fix any issues.
|
||||
Build and test a preliminary Debian build with 'ant debian' and fix any issues
|
||||
|
||||
- Javadoc test: 'ant javadoc' and 'ant mavenCentral.deps'
|
||||
- Javadoc test: 'ant javadoc'
|
||||
with a recent Oracle JDK (12+), and fix any issues.
|
||||
Oracle JDK will error on things that OpenJDK does not!
|
||||
|
||||
- Java 7 test: 'ant mavenCentral.deps' to ensure
|
||||
that Android will build correcly; fix any issues
|
||||
|
||||
|
||||
## A day or two before
|
||||
|
||||
|
||||
@@ -51,7 +51,8 @@
|
||||
Package install, running as a service: <code dir="ltr">/var/lib/i2p/i2p-config/eepsite/docroot/</code>
|
||||
</li>
|
||||
<li><b>Windows</b><br>
|
||||
<code dir="ltr">%LOCALAPPDATA%\I2P\eepsite\docroot\</code>
|
||||
Standard install: <code dir="ltr">%LOCALAPPDATA%\I2P\eepsite\docroot\</code>
|
||||
Windows Service install: <code dir="ltr">%PROGRAMDATA%\I2P\eepsite\docroot\</code>
|
||||
</li>
|
||||
<li><b>Mac</b><br>
|
||||
<code dir="ltr">/Users/(user)/Library/Application Support/i2p</code>
|
||||
|
||||
@@ -11,7 +11,7 @@ if "%1"=="uninstall" (
|
||||
) else (
|
||||
FINDSTR /I "^wrapper.java.additional.5=-Di2p.dir.config=" %_WRAPPER_CONF%
|
||||
if not errorlevel 1 goto end
|
||||
echo wrapper.java.additional.5=-Di2p.dir.config="%ALLUSERSPROFILE%\Application Data\i2p" >> %_WRAPPER_CONF%
|
||||
echo wrapper.java.additional.5=-Di2p.dir.config="%PROGRAMDATA%\i2p" >> %_WRAPPER_CONF%
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<programGroup defaultName="I2P" location="startMenu" />
|
||||
<shortcut name="Open I2P Profile Folder (service)"
|
||||
target="explorer"
|
||||
commandLine=""%allusersprofile%\Application Data\i2p""
|
||||
commandLine=""%programdata%\i2p""
|
||||
iconFile="%systemroot%\system32\shell32.dll"
|
||||
iconIndex="3"
|
||||
initialState="normal"
|
||||
|
||||
@@ -286,10 +286,12 @@ public class RouterAddress extends DataStructureImpl {
|
||||
// reduce Object proliferation
|
||||
if (_transportStyle.equals("SSU"))
|
||||
_transportStyle = "SSU";
|
||||
else if (_transportStyle.equals("NTCP"))
|
||||
_transportStyle = "NTCP";
|
||||
else if (_transportStyle.equals("NTCP2"))
|
||||
_transportStyle = "NTCP2";
|
||||
else if (_transportStyle.equals("NTCP"))
|
||||
_transportStyle = "NTCP";
|
||||
else if (_transportStyle.equals("SSU2"))
|
||||
_transportStyle = "SSU2";
|
||||
DataHelper.readProperties(in, _options);
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import net.i2p.data.Hash;
|
||||
import net.i2p.data.router.RouterIdentity;
|
||||
import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.DeliveryStatusMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.TunnelDataMessage;
|
||||
@@ -206,6 +207,34 @@ public class InNetMessagePool implements Service {
|
||||
allowMatches = false;
|
||||
break;
|
||||
|
||||
// If a DSM has a reply job, run the DSM inline
|
||||
// so the entry is stored in the netdb before the reply job runs.
|
||||
// FloodOnlyLookupMatchJob no longer stores the entry
|
||||
case DatabaseStoreMessage.MESSAGE_TYPE:
|
||||
List<OutNetMessage> origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
|
||||
HandlerJobBuilder dsmbuilder = _handlerJobBuilders[DatabaseStoreMessage.MESSAGE_TYPE];
|
||||
Job dsmjob = dsmbuilder.createJob(messageBody, fromRouter, fromRouterHash);
|
||||
int sz = origMessages.size();
|
||||
if (sz > 0) {
|
||||
// DSM inline, reply jobs on queue
|
||||
if (dsmjob != null)
|
||||
dsmjob.runJob();
|
||||
for (int i = 0; i < sz; i++) {
|
||||
OutNetMessage omsg = origMessages.get(i);
|
||||
ReplyJob job = omsg.getOnReplyJob();
|
||||
if (job != null) {
|
||||
job.setMessage(messageBody);
|
||||
_context.jobQueue().addJob(job);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// DSM on queue, no reply jobs
|
||||
if (dsmjob != null)
|
||||
_context.jobQueue().addJob(dsmjob);
|
||||
}
|
||||
allowMatches = false;
|
||||
break;
|
||||
|
||||
default:
|
||||
// why don't we allow type 0? There used to be a message of type 0 long ago...
|
||||
if ( (type > 0) && (type < _handlerJobBuilders.length) ) {
|
||||
|
||||
@@ -41,6 +41,7 @@ import net.i2p.data.PublicKey;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.data.SigningPublicKey;
|
||||
import net.i2p.data.i2np.GarlicMessage;
|
||||
import net.i2p.data.router.RouterAddress;
|
||||
import net.i2p.data.router.RouterInfo;
|
||||
import net.i2p.router.CommSystemFacade.Status;
|
||||
import net.i2p.router.crypto.FamilyKeyCrypto;
|
||||
@@ -1003,9 +1004,11 @@ public class Router implements RouterClock.ClockShiftListener {
|
||||
public void rebuildRouterInfo(boolean blockingRebuild) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Rebuilding new routerInfo, publish inline? " + blockingRebuild, new Exception("I did it"));
|
||||
// deadlock thru createAddresses() thru SSU REA... moved outside lock
|
||||
List<RouterAddress> addresses = _context.commSystem().createAddresses();
|
||||
_routerInfoLock.writeLock().lock();
|
||||
try {
|
||||
locked_rebuildRouterInfo(blockingRebuild);
|
||||
locked_rebuildRouterInfo(addresses);
|
||||
} finally {
|
||||
_routerInfoLock.writeLock().unlock();
|
||||
}
|
||||
@@ -1016,9 +1019,8 @@ public class Router implements RouterClock.ClockShiftListener {
|
||||
* Rebuild and republish our routerInfo since something significant
|
||||
* has changed.
|
||||
*
|
||||
* @param blockingRebuild ignored, always nonblocking
|
||||
*/
|
||||
private void locked_rebuildRouterInfo(boolean blockingRebuild) {
|
||||
private void locked_rebuildRouterInfo(List<RouterAddress> addresses) {
|
||||
RouterInfo ri;
|
||||
if (_routerInfo != null)
|
||||
ri = new RouterInfo(_routerInfo);
|
||||
@@ -1030,8 +1032,7 @@ public class Router implements RouterClock.ClockShiftListener {
|
||||
Properties stats = _context.statPublisher().publishStatistics();
|
||||
|
||||
ri.setOptions(stats);
|
||||
// deadlock thru createAddresses() thru SSU REA... move outside lock?
|
||||
ri.setAddresses(_context.commSystem().createAddresses());
|
||||
ri.setAddresses(addresses);
|
||||
|
||||
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
|
||||
if (key == null) {
|
||||
|
||||
@@ -18,7 +18,7 @@ public class RouterVersion {
|
||||
/** deprecated */
|
||||
public final static String ID = "Git";
|
||||
public final static String VERSION = CoreVersion.VERSION;
|
||||
public final static long BUILD = 0;
|
||||
public final static long BUILD = 7;
|
||||
|
||||
/** for example "-test" */
|
||||
public final static String EXTRA = "";
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
package net.i2p.router.networkdb.kademlia;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.data.router.RouterInfo;
|
||||
import net.i2p.router.Job;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.OutNetMessage;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Ask a connected peer for his RI.
|
||||
* Modified from SingleSearchJob.
|
||||
*
|
||||
* Mainly for older routers. As of 0.9.55, transports will
|
||||
* periodically send their RI.
|
||||
* Some old routers may not respond or may send DSRM,
|
||||
* e.g. if hidden (and i2pd?)
|
||||
*
|
||||
* @since 0.9.56
|
||||
*/
|
||||
class DirectLookupJob extends FloodOnlySearchJob {
|
||||
private OutNetMessage _onm;
|
||||
private final RouterInfo _oldRI;
|
||||
|
||||
private static final int TIMEOUT = 8*1000;
|
||||
|
||||
/**
|
||||
* @param peer for Router Info only
|
||||
*/
|
||||
public DirectLookupJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash peer, RouterInfo oldRI, Job onFind, Job onFail) {
|
||||
super(ctx, facade, peer, onFind, onFail, TIMEOUT);
|
||||
_oldRI = oldRI;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() { return "NetDb direct RI request"; }
|
||||
|
||||
@Override
|
||||
public boolean shouldProcessDSRM() { return false; } // don't loop
|
||||
|
||||
@Override
|
||||
public void runJob() {
|
||||
RouterContext ctx = getContext();
|
||||
_onm = ctx.messageRegistry().registerPending(_replySelector, _onReply, _onTimeout);
|
||||
DatabaseLookupMessage dlm = new DatabaseLookupMessage(ctx, true);
|
||||
dlm.setFrom(ctx.routerHash());
|
||||
long exp = ctx.clock().now() + 5*1000;
|
||||
dlm.setMessageExpiration(exp);
|
||||
dlm.setSearchKey(_key);
|
||||
dlm.setSearchType(DatabaseLookupMessage.Type.RI);
|
||||
OutNetMessage m = new OutNetMessage(ctx, dlm, exp,
|
||||
OutNetMessage.PRIORITY_MY_NETDB_LOOKUP, _oldRI);
|
||||
ctx.commSystem().processMessage(m);
|
||||
_lookupsRemaining.set(1);
|
||||
}
|
||||
|
||||
@Override
|
||||
void failed() {
|
||||
RouterContext ctx = getContext();
|
||||
ctx.messageRegistry().unregisterPending(_onm);
|
||||
ctx.profileManager().dbLookupFailed(_key);
|
||||
_facade.complete(_key);
|
||||
for (Job j : _onFailed) {
|
||||
ctx.jobQueue().addJob(j);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
void success() {
|
||||
// don't give him any credit
|
||||
//getContext().profileManager().dbLookupSuccessful(_to, System.currentTimeMillis()-_created);
|
||||
_facade.complete(_key);
|
||||
RouterContext ctx = getContext();
|
||||
for (Job j : _onFind) {
|
||||
ctx.jobQueue().addJob(j);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
package net.i2p.router.networkdb.kademlia;
|
||||
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.router.RouterContext;
|
||||
|
||||
/**
|
||||
* Override to not call failed() in setMessage(),
|
||||
* as it will be called from runJob()
|
||||
*
|
||||
* @since 0.9.56
|
||||
*/
|
||||
class DirectLookupMatchJob extends FloodOnlyLookupMatchJob {
|
||||
|
||||
public DirectLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
|
||||
super(ctx, job);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() { return "Direct lookup match"; }
|
||||
|
||||
/**
|
||||
* Override to not call failed() in setMessage(),
|
||||
* as it will be called from runJob()
|
||||
*/
|
||||
@Override
|
||||
public void setMessage(I2NPMessage message) {
|
||||
if (message.getType() != DatabaseStoreMessage.MESSAGE_TYPE)
|
||||
return;
|
||||
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
|
||||
if (dsm.getKey().equals(_search.getKey()))
|
||||
_success = true;
|
||||
}
|
||||
}
|
||||
@@ -11,9 +11,9 @@ import net.i2p.router.RouterContext;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
|
||||
private final Log _log;
|
||||
private final FloodSearchJob _search;
|
||||
private volatile boolean _success;
|
||||
protected final Log _log;
|
||||
protected final FloodSearchJob _search;
|
||||
protected volatile boolean _success;
|
||||
|
||||
public FloodOnlyLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
|
||||
super(ctx);
|
||||
@@ -50,8 +50,8 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
|
||||
return;
|
||||
|
||||
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(_search.getJobId() + ": got a DSM for "
|
||||
if (_log.shouldDebug())
|
||||
_log.debug(_search.getJobId() + ": got a DSM for "
|
||||
+ dsm.getKey().toBase64());
|
||||
// This store will handled by HFDSMJ.
|
||||
// Just note success here.
|
||||
|
||||
@@ -56,9 +56,24 @@ abstract class FloodOnlySearchJob extends FloodSearchJob {
|
||||
_timeoutMs = Math.min(timeoutMs, SearchJob.PER_FLOODFILL_PEER_TIMEOUT);
|
||||
_expiration = _timeoutMs + ctx.clock().now();
|
||||
_unheardFrom = new HashSet<Hash>(CONCURRENT_SEARCHES);
|
||||
_replySelector = new FloodOnlyLookupSelector(getContext(), this);
|
||||
_onReply = new FloodOnlyLookupMatchJob(getContext(), this);
|
||||
_onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
|
||||
_replySelector = new FloodOnlyLookupSelector(ctx, this);
|
||||
_onReply = new FloodOnlyLookupMatchJob(ctx, this);
|
||||
_onTimeout = new FloodOnlyLookupTimeoutJob(ctx, this);
|
||||
}
|
||||
|
||||
/**
|
||||
* For DirectLookupJob extension, RI only, different match job
|
||||
*
|
||||
* @since 0.9.56
|
||||
*/
|
||||
protected FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs) {
|
||||
super(ctx, facade, key, onFind, onFailed, timeoutMs, false);
|
||||
_timeoutMs = timeoutMs;
|
||||
_expiration = _timeoutMs + ctx.clock().now();
|
||||
_unheardFrom = new HashSet<Hash>(1);
|
||||
_replySelector = new FloodOnlyLookupSelector(ctx, this);
|
||||
_onReply = new DirectLookupMatchJob(ctx, this);
|
||||
_onTimeout = new FloodOnlyLookupTimeoutJob(ctx, this);
|
||||
}
|
||||
|
||||
public boolean shouldProcessDSRM() { return _shouldProcessDSRM; }
|
||||
|
||||
@@ -464,12 +464,12 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
}
|
||||
|
||||
if (isNew) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("this is the first search for that key, fire off the FloodSearchJob");
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("New ISJ for " + key.toBase64());
|
||||
_context.jobQueue().addJob(searchJob);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Deferring flood search for " + key.toBase64() + " with " + _activeFloodQueries.size() + " in progress");
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("Wait for pending ISJ for " + key.toBase64());
|
||||
searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
|
||||
// not necessarily LS
|
||||
_context.statManager().addRateData("netDb.lookupDeferred", 1, searchJob.getExpiration()-_context.clock().now());
|
||||
@@ -579,6 +579,32 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
*/
|
||||
@Override
|
||||
protected void lookupBeforeDropping(Hash peer, RouterInfo info) {
|
||||
if (_context.commSystem().isEstablished(peer)) {
|
||||
// see DirectLookupJob
|
||||
boolean isNew = false;
|
||||
FloodSearchJob searchJob;
|
||||
Job onFindJob = new DropLookupFoundJob(_context, peer, info);
|
||||
Job onFailedLookupJob = new DropLookupFailedJob(_context, peer, info);
|
||||
synchronized (_activeFloodQueries) {
|
||||
searchJob = _activeFloodQueries.get(peer);
|
||||
if (searchJob == null) {
|
||||
searchJob = new DirectLookupJob(_context, this, peer, info, onFindJob, onFailedLookupJob);
|
||||
_activeFloodQueries.put(peer, searchJob);
|
||||
isNew = true;
|
||||
}
|
||||
}
|
||||
if (isNew) {
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("Direct RI lookup for " + peer.toBase64());
|
||||
_context.jobQueue().addJob(searchJob);
|
||||
} else {
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("Pending Direct RI lookup for " + peer.toBase64());
|
||||
searchJob.addDeferred(onFindJob, onFailedLookupJob, 10*1000, false);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// following are some special situations, we don't want to
|
||||
// drop the peer in these cases
|
||||
// yikes don't do this - stack overflow // getFloodfillPeers().size() == 0 ||
|
||||
@@ -609,17 +635,17 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
// entry locally, firing no job if it gets a reply with an updated value (meaning
|
||||
// we shouldn't drop them but instead use the new data), or if they all time out,
|
||||
// firing the dropLookupFailedJob, which actually removes out local reference
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("ISJ lookup before dropping for " + peer.toBase64() + ' ' + info.getPublished());
|
||||
search(peer, new DropLookupFoundJob(_context, peer, info), new DropLookupFailedJob(_context, peer, info), 10*1000, false);
|
||||
}
|
||||
|
||||
private class DropLookupFailedJob extends JobImpl {
|
||||
private final Hash _peer;
|
||||
private final RouterInfo _info;
|
||||
|
||||
public DropLookupFailedJob(RouterContext ctx, Hash peer, RouterInfo info) {
|
||||
super(ctx);
|
||||
_peer = peer;
|
||||
_info = info;
|
||||
}
|
||||
public String getName() { return "Lookup on failure of netDb peer timed out"; }
|
||||
public void runJob() {
|
||||
@@ -639,10 +665,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
public String getName() { return "Lookup on failure of netDb peer matched"; }
|
||||
public void runJob() {
|
||||
RouterInfo updated = lookupRouterInfoLocally(_peer);
|
||||
if ( (updated != null) && (updated.getPublished() > _info.getPublished()) ) {
|
||||
// great, a legitimate update
|
||||
} else {
|
||||
// they just sent us what we already had. kill 'em both
|
||||
if (updated == null || updated.getPublished() <= _info.getPublished()) {
|
||||
// they just sent us what we already had
|
||||
dropAfterLookupFailed(_peer);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1379,6 +1379,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
|
||||
*/
|
||||
void dropAfterLookupFailed(Hash peer) {
|
||||
_context.peerManager().removeCapabilities(peer);
|
||||
_negativeCache.cache(peer);
|
||||
_kb.remove(peer);
|
||||
//if (removed) {
|
||||
// if (_log.shouldLog(Log.INFO))
|
||||
|
||||
@@ -36,6 +36,15 @@ class NegativeLookupCache {
|
||||
this.counter.increment(h);
|
||||
}
|
||||
|
||||
/**
|
||||
* Negative cache the hash until the next clean time.
|
||||
*
|
||||
* @since 0.9.56
|
||||
*/
|
||||
public void cache(Hash h) {
|
||||
this.counter.max(h);
|
||||
}
|
||||
|
||||
public boolean isCached(Hash h) {
|
||||
if (counter.count(h) >= _maxFails)
|
||||
return true;
|
||||
|
||||
@@ -366,7 +366,7 @@ public class PersistentDataStore extends TransientDataStore {
|
||||
requeue(READ_DELAY);
|
||||
return;
|
||||
}
|
||||
long now = getContext().clock().now();
|
||||
long now = System.currentTimeMillis();
|
||||
// check directory mod time to save a lot of object churn in scanning all the file names
|
||||
long lastMod = _dbDir.lastModified();
|
||||
// if size() (= RI + LS) is too low, call anyway to check for reseed
|
||||
|
||||
@@ -80,14 +80,14 @@ public class WorkingDir {
|
||||
} else {
|
||||
String home = System.getProperty("user.home");
|
||||
if (isWindows) {
|
||||
String appdata = System.getenv("LOCALAPPDATA");
|
||||
if (appdata != null) {
|
||||
home = appdata;
|
||||
String localappdata = System.getenv("LOCALAPPDATA");
|
||||
if (localappdata != null) {
|
||||
home = localappdata;
|
||||
}
|
||||
// Don't mess with existing Roaming Application Data installs,
|
||||
// in case somebody is using roaming appdata for a reason
|
||||
// already. In new installs, use local appdata by default. -idk
|
||||
appdata = System.getenv("APPDATA");
|
||||
String appdata = System.getenv("APPDATA");
|
||||
if (appdata != null) {
|
||||
File checkOld = new File(appdata, WORKING_DIR_DEFAULT_WINDOWS);
|
||||
if (checkOld.exists() && checkOld.isDirectory()){
|
||||
@@ -106,6 +106,7 @@ public class WorkingDir {
|
||||
if (routerConfig.exists() && clientAppsConfig.exists())
|
||||
home = appdata;
|
||||
}
|
||||
System.err.println("System is Windows: " + home);
|
||||
}
|
||||
}
|
||||
dirf = new SecureDirectory(home, WORKING_DIR_DEFAULT_WINDOWS);
|
||||
@@ -391,7 +392,7 @@ public class WorkingDir {
|
||||
}
|
||||
out.println(s);
|
||||
}
|
||||
System.err.println("Copied " + oldFile + " with modifications");
|
||||
System.err.println("Copied file " + oldFile + " with modifications");
|
||||
if (out.checkError())
|
||||
throw new IOException("Failed write to " + newFile);
|
||||
return true;
|
||||
@@ -456,7 +457,7 @@ public class WorkingDir {
|
||||
System.err.println("FAILED copy " + src.getPath());
|
||||
return false;
|
||||
}
|
||||
System.err.println("Created " + targetDir.getPath());
|
||||
System.err.println("Created Directory " + targetDir.getPath());
|
||||
}
|
||||
// SecureDirectory is a File so this works for non-directories too
|
||||
File targetFile = new SecureDirectory(targetDir, src.getName());
|
||||
@@ -473,7 +474,7 @@ public class WorkingDir {
|
||||
System.err.println("FAILED copy " + src.getPath());
|
||||
return false;
|
||||
}
|
||||
System.err.println("Created " + targetFile.getPath());
|
||||
System.err.println("Created File " + targetFile.getPath());
|
||||
}
|
||||
boolean rv = true;
|
||||
for (int i = 0; i < children.length; i++) {
|
||||
@@ -497,7 +498,7 @@ public class WorkingDir {
|
||||
in = new FileInputStream(src);
|
||||
out = new SecureFileOutputStream(dst);
|
||||
DataHelper.copy(in, out);
|
||||
System.err.println("Copied " + src.getPath());
|
||||
System.err.println("Copied File " + src.getPath());
|
||||
} catch (IOException ioe) {
|
||||
System.err.println("FAILED copy " + src.getPath() + ": " + ioe);
|
||||
rv = false;
|
||||
|
||||
@@ -632,17 +632,12 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
|
||||
buf.append("<img height=\"11\" width=\"16\" alt=\"").append(c.toUpperCase(Locale.US)).append("\" title=\"");
|
||||
buf.append(countryName);
|
||||
buf.append("\" src=\"/flags.jsp?c=").append(c).append("\"> ");
|
||||
}
|
||||
else
|
||||
} else {
|
||||
buf.append("<img class=\"unknownflag\" height=\"11\" width=\"16\" alt=\"??\" src=\"/flags.jsp?c=a0\" title=\"").append(_t("unknown")).append("\"> ");
|
||||
buf.append("<tt>");
|
||||
boolean found = _context.netDb().lookupRouterInfoLocally(peer) != null;
|
||||
if (found)
|
||||
buf.append("<a title=\"").append(_t("NetDb entry")).append("\" href=\"netdb?r=").append(h).append("\">");
|
||||
}
|
||||
buf.append("<tt><a title=\"").append(_t("NetDb entry")).append("\" href=\"netdb?r=").append(h).append("\">");
|
||||
buf.append(h, 0, 4);
|
||||
if (found)
|
||||
buf.append("</a>");
|
||||
buf.append("</tt>");
|
||||
buf.append("</a></tt>");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
|
||||
@@ -828,7 +828,7 @@ public class TransportManager implements TransportEventListener {
|
||||
config != TransportUtil.IPv6Config.IPV6_DISABLED &&
|
||||
!t.isIPv6Firewalled()) {
|
||||
RouterAddress ra = t.getCurrentAddress(true);
|
||||
if (ra == null) {
|
||||
if (ra == null || ra.getHost() == null) {
|
||||
if (t.getStyle().equals(UDPTransport.STYLE)) {
|
||||
UDPTransport udp = (UDPTransport) t;
|
||||
ra = udp.getCurrentExternalAddress(true);
|
||||
|
||||
@@ -250,7 +250,7 @@ public class NTCPTransport extends TransportImpl {
|
||||
String s = null;
|
||||
// try to determine if we've been down for 30 days or more
|
||||
long minDowntime = _context.router().isHidden() ? MIN_DOWNTIME_TO_REKEY_HIDDEN : MIN_DOWNTIME_TO_REKEY;
|
||||
boolean shouldRekey = _context.getEstimatedDowntime() >= minDowntime;
|
||||
boolean shouldRekey = !allowLocal() && _context.getEstimatedDowntime() >= minDowntime;
|
||||
if (!shouldRekey) {
|
||||
s = ctx.getProperty(PROP_NTCP2_SP);
|
||||
if (s != null) {
|
||||
|
||||
@@ -76,7 +76,6 @@ class OutboundNTCP2State implements EstablishState {
|
||||
private final HandshakeState _handshakeState;
|
||||
private final RouterInfo _aliceRI;
|
||||
private final int _aliceRISize;
|
||||
private int _padlen1;
|
||||
private int _padlen2;
|
||||
private final int _padlen3;
|
||||
private final SessionKey _bobHash;
|
||||
|
||||
@@ -432,7 +432,10 @@ class EstablishmentManager {
|
||||
}
|
||||
if (version == 2) {
|
||||
int mtu = addr.getMTU();
|
||||
if (mtu > 0 && mtu < PeerState2.MIN_MTU) {
|
||||
boolean isIPv6 = TransportUtil.isIPv6(ra);
|
||||
int ourMTU = _transport.getMTU(isIPv6);
|
||||
if ((mtu > 0 && mtu < PeerState2.MIN_MTU) ||
|
||||
(ourMTU > 0 && ourMTU < PeerState2.MIN_MTU)) {
|
||||
if (ra.getTransportStyle().equals("SSU2")) {
|
||||
_transport.markUnreachable(toHash);
|
||||
_transport.failed(msg, "MTU too small");
|
||||
@@ -677,7 +680,12 @@ class EstablishmentManager {
|
||||
return;
|
||||
}
|
||||
|
||||
/**** TODO
|
||||
/****
|
||||
// A token request or session request with a bad token is
|
||||
// inexpensive to reply to.
|
||||
// A token can only be used once, so a replayed session request
|
||||
// will only generate a retry.
|
||||
// So probably don't need a replay detector at all
|
||||
if (_replayFilter.add(state.getReceivedX(), 0, 8)) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Duplicate X in session request from: " + from);
|
||||
|
||||
@@ -121,6 +121,8 @@ class InboundEstablishState2 extends InboundEstablishState implements SSU2Payloa
|
||||
(ENFORCE_TOKEN && !_transport.getEstablisher().isInboundTokenValid(_remoteHostId, token)))) {
|
||||
if (_log.shouldInfo())
|
||||
_log.info("Invalid token " + token + " in session request from: " + _aliceSocketAddress);
|
||||
if (token == 0)
|
||||
throw new GeneralSecurityException("Zero token in session request from: " + _aliceSocketAddress);
|
||||
_currentState = InboundState.IB_STATE_REQUEST_BAD_TOKEN_RECEIVED;
|
||||
_sendHeaderEncryptKey2 = introKey;
|
||||
// Generate token for the retry.
|
||||
@@ -162,14 +164,17 @@ class InboundEstablishState2 extends InboundEstablishState implements SSU2Payloa
|
||||
// termination block received
|
||||
throw new GeneralSecurityException("Termination block in Session/Token Request");
|
||||
}
|
||||
if (_timeReceived == 0)
|
||||
if (_timeReceived == 0) {
|
||||
_currentState = InboundState.IB_STATE_FAILED;
|
||||
throw new GeneralSecurityException("No DateTime block in Session/Token Request");
|
||||
}
|
||||
_skew = _establishBegin - _timeReceived;
|
||||
if (_skew > MAX_SKEW || _skew < 0 - MAX_SKEW) {
|
||||
_currentState = InboundState.IB_STATE_FAILED;
|
||||
// send retry with termination
|
||||
UDPPacket retry = _transport.getBuilder2().buildRetryPacket(this, SSU2Util.REASON_SKEW);
|
||||
_transport.send(retry);
|
||||
throw new GeneralSecurityException("Skew exceeded in Session/Token Request: " + _skew);
|
||||
throw new GeneralSecurityException("Skew exceeded in Session/Token Request (retry sent): " + _skew);
|
||||
}
|
||||
packetReceived();
|
||||
if (_log.shouldDebug())
|
||||
@@ -185,7 +190,8 @@ class InboundEstablishState2 extends InboundEstablishState implements SSU2Payloa
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("Processed " + blocks + " blocks on " + this);
|
||||
} catch (Exception e) {
|
||||
_log.error("IES2 payload error\n" + net.i2p.util.HexDump.dump(payload, 0, length), e);
|
||||
if (_log.shouldWarn())
|
||||
_log.warn("IES2 payload error\n" + net.i2p.util.HexDump.dump(payload, 0, length), e);
|
||||
throw new GeneralSecurityException("IES2 payload error", e);
|
||||
}
|
||||
}
|
||||
@@ -472,6 +478,9 @@ class InboundEstablishState2 extends InboundEstablishState implements SSU2Payloa
|
||||
|
||||
/** note that we just sent a Retry packet */
|
||||
public synchronized void retryPacketSent() {
|
||||
// retry after clock skew
|
||||
if (_currentState == InboundState.IB_STATE_FAILED)
|
||||
return;
|
||||
if (_currentState != InboundState.IB_STATE_REQUEST_BAD_TOKEN_RECEIVED &&
|
||||
_currentState != InboundState.IB_STATE_TOKEN_REQUEST_RECEIVED)
|
||||
throw new IllegalStateException("Bad state for Retry Sent: " + _currentState);
|
||||
|
||||
@@ -90,7 +90,7 @@ class IntroductionManager {
|
||||
/** map of relay tag to PeerState who have given us introduction tags */
|
||||
private final Map<Long, PeerState> _inbound;
|
||||
/** map of relay nonce to alice PeerState who requested it */
|
||||
private final Map<Long, PeerState2> _nonceToAlice;
|
||||
private final ConcurrentHashMap<Long, PeerState2> _nonceToAlice;
|
||||
private final Set<InetAddress> _recentHolePunches;
|
||||
private long _lastHolePunchClean;
|
||||
|
||||
|
||||
@@ -4,7 +4,9 @@ import java.net.InetAddress;
|
||||
import java.net.Inet6Address;
|
||||
import java.net.NetworkInterface;
|
||||
import java.net.SocketException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Enumeration;
|
||||
import java.util.List;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.util.Log;
|
||||
@@ -60,6 +62,8 @@ public class MTU {
|
||||
return 0;
|
||||
}
|
||||
if (ifcs != null) {
|
||||
// save for fallback loop below, so we don't have to call getNetworkInterfaces() again
|
||||
List<NetworkInterface> interfaces = new ArrayList<NetworkInterface>();
|
||||
while (ifcs.hasMoreElements()) {
|
||||
NetworkInterface ifc = ifcs.nextElement();
|
||||
try {
|
||||
@@ -72,6 +76,7 @@ public class MTU {
|
||||
} catch (SocketException e) {
|
||||
continue;
|
||||
}
|
||||
interfaces.add(ifc);
|
||||
for(Enumeration<InetAddress> addrs = ifc.getInetAddresses(); addrs.hasMoreElements();) {
|
||||
InetAddress addr = addrs.nextElement();
|
||||
if (ia.equals(addr)) {
|
||||
@@ -105,6 +110,10 @@ public class MTU {
|
||||
}
|
||||
if (isSSU2)
|
||||
return Math.min(mtu, PeerState2.MAX_MTU);
|
||||
// don't rectify 1280 down to 1276 because that
|
||||
// borks a shared SSU/SSU2 address
|
||||
if (mtu == PeerState2.MIN_MTU)
|
||||
return PeerState2.MIN_MTU;
|
||||
return rectify(isIPv6, mtu);
|
||||
} catch (SocketException se) {
|
||||
// ignore
|
||||
@@ -116,6 +125,59 @@ public class MTU {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// didn't find a match, probably behind a NAT
|
||||
// try again, looping through all the interfaces that were up,
|
||||
// just get the minimum of all interfaces with addresses of that type v4/v6
|
||||
boolean isIPv6 = ia instanceof Inet6Address;
|
||||
int rv = 1501;
|
||||
outer:
|
||||
for (NetworkInterface ifc : interfaces) {
|
||||
for(Enumeration<InetAddress> addrs = ifc.getInetAddresses(); addrs.hasMoreElements();) {
|
||||
InetAddress addr = addrs.nextElement();
|
||||
if (isIPv6 != (addr instanceof Inet6Address))
|
||||
continue;
|
||||
if (addr.isLinkLocalAddress() ||
|
||||
addr.isMulticastAddress() ||
|
||||
addr.isAnyLocalAddress() ||
|
||||
addr.isLoopbackAddress())
|
||||
continue;
|
||||
if (isIPv6) {
|
||||
// ygg
|
||||
byte[] ip = addr.getAddress();
|
||||
if ((ip[0] & 0xfe) == 0x02)
|
||||
continue outer;
|
||||
}
|
||||
int mtu;
|
||||
try {
|
||||
mtu = ifc.getMTU();
|
||||
} catch (Throwable t) {
|
||||
continue outer;
|
||||
}
|
||||
if (mtu < 0)
|
||||
continue outer;
|
||||
if (isIPv6 && mtu > 1420) {
|
||||
byte[] ip = addr.getAddress();
|
||||
if (mtu > 1472 &&
|
||||
ip[0] == 0x20 && ip[1] == 0x01 &&
|
||||
ip[2] == 0x04 && ip[3] == 0x70)
|
||||
mtu = 1472;
|
||||
if (ip[0] == 0x2a && ip[1] == 0x06 &&
|
||||
ip[2] == (byte) 0xa0 && ip[3] == 0x04)
|
||||
mtu = 1420;
|
||||
}
|
||||
if (mtu < rv)
|
||||
rv = mtu;
|
||||
continue outer;
|
||||
}
|
||||
}
|
||||
if (rv < 1501) {
|
||||
if (isSSU2)
|
||||
return rv;
|
||||
if (rv == PeerState2.MIN_MTU)
|
||||
return rv;
|
||||
return rectify(isIPv6, rv);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -140,18 +202,20 @@ public class MTU {
|
||||
}
|
||||
|
||||
public static void main(String args[]) {
|
||||
/****
|
||||
System.out.println("Cmd line interfaces:");
|
||||
for (int i = 0; i < args.length; i++) {
|
||||
try {
|
||||
InetAddress test = InetAddress.getByName(args[i]);
|
||||
System.out.println("MTU of " + args[i] + " is " + getMTU(test));
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
if (args.length > 0) {
|
||||
System.out.println("Cmd line interfaces:");
|
||||
for (int i = 0; i < args.length; i++) {
|
||||
try {
|
||||
InetAddress test = InetAddress.getByName(args[i]);
|
||||
System.out.println("I2P MTU of " + args[i] + " is " + getMTU(test, false) +
|
||||
"; SSU2 MTU is " + getMTU(test, true));
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
System.out.println("All interfaces:");
|
||||
****/
|
||||
try {
|
||||
Enumeration<NetworkInterface> ifcs = NetworkInterface.getNetworkInterfaces();
|
||||
if (ifcs != null) {
|
||||
|
||||
@@ -463,8 +463,11 @@ class OutboundMessageFragments {
|
||||
if (_log.shouldDebug())
|
||||
_log.debug("Building packet for " + next + " to " + peer);
|
||||
int curTotalDataSize = state.fragmentSize(next.num);
|
||||
if (next.num > 0 && peer.getVersion() > 1)
|
||||
curTotalDataSize += SSU2Util.DATA_FOLLOWON_EXTRA_SIZE;
|
||||
if (peer.getVersion() > 1) {
|
||||
curTotalDataSize += SSU2Util.FIRST_FRAGMENT_HEADER_SIZE;
|
||||
if (next.num > 0)
|
||||
curTotalDataSize += SSU2Util.DATA_FOLLOWON_EXTRA_SIZE;
|
||||
}
|
||||
// now stuff in more fragments if they fit
|
||||
if (i +1 < toSend.size()) {
|
||||
int maxAvail;
|
||||
|
||||
@@ -275,6 +275,11 @@ class PacketBuilder2 {
//if (_log.shouldDebug())
// _log.debug("Packet " + pktNum + " before encryption:\n" + HexDump.dump(data, 0, off));

// ack immediate flag
if (numFragments > 0) {
data[SHORT_HEADER_FLAGS_OFFSET] = peer.getFlags();
}

encryptDataPacket(packet, peer.getSendCipher(), pktNum, peer.getSendHeaderEncryptKey1(), peer.getSendHeaderEncryptKey2());
setTo(packet, peer.getRemoteIPAddress(), peer.getRemotePort());
//if (_log.shouldDebug())
@@ -343,7 +348,7 @@ class PacketBuilder2 {
*
*/
public UDPPacket buildACK(PeerState2 peer) {
return buildPacket(Collections.emptyList(), peer);
return buildPacket(Collections.<Fragment>emptyList(), peer);
}

/**
@@ -351,8 +356,8 @@ class PacketBuilder2 {
* This will also include acks, a new token block, and padding.
*/
public UDPPacket buildSessionDestroyPacket(int reason, PeerState2 peer) {
if (_log.shouldWarn())
_log.warn("Sending termination " + reason + " to : " + peer);
if (_log.shouldDebug())
_log.debug("Sending termination " + reason + " to : " + peer);
List<Block> blocks = new ArrayList<Block>(2);
if (peer.getKeyEstablishedTime() - _context.clock().now() > EstablishmentManager.IB_TOKEN_EXPIRATION / 2 &&
!_context.router().gracefulShutdownInProgress()) {
@@ -363,7 +368,7 @@ class PacketBuilder2 {
}
Block block = new SSU2Payload.TerminationBlock(reason, peer.getReceivedMessages().getHighestSet());
blocks.add(block);
UDPPacket packet = buildPacket(Collections.emptyList(), blocks, peer);
UDPPacket packet = buildPacket(Collections.<Fragment>emptyList(), blocks, peer);
packet.setMessageType(TYPE_DESTROY);
return packet;
}
@@ -652,7 +657,7 @@ class PacketBuilder2 {
*/
public UDPPacket buildPeerTestFromAlice(byte[] signedData, PeerState2 bob) {
Block block = new SSU2Payload.PeerTestBlock(1, 0, null, signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), bob);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), bob);
rv.setMessageType(TYPE_TFA);
return rv;
}
@@ -688,7 +693,7 @@ class PacketBuilder2 {
*/
public UDPPacket buildPeerTestToAlice(int code, Hash charlieHash, byte[] signedData, PeerState2 alice) {
Block block = new SSU2Payload.PeerTestBlock(4, code, charlieHash, signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), alice);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), alice);
rv.setMessageType(TYPE_TTA);
return rv;
}
@@ -724,7 +729,7 @@ class PacketBuilder2 {
*/
public UDPPacket buildPeerTestToCharlie(Hash aliceHash, byte[] signedData, PeerState2 charlie) {
Block block = new SSU2Payload.PeerTestBlock(2, 0, aliceHash, signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), charlie);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), charlie);
rv.setMessageType(TYPE_TBC);
return rv;
}
@@ -737,7 +742,7 @@ class PacketBuilder2 {
*/
public UDPPacket buildPeerTestToBob(int code, byte[] signedData, PeerState2 bob) {
Block block = new SSU2Payload.PeerTestBlock(3, code, null, signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), bob);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), bob);
rv.setMessageType(TYPE_TCB);
return rv;
}
@@ -751,7 +756,7 @@ class PacketBuilder2 {
*/
UDPPacket buildRelayRequest(byte[] signedData, PeerState2 bob) {
Block block = new SSU2Payload.RelayRequestBlock(signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), bob);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), bob);
rv.setMessageType(TYPE_RREQ);
rv.setPriority(PRIORITY_HIGH);
return rv;
@@ -766,7 +771,7 @@ class PacketBuilder2 {
*/
UDPPacket buildRelayIntro(byte[] signedData, PeerState2 charlie) {
Block block = new SSU2Payload.RelayIntroBlock(signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), charlie);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), charlie);
rv.setMessageType(TYPE_INTRO);
return rv;
}
@@ -781,7 +786,7 @@ class PacketBuilder2 {
*/
UDPPacket buildRelayResponse(byte[] signedData, PeerState2 state) {
Block block = new SSU2Payload.RelayResponseBlock(signedData);
UDPPacket rv = buildPacket(Collections.emptyList(), Collections.singletonList(block), state);
UDPPacket rv = buildPacket(Collections.<Fragment>emptyList(), Collections.singletonList(block), state);
rv.setMessageType(TYPE_RESP);
return rv;
}

@@ -208,7 +208,7 @@ public class PeerState {
protected final UDPTransport _transport;

/** have we migrated away from this peer to another newer one? */
private volatile boolean _dead;
protected volatile boolean _dead;

/** The minimum number of outstanding messages (NOT fragments/packets) */
private static final int MIN_CONCURRENT_MSGS = 8;
@@ -2214,6 +2214,17 @@ public class PeerState {
}
}

/**
* SSU 2 only
*
* @since 0.9.56
*/
protected boolean shouldRequestImmediateAck() {
synchronized(_sendWindowBytesRemainingLock) {
return _sendWindowBytesRemaining < _sendWindowBytes / 3;
}
}

/**
* Transfer the basic activity/state from the old peer to the current peer
*

@@ -5,6 +5,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -24,7 +25,10 @@ import net.i2p.data.i2np.I2NPMessageException;
import net.i2p.data.i2np.I2NPMessageImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.transport.TransportUtil;
import net.i2p.router.transport.udp.InboundMessageFragments.ModifiableLong;
import net.i2p.router.transport.udp.PacketBuilder.Fragment;

import static net.i2p.router.transport.udp.SSU2Util.*;
import net.i2p.util.HexDump;
import net.i2p.util.Log;
@@ -59,6 +63,8 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
*/
private final SSU2Bitfield _ackedMessages;
private final ConcurrentHashMap<Long, List<PacketBuilder.Fragment>> _sentMessages;
private final ACKTimer _ackTimer;

private long _sentMessagesLastExpired;
private byte[] _ourIP;
private int _ourPort;
@@ -68,6 +74,28 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
private long _sessConfSentTime;
private int _sessConfSentCount;

// Connection Migration, synch on _migrationLock
private enum MigrationState {
MIGRATION_STATE_NONE,
MIGRATION_STATE_PENDING,
// unused below here
MIGRATION_STATE_CANCELLED,
MIGRATION_STATE_FAILED,
MIGRATION_STATE_SUCCESS
}
private final Object _migrationLock = new Object();
private MigrationState _migrationState = MigrationState.MIGRATION_STATE_NONE;
private long _migrationStarted;
private long _migrationNextSendTime;
private byte[] _pathChallengeData;
private long _pathChallengeSendCount;
private RemoteHostId _pendingRemoteHostId;
private RemoteHostId _previousRemoteHostId;
private static final int MAX_PATH_CHALLENGE_SENDS = 4;
private static final long MAX_PATH_CHALLENGE_TIME = 30*1000;
private static final long PATH_CHALLENGE_DELAY = 5*1000;


// As SSU
public static final int MIN_SSU_IPV4_MTU = 1292;
public static final int MAX_SSU_IPV4_MTU = 1484;
@@ -114,6 +142,7 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
// For outbound, SessionConfirmed is packet 0
_packetNumber.set(1);
}
_ackTimer = new ACKTimer();
}

// SSU 1 overrides
@@ -171,7 +200,7 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
protected synchronized void messagePartiallyReceived(long now) {
if (_wantACKSendSince <= 0) {
_wantACKSendSince = now;
new ACKTimer();
_ackTimer.schedule();
}
}

@@ -318,15 +347,9 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
*
* @param from source address
* @param packet fully encrypted, header and body decryption will be done here
* @since 0.9.56
* @since 0.9.55
*/
void receivePacket(RemoteHostId from, UDPPacket packet) {
if (!from.equals(_remoteHostId)) {
if (_log.shouldWarn())
_log.warn("Got packet from " + from + " expected " + _remoteHostId + " on " + this);
// Connection Migration TODO
}

DatagramPacket dpacket = packet.getPacket();
byte[] data = dpacket.getData();
int off = dpacket.getOffset();
@@ -386,17 +409,120 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
_log.warn("dup pkt rcvd: " + n + " on " + this);
return;
}

int payloadLen = len - (SHORT_HEADER_SIZE + MAC_LEN);
if (_log.shouldDebug())
_log.debug("New " + len + " byte pkt " + n + " rcvd on " + this);
SSU2Payload.processPayload(_context, this, data, off + SHORT_HEADER_SIZE, payloadLen, false, from);
packetReceived(payloadLen);

if (!_dead) {
// Connection Migration
// We process packets regardless of source.
// This is after all the header checks, decryption, and payload processing.
// Any failures will have thrown or returned before here.
// Path Response callback is before this and will reset the state if successful.

boolean limitSending = false;
synchronized(_migrationLock) {
switch(_migrationState) {
case MIGRATION_STATE_NONE:
if (!from.equals(_remoteHostId)) {
// QUIC: Must be highest set to protect against reordered packets
if (SSU2Util.ENABLE_PATH_CHALLENGE &&
from.getIP().length == _remoteHostId.getIP().length &&
n == _receivedMessages.getHighestSet() &&
TransportUtil.isValidPort(from.getPort()) &&
_transport.isValid(from.getIP())) {
// send challenge
if (_log.shouldWarn())
_log.warn("Start migration to " + from + " on " + this);
_migrationState = MigrationState.MIGRATION_STATE_PENDING;
_migrationStarted = _context.clock().now();
_migrationNextSendTime = _migrationStarted + PATH_CHALLENGE_DELAY;
_pathChallengeData = new byte[8];
_context.random().nextBytes(_pathChallengeData);
_pathChallengeSendCount = 1;
_pendingRemoteHostId = from;
sendPathChallenge(dpacket.getAddress(), from.getPort());
} else {
// don't attempt to switch
if (_log.shouldWarn())
_log.warn("Not migrating to " + from + " on " + this);
}
limitSending = true;
}
break;

case MIGRATION_STATE_PENDING:
if (from.equals(_remoteHostId)) {
// cancel
_migrationState = MigrationState.MIGRATION_STATE_NONE;
if (_log.shouldWarn())
_log.warn("Cancel migration on " + this);
} else {
// still waiting
long now = _context.clock().now();
if (now > _migrationStarted + MAX_PATH_CHALLENGE_TIME ||
_pathChallengeSendCount > MAX_PATH_CHALLENGE_SENDS) {
// time exceeded
_migrationState = MigrationState.MIGRATION_STATE_NONE;
if (_log.shouldWarn())
_log.warn("Migration failed on " + this);
} else if (from.equals(_pendingRemoteHostId)) {
if (_log.shouldWarn())
_log.warn("Migration pending, got another packet from " + from + " on " + this);
if (now > _migrationNextSendTime) {
// retransmit challenge
_migrationNextSendTime = now + (PATH_CHALLENGE_DELAY << _pathChallengeSendCount);
_pathChallengeSendCount++;
sendPathChallenge(dpacket.getAddress(), from.getPort());
}
limitSending = true;
} else {
// a third ip/port ???
if (_log.shouldWarn())
_log.warn("Migration pending, got packet from 3rd address " + from + " on " + this);
limitSending = true;
}
}
break;
}
}
if (limitSending)
ECNReceived();
} //// !_dead

boolean ackImmediate = (header.data[SHORT_HEADER_FLAGS_OFFSET] & 0x01) != 0 && _context.getBooleanProperty("ssu2.ackImmediate");
if (ackImmediate) {
_ackTimer.scheduleImmediate();
}

} catch (Exception e) {
if (_log.shouldWarn())
_log.warn("Bad encrypted packet on: " + this + '\n' + HexDump.dump(data, off, len), e);
}
}

/**
* Caller must synch on _migrationState
* @since 0.9.56
*/
private void sendPathChallenge(InetAddress toIP, int toPort) {
if (_log.shouldWarn())
_log.warn("Send path challenge to " + toIP + ' ' + toPort + " on " + this);
List<SSU2Payload.Block> blocks = new ArrayList<SSU2Payload.Block>(3);
blocks.add(new SSU2Payload.DateTimeBlock(_context));
blocks.add(new SSU2Payload.AddressBlock(toIP.getAddress(), toPort));
blocks.add(new SSU2Payload.PathChallengeBlock(_pathChallengeData));
UDPPacket packet = _transport.getBuilder2().buildPacket(Collections.emptyList(), blocks, this);
// fix up IP/port
DatagramPacket pkt = packet.getPacket();
pkt.setAddress(toIP);
pkt.setPort(toPort);
_transport.send(packet);
}

/////////////////////////////////////////////////////////
// begin payload callbacks
/////////////////////////////////////////////////////////
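
The receivePacket() and sendPathChallenge() additions above are a QUIC-style path validation: when traffic starts arriving from a new IP/port, the router keeps the old path, sends an 8-byte random PATH CHALLENGE to the new address (retrying with exponential backoff, bounded by MAX_PATH_CHALLENGE_SENDS and MAX_PATH_CHALLENGE_TIME), and only switches after a matching PATH RESPONSE. A toy sketch of just that bookkeeping, detached from the transport; the class and method names here are illustrative, not from the diff:

import java.security.SecureRandom;
import java.util.Arrays;

class PathValidationSketch {
    private byte[] challenge;          // outstanding challenge data, null when no migration is pending
    private String pendingAddress;     // stand-in for RemoteHostId

    /** Packet arrived from an unexpected address: start validating, return the challenge to send there. */
    byte[] onNewAddress(String from) {
        if (challenge != null)
            return null;               // already validating another path
        challenge = new byte[8];
        new SecureRandom().nextBytes(challenge);
        pendingAddress = from;
        return challenge;
    }

    /** A PATH RESPONSE arrived: accept the new path only if it echoes our challenge from the pending address. */
    boolean onPathResponse(String from, byte[] data) {
        boolean ok = challenge != null && from.equals(pendingAddress) && Arrays.equals(data, challenge);
        if (ok) {
            challenge = null;          // validated; the caller would now change the peer's remote address
            pendingAddress = null;
        }
        return ok;
    }

    public static void main(String[] args) {
        PathValidationSketch v = new PathValidationSketch();
        byte[] c = v.onNewAddress("198.51.100.7:12345");
        System.out.println(v.onPathResponse("198.51.100.7:12345", c));   // true: migration accepted
    }
}
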
@@ -410,8 +536,8 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
}

public void gotRI(RouterInfo ri, boolean isHandshake, boolean flood) throws DataFormatException {
if (_log.shouldInfo())
_log.info("Got RI in data phase " + ri + "\non: " + this);
if (_log.shouldDebug())
_log.debug("Got RI in data phase " + ri + "\non: " + this);
try {
Hash h = ri.getHash();
if (h.equals(_context.routerHash()))
@@ -457,7 +583,7 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
}
if (tag > 0) {
SSU2Payload.Block block = new SSU2Payload.RelayTagBlock(tag);
UDPPacket pkt = _transport.getBuilder2().buildPacket(Collections.emptyList(),
UDPPacket pkt = _transport.getBuilder2().buildPacket(Collections.<Fragment>emptyList(),
Collections.singletonList(block),
this);
_transport.send(pkt);
@@ -643,13 +769,14 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
_transport.send(pkt);
}
_transport.getEstablisher().receiveSessionDestroy(_remoteHostId, this);
_dead = true;
}

public void gotPathChallenge(RemoteHostId from, byte[] data) {
if (_log.shouldInfo())
_log.info("Got PATH CHALLENGE block, length: " + data.length + " on " + this);
SSU2Payload.Block block = new SSU2Payload.PathResponseBlock(data);
UDPPacket pkt = _transport.getBuilder2().buildPacket(Collections.emptyList(),
UDPPacket pkt = _transport.getBuilder2().buildPacket(Collections.<Fragment>emptyList(),
Collections.singletonList(block),
this);
// TODO send to from address?
@@ -659,13 +786,51 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
public void gotPathResponse(RemoteHostId from, byte[] data) {
if (_log.shouldInfo())
_log.info("Got PATH RESPONSE block, length: " + data.length + " on " + this);
// TODO
synchronized(_migrationLock) {
switch(_migrationState) {
case MIGRATION_STATE_PENDING:
if (from.equals(_pendingRemoteHostId) && DataHelper.eq(data, _pathChallengeData)) {
// success
_migrationState = MigrationState.MIGRATION_STATE_NONE;
_pathChallengeData = null;
if (_log.shouldWarn())
_log.warn("Migration successful, changed address from " + _remoteHostId + " to " + from + " for " + this);
_transport.changePeerAddress(this, from);
_mtu = MIN_MTU;
EstablishmentManager.Token token = _transport.getEstablisher().getInboundToken(from);
SSU2Payload.Block block = new SSU2Payload.NewTokenBlock(token.token, token.expires);
UDPPacket pkt = _transport.getBuilder2().buildPacket(Collections.emptyList(),
Collections.singletonList(block),
this);
_transport.send(pkt);
} else {
// caller will handle
// ACK-eliciting
messagePartiallyReceived();
}
break;

default:
messagePartiallyReceived();
break;
}
}
}

/////////////////////////////////////////////////////////
// end payload callbacks
/////////////////////////////////////////////////////////

/**
* Caller should sync; UDPTransport must remove and add to peersByRemoteHost map
* @since 0.9.56
*/
void changeAddress(RemoteHostId id) {
_previousRemoteHostId = _remoteHostId;
_remoteHostId = id;
_remotePort = id.getPort();
}

/**
* Do what MessageReceiver does, but inline and for SSU2.
* Will always be more than one fragment.
@@ -801,16 +966,50 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
return rv;
}

/**
* Flag byte to be sent in header
*
* @since 0.9.56
*/
byte getFlags() {
return shouldRequestImmediateAck() ? (byte) 0x01 : 0;
}

/**
* A timer to send an ack-only packet.
*/
private class ACKTimer extends SimpleTimer2.TimedEvent {

/**
* Caller must schedule
*/
public ACKTimer() {
super(_context.simpleTimer2());
}

/**
* Ack soon, based on the current RTT
*
* @since 0.9.56
*/
public void schedule() {
long delta = Math.max(10, Math.min(_rtt/6, ACK_FREQUENCY));
if (_log.shouldDebug())
_log.debug("Sending delayed ack in " + delta + ": " + PeerState2.this);
schedule(delta);
reschedule(delta, true);
}

/**
* Ack almost immediately
*
* @since 0.9.56
*/
public void scheduleImmediate() {
_wantACKSendSince = _context.clock().now();
long delta = Math.min(_rtt/16, 5);
if (_log.shouldDebug())
_log.debug("Sending immediate ack in " + delta + ": " + PeerState2.this);
reschedule(delta, true);
}

/**

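The ACKTimer above waits roughly a sixth of the measured RTT before sending a pure-ack packet, clamped between 10 ms and ACK_FREQUENCY, while scheduleImmediate() still allows a few milliseconds so several fragments can share one ack. A one-method sketch of the delayed case as I read it; ACK_FREQUENCY's value is not shown in this excerpt, so the 250 ms used below is only a placeholder:

public class AckDelaySketch {
    /** Delayed-ack wait per schedule() above: about RTT/6, clamped to [10 ms, ackFrequency]. */
    static long delayedAckWait(long rttMillis, long ackFrequency) {
        return Math.max(10, Math.min(rttMillis / 6, ackFrequency));
    }

    public static void main(String[] args) {
        // A 300 ms RTT with a placeholder 250 ms cap yields a 50 ms wait before the ack-only packet.
        System.out.println(delayedAckWait(300, 250));
    }
}
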
@@ -21,7 +21,7 @@ final class SSU2Util {
// features
public static final boolean ENABLE_RELAY = true;
public static final boolean ENABLE_PEER_TEST = true;
public static final boolean ENABLE_PATH_CHALLENGE = false;
public static final boolean ENABLE_PATH_CHALLENGE = true;

// lengths
/** 32 */

@@ -428,7 +428,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
String s = null;
// try to determine if we've been down for 30 days or more
long minDowntime = _context.router().isHidden() ? MIN_DOWNTIME_TO_REKEY_HIDDEN : MIN_DOWNTIME_TO_REKEY;
boolean shouldRekey = _context.getEstimatedDowntime() >= minDowntime;
boolean shouldRekey = !allowLocal() && _context.getEstimatedDowntime() >= minDowntime;
if (!shouldRekey) {
s = ctx.getProperty(PROP_SSU2_SP);
if (s != null) {
@@ -1645,7 +1645,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority

/**
* Get the state by SSU2 connection ID
* @since 0.9.56
* @since 0.9.55
*/
PeerState2 getPeerState(long rcvConnID) {
return _peersByConnID.get(Long.valueOf(rcvConnID));
@@ -1690,6 +1690,24 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_log.info("Changed port from " + oldPort + " to " + newPort + " for " + peer);
}

/**
* Remove and add to peersByRemoteHost map
* @since 0.9.56
*/
void changePeerAddress(PeerState2 peer, RemoteHostId newAddress) {
RemoteHostId oldAddress;
synchronized (_addDropLock) {
oldAddress = peer.getRemoteHostId();
if (!oldAddress.equals(newAddress)) {
_peersByRemoteHost.remove(oldAddress);
peer.changeAddress(newAddress);
_peersByRemoteHost.put(newAddress, peer);
}
}
if (_log.shouldInfo() && !oldAddress.equals(newAddress))
_log.info("Changed address from " + oldAddress + " to " + newAddress + " for " + peer);
}

/**
* For IntroductionManager
* @return may be null if not started
@@ -2772,7 +2790,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
options.setProperty(UDPAddress.PROP_CAPACITY, caps);
if (mtu != PeerState.LARGE_MTU && mtu > 0)
options.setProperty(UDPAddress.PROP_MTU, Integer.toString(mtu));
if (_enableSSU2)
if (_enableSSU2 && (mtu >= PeerState2.MIN_MTU || mtu == 0))
addSSU2Options(options);
RouterAddress current = getCurrentAddress(false);
RouterAddress addr = new RouterAddress(STYLE, options, SSU_OUTBOUND_COST);
@@ -2864,7 +2882,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
else if (config == IPV6_NOT_PREFERRED)
cost++;
}
if (_enableSSU2)
if (_enableSSU2 && (mtu >= PeerState2.MIN_MTU || mtu == 0))
addSSU2Options(options);
RouterAddress addr = new RouterAddress(STYLE, options, cost);

@@ -2934,7 +2952,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
opts.setProperty(UDPAddress.PROP_CAPACITY, isIPv6 ? CAP_IPV6 : CAP_IPV4);
if (mtu != PeerState.LARGE_MTU && mtu > 0)
opts.setProperty(UDPAddress.PROP_MTU, Integer.toString(mtu));
if (_enableSSU2)
if (_enableSSU2 && (mtu >= PeerState2.MIN_MTU || mtu == 0))
addSSU2Options(opts);
RouterAddress addr = new RouterAddress(STYLE, opts, SSU_OUTBOUND_COST);
RouterAddress current = getCurrentAddress(isIPv6);

@@ -40,6 +40,7 @@ public class EventLog {
public static final String CLOCK_SHIFT = "clockShift";
public static final String CRASHED = "crashed";
public static final String CRITICAL = "critical";
public static final String DEADLOCK = "deadlock";
public static final String INSTALLED = "installed";
public static final String INSTALL_FAILED = "installFailed";
public static final String NETWORK = "network";