Compare commits

...

13 Commits

Author SHA1 Message Date
jrandom
589cbd675a * 2006-02-27 0.6.1.12 released
2006-02-27  jrandom
    * Adjust the jbigi.jar to use the athlon-optimized jbigi on windows/amd64
      machines, rather than the generic jbigi (until we have an athlon64
      optimized version)
2006-02-27 19:05:40 +00:00
jrandom
c486f5980a * 2006-02-27 0.6.1.12 released
2006-02-27  jrandom
    * Adjust the jbigi.jar to use the athlon-optimized jbigi on windows/amd64
      machines, rather than the generic jbigi (until we have an athlon64
      optimized version)
2006-02-27 18:51:31 +00:00
jrandom
eee21aa301 2006-02-26 jrandom
* Switch from the bouncycastle to the gnu-crypto implementation for
      SHA256, as benchmarks show a 10-30% speedup.
    * Removed some unnecessary object caches
    * Don't close i2psnark streams prematurely
2006-02-26 21:30:56 +00:00
zzz
a2854cf6f6 2006-02-25 zzz spelling fix 2006-02-25 21:51:46 +00:00
jrandom
62b7cf64da 2006-02-25 jrandom
* Made the Syndie permalinks in the thread view point to the blog view
    * Disabled TCP again (since the live net seems to be doing well w/out it)
    * Fix the message time on inbound SSU establishment (thanks zzz!)
    * Don't be so aggressive with parallel tunnel creation when a tunnel pool
      just starts up
2006-02-25 20:41:51 +00:00
jrandom
7b2a435aad 2006-02-24 jrandom
* Rounding calculation cleanup in the stats, and avoid an uncontested
      mutex (thanks ripple!)
    * SSU handshake cleanup to help force incompatible peers to stop nagging
      us by both not giving them an updated reference to us and by dropping
      future handshake packets from them.
2006-02-24 09:35:52 +00:00
jrandom
3d8d21e543 2006-02-23 jrandom
* Increase the SSU retransmit ceiling (for slow links)
    * Estimate the sender's SSU MTU (to help see if we agree)
2006-02-23 14:38:39 +00:00
jrandom
8b7958cff2 2006-02-22 jrandom
* Fix to properly profile tunnel joins (thanks Ragnarok, frosk, et al!)
    * More aggressive poor-man's PMTU, allowing larger MTUs on less reliable
      links
    * Further class validator refactorings
2006-02-23 08:08:37 +00:00
jrandom
7bb792836d 2006-02-22 jrandom
* Fix to properly profile tunnel joins (thanks Ragnarok, frosk, et al!)
    * More aggressive poor-man's PMTU, allowing larger MTUs on less reliable
      links
    * Further class validator refactorings
2006-02-23 01:48:47 +00:00
jrandom
03f509ca54 2006-02-22 jrandom
* Handle a rare race under high bandwidth situations in the SSU transport
    * Minor refactoring so we don't confuse sun's 1.6.0-b2 validator
2006-02-22 14:54:22 +00:00
complication
5f05631936 2006-02-21 Complication
* Reactivate TCP transport by default, in addition to re-allowing
2006-02-22 06:19:19 +00:00
zzz
5cfedd4c8b 2006-02-21 zzz update 2006-02-22 03:34:02 +00:00
zzz
269fec64a5 2006-02-21 zzz
announce 0.6.1.11
2006-02-21 20:12:14 +00:00
46 changed files with 1529 additions and 1007 deletions

View File

@@ -93,9 +93,9 @@ public class I2PSnarkUtil {
if (opts.getProperty("i2p.streaming.inactivityTimeout") == null)
opts.setProperty("i2p.streaming.inactivityTimeout", "90000");
if (opts.getProperty("i2p.streaming.inactivityAction") == null)
opts.setProperty("i2p.streaming.inactivityAction", "1");
if (opts.getProperty("i2p.streaming.writeTimeout") == null)
opts.setProperty("i2p.streaming.writeTimeout", "90000");
opts.setProperty("i2p.streaming.inactivityAction", "2"); // 1 == disconnect, 2 == ping
//if (opts.getProperty("i2p.streaming.writeTimeout") == null)
// opts.setProperty("i2p.streaming.writeTimeout", "90000");
//if (opts.getProperty("i2p.streaming.readTimeout") == null)
// opts.setProperty("i2p.streaming.readTimeout", "120000");
_manager = I2PSocketManagerFactory.createManager(_i2cpHost, _i2cpPort, opts);

View File

@@ -127,7 +127,7 @@ public class I2PSnarkServlet extends HttpServlet {
}
} else if ( (newURL != null) && (newURL.trim().length() > "http://.i2p/".length()) ) {
_manager.addMessage("Fetching " + newURL);
I2PThread fetch = new I2PThread(new FetchAndAdd(newURL), "Fetch and add");
I2PThread fetch = new I2PThread(new FetchAndAdd(_manager, newURL), "Fetch and add");
fetch.start();
} else {
// no file or URL specified
@@ -267,56 +267,6 @@ public class I2PSnarkServlet extends HttpServlet {
}
}
private class FetchAndAdd implements Runnable {
private String _url;
public FetchAndAdd(String url) {
_url = url;
}
public void run() {
_url = _url.trim();
File file = I2PSnarkUtil.instance().get(_url, false);
try {
if ( (file != null) && (file.exists()) && (file.length() > 0) ) {
_manager.addMessage("Torrent fetched from " + _url);
FileInputStream in = null;
try {
in = new FileInputStream(file);
MetaInfo info = new MetaInfo(in);
String name = info.getName();
name = name.replace('/', '_');
name = name.replace('\\', '_');
name = name.replace('&', '+');
name = name.replace('\'', '_');
name = name.replace('"', '_');
name = name.replace('`', '_');
name = name + ".torrent";
File torrentFile = new File(_manager.getDataDir(), name);
String canonical = torrentFile.getCanonicalPath();
if (torrentFile.exists()) {
if (_manager.getTorrent(canonical) != null)
_manager.addMessage("Torrent already running: " + name);
else
_manager.addMessage("Torrent already in the queue: " + name);
} else {
FileUtil.copy(file.getAbsolutePath(), canonical, true);
_manager.addTorrent(canonical);
}
} catch (IOException ioe) {
_manager.addMessage("Torrent at " + _url + " was not valid: " + ioe.getMessage());
} finally {
try { in.close(); } catch (IOException ioe) {}
}
} else {
_manager.addMessage("Torrent was not retrieved from " + _url);
}
} finally {
if (file != null) file.delete();
}
}
}
private List getSortedSnarks(HttpServletRequest req) {
Set files = _manager.listTorrentFiles();
TreeSet fileNames = new TreeSet(files); // sorts it alphabetically
@@ -635,4 +585,57 @@ public class I2PSnarkServlet extends HttpServlet {
private static final String TABLE_FOOTER = "</table>\n";
private static final String FOOTER = "</body></html>";
}
}
/**
 * Background task that fetches a .torrent file from a URL (through the
 * I2PSnarkUtil proxy), sanitizes the torrent's name for use as a filename,
 * copies it into the manager's data directory, and registers it with the
 * SnarkManager.  All progress and error reporting goes through
 * SnarkManager.addMessage(); the temporary download file is always deleted.
 */
class FetchAndAdd implements Runnable {
    /** Manager used for messages, the data dir, and torrent registration. */
    private final SnarkManager _manager;
    /** HTTP URL of the .torrent file to fetch (trimmed before use). */
    private final String _url;

    /**
     * @param mgr manager to report to and register the torrent with
     * @param url URL of the .torrent file
     */
    public FetchAndAdd(SnarkManager mgr, String url) {
        _manager = mgr;
        _url = url;
    }

    public void run() {
        String url = _url.trim();
        File file = I2PSnarkUtil.instance().get(url, false);
        try {
            if ( (file != null) && (file.exists()) && (file.length() > 0) ) {
                _manager.addMessage("Torrent fetched from " + url);
                FileInputStream in = null;
                try {
                    in = new FileInputStream(file);
                    MetaInfo info = new MetaInfo(in);
                    // replace characters that are unsafe or ambiguous in filenames
                    String name = info.getName();
                    name = name.replace('/', '_');
                    name = name.replace('\\', '_');
                    name = name.replace('&', '+');
                    name = name.replace('\'', '_');
                    name = name.replace('"', '_');
                    name = name.replace('`', '_');
                    name = name + ".torrent";
                    File torrentFile = new File(_manager.getDataDir(), name);
                    String canonical = torrentFile.getCanonicalPath();
                    if (torrentFile.exists()) {
                        if (_manager.getTorrent(canonical) != null)
                            _manager.addMessage("Torrent already running: " + name);
                        else
                            _manager.addMessage("Torrent already in the queue: " + name);
                    } else {
                        FileUtil.copy(file.getAbsolutePath(), canonical, true);
                        _manager.addTorrent(canonical);
                    }
                } catch (IOException ioe) {
                    _manager.addMessage("Torrent at " + url + " was not valid: " + ioe.getMessage());
                } finally {
                    // bugfix: guard against NPE - if the FileInputStream
                    // constructor threw, 'in' is still null here
                    if (in != null) {
                        try { in.close(); } catch (IOException ioe) {}
                    }
                }
            } else {
                _manager.addMessage("Torrent was not retrieved from " + url);
            }
        } finally {
            // always clean up the temporary download file
            if (file != null) file.delete();
        }
    }
}

View File

@@ -49,7 +49,7 @@
<input type="checkbox" name="enableloadtesting" value="true" <jsp:getProperty name="nethelper" property="enableLoadTesting" /> />
<p>If enabled, your router will periodically anonymously probe some of your peers
to see what sort of throughput they can handle. This improves your router's ability
to pick faster peers, but can cost substantial bandwidth. Relevent data from the
to pick faster peers, but can cost substantial bandwidth. Relevant data from the
load testing is fed into the profiles as well as the
<a href="oldstats.jsp#test.rtt">test.rtt</a> and related stats.</p>
<hr />

View File

@@ -34,7 +34,9 @@ public class BlogRenderer extends HTMLRenderer {
}
public void receiveHeaderEnd() {
_preBodyBuffer.append("<div class=\"syndieBlogPost\"><hr style=\"display: none\" />\n");
_preBodyBuffer.append("<div class=\"syndieBlogPost\" id=\"");
_preBodyBuffer.append(_entry.getURI().getKeyHash().toBase64()).append('/').append(_entry.getURI().getEntryId());
_preBodyBuffer.append("\"><hr style=\"display: none\" />\n");
_preBodyBuffer.append("<div class=\"syndieBlogPostHeader\">\n");
_preBodyBuffer.append("<div class=\"syndieBlogPostSubject\">");
String subject = (String)_headers.get(HEADER_SUBJECT);
@@ -160,12 +162,24 @@ public class BlogRenderer extends HTMLRenderer {
protected String getEntryURL(boolean showImages) {
return getEntryURL(_entry, _blog, showImages);
}
static String getEntryURL(EntryContainer entry, BlogInfo blog, boolean showImages) {
static String getEntryURL(EntryContainer entry, BlogInfo blog, boolean showImages) {
if (entry == null) return "unknown";
return "blog.jsp?"
+ ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&amp;"
+ ViewBlogServlet.PARAM_ENTRY + "="
+ Base64.encode(entry.getURI().getKeyHash().getData()) + '/' + entry.getURI().getEntryId();
return getEntryURL(entry.getURI(), blog, null, showImages);
}
static String getEntryURL(BlogURI entry, BlogInfo blog, BlogURI comment, boolean showImages) {
if (entry == null) return "unknown";
if (comment == null) {
return "blog.jsp?"
+ ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&amp;"
+ ViewBlogServlet.PARAM_ENTRY + "="
+ Base64.encode(entry.getKeyHash().getData()) + '/' + entry.getEntryId();
} else {
return "blog.jsp?"
+ ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&amp;"
+ ViewBlogServlet.PARAM_ENTRY + "="
+ Base64.encode(entry.getKeyHash().getData()) + '/' + entry.getEntryId()
+ '#' + Base64.encode(comment.getKeyHash().getData()) + '/' + comment.getEntryId();
}
}
protected String getAttachmentURLBase() {
@@ -218,4 +232,4 @@ public class BlogRenderer extends HTMLRenderer {
buf.append(ViewBlogServlet.PARAM_OFFSET).append('=').append(pageNum*numPerPage).append("&amp;");
return buf.toString();
}
}
}

View File

@@ -122,36 +122,52 @@ public class ThreadedHTMLRenderer extends HTMLRenderer {
public static String getViewPostLink(String uri, ThreadNode node, User user, boolean isPermalink,
String offset, String tags, String author, boolean authorOnly) {
StringBuffer buf = new StringBuffer(64);
buf.append(uri);
if (node.getChildCount() > 0) {
buf.append('?').append(PARAM_VISIBLE).append('=');
ThreadNode child = node.getChild(0);
buf.append(child.getEntry().getKeyHash().toBase64()).append('/');
buf.append(child.getEntry().getEntryId()).append('&');
if (isPermalink) {
// link to the blog view of the original poster
BlogURI rootBlog = null;
ThreadNode parent = node;
while (parent != null) {
if (parent.getParent() != null) {
parent = parent.getParent();
} else {
rootBlog = parent.getEntry();
break;
}
}
BlogInfo root = BlogManager.instance().getArchive().getBlogInfo(rootBlog.getKeyHash());
return BlogRenderer.getEntryURL(parent.getEntry(), root, node.getEntry(), true);
} else {
buf.append('?').append(PARAM_VISIBLE).append('=');
StringBuffer buf = new StringBuffer(64);
buf.append(uri);
if (node.getChildCount() > 0) {
buf.append('?').append(PARAM_VISIBLE).append('=');
ThreadNode child = node.getChild(0);
buf.append(child.getEntry().getKeyHash().toBase64()).append('/');
buf.append(child.getEntry().getEntryId()).append('&');
} else {
buf.append('?').append(PARAM_VISIBLE).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
}
buf.append(PARAM_VIEW_POST).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
if (!isPermalink) {
if (!empty(offset))
buf.append(PARAM_OFFSET).append('=').append(offset).append('&');
if (!empty(tags))
buf.append(PARAM_TAGS).append('=').append(tags).append('&');
}
if (authorOnly && !empty(author)) {
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(PARAM_THREAD_AUTHOR).append("=true&");
} else if (!isPermalink && !empty(author))
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
return buf.toString();
}
buf.append(PARAM_VIEW_POST).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
if (!isPermalink) {
if (!empty(offset))
buf.append(PARAM_OFFSET).append('=').append(offset).append('&');
if (!empty(tags))
buf.append(PARAM_TAGS).append('=').append(tags).append('&');
}
if (authorOnly && !empty(author)) {
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(PARAM_THREAD_AUTHOR).append("=true&");
} else if (!isPermalink && !empty(author))
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
return buf.toString();
}
public static String getViewPostLink(String uri, BlogURI post, User user, boolean isPermalink,
@@ -272,8 +288,7 @@ public class ThreadedHTMLRenderer extends HTMLRenderer {
out.write("\n<a href=\"");
out.write(getViewPostLink(baseURI, node, user, true, offset, requestTags, filteredAuthor, authorOnly));
out.write("\" title=\"Select a shareable link directly to this post\">permalink</a>\n");
out.write("\" title=\"Select a link directly to this post within the blog\">permalink</a>\n");
if (true || (!inlineReply) ) {
String refuseReply = (String)_headers.get(HEADER_REFUSE_REPLIES);

View File

@@ -0,0 +1,198 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: BaseHash.java,v 1.10 2005/10/06 04:24:14 rsdio Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
 * <p>A base abstract class to facilitate hash implementations.</p>
 *
 * <p>Implements the block-buffering state machine common to iterated hash
 * functions: input bytes accumulate in an internal block-sized buffer, and
 * {@link #transform} is invoked once per full block.  Concrete subclasses
 * supply the padding scheme ({@link #padBuffer}), the compression function
 * ({@link #transform}), and output extraction ({@link #getResult}).</p>
 *
 * <p>NOTE(review): instances are not thread-safe — the buffer and count are
 * mutated without synchronization; confine each instance to one thread.</p>
 *
 * @version $Revision: 1.10 $
 */
public abstract class BaseHash implements IMessageDigest {

    // Constants and variables
    // -------------------------------------------------------------------------

    /** The canonical name prefix of the hash. */
    protected String name;

    /** The hash (output) size in bytes. */
    protected int hashSize;

    /** The hash (inner) block size in bytes. */
    protected int blockSize;

    /** Number of bytes processed so far. */
    protected long count;

    /** Temporary input buffer; holds at most one partial block. */
    protected byte[] buffer;

    // Constructor(s)
    // -------------------------------------------------------------------------

    /**
     * <p>Trivial constructor for use by concrete subclasses.</p>
     *
     * @param name the canonical name prefix of this instance.
     * @param hashSize the size of the output in bytes.
     * @param blockSize the block size of the internal transform.
     */
    protected BaseHash(String name, int hashSize, int blockSize) {
        super();

        this.name = name;
        this.hashSize = hashSize;
        this.blockSize = blockSize;
        this.buffer = new byte[blockSize];

        resetContext();
    }

    // Class methods
    // -------------------------------------------------------------------------

    // Instance methods
    // -------------------------------------------------------------------------

    // IMessageDigest interface implementation ---------------------------------

    public String name() {
        return name;
    }

    public int hashSize() {
        return hashSize;
    }

    public int blockSize() {
        return blockSize;
    }

    /** Digests one byte, running the transform when the buffer fills. */
    public void update(byte b) {
        // compute number of bytes still unhashed; ie. present in buffer
        int i = (int)(count % blockSize);
        count++;
        buffer[i] = b;
        if (i == (blockSize - 1)) {
            transform(buffer, 0);
        }
    }

    public void update(byte[] b) {
        update(b, 0, b.length);
    }

    /**
     * Digests {@code len} bytes of {@code b} starting at {@code offset}.
     * Completes any previously buffered partial block first, then transforms
     * whole blocks directly out of the caller's array, and finally buffers
     * the remaining tail for the next call.
     */
    public void update(byte[] b, int offset, int len) {
        int n = (int)(count % blockSize); // bytes already sitting in buffer
        count += len;
        int partLen = blockSize - n;      // room left in buffer
        int i = 0;

        if (len >= partLen) {
            // top up and flush the buffered partial block
            System.arraycopy(b, offset, buffer, n, partLen);
            transform(buffer, 0);
            // hash full blocks straight from the input array
            for (i = partLen; i + blockSize - 1 < len; i+= blockSize) {
                transform(b, offset + i);
            }
            n = 0;
        }

        // buffer whatever is left over for a later call
        if (i < len) {
            System.arraycopy(b, offset + i, buffer, n, len - i);
        }
    }

    /** Pads, runs the final transform, extracts the result, and resets. */
    public byte[] digest() {
        byte[] tail = padBuffer(); // pad remaining bytes in buffer
        update(tail, 0, tail.length); // last transform of a message
        byte[] result = getResult(); // make a result out of context

        reset(); // reset this instance for future re-use

        return result;
    }

    public void reset() { // reset this instance for future re-use
        count = 0L;
        for (int i = 0; i < blockSize; ) {
            buffer[i++] = 0;
        }

        resetContext();
    }

    // methods to be implemented by concrete subclasses ------------------------

    public abstract Object clone();

    public abstract boolean selfTest();

    /**
     * <p>Returns the byte array to use as padding before completing a hash
     * operation.</p>
     *
     * @return the bytes to pad the remaining bytes in the buffer before
     * completing a hash operation.
     */
    protected abstract byte[] padBuffer();

    /**
     * <p>Constructs the result from the contents of the current context.</p>
     *
     * @return the output of the completed hash operation.
     */
    protected abstract byte[] getResult();

    /** Resets the instance for future re-use. */
    protected abstract void resetContext();

    /**
     * <p>The block digest transformation per se.</p>
     *
     * @param in the <i>blockSize</i> long block, as an array of bytes to digest.
     * @param offset the index where the data to digest is located within the
     * input buffer.
     */
    protected abstract void transform(byte[] in, int offset);
}

View File

@@ -0,0 +1,141 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: IMessageDigest.java,v 1.11 2005/10/06 04:24:14 rsdio Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
 * <p>The basic visible methods of any hash algorithm.</p>
 *
 * <p>A hash (or message digest) algorithm produces its output by iterating a
 * basic compression function on blocks of data.</p>
 *
 * @version $Revision: 1.11 $
 */
public interface IMessageDigest extends Cloneable {

    // Constants
    // -------------------------------------------------------------------------

    // Methods
    // -------------------------------------------------------------------------

    /**
     * <p>Returns the canonical name of this algorithm.</p>
     *
     * @return the canonical name of this instance.
     */
    String name();

    /**
     * <p>Returns the output length in bytes of this message digest algorithm.</p>
     *
     * @return the output length in bytes of this message digest algorithm.
     */
    int hashSize();

    /**
     * <p>Returns the algorithm's (inner) block size in bytes.</p>
     *
     * @return the algorithm's inner block size in bytes.
     */
    int blockSize();

    /**
     * <p>Continues a message digest operation using the input byte.</p>
     *
     * @param b the input byte to digest.
     */
    void update(byte b);

    /**
     * <p>Continues a message digest operation, by filling the buffer, processing
     * data in the algorithm's HASH_SIZE-bit block(s), updating the context and
     * count, and buffering the remaining bytes in buffer for the next
     * operation.</p>
     *
     * <p>Equivalent to {@code update(in, 0, in.length)}.</p>
     *
     * @param in the input block.
     */
    void update(byte[] in);

    /**
     * <p>Continues a message digest operation, by filling the buffer, processing
     * data in the algorithm's HASH_SIZE-bit block(s), updating the context and
     * count, and buffering the remaining bytes in buffer for the next
     * operation.</p>
     *
     * @param in the input block.
     * @param offset start of meaningful bytes in input block.
     * @param length number of bytes, in input block, to consider.
     */
    void update(byte[] in, int offset, int length);

    /**
     * <p>Completes the message digest by performing final operations such as
     * padding and resetting the instance.</p>
     *
     * <p>After this call the instance is reset and may be reused for a new
     * message.</p>
     *
     * @return the array of bytes representing the hash value.
     */
    byte[] digest();

    /**
     * <p>Resets the current context of this instance clearing any eventually cached
     * intermediary values.</p>
     */
    void reset();

    /**
     * <p>A basic test. Ensures that the digest of a pre-determined message is equal
     * to a known pre-computed value.</p>
     *
     * @return <tt>true</tt> if the implementation passes a basic self-test.
     * Returns <tt>false</tt> otherwise.
     */
    boolean selfTest();

    /**
     * <p>Returns a clone copy of this instance.</p>
     *
     * @return a clone copy of this instance.
     */
    Object clone();
}

View File

@@ -0,0 +1,262 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: Sha256.java,v 1.2 2005/10/06 04:24:14 rsdio Exp $
//
// Copyright (C) 2003 Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
 * <p>Implementation of SHA-256 per the IETF Draft Specification.</p>
 *
 * <p>References:</p>
 * <ol>
 *    <li><a href="http://ftp.ipv4.heanet.ie/pub/ietf/internet-drafts/draft-ietf-ipsec-ciph-aes-cbc-03.txt">
 *    Descriptions of SHA-256, SHA-384, and SHA-512</a>,</li>
 *    <li>http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf</li>
 * </ol>
 *
 * Modified by jrandom@i2p.net to remove unnecessary gnu-crypto dependencies, and
 * renamed from Sha256 to avoid conflicts with JVMs using gnu-crypto as their JCE
 * provider.
 *
 * @version $Revision: 1.2 $
 */
public class Sha256Standalone extends BaseHash {
    // Constants and variables
    // -------------------------------------------------------------------------

    /** SHA-256 round constants, per FIPS 180-2. */
    private static final int[] k = {
        0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
        0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
        0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
        0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
        0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
        0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
        0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
        0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
        0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
        0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
        0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
        0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
        0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
        0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
        0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
        0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
    };

    private static final int BLOCK_SIZE = 64; // inner block size in bytes

    /** Known SHA-256 digest of the 3-byte message "abc" (FIPS 180-2 test vector). */
    private static final String DIGEST0 =
            "BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD";

    /** Uppercase hex alphabet; matches the case used by DIGEST0. */
    private static final String HEX = "0123456789ABCDEF";

    /**
     * Shared message schedule.  Because this scratch array is static, all
     * instances contend on it; sha() is synchronized to keep that safe.
     */
    private static final int[] w = new int[64];

    /** caches the result of the correctness test, once executed. */
    private static Boolean valid;

    /** 256-bit interim result. */
    private int h0, h1, h2, h3, h4, h5, h6, h7;

    // Constructor(s)
    // -------------------------------------------------------------------------

    /** Trivial 0-arguments constructor. */
    public Sha256Standalone() {
        super("sha256/standalone", 32, BLOCK_SIZE);
    }

    /**
     * <p>Private constructor for cloning purposes.</p>
     *
     * @param md the instance to clone.
     */
    private Sha256Standalone(Sha256Standalone md) {
        this();

        this.h0 = md.h0;
        this.h1 = md.h1;
        this.h2 = md.h2;
        this.h3 = md.h3;
        this.h4 = md.h4;
        this.h5 = md.h5;
        this.h6 = md.h6;
        this.h7 = md.h7;
        this.count = md.count;
        this.buffer = (byte[]) md.buffer.clone();
    }

    // Class methods
    // -------------------------------------------------------------------------

    /** Applies one SHA-256 compression to the given chaining values. */
    public static final int[] G(int hh0, int hh1, int hh2, int hh3, int hh4,
                                int hh5, int hh6, int hh7, byte[] in, int offset) {
        return sha(hh0, hh1, hh2, hh3, hh4, hh5, hh6, hh7, in, offset);
    }

    // Instance methods
    // -------------------------------------------------------------------------

    // java.lang.Cloneable interface implementation ----------------------------

    public Object clone() {
        return new Sha256Standalone(this);
    }

    // Implementation of concrete methods in BaseHash --------------------------

    /** Compresses one 64-byte block into the 256-bit chaining state. */
    protected void transform(byte[] in, int offset) {
        int[] result = sha(h0, h1, h2, h3, h4, h5, h6, h7, in, offset);

        h0 = result[0];
        h1 = result[1];
        h2 = result[2];
        h3 = result[3];
        h4 = result[4];
        h5 = result[5];
        h6 = result[6];
        h7 = result[7];
    }

    /** Builds the MD-strengthening pad: 0x80, zeros, then the 64-bit bit count. */
    protected byte[] padBuffer() {
        int n = (int) (count % BLOCK_SIZE);
        int padding = (n < 56) ? (56 - n) : (120 - n);
        byte[] result = new byte[padding + 8];

        // padding is always binary 1 followed by binary 0s
        result[0] = (byte) 0x80;

        // save number of bits, casting the long to an array of 8 bytes
        long bits = count << 3;
        result[padding++] = (byte)(bits >>> 56);
        result[padding++] = (byte)(bits >>> 48);
        result[padding++] = (byte)(bits >>> 40);
        result[padding++] = (byte)(bits >>> 32);
        result[padding++] = (byte)(bits >>> 24);
        result[padding++] = (byte)(bits >>> 16);
        result[padding++] = (byte)(bits >>> 8);
        result[padding  ] = (byte) bits;

        return result;
    }

    /** Serializes the chaining state as the 32-byte big-endian digest. */
    protected byte[] getResult() {
        return new byte[] {
            (byte)(h0 >>> 24), (byte)(h0 >>> 16), (byte)(h0 >>> 8), (byte) h0,
            (byte)(h1 >>> 24), (byte)(h1 >>> 16), (byte)(h1 >>> 8), (byte) h1,
            (byte)(h2 >>> 24), (byte)(h2 >>> 16), (byte)(h2 >>> 8), (byte) h2,
            (byte)(h3 >>> 24), (byte)(h3 >>> 16), (byte)(h3 >>> 8), (byte) h3,
            (byte)(h4 >>> 24), (byte)(h4 >>> 16), (byte)(h4 >>> 8), (byte) h4,
            (byte)(h5 >>> 24), (byte)(h5 >>> 16), (byte)(h5 >>> 8), (byte) h5,
            (byte)(h6 >>> 24), (byte)(h6 >>> 16), (byte)(h6 >>> 8), (byte) h6,
            (byte)(h7 >>> 24), (byte)(h7 >>> 16), (byte)(h7 >>> 8), (byte) h7
        };
    }

    protected void resetContext() {
        // magic SHA-256 initialisation constants
        h0 = 0x6a09e667;
        h1 = 0xbb67ae85;
        h2 = 0x3c6ef372;
        h3 = 0xa54ff53a;
        h4 = 0x510e527f;
        h5 = 0x9b05688c;
        h6 = 0x1f83d9ab;
        h7 = 0x5be0cd19;
    }

    /**
     * Hashes the 3-byte message "abc" and compares the result against the
     * FIPS 180-2 test vector, caching the outcome.
     *
     * @return true if this implementation produces the known digest.
     */
    public boolean selfTest() {
        if (valid == null) {
            Sha256Standalone md = new Sha256Standalone();
            md.update((byte) 0x61); // a
            md.update((byte) 0x62); // b
            md.update((byte) 0x63); // c
            // bugfix: this used to assign the placeholder "broken" (the
            // gnu.crypto.util.Util dependency was stripped), so selfTest()
            // could never return true.  Encode the digest locally instead.
            String result = toHex(md.digest());
            valid = new Boolean(DIGEST0.equals(result));
        }
        return valid.booleanValue();
    }

    /** Uppercase hex encoding of a byte array (replaces the stripped Util.toString). */
    private static String toHex(byte[] data) {
        StringBuffer buf = new StringBuffer(data.length * 2);
        for (int i = 0; i < data.length; i++) {
            buf.append(HEX.charAt((data[i] >>> 4) & 0xF));
            buf.append(HEX.charAt(data[i] & 0xF));
        }
        return buf.toString();
    }

    // SHA specific methods ----------------------------------------------------

    private static final synchronized int[]
    sha(int hh0, int hh1, int hh2, int hh3, int hh4, int hh5, int hh6, int hh7, byte[] in, int offset) {
        int A = hh0;
        int B = hh1;
        int C = hh2;
        int D = hh3;
        int E = hh4;
        int F = hh5;
        int G = hh6;
        int H = hh7;
        int r, T, T2;

        // load the block as 16 big-endian words
        for (r = 0; r < 16; r++) {
            w[r] = in[offset++] << 24 |
                   (in[offset++] & 0xFF) << 16 |
                   (in[offset++] & 0xFF) << 8 |
                   (in[offset++] & 0xFF);
        }
        // expand the message schedule to 64 words
        for (r = 16; r < 64; r++) {
            T = w[r - 2];
            T2 = w[r - 15];
            w[r] = (((T >>> 17) | (T << 15)) ^ ((T >>> 19) | (T << 13)) ^ (T >>> 10)) + w[r - 7] + (((T2 >>> 7) | (T2 << 25)) ^ ((T2 >>> 18) | (T2 << 14)) ^ (T2 >>> 3)) + w[r - 16];
        }
        // 64 compression rounds
        for (r = 0; r < 64; r++) {
            T = H + (((E >>> 6) | (E << 26)) ^ ((E >>> 11) | (E << 21)) ^ ((E >>> 25) | (E << 7))) + ((E & F) ^ (~E & G)) + k[r] + w[r];
            T2 = (((A >>> 2) | (A << 30)) ^ ((A >>> 13) | (A << 19)) ^ ((A >>> 22) | (A << 10))) + ((A & B) ^ (A & C) ^ (B & C));
            H = G;
            G = F;
            F = E;
            E = D + T;
            D = C;
            C = B;
            B = A;
            A = T + T2;
        }

        return new int[] {
            hh0 + A, hh1 + B, hh2 + C, hh3 + D, hh4 + E, hh5 + F, hh6 + G, hh7 + H
        };
    }
}

View File

@@ -54,7 +54,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.HashMap;
import org.bouncycastle.crypto.digests.SHA256Digest;
import gnu.crypto.hash.Sha256Standalone;
import net.i2p.crypto.CryptixRijndael_Algorithm;
import net.i2p.crypto.CryptixAESKeyCache;
@@ -91,7 +91,7 @@ import net.i2p.crypto.CryptixAESKeyCache;
* Bruce Schneier). ISBN 0-471-22357-3.</li>
* </ul>
*
* Modified by jrandom for I2P to use Bouncycastle's SHA256, Cryptix's AES,
* Modified by jrandom for I2P to use a standalone gnu-crypto SHA256, Cryptix's AES,
* to strip out some unnecessary dependencies and increase the buffer size.
* Renamed from Fortuna to FortunaStandalone so it doesn't conflict with the
* gnu-crypto implementation, which has been imported into GNU/classpath
@@ -106,7 +106,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
private static final int NUM_POOLS = 32;
private static final int MIN_POOL_SIZE = 64;
private final Generator generator;
private final SHA256Digest[] pools;
private final Sha256Standalone[] pools;
private long lastReseed;
private int pool;
private int pool0Count;
@@ -118,9 +118,9 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
{
super("Fortuna i2p");
generator = new Generator();
pools = new SHA256Digest[NUM_POOLS];
pools = new Sha256Standalone[NUM_POOLS];
for (int i = 0; i < NUM_POOLS; i++)
pools[i] = new SHA256Digest();
pools[i] = new Sha256Standalone();
lastReseed = 0;
pool = 0;
pool0Count = 0;
@@ -143,8 +143,6 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
generator.init(attributes);
}
/** fillBlock is not thread safe, so will be locked anyway */
private byte fillBlockBuf[] = new byte[32];
public void fillBlock()
{
if (pool0Count >= MIN_POOL_SIZE
@@ -155,9 +153,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
for (int i = 0; i < NUM_POOLS; i++)
{
if (reseedCount % (1 << i) == 0) {
byte buf[] = fillBlockBuf;//new byte[32];
pools[i].doFinal(buf, 0);
generator.addRandomBytes(buf);//pools[i].digest());
generator.addRandomBytes(pools[i].digest());
}
}
lastReseed = System.currentTimeMillis();
@@ -221,7 +217,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
private static final int LIMIT = 1 << 20;
private final SHA256Digest hash;
private final Sha256Standalone hash;
private final byte[] counter;
private final byte[] key;
/** current encryption key built from the keying material */
@@ -232,7 +228,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
public Generator ()
{
super("Fortuna.generator.i2p");
this.hash = new SHA256Digest();
this.hash = new Sha256Standalone();
counter = new byte[16]; //cipher.defaultBlockSize()];
buffer = new byte[16]; //cipher.defaultBlockSize()];
int keysize = 32;
@@ -285,9 +281,9 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
{
hash.update(key, 0, key.length);
hash.update(seed, offset, length);
//byte[] newkey = hash.digest();
//System.arraycopy(newkey, 0, key, 0, Math.min(key.length, newkey.length));
hash.doFinal(key, 0);
byte[] newkey = hash.digest();
System.arraycopy(newkey, 0, key, 0, Math.min(key.length, newkey.length));
//hash.doFinal(key, 0);
resetKey();
incrementCounter();
seeded = true;

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.53 $ $Date: 2006/02/16 15:44:09 $";
public final static String VERSION = "0.6.1.11";
public final static String ID = "$Revision: 1.54 $ $Date: 2006/02/21 10:20:17 $";
public final static String VERSION = "0.6.1.12";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -11,11 +11,10 @@ import net.i2p.crypto.CryptixAESEngine;
import net.i2p.crypto.DSAEngine;
import net.i2p.crypto.DummyDSAEngine;
import net.i2p.crypto.DummyElGamalEngine;
import net.i2p.crypto.DummyHMACSHA256Generator;
import net.i2p.crypto.DummyPooledRandomSource;
import net.i2p.crypto.ElGamalAESEngine;
import net.i2p.crypto.ElGamalEngine;
import net.i2p.crypto.HMACSHA256Generator;
import net.i2p.crypto.HMACGenerator;
import net.i2p.crypto.KeyGenerator;
import net.i2p.crypto.PersistentSessionKeyManager;
import net.i2p.crypto.SHA256Generator;
@@ -67,7 +66,7 @@ public class I2PAppContext {
private ElGamalAESEngine _elGamalAESEngine;
private AESEngine _AESEngine;
private LogManager _logManager;
private HMACSHA256Generator _hmac;
private HMACGenerator _hmac;
private SHA256Generator _sha;
private Clock _clock;
private DSAEngine _dsa;
@@ -342,17 +341,14 @@ public class I2PAppContext {
* other than for consistency, and perhaps later we'll want to
* include some stats.
*/
public HMACSHA256Generator hmac() {
public HMACGenerator hmac() {
if (!_hmacInitialized) initializeHMAC();
return _hmac;
}
private void initializeHMAC() {
synchronized (this) {
if (_hmac == null) {
if ("true".equals(getProperty("i2p.fakeHMAC", "false")))
_hmac = new DummyHMACSHA256Generator(this);
else
_hmac= new HMACSHA256Generator(this);
_hmac= new HMACGenerator(this);
}
_hmacInitialized = true;
}

View File

@@ -1,52 +0,0 @@
package net.i2p.crypto;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
/**
* Calculate the HMAC-SHA256 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* {@link org.bouncycastle.crypto.digests.SHA256Digest}.
*
*/
public class DummyHMACSHA256Generator extends HMACSHA256Generator {
private I2PAppContext _context;
public DummyHMACSHA256Generator(I2PAppContext context) {
super(context);
_context = context;
}
public static HMACSHA256Generator getInstance() {
return I2PAppContext.getGlobalContext().hmac();
}
/**
* Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[]) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
return calculate(key, data, 0, data.length);
}
/**
* Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[], int offset, int length) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
byte rv[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(key.getData(), 0, rv, 0, Hash.HASH_LENGTH);
if (Hash.HASH_LENGTH >= length)
DataHelper.xor(data, offset, rv, 0, rv, 0, length);
else
DataHelper.xor(data, offset, rv, 0, rv, 0, Hash.HASH_LENGTH);
return new Hash(rv);
}
}

View File

@@ -8,46 +8,26 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import org.bouncycastle.crypto.digests.SHA256Digest;
import org.bouncycastle.crypto.digests.MD5Digest;
import org.bouncycastle.crypto.macs.HMac;
/**
* Calculate the HMAC-SHA256 of a key+message. All the good stuff occurs
* Calculate the HMAC-MD5 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* {@link org.bouncycastle.crypto.digests.SHA256Digest}. Alternately, if
* the context property "i2p.HMACMD5" is set to true, then this whole HMAC
* generator will be transformed into HMACMD5, maintaining the same size and
* using {@link org.bouncycastle.crypto.digests.MD5Digest}.
* {@link org.bouncycastle.crypto.digests.MD5Digest}.
*
*/
public class HMACSHA256Generator {
public class HMACGenerator {
private I2PAppContext _context;
/** set of available HMAC instances for calculate */
private List _available;
/** set of available byte[] buffers for verify */
private List _availableTmp;
private boolean _useMD5;
private int _macSize;
public static final boolean DEFAULT_USE_MD5 = true;
public HMACSHA256Generator(I2PAppContext context) {
public HMACGenerator(I2PAppContext context) {
_context = context;
_available = new ArrayList(32);
_availableTmp = new ArrayList(32);
if ("true".equals(context.getProperty("i2p.HMACMD5", Boolean.toString(DEFAULT_USE_MD5).toLowerCase())))
_useMD5 = true;
else
_useMD5 = false;
if ("true".equals(context.getProperty("i2p.HMACBrokenSize", "false")))
_macSize = 32;
else
_macSize = (_useMD5 ? 16 : 32);
}
public static HMACSHA256Generator getInstance() {
return I2PAppContext.getGlobalContext().hmac();
}
/**
@@ -61,24 +41,6 @@ public class HMACSHA256Generator {
return new Hash(rv);
}
/**
* Calculate the HMAC of the data with the given key
*/
/*
public Hash calculate(SessionKey key, byte data[], int offset, int length) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
HMac mac = acquire();
mac.init(key.getData());
mac.update(data, offset, length);
byte rv[] = new byte[Hash.HASH_LENGTH];
mac.doFinal(rv, 0);
release(mac);
return new Hash(rv);
}
*/
/**
* Calculate the HMAC of the data with the given key
*/
@@ -131,10 +93,7 @@ public class HMACSHA256Generator {
// the HMAC is hardcoded to use SHA256 digest size
// for backwards compatibility. next time we have a backwards
// incompatible change, we should update this by removing ", 32"
if (_useMD5)
return new HMac(new MD5Digest(), 32);
else
return new HMac(new SHA256Digest(), 32);
return new HMac(new MD5Digest(), 32);
}
private void release(HMac mac) {
synchronized (_available) {

View File

@@ -7,17 +7,19 @@ import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.Hash;
import org.bouncycastle.crypto.digests.SHA256Digest;
import gnu.crypto.hash.Sha256Standalone;
/**
* Defines a wrapper for SHA-256 operation. All the good stuff occurs
* in the Bouncycastle {@link org.bouncycastle.crypto.digests.SHA256Digest}
* in the GNU-Crypto {@link gnu.crypto.hash.Sha256Standalone}
*
*/
public final class SHA256Generator {
private List _digests;
private List _digestsGnu;
public SHA256Generator(I2PAppContext context) {
_digests = new ArrayList(32);
_digestsGnu = new ArrayList(32);
}
public static final SHA256Generator getInstance() {
@@ -32,47 +34,44 @@ public final class SHA256Generator {
return calculateHash(source, 0, source.length);
}
public final Hash calculateHash(byte[] source, int start, int len) {
byte rv[] = new byte[Hash.HASH_LENGTH];
calculateHash(source, start, len, rv, 0);
Sha256Standalone digest = acquireGnu();
digest.update(source, start, len);
byte rv[] = digest.digest();
releaseGnu(digest);
return new Hash(rv);
}
public final void calculateHash(byte[] source, int start, int len, byte out[], int outOffset) {
SHA256Digest digest = acquire();
Sha256Standalone digest = acquireGnu();
digest.update(source, start, len);
digest.doFinal(out, outOffset);
release(digest);
byte rv[] = digest.digest();
releaseGnu(digest);
System.arraycopy(rv, 0, out, outOffset, rv.length);
}
private SHA256Digest acquire() {
SHA256Digest rv = null;
synchronized (_digests) {
if (_digests.size() > 0)
rv = (SHA256Digest)_digests.remove(0);
private Sha256Standalone acquireGnu() {
Sha256Standalone rv = null;
synchronized (_digestsGnu) {
if (_digestsGnu.size() > 0)
rv = (Sha256Standalone)_digestsGnu.remove(0);
}
if (rv != null)
rv.reset();
else
rv = new SHA256Digest();
rv = new Sha256Standalone();
return rv;
}
private void release(SHA256Digest digest) {
synchronized (_digests) {
if (_digests.size() < 32) {
_digests.add(digest);
private void releaseGnu(Sha256Standalone digest) {
synchronized (_digestsGnu) {
if (_digestsGnu.size() < 32) {
_digestsGnu.add(digest);
}
}
}
public static void main(String args[]) {
I2PAppContext ctx = I2PAppContext.getGlobalContext();
byte orig[] = new byte[4096];
ctx.random().nextBytes(orig);
Hash old = ctx.sha().calculateHash(orig);
SHA256Digest d = new SHA256Digest();
d.update(orig, 0, orig.length);
byte out[] = new byte[Hash.HASH_LENGTH];
d.doFinal(out, 0);
System.out.println("eq? " + net.i2p.data.DataHelper.eq(out, old.getData()));
for (int i = 0; i < args.length; i++)
System.out.println("SHA256 [" + args[i] + "] = [" + Base64.encode(ctx.sha().calculateHash(args[i].getBytes()).getData()) + "]");
}

View File

@@ -187,10 +187,10 @@ public class Rate {
// ok ok, lets coalesce
// how much were we off by? (so that we can sample down the measured values)
double periodFactor = measuredPeriod / _period;
_lastTotalValue = (_currentTotalValue == 0 ? 0.0D : _currentTotalValue / periodFactor);
_lastEventCount = (_currentEventCount == 0 ? 0L : (long) (_currentEventCount / periodFactor));
_lastTotalEventTime = (_currentTotalEventTime == 0 ? 0L : (long) (_currentTotalEventTime / periodFactor));
double periodFactor = measuredPeriod / (double)_period;
_lastTotalValue = _currentTotalValue / periodFactor;
_lastEventCount = (long) (_currentEventCount / periodFactor);
_lastTotalEventTime = (long) (_currentTotalEventTime / periodFactor);
_lastCoalesceDate = now;
if (_lastTotalValue > _extremeTotalValue) {
@@ -237,10 +237,12 @@ public class Rate {
*/
public double getLastEventSaturation() {
if ((_lastEventCount > 0) && (_lastTotalEventTime > 0)) {
double eventTime = (double) _lastTotalEventTime / (double) _lastEventCount;
/*double eventTime = (double) _lastTotalEventTime / (double) _lastEventCount;
double maxEvents = _period / eventTime;
double saturation = _lastEventCount / maxEvents;
return saturation;
*/
return ((double)_lastTotalEventTime) / (double)_period;
}
return 0.0D;

View File

@@ -146,15 +146,15 @@ public class DecayingBloomFilter {
for (int i = 0; i < _extenders.length; i++)
DataHelper.xor(entry, offset, _extenders[i], 0, _extended, _entryBytes * (i+1), _entryBytes);
boolean seen = _current.member(_extended);
seen = seen || _previous.member(_extended);
boolean seen = _current.locked_member(_extended);
seen = seen || _previous.locked_member(_extended);
if (seen) {
_currentDuplicates++;
return true;
} else {
if (addIfNew) {
_current.insert(_extended);
_previous.insert(_extended);
_current.locked_insert(_extended);
_previous.locked_insert(_extended);
}
return false;
}

View File

@@ -1,292 +0,0 @@
package org.bouncycastle.crypto.digests;
/*
* Copyright (c) 2000 - 2004 The Legion Of The Bouncy Castle
* (http://www.bouncycastle.org)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
/**
* FIPS 180-2 implementation of SHA-256.
*
* <pre>
* block word digest
* SHA-1 512 32 160
* SHA-256 512 32 256
* SHA-384 1024 64 384
* SHA-512 1024 64 512
* </pre>
*/
public class SHA256Digest
extends GeneralDigest
{
// size of a SHA-256 digest in bytes (256 bits)
private static final int DIGEST_LENGTH = 32;
// the eight 32-bit chaining words (H(0)..H(7) in FIPS 180-2)
private int H1, H2, H3, H4, H5, H6, H7, H8;
// 64-entry expanded message schedule; the first 16 words hold the current block
private int[] X = new int[64];
// index of the next free word in X
private int xOff;
/**
* Standard constructor
*/
public SHA256Digest()
{
reset();
}
/**
* Copy constructor. This will copy the state of the provided
* message digest.
*/
public SHA256Digest(SHA256Digest t)
{
super(t);
H1 = t.H1;
H2 = t.H2;
H3 = t.H3;
H4 = t.H4;
H5 = t.H5;
H6 = t.H6;
H7 = t.H7;
H8 = t.H8;
System.arraycopy(t.X, 0, X, 0, t.X.length);
xOff = t.xOff;
}
public String getAlgorithmName()
{
return "SHA-256";
}
public int getDigestSize()
{
return DIGEST_LENGTH;
}
// pack four big-endian bytes into the next schedule word; a full
// 16-word block triggers the compression function
protected void processWord(
byte[] in,
int inOff)
{
X[xOff++] = ((in[inOff] & 0xff) << 24) | ((in[inOff + 1] & 0xff) << 16)
| ((in[inOff + 2] & 0xff) << 8) | ((in[inOff + 3] & 0xff));
if (xOff == 16)
{
processBlock();
}
}
// big-endian serialization of one chaining word into out[outOff..outOff+3]
private void unpackWord(
int word,
byte[] out,
int outOff)
{
out[outOff] = (byte)(word >>> 24);
out[outOff + 1] = (byte)(word >>> 16);
out[outOff + 2] = (byte)(word >>> 8);
out[outOff + 3] = (byte)word;
}
// store the 64-bit message bit length in the last two schedule words,
// flushing the current block first if there is no room left for it
protected void processLength(
long bitLength)
{
if (xOff > 14)
{
processBlock();
}
X[14] = (int)(bitLength >>> 32);
X[15] = (int)(bitLength & 0xffffffff);
}
// finish padding/length processing, emit the digest, and reset for reuse
public int doFinal(
byte[] out,
int outOff)
{
finish();
unpackWord(H1, out, outOff);
unpackWord(H2, out, outOff + 4);
unpackWord(H3, out, outOff + 8);
unpackWord(H4, out, outOff + 12);
unpackWord(H5, out, outOff + 16);
unpackWord(H6, out, outOff + 20);
unpackWord(H7, out, outOff + 24);
unpackWord(H8, out, outOff + 28);
reset();
return DIGEST_LENGTH;
}
/**
* reset the chaining variables
*/
public void reset()
{
super.reset();
/* SHA-256 initial hash value
* The first 32 bits of the fractional parts of the square roots
* of the first eight prime numbers
*/
H1 = 0x6a09e667;
H2 = 0xbb67ae85;
H3 = 0x3c6ef372;
H4 = 0xa54ff53a;
H5 = 0x510e527f;
H6 = 0x9b05688c;
H7 = 0x1f83d9ab;
H8 = 0x5be0cd19;
xOff = 0;
for (int i = 0; i != X.length; i++)
{
X[i] = 0;
}
}
// the SHA-256 compression function: mixes the buffered 64-byte block into
// the chaining variables H1..H8
protected void processBlock()
{
//
// expand 16 word block into 64 word blocks.
//
for (int t = 16; t <= 63; t++)
{
X[t] = Theta1(X[t - 2]) + X[t - 7] + Theta0(X[t - 15]) + X[t - 16];
}
//
// set up working variables.
//
int a = H1;
int b = H2;
int c = H3;
int d = H4;
int e = H5;
int f = H6;
int g = H7;
int h = H8;
// 64 rounds per FIPS 180-2 section 6.2.2
for (int t = 0; t <= 63; t++)
{
int T1, T2;
T1 = h + Sum1(e) + Ch(e, f, g) + K[t] + X[t];
T2 = Sum0(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + T1;
d = c;
c = b;
b = a;
a = T1 + T2;
}
H1 += a;
H2 += b;
H3 += c;
H4 += d;
H5 += e;
H6 += f;
H7 += g;
H8 += h;
//
// reset the offset and clean out the word buffer.
//
xOff = 0;
for (int i = 0; i != X.length; i++)
{
X[i] = 0;
}
}
private int rotateRight(
int x,
int n)
{
return (x >>> n) | (x << (32 - n));
}
/* SHA-256 functions */
// choice: for each bit, picks y where x is 1, z where x is 0
private int Ch(
int x,
int y,
int z)
{
return ((x & y) ^ ((~x) & z));
}
// majority of the three input bits
private int Maj(
int x,
int y,
int z)
{
return ((x & y) ^ (x & z) ^ (y & z));
}
private int Sum0(
int x)
{
return rotateRight(x, 2) ^ rotateRight(x, 13) ^ rotateRight(x, 22);
}
private int Sum1(
int x)
{
return rotateRight(x, 6) ^ rotateRight(x, 11) ^ rotateRight(x, 25);
}
private int Theta0(
int x)
{
return rotateRight(x, 7) ^ rotateRight(x, 18) ^ (x >>> 3);
}
private int Theta1(
int x)
{
return rotateRight(x, 17) ^ rotateRight(x, 19) ^ (x >>> 10);
}
/* SHA-256 Constants
* (represent the first 32 bits of the fractional parts of the
* cube roots of the first sixty-four prime numbers)
*/
static final int K[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
}

View File

@@ -1,4 +1,48 @@
$Id: history.txt,v 1.413 2006/02/21 08:31:25 jrandom Exp $
$Id: history.txt,v 1.421 2006/02/26 16:30:58 jrandom Exp $
* 2006-02-27 0.6.1.12 released
2006-02-27 jrandom
* Adjust the jbigi.jar to use the athlon-optimized jbigi on windows/amd64
machines, rather than the generic jbigi (until we have an athlon64
optimized version)
2006-02-26 jrandom
* Switch from the bouncycastle to the gnu-crypto implementation for
SHA256, as benchmarks show a 10-30% speedup.
* Removed some unnecessary object caches
* Don't close i2psnark streams prematurely
2006-02-25 jrandom
* Made the Syndie permalinks in the thread view point to the blog view
* Disabled TCP again (since the live net seems to be doing well w/out it)
* Fix the message time on inbound SSU establishment (thanks zzz!)
* Don't be so aggressive with parallel tunnel creation when a tunnel pool
just starts up
2006-02-24 jrandom
* Rounding calculation cleanup in the stats, and avoid an uncontested
mutex (thanks ripple!)
* SSU handshake cleanup to help force incompatible peers to stop nagging
us by both not giving them an updated reference to us and by dropping
future handshake packets from them.
2006-02-23 jrandom
* Increase the SSU retransmit ceiling (for slow links)
* Estimate the sender's SSU MTU (to help see if we agree)
2006-02-22 jrandom
* Fix to properly profile tunnel joins (thanks Ragnarok, frosk, et al!)
* More aggressive poor-man's PMTU, allowing larger MTUs on less reliable
links
* Further class validator refactorings
2006-02-22 jrandom
* Handle a rare race under high bandwidth situations in the SSU transport
* Minor refactoring so we don't confuse sun's 1.6.0-b2 validator
2006-02-21 Complication
* Reactivate TCP transport by default, in addition to re-allowing
* 2006-02-21 0.6.1.11 released

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/02/16 15:44:07 $">
<i2p.release version="0.6.1.11" date="2006/02/21" minVersion="0.6"
<i2p.news date="$Date: 2006/02/21 10:20:21 $">
<i2p.release version="0.6.1.12" date="2006/02/27" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"

View File

@@ -4,7 +4,7 @@
<info>
<appname>i2p</appname>
<appversion>0.6.1.11</appversion>
<appversion>0.6.1.12</appversion>
<authors>
<author name="I2P" email="support@i2p.net"/>
</authors>

View File

@@ -19,3 +19,7 @@ the libg++.so.5 dependency that has been a problem for a few linux distros.
On Feb 8, 2006, the libjbigi-linux-viac3.so was added to jbigi.jar after
being compiled by jrandom on linux/p4 (cross compiled to --host=viac3)
On Feb 27, 2006, jbigi-win-athlon.dll was copied to jbigi-win-athlon64.dll,
as it should offer amd64 users better performance than jbigi-win-none.dll
until we get a full amd64 build.

Binary file not shown.

Binary file not shown.

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/02/16 15:44:07 $">
<i2p.release version="0.6.1.11" date="2006/02/21" minVersion="0.6"
<i2p.news date="$Date: 2006/02/21 22:34:02 $">
<i2p.release version="0.6.1.12" date="2006/02/27" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -10,13 +10,14 @@
anonlogs="http://i2p/Nf3ab-ZFkmI-LyMt7GjgT-jfvZ3zKDl0L96pmGQXF1B82W2Bfjf0n7~288vafocjFLnQnVcmZd~-p0-Oolfo9aW2Rm-AhyqxnxyLlPBqGxsJBXjPhm1JBT4Ia8FB-VXt0BuY0fMKdAfWwN61-tj4zIcQWRxv3DFquwEf035K~Ra4SWOqiuJgTRJu7~o~DzHVljVgWIzwf8Z84cz0X33pv-mdG~~y0Bsc2qJVnYwjjR178YMcRSmNE0FVMcs6f17c6zqhMw-11qjKpY~EJfHYCx4lBWF37CD0obbWqTNUIbL~78vxqZRT3dgAgnLixog9nqTO-0Rh~NpVUZnoUi7fNR~awW5U3Cf7rU7nNEKKobLue78hjvRcWn7upHUF45QqTDuaM3yZa7OsjbcH-I909DOub2Q0Dno6vIwuA7yrysccN1sbnkwZbKlf4T6~iDdhaSLJd97QCyPOlbyUfYy9QLNExlRqKgNVJcMJRrIual~Lb1CLbnzt0uvobM57UpqSAAAA/meeting141"
publiclogs="http://www.i2p.net/meeting141" />
&#149;
2006-02-16:
0.6.1.10 released with some major updates - it is <b>not</b> backwards compatible, so upgrading is essential.
2006-02-27:
<a href="http://dev.i2p/pipermail/i2p/2006-February/001265.html">0.6.1.12 released</a>
with some useful SSU and peer selection fixes - please upgrade as soon as possible.
<br>
&#149;
2006-02-14:
<a href="http://dev.i2p/pipermail/i2p/2006-February/001260.html">status notes</a>
2006-02-21:
<a href="http://dev.i2p/pipermail/i2p/2006-February/001264.html">status notes</a>
and
<a href="http://www.i2p/meeting168">meeting log</a>
<a href="http://www.i2p/meeting169">meeting log</a>
<br>
</i2p.news>

View File

@@ -153,7 +153,7 @@ public class Router {
shutdown(EXIT_OOM);
}
};
_shutdownHook = new ShutdownHook();
_shutdownHook = new ShutdownHook(_context);
_gracefulShutdownDetector = new I2PThread(new GracefulShutdown());
_gracefulShutdownDetector.setDaemon(true);
_gracefulShutdownDetector.setName("Graceful shutdown hook");
@@ -210,7 +210,7 @@ public class Router {
public void setRouterInfo(RouterInfo info) {
_routerInfo = info;
if (info != null)
_context.jobQueue().addJob(new PersistRouterInfoJob());
_context.jobQueue().addJob(new PersistRouterInfoJob(_context));
}
/**
@@ -245,8 +245,8 @@ public class Router {
_context.tunnelDispatcher().startup();
_context.inNetMessagePool().startup();
startupQueue();
_context.jobQueue().addJob(new CoalesceStatsJob());
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob());
_context.jobQueue().addJob(new CoalesceStatsJob(_context));
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob(_context));
warmupCrypto();
_sessionKeyPersistenceHelper.startup();
//_context.adminManager().startup();
@@ -449,89 +449,6 @@ public class Router {
finalShutdown(EXIT_HARD_RESTART);
}
/**
* coalesce the stats framework every minute
*
*/
private final class CoalesceStatsJob extends JobImpl {
public CoalesceStatsJob() {
super(Router.this._context);
Router.this._context.statManager().createRateStat("bw.receiveBps", "How fast we receive data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("bw.sendBps", "How fast we send data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
}
public String getName() { return "Coalesce stats"; }
public void runJob() {
Router.this._context.statManager().coalesceStats();
RateStat receiveRate = _context.statManager().getRate("transport.receiveMessageSize");
if (receiveRate != null) {
Rate rate = receiveRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double bps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
Router.this._context.statManager().addRateData("bw.receiveBps", (long)bps, 60*1000);
}
}
RateStat sendRate = _context.statManager().getRate("transport.sendMessageSize");
if (sendRate != null) {
Rate rate = sendRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double bps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
Router.this._context.statManager().addRateData("bw.sendBps", (long)bps, 60*1000);
}
}
int active = Router.this._context.commSystem().countActivePeers();
Router.this._context.statManager().addRateData("router.activePeers", active, 60*1000);
int fast = Router.this._context.profileOrganizer().countFastPeers();
Router.this._context.statManager().addRateData("router.fastPeers", fast, 60*1000);
int highCap = Router.this._context.profileOrganizer().countHighCapacityPeers();
Router.this._context.statManager().addRateData("router.highCapacityPeers", highCap, 60*1000);
requeue(60*1000);
}
}
/**
* Update the routing Key modifier every day at midnight (plus on startup).
* This is done here because we want to make sure the key is updated before anyone
* uses it.
*/
private final class UpdateRoutingKeyModifierJob extends JobImpl {
private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
public UpdateRoutingKeyModifierJob() { super(Router.this._context); }
public String getName() { return "Update Routing Key Modifier"; }
public void runJob() {
Router.this._context.routingKeyGenerator().generateDateBasedModData();
requeue(getTimeTillMidnight());
}
private long getTimeTillMidnight() {
long now = Router.this._context.clock().now();
_cal.setTime(new Date(now));
_cal.set(Calendar.YEAR, _cal.get(Calendar.YEAR)); // gcj <= 4.0 workaround
_cal.set(Calendar.DAY_OF_YEAR, _cal.get(Calendar.DAY_OF_YEAR)); // gcj <= 4.0 workaround
_cal.add(Calendar.DATE, 1);
_cal.set(Calendar.HOUR_OF_DAY, 0);
_cal.set(Calendar.MINUTE, 0);
_cal.set(Calendar.SECOND, 0);
_cal.set(Calendar.MILLISECOND, 0);
long then = _cal.getTime().getTime();
long howLong = then - now;
if (howLong < 0) // hi kaffe
howLong = 24*60*60*1000l + howLong;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Time till midnight: " + howLong + "ms");
return howLong;
}
}
private void warmupCrypto() {
_context.random().nextBoolean();
new DHSessionKeyBuilder(); // load the class so it starts the precalc process
@@ -1060,7 +977,7 @@ public class Router {
return _context.getProperty("router.pingFile", "router.ping");
}
private static final long LIVELINESS_DELAY = 60*1000;
static final long LIVELINESS_DELAY = 60*1000;
/**
* Start a thread that will periodically update the file "router.ping", but if
@@ -1082,84 +999,177 @@ public class Router {
}
}
// not an I2PThread for context creation issues
Thread t = new Thread(new MarkLiveliness(f));
Thread t = new Thread(new MarkLiveliness(_context, this, f));
t.setName("Mark router liveliness");
t.setDaemon(true);
t.start();
return true;
}
private class MarkLiveliness implements Runnable {
private File _pingFile;
public MarkLiveliness(File f) {
_pingFile = f;
}
public void run() {
_pingFile.deleteOnExit();
do {
ping();
try { Thread.sleep(LIVELINESS_DELAY); } catch (InterruptedException ie) {}
} while (_isAlive);
_pingFile.delete();
}
private void ping() {
FileOutputStream fos = null;
try {
fos = new FileOutputStream(_pingFile);
fos.write(("" + System.currentTimeMillis()).getBytes());
} catch (IOException ioe) {
if (_log != null) {
_log.log(Log.CRIT, "Error writing to ping file", ioe);
} else {
System.err.println("Error writing to ping file");
ioe.printStackTrace();
}
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
/**
* coalesce the stats framework every minute
*
*/
class CoalesceStatsJob extends JobImpl {
public CoalesceStatsJob(RouterContext ctx) {
super(ctx);
ctx.statManager().createRateStat("bw.receiveBps", "How fast we receive data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendBps", "How fast we send data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
}
public String getName() { return "Coalesce stats"; }
public void runJob() {
getContext().statManager().coalesceStats();
RateStat receiveRate = getContext().statManager().getRate("transport.receiveMessageSize");
if (receiveRate != null) {
Rate rate = receiveRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
getContext().statManager().addRateData("bw.receiveBps", (long)KBps, 60*1000);
}
}
}
private static int __id = 0;
private class ShutdownHook extends Thread {
private int _id;
public ShutdownHook() {
_id = ++__id;
}
public void run() {
setName("Router " + _id + " shutdown");
_log.log(Log.CRIT, "Shutting down the router...");
shutdown(EXIT_HARD);
}
}
/** update the router.info file whenever its, er, updated */
private class PersistRouterInfoJob extends JobImpl {
public PersistRouterInfoJob() { super(Router.this._context); }
public String getName() { return "Persist Updated Router Information"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Persisting updated router info");
String infoFilename = getConfigSetting(PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = PROP_INFO_FILENAME_DEFAULT;
RouterInfo info = getRouterInfo();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infoFilename);
info.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the router information", dfe);
} catch (IOException ioe) {
_log.error("Error writing out the rebuilt router information", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
RateStat sendRate = getContext().statManager().getRate("transport.sendMessageSize");
if (sendRate != null) {
Rate rate = sendRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
getContext().statManager().addRateData("bw.sendBps", (long)KBps, 60*1000);
}
}
int active = getContext().commSystem().countActivePeers();
getContext().statManager().addRateData("router.activePeers", active, 60*1000);
int fast = getContext().profileOrganizer().countFastPeers();
getContext().statManager().addRateData("router.fastPeers", fast, 60*1000);
int highCap = getContext().profileOrganizer().countHighCapacityPeers();
getContext().statManager().addRateData("router.highCapacityPeers", highCap, 60*1000);
requeue(60*1000);
}
}
/**
* Update the routing Key modifier every day at midnight (plus on startup).
* This is done here because we want to make sure the key is updated before anyone
* uses it.
*/
class UpdateRoutingKeyModifierJob extends JobImpl {
private Log _log;
private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
public UpdateRoutingKeyModifierJob(RouterContext ctx) {
super(ctx);
}
public String getName() { return "Update Routing Key Modifier"; }
public void runJob() {
_log = getContext().logManager().getLog(getClass());
getContext().routingKeyGenerator().generateDateBasedModData();
requeue(getTimeTillMidnight());
}
private long getTimeTillMidnight() {
long now = getContext().clock().now();
_cal.setTime(new Date(now));
_cal.set(Calendar.YEAR, _cal.get(Calendar.YEAR)); // gcj <= 4.0 workaround
_cal.set(Calendar.DAY_OF_YEAR, _cal.get(Calendar.DAY_OF_YEAR)); // gcj <= 4.0 workaround
_cal.add(Calendar.DATE, 1);
_cal.set(Calendar.HOUR_OF_DAY, 0);
_cal.set(Calendar.MINUTE, 0);
_cal.set(Calendar.SECOND, 0);
_cal.set(Calendar.MILLISECOND, 0);
long then = _cal.getTime().getTime();
long howLong = then - now;
if (howLong < 0) // hi kaffe
howLong = 24*60*60*1000l + howLong;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Time till midnight: " + howLong + "ms");
return howLong;
}
}
class MarkLiveliness implements Runnable {
private RouterContext _context;
private Router _router;
private File _pingFile;
public MarkLiveliness(RouterContext ctx, Router router, File pingFile) {
_context = ctx;
_router = router;
_pingFile = pingFile;
}
public void run() {
_pingFile.deleteOnExit();
do {
ping();
try { Thread.sleep(Router.LIVELINESS_DELAY); } catch (InterruptedException ie) {}
} while (_router.isAlive());
_pingFile.delete();
}
private void ping() {
FileOutputStream fos = null;
try {
fos = new FileOutputStream(_pingFile);
fos.write(("" + System.currentTimeMillis()).getBytes());
} catch (IOException ioe) {
System.err.println("Error writing to ping file");
ioe.printStackTrace();
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}
class ShutdownHook extends Thread {
private RouterContext _context;
private static int __id = 0;
private int _id;
public ShutdownHook(RouterContext ctx) {
_context = ctx;
_id = ++__id;
}
public void run() {
setName("Router " + _id + " shutdown");
Log l = _context.logManager().getLog(Router.class);
l.log(Log.CRIT, "Shutting down the router...");
_context.router().shutdown(Router.EXIT_HARD);
}
}
/** update the router.info file whenever its, er, updated */
class PersistRouterInfoJob extends JobImpl {
private Log _log;
public PersistRouterInfoJob(RouterContext ctx) {
super(ctx);
}
public String getName() { return "Persist Updated Router Information"; }
public void runJob() {
_log = getContext().logManager().getLog(PersistRouterInfoJob.class);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Persisting updated router info");
String infoFilename = getContext().getProperty(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;
RouterInfo info = getContext().router().getRouterInfo();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infoFilename);
info.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the router information", dfe);
} catch (IOException ioe) {
_log.error("Error writing out the rebuilt router information", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}

View File

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.355 $ $Date: 2006/02/21 08:31:24 $";
public final static String VERSION = "0.6.1.11";
public final static String ID = "$Revision: 1.362 $ $Date: 2006/02/26 16:30:57 $";
public final static String VERSION = "0.6.1.12";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);

View File

@@ -51,7 +51,7 @@ public class RepublishLeaseSetJob extends JobImpl {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0);
_facade.sendStore(_dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT, null);
_facade.sendStore(_dest, ls, new OnRepublishSuccess(getContext()), new OnRepublishFailure(getContext(), this), REPUBLISH_LEASESET_TIMEOUT, null);
//getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
}
} else {
@@ -76,21 +76,28 @@ public class RepublishLeaseSetJob extends JobImpl {
}
}
private class OnSuccess extends JobImpl {
public OnSuccess(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet successful"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("successful publishing of the leaseSet for " + _dest.toBase64());
}
}
private class OnFailure extends JobImpl {
public OnFailure(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet failed"; }
public void runJob() {
if (_log.shouldLog(Log.WARN))
_log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64());
RepublishLeaseSetJob.this.requeue(getContext().random().nextInt(60*1000));
}
void requeueRepublish() {
if (_log.shouldLog(Log.WARN))
_log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64());
requeue(getContext().random().nextInt(60*1000));
}
}
class OnRepublishSuccess extends JobImpl {
public OnRepublishSuccess(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet successful"; }
public void runJob() {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("successful publishing of the leaseSet for " + _dest.toBase64());
}
}
class OnRepublishFailure extends JobImpl {
private RepublishLeaseSetJob _job;
public OnRepublishFailure(RouterContext ctx, RepublishLeaseSetJob job) {
super(ctx);
_job = job;
}
public String getName() { return "Publish leaseSet failed"; }
public void runJob() { _job.requeueRepublish(); }
}

View File

@@ -456,7 +456,7 @@ class SearchJob extends JobImpl {
void replyFound(DatabaseSearchReplyMessage message, Hash peer) {
long duration = _state.replyFound(peer);
// this processing can take a while, so split 'er up
getContext().jobQueue().addJob(new SearchReplyJob(getContext(), (DatabaseSearchReplyMessage)message, peer, duration));
getContext().jobQueue().addJob(new SearchReplyJob(getContext(), this, (DatabaseSearchReplyMessage)message, peer, duration));
}
/**
@@ -468,132 +468,6 @@ class SearchJob extends JobImpl {
// noop
}
private final class SearchReplyJob extends JobImpl {
private DatabaseSearchReplyMessage _msg;
/**
* Peer who we think sent us the reply. Note: could be spoofed! If the
* attacker knew we were searching for a particular key from a
* particular peer, they could send us some searchReply messages with
* shitty values, trying to get us to consider that peer unreliable.
* Potential fixes include either authenticated 'from' address or use a
* nonce in the search + searchReply (and check for it in the selector).
*
*/
private Hash _peer;
private int _curIndex;
private int _invalidPeers;
private int _seenPeers;
private int _newPeers;
private int _duplicatePeers;
private int _repliesPendingVerification;
private long _duration;
public SearchReplyJob(RouterContext enclosingContext, DatabaseSearchReplyMessage message, Hash peer, long duration) {
super(enclosingContext);
_msg = message;
_peer = peer;
_curIndex = 0;
_invalidPeers = 0;
_seenPeers = 0;
_newPeers = 0;
_duplicatePeers = 0;
_repliesPendingVerification = 0;
}
public String getName() { return "Process Reply for Kademlia Search"; }
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
if (_repliesPendingVerification > 0) {
// we received new references from the peer, but still
// haven't verified all of them, so lets give it more time
SearchReplyJob.this.requeue(_timeoutMs);
} else {
// either they didn't tell us anything new or we have verified
// (or failed to verify) all of them. we're done
getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
if (_newPeers > 0)
newPeersFound(_newPeers);
}
} else {
Hash peer = _msg.getReply(_curIndex);
boolean shouldAdd = false;
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(peer);
if (info == null) {
// if the peer is giving us lots of bad peer references,
// dont try to fetch them.
boolean sendsBadInfo = getContext().profileOrganizer().peerSendsBadReplies(_peer);
if (!sendsBadInfo) {
// we don't need to search for everthing we're given here - only ones that
// are next in our search path...
if (getContext().shitlist().isShitlisted(peer)) {
if (_log.shouldLog(Log.INFO))
_log.info("Not looking for a shitlisted peer...");
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
} else {
//getContext().netDb().lookupRouterInfo(peer, new ReplyVerifiedJob(getContext(), peer), new ReplyNotVerifiedJob(getContext(), peer), _timeoutMs);
//_repliesPendingVerification++;
shouldAdd = true;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64());
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
}
}
if (_state.wasAttempted(peer)) {
_duplicatePeers++;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": dbSearchReply received on search referencing router "
+ peer);
if (shouldAdd) {
if (_facade.getKBuckets().add(peer))
_newPeers++;
else
_seenPeers++;
}
_curIndex++;
requeue(0);
}
}
/** the peer gave us a reference to a new router, and we were able to fetch it */
private final class ReplyVerifiedJob extends JobImpl {
private Hash _key;
public ReplyVerifiedJob(RouterContext enclosingContext, Hash key) {
super(enclosingContext);
_key = key;
}
public String getName() { return "Search reply value verified"; }
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64() + " verified: " + _key.toBase64());
_repliesPendingVerification--;
getContext().statManager().addRateData("netDb.searchReplyValidated", 1, 0);
}
}
/** the peer gave us a reference to a new router, and we were NOT able to fetch it */
private final class ReplyNotVerifiedJob extends JobImpl {
private Hash _key;
public ReplyNotVerifiedJob(RouterContext enclosingContext, Hash key) {
super(enclosingContext);
_key = key;
}
public String getName() { return "Search reply value NOT verified"; }
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64() + " failed verification: " + _key.toBase64());
_repliesPendingVerification--;
_invalidPeers++;
getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1, 0);
}
}
}
/**
* Called when a particular peer failed to respond before the timeout was
* reached, or if the peer could not be contacted at all.
@@ -833,4 +707,144 @@ class SearchJob extends JobImpl {
return super.toString() + " started "
+ DataHelper.formatDuration((getContext().clock().now() - _startedOn)) + " ago";
}
boolean wasAttempted(Hash peer) { return _state.wasAttempted(peer); }
long timeoutMs() { return _timeoutMs; }
boolean add(Hash peer) { return _facade.getKBuckets().add(peer); }
}
class SearchReplyJob extends JobImpl {
private DatabaseSearchReplyMessage _msg;
private Log _log;
/**
* Peer who we think sent us the reply. Note: could be spoofed! If the
* attacker knew we were searching for a particular key from a
* particular peer, they could send us some searchReply messages with
* shitty values, trying to get us to consider that peer unreliable.
* Potential fixes include either authenticated 'from' address or use a
* nonce in the search + searchReply (and check for it in the selector).
*
*/
private Hash _peer;
private int _curIndex;
private int _invalidPeers;
private int _seenPeers;
private int _newPeers;
private int _duplicatePeers;
private int _repliesPendingVerification;
private long _duration;
private SearchJob _searchJob;
public SearchReplyJob(RouterContext enclosingContext, SearchJob job, DatabaseSearchReplyMessage message, Hash peer, long duration) {
super(enclosingContext);
_log = enclosingContext.logManager().getLog(getClass());
_searchJob = job;
_msg = message;
_peer = peer;
_curIndex = 0;
_invalidPeers = 0;
_seenPeers = 0;
_newPeers = 0;
_duplicatePeers = 0;
_repliesPendingVerification = 0;
}
public String getName() { return "Process Reply for Kademlia Search"; }
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
if (_repliesPendingVerification > 0) {
// we received new references from the peer, but still
// haven't verified all of them, so lets give it more time
requeue(_searchJob.timeoutMs());
} else {
// either they didn't tell us anything new or we have verified
// (or failed to verify) all of them. we're done
getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
if (_newPeers > 0)
_searchJob.newPeersFound(_newPeers);
}
} else {
Hash peer = _msg.getReply(_curIndex);
boolean shouldAdd = false;
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(peer);
if (info == null) {
// if the peer is giving us lots of bad peer references,
// dont try to fetch them.
boolean sendsBadInfo = getContext().profileOrganizer().peerSendsBadReplies(_peer);
if (!sendsBadInfo) {
// we don't need to search for everthing we're given here - only ones that
// are next in our search path...
if (getContext().shitlist().isShitlisted(peer)) {
if (_log.shouldLog(Log.INFO))
_log.info("Not looking for a shitlisted peer...");
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
} else {
//getContext().netDb().lookupRouterInfo(peer, new ReplyVerifiedJob(getContext(), peer), new ReplyNotVerifiedJob(getContext(), peer), _timeoutMs);
//_repliesPendingVerification++;
shouldAdd = true;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64());
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
}
}
if (_searchJob.wasAttempted(peer)) {
_duplicatePeers++;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": dbSearchReply received on search referencing router " + peer);
if (shouldAdd) {
if (_searchJob.add(peer))
_newPeers++;
else
_seenPeers++;
}
_curIndex++;
requeue(0);
}
}
void replyVerified() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64());
_repliesPendingVerification--;
getContext().statManager().addRateData("netDb.searchReplyValidated", 1, 0);
}
void replyNotVerified() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64());
_repliesPendingVerification--;
_invalidPeers++;
getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1, 0);
}
}
/** the peer gave us a reference to a new router, and we were able to fetch it */
class ReplyVerifiedJob extends JobImpl {
private Hash _key;
private SearchReplyJob _replyJob;
public ReplyVerifiedJob(RouterContext enclosingContext, SearchReplyJob srj, Hash key) {
super(enclosingContext);
_replyJob = srj;
_key = key;
}
public String getName() { return "Search reply value verified"; }
public void runJob() { _replyJob.replyVerified(); }
}
/** the peer gave us a reference to a new router, and we were NOT able to fetch it */
class ReplyNotVerifiedJob extends JobImpl {
private Hash _key;
private SearchReplyJob _replyJob;
public ReplyNotVerifiedJob(RouterContext enclosingContext, SearchReplyJob srj, Hash key) {
super(enclosingContext);
_key = key;
_replyJob = srj;
}
public String getName() { return "Search reply value NOT verified"; }
public void runJob() { _replyJob.replyNotVerified(); }
}

View File

@@ -20,35 +20,29 @@ class PersistProfilesJob extends JobImpl {
public String getName() { return "Persist profiles"; }
public void runJob() {
Set peers = _mgr.selectPeers();
Hash hashes[] = new Hash[peers.size()];
int i = 0;
for (Iterator iter = peers.iterator(); iter.hasNext(); )
hashes[i] = (Hash)iter.next();
getContext().jobQueue().addJob(new PersistProfileJob(getContext(), hashes));
}
private class PersistProfileJob extends JobImpl {
private Hash _peers[];
private int _cur;
public PersistProfileJob(RouterContext enclosingContext, Hash peers[]) {
super(enclosingContext);
_peers = peers;
_cur = 0;
}
public void runJob() {
if (_cur < _peers.length) {
_mgr.storeProfile(_peers[_cur]);
_cur++;
}
if (_cur >= _peers.length) {
// no more left, requeue up the main persist-em-all job
PersistProfilesJob.this.getTiming().setStartAfter(getContext().clock().now() + PERSIST_DELAY);
PersistProfilesJob.this.getContext().jobQueue().addJob(PersistProfilesJob.this);
} else {
// we've got peers left to persist, so requeue the persist profile job
PersistProfilesJob.PersistProfileJob.this.requeue(1000);
}
}
public String getName() { return "Persist profile"; }
getContext().jobQueue().addJob(new PersistProfileJob(getContext(), this, peers));
}
void persist(Hash peer) { _mgr.storeProfile(peer); }
void requeue() { requeue(PERSIST_DELAY); }
}
class PersistProfileJob extends JobImpl {
private PersistProfilesJob _job;
private Iterator _peers;
public PersistProfileJob(RouterContext enclosingContext, PersistProfilesJob job, Set peers) {
super(enclosingContext);
_peers = peers.iterator();
_job = job;
}
public void runJob() {
if (_peers.hasNext())
_job.persist((Hash)_peers.next());
if (_peers.hasNext()) {
requeue(1000);
} else {
// no more left, requeue up the main persist-em-all job
_job.requeue();
}
}
public String getName() { return "Persist profile"; }
}

View File

@@ -56,11 +56,12 @@ public class TransportManager implements TransportEventListener {
transport.setListener(null);
}
static final boolean ALLOW_TCP = true;
static final boolean ALLOW_TCP = false;
private void configTransports() {
String disableTCP = _context.router().getConfigSetting(PROP_DISABLE_TCP);
if ( !ALLOW_TCP || (disableTCP == null) || (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP)) ) {
// Unless overridden by constant or explicit config property, start TCP tranport
if ( !ALLOW_TCP || ((disableTCP != null) && (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP))) ) {
_log.info("Explicitly disabling the TCP transport!");
} else {
Transport t = new TCPTransport(_context);

View File

@@ -16,6 +16,8 @@ import net.i2p.data.RouterIdentity;
import net.i2p.data.SessionKey;
import net.i2p.data.Signature;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
@@ -440,7 +442,41 @@ public class EstablishmentManager {
_transport.inboundConnectionReceived();
_context.statManager().addRateData("udp.inboundEstablishTime", state.getLifetime(), 0);
sendOurInfo(peer, true);
sendInboundComplete(peer);
}
/**
* dont send our info immediately, just send a small data packet, and 5-10s later,
* if the peer isnt shitlisted, *then* send them our info. this will help kick off
* the oldnet
*/
private void sendInboundComplete(PeerState peer) {
SimpleTimer.getInstance().addEvent(new PublishToNewInbound(peer), 10*1000);
if (_log.shouldLog(Log.INFO))
_log.info("Completing to the peer after confirm: " + peer);
DeliveryStatusMessage dsm = new DeliveryStatusMessage(_context);
dsm.setArrival(Router.NETWORK_ID); // overloaded, sure, but future versions can check this
dsm.setMessageExpiration(_context.clock().now()+10*1000);
dsm.setMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
_transport.send(dsm, peer);
}
private class PublishToNewInbound implements SimpleTimer.TimedEvent {
private PeerState _peer;
public PublishToNewInbound(PeerState peer) { _peer = peer; }
public void timeReached() {
Hash peer = _peer.getRemotePeer();
if ((peer != null) && (!_context.shitlist().isShitlisted(peer))) {
// ok, we are fine with them, send them our latest info
if (_log.shouldLog(Log.INFO))
_log.info("Publishing to the peer after confirm plus delay (without shitlist): " + peer.toBase64());
sendOurInfo(_peer, true);
} else {
// nuh uh. fuck 'em.
if (_log.shouldLog(Log.WARN))
_log.warn("NOT publishing to the peer after confirm plus delay (WITH shitlist): " + (peer != null ? peer.toBase64() : "unknown"));
}
_peer = null;
}
}
/**

View File

@@ -81,7 +81,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource
int acksIncluded = receiveACKs(from, data);
long afterACKs = _context.clock().now();
from.packetReceived();
from.packetReceived(data.getPacketSize());
_context.statManager().addRateData("udp.receiveMessagePeriod", afterMsgs-beforeMsgs, afterACKs-beforeMsgs);
_context.statManager().addRateData("udp.receiveACKPeriod", afterACKs-afterMsgs, afterACKs-beforeMsgs);
if ( (fragmentsIncluded > 0) && (acksIncluded > 0) )

View File

@@ -138,6 +138,7 @@ public class OutboundMessageFragments {
if ( (msgBody == null) || (target == null) )
return;
// todo: make sure the outNetMessage is initialzed once and only once
OutboundMessageState state = new OutboundMessageState(_context);
boolean ok = state.initialize(msg, msgBody);
if (ok) {

View File

@@ -38,6 +38,8 @@ public class PacketBuilder {
_context = ctx;
_transport = transport;
_log = ctx.logManager().getLog(PacketBuilder.class);
_context.statManager().createRateStat("udp.packetAuthTime", "How long it takes to encrypt and MAC a packet for sending", "udp", new long[] { 60*1000 });
_context.statManager().createRateStat("udp.packetAuthTimeSlow", "How long it takes to encrypt and MAC a packet for sending (when its slow)", "udp", new long[] { 60*1000, 10*60*1000 });
}
public UDPPacket buildPacket(OutboundMessageState state, int fragment, PeerState peer) {
@@ -1029,6 +1031,7 @@ public class PacketBuilder {
* @param iv IV to deliver
*/
private void authenticate(UDPPacket packet, SessionKey cipherKey, SessionKey macKey, ByteArray iv) {
long before = System.currentTimeMillis();
int encryptOffset = packet.getPacket().getOffset() + UDPPacket.IV_SIZE + UDPPacket.MAC_SIZE;
int encryptSize = packet.getPacket().getLength() - UDPPacket.IV_SIZE - UDPPacket.MAC_SIZE - packet.getPacket().getOffset();
byte data[] = packet.getPacket().getData();
@@ -1059,5 +1062,9 @@ public class PacketBuilder {
System.arraycopy(ba.getData(), 0, data, hmacOff, UDPPacket.MAC_SIZE);
System.arraycopy(iv.getData(), 0, data, hmacOff + UDPPacket.MAC_SIZE, UDPPacket.IV_SIZE);
_hmacCache.release(ba);
long timeToAuth = System.currentTimeMillis() - before;
_context.statManager().addRateData("udp.packetAuthTime", timeToAuth, timeToAuth);
if (timeToAuth > 100)
_context.statManager().addRateData("udp.packetAuthTimeSlow", timeToAuth, timeToAuth);
}
}

View File

@@ -58,11 +58,16 @@ public class PacketHandler {
_context.statManager().createRateStat("udp.droppedInvalidUnkown", "How old the packet we dropped due to invalidity (unkown type) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidReestablish", "How old the packet we dropped due to invalidity (doesn't use existing key, not an establishment) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidEstablish", "How old the packet we dropped due to invalidity (establishment, bad key) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidEstablish.inbound", "How old the packet we dropped due to invalidity (even though we have an active inbound establishment with the peer) was", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidEstablish.outbound", "How old the packet we dropped due to invalidity (even though we have an active outbound establishment with the peer) was", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidEstablish.new", "How old the packet we dropped due to invalidity (even though we do not have any active establishment with the peer) was", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidInboundEstablish", "How old the packet we dropped due to invalidity (inbound establishment, bad key) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.droppedInvalidSkew", "How skewed the packet we dropped due to invalidity (valid except bad skew) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetDequeueTime", "How long it takes the UDPReader to pull a packet off the inbound packet queue (when its slow)", "udp", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetVerifyTime", "How long it takes the PacketHandler to verify a data packet after dequeueing (period is dequeue time)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetVerifyTimeSlow", "How long it takes the PacketHandler to verify a data packet after dequeueing when its slow (period is dequeue time)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetValidateMultipleCount", "How many times we validate a packet, if done more than once (period = afterValidate-enqueue)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.packetNoValidationLifetime", "How long packets that are never validated are around for", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
}
public void startup() {
@@ -88,6 +93,13 @@ public class PacketHandler {
}
return rv.toString();
}
/** the packet is from a peer we are establishing an outbound con to, but failed validation, so fallback */
private static final short OUTBOUND_FALLBACK = 1;
/** the packet is from a peer we are establishing an inbound con to, but failed validation, so fallback */
private static final short INBOUND_FALLBACK = 2;
/** the packet is not from anyone we know */
private static final short NEW_PEER = 3;
private class Handler implements Runnable {
private UDPPacketReader _reader;
@@ -104,6 +116,7 @@ public class PacketHandler {
UDPPacket packet = _endpoint.receive();
_state = 3;
if (packet == null) continue; // keepReading is probably false...
packet.received();
if (_log.shouldLog(Log.INFO))
_log.info("Received the packet " + packet);
@@ -134,18 +147,32 @@ public class PacketHandler {
+ packet + ": " + _reader);
}
long timeToDequeue = packet.getTimeSinceEnqueue() - packet.getTimeSinceReceived();
long timeToVerify = 0;
long beforeRecv = packet.getTimeSinceReceiveFragments();
if (beforeRecv > 0)
timeToVerify = beforeRecv - packet.getTimeSinceReceived();
long enqueueTime = packet.getEnqueueTime();
long recvTime = packet.getReceivedTime();
long beforeValidateTime = packet.getBeforeValidate();
long afterValidateTime = packet.getAfterValidate();
int validateCount = packet.getValidateCount();
long timeToDequeue = recvTime - enqueueTime;
long timeToValidate = 0;
long authTime = 0;
if (afterValidateTime > 0) {
timeToValidate = afterValidateTime - enqueueTime;
authTime = afterValidateTime - beforeValidateTime;
}
if (timeToDequeue > 50)
_context.statManager().addRateData("udp.packetDequeueTime", timeToDequeue, timeToDequeue);
if (timeToVerify > 0) {
_context.statManager().addRateData("udp.packetVerifyTime", timeToVerify, timeToDequeue);
if (timeToVerify > 100)
_context.statManager().addRateData("udp.packetVerifyTimeSlow", timeToVerify, timeToDequeue);
if (authTime > 50)
_context.statManager().addRateData("udp.packetAuthRecvTime", authTime, beforeValidateTime-recvTime);
if (afterValidateTime > 0) {
_context.statManager().addRateData("udp.packetVerifyTime", timeToValidate, authTime);
if (timeToValidate > 50)
_context.statManager().addRateData("udp.packetVerifyTimeSlow", timeToValidate, authTime);
}
if (validateCount > 1)
_context.statManager().addRateData("udp.packetValidateMultipleCount", validateCount, timeToValidate);
else if (validateCount <= 0)
_context.statManager().addRateData("udp.packetNoValidationLifetime", packet.getLifetime(), 0);
// back to the cache with thee!
packet.release();
@@ -186,7 +213,7 @@ public class PacketHandler {
_log.debug("Packet received is not for an inbound or outbound establishment");
// ok, not already known establishment, try as a new one
_state = 15;
receivePacket(reader, packet);
receivePacket(reader, packet, NEW_PEER);
}
}
} else {
@@ -248,13 +275,24 @@ public class PacketHandler {
_state = 26;
}
private void receivePacket(UDPPacketReader reader, UDPPacket packet) {
private void receivePacket(UDPPacketReader reader, UDPPacket packet, short peerType) {
_state = 27;
boolean isValid = packet.validate(_transport.getIntroKey());
if (!isValid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid introduction packet received: " + packet, new Exception("path"));
_context.statManager().addRateData("udp.droppedInvalidEstablish", packet.getLifetime(), packet.getExpiration());
switch (peerType) {
case INBOUND_FALLBACK:
_context.statManager().addRateData("udp.droppedInvalidEstablish.inbound", packet.getLifetime(), packet.getTimeSinceReceived());
break;
case OUTBOUND_FALLBACK:
_context.statManager().addRateData("udp.droppedInvalidEstablish.outbound", packet.getLifetime(), packet.getTimeSinceReceived());
break;
case NEW_PEER:
_context.statManager().addRateData("udp.droppedInvalidEstablish.new", packet.getLifetime(), packet.getTimeSinceReceived());
break;
}
_state = 28;
return;
} else {
@@ -306,7 +344,7 @@ public class PacketHandler {
// ok, we couldn't handle it with the established stuff, so fall back
// on earlier state packets
_state = 34;
receivePacket(reader, packet);
receivePacket(reader, packet, INBOUND_FALLBACK);
} else {
_context.statManager().addRateData("udp.droppedInvalidInboundEstablish", packet.getLifetime(), packet.getExpiration());
}
@@ -357,7 +395,7 @@ public class PacketHandler {
// ok, we couldn't handle it with the established stuff, so fall back
// on earlier state packets
_state = 41;
receivePacket(reader, packet);
receivePacket(reader, packet, OUTBOUND_FALLBACK);
_state = 42;
}
@@ -397,7 +435,7 @@ public class PacketHandler {
_state = 45;
RemoteHostId from = packet.getRemoteHost();
_state = 46;
switch (reader.readPayloadType()) {
case UDPPacket.PAYLOAD_TYPE_SESSION_REQUEST:
_state = 47;

View File

@@ -35,12 +35,16 @@ public class PacketPusher implements Runnable {
public void run() {
while (_alive) {
UDPPacket packets[] = _fragments.getNextVolley();
if (packets != null) {
for (int i = 0; i < packets.length; i++) {
if (packets[i] != null) // null for ACKed fragments
_sender.add(packets[i], 0); // 0 does not block //100); // blocks for up to 100ms
try {
UDPPacket packets[] = _fragments.getNextVolley();
if (packets != null) {
for (int i = 0; i < packets.length; i++) {
if (packets[i] != null) // null for ACKed fragments
_sender.add(packets[i], 0); // 0 does not block //100); // blocks for up to 100ms
}
}
} catch (Exception e) {
_log.log(Log.CRIT, "Error pushing", e);
}
}
}

View File

@@ -138,6 +138,9 @@ public class PeerState {
private long _theyRelayToUsAs;
/** what is the largest packet we can send to the peer? */
private int _mtu;
private int _mtuReceive;
/* how many consecutive packets at or under the min MTU have been received */
private long _consecutiveSmall;
/** when did we last check the MTU? */
private long _mtuLastChecked;
private long _mtuIncreases;
@@ -209,7 +212,7 @@ public class PeerState {
private static final int LARGE_MTU = 1350;
private static final int MIN_RTO = 100 + ACKSender.ACK_FREQUENCY;
private static final int MAX_RTO = 1200; // 5000;
private static final int MAX_RTO = 3000; // 5000;
/** override the default MTU */
private static final String PROP_DEFAULT_MTU = "i2np.udp.mtu";
@@ -248,6 +251,7 @@ public class PeerState {
_weRelayToThemAs = 0;
_theyRelayToUsAs = 0;
_mtu = getDefaultMTU();
_mtuReceive = _mtu;
_mtuLastChecked = -1;
_lastACKSend = -1;
_rtt = 1000;
@@ -378,6 +382,8 @@ public class PeerState {
public long getTheyRelayToUsAs() { return _theyRelayToUsAs; }
/** what is the largest packet we can send to the peer? */
public int getMTU() { return _mtu; }
/** estimate how large the other side is sending packets */
public int getReceiveMTU() { return _mtuReceive; }
/** when did we last check the MTU? */
public long getMTULastChecked() { return _mtuLastChecked; }
public long getMTUIncreases() { return _mtuIncreases; }
@@ -835,13 +841,7 @@ public class PeerState {
_messagesSent++;
if (numSends < 2) {
recalculateTimeouts(lifetime);
if (_mtu <= MIN_MTU) {
if (_context.random().nextInt(50*(int)_mtuDecreases) <= 0) {
_context.statManager().addRateData("udp.mtuIncrease", _packetsRetransmitted, _packetsTransmitted);
_mtu = LARGE_MTU;
_mtuIncreases++;
}
}
adjustMTU();
}
else if (_log.shouldLog(Log.WARN))
_log.warn("acked after numSends=" + numSends + " w/ lifetime=" + lifetime + " and size=" + bytesACKed);
@@ -870,14 +870,24 @@ public class PeerState {
_rto = MAX_RTO;
}
private void reduceMTU() {
if (_mtu > MIN_MTU) {
double retransPct = (double)_packetsRetransmitted/(double)_packetsTransmitted;
if (retransPct >= 0.05) { // should we go for lower?
_context.statManager().addRateData("udp.mtuDecrease", _packetsRetransmitted, _packetsTransmitted);
private void adjustMTU() {
double retransPct = 0;
if (_packetsTransmitted > 10) {
retransPct = (double)_packetsRetransmitted/(double)_packetsTransmitted;
boolean wantLarge = retransPct < .25d; // heuristic to allow fairly lossy links to use large MTUs
if (wantLarge && _mtu != LARGE_MTU) {
if (_context.random().nextLong(_mtuDecreases) <= 0) {
_mtu = LARGE_MTU;
_mtuIncreases++;
_context.statManager().addRateData("udp.mtuIncrease", _mtuIncreases, _mtuDecreases);
}
} else if (!wantLarge && _mtu == LARGE_MTU) {
_mtu = MIN_MTU;
_mtuDecreases++;
}
_context.statManager().addRateData("udp.mtuDecrease", _mtuDecreases, _mtuIncreases);
}
} else {
_mtu = DEFAULT_MTU;
}
}
@@ -895,7 +905,7 @@ public class PeerState {
}
congestionOccurred();
_context.statManager().addRateData("udp.congestedRTO", _rto, _rttDeviation);
reduceMTU();
adjustMTU();
//_rto *= 2;
}
public void packetsTransmitted(int packets) {
@@ -926,7 +936,18 @@ public class PeerState {
public long getPacketRetransmissionRate() { return _packetRetransmissionRate; }
public long getPacketsReceived() { return _packetsReceived; }
public long getPacketsReceivedDuplicate() { return _packetsReceivedDuplicate; }
public void packetReceived() { _packetsReceived++; }
/**
 * Note a received packet of the given size and update our estimate of the
 * MTU the peer is sending with: once we have seen more than 50 packets,
 * a run of 50+ consecutive packets at or under MIN_MTU drops the estimate
 * to MIN_MTU, otherwise it is assumed to be LARGE_MTU.
 *
 * @param size size of the received packet in bytes
 */
public void packetReceived(int size) {
    _packetsReceived++;
    if (size > MIN_MTU)
        _consecutiveSmall = 0;
    else
        _consecutiveSmall++;
    boolean smallRun = _consecutiveSmall >= 50;
    boolean warmedUp = _packetsReceived > 50;
    _mtuReceive = (!smallRun && warmedUp) ? LARGE_MTU : MIN_MTU;
}
/**
* we received a backoff request, so cut our send window
@@ -1202,10 +1223,13 @@ public class PeerState {
}
if ( (_retransmitter != null) && ( (_retransmitter.isExpired() || _retransmitter.isComplete()) ) )
OutboundMessageState retrans = _retransmitter;
if ( (retrans != null) && ( (retrans.isExpired() || retrans.isComplete()) ) ) {
_retransmitter = null;
retrans = null;
}
if ( (_retransmitter != null) && (_retransmitter != state) ) {
if ( (retrans != null) && (retrans != state) ) {
// choke it, since there's already another message retransmitting to this
// peer.
_context.statManager().addRateData("udp.blockedRetransmissions", _packetsRetransmitted, _packetsTransmitted);

View File

@@ -8,7 +8,6 @@ import java.util.ArrayList;
import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.crypto.HMACSHA256Generator;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
@@ -29,8 +28,9 @@ public class UDPPacket {
private volatile short _priority;
private volatile long _initializeTime;
private volatile long _expiration;
private volatile byte[] _data;
private volatile ByteArray _dataBuf;
private byte[] _data;
private byte[] _validateBuf;
private byte[] _ivBuf;
private volatile int _markedType;
private volatile RemoteHostId _remoteHost;
private volatile boolean _released;
@@ -38,8 +38,11 @@ public class UDPPacket {
private volatile Exception _acquiredBy;
private long _enqueueTime;
private long _receivedTime;
private long _beforeValidate;
private long _afterValidate;
private long _beforeReceiveFragments;
private long _afterHandlingTime;
private int _validateCount;
private boolean _isInbound;
private static final List _packetCache;
@@ -75,47 +78,34 @@ public class UDPPacket {
public static final byte BITFIELD_CONTINUATION = (byte)(1 << 7);
private static final int MAX_VALIDATE_SIZE = MAX_PACKET_SIZE;
private static final ByteCache _validateCache = ByteCache.getInstance(64, MAX_VALIDATE_SIZE);
private static final ByteCache _ivCache = ByteCache.getInstance(64, IV_SIZE);
private static final ByteCache _dataCache = ByteCache.getInstance(64, MAX_PACKET_SIZE);
/**
 * Create a packet and its long-lived backing buffers.  Instances are pooled,
 * so per-use state is (re)set in init(..), which is also called here.
 *
 * @param ctx context used for stat registration and the clock
 * @param inbound whether this packet is for the receive path
 */
private UDPPacket(I2PAppContext ctx, boolean inbound) {
    ctx.statManager().createRateStat("udp.packetsLiveInbound", "Number of live inbound packets in memory", "udp", new long[] { 60*1000, 5*60*1000 });
    ctx.statManager().createRateStat("udp.packetsLiveOutbound", "Number of live outbound packets in memory", "udp", new long[] { 60*1000, 5*60*1000 });
    ctx.statManager().createRateStat("udp.packetsLivePendingRecvInbound", "Number of live inbound packets not yet handled by the PacketHandler", "udp", new long[] { 60*1000, 5*60*1000 });
    ctx.statManager().createRateStat("udp.packetsLivePendingHandleInbound", "Number of live inbound packets not yet handled fully by the PacketHandler", "udp", new long[] { 60*1000, 5*60*1000 });
    ctx.statManager().createRateStat("udp.fetchRemoteSlow", "How long it takes to grab the remote ip info", "udp", new long[] { 60*1000 });
    // allocate the single reusable receive buffer once and hand it to the
    // DatagramPacket directly; the stale extra allocation of a throwaway
    // byte[] for the DatagramPacket (immediately overwritten) is removed
    _data = new byte[MAX_PACKET_SIZE];
    _packet = new DatagramPacket(_data, MAX_PACKET_SIZE);
    _validateBuf = new byte[MAX_VALIDATE_SIZE];
    _ivBuf = new byte[IV_SIZE];
    init(ctx, inbound);
}
/**
 * Reset per-use state so a pooled packet can be safely reused.  The
 * long-lived _data buffer is zeroed and re-bound to the DatagramPacket.
 * The leftover _dataCache.acquire() lines from before the ByteArray-cache
 * removal are dropped: with release() no longer returning the buffer,
 * each acquire would have leaked a cached ByteArray per init.
 */
private void init(I2PAppContext ctx, boolean inbound) {
    _context = ctx;
    Arrays.fill(_data, (byte)0);
    _packet.setData(_data);
    _isInbound = inbound;
    _initializeTime = _context.clock().now();
    _markedType = -1;
    _validateCount = 0;
    _remoteHost = null;
    _released = false;
}
/*
public void initialize(int priority, long expiration, InetAddress host, int port) {
_priority = (short)priority;
_expiration = expiration;
resetBegin();
Arrays.fill(_data, (byte)0x00);
//_packet.setLength(0);
_packet.setAddress(host);
_packet.setPort(port);
_remoteHost = null;
_released = false;
_releasedBy = null;
}
*/
public void writeData(byte src[], int offset, int len) {
verifyNotReleased();
System.arraycopy(src, offset, _data, 0, len);
@@ -146,10 +136,14 @@ public class UDPPacket {
/**
 * Lazily build (and cache) the ip+port identity of the datagram's sender.
 * Fetching the address info can occasionally stall, so anything over 50ms
 * is recorded in the udp.fetchRemoteSlow stat.
 */
public RemoteHostId getRemoteHost() {
    if (_remoteHost == null) {
        final long start = System.currentTimeMillis();
        _remoteHost = new RemoteHostId(_packet.getAddress().getAddress(), _packet.getPort());
        final long elapsed = System.currentTimeMillis() - start;
        if (elapsed > 50)
            _context.statManager().addRateData("udp.fetchRemoteSlow", elapsed, getLifetime());
    }
    return _remoteHost;
}
@@ -161,8 +155,9 @@ public class UDPPacket {
*/
public boolean validate(SessionKey macKey) {
verifyNotReleased();
_beforeValidate = _context.clock().now();
boolean eq = false;
ByteArray buf = _validateCache.acquire();
Arrays.fill(_validateBuf, (byte)0);
// validate by comparing _data[0:15] and
// HMAC(payload + IV + (payloadLength ^ protocolVersion), macKey)
@@ -170,14 +165,14 @@ public class UDPPacket {
int payloadLength = _packet.getLength() - MAC_SIZE - IV_SIZE;
if (payloadLength > 0) {
int off = 0;
System.arraycopy(_data, _packet.getOffset() + MAC_SIZE + IV_SIZE, buf.getData(), off, payloadLength);
System.arraycopy(_data, _packet.getOffset() + MAC_SIZE + IV_SIZE, _validateBuf, off, payloadLength);
off += payloadLength;
System.arraycopy(_data, _packet.getOffset() + MAC_SIZE, buf.getData(), off, IV_SIZE);
System.arraycopy(_data, _packet.getOffset() + MAC_SIZE, _validateBuf, off, IV_SIZE);
off += IV_SIZE;
DataHelper.toLong(buf.getData(), off, 2, payloadLength ^ PacketBuilder.PROTOCOL_VERSION);
DataHelper.toLong(_validateBuf, off, 2, payloadLength ^ PacketBuilder.PROTOCOL_VERSION);
off += 2;
eq = _context.hmac().verify(macKey, buf.getData(), 0, off, _data, _packet.getOffset(), MAC_SIZE);
eq = _context.hmac().verify(macKey, _validateBuf, 0, off, _data, _packet.getOffset(), MAC_SIZE);
/*
Hash hmac = _context.hmac().calculate(macKey, buf.getData(), 0, off);
@@ -201,7 +196,8 @@ public class UDPPacket {
_log.warn("Payload length is " + payloadLength);
}
_validateCache.release(buf);
_afterValidate = _context.clock().now();
_validateCount++;
return eq;
}
@@ -212,11 +208,10 @@ public class UDPPacket {
*/
/**
 * Decrypt this valid packet in place, overwriting the encrypted payload.
 * The IV is the IV_SIZE bytes immediately following the MAC in the buffer.
 *
 * The block as previously merged contained both the old _ivCache-based
 * lines and the new _ivBuf lines, so the payload was decrypted twice
 * (corrupting it) and an acquired ByteArray was used after release; this
 * resolves to the single _ivBuf-based decryption.
 */
public void decrypt(SessionKey cipherKey) {
    verifyNotReleased();
    Arrays.fill(_ivBuf, (byte)0);
    System.arraycopy(_data, MAC_SIZE, _ivBuf, 0, IV_SIZE);
    int len = _packet.getLength();
    _context.aes().decrypt(_data, _packet.getOffset() + MAC_SIZE + IV_SIZE, _data, _packet.getOffset() + MAC_SIZE + IV_SIZE, cipherKey, _ivBuf, len - MAC_SIZE - IV_SIZE);
}
/** the UDPReceiver has tossed it onto the inbound queue */
@@ -237,6 +232,17 @@ public class UDPPacket {
/** a packet handler has finished parsing out the good bits */
long getTimeSinceHandling() { return (_afterHandlingTime > 0 ? _context.clock().now() - _afterHandlingTime : 0); }
/** when it was added to the endpoint's receive queue */
long getEnqueueTime() { return _enqueueTime; }
/** when it was pulled off the endpoint receive queue */
long getReceivedTime() { return _receivedTime; }
/** when we began validate() (ms, from _context.clock()) */
long getBeforeValidate() { return _beforeValidate; }
/** when we finished validate() (ms, from _context.clock()) */
long getAfterValidate() { return _afterValidate; }
/** how many times validate() has been called on this packet */
int getValidateCount() { return _validateCount; }
public String toString() {
verifyNotReleased();
StringBuffer buf = new StringBuffer(64);
@@ -282,7 +288,7 @@ public class UDPPacket {
//_releasedBy = new Exception("released by");
//_acquiredBy = null;
//
_dataCache.release(_dataBuf);
//_dataCache.release(_dataBuf);
if (!CACHE)
return;
synchronized (_packetCache) {

View File

@@ -252,6 +252,7 @@ public class UDPPacketReader {
/** parse out the data message */
public class DataReader {
public int getPacketSize() { return _payloadLength; }
public boolean readACKsIncluded() {
return flagSet(UDPPacket.DATA_FLAG_EXPLICIT_ACK);
}

View File

@@ -47,6 +47,7 @@ public class UDPReceiver {
_context.statManager().createRateStat("udp.droppedInboundProbabalistically", "How many packet we drop probabalistically (to simulate failures)", "udp", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.acceptedInboundProbabalistically", "How many packet we accept probabalistically (to simulate failures)", "udp", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.receiveHolePunch", "How often we receive a NAT hole punch", "udp", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("udp.ignorePacketFromDroplist", "Packet lifetime for those dropped on the drop list", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
}
public void startup() {
@@ -124,6 +125,15 @@ public class UDPReceiver {
private final int doReceive(UDPPacket packet) {
if (_log.shouldLog(Log.INFO))
_log.info("Received: " + packet);
RemoteHostId from = packet.getRemoteHost();
if (_transport.isInDropList(from)) {
if (_log.shouldLog(Log.INFO))
_log.info("Ignoring packet from the drop-listed peer: " + from);
_context.statManager().addRateData("udp.ignorePacketFromDroplist", packet.getLifetime(), 0);
packet.release();
return 0;
}
packet.enqueue();
boolean rejected = false;

View File

@@ -78,6 +78,12 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
/** shared slow bid for unconnected peers when we want to prefer UDP */
private TransportBid _slowPreferredBid;
/** list of RemoteHostId for peers whose packets we want to drop outright */
private List _dropList;
private static final int DROPLIST_PERIOD = 10*60*1000;
private static final int MAX_DROPLIST_SIZE = 256;
public static final String STYLE = "SSU";
public static final String PROP_INTERNAL_PORT = "i2np.udp.internalPort";
@@ -124,6 +130,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_log = ctx.logManager().getLog(UDPTransport.class);
_peersByIdent = new HashMap(128);
_peersByRemoteHost = new HashMap(128);
_dropList = new ArrayList(256);
_endpoint = null;
TimedWeightedPriorityMessageQueue mq = new TimedWeightedPriorityMessageQueue(ctx, PRIORITY_LIMITS, PRIORITY_WEIGHT, this);
@@ -155,7 +162,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_context.statManager().createRateStat("udp.addressTestInsteadOfUpdate", "How many times we fire off a peer test of ourselves instead of adjusting our own reachable address?", "udp", new long[] { 1*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.addressUpdated", "How many times we adjust our own reachable IP address", "udp", new long[] { 1*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.proactiveReestablish", "How long a session was idle for when we proactively reestablished it", "udp", new long[] { 1*60*1000, 20*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.dropPeerDroplist", "How many peers currently have their packets dropped outright when a new peer is added to the list?", "udp", new long[] { 1*60*1000, 20*60*1000 });
__instance = this;
}
@@ -572,11 +579,30 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
+ " because they are in the wrong net");
}
*/
_context.shitlist().shitlistRouter(dsm.getRouterInfo().getIdentity().calculateHash(), "Part of the wrong network");
dropPeer(dsm.getRouterInfo().getIdentity().calculateHash());
Hash peerHash = dsm.getRouterInfo().getIdentity().calculateHash();
PeerState peer = getPeerState(peerHash);
if (peer != null) {
RemoteHostId remote = peer.getRemoteHostId();
boolean added = false;
int droplistSize = 0;
synchronized (_dropList) {
if (!_dropList.contains(remote)) {
while (_dropList.size() > MAX_DROPLIST_SIZE)
_dropList.remove(0);
_dropList.add(remote);
added = true;
}
droplistSize = _dropList.size();
}
if (added) {
_context.statManager().addRateData("udp.dropPeerDroplist", droplistSize, 0);
SimpleTimer.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
}
}
_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network");
dropPeer(peerHash);
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping the peer " + dsm.getRouterInfo().getIdentity().calculateHash().toBase64()
+ " because they are in the wrong net");
_log.warn("Dropping the peer " + peerHash.toBase64() + " because they are in the wrong net");
return;
} else {
if (dsm.getRouterInfo() != null) {
@@ -597,6 +623,17 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
peer.expireInboundMessages();
}
/**
 * Timed event scheduled when a peer is drop-listed; when it fires, the
 * peer's entry is removed from _dropList so its packets are accepted again.
 */
private class RemoveDropList implements SimpleTimer.TimedEvent {
    private final RemoteHostId _dropped;
    public RemoveDropList(RemoteHostId peer) { _dropped = peer; }
    public void timeReached() {
        synchronized (_dropList) {
            _dropList.remove(_dropped);
        }
    }
}
/** Is this remote endpoint currently on the drop list (its packets ignored outright)? */
public boolean isInDropList(RemoteHostId peer) {
    synchronized (_dropList) {
        return _dropList.contains(peer);
    }
}
void dropPeer(Hash peer) {
PeerState state = getPeerState(peer);
@@ -1254,6 +1291,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
long idleIn = (now-peer.getLastReceiveTime())/1000;
long idleOut = (now-peer.getLastSendTime())/1000;
if (idleIn < 0) idleIn = 0;
if (idleOut < 0) idleOut = 0;
buf.append("<td valign=\"top\" ><code>");
buf.append(idleIn);
@@ -1316,9 +1355,11 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append("</code></td>");
buf.append("<td valign=\"top\" ><code>");
buf.append(peer.getMTU()).append('/');
buf.append(peer.getMTUIncreases()).append('/');
buf.append(peer.getMTUDecreases());
buf.append(peer.getMTU()).append("/").append(peer.getReceiveMTU());
//.append('/');
//buf.append(peer.getMTUIncreases()).append('/');
//buf.append(peer.getMTUDecreases());
buf.append("</code></td>");
long sent = peer.getPacketsTransmitted();
@@ -1433,7 +1474,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
"<b id=\"def.rtt\">rtt</b>: the round trip time is how long it takes to get an acknowledgement of a packet<br />\n" +
"<b id=\"def.dev\">dev</b>: the standard deviation of the round trip time, to help control the retransmit timeout<br />\n" +
"<b id=\"def.rto\">rto</b>: the retransmit timeout controls how frequently an unacknowledged packet will be retransmitted<br />\n" +
"<b id=\"def.mtu\">mtu</b>: current sending packet size/number of times it increased/number of times it decreased<br />\n" +
"<b id=\"def.mtu\">mtu</b>: current sending packet size / estimated receiving packet size<br />\n" +
"<b id=\"def.send\">send</b>: the number of packets sent to the peer<br />\n" +
"<b id=\"def.recv\">recv</b>: the number of packets received from the peer<br />\n" +
"<b id=\"def.resent\">resent</b>: the number of packets retransmitted to the peer<br />\n" +

View File

@@ -28,6 +28,8 @@ public class TunnelCreatorConfig implements TunnelInfo {
private boolean _isInbound;
private long _messagesProcessed;
private volatile long _verifiedBytesTransferred;
private boolean _failed;
private int _failures;
public TunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound) {
this(ctx, length, isInbound, null);
@@ -45,6 +47,8 @@ public class TunnelCreatorConfig implements TunnelInfo {
_destination = destination;
_messagesProcessed = 0;
_verifiedBytesTransferred = 0;
_failed = false;
_failures = 0;
}
/** how many hops are there in the tunnel? */
@@ -90,8 +94,6 @@ public class TunnelCreatorConfig implements TunnelInfo {
public long getReplyMessageId() { return _replyMessageId; }
public void setReplyMessageId(long id) { _replyMessageId = id; }
public void testSuccessful(int ms) {}
/** take note of a message being pumped through this tunnel */
public void incrementProcessedMessages() { _messagesProcessed++; }
public long getProcessedMessagesCount() { return _messagesProcessed; }
@@ -135,6 +137,30 @@ public class TunnelCreatorConfig implements TunnelInfo {
}
private static final int MAX_CONSECUTIVE_TEST_FAILURES = 2;
/**
* The tunnel failed, so stop using it
*/
/**
 * Record a tunnel test failure.  Once the count exceeds
 * MAX_CONSECUTIVE_TEST_FAILURES the tunnel is flagged as failed.
 *
 * @return true if the tunnel is still usable, false if it has now failed
 */
public boolean tunnelFailed() {
    _failures++;
    boolean stillUsable = _failures <= MAX_CONSECUTIVE_TEST_FAILURES;
    if (!stillUsable)
        _failed = true;
    return stillUsable;
}
/** has this tunnel accumulated more than MAX_CONSECUTIVE_TEST_FAILURES failures and been marked unusable? */
public boolean getTunnelFailed() { return _failed; }
/** current failure count (incremented by tunnelFailed(), decremented toward 0 by testSuccessful()) */
public int getTunnelFailures() { return _failures; }
/**
 * A tunnel test passed - back off the failure count, never below zero.
 *
 * @param ms round trip time of the successful test (unused here)
 */
public void testSuccessful(int ms) {
    _failures = Math.max(0, _failures - 1);
}
public String toString() {
// H0:1235-->H1:2345-->H2:2345
@@ -168,6 +194,8 @@ public class TunnelCreatorConfig implements TunnelInfo {
if (_replyMessageId > 0)
buf.append(" replyMessageId ").append(_replyMessageId);
buf.append(" with ").append(_messagesProcessed).append("/").append(_verifiedBytesTransferred).append(" msgs/bytes");
buf.append(" with ").append(_failures).append(" failures");
return buf.toString();
}

View File

@@ -105,6 +105,15 @@ public class TunnelDispatcher implements Service {
ctx.statManager().createRateStat("tunnel.participatingMessageCount",
"How many messages are sent through a participating tunnel?", "Tunnels",
new long[] { 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.ownedMessageCount",
"How many messages are sent through a tunnel we created (period == failures)?", "Tunnels",
new long[] { 60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("tunnel.failedCompletelyMessages",
"How many messages are sent through a tunnel that failed prematurely (period == failures)?", "Tunnels",
new long[] { 60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("tunnel.failedPartially",
"How many messages are sent through a tunnel that only failed partially (period == failures)?", "Tunnels",
new long[] { 60*1000l, 10*60*1000l, 60*60*1000l });
}
private TunnelGateway.QueuePreprocessor createPreprocessor(HopConfig cfg) {
@@ -301,6 +310,15 @@ public class TunnelDispatcher implements Service {
// update stats based on gw.getMessagesSent()
}
}
long msgs = cfg.getProcessedMessagesCount();
int failures = cfg.getTunnelFailures();
boolean failed = cfg.getTunnelFailed();
_context.statManager().addRateData("tunnel.ownedMessageCount", msgs, failures);
if (failed) {
_context.statManager().addRateData("tunnel.failedCompletelyMessages", msgs, failures);
} else if (failures > 0) {
_context.statManager().addRateData("tunnel.failedPartiallyMessages", msgs, failures);
}
}
/**

View File

@@ -188,6 +188,7 @@ class BuildHandler {
if (howBad == 0) {
// w3wt
_context.profileManager().tunnelJoined(peer, rtt);
} else {
allAgree = false;
switch (howBad) {

View File

@@ -13,10 +13,8 @@ import net.i2p.util.Log;
*/
public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
private TunnelPool _pool;
private boolean _failed;
private TestJob _testJob;
private Job _expireJob;
private int _failures;
private TunnelInfo _pairedTunnel;
/** Creates a new instance of PooledTunnelCreatorConfig */
@@ -26,20 +24,29 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
}
/**
 * @param length number of hops in the tunnel
 * @param isInbound direction of the tunnel
 * @param destination presumably the client destination served (null otherwise) - confirm against callers
 */
public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound, Hash destination) {
    super(ctx, length, isInbound, destination);
    _pool = null;
    _failures = 0;
}
public void testSuccessful(int ms) {
if (_testJob != null) {
if (_testJob != null)
_testJob.testSuccessful(ms);
super.testSuccessful(ms);
}
/**
* The tunnel failed, so stop using it
*/
/**
 * The tunnel failed, so stop using it.
 *
 * Failure counting now lives entirely in the superclass (tunnelFailed /
 * testSuccessful); the trailing residue that decremented a local _failures
 * field (removed from this class in the same refactor) is dropped, as it
 * both referenced a deleted field and double-counted the back-off.
 *
 * @return false if the tunnel has now failed completely, true if still usable
 */
public boolean tunnelFailed() {
    boolean rv = super.tunnelFailed();
    if (!rv) {
        // remove us from the pool (but not the dispatcher) so that we aren't
        // selected again. _expireJob is left to do its thing, in case there
        // are any straggling messages coming down the tunnel
        _pool.tunnelFailed(this);
        if (_testJob != null) // just in case...
            _context.jobQueue().removeJob(_testJob);
    }
    return rv;
}
public Properties getOptions() {
@@ -47,31 +54,6 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
return _pool.getSettings().getUnknownOptions();
}
public String toString() {
return super.toString() + " with " + _failures + " failures";
}
private static final int MAX_CONSECUTIVE_TEST_FAILURES = 2;
/**
* The tunnel failed, so stop using it
*/
public boolean tunnelFailed() {
_failures++;
if (_failures > MAX_CONSECUTIVE_TEST_FAILURES) {
_failed = true;
// remove us from the pool (but not the dispatcher) so that we aren't
// selected again. _expireJob is left to do its thing, in case there
// are any straggling messages coming down the tunnel
_pool.tunnelFailed(this);
if (_testJob != null) // just in case...
_context.jobQueue().removeJob(_testJob);
return false;
} else {
return true;
}
}
public boolean getTunnelFailed() { return _failed; }
public void setTunnelPool(TunnelPool pool) {
if (pool != null) {
_pool = pool;

View File

@@ -33,6 +33,7 @@ public class TunnelPool {
private TunnelInfo _lastSelected;
private long _lastSelectionPeriod;
private int _expireSkew;
private long _started;
public TunnelPool(RouterContext ctx, TunnelPoolManager mgr, TunnelPoolSettings settings, TunnelPeerSelector sel) {
_context = ctx;
@@ -46,11 +47,13 @@ public class TunnelPool {
_lastSelected = null;
_lifetimeProcessed = 0;
_expireSkew = _context.random().nextInt(90*1000);
_started = System.currentTimeMillis();
refreshSettings();
}
public void startup() {
_alive = true;
_started = System.currentTimeMillis();
_manager.getExecutor().repoll();
if (_settings.isInbound() && (_settings.getDestination() != null) ) {
// we just reconnected and didn't require any new tunnel builders.
@@ -101,6 +104,8 @@ public class TunnelPool {
return period;
}
/** how long (ms) since this pool was started (or constructed, if startup() has not yet run) */
private long getLifetime() { return System.currentTimeMillis() - _started; }
/**
* Pull a random tunnel out of the pool. If there are none available but
* the pool is configured to allow 0hop tunnels, this builds a fake one
@@ -555,12 +560,16 @@ public class TunnelPool {
if (rv + inProgress + expireLater + fallback > 4*standardAmount)
rv = 4*standardAmount - inProgress - expireLater - fallback;
long lifetime = getLifetime();
if ( (lifetime < 60*1000) && (rv + inProgress + fallback >= standardAmount) )
rv = standardAmount - inProgress - fallback;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Count: rv: " + rv + " allow? " + allowZeroHop
+ " 30s " + expire30s + " 90s " + expire90s + " 150s " + expire150s + " 210s " + expire210s
+ " 270s " + expire270s + " later " + expireLater
+ " std " + standardAmount + " inProgress " + inProgress + " fallback " + fallback
+ " for " + toString());
+ " for " + toString() + " up for " + lifetime);
if (rv < 0)
return 0;