forked from I2P_Developers/i2p.i2p
merge of 'c5e201203713f0fefcdef642ca50597f8936c79c'
and 'fbd68f812db1e891f96e212b3a5938beec0233b5'
This commit is contained in:
@@ -9,7 +9,9 @@ import net.i2p.data.PublicKey;
|
||||
import net.i2p.data.SessionKey;
|
||||
|
||||
/**
|
||||
* Hold the tunnel request record, managing its encryption and decryption.
|
||||
* Hold the tunnel request record, managing its ElGamal encryption and decryption.
|
||||
* Iterative AES encryption/decryption is done elsewhere.
|
||||
*
|
||||
* Cleartext:
|
||||
* <pre>
|
||||
* bytes 0-3: tunnel ID to receive messages as
|
||||
@@ -23,7 +25,13 @@ import net.i2p.data.SessionKey;
|
||||
* byte 184: flags
|
||||
* bytes 185-188: request time (in hours since the epoch)
|
||||
* bytes 189-192: next message ID
|
||||
* bytes 193-222: uninterpreted / random padding
|
||||
* bytes 193-221: uninterpreted / random padding
|
||||
* </pre>
|
||||
*
|
||||
* Encrypted:
|
||||
* <pre>
|
||||
* bytes 0-15: First 16 bytes of router hash
|
||||
* bytes 16-527: ElGamal encrypted block (discarding zero bytes at elg[0] and elg[257])
|
||||
* </pre>
|
||||
*
|
||||
*/
|
||||
@@ -85,9 +93,10 @@ public class BuildRequestRecord {
|
||||
* the gateway to which the reply should be sent.
|
||||
*/
|
||||
public Hash readNextIdentity() {
|
||||
byte rv[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(_data.getData(), _data.getOffset() + OFF_SEND_IDENT, rv, 0, Hash.HASH_LENGTH);
|
||||
return new Hash(rv);
|
||||
//byte rv[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(_data.getData(), _data.getOffset() + OFF_SEND_IDENT, rv, 0, Hash.HASH_LENGTH);
|
||||
//return new Hash(rv);
|
||||
return Hash.create(_data.getData(), _data.getOffset() + OFF_SEND_IDENT);
|
||||
}
|
||||
/**
|
||||
* Tunnel layer encryption key that the current hop should use
|
||||
@@ -152,7 +161,7 @@ public class BuildRequestRecord {
|
||||
|
||||
/**
|
||||
* Encrypt the record to the specified peer. The result is formatted as: <pre>
|
||||
* bytes 0-15: SHA-256-128 of the current hop's identity (the toPeer parameter)
|
||||
* bytes 0-15: truncated SHA-256 of the current hop's identity (the toPeer parameter)
|
||||
* bytes 15-527: ElGamal-2048 encrypted block
|
||||
* </pre>
|
||||
*/
|
||||
@@ -226,7 +235,7 @@ public class BuildRequestRecord {
|
||||
* byte 184: flags
|
||||
* bytes 185-188: request time (in hours since the epoch)
|
||||
* bytes 189-192: next message ID
|
||||
* bytes 193-222: uninterpreted / random padding
|
||||
* bytes 193-221: uninterpreted / random padding
|
||||
*/
|
||||
DataHelper.toLong(buf, OFF_RECV_TUNNEL, 4, receiveTunnelId);
|
||||
System.arraycopy(peer.getData(), 0, buf, OFF_OUR_IDENT, Hash.HASH_LENGTH);
|
||||
@@ -244,9 +253,7 @@ public class BuildRequestRecord {
|
||||
truncatedHour /= (60l*60l*1000l);
|
||||
DataHelper.toLong(buf, OFF_REQ_TIME, 4, truncatedHour);
|
||||
DataHelper.toLong(buf, OFF_SEND_MSG_ID, 4, nextMsgId);
|
||||
byte rnd[] = new byte[PADDING_SIZE];
|
||||
ctx.random().nextBytes(rnd);
|
||||
System.arraycopy(rnd, 0, buf, OFF_SEND_MSG_ID+4, rnd.length);
|
||||
ctx.random().nextBytes(buf, OFF_SEND_MSG_ID+4, PADDING_SIZE);
|
||||
|
||||
byte wroteIV[] = readReplyIV();
|
||||
if (!DataHelper.eq(iv, wroteIV))
|
||||
|
||||
@@ -1,32 +1,39 @@
|
||||
package net.i2p.data.i2np;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.util.Log;
|
||||
//import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Read and write the reply to a tunnel build message record.
|
||||
*
|
||||
* The reply record is the same size as the request record (528 bytes).
|
||||
* Bytes 0-526 contain random data.
|
||||
* Byte 527 contains the reply.
|
||||
*/
|
||||
public class BuildResponseRecord {
|
||||
|
||||
/**
|
||||
* Create a new encrypted response
|
||||
*
|
||||
* @param status the response
|
||||
* @param responseMessageId unused except for debugging
|
||||
* @return a 528-byte response record
|
||||
*/
|
||||
public byte[] create(I2PAppContext ctx, int status, SessionKey replyKey, byte replyIV[], long responseMessageId) {
|
||||
Log log = ctx.logManager().getLog(BuildResponseRecord.class);
|
||||
public static byte[] create(I2PAppContext ctx, int status, SessionKey replyKey, byte replyIV[], long responseMessageId) {
|
||||
//Log log = ctx.logManager().getLog(BuildResponseRecord.class);
|
||||
byte rv[] = new byte[TunnelBuildReplyMessage.RECORD_SIZE];
|
||||
ctx.random().nextBytes(rv);
|
||||
DataHelper.toLong(rv, TunnelBuildMessage.RECORD_SIZE-1, 1, status);
|
||||
// rv = AES(SHA256(padding+status) + padding + status, replyKey, replyIV)
|
||||
ctx.sha().calculateHash(rv, Hash.HASH_LENGTH, rv.length - Hash.HASH_LENGTH, rv, 0);
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug(responseMessageId + ": before encrypt: " + Base64.encode(rv, 0, 128) + " with " + replyKey.toBase64() + "/" + Base64.encode(replyIV));
|
||||
//if (log.shouldLog(Log.DEBUG))
|
||||
// log.debug(responseMessageId + ": before encrypt: " + Base64.encode(rv, 0, 128) + " with " + replyKey.toBase64() + "/" + Base64.encode(replyIV));
|
||||
ctx.aes().encrypt(rv, 0, rv, 0, replyKey, replyIV, rv.length);
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug(responseMessageId + ": after encrypt: " + Base64.encode(rv, 0, 128));
|
||||
//if (log.shouldLog(Log.DEBUG))
|
||||
// log.debug(responseMessageId + ": after encrypt: " + Base64.encode(rv, 0, 128));
|
||||
return rv;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,49 +8,47 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines a message containing arbitrary bytes of data
|
||||
* This is what goes in a GarlicClove.
|
||||
* It was also previously used for generating test messages.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DataMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DataMessage.class);
|
||||
public class DataMessage extends FastI2NPMessageImpl {
|
||||
public final static int MESSAGE_TYPE = 20;
|
||||
private byte _data[];
|
||||
|
||||
private static final int MAX_SIZE = 64*1024;
|
||||
|
||||
public DataMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_data = null;
|
||||
}
|
||||
|
||||
public byte[] getData() {
|
||||
verifyUnwritten();
|
||||
return _data;
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if data previously set, to protect saved checksum
|
||||
*/
|
||||
public void setData(byte[] data) {
|
||||
verifyUnwritten();
|
||||
if (_data != null)
|
||||
throw new IllegalStateException();
|
||||
_data = data;
|
||||
}
|
||||
|
||||
public int getSize() {
|
||||
verifyUnwritten();
|
||||
return _data.length;
|
||||
}
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
long size = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
if (size > 64*1024)
|
||||
if (size > MAX_SIZE)
|
||||
throw new I2NPMessageException("wtf, size=" + size);
|
||||
_data = new byte[(int)size];
|
||||
System.arraycopy(data, curIndex, _data, 0, (int)size);
|
||||
@@ -63,9 +61,9 @@ public class DataMessage extends I2NPMessageImpl {
|
||||
else
|
||||
return 4 + _data.length;
|
||||
}
|
||||
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) {
|
||||
verifyUnwritten();
|
||||
if (_data == null) {
|
||||
out[curIndex++] = 0x0;
|
||||
out[curIndex++] = 0x0;
|
||||
@@ -81,30 +79,28 @@ public class DataMessage extends I2NPMessageImpl {
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
protected void written() {
|
||||
super.written();
|
||||
_data = null;
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getData());
|
||||
return DataHelper.hashCode(_data);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DataMessage) ) {
|
||||
DataMessage msg = (DataMessage)object;
|
||||
return DataHelper.eq(getData(),msg.getData());
|
||||
return DataHelper.eq(_data, msg._data);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[DataMessage: ");
|
||||
buf.append("\n\tData: ").append(DataHelper.toString(getData(), 64));
|
||||
buf.append("\n\tData: ").append(DataHelper.toString(_data, 64));
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
@@ -8,16 +8,17 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
//import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message a router sends to another router to search for a
|
||||
@@ -25,47 +26,57 @@ import net.i2p.util.Log;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DatabaseLookupMessage.class);
|
||||
public class DatabaseLookupMessage extends FastI2NPMessageImpl {
|
||||
//private final static Log _log = new Log(DatabaseLookupMessage.class);
|
||||
public final static int MESSAGE_TYPE = 2;
|
||||
private Hash _key;
|
||||
private Hash _fromHash;
|
||||
private TunnelId _replyTunnel;
|
||||
private Set _dontIncludePeers;
|
||||
/** this must be kept as a list to preserve the order and not break the checksum */
|
||||
private List<Hash> _dontIncludePeers;
|
||||
|
||||
private static volatile long _currentLookupPeriod = 0;
|
||||
private static volatile int _currentLookupCount = 0;
|
||||
//private static volatile long _currentLookupPeriod = 0;
|
||||
//private static volatile int _currentLookupCount = 0;
|
||||
// if we try to send over 20 netDb lookups in 10 seconds, we're acting up
|
||||
private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
|
||||
private static final long LOOKUP_THROTTLE_MAX = 50;
|
||||
//private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
|
||||
//private static final long LOOKUP_THROTTLE_MAX = 50;
|
||||
|
||||
/** Insanely big. Not much more than 1500 will fit in a message.
|
||||
Have to prevent a huge alloc on rcv of a malicious msg though */
|
||||
private static final int MAX_NUM_PEERS = 512;
|
||||
|
||||
public DatabaseLookupMessage(I2PAppContext context) {
|
||||
this(context, false);
|
||||
}
|
||||
|
||||
/** @param locallyCreated ignored */
|
||||
public DatabaseLookupMessage(I2PAppContext context, boolean locallyCreated) {
|
||||
super(context);
|
||||
//setSearchKey(null);
|
||||
//setFrom(null);
|
||||
//setDontIncludePeers(null);
|
||||
|
||||
context.statManager().createRateStat("router.throttleNetDbDoSSend", "How many netDb lookup messages we are sending during a period with a DoS detected", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
|
||||
// This is the wrong place for this, any throttling should be in netdb
|
||||
// And it doesnt throttle anyway (that would have to be in netdb), it just increments a stat
|
||||
//context.statManager().createRateStat("router.throttleNetDbDoSSend", "How many netDb lookup messages we are sending during a period with a DoS detected", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
//
|
||||
// only check DoS generation if we are creating the message...
|
||||
if (locallyCreated) {
|
||||
// we do this in the writeMessage so we know that we have all the data
|
||||
int dosCount = detectDoS(context);
|
||||
if (dosCount > 0) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Are we flooding the network with NetDb messages? (" + dosCount
|
||||
+ " messages so far)", new Exception("Flood cause"));
|
||||
}
|
||||
}
|
||||
//if (locallyCreated) {
|
||||
// // we do this in the writeMessage so we know that we have all the data
|
||||
// int dosCount = detectDoS(context);
|
||||
// if (dosCount > 0) {
|
||||
// if (_log.shouldLog(Log.WARN))
|
||||
// _log.warn("Are we flooding the network with NetDb messages? (" + dosCount
|
||||
// + " messages so far)", new Exception("Flood cause"));
|
||||
// }
|
||||
//}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return number of netDb messages in this period, if flood, else 0
|
||||
*
|
||||
*/
|
||||
/*****
|
||||
private static int detectDoS(I2PAppContext context) {
|
||||
int count = _currentLookupCount++;
|
||||
// now lets check for DoS
|
||||
@@ -87,53 +98,129 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
*****/
|
||||
|
||||
/**
|
||||
* Defines the key being searched for
|
||||
*/
|
||||
public Hash getSearchKey() { return _key; }
|
||||
public void setSearchKey(Hash key) { _key = key; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if key previously set, to protect saved checksum
|
||||
*/
|
||||
public void setSearchKey(Hash key) {
|
||||
if (_key != null)
|
||||
throw new IllegalStateException();
|
||||
_key = key;
|
||||
}
|
||||
|
||||
/**
|
||||
* Contains the router who requested this lookup
|
||||
*
|
||||
*/
|
||||
public Hash getFrom() { return _fromHash; }
|
||||
public void setFrom(Hash from) { _fromHash = from; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if from previously set, to protect saved checksum
|
||||
*/
|
||||
public void setFrom(Hash from) {
|
||||
if (_fromHash != null)
|
||||
throw new IllegalStateException();
|
||||
_fromHash = from;
|
||||
}
|
||||
|
||||
/**
|
||||
* Contains the tunnel ID a reply should be sent to
|
||||
*
|
||||
*/
|
||||
public TunnelId getReplyTunnel() { return _replyTunnel; }
|
||||
public void setReplyTunnel(TunnelId replyTunnel) { _replyTunnel = replyTunnel; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if tunnel previously set, to protect saved checksum
|
||||
*/
|
||||
public void setReplyTunnel(TunnelId replyTunnel) {
|
||||
if (_replyTunnel != null)
|
||||
throw new IllegalStateException();
|
||||
_replyTunnel = replyTunnel;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set of peers that a lookup reply should NOT include
|
||||
* Set of peers that a lookup reply should NOT include.
|
||||
* WARNING - returns a copy.
|
||||
*
|
||||
* @return Set of Hash objects, each of which is the H(routerIdentity) to skip
|
||||
* @return Set of Hash objects, each of which is the H(routerIdentity) to skip, or null
|
||||
*/
|
||||
public Set getDontIncludePeers() { return _dontIncludePeers; }
|
||||
public void setDontIncludePeers(Set peers) {
|
||||
public Set<Hash> getDontIncludePeers() {
|
||||
if (_dontIncludePeers == null)
|
||||
return null;
|
||||
return new HashSet(_dontIncludePeers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace the dontInclude set with this set.
|
||||
* WARNING - makes a copy.
|
||||
* Invalidates the checksum.
|
||||
*
|
||||
* @param peers may be null
|
||||
*/
|
||||
public void setDontIncludePeers(Collection<Hash> peers) {
|
||||
_hasChecksum = false;
|
||||
if (peers != null)
|
||||
_dontIncludePeers = new HashSet(peers);
|
||||
_dontIncludePeers = new ArrayList(peers);
|
||||
else
|
||||
_dontIncludePeers = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to the set.
|
||||
* Invalidates the checksum.
|
||||
*
|
||||
* @param peer non-null
|
||||
* @since 0.8.12
|
||||
*/
|
||||
public void addDontIncludePeer(Hash peer) {
|
||||
if (_dontIncludePeers == null)
|
||||
_dontIncludePeers = new ArrayList();
|
||||
else if (_dontIncludePeers.contains(peer))
|
||||
return;
|
||||
_hasChecksum = false;
|
||||
_dontIncludePeers.add(peer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to the set.
|
||||
* Invalidates the checksum.
|
||||
*
|
||||
* @param peers non-null
|
||||
* @since 0.8.12
|
||||
*/
|
||||
public void addDontIncludePeers(Collection<Hash> peers) {
|
||||
_hasChecksum = false;
|
||||
if (_dontIncludePeers == null) {
|
||||
_dontIncludePeers = new ArrayList(peers);
|
||||
} else {
|
||||
for (Hash peer : peers) {
|
||||
if (!_dontIncludePeers.contains(peer))
|
||||
_dontIncludePeers.add(peer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
byte keyData[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
|
||||
//byte keyData[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
|
||||
_key = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_key = new Hash(keyData);
|
||||
//_key = new Hash(keyData);
|
||||
|
||||
byte fromData[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, fromData, 0, Hash.HASH_LENGTH);
|
||||
//byte fromData[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, curIndex, fromData, 0, Hash.HASH_LENGTH);
|
||||
_fromHash = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_fromHash = new Hash(fromData);
|
||||
//_fromHash = new Hash(fromData);
|
||||
|
||||
boolean tunnelSpecified = false;
|
||||
switch (data[curIndex]) {
|
||||
@@ -156,14 +243,15 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
int numPeers = (int)DataHelper.fromLong(data, curIndex, 2);
|
||||
curIndex += 2;
|
||||
|
||||
if ( (numPeers < 0) || (numPeers >= (1<<16) ) )
|
||||
if ( (numPeers < 0) || (numPeers > MAX_NUM_PEERS) )
|
||||
throw new I2NPMessageException("Invalid number of peers - " + numPeers);
|
||||
Set peers = new HashSet(numPeers);
|
||||
List<Hash> peers = new ArrayList(numPeers);
|
||||
for (int i = 0; i < numPeers; i++) {
|
||||
byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
//byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
Hash p = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
peers.add(new Hash(peer));
|
||||
peers.add(p);
|
||||
}
|
||||
_dontIncludePeers = peers;
|
||||
}
|
||||
@@ -197,15 +285,17 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
} else {
|
||||
out[curIndex++] = DataHelper.BOOLEAN_FALSE;
|
||||
}
|
||||
if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) {
|
||||
if ( (_dontIncludePeers == null) || (_dontIncludePeers.isEmpty()) ) {
|
||||
out[curIndex++] = 0x0;
|
||||
out[curIndex++] = 0x0;
|
||||
} else {
|
||||
byte len[] = DataHelper.toLong(2, _dontIncludePeers.size());
|
||||
int size = _dontIncludePeers.size();
|
||||
if (size > MAX_NUM_PEERS)
|
||||
throw new I2NPMessageException("Too many peers: " + size);
|
||||
byte len[] = DataHelper.toLong(2, size);
|
||||
out[curIndex++] = len[0];
|
||||
out[curIndex++] = len[1];
|
||||
for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
|
||||
Hash peer = (Hash)iter.next();
|
||||
for (Hash peer : _dontIncludePeers) {
|
||||
System.arraycopy(peer.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
}
|
||||
@@ -215,31 +305,34 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getSearchKey()) +
|
||||
DataHelper.hashCode(getFrom()) +
|
||||
DataHelper.hashCode(getReplyTunnel()) +
|
||||
return DataHelper.hashCode(_key) +
|
||||
DataHelper.hashCode(_fromHash) +
|
||||
DataHelper.hashCode(_replyTunnel) +
|
||||
DataHelper.hashCode(_dontIncludePeers);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DatabaseLookupMessage) ) {
|
||||
DatabaseLookupMessage msg = (DatabaseLookupMessage)object;
|
||||
return DataHelper.eq(getSearchKey(),msg.getSearchKey()) &&
|
||||
DataHelper.eq(getFrom(),msg.getFrom()) &&
|
||||
DataHelper.eq(getReplyTunnel(),msg.getReplyTunnel()) &&
|
||||
DataHelper.eq(_dontIncludePeers,msg.getDontIncludePeers());
|
||||
return DataHelper.eq(_key, msg._key) &&
|
||||
DataHelper.eq(_fromHash, msg._fromHash) &&
|
||||
DataHelper.eq(_replyTunnel, msg._replyTunnel) &&
|
||||
DataHelper.eq(_dontIncludePeers, msg._dontIncludePeers);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[DatabaseLookupMessage: ");
|
||||
buf.append("\n\tSearch Key: ").append(getSearchKey());
|
||||
buf.append("\n\tFrom: ").append(getFrom());
|
||||
buf.append("\n\tReply Tunnel: ").append(getReplyTunnel());
|
||||
buf.append("\n\tSearch Key: ").append(_key);
|
||||
buf.append("\n\tFrom: ").append(_fromHash);
|
||||
buf.append("\n\tReply Tunnel: ").append(_replyTunnel);
|
||||
buf.append("\n\tDont Include Peers: ");
|
||||
if (_dontIncludePeers != null)
|
||||
buf.append(_dontIncludePeers.size());
|
||||
|
||||
@@ -8,14 +8,12 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message a router sends to another router in response to a
|
||||
@@ -24,62 +22,71 @@ import net.i2p.util.Log;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DatabaseSearchReplyMessage.class);
|
||||
public class DatabaseSearchReplyMessage extends FastI2NPMessageImpl {
|
||||
public final static int MESSAGE_TYPE = 3;
|
||||
private Hash _key;
|
||||
private List _peerHashes;
|
||||
private List<Hash> _peerHashes;
|
||||
private Hash _from;
|
||||
|
||||
public DatabaseSearchReplyMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_context.statManager().createRateStat("netDb.searchReplyMessageSend", "How many search reply messages we send", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
_context.statManager().createRateStat("netDb.searchReplyMessageReceive", "How many search reply messages we receive", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
setSearchKey(null);
|
||||
// do this in netdb if we need it
|
||||
//_context.statManager().createRateStat("netDb.searchReplyMessageSend", "How many search reply messages we send", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
//_context.statManager().createRateStat("netDb.searchReplyMessageReceive", "How many search reply messages we receive", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
_peerHashes = new ArrayList(3);
|
||||
setFromHash(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines the key being searched for
|
||||
*/
|
||||
public Hash getSearchKey() { return _key; }
|
||||
public void setSearchKey(Hash key) { _key = key; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if key previously set, to protect saved checksum
|
||||
*/
|
||||
public void setSearchKey(Hash key) {
|
||||
if (_key != null)
|
||||
throw new IllegalStateException();
|
||||
_key = key;
|
||||
}
|
||||
|
||||
public int getNumReplies() { return _peerHashes.size(); }
|
||||
public Hash getReply(int index) { return (Hash)_peerHashes.get(index); }
|
||||
public Hash getReply(int index) { return _peerHashes.get(index); }
|
||||
public void addReply(Hash peer) { _peerHashes.add(peer); }
|
||||
//public void addReplies(Collection replies) { _peerHashes.addAll(replies); }
|
||||
|
||||
public Hash getFromHash() { return _from; }
|
||||
public void setFromHash(Hash from) { _from = from; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
byte keyData[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
|
||||
//byte keyData[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
|
||||
_key = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_key = new Hash(keyData);
|
||||
//_key = new Hash(keyData);
|
||||
|
||||
int num = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
|
||||
_peerHashes.clear();
|
||||
for (int i = 0; i < num; i++) {
|
||||
byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
//byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
Hash p = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
addReply(new Hash(peer));
|
||||
addReply(p);
|
||||
}
|
||||
|
||||
byte from[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, from, 0, Hash.HASH_LENGTH);
|
||||
//byte from[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, curIndex, from, 0, Hash.HASH_LENGTH);
|
||||
_from = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_from = new Hash(from);
|
||||
//_from = new Hash(from);
|
||||
|
||||
_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
|
||||
//_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
|
||||
}
|
||||
|
||||
/** calculate the message body's length (not including the header and footer */
|
||||
@@ -110,32 +117,35 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DatabaseSearchReplyMessage) ) {
|
||||
DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)object;
|
||||
return DataHelper.eq(getSearchKey(),msg.getSearchKey()) &&
|
||||
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
|
||||
return DataHelper.eq(_key,msg._key) &&
|
||||
DataHelper.eq(_from,msg._from) &&
|
||||
DataHelper.eq(_peerHashes,msg._peerHashes);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getSearchKey()) +
|
||||
DataHelper.hashCode(getFromHash()) +
|
||||
return DataHelper.hashCode(_key) +
|
||||
DataHelper.hashCode(_from) +
|
||||
DataHelper.hashCode(_peerHashes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[DatabaseSearchReplyMessage: ");
|
||||
buf.append("\n\tSearch Key: ").append(getSearchKey());
|
||||
buf.append("\n\tSearch Key: ").append(_key);
|
||||
buf.append("\n\tReplies: # = ").append(getNumReplies());
|
||||
for (int i = 0; i < getNumReplies(); i++) {
|
||||
buf.append("\n\t\tReply [").append(i).append("]: ").append(getReply(i));
|
||||
}
|
||||
buf.append("\n\tFrom: ").append(getFromHash());
|
||||
buf.append("\n\tFrom: ").append(_from);
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
@@ -12,83 +12,62 @@ import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DatabaseEntry;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message a router sends to another router to test the network
|
||||
* database reachability, as well as the reply message sent back.
|
||||
*
|
||||
* TODO: Don't decompress and recompress RouterInfos at the OBEP and IBGW.
|
||||
* Could this even change the message length or corrupt things?
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DatabaseStoreMessage.class);
|
||||
public class DatabaseStoreMessage extends FastI2NPMessageImpl {
|
||||
public final static int MESSAGE_TYPE = 1;
|
||||
private Hash _key;
|
||||
private int _type;
|
||||
private LeaseSet _leaseSet;
|
||||
private RouterInfo _info;
|
||||
private byte[] _leaseSetCache;
|
||||
private byte[] _routerInfoCache;
|
||||
private DatabaseEntry _dbEntry;
|
||||
private byte[] _byteCache;
|
||||
private long _replyToken;
|
||||
private TunnelId _replyTunnel;
|
||||
private Hash _replyGateway;
|
||||
|
||||
public final static int KEY_TYPE_ROUTERINFO = 0;
|
||||
public final static int KEY_TYPE_LEASESET = 1;
|
||||
|
||||
public DatabaseStoreMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setValueType(-1);
|
||||
setKey(null);
|
||||
setLeaseSet(null);
|
||||
setRouterInfo(null);
|
||||
setReplyToken(0);
|
||||
setReplyTunnel(null);
|
||||
setReplyGateway(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines the key in the network database being stored
|
||||
*
|
||||
*/
|
||||
public Hash getKey() { return _key; }
|
||||
public void setKey(Hash key) { _key = key; }
|
||||
|
||||
/**
|
||||
* Defines the router info value in the network database being stored
|
||||
*
|
||||
*/
|
||||
public RouterInfo getRouterInfo() { return _info; }
|
||||
public void setRouterInfo(RouterInfo routerInfo) {
|
||||
_info = routerInfo;
|
||||
if (_info != null)
|
||||
setValueType(KEY_TYPE_ROUTERINFO);
|
||||
public Hash getKey() {
|
||||
if (_key != null)
|
||||
return _key; // receive
|
||||
if (_dbEntry != null)
|
||||
return _dbEntry.getHash(); // create
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines the lease set value in the network database being stored
|
||||
*
|
||||
* Defines the entry in the network database being stored
|
||||
*/
|
||||
public LeaseSet getLeaseSet() { return _leaseSet; }
|
||||
public void setLeaseSet(LeaseSet leaseSet) {
|
||||
_leaseSet = leaseSet;
|
||||
if (_leaseSet != null)
|
||||
setValueType(KEY_TYPE_LEASESET);
|
||||
}
|
||||
|
||||
public DatabaseEntry getEntry() { return _dbEntry; }
|
||||
|
||||
/**
|
||||
* Defines type of key being stored in the network database -
|
||||
* either KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET
|
||||
*
|
||||
* This also sets the key
|
||||
* @throws IllegalStateException if data previously set, to protect saved checksum
|
||||
*/
|
||||
public int getValueType() { return _type; }
|
||||
public void setValueType(int type) { _type = type; }
|
||||
public void setEntry(DatabaseEntry entry) {
|
||||
if (_dbEntry != null)
|
||||
throw new IllegalStateException();
|
||||
_dbEntry = entry;
|
||||
}
|
||||
|
||||
/**
|
||||
* If a reply is desired, this token specifies the message ID that should
|
||||
@@ -98,6 +77,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
* @return positive reply token ID, or 0 if no reply is necessary.
|
||||
*/
|
||||
public long getReplyToken() { return _replyToken; }
|
||||
|
||||
/**
|
||||
* Update the reply token.
|
||||
*
|
||||
@@ -117,16 +97,14 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
public Hash getReplyGateway() { return _replyGateway; }
|
||||
public void setReplyGateway(Hash peer) { _replyGateway = peer; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
byte keyData[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
|
||||
_key = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_key = new Hash(keyData);
|
||||
|
||||
_type = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
type = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
|
||||
_replyToken = DataHelper.fromLong(data, curIndex, 4);
|
||||
@@ -138,68 +116,93 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
_replyTunnel = new TunnelId(tunnel);
|
||||
curIndex += 4;
|
||||
|
||||
byte gw[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
|
||||
_replyGateway = Hash.create(data, curIndex);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_replyGateway = new Hash(gw);
|
||||
} else {
|
||||
_replyTunnel = null;
|
||||
_replyGateway = null;
|
||||
}
|
||||
|
||||
if (_type == KEY_TYPE_LEASESET) {
|
||||
_leaseSet = new LeaseSet();
|
||||
if (type == DatabaseEntry.KEY_TYPE_LEASESET) {
|
||||
_dbEntry = new LeaseSet();
|
||||
try {
|
||||
_leaseSet.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
|
||||
_dbEntry.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the leaseSet", dfe);
|
||||
} catch (IOException ioe) {
|
||||
throw new I2NPMessageException("Error reading the leaseSet", ioe);
|
||||
}
|
||||
} else if (_type == KEY_TYPE_ROUTERINFO) {
|
||||
_info = new RouterInfo();
|
||||
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
|
||||
_dbEntry = new RouterInfo();
|
||||
int compressedSize = (int)DataHelper.fromLong(data, curIndex, 2);
|
||||
curIndex += 2;
|
||||
if (compressedSize <= 0 || curIndex + compressedSize > data.length || curIndex + compressedSize > dataSize + offset)
|
||||
throw new I2NPMessageException("Compressed RI length: " + compressedSize +
|
||||
" but remaining bytes: " + Math.min(data.length - curIndex, dataSize + offset -curIndex));
|
||||
|
||||
try {
|
||||
// TODO we could delay decompression, just copy to a new byte array and store in _byteCache
|
||||
// May not be necessary since the IBGW now uses UnknownI2NPMessage.
|
||||
// DSMs at the OBEP are generally garlic wrapped, so the OBEP won't see it.
|
||||
// If we do delay it, getEntry() will have to check if _dbEntry is null and _byteCache
|
||||
// is non-null, and then decompress.
|
||||
byte decompressed[] = DataHelper.decompress(data, curIndex, compressedSize);
|
||||
_info.readBytes(new ByteArrayInputStream(decompressed));
|
||||
_dbEntry.readBytes(new ByteArrayInputStream(decompressed));
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the routerInfo", dfe);
|
||||
} catch (IOException ioe) {
|
||||
throw new I2NPMessageException("Compressed routerInfo was corrupt", ioe);
|
||||
throw new I2NPMessageException("Corrupt compressed routerInfo size = " + compressedSize, ioe);
|
||||
}
|
||||
} else {
|
||||
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
|
||||
throw new I2NPMessageException("Invalid type of key read from the structure - " + type);
|
||||
}
|
||||
//if (!key.equals(_dbEntry.getHash()))
|
||||
// throw new I2NPMessageException("Hash mismatch in DSM");
|
||||
}
|
||||
|
||||
|
||||
/** calculate the message body's length (not including the header and footer */
|
||||
/**
|
||||
* calculate the message body's length (not including the header and footer)
|
||||
*
|
||||
* @throws IllegalStateException
|
||||
*/
|
||||
protected int calculateWrittenLength() {
|
||||
// TODO if _byteCache is non-null, don't check _dbEntry
|
||||
if (_dbEntry == null)
|
||||
throw new IllegalStateException("Missing entry");
|
||||
int len = Hash.HASH_LENGTH + 1 + 4; // key+type+replyToken
|
||||
if (_replyToken > 0)
|
||||
len += 4 + Hash.HASH_LENGTH; // replyTunnel+replyGateway
|
||||
if (_type == KEY_TYPE_LEASESET) {
|
||||
_leaseSetCache = _leaseSet.toByteArray();
|
||||
len += _leaseSetCache.length;
|
||||
} else if (_type == KEY_TYPE_ROUTERINFO) {
|
||||
byte uncompressed[] = _info.toByteArray();
|
||||
byte compressed[] = DataHelper.compress(uncompressed);
|
||||
_routerInfoCache = compressed;
|
||||
len += compressed.length + 2;
|
||||
int type = _dbEntry.getType();
|
||||
if (type == DatabaseEntry.KEY_TYPE_LEASESET) {
|
||||
if (_byteCache == null) {
|
||||
_byteCache = _dbEntry.toByteArray();
|
||||
}
|
||||
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
|
||||
// only decompress once
|
||||
if (_byteCache == null) {
|
||||
byte uncompressed[] = _dbEntry.toByteArray();
|
||||
_byteCache = DataHelper.compress(uncompressed);
|
||||
}
|
||||
len += 2;
|
||||
} else {
|
||||
throw new IllegalStateException("Invalid key type " + type);
|
||||
}
|
||||
len += _byteCache.length;
|
||||
return len;
|
||||
}
|
||||
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if (_key == null) throw new I2NPMessageException("Invalid key");
|
||||
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type");
|
||||
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set");
|
||||
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info");
|
||||
if (_dbEntry == null) throw new I2NPMessageException("Missing entry");
|
||||
int type = _dbEntry.getType();
|
||||
if (type != DatabaseEntry.KEY_TYPE_LEASESET && type != DatabaseEntry.KEY_TYPE_ROUTERINFO)
|
||||
throw new I2NPMessageException("Invalid key type " + type);
|
||||
|
||||
System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
|
||||
// Use the hash of the DatabaseEntry
|
||||
System.arraycopy(getKey().getData(), 0, out, curIndex, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
byte type[] = DataHelper.toLong(1, _type);
|
||||
out[curIndex++] = type[0];
|
||||
out[curIndex++] = (byte) type;
|
||||
byte tok[] = DataHelper.toLong(4, _replyToken);
|
||||
System.arraycopy(tok, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
@@ -215,59 +218,53 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
}
|
||||
|
||||
if (_type == KEY_TYPE_LEASESET) {
|
||||
// initialized in calculateWrittenLength
|
||||
System.arraycopy(_leaseSetCache, 0, out, curIndex, _leaseSetCache.length);
|
||||
curIndex += _leaseSetCache.length;
|
||||
} else if (_type == KEY_TYPE_ROUTERINFO) {
|
||||
byte len[] = DataHelper.toLong(2, _routerInfoCache.length);
|
||||
// _byteCache initialized in calculateWrittenLength
|
||||
if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
|
||||
byte len[] = DataHelper.toLong(2, _byteCache.length);
|
||||
out[curIndex++] = len[0];
|
||||
out[curIndex++] = len[1];
|
||||
System.arraycopy(_routerInfoCache, 0, out, curIndex, _routerInfoCache.length);
|
||||
curIndex += _routerInfoCache.length;
|
||||
}
|
||||
System.arraycopy(_byteCache, 0, out, curIndex, _byteCache.length);
|
||||
curIndex += _byteCache.length;
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getKey()) +
|
||||
DataHelper.hashCode(getLeaseSet()) +
|
||||
DataHelper.hashCode(getRouterInfo()) +
|
||||
getValueType() +
|
||||
(int)getReplyToken() +
|
||||
DataHelper.hashCode(getReplyTunnel()) +
|
||||
DataHelper.hashCode(getReplyGateway());
|
||||
DataHelper.hashCode(_dbEntry) +
|
||||
(int) _replyToken +
|
||||
DataHelper.hashCode(_replyTunnel) +
|
||||
DataHelper.hashCode(_replyGateway);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DatabaseStoreMessage) ) {
|
||||
DatabaseStoreMessage msg = (DatabaseStoreMessage)object;
|
||||
return DataHelper.eq(getKey(),msg.getKey()) &&
|
||||
DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) &&
|
||||
DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) &&
|
||||
DataHelper.eq(getValueType(),msg.getValueType()) &&
|
||||
getReplyToken() == msg.getReplyToken() &&
|
||||
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) &&
|
||||
DataHelper.eq(getReplyGateway(), msg.getReplyGateway());
|
||||
DataHelper.eq(_dbEntry,msg.getEntry()) &&
|
||||
_replyToken == msg._replyToken &&
|
||||
DataHelper.eq(_replyTunnel, msg._replyTunnel) &&
|
||||
DataHelper.eq(_replyGateway, msg._replyGateway);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[DatabaseStoreMessage: ");
|
||||
buf.append("\n\tExpiration: ").append(getMessageExpiration());
|
||||
buf.append("\n\tUnique ID: ").append(getUniqueId());
|
||||
buf.append("\n\tExpiration: ").append(_expiration);
|
||||
buf.append("\n\tUnique ID: ").append(_uniqueId);
|
||||
buf.append("\n\tKey: ").append(getKey());
|
||||
buf.append("\n\tValue Type: ").append(getValueType());
|
||||
buf.append("\n\tRouter Info: ").append(getRouterInfo());
|
||||
buf.append("\n\tLease Set: ").append(getLeaseSet());
|
||||
buf.append("\n\tReply token: ").append(getReplyToken());
|
||||
buf.append("\n\tReply tunnel: ").append(getReplyTunnel());
|
||||
buf.append("\n\tReply gateway: ").append(getReplyGateway());
|
||||
buf.append("\n\tEntry: ").append(_dbEntry);
|
||||
buf.append("\n\tReply token: ").append(_replyToken);
|
||||
buf.append("\n\tReply tunnel: ").append(_replyTunnel);
|
||||
buf.append("\n\tReply gateway: ").append(_replyGateway);
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Contains the sending router's current time, to sync (and verify sync)
|
||||
*
|
||||
*/
|
||||
public class DateMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DateMessage.class);
|
||||
public final static int MESSAGE_TYPE = 16;
|
||||
private long _now;
|
||||
|
||||
public DateMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_now = context.clock().now();
|
||||
}
|
||||
|
||||
public long getNow() { return _now; }
|
||||
public void setNow(long now) { _now = now; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
_now = DataHelper.fromLong(data, curIndex, DataHelper.DATE_LENGTH);
|
||||
}
|
||||
|
||||
/** calculate the message body's length (not including the header and footer */
|
||||
protected int calculateWrittenLength() {
|
||||
return DataHelper.DATE_LENGTH; // now
|
||||
}
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if (_now <= 0) throw new I2NPMessageException("Not enough data to write out");
|
||||
|
||||
DataHelper.toLong(out, curIndex, DataHelper.DATE_LENGTH, _now);
|
||||
curIndex += DataHelper.DATE_LENGTH;
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
public int hashCode() {
|
||||
return (int)getNow();
|
||||
}
|
||||
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DateMessage) ) {
|
||||
DateMessage msg = (DateMessage)object;
|
||||
return msg.getNow() == getNow();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DateMessage: ");
|
||||
buf.append("Now: ").append(_now);
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
}
|
||||
@@ -18,16 +18,21 @@ import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
//import net.i2p.util.Log;
|
||||
|
||||
|
||||
/**
|
||||
* Contains the delivery instructions
|
||||
* Contains the delivery instructions for garlic cloves.
|
||||
* Generic "delivery instructions" are used both in tunnel messages
|
||||
* and in garlic cloves, with slight differences.
|
||||
* However,
|
||||
* the tunnel message generator TrivialPreprocessor and reader FragmentHandler do not use this class,
|
||||
* the reading and writing is handled inline there.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DeliveryInstructions extends DataStructureImpl {
|
||||
private final static Log _log = new Log(DeliveryInstructions.class);
|
||||
//private final static Log _log = new Log(DeliveryInstructions.class);
|
||||
private boolean _encrypted;
|
||||
private SessionKey _encryptionKey;
|
||||
private int _deliveryMode;
|
||||
@@ -46,43 +51,96 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
private final static int FLAG_MODE_ROUTER = 2;
|
||||
private final static int FLAG_MODE_TUNNEL = 3;
|
||||
|
||||
/** @deprecated unused */
|
||||
private final static long FLAG_ENCRYPTED = 128;
|
||||
private final static long FLAG_MODE = 96;
|
||||
private final static long FLAG_DELAY = 16;
|
||||
|
||||
public DeliveryInstructions() {
|
||||
setEncrypted(false);
|
||||
setEncryptionKey(null);
|
||||
setDeliveryMode(-1);
|
||||
setDestination(null);
|
||||
setRouter(null);
|
||||
setTunnelId(null);
|
||||
setDelayRequested(false);
|
||||
setDelaySeconds(0);
|
||||
_deliveryMode = -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* For cloves only (not tunnels), default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public boolean getEncrypted() { return _encrypted; }
|
||||
|
||||
/**
|
||||
* For cloves only (not tunnels), default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void setEncrypted(boolean encrypted) { _encrypted = encrypted; }
|
||||
|
||||
/**
|
||||
* For cloves only (not tunnels), default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public SessionKey getEncryptionKey() { return _encryptionKey; }
|
||||
|
||||
/**
|
||||
* For cloves only (not tunnels), default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void setEncryptionKey(SessionKey key) { _encryptionKey = key; }
|
||||
|
||||
/** default -1 */
|
||||
public int getDeliveryMode() { return _deliveryMode; }
|
||||
|
||||
/** @param mode 0-3 */
|
||||
public void setDeliveryMode(int mode) { _deliveryMode = mode; }
|
||||
|
||||
/** default null */
|
||||
public Hash getDestination() { return _destinationHash; }
|
||||
|
||||
/** required for DESTINATION */
|
||||
public void setDestination(Hash dest) { _destinationHash = dest; }
|
||||
|
||||
/** default null */
|
||||
public Hash getRouter() { return _routerHash; }
|
||||
|
||||
/** required for ROUTER or TUNNEL */
|
||||
public void setRouter(Hash router) { _routerHash = router; }
|
||||
|
||||
/** default null */
|
||||
public TunnelId getTunnelId() { return _tunnelId; }
|
||||
|
||||
/** required for TUNNEL */
|
||||
public void setTunnelId(TunnelId id) { _tunnelId = id; }
|
||||
|
||||
/**
|
||||
* default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public boolean getDelayRequested() { return _delayRequested; }
|
||||
|
||||
/**
|
||||
* default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void setDelayRequested(boolean req) { _delayRequested = req; }
|
||||
|
||||
/**
|
||||
* default 0, unusedx
|
||||
* @deprecated unused
|
||||
*/
|
||||
public long getDelaySeconds() { return _delaySeconds; }
|
||||
|
||||
/**
|
||||
* default 0, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void setDelaySeconds(long seconds) { _delaySeconds = seconds; }
|
||||
|
||||
/**
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
long flags = DataHelper.readLong(in, 1);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Read flags: " + flags + " mode: " + flagMode(flags));
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Read flags: " + flags + " mode: " + flagMode(flags));
|
||||
|
||||
/****
|
||||
if (flagEncrypted(flags)) {
|
||||
SessionKey k = new SessionKey();
|
||||
k.readBytes(in);
|
||||
@@ -91,24 +149,28 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
} else {
|
||||
setEncrypted(false);
|
||||
}
|
||||
****/
|
||||
|
||||
setDeliveryMode(flagMode(flags));
|
||||
switch (flagMode(flags)) {
|
||||
case FLAG_MODE_LOCAL:
|
||||
break;
|
||||
case FLAG_MODE_DESTINATION:
|
||||
Hash destHash = new Hash();
|
||||
destHash.readBytes(in);
|
||||
//Hash destHash = new Hash();
|
||||
//destHash.readBytes(in);
|
||||
Hash destHash = Hash.create(in);
|
||||
setDestination(destHash);
|
||||
break;
|
||||
case FLAG_MODE_ROUTER:
|
||||
Hash routerHash = new Hash();
|
||||
routerHash.readBytes(in);
|
||||
//Hash routerHash = new Hash();
|
||||
//routerHash.readBytes(in);
|
||||
Hash routerHash = Hash.create(in);
|
||||
setRouter(routerHash);
|
||||
break;
|
||||
case FLAG_MODE_TUNNEL:
|
||||
Hash tunnelRouterHash = new Hash();
|
||||
tunnelRouterHash.readBytes(in);
|
||||
//Hash tunnelRouterHash = new Hash();
|
||||
//tunnelRouterHash.readBytes(in);
|
||||
Hash tunnelRouterHash = Hash.create(in);
|
||||
setRouter(tunnelRouterHash);
|
||||
TunnelId id = new TunnelId();
|
||||
id.readBytes(in);
|
||||
@@ -129,9 +191,10 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
int cur = offset;
|
||||
long flags = DataHelper.fromLong(data, cur, 1);
|
||||
cur++;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Read flags: " + flags + " mode: " + flagMode(flags));
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Read flags: " + flags + " mode: " + flagMode(flags));
|
||||
|
||||
/****
|
||||
if (flagEncrypted(flags)) {
|
||||
byte kd[] = new byte[SessionKey.KEYSIZE_BYTES];
|
||||
System.arraycopy(data, cur, kd, 0, SessionKey.KEYSIZE_BYTES);
|
||||
@@ -141,28 +204,32 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
} else {
|
||||
setEncrypted(false);
|
||||
}
|
||||
****/
|
||||
|
||||
setDeliveryMode(flagMode(flags));
|
||||
switch (flagMode(flags)) {
|
||||
case FLAG_MODE_LOCAL:
|
||||
break;
|
||||
case FLAG_MODE_DESTINATION:
|
||||
byte destHash[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, cur, destHash, 0, Hash.HASH_LENGTH);
|
||||
//byte destHash[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, cur, destHash, 0, Hash.HASH_LENGTH);
|
||||
Hash dh = Hash.create(data, cur);
|
||||
cur += Hash.HASH_LENGTH;
|
||||
setDestination(new Hash(destHash));
|
||||
setDestination(dh);
|
||||
break;
|
||||
case FLAG_MODE_ROUTER:
|
||||
byte routerHash[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, cur, routerHash, 0, Hash.HASH_LENGTH);
|
||||
//byte routerHash[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, cur, routerHash, 0, Hash.HASH_LENGTH);
|
||||
Hash rh = Hash.create(data, cur);
|
||||
cur += Hash.HASH_LENGTH;
|
||||
setRouter(new Hash(routerHash));
|
||||
setRouter(rh);
|
||||
break;
|
||||
case FLAG_MODE_TUNNEL:
|
||||
byte tunnelRouterHash[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, cur, tunnelRouterHash, 0, Hash.HASH_LENGTH);
|
||||
//byte tunnelRouterHash[] = new byte[Hash.HASH_LENGTH];
|
||||
//System.arraycopy(data, cur, tunnelRouterHash, 0, Hash.HASH_LENGTH);
|
||||
Hash trh = Hash.create(data, cur);
|
||||
cur += Hash.HASH_LENGTH;
|
||||
setRouter(new Hash(tunnelRouterHash));
|
||||
setRouter(trh);
|
||||
setTunnelId(new TunnelId(DataHelper.fromLong(data, cur, 4)));
|
||||
cur += 4;
|
||||
break;
|
||||
@@ -180,24 +247,34 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* For cloves only (not tunnels), default false, unused
|
||||
* @deprecated unused
|
||||
*/
|
||||
/****
|
||||
private boolean flagEncrypted(long flags) {
|
||||
return (0 != (flags & FLAG_ENCRYPTED));
|
||||
}
|
||||
****/
|
||||
|
||||
/** high bits */
|
||||
private int flagMode(long flags) {
|
||||
long v = flags & FLAG_MODE;
|
||||
v >>>= 5;
|
||||
return (int)v;
|
||||
}
|
||||
|
||||
/** unused */
|
||||
private boolean flagDelay(long flags) {
|
||||
return (0 != (flags & FLAG_DELAY));
|
||||
}
|
||||
|
||||
private long getFlags() {
|
||||
long val = 0L;
|
||||
/****
|
||||
if (getEncrypted())
|
||||
val = val | FLAG_ENCRYPTED;
|
||||
****/
|
||||
long fmode = 0;
|
||||
switch (getDeliveryMode()) {
|
||||
case FLAG_MODE_LOCAL:
|
||||
@@ -215,21 +292,23 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
val = val | fmode;
|
||||
if (getDelayRequested())
|
||||
val = val | FLAG_DELAY;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("getFlags() = " + val);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("getFlags() = " + val);
|
||||
return val;
|
||||
}
|
||||
|
||||
private int getAdditionalInfoSize() {
|
||||
int additionalSize = 0;
|
||||
/****
|
||||
if (getEncrypted()) {
|
||||
if (_encryptionKey == null) throw new IllegalStateException("Encryption key is not set");
|
||||
additionalSize += SessionKey.KEYSIZE_BYTES;
|
||||
}
|
||||
****/
|
||||
switch (getDeliveryMode()) {
|
||||
case FLAG_MODE_LOCAL:
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("mode = local");
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("mode = local");
|
||||
break;
|
||||
case FLAG_MODE_DESTINATION:
|
||||
if (_destinationHash == null) throw new IllegalStateException("Destination hash is not set");
|
||||
@@ -258,11 +337,15 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
int offset = 0;
|
||||
offset += getAdditionalInfo(rv, offset);
|
||||
if (offset != additionalSize)
|
||||
_log.log(Log.CRIT, "wtf, additionalSize = " + additionalSize + ", offset = " + offset);
|
||||
//_log.log(Log.CRIT, "wtf, additionalSize = " + additionalSize + ", offset = " + offset);
|
||||
throw new IllegalStateException("wtf, additionalSize = " + additionalSize + ", offset = " + offset);
|
||||
return rv;
|
||||
}
|
||||
|
||||
private int getAdditionalInfo(byte rv[], int offset) {
|
||||
int origOffset = offset;
|
||||
|
||||
/****
|
||||
if (getEncrypted()) {
|
||||
if (_encryptionKey == null) throw new IllegalStateException("Encryption key is not set");
|
||||
System.arraycopy(_encryptionKey.getData(), 0, rv, offset, SessionKey.KEYSIZE_BYTES);
|
||||
@@ -273,24 +356,26 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Is NOT Encrypted");
|
||||
}
|
||||
****/
|
||||
|
||||
switch (getDeliveryMode()) {
|
||||
case FLAG_MODE_LOCAL:
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("mode = local");
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("mode = local");
|
||||
break;
|
||||
case FLAG_MODE_DESTINATION:
|
||||
if (_destinationHash == null) throw new IllegalStateException("Destination hash is not set");
|
||||
System.arraycopy(_destinationHash.getData(), 0, rv, offset, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("mode = destination, hash = " + _destinationHash);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("mode = destination, hash = " + _destinationHash);
|
||||
break;
|
||||
case FLAG_MODE_ROUTER:
|
||||
if (_routerHash == null) throw new IllegalStateException("Router hash is not set");
|
||||
System.arraycopy(_routerHash.getData(), 0, rv, offset, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("mode = router, routerHash = " + _routerHash);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("mode = router, routerHash = " + _routerHash);
|
||||
break;
|
||||
case FLAG_MODE_TUNNEL:
|
||||
if ( (_routerHash == null) || (_tunnelId == null) ) throw new IllegalStateException("Router hash or tunnel ID is not set");
|
||||
@@ -298,29 +383,32 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
offset += Hash.HASH_LENGTH;
|
||||
DataHelper.toLong(rv, offset, 4, _tunnelId.getTunnelId());
|
||||
offset += 4;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("mode = tunnel, tunnelId = " + _tunnelId.getTunnelId()
|
||||
+ ", routerHash = " + _routerHash);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("mode = tunnel, tunnelId = " + _tunnelId.getTunnelId()
|
||||
// + ", routerHash = " + _routerHash);
|
||||
break;
|
||||
}
|
||||
if (getDelayRequested()) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("delay requested: " + getDelaySeconds());
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("delay requested: " + getDelaySeconds());
|
||||
DataHelper.toLong(rv, offset, 4, getDelaySeconds());
|
||||
offset += 4;
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("delay NOT requested");
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("delay NOT requested");
|
||||
}
|
||||
return offset - origOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if ( (_deliveryMode < 0) || (_deliveryMode > FLAG_MODE_TUNNEL) ) throw new DataFormatException("Invalid data: mode = " + _deliveryMode);
|
||||
long flags = getFlags();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Write flags: " + flags + " mode: " + getDeliveryMode()
|
||||
+ " =?= " + flagMode(flags));
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Write flags: " + flags + " mode: " + getDeliveryMode()
|
||||
// + " =?= " + flagMode(flags));
|
||||
byte additionalInfo[] = getAdditionalInfo();
|
||||
DataHelper.writeLong(out, 1, flags);
|
||||
if (additionalInfo != null) {
|
||||
@@ -330,14 +418,14 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
}
|
||||
|
||||
/**
|
||||
* return the number of bytes written to the target
|
||||
* @return the number of bytes written to the target
|
||||
*/
|
||||
public int writeBytes(byte target[], int offset) {
|
||||
if ( (_deliveryMode < 0) || (_deliveryMode > FLAG_MODE_TUNNEL) ) throw new IllegalStateException("Invalid data: mode = " + _deliveryMode);
|
||||
long flags = getFlags();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Write flags: " + flags + " mode: " + getDeliveryMode()
|
||||
+ " =?= " + flagMode(flags));
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Write flags: " + flags + " mode: " + getDeliveryMode()
|
||||
// + " =?= " + flagMode(flags));
|
||||
int origOffset = offset;
|
||||
DataHelper.toLong(target, offset, 1, flags);
|
||||
offset++;
|
||||
@@ -350,6 +438,7 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
+ getAdditionalInfoSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof DeliveryInstructions))
|
||||
return false;
|
||||
@@ -357,13 +446,14 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
return (getDelayRequested() == instr.getDelayRequested()) &&
|
||||
(getDelaySeconds() == instr.getDelaySeconds()) &&
|
||||
(getDeliveryMode() == instr.getDeliveryMode()) &&
|
||||
(getEncrypted() == instr.getEncrypted()) &&
|
||||
//(getEncrypted() == instr.getEncrypted()) &&
|
||||
DataHelper.eq(getDestination(), instr.getDestination()) &&
|
||||
DataHelper.eq(getEncryptionKey(), instr.getEncryptionKey()) &&
|
||||
DataHelper.eq(getRouter(), instr.getRouter()) &&
|
||||
DataHelper.eq(getTunnelId(), instr.getTunnelId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)getDelaySeconds() +
|
||||
getDeliveryMode() +
|
||||
@@ -373,8 +463,9 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
DataHelper.hashCode(getTunnelId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append("[DeliveryInstructions: ");
|
||||
buf.append("\n\tDelivery mode: ");
|
||||
switch (getDeliveryMode()) {
|
||||
@@ -394,7 +485,7 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
buf.append("\n\tDelay requested: ").append(getDelayRequested());
|
||||
buf.append("\n\tDelay seconds: ").append(getDelaySeconds());
|
||||
buf.append("\n\tDestination: ").append(getDestination());
|
||||
buf.append("\n\tEncrypted: ").append(getEncrypted());
|
||||
//buf.append("\n\tEncrypted: ").append(getEncrypted());
|
||||
buf.append("\n\tEncryption key: ").append(getEncryptionKey());
|
||||
buf.append("\n\tRouter: ").append(getRouter());
|
||||
buf.append("\n\tTunnelId: ").append(getTunnelId());
|
||||
|
||||
@@ -8,11 +8,8 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message sent back in reply to a message when requested, containing
|
||||
@@ -20,25 +17,47 @@ import net.i2p.util.Log;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DeliveryStatusMessage.class);
|
||||
public class DeliveryStatusMessage extends FastI2NPMessageImpl {
|
||||
public final static int MESSAGE_TYPE = 10;
|
||||
private long _id;
|
||||
private long _arrival;
|
||||
|
||||
public DeliveryStatusMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setMessageId(-1);
|
||||
setArrival(-1);
|
||||
_id = -1;
|
||||
_arrival = -1;
|
||||
}
|
||||
|
||||
public long getMessageId() { return _id; }
|
||||
public void setMessageId(long id) { _id = id; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if id previously set, to protect saved checksum
|
||||
*/
|
||||
public void setMessageId(long id) {
|
||||
if (_id >= 0)
|
||||
throw new IllegalStateException();
|
||||
_id = id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Misnamed, as it is generally (always?) set by the creator to the current time,
|
||||
* in some future usage it could be set on arrival
|
||||
*/
|
||||
public long getArrival() { return _arrival; }
|
||||
public void setArrival(long arrival) { _arrival = arrival; }
|
||||
|
||||
/**
|
||||
* Misnamed, as it is generally (always?) set by the creator to the current time,
|
||||
* in some future usage it could be set on arrival
|
||||
*/
|
||||
public void setArrival(long arrival) {
|
||||
// To accommodate setting on arrival,
|
||||
// invalidate the stored checksum instead of throwing ISE
|
||||
if (_arrival >= 0)
|
||||
_hasChecksum = false;
|
||||
_arrival = arrival;
|
||||
}
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
@@ -64,22 +83,25 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)getMessageId() + (int)getArrival();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DeliveryStatusMessage) ) {
|
||||
DeliveryStatusMessage msg = (DeliveryStatusMessage)object;
|
||||
return DataHelper.eq(getMessageId(),msg.getMessageId()) &&
|
||||
DataHelper.eq(getArrival(),msg.getArrival());
|
||||
return _id == msg.getMessageId() &&
|
||||
_arrival == msg.getArrival();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[DeliveryStatusMessage: ");
|
||||
buf.append("\n\tMessage ID: ").append(getMessageId());
|
||||
buf.append("\n\tArrival: ").append(_context.clock().now() - _arrival);
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.PrivateKey;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Contains the private key which matches the EndPointPublicKey which, in turn,
|
||||
* is published on the LeaseSet and used to encrypt messages to the router to
|
||||
* which a Destination is currently connected.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class EndPointPrivateKey extends DataStructureImpl {
|
||||
private final static Log _log = new Log(EndPointPrivateKey.class);
|
||||
private PrivateKey _key;
|
||||
|
||||
public EndPointPrivateKey() { setKey(null); }
|
||||
|
||||
public PrivateKey getKey() { return _key; }
|
||||
public void setKey(PrivateKey key) { _key= key; }
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_key = new PrivateKey();
|
||||
_key.readBytes(in);
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if (_key == null) throw new DataFormatException("Invalid key");
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof EndPointPublicKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((EndPointPublicKey)obj).getKey());
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "[EndPointPrivateKey: " + getKey() + "]";
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.PublicKey;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Contains the public key which matches the EndPointPrivateKey. This is
|
||||
* published on the LeaseSet and used to encrypt messages to the router to
|
||||
* which a Destination is currently connected.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class EndPointPublicKey extends DataStructureImpl {
|
||||
private final static Log _log = new Log(EndPointPublicKey.class);
|
||||
private PublicKey _key;
|
||||
|
||||
public EndPointPublicKey() { setKey(null); }
|
||||
|
||||
public PublicKey getKey() { return _key; }
|
||||
public void setKey(PublicKey key) { _key= key; }
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_key = new PublicKey();
|
||||
_key.readBytes(in);
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if (_key == null) throw new DataFormatException("Invalid key");
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof EndPointPublicKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((EndPointPublicKey)obj).getKey());
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "[EndPointPublicKey: " + getKey() + "]";
|
||||
}
|
||||
}
|
||||
186
router/java/src/net/i2p/data/i2np/FastI2NPMessageImpl.java
Normal file
@@ -0,0 +1,186 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.util.HexDump;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.SimpleByteCache;
|
||||
|
||||
/**
* Ignore, but save, the SHA-256 checksum in the full 16-byte header when read in.
* Use the same checksum when writing out.
*
* This is a savings for NTCP in,
* and for NTCP-in to NTCP-out for TunnelDataMessages.
* It's also a savings for messages embedded in other messages.
* Note that SSU does not use the SHA-256 checksum.
*
* Subclasses must take care to set _hasChecksum to false to invalidate it
* if the message payload changes between reading and writing.
*
* It isn't clear where, if anywhere, we actually need to send a checksum.
* For point-to-point messages over NTCP where we know the router version
* of the peer, we could add a method to skip checksum generation.
* For end-to-end I2NP messages embedded in a Garlic, TGM, etc...
* we would need a flag day.
*
* @since 0.8.12
*/
public abstract class FastI2NPMessageImpl extends I2NPMessageImpl {
|
||||
protected byte _checksum;
|
||||
// We skip the fiction that CHECKSUM_LENGTH will ever be anything but 1
|
||||
protected boolean _hasChecksum;
|
||||
|
||||
public FastI2NPMessageImpl(I2PAppContext context) {
|
||||
super(context);
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated unused
|
||||
* @throws UnsupportedOperationException
|
||||
*/
|
||||
@Override
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated unused
|
||||
* @throws UnsupportedOperationException
|
||||
*/
|
||||
@Override
|
||||
public int readBytes(InputStream in, int type, byte buffer[]) throws I2NPMessageException, IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Ignore, but save, the checksum, to be used later if necessary.
|
||||
*
|
||||
* @param maxLen read no more than this many bytes from data starting at offset, even if it is longer
|
||||
* This includes the type byte only if type < 0
|
||||
* @throws IllegalStateException if called twice, to protect saved checksum
|
||||
*/
|
||||
@Override
|
||||
public int readBytes(byte data[], int type, int offset, int maxLen) throws I2NPMessageException {
|
||||
if (_hasChecksum)
|
||||
throw new IllegalStateException(getClass().getSimpleName() + " read twice");
|
||||
int headerSize = HEADER_LENGTH;
|
||||
if (type >= 0)
|
||||
headerSize--;
|
||||
if (maxLen < headerSize)
|
||||
throw new I2NPMessageException("Payload is too short " + maxLen);
|
||||
int cur = offset;
|
||||
if (type < 0) {
|
||||
type = (int)DataHelper.fromLong(data, cur, 1);
|
||||
cur++;
|
||||
}
|
||||
_uniqueId = DataHelper.fromLong(data, cur, 4);
|
||||
cur += 4;
|
||||
_expiration = DataHelper.fromLong(data, cur, DataHelper.DATE_LENGTH);
|
||||
cur += DataHelper.DATE_LENGTH;
|
||||
int size = (int)DataHelper.fromLong(data, cur, 2);
|
||||
cur += 2;
|
||||
_checksum = data[cur];
|
||||
cur++;
|
||||
|
||||
if (cur + size > data.length || headerSize + size > maxLen)
|
||||
throw new I2NPMessageException("Payload is too short ["
|
||||
+ "data.len=" + data.length
|
||||
+ "maxLen=" + maxLen
|
||||
+ " offset=" + offset
|
||||
+ " cur=" + cur
|
||||
+ " wanted=" + size + "]: " + getClass().getSimpleName());
|
||||
|
||||
int sz = Math.min(size, maxLen - headerSize);
|
||||
readMessage(data, cur, sz, type);
|
||||
cur += sz;
|
||||
_hasChecksum = true;
|
||||
if (VERIFY_TEST && _log.shouldLog(Log.INFO))
|
||||
_log.info("Ignored c/s " + getClass().getSimpleName());
|
||||
return cur - offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated unused
|
||||
* @throws UnsupportedOperationException
|
||||
*/
|
||||
@Override
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* This tests the reuse-checksum feature.
|
||||
* The results are that mostly UnknownI2NPMessages (from inside a TGM),
|
||||
* with a lot of DeliveryStatusMessages,
|
||||
* and a few DatabaseLookupMessages that get reused.
|
||||
* The last two are tiny, but the savings at the gateway should help.
|
||||
*/
|
||||
private static final boolean VERIFY_TEST = false;
|
||||
|
||||
/**
|
||||
* If available, use the previously-computed or previously-read checksum for speed
|
||||
*/
|
||||
@Override
|
||||
public int toByteArray(byte buffer[]) {
|
||||
if (_hasChecksum)
|
||||
return toByteArrayWithSavedChecksum(buffer);
|
||||
if (VERIFY_TEST && _log.shouldLog(Log.INFO))
|
||||
_log.info("Generating new c/s " + getClass().getSimpleName());
|
||||
return super.toByteArray(buffer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Use a previously-computed checksum for speed
|
||||
*/
|
||||
protected int toByteArrayWithSavedChecksum(byte buffer[]) {
|
||||
try {
|
||||
int writtenLen = writeMessageBody(buffer, HEADER_LENGTH);
|
||||
if (VERIFY_TEST) {
|
||||
byte[] h = SimpleByteCache.acquire(32);
|
||||
_context.sha().calculateHash(buffer, HEADER_LENGTH, writtenLen - HEADER_LENGTH, h, 0);
|
||||
if (h[0] != _checksum) {
|
||||
_log.log(Log.CRIT, "Please report " + getClass().getSimpleName() +
|
||||
" size " + writtenLen +
|
||||
" saved c/s " + Integer.toHexString(_checksum & 0xff) +
|
||||
" calc " + Integer.toHexString(h[0] & 0xff), new Exception());
|
||||
_log.log(Log.CRIT, "DUMP:\n" + HexDump.dump(buffer, HEADER_LENGTH, writtenLen - HEADER_LENGTH));
|
||||
_log.log(Log.CRIT, "RAW:\n" + Base64.encode(buffer, HEADER_LENGTH, writtenLen - HEADER_LENGTH));
|
||||
_checksum = h[0];
|
||||
} else if (_log.shouldLog(Log.INFO)) {
|
||||
_log.info("Using saved c/s " + getClass().getSimpleName() + ' ' + _checksum);
|
||||
}
|
||||
SimpleByteCache.release(h);
|
||||
}
|
||||
int payloadLen = writtenLen - HEADER_LENGTH;
|
||||
int off = 0;
|
||||
DataHelper.toLong(buffer, off, 1, getType());
|
||||
off += 1;
|
||||
DataHelper.toLong(buffer, off, 4, _uniqueId);
|
||||
off += 4;
|
||||
DataHelper.toLong(buffer, off, DataHelper.DATE_LENGTH, _expiration);
|
||||
off += DataHelper.DATE_LENGTH;
|
||||
DataHelper.toLong(buffer, off, 2, payloadLen);
|
||||
off += 2;
|
||||
buffer[off] = _checksum;
|
||||
return writtenLen;
|
||||
} catch (I2NPMessageException ime) {
|
||||
_context.logManager().getLog(getClass()).log(Log.CRIT, "Error writing", ime);
|
||||
throw new IllegalStateException("Unable to serialize the message " + getClass().getSimpleName(), ime);
|
||||
}
|
||||
}
}
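To recap what this new class buys: the single checksum byte from the 16-byte I2NP header is remembered on read and reused on write, so a message that is only forwarded is never hashed a second time. A rough standalone illustration of the idea, using java.security directly in place of _context.sha() (names here are illustrative, not the real API):

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Illustration of the checksum-reuse idea, not the real class.
    public class ChecksumReuseSketch {
        private byte savedChecksum;
        private boolean hasChecksum;

        // On read: keep the header's checksum byte instead of recomputing it later.
        public void onRead(byte checksumFromHeader) {
            savedChecksum = checksumFromHeader;
            hasChecksum = true;
        }

        // On write: reuse the saved byte if the payload was never touched,
        // otherwise fall back to hashing (I2NP keeps only the first byte of SHA-256).
        public byte checksumForWrite(byte[] payload) throws NoSuchAlgorithmException {
            if (hasChecksum)
                return savedChecksum;
            byte[] h = MessageDigest.getInstance("SHA-256").digest(payload);
            return h[0];
        }
    }
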
@@ -24,27 +24,25 @@ import net.i2p.util.Log;
|
||||
* Contains one deliverable message encrypted to a router along with instructions
|
||||
* and a certificate 'paying for' the delivery.
|
||||
*
|
||||
* Note that certificates are always the null certificate at this time, others are unimplemented.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class GarlicClove extends DataStructureImpl {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
//private final RouterContext _context;
|
||||
private DeliveryInstructions _instructions;
|
||||
private I2NPMessage _msg;
|
||||
private long _cloveId;
|
||||
private Date _expiration;
|
||||
private Certificate _certificate;
|
||||
private I2NPMessageHandler _handler;
|
||||
private final I2NPMessageHandler _handler;
|
||||
|
||||
public GarlicClove(RouterContext context) {
|
||||
_context = context;
|
||||
//_context = context;
|
||||
_log = context.logManager().getLog(GarlicClove.class);
|
||||
_handler = new I2NPMessageHandler(context);
|
||||
setInstructions(null);
|
||||
setData(null);
|
||||
setCloveId(-1);
|
||||
setExpiration(null);
|
||||
setCertificate(null);
|
||||
_cloveId = -1;
|
||||
}
|
||||
|
||||
public DeliveryInstructions getInstructions() { return _instructions; }
|
||||
@@ -58,6 +56,9 @@ public class GarlicClove extends DataStructureImpl {
|
||||
public Certificate getCertificate() { return _certificate; }
|
||||
public void setCertificate(Certificate cert) { _certificate = cert; }
|
||||
|
||||
/**
|
||||
* @deprecated unused, use byte array method to avoid copying
|
||||
*/
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_instructions = new DeliveryInstructions();
|
||||
_instructions.readBytes(in);
|
||||
@@ -72,8 +73,9 @@ public class GarlicClove extends DataStructureImpl {
|
||||
_expiration = DataHelper.readDate(in);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("CloveID read: " + _cloveId + " expiration read: " + _expiration);
|
||||
_certificate = new Certificate();
|
||||
_certificate.readBytes(in);
|
||||
//_certificate = new Certificate();
|
||||
//_certificate.readBytes(in);
|
||||
_certificate = Certificate.create(in);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Read cert: " + _certificate);
|
||||
}
|
||||
@@ -89,8 +91,6 @@ public class GarlicClove extends DataStructureImpl {
|
||||
_msg = _handler.lastRead();
|
||||
} catch (I2NPMessageException ime) {
|
||||
throw new DataFormatException("Unable to read the message from a garlic clove", ime);
|
||||
} catch (IOException ioe) {
|
||||
throw new DataFormatException("Not enough data to read the clove", ioe);
|
||||
}
|
||||
_cloveId = DataHelper.fromLong(source, cur, 4);
|
||||
cur += 4;
|
||||
@@ -98,34 +98,38 @@ public class GarlicClove extends DataStructureImpl {
|
||||
cur += DataHelper.DATE_LENGTH;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("CloveID read: " + _cloveId + " expiration read: " + _expiration);
|
||||
_certificate = new Certificate();
|
||||
cur += _certificate.readBytes(source, cur);
|
||||
//_certificate = new Certificate();
|
||||
//cur += _certificate.readBytes(source, cur);
|
||||
_certificate = Certificate.create(source, cur);
|
||||
cur += _certificate.size();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Read cert: " + _certificate);
|
||||
return cur - offset;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @deprecated unused, use byte array method to avoid copying
|
||||
*/
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
StringBuffer error = null;
|
||||
StringBuilder error = null;
|
||||
if (_instructions == null) {
|
||||
if (error == null) error = new StringBuffer();
|
||||
if (error == null) error = new StringBuilder();
|
||||
error.append("No instructions ");
|
||||
}
|
||||
if (_msg == null) {
|
||||
if (error == null) error = new StringBuffer();
|
||||
if (error == null) error = new StringBuilder();
|
||||
error.append("No message ");
|
||||
}
|
||||
if (_cloveId < 0) {
|
||||
if (error == null) error = new StringBuffer();
|
||||
if (error == null) error = new StringBuilder();
|
||||
error.append("CloveID < 0 [").append(_cloveId).append("] ");
|
||||
}
|
||||
if (_expiration == null) {
|
||||
if (error == null) error = new StringBuffer();
|
||||
if (error == null) error = new StringBuilder();
|
||||
error.append("Expiration is null ");
|
||||
}
|
||||
if (_certificate == null) {
|
||||
if (error == null) error = new StringBuffer();
|
||||
if (error == null) error = new StringBuilder();
|
||||
error.append("Certificate is null ");
|
||||
}
|
||||
|
||||
@@ -156,6 +160,7 @@ public class GarlicClove extends DataStructureImpl {
|
||||
_log.debug("Written cert: " + _certificate);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = new byte[estimateSize()];
|
||||
int offset = 0;
|
||||
@@ -186,17 +191,19 @@ public class GarlicClove extends DataStructureImpl {
|
||||
+ _certificate.size(); // certificate
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof GarlicClove))
|
||||
return false;
|
||||
GarlicClove clove = (GarlicClove)obj;
|
||||
return DataHelper.eq(getCertificate(), clove.getCertificate()) &&
|
||||
DataHelper.eq(getCloveId(), clove.getCloveId()) &&
|
||||
_cloveId == clove.getCloveId() &&
|
||||
DataHelper.eq(getData(), clove.getData()) &&
|
||||
DataHelper.eq(getExpiration(), clove.getExpiration()) &&
|
||||
DataHelper.eq(getInstructions(), clove.getInstructions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getCertificate()) +
|
||||
(int)getCloveId() +
|
||||
@@ -205,8 +212,9 @@ public class GarlicClove extends DataStructureImpl {
|
||||
DataHelper.hashCode(getInstructions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append("[GarlicClove: ");
|
||||
buf.append("\n\tInstructions: ").append(getInstructions());
|
||||
buf.append("\n\tCertificate: ").append(getCertificate());
|
||||
|
||||
@@ -8,55 +8,52 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the wrapped garlic message
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class GarlicMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(GarlicMessage.class);
|
||||
public class GarlicMessage extends FastI2NPMessageImpl {
|
||||
public final static int MESSAGE_TYPE = 11;
|
||||
private byte[] _data;
|
||||
|
||||
public GarlicMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setData(null);
|
||||
}
|
||||
|
||||
public byte[] getData() {
|
||||
verifyUnwritten();
|
||||
return _data;
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if data previously set, to protect saved checksum
|
||||
*/
|
||||
public void setData(byte[] data) {
|
||||
verifyUnwritten();
|
||||
if (_data != null)
|
||||
throw new IllegalStateException();
|
||||
_data = data;
|
||||
}
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
long len = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
if ( (len <= 0) || (len > 64*1024) ) throw new I2NPMessageException("size="+len);
|
||||
if ( (len <= 0) || (len > MAX_SIZE) ) throw new I2NPMessageException("size="+len);
|
||||
_data = new byte[(int)len];
|
||||
System.arraycopy(data, curIndex, _data, 0, (int)len);
|
||||
}
|
||||
|
||||
/** calculate the message body's length (not including the header and footer) */
|
||||
protected int calculateWrittenLength() {
|
||||
verifyUnwritten();
|
||||
return 4 + _data.length;
|
||||
}
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
verifyUnwritten();
|
||||
byte len[] = DataHelper.toLong(4, _data.length);
|
||||
System.arraycopy(len, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
@@ -67,15 +64,12 @@ public class GarlicMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getData());
|
||||
}
|
||||
|
||||
protected void written() {
|
||||
super.written();
|
||||
_data = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof GarlicMessage) ) {
|
||||
GarlicMessage msg = (GarlicMessage)object;
|
||||
@@ -85,10 +79,11 @@ public class GarlicMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[GarlicMessage: ");
|
||||
buf.append("\n\tData length: ").append(getData().length).append(" bytes");
|
||||
buf.append("Data length: ").append(getData().length).append(" bytes");
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
@@ -26,16 +26,53 @@ public interface I2NPMessage extends DataStructure {
|
||||
* Read the body into the data structures, after the initial type byte, using
|
||||
* the current class's format as defined by the I2NP specification
|
||||
*
|
||||
* Unused - All transports provide encapsulation and so we have byte arrays available.
|
||||
*
|
||||
* @param in stream to read from
|
||||
* @param type I2NP message type
|
||||
* starting at type if type is < 0 (16 byte header)
|
||||
* starting at ID if type is >= 0 (15 byte header)
|
||||
* @param type I2NP message type. If less than zero, read the type from data
|
||||
* @param buffer scratch buffer to be used when reading and parsing
|
||||
* @return size of the message read (including headers)
|
||||
* @throws I2NPMessageException if the stream doesn't contain a valid message
|
||||
* that this class can read.
|
||||
* @throws IOException if there is a problem reading from the stream
|
||||
* @deprecated unused
|
||||
*/
|
||||
public int readBytes(InputStream in, int type, byte buffer[]) throws I2NPMessageException, IOException;
|
||||
public int readBytes(byte data[], int type, int offset) throws I2NPMessageException, IOException;
|
||||
|
||||
/**
|
||||
* Read the body into the data structures, after the initial type byte, using
|
||||
* the current class's format as defined by the I2NP specification
|
||||
*
|
||||
* @param data the data
|
||||
* @param type I2NP message type. If less than zero, read the type from data
|
||||
* @param offset where to start
|
||||
* starting at type if type is < 0 (16 byte header)
|
||||
* starting at ID if type is >= 0 (15 byte header)
|
||||
* @return size of the message read (including headers)
|
||||
* @throws I2NPMessageException if there is no valid message
|
||||
* @throws IOException if there is a problem reading from the stream
|
||||
*/
|
||||
public int readBytes(byte data[], int type, int offset) throws I2NPMessageException;
|
||||
|
||||
/**
|
||||
* Read the body into the data structures, after the initial type byte, using
|
||||
* the current class's format as defined by the I2NP specification
|
||||
*
|
||||
* @param data the data, may or may not include the type
|
||||
* @param type I2NP message type. If less than zero, read the type from data
|
||||
* @param offset where to start
|
||||
* starting at type if type is < 0 (16 byte header)
|
||||
* starting at ID if type is >= 0 (15 byte header)
|
||||
* @param maxLen read no more than this many bytes from data starting at offset, even if it is longer
|
||||
* This includes the type byte only if type < 0
|
||||
* @return size of the message read (including headers)
|
||||
* @throws I2NPMessageException if there is no valid message
|
||||
* @throws IOException if there is a problem reading from the stream
|
||||
* @since 0.8.12
|
||||
*/
public int readBytes(byte data[], int type, int offset, int maxLen) throws I2NPMessageException;
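A short usage sketch of the bounded variant declared above: the caller hands in a large reusable buffer plus the number of valid bytes, and the parser promises not to read past that bound. The buffer contents and the choice of message class are placeholders.

    import net.i2p.I2PAppContext;
    import net.i2p.data.i2np.DeliveryStatusMessage;
    import net.i2p.data.i2np.I2NPMessageException;

    // Hypothetical caller; assumes the buffer actually holds a DeliveryStatusMessage.
    public class BoundedReadSketch {
        static int parseOne(byte[] recvBuffer, int validBytes) throws I2NPMessageException {
            DeliveryStatusMessage msg = new DeliveryStatusMessage(I2PAppContext.getGlobalContext());
            // type = -1: let readBytes() consume the type byte from the buffer itself
            return msg.readBytes(recvBuffer, -1, 0, validBytes); // bytes consumed, headers included
        }
    }
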
/**
|
||||
* Read the body into the data structures, after the initial type byte and
|
||||
@@ -50,8 +87,8 @@ public interface I2NPMessage extends DataStructure {
|
||||
* that this class can read.
|
||||
* @throws IOException if there is a problem reading from the stream
|
||||
*/
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException;
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException, IOException;
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException;
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException;
|
||||
|
||||
/**
|
||||
* Return the unique identifier for this type of I2NP message, as defined in
|
||||
@@ -60,7 +97,7 @@ public interface I2NPMessage extends DataStructure {
|
||||
public int getType();
|
||||
|
||||
/**
|
||||
* Replay resistent message Id
|
||||
* Replay resistant message ID
|
||||
*/
|
||||
public long getUniqueId();
|
||||
public void setUniqueId(long id);
|
||||
@@ -73,22 +110,25 @@ public interface I2NPMessage extends DataStructure {
|
||||
public void setMessageExpiration(long exp);
|
||||
|
||||
|
||||
/** How large the message is, including any checksums */
|
||||
/** How large the message is, including any checksums, i.e. full 16 byte header */
|
||||
public int getMessageSize();
|
||||
/** How large the raw message is */
|
||||
|
||||
/** How large the raw message is with the short 5 byte header */
|
||||
public int getRawMessageSize();
|
||||
|
||||
|
||||
/**
|
||||
* write the message to the buffer, returning the number of bytes written.
|
||||
* the data is formatted so as to be self contained, with the type, size,
|
||||
* expiration, unique id, as well as a checksum bundled along.
|
||||
* Full 16 byte header.
|
||||
*/
|
||||
public int toByteArray(byte buffer[]);
|
||||
|
||||
/**
|
||||
* write the message to the buffer, returning the number of bytes written.
|
||||
* the data is not self contained - it does not include the size,
|
||||
* unique id, or any checksum, but does include the type and expiration.
|
||||
* Short 5 byte header.
|
||||
*/
|
||||
public int toRawByteArray(byte buffer[]);
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
@@ -22,8 +21,8 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class I2NPMessageHandler {
|
||||
private Log _log;
|
||||
private I2PAppContext _context;
|
||||
private final Log _log;
|
||||
private final I2PAppContext _context;
|
||||
private long _lastReadBegin;
|
||||
private long _lastReadEnd;
|
||||
private int _lastSize;
|
||||
@@ -33,14 +32,17 @@ public class I2NPMessageHandler {
|
||||
public I2NPMessageHandler(I2PAppContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(I2NPMessageHandler.class);
|
||||
_messageBuffer = null;
|
||||
_lastSize = -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an I2NPMessage from the stream and return the fully populated object.
|
||||
*
|
||||
* @throws IOException if there is an IO problem reading from the stream
|
||||
* This is only called by I2NPMessageReader which is unused.
|
||||
* All transports provide encapsulation and so we have byte arrays available.
|
||||
*
|
||||
* @deprecated use the byte array method to avoid an extra copy if you have it
|
||||
*
|
||||
* @throws I2NPMessageException if there is a problem handling the particular
|
||||
* message - if it is an unknown type or has improper formatting, etc.
|
||||
*/
|
||||
@@ -50,19 +52,17 @@ public class I2NPMessageHandler {
|
||||
int type = (int)DataHelper.readLong(in, 1);
|
||||
_lastReadBegin = System.currentTimeMillis();
|
||||
I2NPMessage msg = I2NPMessageImpl.createMessage(_context, type);
|
||||
if (msg == null)
|
||||
throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message");
|
||||
// can't be null
|
||||
//if (msg == null)
|
||||
// throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message");
|
||||
try {
|
||||
_lastSize = msg.readBytes(in, type, _messageBuffer);
|
||||
} catch (IOException ioe) {
|
||||
throw ioe;
|
||||
} catch (I2NPMessageException ime) {
|
||||
throw ime;
|
||||
} catch (Exception e) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error reading the stream", e);
|
||||
throw new IOException("Unknown error reading the " + msg.getClass().getName()
|
||||
+ ": " + e.getMessage());
|
||||
throw new I2NPMessageException("Unknown error reading the " + msg.getClass().getSimpleName(), e);
|
||||
}
|
||||
_lastReadEnd = System.currentTimeMillis();
|
||||
return msg;
|
||||
@@ -79,46 +79,62 @@ public class I2NPMessageHandler {
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an I2NPMessage from the stream and return the fully populated object.
|
||||
* Read an I2NPMessage from the byte array and return the fully populated object.
|
||||
*
|
||||
* @throws IOException if there is an IO problem reading from the stream
|
||||
* @throws I2NPMessageException if there is a problem handling the particular
|
||||
* message - if it is an unknown type or has improper formatting, etc.
|
||||
*/
|
||||
public I2NPMessage readMessage(byte data[]) throws IOException, I2NPMessageException {
|
||||
readMessage(data, 0);
|
||||
public I2NPMessage readMessage(byte data[]) throws I2NPMessageException {
|
||||
readMessage(data, 0, data.length);
|
||||
return lastRead();
|
||||
}
|
||||
public int readMessage(byte data[], int offset) throws IOException, I2NPMessageException {
|
||||
|
||||
/**
|
||||
* Result is retrieved with lastRead()
|
||||
*/
|
||||
public int readMessage(byte data[], int offset) throws I2NPMessageException {
|
||||
return readMessage(data, offset, data.length - offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a limit on the max to read from the data buffer, so that
|
||||
* we can use a large buffer but prevent the reader from reading off the end.
|
||||
*
|
||||
* Result is retrieved with lastRead()
|
||||
*
|
||||
* @param maxLen read no more than this many bytes from data starting at offset, even if it is longer
|
||||
* must be at least 16
|
||||
* @since 0.8.12
|
||||
*/
|
||||
public int readMessage(byte data[], int offset, int maxLen) throws I2NPMessageException {
|
||||
int cur = offset;
|
||||
// we will assume that maxLen is >= 1 here. It's checked to be >= 16 in readBytes()
|
||||
int type = (int)DataHelper.fromLong(data, cur, 1);
|
||||
cur++;
|
||||
_lastReadBegin = System.currentTimeMillis();
|
||||
I2NPMessage msg = I2NPMessageImpl.createMessage(_context, type);
|
||||
if (msg == null) {
|
||||
int sz = data.length-offset;
|
||||
boolean allZero = false;
|
||||
for (int i = offset; i < data.length; i++) {
|
||||
if (data[i] != 0) {
|
||||
allZero = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message (remaining sz="
|
||||
+ sz + " all zeros? " + allZero + ")");
|
||||
}
|
||||
// can't be null
|
||||
//if (msg == null) {
|
||||
// int sz = data.length-offset;
|
||||
// boolean allZero = false;
|
||||
// for (int i = offset; i < data.length; i++) {
|
||||
// if (data[i] != 0) {
|
||||
// allZero = false;
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message (remaining sz="
|
||||
// + sz + " all zeros? " + allZero + ")");
|
||||
//}
|
||||
try {
|
||||
_lastSize = msg.readBytes(data, type, cur);
|
||||
_lastSize = msg.readBytes(data, type, cur, maxLen - 1);
|
||||
cur += _lastSize;
|
||||
} catch (IOException ioe) {
|
||||
throw ioe;
|
||||
} catch (I2NPMessageException ime) {
|
||||
throw ime;
|
||||
} catch (Exception e) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error reading the stream", e);
|
||||
throw new IOException("Unknown error reading the " + msg.getClass().getName()
|
||||
+ ": " + e.getMessage());
|
||||
throw new I2NPMessageException("Unknown error reading the " + msg.getClass().getSimpleName(), e);
|
||||
}
|
||||
_lastReadEnd = System.currentTimeMillis();
|
||||
_lastRead = msg;
|
||||
@@ -128,6 +144,7 @@ public class I2NPMessageHandler {
|
||||
public long getLastReadTime() { return _lastReadEnd - _lastReadBegin; }
public int getLastSize() { return _lastSize; }
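Tying the handler API together, a typical byte-array parse looks roughly like the sketch below (buffer and sizes are placeholders); the handler picks the concrete message class from the type byte and the result is fetched with lastRead().

    import net.i2p.I2PAppContext;
    import net.i2p.data.i2np.I2NPMessage;
    import net.i2p.data.i2np.I2NPMessageException;
    import net.i2p.data.i2np.I2NPMessageHandler;

    // Hypothetical caller of the byte-array path.
    public class HandlerUsageSketch {
        static I2NPMessage parse(byte[] data, int offset, int validBytes) throws I2NPMessageException {
            I2NPMessageHandler handler = new I2NPMessageHandler(I2PAppContext.getGlobalContext());
            int consumed = handler.readMessage(data, offset, validBytes); // bounded read
            System.out.println("consumed " + consumed + " bytes in " + handler.getLastReadTime() + " ms");
            return handler.lastRead();
        }
    }
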
/****
|
||||
public static void main(String args[]) {
|
||||
try {
|
||||
I2NPMessage msg = new I2NPMessageHandler(I2PAppContext.getGlobalContext()).readMessage(new FileInputStream(args[0]));
|
||||
@@ -136,4 +153,5 @@ public class I2NPMessageHandler {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
****/
|
||||
}
|
||||
|
||||
@@ -11,8 +11,8 @@ package net.i2p.data.i2np;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataFormatException;
|
||||
@@ -20,6 +20,7 @@ import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.SimpleByteCache;
|
||||
|
||||
/**
|
||||
* Defines the base message implementation.
|
||||
@@ -27,22 +28,32 @@ import net.i2p.util.Log;
|
||||
* @author jrandom
|
||||
*/
|
||||
public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPMessage {
|
||||
private Log _log;
|
||||
protected I2PAppContext _context;
|
||||
private long _expiration;
|
||||
private long _uniqueId;
|
||||
private boolean _written;
|
||||
private boolean _read;
|
||||
protected final Log _log;
|
||||
protected final I2PAppContext _context;
|
||||
protected long _expiration;
|
||||
protected long _uniqueId;
|
||||
|
||||
public final static long DEFAULT_EXPIRATION_MS = 1*60*1000; // 1 minute by default
|
||||
public final static int CHECKSUM_LENGTH = 1; //Hash.HASH_LENGTH;
|
||||
|
||||
private static final boolean RAW_FULL_SIZE = false;
|
||||
/** 16 */
|
||||
public static final int HEADER_LENGTH = 1 // type
|
||||
+ 4 // uniqueId
|
||||
+ DataHelper.DATE_LENGTH // expiration
|
||||
+ 2 // payload length
|
||||
+ CHECKSUM_LENGTH;
|
||||
|
||||
// Whether SSU used the full header or a truncated header.
|
||||
// We are stuck with the short header, can't change it now.
|
||||
//private static final boolean RAW_FULL_SIZE = false;
|
||||
|
||||
/** unsynchronized as its pretty much read only (except at startup) */
|
||||
private static final Map _builders = new HashMap(8);
|
||||
/** unused */
|
||||
private static final Map<Integer, Builder> _builders = new ConcurrentHashMap(1);
|
||||
|
||||
/** @deprecated unused */
|
||||
public static final void registerBuilder(Builder builder, int type) { _builders.put(Integer.valueOf(type), builder); }
|
||||
/** interface for extending the types of messages handled */
|
||||
|
||||
/** interface for extending the types of messages handled - unused */
|
||||
public interface Builder {
|
||||
/** instantiate a new I2NPMessage to be populated shortly */
|
||||
public I2NPMessage build(I2PAppContext ctx);
|
||||
@@ -53,12 +64,16 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
_log = context.logManager().getLog(I2NPMessageImpl.class);
|
||||
_expiration = _context.clock().now() + DEFAULT_EXPIRATION_MS;
|
||||
_uniqueId = _context.random().nextLong(MAX_ID_VALUE);
|
||||
_written = false;
|
||||
_read = false;
|
||||
//_context.statManager().createRateStat("i2np.writeTime", "How long it takes to write an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
|
||||
//_context.statManager().createRateStat("i2np.readTime", "How long it takes to read an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the whole message but only if it's exactly 1024 bytes.
|
||||
* Unused - All transports provide encapsulation and so we have byte arrays available.
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
try {
|
||||
readBytes(in, -1, new byte[1024]);
|
||||
@@ -66,6 +81,31 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
throw new DataFormatException("Bad bytes", ime);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the header, then read the rest into buffer, then call
|
||||
* readMessage in the implemented message type
|
||||
*
|
||||
* This does a copy from the stream to the buffer, so if you already
|
||||
* have a byte array, use the other readBytes() instead.
|
||||
*
|
||||
*<pre>
|
||||
* Specifically:
|
||||
* 1 byte type (if caller didn't read already, as specified by the type param)
|
||||
* 4 byte ID
|
||||
* 8 byte expiration
|
||||
* 2 byte size
|
||||
* 1 byte checksum
|
||||
* size bytes of payload (read by readMessage() in implementation)
|
||||
*</pre>
|
||||
*
|
||||
* Unused - All transports provide encapsulation and so we have byte arrays available.
|
||||
*
|
||||
* @param type the message type or -1 if we should read it here
|
||||
* @param buffer temp buffer to use
|
||||
* @return total length of the message
|
||||
* @deprecated unused
|
||||
*/
|
||||
public int readBytes(InputStream in, int type, byte buffer[]) throws I2NPMessageException, IOException {
|
||||
try {
|
||||
if (type < 0)
|
||||
@@ -93,26 +133,62 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
cur += numRead;
|
||||
}
|
||||
|
||||
Hash calc = _context.sha().calculateHash(buffer, 0, size);
|
||||
byte[] calc = SimpleByteCache.acquire(Hash.HASH_LENGTH);
|
||||
_context.sha().calculateHash(buffer, 0, size, calc, 0);
|
||||
//boolean eq = calc.equals(h);
|
||||
boolean eq = DataHelper.eq(checksum, 0, calc.getData(), 0, CHECKSUM_LENGTH);
|
||||
boolean eq = DataHelper.eq(checksum, 0, calc, 0, CHECKSUM_LENGTH);
|
||||
SimpleByteCache.release(calc);
|
||||
if (!eq)
|
||||
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
|
||||
throw new I2NPMessageException("Bad checksum on " + size + " byte I2NP " + getClass().getSimpleName());
|
||||
|
||||
long start = _context.clock().now();
|
||||
//long start = _context.clock().now();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
|
||||
readMessage(buffer, 0, size, type);
|
||||
//long time = _context.clock().now() - start;
|
||||
//if (time > 50)
|
||||
// _context.statManager().addRateData("i2np.readTime", time, time);
|
||||
_read = true;
|
||||
return size + Hash.HASH_LENGTH + 1 + 4 + DataHelper.DATE_LENGTH;
|
||||
return CHECKSUM_LENGTH + 1 + 2 + 4 + DataHelper.DATE_LENGTH + size;
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the message header", dfe);
|
||||
}
|
||||
}
|
||||
public int readBytes(byte data[], int type, int offset) throws I2NPMessageException, IOException {
|
||||
|
||||
/**
|
||||
* Read the header, then read the rest into buffer, then call
|
||||
* readMessage in the implemented message type
|
||||
*
|
||||
*<pre>
|
||||
* Specifically:
|
||||
* 1 byte type (if caller didn't read already, as specified by the type param)
|
||||
* 4 byte ID
|
||||
* 8 byte expiration
|
||||
* 2 byte size
|
||||
* 1 byte checksum
|
||||
* size bytes of payload (read by readMessage() in implementation)
|
||||
*</pre>
|
||||
*
|
||||
* @param type the message type or -1 if we should read it here
|
||||
* @return total length of the message
|
||||
*/
|
||||
public int readBytes(byte data[], int type, int offset) throws I2NPMessageException {
|
||||
return readBytes(data, type, offset, data.length - offset);
}
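Since the full header layout is spelled out in the javadoc above (1 type + 4 id + 8 expiration + 2 size + 1 checksum = 16 bytes), here is a standalone sketch that peels those fields off a buffer with plain array math, mirroring what DataHelper.fromLong() does:

    // Standalone parse of the 16-byte I2NP header documented above.
    public class HeaderParseSketch {
        public static void main(String[] args) {
            byte[] buf = new byte[16];                    // placeholder header
            buf[0] = 11;                                  // e.g. GarlicMessage type
            int cur = 0;
            int type = buf[cur++] & 0xff;
            long id = readLong(buf, cur, 4);              cur += 4;
            long expiration = readLong(buf, cur, 8);      cur += 8;
            int size = (int) readLong(buf, cur, 2);       cur += 2;
            int checksum = buf[cur] & 0xff;               // first byte of SHA-256 over the payload
            System.out.println("type=" + type + " id=" + id + " exp=" + expiration
                               + " size=" + size + " c/s=" + checksum);
        }

        // big-endian unsigned read, the same convention as DataHelper.fromLong()
        static long readLong(byte[] b, int off, int len) {
            long v = 0;
            for (int i = 0; i < len; i++)
                v = (v << 8) | (b[off + i] & 0xff);
            return v;
        }
    }
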
/**
|
||||
* Set a limit on the max to read from the data buffer, so that
|
||||
* we can use a large buffer but prevent the reader from reading off the end.
|
||||
*
|
||||
* @param maxLen read no more than this many bytes from data starting at offset, even if it is longer
|
||||
* This includes the type byte only if type < 0
|
||||
* @since 0.8.12
|
||||
*/
|
||||
public int readBytes(byte data[], int type, int offset, int maxLen) throws I2NPMessageException {
|
||||
int headerSize = HEADER_LENGTH;
|
||||
if (type >= 0)
|
||||
headerSize--;
|
||||
if (maxLen < headerSize)
|
||||
throw new I2NPMessageException("Payload is too short " + maxLen);
|
||||
int cur = offset;
|
||||
if (type < 0) {
|
||||
type = (int)DataHelper.fromLong(data, cur, 1);
|
||||
@@ -130,31 +206,38 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
cur += CHECKSUM_LENGTH;
|
||||
//h.setData(hdata);
|
||||
|
||||
if (cur + size > data.length)
|
||||
if (cur + size > data.length || headerSize + size > maxLen)
|
||||
throw new I2NPMessageException("Payload is too short ["
|
||||
+ "data.len=" + data.length
|
||||
+ "maxLen=" + maxLen
|
||||
+ " offset=" + offset
|
||||
+ " cur=" + cur
|
||||
+ " wanted=" + size + "]: " + getClass().getName());
|
||||
+ " wanted=" + size + "]: " + getClass().getSimpleName());
|
||||
|
||||
Hash calc = _context.sha().calculateHash(data, cur, size);
|
||||
//boolean eq = calc.equals(h);
|
||||
boolean eq = DataHelper.eq(hdata, 0, calc.getData(), 0, CHECKSUM_LENGTH);
|
||||
int sz = Math.min(size, maxLen - headerSize);
|
||||
byte[] calc = SimpleByteCache.acquire(Hash.HASH_LENGTH);
|
||||
_context.sha().calculateHash(data, cur, sz, calc, 0);
|
||||
boolean eq = DataHelper.eq(hdata, 0, calc, 0, CHECKSUM_LENGTH);
|
||||
SimpleByteCache.release(calc);
|
||||
if (!eq)
|
||||
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
|
||||
throw new I2NPMessageException("Bad checksum on " + size + " byte I2NP " + getClass().getSimpleName());
|
||||
|
||||
long start = _context.clock().now();
|
||||
//long start = _context.clock().now();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
|
||||
readMessage(data, cur, size, type);
|
||||
cur += size;
|
||||
readMessage(data, cur, sz, type);
|
||||
cur += sz;
|
||||
//long time = _context.clock().now() - start;
|
||||
//if (time > 50)
|
||||
// _context.statManager().addRateData("i2np.readTime", time, time);
|
||||
_read = true;
|
||||
return cur - offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Don't do this if you need a byte array - use toByteArray()
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
int size = getMessageSize();
|
||||
if (size < 15 + CHECKSUM_LENGTH) throw new DataFormatException("Unable to build the message");
|
||||
@@ -180,42 +263,36 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
public synchronized int getMessageSize() {
|
||||
return calculateWrittenLength()+15 + CHECKSUM_LENGTH; // 16 bytes in the header
|
||||
}
|
||||
|
||||
/**
|
||||
* The raw header consists of a one-byte type and a 4-byte expiration in seconds only.
|
||||
* Used by SSU only!
|
||||
*/
|
||||
public synchronized int getRawMessageSize() {
|
||||
if (RAW_FULL_SIZE)
|
||||
return getMessageSize();
|
||||
else
|
||||
//if (RAW_FULL_SIZE)
|
||||
// return getMessageSize();
|
||||
//else
|
||||
return calculateWrittenLength()+5;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte data[] = new byte[getMessageSize()];
|
||||
int written = toByteArray(data);
|
||||
if (written != data.length) {
|
||||
_log.log(Log.CRIT, "Error writing out " + data.length + " (written: " + written + ", msgSize: " + getMessageSize() +
|
||||
", writtenLen: " + calculateWrittenLength() + ") for " + getClass().getName());
|
||||
", writtenLen: " + calculateWrittenLength() + ") for " + getClass().getSimpleName());
|
||||
return null;
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
public int toByteArray(byte buffer[]) {
|
||||
long start = _context.clock().now();
|
||||
|
||||
int prefixLen = 1 // type
|
||||
+ 4 // uniqueId
|
||||
+ DataHelper.DATE_LENGTH // expiration
|
||||
+ 2 // payload length
|
||||
+ CHECKSUM_LENGTH; // walnuts
|
||||
//byte prefix[][] = new byte[][] { DataHelper.toLong(1, getType()),
|
||||
// DataHelper.toLong(4, _uniqueId),
|
||||
// DataHelper.toLong(DataHelper.DATE_LENGTH, _expiration),
|
||||
// new byte[2],
|
||||
// new byte[CHECKSUM_LENGTH]};
|
||||
//byte suffix[][] = new byte[][] { };
|
||||
try {
|
||||
int writtenLen = writeMessageBody(buffer, prefixLen);
|
||||
int payloadLen = writtenLen - prefixLen;
|
||||
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen);
|
||||
int writtenLen = writeMessageBody(buffer, HEADER_LENGTH);
|
||||
int payloadLen = writtenLen - HEADER_LENGTH;
|
||||
byte[] h = SimpleByteCache.acquire(Hash.HASH_LENGTH);
|
||||
_context.sha().calculateHash(buffer, HEADER_LENGTH, payloadLen, h, 0);
|
||||
|
||||
int off = 0;
|
||||
DataHelper.toLong(buffer, off, 1, getType());
|
||||
@@ -226,27 +303,25 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
off += DataHelper.DATE_LENGTH;
|
||||
DataHelper.toLong(buffer, off, 2, payloadLen);
|
||||
off += 2;
|
||||
System.arraycopy(h.getData(), 0, buffer, off, CHECKSUM_LENGTH);
|
||||
|
||||
//long time = _context.clock().now() - start;
|
||||
//if (time > 50)
|
||||
// _context.statManager().addRateData("i2np.writeTime", time, time);
|
||||
System.arraycopy(h, 0, buffer, off, CHECKSUM_LENGTH);
|
||||
SimpleByteCache.release(h);
|
||||
|
||||
return writtenLen;
|
||||
} catch (I2NPMessageException ime) {
|
||||
_context.logManager().getLog(getClass()).log(Log.CRIT, "Error writing", ime);
|
||||
throw new IllegalStateException("Unable to serialize the message (" + getClass().getName()
|
||||
+ "): " + ime.getMessage());
|
||||
throw new IllegalStateException("Unable to serialize the message " + getClass().getSimpleName(), ime);
|
||||
}
|
||||
}
|
||||
|
||||
/** calculate the message body's length (not including the header and footer) */
|
||||
protected abstract int calculateWrittenLength();
|
||||
|
||||
/**
|
||||
* write the message body to the output array, starting at the given index.
|
||||
* @return the index into the array after the last byte written
|
||||
*/
|
||||
protected abstract int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException;
|
||||
|
||||
/*
|
||||
protected int toByteArray(byte out[], byte[][] prefix, byte[][] suffix) throws I2NPMessageException {
|
||||
int curIndex = 0;
|
||||
@@ -267,27 +342,30 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
*/
    /**
     * Write the message with a short 5-byte header.
     * The header consists of a one-byte type and a 4-byte expiration in seconds only.
     * Used by SSU only!
     */
    public int toRawByteArray(byte buffer[]) {
        verifyUnwritten();
        if (RAW_FULL_SIZE)
            return toByteArray(buffer);
        //if (RAW_FULL_SIZE)
        //    return toByteArray(buffer);
        try {
            int off = 0;
            DataHelper.toLong(buffer, off, 1, getType());
            off += 1;
            DataHelper.toLong(buffer, off, 4, _expiration/1000); // seconds
            // January 19 2038? No, unsigned, good until Feb. 7 2106
            // in seconds, round up so we don't lose time every hop
            DataHelper.toLong(buffer, off, 4, (_expiration + 500) / 1000);
            off += 4;
            return writeMessageBody(buffer, off);
        } catch (I2NPMessageException ime) {
            _context.logManager().getLog(getClass()).log(Log.CRIT, "Error writing", ime);
            throw new IllegalStateException("Unable to serialize the message (" + getClass().getName()
                                            + "): " + ime.getMessage());
        } finally {
            written();
            throw new IllegalStateException("Unable to serialize the message " + getClass().getSimpleName(), ime);
        }
    }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException {
|
||||
// ignore the handler (overridden in subclasses if necessary)
|
||||
try {
|
||||
readMessage(data, offset, dataSize, type);
|
||||
@@ -297,49 +375,50 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
}
|
||||
|
||||
|
||||
/*****
|
||||
public static I2NPMessage fromRawByteArray(I2PAppContext ctx, byte buffer[], int offset, int len) throws I2NPMessageException {
|
||||
return fromRawByteArray(ctx, buffer, offset, len, new I2NPMessageHandler(ctx));
|
||||
}
|
||||
*****/
|
||||
|
||||
/**
|
||||
* Read the message with a short 5-byte header.
|
||||
* The header consists of a one-byte type and a 4-byte expiration in seconds only.
|
||||
* Used by SSU only!
|
||||
*/
|
||||
public static I2NPMessage fromRawByteArray(I2PAppContext ctx, byte buffer[], int offset, int len, I2NPMessageHandler handler) throws I2NPMessageException {
|
||||
int type = (int)DataHelper.fromLong(buffer, offset, 1);
|
||||
offset++;
|
||||
I2NPMessageImpl msg = (I2NPMessageImpl)createMessage(ctx, type);
|
||||
if (msg == null)
|
||||
throw new I2NPMessageException("Unknown message type: " + type);
|
||||
if (RAW_FULL_SIZE) {
|
||||
try {
|
||||
msg.readBytes(buffer, type, offset);
|
||||
} catch (IOException ioe) {
|
||||
throw new I2NPMessageException("Error reading the " + msg, ioe);
|
||||
}
|
||||
msg.read();
|
||||
return msg;
|
||||
}
|
||||
//if (RAW_FULL_SIZE) {
|
||||
// try {
|
||||
// msg.readBytes(buffer, type, offset);
|
||||
// } catch (IOException ioe) {
|
||||
// throw new I2NPMessageException("Error reading the " + msg, ioe);
|
||||
// }
|
||||
// return msg;
|
||||
//}
|
||||
|
||||
try {
|
||||
long expiration = DataHelper.fromLong(buffer, offset, 4) * 1000; // seconds
|
||||
// January 19 2038? No, unsigned, good until Feb. 7 2106
|
||||
// in seconds, round up so we don't lose time every hop
|
||||
long expiration = (DataHelper.fromLong(buffer, offset, 4) * 1000) + 500;
|
||||
offset += 4;
|
||||
int dataSize = len - 1 - 4;
|
||||
msg.readMessage(buffer, offset, dataSize, type, handler);
|
||||
msg.setMessageExpiration(expiration);
|
||||
msg.read();
|
||||
return msg;
|
||||
} catch (IOException ioe) {
|
||||
throw new I2NPMessageException("IO error reading raw message", ioe);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
throw new I2NPMessageException("Corrupt message (negative expiration)", iae);
|
||||
}
|
||||
}
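
A hedged usage sketch (invented caller; the static fromRawByteArray() signature is as declared above) showing how an SSU-style receiver would turn a raw-framed packet back into a message:

    import net.i2p.I2PAppContext;

    /** Sketch only: parse a message framed with the short 5-byte raw header. */
    static I2NPMessage parseRaw(I2PAppContext ctx, byte[] packet, int off, int len)
            throws I2NPMessageException {
        I2NPMessageHandler handler = new I2NPMessageHandler(ctx);
        // fromRawByteArray() consumes the type byte and the 4-byte seconds expiration itself
        return I2NPMessageImpl.fromRawByteArray(ctx, packet, off, len, handler);
    }
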
protected void verifyUnwritten() {
|
||||
if (_written) throw new IllegalStateException("Already written");
|
||||
}
|
||||
protected void written() { _written = true; }
|
||||
protected void read() { _read = true; }
|
||||
|
||||
/**
|
||||
* Yes, this is fairly ugly, but it's the only place it ever happens.
|
||||
*
|
||||
* @return non-null, returns an UnknownI2NPMessage if unknown type
|
||||
*/
|
||||
public static I2NPMessage createMessage(I2PAppContext context, int type) throws I2NPMessageException {
|
||||
switch (type) {
|
||||
@@ -351,8 +430,9 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
return new DatabaseSearchReplyMessage(context);
|
||||
case DeliveryStatusMessage.MESSAGE_TYPE:
|
||||
return new DeliveryStatusMessage(context);
|
||||
case DateMessage.MESSAGE_TYPE:
|
||||
return new DateMessage(context);
|
||||
// unused since forever (0.5?)
|
||||
//case DateMessage.MESSAGE_TYPE:
|
||||
// return new DateMessage(context);
|
||||
case GarlicMessage.MESSAGE_TYPE:
|
||||
return new GarlicMessage(context);
|
||||
case TunnelDataMessage.MESSAGE_TYPE:
|
||||
@@ -361,20 +441,23 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
return new TunnelGatewayMessage(context);
|
||||
case DataMessage.MESSAGE_TYPE:
|
||||
return new DataMessage(context);
|
||||
case TunnelCreateMessage.MESSAGE_TYPE:
|
||||
return new TunnelCreateMessage(context);
|
||||
case TunnelCreateStatusMessage.MESSAGE_TYPE:
|
||||
return new TunnelCreateStatusMessage(context);
|
||||
// unused since 0.6.1.10
|
||||
case TunnelBuildMessage.MESSAGE_TYPE:
|
||||
return new TunnelBuildMessage(context);
|
||||
case TunnelBuildReplyMessage.MESSAGE_TYPE:
|
||||
return new TunnelBuildReplyMessage(context);
|
||||
// since 0.7.10
|
||||
case VariableTunnelBuildMessage.MESSAGE_TYPE:
|
||||
return new VariableTunnelBuildMessage(context);
|
||||
// since 0.7.10
|
||||
case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
|
||||
return new VariableTunnelBuildReplyMessage(context);
|
||||
default:
|
||||
Builder builder = (Builder)_builders.get(Integer.valueOf(type));
|
||||
if (builder == null)
|
||||
return null;
|
||||
else
|
||||
// unused
|
||||
Builder builder = _builders.get(Integer.valueOf(type));
|
||||
if (builder != null)
|
||||
return builder.build(context);
|
||||
return new UnknownI2NPMessage(context, type);
|
||||
}
|
||||
}
|
||||
}
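
A hedged sketch (invented wrapper) of what the change to the default branch above buys a caller: createMessage() now always returns a message object, falling back to UnknownI2NPMessage instead of null for unregistered types:

    /** Sketch only: callers no longer need a null check after this change. */
    static I2NPMessage forType(I2PAppContext ctx, int type) throws I2NPMessageException {
        I2NPMessage msg = I2NPMessageImpl.createMessage(ctx, type);
        if (msg instanceof UnknownI2NPMessage) {
            // no built-in class or registered Builder for this type; payload stays as opaque bytes
        }
        return msg;   // never null
    }
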
@@ -23,6 +23,15 @@ import net.i2p.util.Log;
|
||||
* thrown, or the connection being closed. Routers should use this rather
|
||||
* than read from the stream themselves.
|
||||
*
|
||||
* Deprecated - unused.
|
||||
* This was used by the old TCP transport.
|
||||
* Both the NTCP and SSU transports provide encapsulation
|
||||
* of I2NP messages, so they use I2NPMessageHandlers directly.
|
||||
* If we ever add a transport that does not provide encapsulation,
|
||||
* this will be useful again.
|
||||
*
|
||||
* @deprecated unused
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class I2NPMessageReader {
|
||||
@@ -56,16 +65,19 @@ public class I2NPMessageReader {
|
||||
*
|
||||
*/
|
||||
public void startReading() { _readerThread.start(); }
|
||||
|
||||
/**
|
||||
* Have the already started reader pause its reading indefinitely
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void pauseReading() { _reader.pauseRunner(); }
|
||||
|
||||
/**
|
||||
* Resume reading after a pause
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
public void resumeReading() { _reader.resumeRunner(); }
|
||||
|
||||
/**
|
||||
* Cancel reading.
|
||||
*
|
||||
@@ -106,8 +118,13 @@ public class I2NPMessageReader {
|
||||
_stayAlive = true;
|
||||
_handler = new I2NPMessageHandler(_context);
|
||||
}
|
||||
|
||||
/** deprecated unused */
|
||||
public void pauseRunner() { _doRun = false; }
|
||||
|
||||
/** deprecated unused */
|
||||
public void resumeRunner() { _doRun = true; }
|
||||
|
||||
public void cancelRunner() {
|
||||
_doRun = false;
|
||||
_stayAlive = false;
|
||||
@@ -153,7 +170,8 @@ public class I2NPMessageReader {
|
||||
cancelRunner();
|
||||
}
|
||||
}
|
||||
if (!_doRun) {
|
||||
// ??? unused
|
||||
if (_stayAlive && !_doRun) {
|
||||
// pause .5 secs when we're paused
|
||||
try { Thread.sleep(500); } catch (InterruptedException ie) {}
|
||||
}
|
||||
|
||||
@@ -1,53 +1,27 @@
|
||||
package net.i2p.data.i2np;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.ByteArray;
|
||||
|
||||
/**
|
||||
*
|
||||
* The basic build message with 8 records.
|
||||
*/
|
||||
public class TunnelBuildMessage extends I2NPMessageImpl {
|
||||
private ByteArray _records[];
|
||||
|
||||
public class TunnelBuildMessage extends TunnelBuildMessageBase {
|
||||
|
||||
public static final int MESSAGE_TYPE = 21;
|
||||
public static final int RECORD_COUNT = 8;
|
||||
|
||||
public TunnelBuildMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_records = new ByteArray[RECORD_COUNT];
|
||||
super(context, MAX_RECORD_COUNT);
|
||||
}
|
||||
|
||||
public void setRecord(int index, ByteArray record) { _records[index] = record; }
|
||||
public ByteArray getRecord(int index) { return _records[index]; }
|
||||
|
||||
public static final int RECORD_SIZE = 512+16;
|
||||
|
||||
protected int calculateWrittenLength() { return RECORD_SIZE * RECORD_COUNT; }
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
public void readMessage(byte[] data, int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE)
|
||||
throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
if (dataSize != calculateWrittenLength())
|
||||
throw new I2NPMessageException("Wrong length (expects " + calculateWrittenLength() + ", recv " + dataSize + ")");
|
||||
|
||||
for (int i = 0; i < RECORD_COUNT; i++) {
|
||||
int off = offset + (i * RECORD_SIZE);
|
||||
byte rec[] = new byte[RECORD_SIZE];
|
||||
System.arraycopy(data, off, rec, 0, RECORD_SIZE);
|
||||
setRecord(i, new ByteArray(rec)); //new ByteArray(data, off, len));
|
||||
}
|
||||
/** @since 0.7.12 */
|
||||
protected TunnelBuildMessage(I2PAppContext context, int records) {
|
||||
super(context, records);
|
||||
}
|
||||
|
||||
protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException {
|
||||
int remaining = out.length - (curIndex + calculateWrittenLength());
|
||||
if (remaining < 0)
|
||||
throw new I2NPMessageException("Not large enough (too short by " + remaining + ")");
|
||||
for (int i = 0; i < RECORD_COUNT; i++) {
|
||||
System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE);
|
||||
curIndex += RECORD_SIZE;
|
||||
}
|
||||
return curIndex;
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[TunnelBuildMessage]";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,74 @@
package net.i2p.data.i2np;

import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;

/**
 * Base for TBM, TBRM, VTBM, VTBRM
 * Retrofitted over them.
 * There's really no difference between the build and build reply.
 *
 * TBM and VTBM (but not TBRM and VTBRM?) messages are modified
 * in-place by doing a single setRecord(), and retransmitted.
 * Therefore they are NOT good candidates to use FastI2NPMessageImpl;
 * the checksum would have to be invalidated with every setRecord().
 * Which we could do in TBM and VTBM but not TBRM and VTBRM,
 * but keep it simple for now.
 *
 * @since 0.8.8
 */
public abstract class TunnelBuildMessageBase extends I2NPMessageImpl {
    protected ByteArray _records[];
    protected int RECORD_COUNT;
    public static final int MAX_RECORD_COUNT = 8;

    public TunnelBuildMessageBase(I2PAppContext context) {
        this(context, MAX_RECORD_COUNT);
    }

    /** @since 0.7.12 */
    protected TunnelBuildMessageBase(I2PAppContext context, int records) {
        super(context);
        if (records > 0) {
            RECORD_COUNT = records;
            _records = new ByteArray[records];
        }
        // else will be initialized by readMessage()
    }

    public void setRecord(int index, ByteArray record) { _records[index] = record; }

    public ByteArray getRecord(int index) { return _records[index]; }

    /** @since 0.7.12 */
    public int getRecordCount() { return RECORD_COUNT; }

    public static final int RECORD_SIZE = 512+16;

    protected int calculateWrittenLength() { return RECORD_SIZE * RECORD_COUNT; }

    public void readMessage(byte[] data, int offset, int dataSize, int type) throws I2NPMessageException {
        if (type != getType())
            throw new I2NPMessageException("Message type is incorrect for this message");
        if (dataSize != calculateWrittenLength())
            throw new I2NPMessageException("Wrong length (expects " + calculateWrittenLength() + ", recv " + dataSize + ")");

        for (int i = 0; i < RECORD_COUNT; i++) {
            int off = offset + (i * RECORD_SIZE);
            byte rec[] = new byte[RECORD_SIZE];
            System.arraycopy(data, off, rec, 0, RECORD_SIZE);
            setRecord(i, new ByteArray(rec));
        }
    }

    protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException {
        int remaining = out.length - (curIndex + calculateWrittenLength());
        if (remaining < 0)
            throw new I2NPMessageException("Not large enough (too short by " + remaining + ")");
        for (int i = 0; i < RECORD_COUNT; i++) {
            System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE);
            curIndex += RECORD_SIZE;
        }
        return curIndex;
    }
}
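
A hedged usage sketch (not from the patch) of the record API defined above; each of the MAX_RECORD_COUNT slots holds one RECORD_SIZE (512+16 byte) encrypted BuildRequestRecord:

    import net.i2p.I2PAppContext;
    import net.i2p.data.ByteArray;

    /** Sketch only: fill a TunnelBuildMessage with 8 dummy records and serialize it. */
    static byte[] buildDummyTBM(I2PAppContext ctx) {
        TunnelBuildMessage tbm = new TunnelBuildMessage(ctx);
        for (int i = 0; i < TunnelBuildMessageBase.MAX_RECORD_COUNT; i++) {
            byte[] rec = new byte[TunnelBuildMessageBase.RECORD_SIZE]; // 528 bytes per record
            ctx.random().nextBytes(rec);   // stand-in for a hop's ElGamal-encrypted BuildRequestRecord
            tbm.setRecord(i, new ByteArray(rec));
        }
        return tbm.toByteArray();          // 16-byte header + 8 * 528-byte body
    }
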
@@ -1,56 +1,29 @@
|
||||
package net.i2p.data.i2np;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.ByteArray;
|
||||
|
||||
/**
|
||||
* The basic build reply message with 8 records.
|
||||
* Transmitted from the new outbound endpoint to the creator through a
|
||||
* reply tunnel
|
||||
*/
|
||||
public class TunnelBuildReplyMessage extends I2NPMessageImpl {
|
||||
private ByteArray _records[];
|
||||
|
||||
public class TunnelBuildReplyMessage extends TunnelBuildMessageBase {
|
||||
|
||||
public static final int MESSAGE_TYPE = 22;
|
||||
public static final int RECORD_COUNT = TunnelBuildMessage.RECORD_COUNT;
|
||||
|
||||
public TunnelBuildReplyMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_records = new ByteArray[RECORD_COUNT];
|
||||
super(context, MAX_RECORD_COUNT);
|
||||
}
|
||||
|
||||
public void setRecord(int index, ByteArray record) { _records[index] = record; }
|
||||
public ByteArray getRecord(int index) { return _records[index]; }
|
||||
|
||||
public static final int RECORD_SIZE = TunnelBuildMessage.RECORD_SIZE;
|
||||
|
||||
protected int calculateWrittenLength() { return RECORD_SIZE * RECORD_COUNT; }
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
public void readMessage(byte[] data, int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE)
|
||||
throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
if (dataSize != calculateWrittenLength())
|
||||
throw new I2NPMessageException("Wrong length (expects " + calculateWrittenLength() + ", recv " + dataSize + ")");
|
||||
|
||||
for (int i = 0; i < RECORD_COUNT; i++) {
|
||||
int off = offset + (i * RECORD_SIZE);
|
||||
int len = RECORD_SIZE;
|
||||
byte rec[] = new byte[RECORD_SIZE];
|
||||
System.arraycopy(data, off, rec, 0, RECORD_SIZE);
|
||||
setRecord(i, new ByteArray(rec));
|
||||
//setRecord(i, new ByteArray(data, off, len));
|
||||
}
|
||||
/** @since 0.7.12 */
|
||||
protected TunnelBuildReplyMessage(I2PAppContext context, int records) {
|
||||
super(context, records);
|
||||
}
|
||||
|
||||
protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException {
|
||||
int remaining = out.length - (curIndex + calculateWrittenLength());
|
||||
if (remaining < 0)
|
||||
throw new I2NPMessageException("Not large enough (too short by " + remaining + ")");
|
||||
for (int i = 0; i < RECORD_COUNT; i++) {
|
||||
System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE);
|
||||
curIndex += RECORD_SIZE;
|
||||
}
|
||||
return curIndex;
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[TunnelBuildReplyMessage]";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,273 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Certificate;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.SessionTag;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message sent to a router to request that it participate in a
|
||||
* tunnel using the included configuration settings.
|
||||
*
|
||||
*/
|
||||
public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
private Log _log;
|
||||
public final static int MESSAGE_TYPE = 6;
|
||||
private Hash _nextRouter;
|
||||
private TunnelId _nextTunnelId;
|
||||
private int _durationSeconds;
|
||||
private SessionKey _layerKey;
|
||||
private SessionKey _ivKey;
|
||||
private Properties _options;
|
||||
private Hash _replyGateway;
|
||||
private TunnelId _replyTunnel;
|
||||
private SessionTag _replyTag;
|
||||
private SessionKey _replyKey;
|
||||
private boolean _isGateway;
|
||||
private long _nonce;
|
||||
private Certificate _certificate;
|
||||
|
||||
private byte[] _optionsCache;
|
||||
private byte[] _certificateCache;
|
||||
|
||||
public static final long MAX_NONCE_VALUE = ((1l << 32l) - 1l);
|
||||
|
||||
private static final Hash INVALID_HASH = new Hash(new byte[Hash.HASH_LENGTH]); // all 0s
|
||||
private static final TunnelId INVALID_TUNNEL = TunnelId.INVALID;
|
||||
|
||||
public TunnelCreateMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(TunnelCreateMessage.class);
|
||||
}
|
||||
|
||||
public void setNextRouter(Hash routerIdentityHash) { _nextRouter = routerIdentityHash; }
|
||||
public Hash getNextRouter() { return _nextRouter; }
|
||||
public void setNextTunnelId(TunnelId id) { _nextTunnelId = id; }
|
||||
public TunnelId getNextTunnelId() { return _nextTunnelId; }
|
||||
public void setDurationSeconds(int seconds) { _durationSeconds = seconds; }
|
||||
public int getDurationSeconds() { return _durationSeconds; }
|
||||
public void setLayerKey(SessionKey key) { _layerKey = key; }
|
||||
public SessionKey getLayerKey() { return _layerKey; }
|
||||
public void setIVKey(SessionKey key) { _ivKey = key; }
|
||||
public SessionKey getIVKey() { return _ivKey; }
|
||||
public void setCertificate(Certificate cert) { _certificate = cert; }
|
||||
public Certificate getCertificate() { return _certificate; }
|
||||
public void setReplyTag(SessionTag tag) { _replyTag = tag; }
|
||||
public SessionTag getReplyTag() { return _replyTag; }
|
||||
public void setReplyKey(SessionKey key) { _replyKey = key; }
|
||||
public SessionKey getReplyKey() { return _replyKey; }
|
||||
public void setReplyTunnel(TunnelId id) { _replyTunnel = id; }
|
||||
public TunnelId getReplyTunnel() { return _replyTunnel; }
|
||||
public void setReplyGateway(Hash peer) { _replyGateway = peer; }
|
||||
public Hash getReplyGateway() { return _replyGateway; }
|
||||
public void setNonce(long nonce) { _nonce = nonce; }
|
||||
public long getNonce() { return _nonce; }
|
||||
public void setIsGateway(boolean isGateway) { _isGateway = isGateway; }
|
||||
public boolean getIsGateway() { return _isGateway; }
|
||||
public Properties getOptions() { return _options; }
|
||||
public void setOptions(Properties opts) { _options = opts; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
|
||||
if (DataHelper.eq(INVALID_HASH.getData(), 0, data, offset, Hash.HASH_LENGTH)) {
|
||||
_nextRouter = null;
|
||||
} else {
|
||||
_nextRouter = new Hash(new byte[Hash.HASH_LENGTH]);
|
||||
System.arraycopy(data, offset, _nextRouter.getData(), 0, Hash.HASH_LENGTH);
|
||||
}
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
long id = DataHelper.fromLong(data, offset, 4);
|
||||
if (id > 0)
|
||||
_nextTunnelId = new TunnelId(id);
|
||||
offset += 4;
|
||||
|
||||
_durationSeconds = (int)DataHelper.fromLong(data, offset, 2);
|
||||
offset += 2;
|
||||
|
||||
_layerKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
|
||||
System.arraycopy(data, offset, _layerKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
_ivKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
|
||||
System.arraycopy(data, offset, _ivKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
try {
|
||||
Properties opts = new Properties();
|
||||
_options = opts;
|
||||
offset = DataHelper.fromProperties(data, offset, opts);
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the options", dfe);
|
||||
}
|
||||
|
||||
_replyGateway = new Hash(new byte[Hash.HASH_LENGTH]);
|
||||
System.arraycopy(data, offset, _replyGateway.getData(), 0, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
_replyTunnel = new TunnelId(DataHelper.fromLong(data, offset, 4));
|
||||
offset += 4;
|
||||
|
||||
_replyTag = new SessionTag(new byte[SessionTag.BYTE_LENGTH]);
|
||||
System.arraycopy(data, offset, _replyTag.getData(), 0, SessionTag.BYTE_LENGTH);
|
||||
offset += SessionTag.BYTE_LENGTH;
|
||||
|
||||
_replyKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
|
||||
System.arraycopy(data, offset, _replyKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
_nonce = DataHelper.fromLong(data, offset, 4);
|
||||
offset += 4;
|
||||
|
||||
try {
|
||||
Certificate cert = new Certificate();
|
||||
_certificate = cert;
|
||||
offset += cert.readBytes(data, offset);
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the certificate", dfe);
|
||||
}
|
||||
|
||||
Boolean b = DataHelper.fromBoolean(data, offset);
|
||||
if (b == null)
|
||||
throw new I2NPMessageException("isGateway == unknown?!");
|
||||
_isGateway = b.booleanValue();
|
||||
offset += DataHelper.BOOLEAN_LENGTH;
|
||||
}
|
||||
|
||||
|
||||
/** calculate the message body's length (not including the header and footer) */
|
||||
protected int calculateWrittenLength() {
|
||||
int length = 0;
|
||||
length += Hash.HASH_LENGTH; // nextRouter
|
||||
length += 4; // nextTunnel
|
||||
length += 2; // duration
|
||||
length += SessionKey.KEYSIZE_BYTES; // layerKey
|
||||
length += SessionKey.KEYSIZE_BYTES; // ivKey
|
||||
|
||||
if (_optionsCache == null)
|
||||
_optionsCache = DataHelper.toProperties(_options);
|
||||
length += _optionsCache.length;
|
||||
|
||||
length += Hash.HASH_LENGTH; // replyGateway
|
||||
length += 4; // replyTunnel
|
||||
length += SessionTag.BYTE_LENGTH; // replyTag
|
||||
length += SessionKey.KEYSIZE_BYTES; // replyKey
|
||||
length += 4; // nonce
|
||||
if (_certificateCache == null)
|
||||
_certificateCache = _certificate.toByteArray();
|
||||
length += _certificateCache.length;
|
||||
length += DataHelper.BOOLEAN_LENGTH;
|
||||
return length;
|
||||
}
|
||||
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte data[], int offset) throws I2NPMessageException {
|
||||
if (_nextRouter == null)
|
||||
System.arraycopy(INVALID_HASH.getData(), 0, data, offset, Hash.HASH_LENGTH);
|
||||
else
|
||||
System.arraycopy(_nextRouter.getData(), 0, data, offset, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
if (_nextTunnelId == null)
|
||||
DataHelper.toLong(data, offset, 4, 0);
|
||||
else
|
||||
DataHelper.toLong(data, offset, 4, _nextTunnelId.getTunnelId());
|
||||
offset += 4;
|
||||
|
||||
DataHelper.toLong(data, offset, 2, _durationSeconds);
|
||||
offset += 2;
|
||||
|
||||
System.arraycopy(_layerKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
System.arraycopy(_ivKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
if (_optionsCache == null)
|
||||
_optionsCache = DataHelper.toProperties(_options);
|
||||
System.arraycopy(_optionsCache, 0, data, offset, _optionsCache.length);
|
||||
offset += _optionsCache.length;
|
||||
|
||||
System.arraycopy(_replyGateway.getData(), 0, data, offset, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
DataHelper.toLong(data, offset, 4, _replyTunnel.getTunnelId());
|
||||
offset += 4;
|
||||
|
||||
System.arraycopy(_replyTag.getData(), 0, data, offset, SessionTag.BYTE_LENGTH);
|
||||
offset += SessionTag.BYTE_LENGTH;
|
||||
|
||||
System.arraycopy(_replyKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
DataHelper.toLong(data, offset, 4, _nonce);
|
||||
offset += 4;
|
||||
|
||||
if (_certificateCache == null)
|
||||
_certificateCache = _certificate.toByteArray();
|
||||
System.arraycopy(_certificateCache, 0, data, offset, _certificateCache.length);
|
||||
offset += _certificateCache.length;
|
||||
|
||||
DataHelper.toBoolean(data, offset, _isGateway);
|
||||
offset += DataHelper.BOOLEAN_LENGTH;
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = super.toByteArray();
|
||||
if (rv == null)
|
||||
throw new RuntimeException("unable to toByteArray(): " + toString());
|
||||
return rv;
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getNextRouter()) +
|
||||
DataHelper.hashCode(getNextTunnelId()) +
|
||||
DataHelper.hashCode(getReplyGateway()) +
|
||||
DataHelper.hashCode(getReplyTunnel());
|
||||
}
|
||||
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelCreateMessage) ) {
|
||||
TunnelCreateMessage msg = (TunnelCreateMessage)object;
|
||||
return DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
|
||||
DataHelper.eq(getNextTunnelId(), msg.getNextTunnelId()) &&
|
||||
DataHelper.eq(getReplyGateway(), msg.getReplyGateway()) &&
|
||||
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel());
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelCreateMessage: ");
|
||||
buf.append("\n\tNext Router: ").append(getNextRouter());
|
||||
buf.append("\n\tNext Tunnel: ").append(getNextTunnelId());
|
||||
buf.append("\n\tReply Tunnel: ").append(getReplyTunnel());
|
||||
buf.append("\n\tReply Peer: ").append(getReplyGateway());
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message a router sends to another router in reply to a
|
||||
* TunnelCreateMessage
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class TunnelCreateStatusMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(TunnelCreateStatusMessage.class);
|
||||
public final static int MESSAGE_TYPE = 7;
|
||||
private TunnelId _receiveTunnelId;
|
||||
private int _status;
|
||||
private long _nonce;
|
||||
|
||||
public final static int STATUS_SUCCESS = 0;
|
||||
|
||||
public TunnelCreateStatusMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setReceiveTunnelId(null);
|
||||
setStatus(-1);
|
||||
setNonce(-1);
|
||||
}
|
||||
|
||||
public TunnelId getReceiveTunnelId() { return _receiveTunnelId; }
|
||||
public void setReceiveTunnelId(TunnelId id) {
|
||||
_receiveTunnelId = id;
|
||||
if ( (id != null) && (id.getTunnelId() <= 0) )
|
||||
throw new IllegalArgumentException("wtf, tunnelId " + id);
|
||||
}
|
||||
|
||||
public int getStatus() { return _status; }
|
||||
public void setStatus(int status) { _status = status; }
|
||||
|
||||
public long getNonce() { return _nonce; }
|
||||
public void setNonce(long nonce) { _nonce = nonce; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
_receiveTunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
|
||||
curIndex += 4;
|
||||
|
||||
if (_receiveTunnelId.getTunnelId() <= 0)
|
||||
throw new I2NPMessageException("wtf, negative tunnelId? " + _receiveTunnelId);
|
||||
|
||||
_status = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
|
||||
_nonce = DataHelper.fromLong(data, curIndex, 4);
|
||||
}
|
||||
|
||||
|
||||
/** calculate the message body's length (not including the header and footer) */
|
||||
protected int calculateWrittenLength() {
|
||||
return 4 + 1 + 4; // id + status + nonce
|
||||
}
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if ( (_receiveTunnelId == null) || (_nonce <= 0) ) throw new I2NPMessageException("Not enough data to write out");
|
||||
if (_receiveTunnelId.getTunnelId() <= 0) throw new I2NPMessageException("Invalid tunnelId!? " + _receiveTunnelId);
|
||||
|
||||
DataHelper.toLong(out, curIndex, 4, _receiveTunnelId.getTunnelId());
|
||||
curIndex += 4;
|
||||
DataHelper.toLong(out, curIndex, 1, _status);
|
||||
curIndex++;
|
||||
DataHelper.toLong(out, curIndex, 4, _nonce);
|
||||
curIndex += 4;
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getReceiveTunnelId()) +
|
||||
getStatus() +
|
||||
(int)getNonce();
|
||||
}
|
||||
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelCreateStatusMessage) ) {
|
||||
TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)object;
|
||||
return DataHelper.eq(getReceiveTunnelId(),msg.getReceiveTunnelId()) &&
|
||||
DataHelper.eq(getNonce(),msg.getNonce()) &&
|
||||
(getStatus() == msg.getStatus());
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelCreateStatusMessage: ");
|
||||
buf.append("\n\tTunnel ID: ").append(getReceiveTunnelId());
|
||||
buf.append("\n\tStatus: ").append(getStatus());
|
||||
buf.append("\n\tNonce: ").append(getNonce());
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
}
|
||||
@@ -8,32 +8,33 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.ByteArray;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.ByteCache;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the message sent between routers as part of the tunnel delivery
|
||||
*
|
||||
* The tunnel ID is changed in-place by TunnelParticipant.send(), so
|
||||
* we can't reuse the checksum on output, but we still subclass
|
||||
* FastI2NPMessageImpl so we don't verify the checksum on input...
|
||||
* because this is a high-usage class.
|
||||
*
|
||||
*/
|
||||
public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
private Log _log;
|
||||
public class TunnelDataMessage extends FastI2NPMessageImpl {
|
||||
private long _tunnelId;
|
||||
private TunnelId _tunnelIdObj;
|
||||
private byte[] _data;
|
||||
private ByteArray _dataBuf;
|
||||
|
||||
public final static int MESSAGE_TYPE = 18;
|
||||
private static final int DATA_SIZE = 1024;
|
||||
public static final int DATA_SIZE = 1024;
|
||||
/** if we can't deliver a tunnel message in 10s, fuck it */
|
||||
private static final int EXPIRATION_PERIOD = 10*1000;
|
||||
|
||||
private static final ByteCache _cache = ByteCache.getInstance(512, DATA_SIZE);
|
||||
private static final ByteCache _cache;
|
||||
/**
|
||||
* When true, it means this tunnelDataMessage is being used as part of a tunnel
|
||||
* processing pipeline, where the byte array is acquired during the TunnelDataMessage's
|
||||
@@ -42,35 +43,114 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
* handler's cache, etc), until it is finally released back into the cache when written
|
||||
* to the next peer (or explicitly by the fragment handler's completion).
|
||||
* Setting this to false just increases memory churn
|
||||
*
|
||||
* Well, this is tricky to get right and avoid data corruption,
|
||||
* here's an example after checks were put in:
|
||||
*
|
||||
*
|
||||
10:57:05.197 CRIT [NTCP read 1 ] 2p.data.i2np.TunnelDataMessage: TDM boom
|
||||
net.i2p.data.i2np.I2NPMessageException: TDM data buf use after free
|
||||
at net.i2p.data.i2np.TunnelDataMessage.writeMessageBody(TunnelDataMessage.java:124)
|
||||
at net.i2p.data.i2np.I2NPMessageImpl.toByteArray(I2NPMessageImpl.java:217)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection.bufferedPrepare(NTCPConnection.java:678)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection.send(NTCPConnection.java:293)
|
||||
at net.i2p.router.transport.ntcp.NTCPTransport.outboundMessageReady(NTCPTransport.java:185)
|
||||
at net.i2p.router.transport.TransportImpl.send(TransportImpl.java:357)
|
||||
at net.i2p.router.transport.GetBidsJob.getBids(GetBidsJob.java:80)
|
||||
at net.i2p.router.transport.CommSystemFacadeImpl.processMessage(CommSystemFacadeImpl.java:129)
|
||||
at net.i2p.router.OutNetMessagePool.add(OutNetMessagePool.java:61)
|
||||
at net.i2p.router.transport.TransportImpl.afterSend(TransportImpl.java:252)
|
||||
at net.i2p.router.transport.TransportImpl.afterSend(TransportImpl.java:163)
|
||||
at net.i2p.router.transport.udp.UDPTransport.failed(UDPTransport.java:1314)
|
||||
at net.i2p.router.transport.udp.PeerState.add(PeerState.java:1064)
|
||||
at net.i2p.router.transport.udp.OutboundMessageFragments.add(OutboundMessageFragments.java:146)
|
||||
at net.i2p.router.transport.udp.UDPTransport.send(UDPTransport.java:1098)
|
||||
at net.i2p.router.transport.GetBidsJob.getBids(GetBidsJob.java:80)
|
||||
at net.i2p.router.transport.CommSystemFacadeImpl.processMessage(CommSystemFacadeImpl.java:129)
|
||||
at net.i2p.router.OutNetMessagePool.add(OutNetMessagePool.java:61)
|
||||
at net.i2p.router.tunnel.TunnelParticipant.send(TunnelParticipant.java:172)
|
||||
at net.i2p.router.tunnel.TunnelParticipant.dispatch(TunnelParticipant.java:86)
|
||||
at net.i2p.router.tunnel.TunnelDispatcher.dispatch(TunnelDispatcher.java:351)
|
||||
at net.i2p.router.InNetMessagePool.doShortCircuitTunnelData(InNetMessagePool.java:306)
|
||||
at net.i2p.router.InNetMessagePool.shortCircuitTunnelData(InNetMessagePool.java:291)
|
||||
at net.i2p.router.InNetMessagePool.add(InNetMessagePool.java:160)
|
||||
at net.i2p.router.transport.TransportManager.messageReceived(TransportManager.java:462)
|
||||
at net.i2p.router.transport.TransportImpl.messageReceived(TransportImpl.java:416)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection$ReadState.receiveLastBlock(NTCPConnection.java:1285)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection$ReadState.receiveSubsequent(NTCPConnection.java:1248)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection$ReadState.receiveBlock(NTCPConnection.java:1205)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection.recvUnencryptedI2NP(NTCPConnection.java:1035)
|
||||
at net.i2p.router.transport.ntcp.NTCPConnection.recvEncryptedI2NP(NTCPConnection.java:1018)
|
||||
at net.i2p.router.transport.ntcp.Reader.processRead(Reader.java:167)
|
||||
at net.i2p.router.transport.ntcp.Reader.access$400(Reader.java:17)
|
||||
at net.i2p.router.transport.ntcp.Reader$Runner.run(Reader.java:106)
|
||||
at java.lang.Thread.run(Thread.java:619)
|
||||
at net.i2p.util.I2PThread.run(I2PThread.java:71)
|
||||
*
|
||||
*/
|
||||
private static final boolean PIPELINED_CACHE = true;
|
||||
|
||||
static {
|
||||
if (PIPELINED_CACHE)
|
||||
_cache = ByteCache.getInstance(512, DATA_SIZE);
|
||||
else
|
||||
_cache = null;
|
||||
}
|
||||
|
||||
/** For use-after-free checks. Always false if PIPELINED_CACHE is false. */
|
||||
private boolean _hadCache;
|
||||
|
||||
public TunnelDataMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(TunnelDataMessage.class);
|
||||
setMessageExpiration(context.clock().now() + EXPIRATION_PERIOD);
|
||||
}
|
||||
|
||||
public long getTunnelId() { return _tunnelId; }
|
||||
public void setTunnelId(long id) { _tunnelId = id; }
|
||||
|
||||
/**
|
||||
* (correctly) Invalidates stored checksum
|
||||
*/
|
||||
public void setTunnelId(long id) {
|
||||
_hasChecksum = false;
|
||||
_tunnelId = id;
|
||||
}
|
||||
|
||||
public TunnelId getTunnelIdObj() {
|
||||
if (_tunnelIdObj == null)
|
||||
_tunnelIdObj = new TunnelId(_tunnelId); // not thread safe, but immutable, so who cares
|
||||
return _tunnelIdObj;
|
||||
}
|
||||
|
||||
/**
|
||||
* (correctly) Invalidates stored checksum
|
||||
*/
|
||||
public void setTunnelId(TunnelId id) {
|
||||
_hasChecksum = false;
|
||||
_tunnelIdObj = id;
|
||||
_tunnelId = id.getTunnelId();
|
||||
}
|
||||
|
||||
public byte[] getData() { return _data; }
|
||||
public byte[] getData() {
|
||||
if (_hadCache && _dataBuf == null) {
|
||||
RuntimeException e = new RuntimeException("TDM data buf use after free");
|
||||
_log.error("TDM boom", e);
|
||||
throw e;
|
||||
}
|
||||
return _data;
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if data previously set, to protect saved checksum
|
||||
*/
|
||||
public void setData(byte data[]) {
|
||||
if (_data != null)
|
||||
throw new IllegalStateException();
|
||||
if ( (data == null) || (data.length <= 0) )
|
||||
throw new IllegalArgumentException("Empty tunnel payload?");
|
||||
_data = data;
|
||||
}
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
@@ -86,6 +166,7 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
if (PIPELINED_CACHE) {
|
||||
_dataBuf = _cache.acquire();
|
||||
_data = _dataBuf.getData();
|
||||
_hadCache = true;
|
||||
} else {
|
||||
_data = new byte[DATA_SIZE];
|
||||
}
|
||||
@@ -97,36 +178,51 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if ( (_tunnelId <= 0) || (_data == null) )
|
||||
throw new I2NPMessageException("Not enough data to write out (id=" + _tunnelId + " data=" + _data + ")");
|
||||
throw new I2NPMessageException("Not enough data to write out (id=" + _tunnelId + ")");
|
||||
if (_data.length <= 0)
|
||||
throw new I2NPMessageException("Not enough data to write out (data.length=" + _data.length + ")");
|
||||
|
||||
if (_hadCache && _dataBuf == null) {
|
||||
I2NPMessageException e = new I2NPMessageException("TDM data buf use after free");
|
||||
_log.error("TDM boom", e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
DataHelper.toLong(out, curIndex, 4, _tunnelId);
|
||||
curIndex += 4;
|
||||
System.arraycopy(_data, 0, out, curIndex, DATA_SIZE);
|
||||
curIndex += _data.length;
|
||||
if (PIPELINED_CACHE)
|
||||
_cache.release(_dataBuf);
|
||||
|
||||
// We can use from the cache, we just can't release to the cache, due to the bug
|
||||
// noted above. In effect, this means that transmitted TDMs don't get their
|
||||
// dataBufs released - but received TDMs do (via FragmentHandler)
|
||||
//if (_hadCache) {
|
||||
// _cache.release(_dataBuf);
|
||||
// _dataBuf = null;
|
||||
//}
|
||||
return curIndex;
|
||||
}
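
To make the buffer lifecycle discussed in the comments above concrete, a hedged sketch (standalone, not the router code) of the ByteCache acquire/use/release pattern that readMessage() and the fragment handler follow:

    import net.i2p.data.ByteArray;
    import net.i2p.util.ByteCache;

    /** Sketch only: the pipelined-cache lifecycle the use-after-free checks above guard. */
    static void cacheLifecycleDemo() {
        ByteCache cache = ByteCache.getInstance(512, 1024); // same parameters as _cache (512 x DATA_SIZE)
        ByteArray buf = cache.acquire();                    // readMessage(): borrow a DATA_SIZE buffer
        byte[] data = buf.getData();
        data[0] = 0;                                        // ... fill with the tunnel payload ...
        cache.release(buf);                                 // released exactly once, by the final consumer
        // any later getData()/write on this message is the "use after free" being detected above
    }
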
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)_tunnelId +
|
||||
DataHelper.hashCode(_data);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelDataMessage) ) {
|
||||
TunnelDataMessage msg = (TunnelDataMessage)object;
|
||||
return DataHelper.eq(getTunnelId(),msg.getTunnelId()) &&
|
||||
return _tunnelId == msg.getTunnelId() &&
|
||||
DataHelper.eq(getData(),msg.getData());
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = super.toByteArray();
|
||||
if (rv == null)
|
||||
@@ -134,11 +230,12 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[TunnelDataMessage:");
|
||||
buf.append(" MessageId: ").append(getUniqueId());
|
||||
buf.append(" Tunnel ID: ").append(getTunnelId());
|
||||
buf.append(" MessageId: ").append(_uniqueId);
|
||||
buf.append(" Tunnel ID: ").append(_tunnelId);
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
@@ -8,8 +8,6 @@ package net.i2p.data.i2np;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.TunnelId;
|
||||
@@ -20,12 +18,11 @@ import net.i2p.util.Log;
|
||||
* format: { tunnelId, sizeof(i2npMessage.toByteArray()), i2npMessage.toByteArray() }
|
||||
*
|
||||
*/
|
||||
public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
private Log _log;
|
||||
public class TunnelGatewayMessage extends FastI2NPMessageImpl {
|
||||
private TunnelId _tunnelId;
|
||||
private I2NPMessage _msg;
|
||||
private byte _msgData[];
|
||||
private Exception _creator;
|
||||
//private Exception _creator;
|
||||
|
||||
public final static int MESSAGE_TYPE = 19;
|
||||
/** if we can't deliver a tunnel message in 10s, fuck it */
|
||||
@@ -33,16 +30,37 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
|
||||
public TunnelGatewayMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(TunnelGatewayMessage.class);
|
||||
setMessageExpiration(context.clock().now() + EXPIRATION_PERIOD);
|
||||
//_creator = new Exception("i made this");
|
||||
}
|
||||
|
||||
public TunnelId getTunnelId() { return _tunnelId; }
|
||||
public void setTunnelId(TunnelId id) { _tunnelId = id; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if id previously set, to protect saved checksum
|
||||
*/
|
||||
public void setTunnelId(TunnelId id) {
|
||||
if (_tunnelId != null)
|
||||
throw new IllegalStateException();
|
||||
_tunnelId = id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Warning, at the IBGW, where the message was read in,
|
||||
* this will be an UnknownI2NPMessage.
|
||||
* If you need a real message class, use UnknownI2NPMessage.convert().
|
||||
*
|
||||
* Note that if you change the expiration on the embedded message it will
|
||||
* mess up the checksum of this message, so don't do that.
|
||||
*/
|
||||
public I2NPMessage getMessage() { return _msg; }
|
||||
|
||||
/**
|
||||
* @throws IllegalStateException if msg previously set, to protect saved checksum
|
||||
*/
|
||||
public void setMessage(I2NPMessage msg) {
|
||||
if (_msg != null)
|
||||
throw new IllegalStateException();
|
||||
if (msg == null)
|
||||
throw new IllegalArgumentException("wtf, dont set me to null");
|
||||
_msg = msg;
|
||||
@@ -61,7 +79,7 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if ( (_tunnelId == null) || ( (_msg == null) && (_msgData == null) ) ) {
|
||||
_log.log(Log.CRIT, "failing to write out gateway message, created by: ", _creator);
|
||||
_log.log(Log.CRIT, "failing to write out gateway message");
|
||||
throw new I2NPMessageException("Not enough data to write out (id=" + _tunnelId + " data=" + _msg + ")");
|
||||
}
|
||||
|
||||
@@ -75,17 +93,31 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
}
|
||||
DataHelper.toLong(out, curIndex, 2, _msgData.length);
|
||||
curIndex += 2;
|
||||
// where is this coming from?
|
||||
if (curIndex + _msgData.length > out.length) {
|
||||
_log.log(Log.ERROR, "output buffer too small idx: " + curIndex + " len: " + _msgData.length + " outlen: " + out.length);
|
||||
throw new I2NPMessageException("Too much data to write out (id=" + _tunnelId + " data=" + _msg + ")");
|
||||
}
|
||||
System.arraycopy(_msgData, 0, out, curIndex, _msgData.length);
|
||||
curIndex += _msgData.length;
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
I2NPMessageHandler h = new I2NPMessageHandler(_context);
|
||||
readMessage(data, offset, dataSize, type, h);
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
|
||||
//I2NPMessageHandler h = new I2NPMessageHandler(_context);
|
||||
//readMessage(data, offset, dataSize, type, h);
|
||||
readMessage(data, offset, dataSize, type, null);
|
||||
}
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException, IOException {
|
||||
|
||||
/**
|
||||
* Note that for efficiency at the IBGW, this does not fully deserialize the included
|
||||
* I2NP Message. It just puts it in an UnknownI2NPMessage.
|
||||
*
|
||||
* @param handler unused, may be null
|
||||
*/
|
||||
@Override
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
@@ -95,21 +127,43 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
if (_tunnelId.getTunnelId() <= 0)
|
||||
throw new I2NPMessageException("Invalid tunnel Id " + _tunnelId);
|
||||
|
||||
        DataHelper.fromLong(data, curIndex, 2);
        int len = (int) DataHelper.fromLong(data, curIndex, 2);
        curIndex += 2;
        curIndex = handler.readMessage(data, curIndex);
        _msg = handler.lastRead();
        if (_msg == null)
            throw new I2NPMessageException("wtf, message read has no payload?");
        if (len <= 1 || curIndex + len > data.length || len > dataSize - 6)
            throw new I2NPMessageException("I2NP length in TGM: " + len +
                                           " but remaining bytes: " + Math.min(data.length - curIndex, dataSize - 6));

        // OLD WAY full message parsing and instantiation
        //handler.readMessage(data, curIndex);
        //_msg = handler.lastRead();
        //if (_msg == null)
        //    throw new I2NPMessageException("wtf, message read has no payload?");

        // NEW WAY save lots of effort at the IBGW by reading as an UnknownI2NPMessage instead
        // This will save a lot of object churn and processing,
        // primarily for unencrypted msgs (V)TBRM, DatabaseStoreMessage, and DSRMs.
        // DatabaseStoreMessages in particular are intensive for readBytes()
        // since the RI is decompressed.
        // For a zero-hop IB tunnel, where we do need the real thing,
        // it is converted to a real message class using UnknownI2NPMessage.convert()
        // in TunnelGatewayZeroHop.
        // We also skip processing the checksum as it's covered by the TGM checksum.
        // If a zero-hop, the checksum will be verified in convert().
        int utype = data[curIndex++] & 0xff;
        UnknownI2NPMessage umsg = new UnknownI2NPMessage(_context, utype);
        umsg.readBytes(data, utype, curIndex);
        _msg = umsg;
    }
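
As a hedged illustration of the zero-hop path mentioned in the comments (convert() is named in this file; the wrapper and its exact signature are assumed):

    /** Sketch only: at a zero-hop inbound tunnel the embedded message is converted to its real class. */
    static I2NPMessage unwrapAtZeroHop(TunnelGatewayMessage tgm) throws I2NPMessageException {
        I2NPMessage embedded = tgm.getMessage();              // an UnknownI2NPMessage at the IBGW
        if (embedded instanceof UnknownI2NPMessage)
            return ((UnknownI2NPMessage) embedded).convert(); // checksum verified here, per the comment above
        return embedded;
    }
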
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getTunnelId()) +
|
||||
DataHelper.hashCode(_msg);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelGatewayMessage) ) {
|
||||
TunnelGatewayMessage msg = (TunnelGatewayMessage)object;
|
||||
@@ -121,8 +175,9 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("[TunnelGatewayMessage:");
|
||||
buf.append(" Tunnel ID: ").append(getTunnelId());
|
||||
buf.append(" Message: ").append(_msg);
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Contains the session key used by the tunnel gateway to encrypt the DeliveryInstructions
|
||||
* and used by the tunnel end point to decrypt those instructions.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class TunnelSessionKey extends DataStructureImpl {
|
||||
private final static Log _log = new Log(TunnelSessionKey.class);
|
||||
private SessionKey _key;
|
||||
|
||||
public TunnelSessionKey() { this(null); }
|
||||
public TunnelSessionKey(SessionKey key) { setKey(key); }
|
||||
|
||||
public SessionKey getKey() { return _key; }
|
||||
public void setKey(SessionKey key) { _key= key; }
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_key = new SessionKey();
|
||||
_key.readBytes(in);
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if (_key == null) throw new DataFormatException("Invalid key");
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof TunnelSessionKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((TunnelSessionKey)obj).getKey());
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "[TunnelSessionKey: " + getKey() + "]";
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
package net.i2p.data.i2np;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Contains the private key which constructs a signature for the TunnelMessage
|
||||
* which every participant in a tunnel uses to verify the
|
||||
* TunnelVerificationStructure with.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class TunnelSigningPrivateKey extends DataStructureImpl {
|
||||
private final static Log _log = new Log(EndPointPrivateKey.class);
|
||||
private SigningPrivateKey _key;
|
||||
|
||||
public TunnelSigningPrivateKey() { this(null); }
|
||||
public TunnelSigningPrivateKey(SigningPrivateKey key) { setKey(key); }
|
||||
|
||||
public SigningPrivateKey getKey() { return _key; }
|
||||
public void setKey(SigningPrivateKey key) { _key= key; }
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_key = new SigningPrivateKey();
|
||||
_key.readBytes(in);
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if (_key == null) throw new DataFormatException("Invalid key");
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof TunnelSigningPrivateKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((TunnelSigningPrivateKey)obj).getKey());
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "[EndPointPrivateKey: " + getKey() + "]";
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
package net.i2p.data.i2np;
/*
 * free (adj.): unencumbered; not under the control of others
 * Written by jrandom in 2003 and released into the public domain
 * with no warranty of any kind, either expressed or implied.
 * It probably won't make your computer catch on fire, or eat
 * your children, but it might. Use at your own risk.
 *
 */

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructureImpl;
import net.i2p.data.SigningPublicKey;
import net.i2p.util.Log;

/**
 * Contains the public key which every participant in a tunnel uses to verify
 * the TunnelVerificationStructure for TunnelMessages that pass by.
 *
 * @author jrandom
 */
public class TunnelSigningPublicKey extends DataStructureImpl {
    private final static Log _log = new Log(TunnelSigningPublicKey.class);
    private SigningPublicKey _key;

    public TunnelSigningPublicKey() { this(null); }
    public TunnelSigningPublicKey(SigningPublicKey key) { setKey(key); }

    public SigningPublicKey getKey() { return _key; }
    public void setKey(SigningPublicKey key) { _key= key; }

    public void readBytes(InputStream in) throws DataFormatException, IOException {
        _key = new SigningPublicKey();
        _key.readBytes(in);
    }

    public void writeBytes(OutputStream out) throws DataFormatException, IOException {
        if (_key == null) throw new DataFormatException("Invalid key");
        _key.writeBytes(out);
    }

    public boolean equals(Object obj) {
        if ( (obj == null) || !(obj instanceof TunnelSigningPublicKey))
            return false;
        return DataHelper.eq(getKey(), ((TunnelSigningPublicKey)obj).getKey());
    }

    public int hashCode() {
        if (_key == null) return 0;
        return getKey().hashCode();
    }

    public String toString() {
        return "[TunnelSigningPublicKey: " + getKey() + "]";
    }
}
@@ -1,88 +0,0 @@
package net.i2p.data.i2np;
/*
 * free (adj.): unencumbered; not under the control of others
 * Written by jrandom in 2003 and released into the public domain
 * with no warranty of any kind, either expressed or implied.
 * It probably won't make your computer catch on fire, or eat
 * your children, but it might. Use at your own risk.
 *
 */

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructureImpl;
import net.i2p.data.Hash;
import net.i2p.data.Signature;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;
import net.i2p.router.RouterContext;

/**
 *
 * @author jrandom
 */
public class TunnelVerificationStructure extends DataStructureImpl {
    private Hash _msgHash;
    private Signature _authSignature;

    public TunnelVerificationStructure() { this(null, null); }
    public TunnelVerificationStructure(Hash messageHash, Signature authSig) {
        setMessageHash(messageHash);
        setAuthorizationSignature(authSig);
    }

    public Hash getMessageHash() { return _msgHash; }
    public void setMessageHash(Hash hash) { _msgHash = hash; }

    public Signature getAuthorizationSignature() { return _authSignature; }
    public void setAuthorizationSignature(Signature sig) { _authSignature = sig; }

    public void sign(RouterContext context, SigningPrivateKey key) {
        if (_msgHash != null) {
            Signature sig = context.dsa().sign(_msgHash.getData(), key);
            setAuthorizationSignature(sig);
        }
    }
    public boolean verifySignature(RouterContext context, SigningPublicKey key) {
        if (_msgHash == null) return false;
        return context.dsa().verifySignature(_authSignature, _msgHash.getData(), key);
    }

    public void readBytes(InputStream in) throws DataFormatException, IOException {
        _msgHash = new Hash();
        _msgHash.readBytes(in);
        _authSignature = new Signature();
        _authSignature.readBytes(in);
    }

    public void writeBytes(OutputStream out) throws DataFormatException, IOException {
        if (_authSignature == null) {
            _authSignature = new Signature();
            _authSignature.setData(Signature.FAKE_SIGNATURE);
        }
        if ( (_msgHash == null) || (_authSignature == null) ) throw new DataFormatException("Invalid data");
        _msgHash.writeBytes(out);
        _authSignature.writeBytes(out);
    }

    public boolean equals(Object obj) {
        if ( (obj == null) || !(obj instanceof TunnelVerificationStructure))
            return false;
        TunnelVerificationStructure str = (TunnelVerificationStructure)obj;
        return DataHelper.eq(getMessageHash(), str.getMessageHash()) &&
               DataHelper.eq(getAuthorizationSignature(), str.getAuthorizationSignature());
    }

    public int hashCode() {
        if ( (_msgHash == null) || (_authSignature == null) ) return 0;
        return getMessageHash().hashCode() + getAuthorizationSignature().hashCode();
    }

    public String toString() {
        return "[TunnelVerificationStructure: " + getMessageHash() + " " + getAuthorizationSignature() + "]";
    }
}
132
router/java/src/net/i2p/data/i2np/UnknownI2NPMessage.java
Normal file
@@ -0,0 +1,132 @@
package net.i2p.data.i2np;
/*
 * free (adj.): unencumbered; not under the control of others
 * Written by jrandom in 2003 and released into the public domain
 * with no warranty of any kind, either expressed or implied.
 * It probably won't make your computer catch on fire, or eat
 * your children, but it might. Use at your own risk.
 *
 */

import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.SimpleByteCache;

/**
 * This is similar to DataMessage or GarlicMessage but with a variable message type.
 * This is defined so routers can route messages they don't know about.
 * We don't extend those classes so that any code that does (instanceof foo)
 * won't return true for this type. Load tests use DataMessage, for example.
 * Also, those classes include an additional length field that we can't use here.
 * See InboundMessageDistributor.
 *
 * There is no setData() method, the only way to create one of these is to
 * read it with readMessage() (i.e., it came from some other router)
 *
 * As of 0.8.12 this class is working. It is used at the IBGW to reduce the processing
 * required. For zero-hop IB tunnels, the convert() method is used to reconstitute
 * a standard message class.
 *
 * @since 0.7.12 but broken before 0.8.12
 */
public class UnknownI2NPMessage extends FastI2NPMessageImpl {
    private byte _data[];
    private final int _type;

    /** @param type 0-255 */
    public UnknownI2NPMessage(I2PAppContext context, int type) {
        super(context);
        _type = type;
    }

    /**
     * @throws IllegalStateException if data previously set, to protect saved checksum
     */
    public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException {
        if (_data != null)
            throw new IllegalStateException();
        if (type != _type) throw new I2NPMessageException("Message type is incorrect for this message");
        if (dataSize > MAX_SIZE)
            throw new I2NPMessageException("wtf, size=" + dataSize);
        _data = new byte[dataSize];
        System.arraycopy(data, offset, _data, 0, dataSize);
    }

    /** calculate the message body's length (not including the header and footer) */
    protected int calculateWrittenLength() {
        if (_data == null)
            return 0;
        else
            return _data.length;
    }

    /** write the message body to the output array, starting at the given index */
    protected int writeMessageBody(byte out[], int curIndex) {
        if (_data != null) {
            System.arraycopy(_data, 0, out, curIndex, _data.length);
            curIndex += _data.length;
        }
        return curIndex;
    }

    /**
     * Note that this returns the "true" type, so that
     * the IBGW can correctly make drop decisions.
     *
     * @return 0-255
     */
    public int getType() { return _type; }

    /**
     * Attempt to convert this message to a known message class.
     * This does the delayed verification using the saved checksum.
     *
     * Used by TunnelGatewayZeroHop.
     *
     * @throws I2NPMessageException if the conversion fails
     * @since 0.8.12
     */
    public I2NPMessage convert() throws I2NPMessageException {
        if (_data == null || !_hasChecksum)
            throw new I2NPMessageException("Illegal state");
        I2NPMessage msg = I2NPMessageImpl.createMessage(_context, _type);
        if (msg instanceof UnknownI2NPMessage)
            throw new I2NPMessageException("Unable to convert unknown type " + _type);
        byte[] calc = SimpleByteCache.acquire(Hash.HASH_LENGTH);
        _context.sha().calculateHash(_data, 0, _data.length, calc, 0);
        boolean eq = _checksum == calc[0];
        SimpleByteCache.release(calc);
        if (!eq)
            throw new I2NPMessageException("Bad checksum on " + _data.length + " byte msg type " + _type);
        msg.readMessage(_data, 0, _data.length, _type);
        msg.setUniqueId(_uniqueId);
        msg.setMessageExpiration(_expiration);
        return msg;
    }

    @Override
    public int hashCode() {
        return _type + DataHelper.hashCode(_data);
    }

    @Override
    public boolean equals(Object object) {
        if ( (object != null) && (object instanceof UnknownI2NPMessage) ) {
            UnknownI2NPMessage msg = (UnknownI2NPMessage)object;
            return _type == msg.getType() && DataHelper.eq(_data, msg._data);
        } else {
            return false;
        }
    }

    @Override
    public String toString() {
        StringBuilder buf = new StringBuilder();
        buf.append("[UnknownI2NPMessage: ");
        buf.append("\n\tType: ").append(_type);
        buf.append("\n\tLength: ").append(calculateWrittenLength());
        buf.append("]");
        return buf.toString();
    }
}
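The convert() method above does its delayed integrity check by comparing the single saved checksum byte against the first byte of the SHA-256 of the stored body. The standalone sketch below shows only that comparison; it uses java.security.MessageDigest and invented names (ChecksumSketch, checksumMatches) rather than the router's own _context.sha() and SimpleByteCache helpers, so treat it as an illustration of the idea, not router code.

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Standalone illustration of the one-byte checksum test done in convert():
// the saved checksum is compared against the first byte of SHA-256(body).
public class ChecksumSketch {
    static boolean checksumMatches(byte savedChecksum, byte[] body)
            throws NoSuchAlgorithmException {
        MessageDigest sha = MessageDigest.getInstance("SHA-256");
        byte[] calc = sha.digest(body);      // full 32-byte hash
        return savedChecksum == calc[0];     // only the first byte is kept alongside the message
    }

    public static void main(String[] args) throws Exception {
        byte[] body = "example payload".getBytes("UTF-8");
        byte saved = MessageDigest.getInstance("SHA-256").digest(body)[0];
        System.out.println(checksumMatches(saved, body));              // true
        System.out.println(checksumMatches((byte) (saved ^ 1), body)); // false
    }
}

A one-byte check obviously only catches accidental corruption, which is all it is meant to do here; end-to-end integrity is handled elsewhere.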
@@ -0,0 +1,68 @@
package net.i2p.data.i2np;

import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;

/**
 * Variable number of records.
 *
 * @since 0.7.12
 */
public class VariableTunnelBuildMessage extends TunnelBuildMessage {
    public static final int MESSAGE_TYPE = 23;

    /** zero record count, will be set with readMessage() */
    public VariableTunnelBuildMessage(I2PAppContext context) {
        super(context, 0);
    }

    public VariableTunnelBuildMessage(I2PAppContext context, int records) {
        super(context, records);
    }

    @Override
    protected int calculateWrittenLength() { return 1 + super.calculateWrittenLength(); }

    @Override
    public int getType() { return MESSAGE_TYPE; }

    @Override
    public void readMessage(byte[] data, int offset, int dataSize, int type) throws I2NPMessageException {
        // message type will be checked in super()
        int r = (int)DataHelper.fromLong(data, offset, 1);
        if (r <= 0 || r > MAX_RECORD_COUNT)
            throw new I2NPMessageException("Bad record count " + r);
        RECORD_COUNT = r;
        if (dataSize != calculateWrittenLength())
            throw new I2NPMessageException("Wrong length (expects " + calculateWrittenLength() + ", recv " + dataSize + ")");
        _records = new ByteArray[RECORD_COUNT];
        super.readMessage(data, offset + 1, dataSize, type);
    }

    @Override
    protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException {
        int remaining = out.length - (curIndex + calculateWrittenLength());
        if (remaining < 0)
            throw new I2NPMessageException("Not large enough (too short by " + remaining + ")");
        if (RECORD_COUNT <= 0 || RECORD_COUNT > MAX_RECORD_COUNT)
            throw new I2NPMessageException("Bad record count " + RECORD_COUNT);
        DataHelper.toLong(out, curIndex++, 1, RECORD_COUNT);
        // can't call super, written length check will fail
        //return super.writeMessageBody(out, curIndex + 1);
        for (int i = 0; i < RECORD_COUNT; i++) {
            System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE);
            curIndex += RECORD_SIZE;
        }
        return curIndex;
    }

    @Override
    public String toString() {
        StringBuilder buf = new StringBuilder(64);
        buf.append("[VariableTunnelBuildMessage: " +
                   "\n\tRecords: ").append(getRecordCount())
           .append(']');
        return buf.toString();
    }
}
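readMessage() and writeMessageBody() above frame a VariableTunnelBuildMessage as a single record-count byte followed by that many fixed-size encrypted records. Below is a self-contained sketch of the same framing; the class and method names are invented for illustration, and the 528-byte record size is assumed from the encrypted build-record layout rather than read from TunnelBuildMessage itself.

import java.util.Arrays;

// Illustrative framing: one length byte (the record count) followed by
// count * RECORD_SIZE bytes of opaque, already-encrypted records.
public class VtbmFramingSketch {
    static final int RECORD_SIZE = 528;   // assumed size of one encrypted build record

    static byte[] write(byte[][] records) {
        byte[] out = new byte[1 + records.length * RECORD_SIZE];
        out[0] = (byte) records.length;   // record count; the real class limits this to MAX_RECORD_COUNT
        for (int i = 0; i < records.length; i++)
            System.arraycopy(records[i], 0, out, 1 + i * RECORD_SIZE, RECORD_SIZE);
        return out;
    }

    static byte[][] read(byte[] data) {
        int count = data[0] & 0xff;
        byte[][] records = new byte[count][];
        for (int i = 0; i < count; i++)
            records[i] = Arrays.copyOfRange(data, 1 + i * RECORD_SIZE, 1 + (i + 1) * RECORD_SIZE);
        return records;
    }
}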
@@ -0,0 +1,70 @@
package net.i2p.data.i2np;

import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;

/**
 * Transmitted from the new outbound endpoint to the creator through a
 * reply tunnel.
 * Variable number of records.
 *
 * @since 0.7.12
 */
public class VariableTunnelBuildReplyMessage extends TunnelBuildReplyMessage {
    public static final int MESSAGE_TYPE = 24;

    /** zero record count, will be set with readMessage() */
    public VariableTunnelBuildReplyMessage(I2PAppContext context) {
        super(context, 0);
    }

    public VariableTunnelBuildReplyMessage(I2PAppContext context, int records) {
        super(context, records);
    }

    @Override
    protected int calculateWrittenLength() { return 1 + super.calculateWrittenLength(); }

    @Override
    public int getType() { return MESSAGE_TYPE; }

    @Override
    public void readMessage(byte[] data, int offset, int dataSize, int type) throws I2NPMessageException {
        // message type will be checked in super()
        int r = (int)DataHelper.fromLong(data, offset, 1);
        if (r <= 0 || r > MAX_RECORD_COUNT)
            throw new I2NPMessageException("Bad record count " + r);
        RECORD_COUNT = r;
        if (dataSize != calculateWrittenLength())
            throw new I2NPMessageException("Wrong length (expects " + calculateWrittenLength() + ", recv " + dataSize + ")");
        _records = new ByteArray[RECORD_COUNT];
        super.readMessage(data, offset + 1, dataSize, type);
    }

    @Override
    protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException {
        int remaining = out.length - (curIndex + calculateWrittenLength());
        if (remaining < 0)
            throw new I2NPMessageException("Not large enough (too short by " + remaining + ")");
        if (RECORD_COUNT <= 0 || RECORD_COUNT > MAX_RECORD_COUNT)
            throw new I2NPMessageException("Bad record count " + RECORD_COUNT);
        DataHelper.toLong(out, curIndex++, 1, RECORD_COUNT);
        // can't call super, written length check will fail
        //return super.writeMessageBody(out, curIndex + 1);
        for (int i = 0; i < RECORD_COUNT; i++) {
            System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE);
            curIndex += RECORD_SIZE;
        }
        return curIndex;
    }

    @Override
    public String toString() {
        StringBuilder buf = new StringBuilder(64);
        buf.append("[VariableTunnelBuildReplyMessage: " +
                   "\n\tRecords: ").append(getRecordCount())
           .append(']');
        return buf.toString();
    }
}
@@ -7,31 +7,30 @@ communicate with the network routers. In addition, various transport protocols d
the specifics of how data is passed from one router to another over the network. I2NP
does not specify or require any particular transport layer, allowing transport protocols
to work over TCP, Polling HTTP, SMTP+POP3/IMAP, UDP, among anything else
that can pass data. I2NP merely requires that they:
that can pass data. I2NP merely requires that they:</p>
<ul>
<li>
Register a unique identifier for use in RouterAddress structures consisting of no
more than 32 UTF-8 characters.
more than 32 UTF-8 characters.</li>
<li>
Define standard text based options that uniquely define a contact method (for
example "hostname" and "port" or "email address") as usable in the
RouterAddress structure's set of options.
RouterAddress structure's set of options.</li>
<li>
Provide a means to reliably deliver a chunk of data, where the contents of any
particular chunk is delivered in order. However, different chunks of data do not
need to be delivered in order.
need to be delivered in order.</li>
<li>
Secure the chunks of data from alteration or disclosure (e.g. encrypt them and use
checksums).
checksums).</li>
<li>
Enable the router to control the transport's bandwidth usage.
Enable the router to control the transport's bandwidth usage.</li>
<li>
Provide estimates for the latency and bandwidth associated with passing a chunk of
data.
data.</li>
<li>
Provide a programmable interface suitable for integration with various routers.
Provide a programmable interface suitable for integration with various routers.</li>
</ul>
</p>
<p>
Transports themselves can implement advanced features, such as steganography,
constant rate delivery, dummy message delivery, and may even run on top of existing
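Taken together, the requirements listed in the hunk above describe a small transport contract. Purely as an illustration (this is not the real I2P transport interface; every name here is invented for the sketch), such a contract could look like:

import java.util.Map;

// Hypothetical sketch of a transport contract matching the requirements above.
// NOT the actual net.i2p.router.transport API.
public interface TransportSketch {
    /** unique style identifier, at most 32 UTF-8 characters, used in RouterAddress */
    String getStyle();

    /** contact options ("host", "port", ...) published in the RouterAddress */
    Map<String, String> getContactOptions();

    /** reliably deliver one chunk; bytes within a chunk arrive in order */
    void send(byte[] chunk, String toPeer);

    /** cap outbound usage so the router can control bandwidth */
    void setBandwidthLimit(int bytesPerSecond);

    /** rough latency estimate for delivering a chunk to the peer, in milliseconds */
    long estimateLatency(String toPeer);
}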
@@ -56,12 +55,12 @@ message to be sent. The router then determines where to send it, delivers it thr
outbound tunnels, instructing the end point to pass it along to the appropriate inbound
tunnel, where it is passed along again to that tunnel's end point and made available to
the target for reception. To understand fully, each step in the process must be
explained in detail.
explained in detail.</p>
<ul>
<li>
First, once the originating router receives the message and the Destination, it
attempts to find the LeaseSet associated with it as stored in the Network Database
under the key calculated by SHA256 of the Destination.
under the key calculated by SHA256 of the Destination.</li>
<li>
The router then builds a GarlicMessage addressed to the SHA256 of the
PublicKey from the LeaseSet with the real data to be delivered. This
@@ -71,48 +70,48 @@ and in fact, if the source router desires guaranteed delivery, it will include a
requesting source route delivery of a DeliveryStatusMessage back to itself. The
body of the GarlicMessage with all enclosed GarlicCloves is encrypted to the key
specified on the LeaseSet using the ElGamal+AES256 algorithm described in the
data structure spec.
data structure spec.</li>
<li>
The router then selects one or more outbound tunnels through which the
GarlicMessage will be delivered.
GarlicMessage will be delivered.</li>
<li>
Then the router selects one or more of those Lease structures from the LeaseSet
and constructs a TunnelMessage along with DeliveryInstructions for the
outbound tunnel's end point to deliver the GarlicMessage to the inbound tunnel's
gateway router.
gateway router.</li>
<li>
The source router then passes the various TunnelMessages down the outbound
tunnel to that tunnel's end point, where the instructions are decrypted, specifying
where the message should be delivered.
where the message should be delivered.</li>
<li>
At this point, the end point must determine how to contact the router specified in
the decrypted DeliveryInstructions, perhaps looking up RouterInfo or
LeaseSet structures in the Network Database, and maybe even delaying a
requested period of time before passing on the message.
requested period of time before passing on the message.</li>
<li>
Once the tunnel end point has the data it needs to contact the inbound tunnel's
gateway router, it then attempts to contact it either directly through one of its public
RouterAddress or source routed through one of its published trusted peers. Over
this medium the tunnel end point delivers the GarlicMessage as it was wrapped by
the source router, along with the TunnelId.
the source router, along with the TunnelId.</li>
<li>
Once delivered to the inbound tunnel's gateway, the gateway builds a
TunnelMessage wrapping the GarlicMessage, encrypting a
DeliveryInstructions to specify local delivery upon arrival at the tunnel's end
point.
point.</li>
<li>
Once the TunnelMessage is passed down to the end point in inbound tunnel, the
router opens the DeliveryInstructions, notes the request to deliver it locally,
and then proceeds to review the contents of the TunnelMessage's payload, which in
this case is a GarlicMessage addressed to the SHA256 of a LeaseSet that it has
published. It then decrypts the payload of the message with ElGamal + AES256.
published. It then decrypts the payload of the message with ElGamal + AES256.</li>
<li>
After opening up the GarlicMessage, it reviews each of the GarlicCloves and
processes them each. Cloves with DeliveryInstructions addressed to a local
Destination are delivered to the associated client application, other cloves asking
for local processing (e.g. Network Database messages or DeliveryStatusMessages)
are processed, and cloves asking for forwarding to other routers are passed off for
delivery.
delivery.</li>
</ul>
<p>
There are several important points of note in this scenario. First, the source router
@@ -5,23 +5,33 @@ package net.i2p.router;
|
||||
* zzz 2008-06
|
||||
*/
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.Writer;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterAddress;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
|
||||
import net.i2p.util.HexDump;
|
||||
import net.i2p.util.Addresses;
|
||||
import net.i2p.util.ConcurrentHashSet;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.Translate;
|
||||
|
||||
/**
|
||||
* Manage blocking by IP address, in a manner similar to the Shitlist,
|
||||
@@ -38,9 +48,11 @@ import net.i2p.util.Log;
|
||||
* And the on-disk blocklist can also contain router hashes to be shitlisted.
|
||||
*
|
||||
* So, this class maintains three separate lists:
|
||||
*<pre>
|
||||
* 1) The list of IP ranges, read in from a file at startup
|
||||
* 2) The list of hashes, read in from the same file
|
||||
* 3) A list of single IPs, initially empty, added to as needed
|
||||
*</pre>
|
||||
*
|
||||
* Read in the IP blocklist from a file, store it in-memory as efficiently
|
||||
* as we can, and perform tests against it as requested.
|
||||
@@ -51,32 +63,25 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class Blocklist {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private long _blocklist[];
|
||||
private int _blocklistSize;
|
||||
private Object _lock;
|
||||
private final Object _lock = new Object();
|
||||
private Entry _wrapSave;
|
||||
private Set _inProcess;
|
||||
private Map _peerBlocklist;
|
||||
private Set _singleIPBlocklist;
|
||||
private final Set<Hash> _inProcess = new HashSet(4);
|
||||
private Map<Hash, String> _peerBlocklist = new HashMap(4);
|
||||
private final Set<Integer> _singleIPBlocklist = new ConcurrentHashSet(4);
|
||||
|
||||
public Blocklist(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(Blocklist.class);
|
||||
_blocklist = null;
|
||||
_blocklistSize = 0;
|
||||
_lock = new Object();
|
||||
_wrapSave = null;
|
||||
_inProcess = new HashSet(0);
|
||||
_peerBlocklist = new HashMap(0);
|
||||
_singleIPBlocklist = new HashSet(0);
|
||||
}
|
||||
|
||||
public Blocklist() {
|
||||
/** only for testing with main() */
|
||||
private Blocklist() {
|
||||
_context = null;
|
||||
_log = new Log(Blocklist.class);
|
||||
_blocklist = null;
|
||||
_blocklistSize = 0;
|
||||
}
|
||||
|
||||
static final String PROP_BLOCKLIST_ENABLED = "router.blocklist.enable";
|
||||
@@ -98,7 +103,7 @@ public class Blocklist {
|
||||
}
|
||||
|
||||
private class ReadinJob extends JobImpl {
|
||||
private String _file;
|
||||
private final String _file;
|
||||
public ReadinJob (String f) {
|
||||
super(_context);
|
||||
_file = f;
|
||||
@@ -114,13 +119,15 @@ public class Blocklist {
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (Iterator iter = _peerBlocklist.keySet().iterator(); iter.hasNext(); ) {
|
||||
Hash peer = (Hash) iter.next();
|
||||
String reason = "Blocklisted by router hash";
|
||||
String comment = (String) _peerBlocklist.get(peer);
|
||||
for (Iterator<Hash> iter = _peerBlocklist.keySet().iterator(); iter.hasNext(); ) {
|
||||
Hash peer = iter.next();
|
||||
String reason;
|
||||
String comment = _peerBlocklist.get(peer);
|
||||
if (comment != null)
|
||||
reason = reason + ": " + comment;
|
||||
_context.shitlist().shitlistRouterForever(peer, reason);
|
||||
reason = _x("Banned by router hash: {0}");
|
||||
else
|
||||
reason = _x("Banned by router hash");
|
||||
_context.shitlist().shitlistRouterForever(peer, reason, comment);
|
||||
}
|
||||
_peerBlocklist = null;
|
||||
|
||||
@@ -128,8 +135,8 @@ public class Blocklist {
|
||||
return;
|
||||
FloodfillNetworkDatabaseFacade fndf = (FloodfillNetworkDatabaseFacade) _context.netDb();
|
||||
int count = 0;
|
||||
for (Iterator iter = fndf.getKnownRouterData().iterator(); iter.hasNext(); ) {
|
||||
RouterInfo ri = (RouterInfo) iter.next();
|
||||
for (Iterator<RouterInfo> iter = fndf.getKnownRouterData().iterator(); iter.hasNext(); ) {
|
||||
RouterInfo ri = iter.next();
|
||||
Hash peer = ri.getIdentity().getHash();
|
||||
if (isBlocklisted(peer))
|
||||
count++;
|
||||
@@ -171,6 +178,8 @@ public class Blocklist {
|
||||
*/
|
||||
private void readBlocklistFile(String file) {
|
||||
File BLFile = new File(file);
|
||||
if (!BLFile.isAbsolute())
|
||||
BLFile = new File(_context.getConfigDir(), file);
|
||||
if (BLFile == null || (!BLFile.exists()) || BLFile.length() <= 0) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Blocklist file not found: " + file);
|
||||
@@ -191,10 +200,10 @@ public class Blocklist {
|
||||
FileInputStream in = null;
|
||||
try {
|
||||
in = new FileInputStream(BLFile);
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
while (DataHelper.readLine(in, buf) && count < maxSize) {
|
||||
BufferedReader br = new BufferedReader(new InputStreamReader(in, "UTF-8"));
|
||||
String buf = null;
|
||||
while ((buf = br.readLine()) != null && count < maxSize) {
|
||||
Entry e = parse(buf, true);
|
||||
buf.setLength(0);
|
||||
if (e == null) {
|
||||
badcount++;
|
||||
continue;
|
||||
@@ -257,10 +266,10 @@ public class Blocklist {
|
||||
}
|
||||
|
||||
private static class Entry {
|
||||
String comment;
|
||||
byte ip1[];
|
||||
byte ip2[];
|
||||
Hash peer;
|
||||
final String comment;
|
||||
final byte ip1[];
|
||||
final byte ip2[];
|
||||
final Hash peer;
|
||||
|
||||
public Entry(String c, Hash h, byte[] i1, byte[] i2) {
|
||||
comment = c;
|
||||
@@ -270,17 +279,17 @@ public class Blocklist {
|
||||
}
|
||||
}
|
||||
|
||||
private Entry parse(StringBuffer buf, boolean bitch) {
|
||||
private Entry parse(String buf, boolean bitch) {
|
||||
byte[] ip1;
|
||||
byte[] ip2;
|
||||
int start1 = 0;
|
||||
int end1 = buf.length();
|
||||
if (end1 <= 0)
|
||||
return null; // blank
|
||||
if (buf.charAt(end1 - 1) == '\r') { // DataHelper.readLine leaves the \r on there
|
||||
buf.deleteCharAt(end1 - 1);
|
||||
end1--;
|
||||
}
|
||||
//if (buf.charAt(end1 - 1) == '\r') { // DataHelper.readLine leaves the \r on there
|
||||
// buf.deleteCharAt(end1 - 1);
|
||||
// end1--;
|
||||
//}
|
||||
if (end1 <= 0)
|
||||
return null; // blank
|
||||
int start2 = -1;
|
||||
@@ -297,7 +306,7 @@ public class Blocklist {
|
||||
if (end1 - start1 == 44 && buf.substring(start1).indexOf(".") < 0) {
|
||||
byte b[] = Base64.decode(buf.substring(start1));
|
||||
if (b != null)
|
||||
return new Entry(comment, new Hash(b), null, null);
|
||||
return new Entry(comment, Hash.create(b), null, null);
|
||||
}
|
||||
index = buf.indexOf("-", start1);
|
||||
if (index >= 0) {
|
||||
@@ -381,10 +390,9 @@ public class Blocklist {
|
||||
FileInputStream in = null;
|
||||
try {
|
||||
in = new FileInputStream(BLFile);
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
while (DataHelper.readLine(in, buf)) {
|
||||
BufferedReader br = new BufferedReader(new InputStreamReader(in, "ISO-8859-1"));
|
||||
while (br.readLine() != null) {
|
||||
lines++;
|
||||
buf.setLength(0);
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
@@ -424,9 +432,11 @@ public class Blocklist {
|
||||
return lines;
|
||||
}
|
||||
|
||||
// Maintain a simple in-memory single-IP blocklist
|
||||
// This is used for new additions, NOT for the main list
|
||||
// of IP ranges read in from the file.
|
||||
/**
|
||||
* Maintain a simple in-memory single-IP blocklist
|
||||
* This is used for new additions, NOT for the main list
|
||||
* of IP ranges read in from the file.
|
||||
*/
|
||||
public void add(String ip) {
|
||||
InetAddress pi;
|
||||
try {
|
||||
@@ -439,46 +449,39 @@ public class Blocklist {
|
||||
add(pib);
|
||||
}
|
||||
|
||||
/**
|
||||
* Maintain a simple in-memory single-IP blocklist
|
||||
* This is used for new additions, NOT for the main list
|
||||
* of IP ranges read in from the file.
|
||||
*/
|
||||
public void add(byte ip[]) {
|
||||
if (ip.length != 4)
|
||||
return;
|
||||
if (add(toInt(ip)))
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Adding IP to blocklist: " + (ip[0]&0xff) + '.' + (ip[1]&0xff) + '.' + (ip[2]&0xff) + '.' + (ip[3]&0xff));
|
||||
_log.warn("Adding IP to blocklist: " + Addresses.toString(ip));
|
||||
}
|
||||
|
||||
private boolean add(int ip) {
|
||||
synchronized(_singleIPBlocklist) {
|
||||
return _singleIPBlocklist.add(new Integer(ip));
|
||||
}
|
||||
return _singleIPBlocklist.add(Integer.valueOf(ip));
|
||||
}
|
||||
|
||||
private boolean isOnSingleList(int ip) {
|
||||
synchronized(_singleIPBlocklist) {
|
||||
return _singleIPBlocklist.contains(new Integer(ip));
|
||||
}
|
||||
return _singleIPBlocklist.contains(Integer.valueOf(ip));
|
||||
}
|
||||
|
||||
/**
|
||||
* this tries to not return duplicates
|
||||
* but I suppose it could.
|
||||
*/
|
||||
public List getAddresses(Hash peer) {
|
||||
List rv = new ArrayList(1);
|
||||
private List<byte[]> getAddresses(Hash peer) {
|
||||
List<byte[]> rv = new ArrayList(1);
|
||||
RouterInfo pinfo = _context.netDb().lookupRouterInfoLocally(peer);
|
||||
if (pinfo == null) return rv;
|
||||
Set paddr = pinfo.getAddresses();
|
||||
if (paddr == null || paddr.size() == 0)
|
||||
return rv;
|
||||
String oldphost = null;
|
||||
List pladdr = new ArrayList(paddr);
|
||||
// for each peer address
|
||||
for (int j = 0; j < paddr.size(); j++) {
|
||||
RouterAddress pa = (RouterAddress) pladdr.get(j);
|
||||
if (pa == null) continue;
|
||||
Properties pprops = pa.getOptions();
|
||||
if (pprops == null) continue;
|
||||
String phost = pprops.getProperty("host");
|
||||
for (RouterAddress pa : pinfo.getAddresses()) {
|
||||
String phost = pa.getOption("host");
|
||||
if (phost == null) continue;
|
||||
if (oldphost != null && oldphost.equals(phost)) continue;
|
||||
oldphost = phost;
|
||||
@@ -500,20 +503,22 @@ public class Blocklist {
|
||||
* If so, and it isn't shitlisted, shitlist it forever...
|
||||
*/
|
||||
public boolean isBlocklisted(Hash peer) {
|
||||
List ips = getAddresses(peer);
|
||||
for (Iterator iter = ips.iterator(); iter.hasNext(); ) {
|
||||
byte ip[] = (byte[]) iter.next();
|
||||
List<byte[]> ips = getAddresses(peer);
|
||||
for (Iterator<byte[]> iter = ips.iterator(); iter.hasNext(); ) {
|
||||
byte ip[] = iter.next();
|
||||
if (isBlocklisted(ip)) {
|
||||
if (! _context.shitlist().isShitlisted(peer))
|
||||
// nice knowing you...
|
||||
shitlist(peer);
|
||||
shitlist(peer, ip);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// calling this externally won't shitlist the peer, this is just an IP check
|
||||
/**
|
||||
* calling this externally won't shitlist the peer, this is just an IP check
|
||||
*/
|
||||
public boolean isBlocklisted(String ip) {
|
||||
InetAddress pi;
|
||||
try {
|
||||
@@ -526,7 +531,9 @@ public class Blocklist {
|
||||
return isBlocklisted(pib);
|
||||
}
|
||||
|
||||
// calling this externally won't shitlist the peer, this is just an IP check
|
||||
/**
|
||||
* calling this externally won't shitlist the peer, this is just an IP check
|
||||
*/
|
||||
public boolean isBlocklisted(byte ip[]) {
|
||||
if (ip.length != 4)
|
||||
return false;
|
||||
@@ -588,11 +595,11 @@ public class Blocklist {
|
||||
|
||||
// methods to get and store the from/to values in the array
|
||||
|
||||
private int getFrom(long entry) {
|
||||
private static int getFrom(long entry) {
|
||||
return (int) ((entry >> 32) & 0xffffffff);
|
||||
}
|
||||
|
||||
private int getTo(long entry) {
|
||||
private static int getTo(long entry) {
|
||||
return (int) (entry & 0xffffffff);
|
||||
}
|
||||
|
||||
@@ -604,7 +611,7 @@ public class Blocklist {
|
||||
* So the size is (cough) almost 2MB for the 240,000 line splist.txt.
|
||||
*
|
||||
*/
|
||||
private long toEntry(byte ip1[], byte ip2[]) {
|
||||
private static long toEntry(byte ip1[], byte ip2[]) {
|
||||
long entry = 0;
|
||||
for (int i = 0; i < 4; i++)
|
||||
entry |= ((long) (ip2[i] & 0xff)) << ((3-i)*8);
|
||||
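The toEntry()/getFrom()/getTo() helpers in this hunk pack one IPv4 range into a single long: the "from" address in the high 32 bits and the "to" address in the low 32 bits, each stored big-endian byte by byte. Below is a standalone sketch of that packing with a tiny worked example; the names are invented and only the bit packing is shown, not the storage or search logic.

// Standalone sketch of the Blocklist range packing shown above:
// "from" IP in the high 32 bits, "to" IP in the low 32 bits, bytes big-endian.
public class RangePackSketch {
    static int toInt(byte[] ip) {
        int rv = 0;
        for (int i = 0; i < 4; i++)
            rv |= (ip[i] & 0xff) << ((3 - i) * 8);
        return rv;
    }

    static long toEntry(byte[] from, byte[] to) {
        return ((long) toInt(from) << 32) | (toInt(to) & 0xffffffffL);
    }

    static int getFrom(long entry) { return (int) (entry >> 32); }
    static int getTo(long entry)   { return (int) entry; }

    public static void main(String[] args) {
        byte[] from = { (byte) 192, (byte) 168, 0, 0 };
        byte[] to   = { (byte) 192, (byte) 168, (byte) 255, (byte) 255 };
        long e = toEntry(from, to);
        // prints "c0a80000 c0a8ffff": 192.168.0.0 - 192.168.255.255
        System.out.println(Integer.toHexString(getFrom(e)) + " " + Integer.toHexString(getTo(e)));
    }
}

Packing each range into one long is what keeps the in-memory list at eight bytes per entry, which is the space figure the surrounding comment refers to.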
@@ -623,15 +630,15 @@ public class Blocklist {
|
||||
_blocklist[idx] = entry;
|
||||
}
|
||||
|
||||
private int toInt(byte ip[]) {
|
||||
private static int toInt(byte ip[]) {
|
||||
int rv = 0;
|
||||
for (int i = 0; i < 4; i++)
|
||||
rv |= (ip[i] & 0xff) << ((3-i)*8);
|
||||
return rv;
|
||||
}
|
||||
|
||||
private String toStr(long entry) {
|
||||
StringBuffer buf = new StringBuffer(32);
|
||||
private static String toStr(long entry) {
|
||||
StringBuilder buf = new StringBuilder(32);
|
||||
for (int i = 7; i >= 0; i--) {
|
||||
buf.append((entry >> (8*i)) & 0xff);
|
||||
if (i == 4)
|
||||
@@ -642,8 +649,8 @@ public class Blocklist {
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private String toStr(int ip) {
|
||||
StringBuffer buf = new StringBuffer(16);
|
||||
private static String toStr(int ip) {
|
||||
StringBuilder buf = new StringBuilder(16);
|
||||
for (int i = 3; i >= 0; i--) {
|
||||
buf.append((ip >> (8*i)) & 0xff);
|
||||
if (i > 0)
|
||||
@@ -660,10 +667,11 @@ public class Blocklist {
|
||||
* actual line in the blocklist file, this could take a while.
|
||||
*
|
||||
*/
|
||||
public void shitlist(Hash peer) {
|
||||
private void shitlist(Hash peer, byte[] ip) {
|
||||
// Temporary reason, until the job finishes
|
||||
_context.shitlist().shitlistRouterForever(peer, "IP Blocklisted");
|
||||
if (! "true".equals( _context.getProperty(PROP_BLOCKLIST_DETAIL, "true")))
|
||||
String reason = _x("IP banned by blocklist.txt entry {0}");
|
||||
_context.shitlist().shitlistRouterForever(peer, reason, Addresses.toString(ip));
|
||||
if (! _context.getBooleanPropertyDefaultTrue(PROP_BLOCKLIST_DETAIL))
|
||||
return;
|
||||
boolean shouldRunJob;
|
||||
int number;
|
||||
@@ -673,21 +681,24 @@ public class Blocklist {
|
||||
}
|
||||
if (!shouldRunJob)
|
||||
return;
|
||||
Job job = new ShitlistJob(peer);
|
||||
// get the IPs now because it won't be in the netdb by the time the job runs
|
||||
Job job = new ShitlistJob(peer, getAddresses(peer));
|
||||
if (number > 0)
|
||||
job.getTiming().setStartAfter(_context.clock().now() + (number * 30*1000));
|
||||
job.getTiming().setStartAfter(_context.clock().now() + (30*1000l * number));
|
||||
_context.jobQueue().addJob(job);
|
||||
}
|
||||
|
||||
private class ShitlistJob extends JobImpl {
|
||||
private Hash _peer;
|
||||
public ShitlistJob (Hash p) {
|
||||
private final Hash _peer;
|
||||
private final List<byte[]> _ips;
|
||||
public ShitlistJob (Hash p, List<byte[]> ips) {
|
||||
super(_context);
|
||||
_peer = p;
|
||||
_ips = ips;
|
||||
}
|
||||
public String getName() { return "Shitlist Peer Forever"; }
|
||||
public String getName() { return "Ban Peer by IP"; }
|
||||
public void runJob() {
|
||||
shitlistForever(_peer);
|
||||
shitlistForever(_peer, _ips);
|
||||
synchronized (_inProcess) {
|
||||
_inProcess.remove(_peer);
|
||||
}
|
||||
@@ -703,9 +714,11 @@ public class Blocklist {
|
||||
* So we also stagger these jobs.
|
||||
*
|
||||
*/
|
||||
private synchronized void shitlistForever(Hash peer) {
|
||||
private synchronized void shitlistForever(Hash peer, List<byte[]> ips) {
|
||||
String file = _context.getProperty(PROP_BLOCKLIST_FILE, BLOCKLIST_FILE_DEFAULT);
|
||||
File BLFile = new File(file);
|
||||
if (!BLFile.isAbsolute())
|
||||
BLFile = new File(_context.getConfigDir(), file);
|
||||
if (BLFile == null || (!BLFile.exists()) || BLFile.length() <= 0) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Blocklist file not found: " + file);
|
||||
@@ -713,36 +726,35 @@ public class Blocklist {
|
||||
}
|
||||
|
||||
// look through the file for each address to find which one was the cause
|
||||
List ips = getAddresses(peer);
|
||||
for (Iterator iter = ips.iterator(); iter.hasNext(); ) {
|
||||
byte ip[] = (byte[]) iter.next();
|
||||
for (Iterator<byte[]> iter = ips.iterator(); iter.hasNext(); ) {
|
||||
byte ip[] = iter.next();
|
||||
int ipint = toInt(ip);
|
||||
FileInputStream in = null;
|
||||
try {
|
||||
in = new FileInputStream(BLFile);
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
BufferedReader br = new BufferedReader(new InputStreamReader(in, "UTF-8"));
|
||||
String buf = null;
|
||||
// assume the file is unsorted, so go through the whole thing
|
||||
while (DataHelper.readLine(in, buf)) {
|
||||
while ((buf = br.readLine()) != null) {
|
||||
Entry e = parse(buf, false);
|
||||
if (e == null || e.peer != null) {
|
||||
buf.setLength(0);
|
||||
continue;
|
||||
}
|
||||
if (match(ipint, toEntry(e.ip1, e.ip2))) {
|
||||
try { in.close(); } catch (IOException ioe) {}
|
||||
String reason = "IP ";
|
||||
for (int i = 0; i < 4; i++) {
|
||||
reason = reason + (ip[i] & 0xff);
|
||||
if (i != 3)
|
||||
reason = reason + '.';
|
||||
}
|
||||
reason = reason + " blocklisted by entry \"" + buf + "\"";
|
||||
String reason = _x("IP banned by blocklist.txt entry {0}");
|
||||
// only one translate parameter for now
|
||||
//for (int i = 0; i < 4; i++) {
|
||||
// reason = reason + (ip[i] & 0xff);
|
||||
// if (i != 3)
|
||||
// reason = reason + '.';
|
||||
//}
|
||||
//reason = reason + " banned by " + BLOCKLIST_FILE_DEFAULT + " entry \"" + buf + "\"";
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Shitlisting " + peer + " " + reason);
|
||||
_context.shitlist().shitlistRouterForever(peer, reason);
|
||||
_context.shitlist().shitlistRouterForever(peer, reason, buf.toString());
|
||||
return;
|
||||
}
|
||||
buf.setLength(0);
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
@@ -754,40 +766,108 @@ public class Blocklist {
|
||||
// We already shitlisted in shitlist(peer), that's good enough
|
||||
}
|
||||
|
||||
private static final int MAX_DISPLAY = 1000;
|
||||
|
||||
/**
|
||||
* Write directly to the stream so we don't OOM on a huge list.
|
||||
* Go through each list twice since we store out-of-order.
|
||||
*
|
||||
* TODO move to routerconsole, but that would require exposing the _blocklist array.
|
||||
*/
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
buf.append("<h2>IP Blocklist</h2>");
|
||||
Set singles = new TreeSet();
|
||||
synchronized(_singleIPBlocklist) {
|
||||
singles.addAll(_singleIPBlocklist);
|
||||
}
|
||||
if (singles.size() > 0) {
|
||||
buf.append("<table><tr><td><b>Transient IPs</b></td></tr>");
|
||||
for (Iterator iter = singles.iterator(); iter.hasNext(); ) {
|
||||
int ip = ((Integer) iter.next()).intValue();
|
||||
buf.append("<tr><td align=right>").append(toStr(ip)).append("</td></tr>\n");
|
||||
// move to the jsp
|
||||
//out.write("<h2>Banned IPs</h2>");
|
||||
Set<Integer> singles = new TreeSet();
|
||||
singles.addAll(_singleIPBlocklist);
|
||||
if (!singles.isEmpty()) {
|
||||
out.write("<table><tr><th align=\"center\" colspan=\"2\"><b>");
|
||||
out.write(_("IPs Banned Until Restart"));
|
||||
out.write("</b></td></tr>");
|
||||
// first 0 - 127
|
||||
for (Integer ii : singles) {
|
||||
int ip = ii.intValue();
|
||||
if (ip < 0)
|
||||
continue;
|
||||
out.write("<tr><td align=\"center\" width=\"50%\">");
|
||||
out.write(toStr(ip));
|
||||
out.write("</td><td width=\"50%\"> </td></tr>\n");
|
||||
}
|
||||
buf.append("</table>");
|
||||
// then 128 - 255
|
||||
for (Integer ii : singles) {
|
||||
int ip = ii.intValue();
|
||||
if (ip >= 0)
|
||||
break;
|
||||
out.write("<tr><td align=\"center\" width=\"50%\">");
|
||||
out.write(toStr(ip));
|
||||
out.write("</td><td width=\"50%\"> </td></tr>\n");
|
||||
}
|
||||
out.write("</table>");
|
||||
}
|
||||
if (_blocklistSize > 0) {
|
||||
buf.append("<table><tr><td align=center colspan=2><b>IPs from Blocklist File</b></td></tr><tr><td align=center><b>From</b></td><td align=center><b>To</b></td></tr>");
|
||||
for (int i = 0; i < _blocklistSize; i++) {
|
||||
out.write("<table><tr><th align=\"center\" colspan=\"2\"><b>");
|
||||
out.write(_("IPs Permanently Banned"));
|
||||
out.write("</b></th></tr><tr><td align=\"center\" width=\"50%\"><b>");
|
||||
out.write(_("From"));
|
||||
out.write("</b></td><td align=\"center\" width=\"50%\"><b>");
|
||||
out.write(_("To"));
|
||||
out.write("</b></td></tr>");
|
||||
int max = Math.min(_blocklistSize, MAX_DISPLAY);
|
||||
int displayed = 0;
|
||||
// first 0 - 127
|
||||
for (int i = 0; i < max; i++) {
|
||||
int from = getFrom(_blocklist[i]);
|
||||
buf.append("<tr><td align=right>").append(toStr(from)).append("</td><td align=right>");
|
||||
if (from < 0)
|
||||
continue;
|
||||
out.write("<tr><td align=\"center\" width=\"50%\">"); out.write(toStr(from)); out.write("</td><td align=\"center\" width=\"50%\">");
|
||||
int to = getTo(_blocklist[i]);
|
||||
if (to != from)
|
||||
buf.append(toStr(to)).append("</td></tr>\n");
|
||||
else
|
||||
buf.append(" </td></tr>\n");
|
||||
if (to != from) {
|
||||
out.write(toStr(to)); out.write("</td></tr>\n");
|
||||
} else
|
||||
out.write(" </td></tr>\n");
|
||||
displayed++;
|
||||
}
|
||||
buf.append("</table>");
|
||||
// then 128 - 255
|
||||
for (int i = 0; i < max && displayed++ < max; i++) {
|
||||
int from = getFrom(_blocklist[i]);
|
||||
if (from >= 0)
|
||||
break;
|
||||
out.write("<tr><td align=\"center\" width=\"50%\">"); out.write(toStr(from)); out.write("</td><td align=\"center\" width=\"50%\">");
|
||||
int to = getTo(_blocklist[i]);
|
||||
if (to != from) {
|
||||
out.write(toStr(to)); out.write("</td></tr>\n");
|
||||
} else
|
||||
out.write(" </td></tr>\n");
|
||||
}
|
||||
if (_blocklistSize > MAX_DISPLAY)
|
||||
// very rare, don't bother translating
|
||||
out.write("<tr><th colspan=2>First " + MAX_DISPLAY + " displayed, see the " +
|
||||
BLOCKLIST_FILE_DEFAULT + " file for the full list</th></tr>");
|
||||
out.write("</table>");
|
||||
} else {
|
||||
buf.append("<br>No blocklist file entries");
|
||||
out.write("<br><i>");
|
||||
out.write(_("none"));
|
||||
out.write("</i>");
|
||||
}
|
||||
out.write(buf.toString());
|
||||
out.flush();
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a string for extraction by xgettext and translation.
|
||||
* Use this only in static initializers.
|
||||
* It does not translate!
|
||||
* @return s
|
||||
*/
|
||||
private static final String _x(String s) {
|
||||
return s;
|
||||
}
|
||||
|
||||
private static final String BUNDLE_NAME = "net.i2p.router.web.messages";
|
||||
|
||||
/** translate */
|
||||
private String _(String key) {
|
||||
return Translate.getString(key, _context, BUNDLE_NAME);
|
||||
}
|
||||
|
||||
public static void main(String args[]) {
|
||||
Blocklist b = new Blocklist();
|
||||
if ( (args != null) && (args.length == 1) )
|
||||
|
||||
@@ -13,6 +13,7 @@ import java.io.Writer;
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
@@ -72,6 +73,7 @@ public abstract class ClientManagerFacade implements Service {
|
||||
public abstract void messageReceived(ClientMessage msg);
|
||||
|
||||
public boolean verifyClientLiveliness() { return true; }
|
||||
public boolean isAlive() { return true; }
|
||||
/**
|
||||
* Does the client specified want their leaseSet published?
|
||||
*/
|
||||
@@ -83,12 +85,16 @@ public abstract class ClientManagerFacade implements Service {
|
||||
*
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
public Set listClients() { return Collections.EMPTY_SET; }
|
||||
public Set<Destination> listClients() { return Collections.EMPTY_SET; }
|
||||
|
||||
/**
|
||||
* Return the client's current config, or null if not connected
|
||||
*
|
||||
*/
|
||||
public abstract SessionConfig getClientSessionConfig(Destination dest);
|
||||
public abstract SessionKeyManager getClientSessionKeyManager(Hash dest);
|
||||
public void renderStatusHTML(Writer out) throws IOException { }
|
||||
|
||||
/** @since 0.8.8 */
|
||||
public abstract void shutdown(String msg);
|
||||
}
|
||||
|
||||
@@ -23,21 +23,15 @@ public class ClientMessage {
|
||||
private Payload _payload;
|
||||
private Destination _destination;
|
||||
private Destination _fromDestination;
|
||||
private MessageReceptionInfo _receptionInfo;
|
||||
//private MessageReceptionInfo _receptionInfo;
|
||||
private SessionConfig _senderConfig;
|
||||
private Hash _destinationHash;
|
||||
private MessageId _messageId;
|
||||
private long _expiration;
|
||||
/** only for outbound messages */
|
||||
private int _flags;
|
||||
|
||||
public ClientMessage() {
|
||||
setPayload(null);
|
||||
setDestination(null);
|
||||
setFromDestination(null);
|
||||
setReceptionInfo(null);
|
||||
setSenderConfig(null);
|
||||
setDestinationHash(null);
|
||||
setMessageId(null);
|
||||
setExpiration(0);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -83,8 +77,8 @@ public class ClientMessage {
|
||||
* originated ones.
|
||||
*
|
||||
*/
|
||||
public MessageReceptionInfo getReceptionInfo() { return _receptionInfo; }
|
||||
public void setReceptionInfo(MessageReceptionInfo info) { _receptionInfo = info; }
|
||||
//public MessageReceptionInfo getReceptionInfo() { return _receptionInfo; }
|
||||
//public void setReceptionInfo(MessageReceptionInfo info) { _receptionInfo = info; }
|
||||
|
||||
/**
|
||||
* Retrieve the session config of the client that sent the message. This will only be available
|
||||
@@ -101,4 +95,17 @@ public class ClientMessage {
|
||||
*/
|
||||
public long getExpiration() { return _expiration; }
|
||||
public void setExpiration(long e) { _expiration = e; }
|
||||
|
||||
/**
|
||||
* Flags requested by the client that sent the message. This will only be available
|
||||
* for locally originated messages.
|
||||
*
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public int getFlags() { return _flags; }
|
||||
|
||||
/**
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public void setFlags(int f) { _flags = f; }
|
||||
}
|
||||
|
||||
@@ -8,9 +8,7 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
import net.i2p.client.I2PClient;
|
||||
import net.i2p.router.message.OutboundCache;
|
||||
import net.i2p.router.message.OutboundClientMessageOneShotJob;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@@ -23,14 +21,31 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class ClientMessagePool {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private final OutboundCache _cache;
|
||||
|
||||
public ClientMessagePool(RouterContext context) {
|
||||
_context = context;
|
||||
_log = _context.logManager().getLog(ClientMessagePool.class);
|
||||
_cache = new OutboundCache(_context);
|
||||
OutboundClientMessageOneShotJob.init(_context);
|
||||
}
|
||||
|
||||
/**
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void shutdown() {
|
||||
_cache.clearAllCaches();
|
||||
}
|
||||
|
||||
/**
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void restart() {
|
||||
shutdown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a new message to the pool. The message can either be locally or
|
||||
* remotely destined.
|
||||
@@ -57,7 +72,7 @@ public class ClientMessagePool {
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Adding message for remote delivery");
|
||||
OutboundClientMessageOneShotJob j = new OutboundClientMessageOneShotJob(_context, msg);
|
||||
OutboundClientMessageOneShotJob j = new OutboundClientMessageOneShotJob(_context, _cache, msg);
|
||||
if (true) // blocks the I2CP reader for a nontrivial period of time
|
||||
j.runJob();
|
||||
else
|
||||
@@ -65,6 +80,7 @@ public class ClientMessagePool {
|
||||
}
|
||||
}
|
||||
|
||||
/******
|
||||
private boolean isGuaranteed(ClientMessage msg) {
|
||||
Properties opts = null;
|
||||
if (msg.getSenderConfig() != null)
|
||||
@@ -76,4 +92,5 @@ public class ClientMessagePool {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
******/
|
||||
}
|
||||
|
||||
@@ -44,8 +44,9 @@ public class ClientTunnelSettings {
|
||||
_outboundSettings.writeToProperties("outbound.", props);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
Properties p = new Properties();
|
||||
writeToProperties(p);
|
||||
buf.append("Client tunnel settings:\n");
|
||||
|
||||
@@ -11,7 +11,6 @@ package net.i2p.router;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
@@ -30,11 +29,13 @@ public abstract class CommSystemFacade implements Service {
|
||||
public void renderStatusHTML(Writer out) throws IOException { renderStatusHTML(out, null, 0); }
|
||||
|
||||
/** Create the set of RouterAddress structures based on the router's config */
|
||||
public Set createAddresses() { return new HashSet(); }
|
||||
public Set<RouterAddress> createAddresses() { return Collections.EMPTY_SET; }
|
||||
|
||||
public int countActivePeers() { return 0; }
|
||||
public int countActiveSendPeers() { return 0; }
|
||||
public boolean haveCapacity() { return true; }
|
||||
public boolean haveInboundCapacity(int pct) { return true; }
|
||||
public boolean haveOutboundCapacity(int pct) { return true; }
|
||||
public boolean haveHighOutboundCapacity() { return true; }
|
||||
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
|
||||
|
||||
/**
|
||||
@@ -47,7 +48,7 @@ public abstract class CommSystemFacade implements Service {
|
||||
* Return framed average clock skew of connected peers in seconds, or null if we cannot answer.
|
||||
* CommSystemFacadeImpl overrides this.
|
||||
*/
|
||||
public Long getFramedAveragePeerClockSkew(int percentToInclude) { return null; }
|
||||
public long getFramedAveragePeerClockSkew(int percentToInclude) { return 0; }
|
||||
|
||||
/**
|
||||
* Determine under what conditions we are remotely reachable.
|
||||
@@ -58,7 +59,24 @@ public abstract class CommSystemFacade implements Service {
|
||||
public boolean isBacklogged(Hash dest) { return false; }
|
||||
public boolean wasUnreachable(Hash dest) { return false; }
|
||||
public boolean isEstablished(Hash dest) { return false; }
|
||||
public byte[] getIP(Hash dest) { return null; }
|
||||
public void queueLookup(byte[] ip) {}
|
||||
|
||||
/** @since 0.8.11 */
|
||||
public String getOurCountry() { return null; }
|
||||
|
||||
/** @since 0.8.13 */
|
||||
public boolean isInBadCountry() { return false; }
|
||||
|
||||
public String getCountry(Hash peer) { return null; }
|
||||
public String getCountryName(String code) { return code; }
|
||||
public String renderPeerHTML(Hash peer) {
|
||||
return peer.toBase64().substring(0, 4);
|
||||
}
|
||||
|
||||
/** @since 0.8.13 */
|
||||
public boolean isDummy() { return true; }
|
||||
|
||||
/**
|
||||
* Tell other transports our address changed
|
||||
*/
|
||||
|
||||
@@ -18,8 +18,6 @@ import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
|
||||
import net.i2p.data.i2np.DeliveryStatusMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.TunnelCreateMessage;
|
||||
import net.i2p.data.i2np.TunnelCreateStatusMessage;
|
||||
import net.i2p.data.i2np.TunnelDataMessage;
|
||||
import net.i2p.data.i2np.TunnelGatewayMessage;
|
||||
import net.i2p.util.I2PThread;
|
||||
@@ -32,17 +30,23 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class InNetMessagePool implements Service {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private HandlerJobBuilder _handlerJobBuilders[];
|
||||
private List _pendingDataMessages;
|
||||
private List _pendingDataMessagesFrom;
|
||||
private List _pendingGatewayMessages;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private final HandlerJobBuilder _handlerJobBuilders[];
|
||||
|
||||
/** following 5 unused unless DISPATCH_DIRECT == false */
|
||||
private final List _pendingDataMessages;
|
||||
private final List _pendingDataMessagesFrom;
|
||||
private final List _pendingGatewayMessages;
|
||||
private SharedShortCircuitDataJob _shortCircuitDataJob;
|
||||
private SharedShortCircuitGatewayJob _shortCircuitGatewayJob;
|
||||
|
||||
private boolean _alive;
|
||||
private boolean _dispatchThreaded;
|
||||
|
||||
/** Make this >= the max I2NP message type number (currently 24) */
|
||||
private static final int MAX_I2NP_MESSAGE_TYPE = 31;
|
||||
|
||||
/**
|
||||
* If set to true, we will have two additional threads - one for dispatching
|
||||
* tunnel data messages, and another for dispatching tunnel gateway messages.
|
||||
@@ -63,29 +67,41 @@ public class InNetMessagePool implements Service {
|
||||
|
||||
public InNetMessagePool(RouterContext context) {
|
||||
_context = context;
|
||||
_handlerJobBuilders = new HandlerJobBuilder[32];
|
||||
_pendingDataMessages = new ArrayList(16);
|
||||
_pendingDataMessagesFrom = new ArrayList(16);
|
||||
_pendingGatewayMessages = new ArrayList(16);
|
||||
_shortCircuitDataJob = new SharedShortCircuitDataJob(context);
|
||||
_shortCircuitGatewayJob = new SharedShortCircuitGatewayJob(context);
|
||||
_handlerJobBuilders = new HandlerJobBuilder[MAX_I2NP_MESSAGE_TYPE + 1];
|
||||
if (DISPATCH_DIRECT) {
|
||||
// keep the compiler happy since they are final
|
||||
_pendingDataMessages = null;
|
||||
_pendingDataMessagesFrom = null;
|
||||
_pendingGatewayMessages = null;
|
||||
} else {
|
||||
_pendingDataMessages = new ArrayList(16);
|
||||
_pendingDataMessagesFrom = new ArrayList(16);
|
||||
_pendingGatewayMessages = new ArrayList(16);
|
||||
_shortCircuitDataJob = new SharedShortCircuitDataJob(context);
|
||||
_shortCircuitGatewayJob = new SharedShortCircuitGatewayJob(context);
|
||||
}
|
||||
_log = _context.logManager().getLog(InNetMessagePool.class);
|
||||
_alive = false;
|
||||
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedDeliveryStatusDelay", "How long after a delivery status message is created do we receive it back again (for messages that are too slow to be handled)", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedTunnelCreateStatusMessage", "How often we drop a slow-to-arrive tunnel request response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedDbLookupResponseMessage", "How often we drop a slow-to-arrive db search response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("pool.dispatchDataTime", "How long a tunnel dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("pool.dispatchGatewayTime", "How long a tunnel gateway dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedDeliveryStatusDelay", "How long after a delivery status message is created do we receive it back again (for messages that are too slow to be handled)", "InNetPool", new long[] { 60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*60*1000l });
|
||||
//_context.statManager().createRateStat("inNetPool.droppedTunnelCreateStatusMessage", "How often we drop a slow-to-arrive tunnel request response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedDbLookupResponseMessage", "How often we drop a slow-to-arrive db search response", "InNetPool", new long[] { 60*60*1000l });
|
||||
}
|
||||
|
||||
/**
 * @return previous builder for this message type, or null
 * @throws AIOOBE if i2npMessageType is greater than MAX_I2NP_MESSAGE_TYPE
 */
public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) {
    HandlerJobBuilder old = _handlerJobBuilders[i2npMessageType];
    _handlerJobBuilders[i2npMessageType] = builder;
    return old;
}

/**
 * @return previous builder for this message type, or null
 * @throws AIOOBE if i2npMessageType is greater than MAX_I2NP_MESSAGE_TYPE
 */
public HandlerJobBuilder unregisterHandlerJobBuilder(int i2npMessageType) {
    HandlerJobBuilder old = _handlerJobBuilders[i2npMessageType];
    _handlerJobBuilders[i2npMessageType] = null;
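The handler array above is in effect a dispatch table indexed by I2NP message type. A minimal self-contained sketch of the same idea follows; Handler, Message, and MAX_TYPE are illustrative stand-ins, not the I2P HandlerJobBuilder API.

// Minimal dispatch-table sketch mirroring the handler array above.
public class DispatchTable {
    public interface Message { int getType(); }
    public interface Handler { void handle(Message msg); }

    private static final int MAX_TYPE = 31;
    private final Handler[] handlers = new Handler[MAX_TYPE + 1];

    /** @return the previous handler for this type, or null; AIOOBE if type is out of range */
    public Handler register(int type, Handler h) {
        Handler old = handlers[type];
        handlers[type] = h;
        return old;
    }

    /** @return the previous handler for this type, or null */
    public Handler unregister(int type) {
        return register(type, null);
    }

    /** Dispatch if a handler is registered; @return true if handled */
    public boolean dispatch(Message msg) {
        int type = msg.getType();
        if (type <= 0 || type >= handlers.length)
            return false;                     // mirrors the "type > 0" check in add()
        Handler h = handlers[type];
        if (h == null)
            return false;
        h.handle(msg);
        return true;
    }
}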
@@ -93,12 +109,14 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a new message to the pool, returning the number of messages in the
|
||||
* pool so that the comm system can throttle inbound messages. If there is
|
||||
* Add a new message to the pool.
|
||||
* If there is
|
||||
* a HandlerJobBuilder for the inbound message type, the message is loaded
|
||||
* into a job created by that builder and queued up for processing instead
|
||||
* (though if the builder doesn't create a job, it is added to the pool)
|
||||
*
|
||||
* @return -1 for some types of errors but not all; 0 otherwise
|
||||
* (was queue length, long ago)
|
||||
*/
|
||||
public int add(I2NPMessage messageBody, RouterIdentity fromRouter, Hash fromRouterHash) {
|
||||
long exp = messageBody.getMessageExpiration();
|
||||
@@ -125,17 +143,18 @@ public class InNetMessagePool implements Service {
|
||||
|
||||
if (invalidReason != null) {
|
||||
int level = Log.WARN;
|
||||
if (messageBody instanceof TunnelCreateMessage)
|
||||
level = Log.INFO;
|
||||
//if (messageBody instanceof TunnelCreateMessage)
|
||||
// level = Log.INFO;
|
||||
if (_log.shouldLog(level))
|
||||
_log.log(level, "Duplicate message received [" + messageBody.getUniqueId()
|
||||
+ " expiring on " + exp + "]: " + messageBody.getClass().getName() + ": " + invalidReason
|
||||
_log.log(level, "Dropping message [" + messageBody.getUniqueId()
|
||||
+ " expiring on " + exp + "]: " + messageBody.getClass().getSimpleName() + ": " + invalidReason
|
||||
+ ": " + messageBody);
|
||||
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
|
||||
// FIXME not necessarily a duplicate, could be expired too long ago / too far in future
|
||||
_context.statManager().addRateData("inNetPool.duplicate", 1, 0);
|
||||
_context.messageHistory().droppedOtherMessage(messageBody, (fromRouter != null ? fromRouter.calculateHash() : fromRouterHash));
|
||||
_context.messageHistory().messageProcessingError(messageBody.getUniqueId(),
|
||||
messageBody.getClass().getName(),
|
||||
messageBody.getClass().getSimpleName(),
|
||||
"Duplicate/expired");
|
||||
return -1;
|
||||
} else {
|
||||
@@ -155,6 +174,7 @@ public class InNetMessagePool implements Service {
|
||||
shortCircuitTunnelData(messageBody, fromRouterHash);
|
||||
allowMatches = false;
|
||||
} else {
|
||||
// why don't we allow type 0? There used to be a message of type 0 long ago...
|
||||
if ( (type > 0) && (type < _handlerJobBuilders.length) ) {
|
||||
HandlerJobBuilder builder = _handlerJobBuilders[type];
|
||||
|
||||
@@ -195,10 +215,10 @@ public class InNetMessagePool implements Service {
|
||||
_log.warn("Dropping unhandled delivery status message created " + timeSinceSent + "ms ago: " + messageBody);
|
||||
_context.statManager().addRateData("inNetPool.droppedDeliveryStatusDelay", timeSinceSent, timeSinceSent);
|
||||
}
|
||||
} else if (type == TunnelCreateStatusMessage.MESSAGE_TYPE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Dropping slow tunnel create request response: " + messageBody);
|
||||
_context.statManager().addRateData("inNetPool.droppedTunnelCreateStatusMessage", 1, 0);
|
||||
//} else if (type == TunnelCreateStatusMessage.MESSAGE_TYPE) {
|
||||
// if (_log.shouldLog(Log.INFO))
|
||||
// _log.info("Dropping slow tunnel create request response: " + messageBody);
|
||||
// _context.statManager().addRateData("inNetPool.droppedTunnelCreateStatusMessage", 1, 0);
|
||||
} else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Dropping slow db lookup response: " + messageBody);
|
||||
@@ -232,7 +252,7 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
|
||||
public int handleReplies(I2NPMessage messageBody) {
|
||||
List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
|
||||
List<OutNetMessage> origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Original messages for inbound message: " + origMessages.size());
|
||||
if (origMessages.size() > 1) {
|
||||
@@ -242,7 +262,7 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
|
||||
for (int i = 0; i < origMessages.size(); i++) {
|
||||
OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
|
||||
OutNetMessage omsg = origMessages.get(i);
|
||||
ReplyJob job = omsg.getOnReplyJob();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Original message [" + i + "] " + omsg.getReplySelector()
|
||||
@@ -302,20 +322,27 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
|
||||
public void renderStatusHTML(Writer out) {}
|
||||
|
||||
/** does nothing since we aren't threaded */
|
||||
public void restart() {
|
||||
shutdown();
|
||||
try { Thread.sleep(100); } catch (InterruptedException ie) {}
|
||||
startup();
|
||||
}
|
||||
|
||||
/** does nothing since we aren't threaded */
|
||||
public void shutdown() {
|
||||
_alive = false;
|
||||
synchronized (_pendingDataMessages) {
|
||||
_pendingDataMessages.clear();
|
||||
_pendingDataMessagesFrom.clear();
|
||||
_pendingDataMessages.notifyAll();
|
||||
if (!DISPATCH_DIRECT) {
|
||||
synchronized (_pendingDataMessages) {
|
||||
_pendingDataMessages.clear();
|
||||
_pendingDataMessagesFrom.clear();
|
||||
_pendingDataMessages.notifyAll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** does nothing since we aren't threaded */
|
||||
public void startup() {
|
||||
_alive = true;
|
||||
_dispatchThreaded = DEFAULT_DISPATCH_THREADED;
|
||||
@@ -324,6 +351,8 @@ public class InNetMessagePool implements Service {
|
||||
_dispatchThreaded = Boolean.valueOf(threadedStr).booleanValue();
|
||||
}
|
||||
if (_dispatchThreaded) {
|
||||
_context.statManager().createRateStat("pool.dispatchDataTime", "How long a tunnel dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("pool.dispatchGatewayTime", "How long a tunnel gateway dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
I2PThread data = new I2PThread(new TunnelDataDispatcher(), "Tunnel data dispatcher");
|
||||
data.setDaemon(true);
|
||||
data.start();
|
||||
@@ -333,6 +362,7 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
}
|
||||
|
||||
/** unused unless DISPATCH_DIRECT == false */
|
||||
private class SharedShortCircuitDataJob extends JobImpl {
|
||||
public SharedShortCircuitDataJob(RouterContext ctx) {
|
||||
super(ctx);
|
||||
@@ -343,7 +373,7 @@ public class InNetMessagePool implements Service {
|
||||
I2NPMessage msg = null;
|
||||
Hash from = null;
|
||||
synchronized (_pendingDataMessages) {
|
||||
if (_pendingDataMessages.size() > 0) {
|
||||
if (!_pendingDataMessages.isEmpty()) {
|
||||
msg = (I2NPMessage)_pendingDataMessages.remove(0);
|
||||
from = (Hash)_pendingDataMessagesFrom.remove(0);
|
||||
}
|
||||
@@ -355,6 +385,8 @@ public class InNetMessagePool implements Service {
|
||||
getContext().jobQueue().addJob(SharedShortCircuitDataJob.this);
|
||||
}
|
||||
}
|
||||
|
||||
/** unused unless DISPATCH_DIRECT == false */
|
||||
private class SharedShortCircuitGatewayJob extends JobImpl {
|
||||
public SharedShortCircuitGatewayJob(RouterContext ctx) {
|
||||
super(ctx);
|
||||
@@ -364,7 +396,7 @@ public class InNetMessagePool implements Service {
|
||||
I2NPMessage msg = null;
|
||||
int remaining = 0;
|
||||
synchronized (_pendingGatewayMessages) {
|
||||
if (_pendingGatewayMessages.size() > 0)
|
||||
if (!_pendingGatewayMessages.isEmpty())
|
||||
msg = (I2NPMessage)_pendingGatewayMessages.remove(0);
|
||||
remaining = _pendingGatewayMessages.size();
|
||||
}
|
||||
@@ -375,13 +407,14 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
}
|
||||
|
||||
/** unused unless router.dispatchThreaded=true */
|
||||
private class TunnelGatewayDispatcher implements Runnable {
|
||||
public void run() {
|
||||
while (_alive) {
|
||||
I2NPMessage msg = null;
|
||||
try {
|
||||
synchronized (_pendingGatewayMessages) {
|
||||
if (_pendingGatewayMessages.size() <= 0)
|
||||
if (_pendingGatewayMessages.isEmpty())
|
||||
_pendingGatewayMessages.wait();
|
||||
else
|
||||
msg = (I2NPMessage)_pendingGatewayMessages.remove(0);
|
||||
@@ -403,6 +436,8 @@ public class InNetMessagePool implements Service {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** unused unless router.dispatchThreaded=true */
|
||||
private class TunnelDataDispatcher implements Runnable {
|
||||
public void run() {
|
||||
while (_alive) {
|
||||
@@ -410,7 +445,7 @@ public class InNetMessagePool implements Service {
|
||||
Hash from = null;
|
||||
try {
|
||||
synchronized (_pendingDataMessages) {
|
||||
if (_pendingDataMessages.size() <= 0) {
|
||||
if (_pendingDataMessages.isEmpty()) {
|
||||
_pendingDataMessages.wait();
|
||||
} else {
|
||||
msg = (I2NPMessage)_pendingDataMessages.remove(0);
|
||||
|
||||
@@ -19,7 +19,7 @@ public interface Job {
|
||||
*/
|
||||
public String getName();
|
||||
/** unique id */
|
||||
public int getJobId();
|
||||
public long getJobId();
|
||||
/**
|
||||
* Timing criteria for the task
|
||||
*/
|
||||
|
||||
@@ -8,48 +8,62 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import net.i2p.util.Log;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Base implementation of a Job
 */
public abstract class JobImpl implements Job {
private RouterContext _context;
private JobTiming _timing;
private static int _idSrc = 0;
private int _id;
private Exception _addedBy;
private final RouterContext _context;
private final JobTiming _timing;
private static AtomicLong _idSrc = new AtomicLong();
private final long _id;
//private Exception _addedBy;
private long _madeReadyOn;

public JobImpl(RouterContext context) {
_context = context;
_timing = new JobTiming(context);
_id = ++_idSrc;
_addedBy = null;
_madeReadyOn = 0;
_id = _idSrc.incrementAndGet();
}

public int getJobId() { return _id; }
public long getJobId() { return _id; }
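Replacing "_id = ++_idSrc" on a static int with an AtomicLong removes the race when jobs are constructed from several threads, and the long id will not wrap negative. A standalone sketch of the pattern, with names that are illustrative rather than the I2P classes:

import java.util.concurrent.atomic.AtomicLong;

// Sketch: thread-safe unique IDs via AtomicLong, replacing a pre-increment
// on a plain static int (which can hand out duplicate IDs under contention).
public abstract class BaseTask {
    private static final AtomicLong ID_SOURCE = new AtomicLong();
    private final long id;

    protected BaseTask() {
        // incrementAndGet() is atomic, so every task gets a distinct id
        // even when tasks are constructed concurrently.
        id = ID_SOURCE.incrementAndGet();
    }

    public long getId() { return id; }
}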
public JobTiming getTiming() { return _timing; }
|
||||
|
||||
public final RouterContext getContext() { return _context; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append(super.toString());
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getClass().getSimpleName());
|
||||
buf.append(": Job ").append(_id).append(": ").append(getName());
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated
|
||||
* As of 0.8.1, this is a noop, as it just adds classes to the log manager
|
||||
* class list for no good reason. Logging in jobs is almost always
|
||||
* set explicitly rather than by class name.
|
||||
*/
|
||||
void addedToQueue() {
|
||||
if (_context.logManager().getLog(getClass()).shouldLog(Log.DEBUG))
|
||||
_addedBy = new Exception();
|
||||
//if (_context.logManager().getLog(getClass()).shouldLog(Log.DEBUG))
|
||||
// _addedBy = new Exception();
|
||||
}
|
||||
|
||||
public Exception getAddedBy() { return _addedBy; }
|
||||
/**
|
||||
* @deprecated
|
||||
* @return null always
|
||||
*/
|
||||
public Exception getAddedBy() { return null; }
|
||||
public long getMadeReadyOn() { return _madeReadyOn; }
|
||||
public void madeReady() { _madeReadyOn = _context.clock().now(); }
|
||||
public void dropped() {}
|
||||
|
||||
/**
|
||||
* Warning - only call this from runJob() or if Job is not already queued,
|
||||
* or else it gets the job queue out of order.
|
||||
*/
|
||||
protected void requeue(long delayMs) {
|
||||
getTiming().setStartAfter(_context.clock().now() + delayMs);
|
||||
_context.jobQueue().addJob(this);
|
||||
|
||||
@@ -10,15 +10,19 @@ package net.i2p.router;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.SortedMap;
|
||||
import java.util.TreeMap;
|
||||
import java.util.Map;
|
||||
import java.util.TreeSet;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
|
||||
import net.i2p.router.networkdb.kademlia.HandleFloodfillDatabaseLookupMessageJob;
|
||||
import net.i2p.util.Clock;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
@@ -29,32 +33,46 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class JobQueue {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
|
||||
/** Integer (runnerId) to JobQueueRunner for created runners */
|
||||
private HashMap _queueRunners;
|
||||
private final Map<Integer, JobQueueRunner> _queueRunners;
|
||||
/** a counter to identify a job runner */
|
||||
private volatile static int _runnerId = 0;
|
||||
/** list of jobs that are ready to run ASAP */
|
||||
private ArrayList _readyJobs;
|
||||
/** list of jobs that are scheduled for running in the future */
|
||||
private ArrayList _timedJobs;
|
||||
private final BlockingQueue<Job> _readyJobs;
|
||||
/** SortedSet of jobs that are scheduled for running in the future, earliest first */
|
||||
private final Set<Job> _timedJobs;
|
||||
/** job name to JobStat for that job */
|
||||
private SortedMap _jobStats;
|
||||
private final Map<String, JobStats> _jobStats;
|
||||
/** how many job queue runners can go concurrently */
|
||||
private int _maxRunners = 1;
|
||||
private QueuePumper _pumper;
|
||||
private final QueuePumper _pumper;
|
||||
/** will we allow the # job runners to grow beyond 1? */
|
||||
private boolean _allowParallelOperation;
|
||||
/** have we been killed or are we alive? */
|
||||
private boolean _alive;
|
||||
|
||||
private Object _jobLock;
|
||||
private final Object _jobLock;
|
||||
|
||||
/** how many when we go parallel */
|
||||
private static final int RUNNERS;
|
||||
static {
|
||||
long maxMemory = Runtime.getRuntime().maxMemory();
|
||||
if (maxMemory == Long.MAX_VALUE)
|
||||
maxMemory = 128*1024*1024l;
|
||||
if (maxMemory < 64*1024*1024)
|
||||
RUNNERS = 3;
|
||||
else if (maxMemory < 256*1024*1024)
|
||||
RUNNERS = 4;
|
||||
else
|
||||
RUNNERS = 5;
|
||||
}
|
||||
|
||||
/** default max # job queue runners operating */
|
||||
private final static int DEFAULT_MAX_RUNNERS = 1;
|
||||
/** router.config parameter to override the max runners */
|
||||
/** router.config parameter to override the max runners @deprecated unimplemented */
|
||||
private final static String PROP_MAX_RUNNERS = "router.maxJobRunners";
|
||||
|
||||
/** how frequently should we check and update the max runners */
|
||||
@@ -63,38 +81,44 @@ public class JobQueue {
|
||||
/** if a job is this lagged, spit out a warning, but keep going */
|
||||
private long _lagWarning = DEFAULT_LAG_WARNING;
|
||||
private final static long DEFAULT_LAG_WARNING = 5*1000;
|
||||
/** @deprecated unimplemented */
|
||||
private final static String PROP_LAG_WARNING = "router.jobLagWarning";
|
||||
|
||||
/** if a job is this lagged, the router is hosed, so shut it down */
|
||||
/** if a job is this lagged, the router is hosed, so spit out a warning (dont shut it down) */
|
||||
private long _lagFatal = DEFAULT_LAG_FATAL;
|
||||
private final static long DEFAULT_LAG_FATAL = 30*1000;
|
||||
/** @deprecated unimplemented */
|
||||
private final static String PROP_LAG_FATAL = "router.jobLagFatal";
|
||||
|
||||
/** if a job takes this long to run, spit out a warning, but keep going */
|
||||
private long _runWarning = DEFAULT_RUN_WARNING;
|
||||
private final static long DEFAULT_RUN_WARNING = 5*1000;
|
||||
/** @deprecated unimplemented */
|
||||
private final static String PROP_RUN_WARNING = "router.jobRunWarning";
|
||||
|
||||
/** if a job takes this long to run, the router is hosed, so shut it down */
|
||||
/** if a job takes this long to run, the router is hosed, so spit out a warning (dont shut it down) */
|
||||
private long _runFatal = DEFAULT_RUN_FATAL;
|
||||
private final static long DEFAULT_RUN_FATAL = 30*1000;
|
||||
/** @deprecated unimplemented */
|
||||
private final static String PROP_RUN_FATAL = "router.jobRunFatal";
|
||||
|
||||
/** don't enforce fatal limits until the router has been up for this long */
|
||||
private long _warmupTime = DEFAULT_WARMUP_TIME;
|
||||
private final static long DEFAULT_WARMUP_TIME = 10*60*1000;
|
||||
private final static String PROP_WARMUM_TIME = "router.jobWarmupTime";
|
||||
/** @deprecated unimplemented */
|
||||
private final static String PROP_WARMUP_TIME = "router.jobWarmupTime";
|
||||
|
||||
/** max ready and waiting jobs before we start dropping 'em */
|
||||
private int _maxWaitingJobs = DEFAULT_MAX_WAITING_JOBS;
|
||||
private final static int DEFAULT_MAX_WAITING_JOBS = 100;
|
||||
/** @deprecated unimplemented */
|
||||
private final static String PROP_MAX_WAITING_JOBS = "router.maxWaitingJobs";
|
||||
|
||||
|
||||
/**
|
||||
* queue runners wait on this whenever they're not doing anything, and
|
||||
* this gets notified *once* whenever there are ready jobs
|
||||
*/
|
||||
private Object _runnerLock = new Object();
|
||||
private final Object _runnerLock = new Object();
|
||||
|
||||
public JobQueue(RouterContext context) {
|
||||
_context = context;
|
||||
@@ -109,16 +133,13 @@ public class JobQueue {
|
||||
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
|
||||
_alive = true;
|
||||
_readyJobs = new ArrayList(16);
|
||||
_timedJobs = new ArrayList(64);
|
||||
_readyJobs = new LinkedBlockingQueue();
|
||||
_timedJobs = new TreeSet(new JobComparator());
|
||||
_jobLock = new Object();
|
||||
_queueRunners = new HashMap();
|
||||
_jobStats = Collections.synchronizedSortedMap(new TreeMap());
|
||||
_allowParallelOperation = false;
|
||||
_queueRunners = new ConcurrentHashMap(RUNNERS);
|
||||
_jobStats = new ConcurrentHashMap();
|
||||
_pumper = new QueuePumper();
|
||||
I2PThread pumperThread = new I2PThread(_pumper);
|
||||
pumperThread.setDaemon(true);
|
||||
pumperThread.setName("QueuePumper");
|
||||
I2PThread pumperThread = new I2PThread(_pumper, "Job Queue Pumper", true);
|
||||
//pumperThread.setPriority(I2PThread.NORM_PRIORITY+1);
|
||||
pumperThread.start();
|
||||
}
|
||||
@@ -128,24 +149,31 @@ public class JobQueue {
|
||||
*
|
||||
*/
|
||||
public void addJob(Job job) {
|
||||
if (job == null) return;
|
||||
if (job == null || !_alive) return;
|
||||
|
||||
if (job instanceof JobImpl)
|
||||
((JobImpl)job).addedToQueue();
|
||||
// This does nothing
|
||||
//if (job instanceof JobImpl)
|
||||
// ((JobImpl)job).addedToQueue();
|
||||
|
||||
long numReady = 0;
|
||||
boolean alreadyExists = false;
|
||||
boolean dropped = false;
|
||||
// getNext() is now outside the jobLock, is that ok?
|
||||
synchronized (_jobLock) {
|
||||
if (_readyJobs.contains(job))
|
||||
alreadyExists = true;
|
||||
numReady = _readyJobs.size();
|
||||
if (!alreadyExists) {
|
||||
if (_timedJobs.contains(job))
|
||||
alreadyExists = true;
|
||||
//if (_timedJobs.contains(job))
|
||||
// alreadyExists = true;
|
||||
// Always remove and re-add, since it needs to be
|
||||
// re-sorted in the TreeSet.
|
||||
boolean removed = _timedJobs.remove(job);
|
||||
if (removed && _log.shouldLog(Log.WARN))
|
||||
_log.warn("Rescheduling job: " + job);
|
||||
}
|
||||
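The remove-then-re-add above exists because a TreeSet orders an element only at insertion time; if a job's start time changes while it sits in the set, the set keeps the stale ordering and may no longer find the job. A small self-contained illustration of the fix, where Task is a stand-in type and not the I2P Job class:

import java.util.Comparator;
import java.util.TreeSet;

// Remove, update the key, re-add: the pattern addJob() relies on so the
// TreeSet re-sorts a rescheduled entry instead of keeping a stale position.
public class ResortDemo {
    static final class Task {
        long startAfter;
        final long id;
        Task(long startAfter, long id) { this.startAfter = startAfter; this.id = id; }
    }

    public static void main(String[] args) {
        Comparator<Task> byTimeThenId =
                Comparator.<Task>comparingLong(t -> t.startAfter).thenComparingLong(t -> t.id);
        TreeSet<Task> timed = new TreeSet<>(byTimeThenId);
        Task a = new Task(1000, 1);
        Task b = new Task(2000, 2);
        timed.add(a);
        timed.add(b);

        // Reschedule a: take it out while its stored key still matches,
        // update the key, then re-add so the TreeSet re-sorts it.
        timed.remove(a);
        a.startAfter = 3000;
        timed.add(a);

        System.out.println(timed.first().id);   // 2 -- b now runs first, as expected
    }
}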
|
||||
if (shouldDrop(job, numReady)) {
|
||||
if ((!alreadyExists) && shouldDrop(job, numReady)) {
|
||||
job.dropped();
|
||||
dropped = true;
|
||||
} else {
|
||||
@@ -155,24 +183,22 @@ public class JobQueue {
|
||||
job.getTiming().setStartAfter(_context.clock().now());
|
||||
if (job instanceof JobImpl)
|
||||
((JobImpl)job).madeReady();
|
||||
_readyJobs.add(job);
|
||||
_readyJobs.offer(job);
|
||||
} else {
|
||||
_timedJobs.add(job);
|
||||
// only notify for _timedJobs, as _readyJobs does not use that lock
|
||||
_jobLock.notifyAll();
|
||||
}
|
||||
}
|
||||
}
|
||||
_jobLock.notifyAll();
|
||||
}
|
||||
|
||||
_context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
|
||||
if (dropped) {
|
||||
_context.statManager().addRateData("jobQueue.droppedJobs", 1, 1);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping job due to overload! # ready jobs: "
|
||||
_context.statManager().addRateData("jobQueue.droppedJobs", 1, 0);
|
||||
_log.logAlways(Log.WARN, "Dropping job due to overload! # ready jobs: "
|
||||
+ numReady + ": job = " + job);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
public void removeJob(Job job) {
|
||||
@@ -182,6 +208,23 @@ public class JobQueue {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> if a given job is waiting or running;
|
||||
* <code>false</code> if the job is finished or doesn't exist in the queue.
|
||||
*
|
||||
* Only used by PluginStarter, candidate for deprecation
|
||||
*/
|
||||
public boolean isJobActive(Job job) {
|
||||
synchronized (_jobLock) {
|
||||
if (_readyJobs.contains(job) || _timedJobs.contains(job))
|
||||
return true;
|
||||
}
|
||||
for (JobQueueRunner runner: _queueRunners.values())
|
||||
if (runner.getCurrentJob() == job)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
public void timingUpdated() {
|
||||
synchronized (_jobLock) {
|
||||
_jobLock.notifyAll();
|
||||
@@ -189,17 +232,19 @@ public class JobQueue {
|
||||
}
|
||||
|
||||
public int getReadyCount() {
|
||||
synchronized (_jobLock) {
|
||||
return _readyJobs.size();
|
||||
}
|
||||
}
|
||||
|
||||
public long getMaxLag() {
|
||||
synchronized (_jobLock) {
|
||||
if (_readyJobs.size() <= 0) return 0;
|
||||
// first job is the one that has been waiting the longest
|
||||
long startAfter = ((Job)_readyJobs.get(0)).getTiming().getStartAfter();
|
||||
Job j = _readyJobs.peek();
|
||||
if (j == null) return 0;
|
||||
JobTiming jt = j.getTiming();
|
||||
// PoisonJob timing is null, prevent NPE at shutdown
|
||||
if (jt == null)
|
||||
return 0;
|
||||
long startAfter = jt.getStartAfter();
|
||||
return _context.clock().now() - startAfter;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -212,14 +257,16 @@ public class JobQueue {
|
||||
private boolean shouldDrop(Job job, long numReady) {
|
||||
if (_maxWaitingJobs <= 0) return false; // dont ever drop jobs
|
||||
if (!_allowParallelOperation) return false; // dont drop during startup [duh]
|
||||
Class cls = job.getClass();
|
||||
if (numReady > _maxWaitingJobs) {
|
||||
Class cls = job.getClass();
|
||||
// lets not try to drop too many tunnel messages...
|
||||
//if (cls == HandleTunnelMessageJob.class)
|
||||
// return true;
|
||||
|
||||
// we don't really *need* to answer DB lookup messages
|
||||
if (cls == HandleDatabaseLookupMessageJob.class)
|
||||
// This is pretty lame, there's actually a ton of different jobs we
|
||||
// could drop, but is it worth making a list?
|
||||
if (cls == HandleFloodfillDatabaseLookupMessageJob.class)
|
||||
return true;
|
||||
|
||||
}
|
||||
@@ -228,9 +275,10 @@ public class JobQueue {
|
||||
|
||||
public void allowParallelOperation() {
|
||||
_allowParallelOperation = true;
|
||||
runQueue(4);
|
||||
runQueue(RUNNERS);
|
||||
}
|
||||
|
||||
/** @deprecated do you really want to do this? */
|
||||
public void restart() {
|
||||
synchronized (_jobLock) {
|
||||
_timedJobs.clear();
|
||||
@@ -242,13 +290,28 @@ public class JobQueue {
|
||||
void shutdown() {
|
||||
_alive = false;
|
||||
synchronized (_jobLock) {
|
||||
_timedJobs.clear();
|
||||
_readyJobs.clear();
|
||||
_jobLock.notifyAll();
|
||||
}
|
||||
// The JobQueueRunners are NOT daemons,
|
||||
// so they must be stopped.
|
||||
Job poison = new PoisonJob();
|
||||
for (JobQueueRunner runner : _queueRunners.values()) {
|
||||
runner.stopRunning();
|
||||
_readyJobs.offer(poison);
|
||||
// TODO interrupt thread for each runner
|
||||
}
|
||||
_queueRunners.clear();
|
||||
_jobStats.clear();
|
||||
_runnerId = 0;
|
||||
|
||||
/********
|
||||
if (_log.shouldLog(Log.WARN)) {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
StringBuilder buf = new StringBuilder(1024);
|
||||
buf.append("current jobs: \n");
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
|
||||
JobQueueRunner runner = (JobQueueRunner)iter.next();
|
||||
JobQueueRunner runner = iter.next();
|
||||
Job j = runner.getCurrentJob();
|
||||
|
||||
buf.append("Runner ").append(runner.getRunnerId()).append(": ");
|
||||
@@ -279,7 +342,9 @@ public class JobQueue {
|
||||
buf.append(_timedJobs.get(i).toString()).append("\n\t");
|
||||
_log.log(Log.WARN, buf.toString());
|
||||
}
|
||||
********/
|
||||
}
|
||||
|
||||
boolean isAlive() { return _alive; }
|
||||
|
||||
/**
|
||||
@@ -287,9 +352,8 @@ public class JobQueue {
|
||||
*/
|
||||
public long getLastJobBegin() {
|
||||
long when = -1;
|
||||
// not synchronized, so might b0rk if the runners are changed
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
|
||||
long cur = ((JobQueueRunner)iter.next()).getLastBegin();
|
||||
for (JobQueueRunner runner : _queueRunners.values()) {
|
||||
long cur = runner.getLastBegin();
|
||||
if (cur > when)
|
||||
cur = when;
|
||||
}
|
||||
@@ -300,9 +364,8 @@ public class JobQueue {
|
||||
*/
|
||||
public long getLastJobEnd() {
|
||||
long when = -1;
|
||||
// not synchronized, so might b0rk if the runners are changed
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
|
||||
long cur = ((JobQueueRunner)iter.next()).getLastEnd();
|
||||
for (JobQueueRunner runner : _queueRunners.values()) {
|
||||
long cur = runner.getLastEnd();
|
||||
if (cur > when)
|
||||
cur = when;
|
||||
}
|
||||
@@ -315,9 +378,7 @@ public class JobQueue {
|
||||
public Job getLastJob() {
|
||||
Job j = null;
|
||||
long when = -1;
|
||||
// not synchronized, so might b0rk if the runners are changed
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
|
||||
JobQueueRunner cur = (JobQueueRunner)iter.next();
|
||||
for (JobQueueRunner cur : _queueRunners.values()) {
|
||||
if (cur.getLastBegin() > when) {
|
||||
j = cur.getCurrentJob();
|
||||
when = cur.getLastBegin();
|
||||
@@ -333,13 +394,10 @@ public class JobQueue {
Job getNext() {
while (_alive) {
try {
synchronized (_jobLock) {
if (_readyJobs.size() > 0) {
return (Job)_readyJobs.remove(0);
} else {
_jobLock.wait();
}
}
Job j = _readyJobs.take();
if (j.getJobId() == POISON_ID)
break;
return j;
} catch (InterruptedException ie) {}
}
if (_log.shouldLog(Log.WARN))
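getNext() now blocks on the LinkedBlockingQueue, and shutdown() wakes each runner by offering a PoisonJob whose id is POISON_ID. A compact standalone sketch of that poison-pill pattern, using simplified stand-in types rather than the I2P JobQueue API:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Poison-pill shutdown sketch: workers block on take(); shutdown() enqueues
// one sentinel per worker so each blocked take() returns and the loop exits.
public class PoisonPillDemo {
    interface Task { void run(); }
    private static final Task POISON = () -> {};   // sentinel, never executed

    private final BlockingQueue<Task> ready = new LinkedBlockingQueue<>();

    Runnable runner() {
        return () -> {
            while (true) {
                try {
                    Task t = ready.take();          // blocks until work or poison
                    if (t == POISON)
                        break;                      // clean exit, thread ends
                    t.run();
                } catch (InterruptedException ie) {
                    // ignored here, matching the original loop's behavior
                }
            }
        };
    }

    void submit(Task t) { ready.offer(t); }

    void shutdown(int numRunners) {
        for (int i = 0; i < numRunners; i++)
            ready.offer(POISON);                    // one pill per runner thread
    }

    public static void main(String[] args) throws InterruptedException {
        PoisonPillDemo q = new PoisonPillDemo();
        Thread t1 = new Thread(q.runner());
        Thread t2 = new Thread(q.runner());
        t1.start(); t2.start();
        q.submit(() -> System.out.println("job ran"));
        q.shutdown(2);
        t1.join(); t2.join();
    }
}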
@@ -355,11 +413,10 @@ public class JobQueue {
|
||||
* the current job.
|
||||
*
|
||||
*/
|
||||
public void runQueue(int numThreads) {
|
||||
synchronized (_queueRunners) {
|
||||
public synchronized void runQueue(int numThreads) {
|
||||
// we're still starting up [serially] and we've got at least one runner,
|
||||
// so dont do anything
|
||||
if ( (_queueRunners.size() > 0) && (!_allowParallelOperation) ) return;
|
||||
if ( (!_queueRunners.isEmpty()) && (!_allowParallelOperation) ) return;
|
||||
|
||||
// we've already enabled parallel operation, so grow to however many are
|
||||
// specified
|
||||
@@ -370,15 +427,12 @@ public class JobQueue {
|
||||
for (int i = _queueRunners.size(); i < numThreads; i++) {
|
||||
JobQueueRunner runner = new JobQueueRunner(_context, i);
|
||||
_queueRunners.put(Integer.valueOf(i), runner);
|
||||
Thread t = new I2PThread(runner);
|
||||
t.setName("JobQueue"+(_runnerId++));
|
||||
Thread t = new I2PThread(runner, "JobQueue " + (++_runnerId) + '/' + numThreads, false);
|
||||
//t.setPriority(I2PThread.MAX_PRIORITY-1);
|
||||
t.setDaemon(false);
|
||||
t.start();
|
||||
}
|
||||
} else if (_queueRunners.size() == numThreads) {
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
|
||||
JobQueueRunner runner = (JobQueueRunner)iter.next();
|
||||
for (JobQueueRunner runner : _queueRunners.values()) {
|
||||
runner.startRunning();
|
||||
}
|
||||
} else { // numThreads < # runners, so shrink
|
||||
@@ -387,7 +441,6 @@ public class JobQueue {
|
||||
// runner.stopRunning();
|
||||
//}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void removeRunner(int id) { _queueRunners.remove(Integer.valueOf(id)); }
|
||||
@@ -407,41 +460,58 @@ public class JobQueue {
|
||||
while (_alive) {
|
||||
long now = _context.clock().now();
|
||||
long timeToWait = -1;
|
||||
ArrayList toAdd = null;
|
||||
try {
|
||||
synchronized (_jobLock) {
|
||||
for (int i = 0; i < _timedJobs.size(); i++) {
|
||||
Job j = (Job)_timedJobs.get(i);
|
||||
Job lastJob = null;
|
||||
long lastTime = Long.MIN_VALUE;
|
||||
for (Iterator<Job> iter = _timedJobs.iterator(); iter.hasNext(); ) {
|
||||
Job j = iter.next();
|
||||
// find jobs due to start before now
|
||||
long timeLeft = j.getTiming().getStartAfter() - now;
|
||||
if (lastJob != null && lastTime > j.getTiming().getStartAfter()) {
|
||||
_log.error("Job " + lastJob + " out of order with job " + j +
|
||||
" difference of " + DataHelper.formatDuration(lastTime - j.getTiming().getStartAfter()));
|
||||
}
|
||||
lastJob = j;
|
||||
lastTime = lastJob.getTiming().getStartAfter();
|
||||
if (timeLeft <= 0) {
|
||||
if (j instanceof JobImpl)
|
||||
((JobImpl)j).madeReady();
|
||||
|
||||
if (toAdd == null) toAdd = new ArrayList(4);
|
||||
toAdd.add(j);
|
||||
_timedJobs.remove(i);
|
||||
i--; // so the index stays consistent
|
||||
_readyJobs.offer(j);
|
||||
iter.remove();
|
||||
} else {
|
||||
if ( (timeToWait <= 0) || (timeLeft < timeToWait) )
|
||||
//if ( (timeToWait <= 0) || (timeLeft < timeToWait) )
|
||||
// _timedJobs is now a TreeSet, so once we hit one that is
|
||||
// not ready yet, we can break
|
||||
// NOTE: By not going through the whole thing, a single job changing
|
||||
// setStartAfter() to some far-away time, without
|
||||
// calling addJob(), could clog the whole queue forever.
|
||||
// Hopefully nobody does that, and as a backup, we hope
|
||||
// that the TreeSet will eventually resort it from other addJob() calls.
|
||||
timeToWait = timeLeft;
|
||||
|
||||
// failsafe - remove and re-add, peek at the next job,
|
||||
// break and go around again
|
||||
if (timeToWait > 10*1000 && iter.hasNext()) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Failsafe re-sort job " + j +
|
||||
" with delay " + DataHelper.formatDuration(timeToWait));
|
||||
iter.remove();
|
||||
Job nextJob = iter.next();
|
||||
_timedJobs.add(j);
|
||||
long nextTimeLeft = nextJob.getTiming().getStartAfter() - now;
|
||||
if (timeToWait > nextTimeLeft) {
|
||||
_log.error("Job " + j + " out of order with job " + nextJob +
|
||||
" difference of " + DataHelper.formatDuration(timeToWait - nextTimeLeft));
|
||||
timeToWait = Math.max(10, nextTimeLeft);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (toAdd != null) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Not waiting - we have " + toAdd.size() + " newly ready jobs");
|
||||
// rather than addAll, which allocs a byte array rv before adding,
|
||||
// we iterate, since toAdd is usually going to only be 1 or 2 entries
|
||||
// and since readyJobs will often have the space, we can avoid the
|
||||
// extra alloc. (no, i'm not just being insane - i'm updating this based
|
||||
// on some profiling data ;)
|
||||
for (int i = 0; i < toAdd.size(); i++)
|
||||
_readyJobs.add(toAdd.get(i));
|
||||
_jobLock.notifyAll();
|
||||
} else {
|
||||
if (timeToWait < 0)
|
||||
timeToWait = 30*1000;
|
||||
timeToWait = 1000;
|
||||
else if (timeToWait < 10)
|
||||
timeToWait = 10;
|
||||
else if (timeToWait > 10*1000)
|
||||
@@ -449,7 +519,6 @@ public class JobQueue {
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Waiting " + timeToWait + " before rechecking the timed queue");
|
||||
_jobLock.wait(timeToWait);
|
||||
}
|
||||
} // synchronize (_jobLock)
|
||||
} catch (InterruptedException ie) {}
|
||||
} // while (_alive)
|
||||
@@ -475,18 +544,15 @@ public class JobQueue {
|
||||
*/
|
||||
private void updateJobTimings(long delta) {
|
||||
synchronized (_jobLock) {
|
||||
for (int i = 0; i < _timedJobs.size(); i++) {
|
||||
Job j = (Job)_timedJobs.get(i);
|
||||
for (Job j : _timedJobs) {
|
||||
j.getTiming().offsetChanged(delta);
|
||||
}
|
||||
for (int i = 0; i < _readyJobs.size(); i++) {
|
||||
Job j = (Job)_readyJobs.get(i);
|
||||
for (Job j : _readyJobs) {
|
||||
j.getTiming().offsetChanged(delta);
|
||||
}
|
||||
}
|
||||
synchronized (_runnerLock) {
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
|
||||
JobQueueRunner runner = (JobQueueRunner)iter.next();
|
||||
for (JobQueueRunner runner : _queueRunners.values()) {
|
||||
Job job = runner.getCurrentJob();
|
||||
if (job != null)
|
||||
job.getTiming().offsetChanged(delta);
|
||||
@@ -509,14 +575,14 @@ public class JobQueue {
if (lag < 0) lag = 0;
if (duration < 0) duration = 0;

JobStats stats = null;
if (!_jobStats.containsKey(key)) {
_jobStats.put(key, new JobStats(key));
JobStats stats = _jobStats.get(key);
if (stats == null) {
stats = new JobStats(key);
_jobStats.put(key, stats);
// yes, if two runners finish the same job at the same time, this could
// create an extra object. but, who cares, its pushed out of the map
// immediately anyway.
}
stats = (JobStats)_jobStats.get(key);
stats.jobRan(duration, lag);

String dieMsg = null;
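The null check above tolerates the race the comment describes; on a ConcurrentHashMap the same intent can also be expressed with putIfAbsent so that exactly one object wins. A hedged sketch of that alternative (not the code in this diff, names are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Race-tolerant per-key stats lookup via putIfAbsent: if two threads insert
// the same key at once, one object wins and both threads update that winner.
public class StatsRegistry {
    static final class Stats {
        private long runs, totalMillis;
        synchronized void jobRan(long durationMillis) {
            runs++;
            totalMillis += durationMillis;
        }
        synchronized long runs() { return runs; }
    }

    private final Map<String, Stats> byName = new ConcurrentHashMap<>();

    void record(String jobName, long durationMillis) {
        Stats stats = byName.get(jobName);
        if (stats == null) {
            Stats fresh = new Stats();
            Stats prior = byName.putIfAbsent(jobName, fresh);   // atomic insert-if-missing
            stats = (prior != null) ? prior : fresh;            // keep whichever won the race
        }
        stats.jobRan(durationMillis);
    }
}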
@@ -555,186 +621,86 @@ public class JobQueue {
|
||||
}
|
||||
|
||||
|
||||
////
|
||||
// the remainder are utility methods for dumping status info
|
||||
////
|
||||
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
ArrayList readyJobs = null;
|
||||
ArrayList timedJobs = null;
|
||||
ArrayList activeJobs = new ArrayList(1);
|
||||
ArrayList justFinishedJobs = new ArrayList(4);
|
||||
out.write("<!-- jobQueue rendering -->\n");
|
||||
out.flush();
|
||||
|
||||
int states[] = null;
|
||||
int numRunners = 0;
|
||||
synchronized (_queueRunners) {
|
||||
states = new int[_queueRunners.size()];
|
||||
int i = 0;
|
||||
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); i++) {
|
||||
JobQueueRunner runner = (JobQueueRunner)iter.next();
|
||||
states[i] = runner.getState();
|
||||
Job job = runner.getCurrentJob();
|
||||
if (job != null) {
|
||||
activeJobs.add(job);
|
||||
} else {
|
||||
job = runner.getLastJob();
|
||||
if (job != null)
|
||||
justFinishedJobs.add(job);
|
||||
}
|
||||
}
|
||||
numRunners = _queueRunners.size();
|
||||
}
|
||||
|
||||
StringBuffer str = new StringBuffer(128);
|
||||
str.append("<!-- after queueRunner sync: states: ");
|
||||
for (int i = 0; states != null && i < states.length; i++)
|
||||
str.append(states[i]).append(" ");
|
||||
str.append(" -->\n");
|
||||
str.append("<!-- jobs: ");
|
||||
for (int i = 0; i < activeJobs.size(); i++)
|
||||
str.append(activeJobs.get(i).toString()).append(" ");
|
||||
str.append("-->\n");
|
||||
out.write(str.toString());
|
||||
out.flush();
|
||||
|
||||
synchronized (_jobLock) {
|
||||
readyJobs = new ArrayList(_readyJobs);
|
||||
timedJobs = new ArrayList(_timedJobs);
|
||||
}
|
||||
out.write("<!-- jobQueue rendering: after jobLock sync -->\n");
|
||||
out.flush();
|
||||
|
||||
StringBuffer buf = new StringBuffer(32*1024);
|
||||
buf.append("<h2>JobQueue</h2>");
|
||||
buf.append("# runners: ").append(numRunners).append(" [states=");
|
||||
if (states != null)
|
||||
for (int i = 0; i < states.length; i++)
|
||||
buf.append(states[i]).append(" ");
|
||||
buf.append("]<br />\n");
|
||||
/** job ID counter changed from int to long so it won't wrap negative */
|
||||
private static final int POISON_ID = -99999;
|
||||
|
||||
long now = _context.clock().now();
|
||||
|
||||
buf.append("# active jobs: ").append(activeJobs.size()).append("<ol>\n");
|
||||
for (int i = 0; i < activeJobs.size(); i++) {
|
||||
Job j = (Job)activeJobs.get(i);
|
||||
buf.append("<li> [started ").append(now-j.getTiming().getStartAfter()).append("ms ago]: ");
|
||||
buf.append(j.toString()).append("</li>\n");
|
||||
}
|
||||
buf.append("</ol>\n");
|
||||
buf.append("# just finished jobs: ").append(justFinishedJobs.size()).append("<ol>\n");
|
||||
for (int i = 0; i < justFinishedJobs.size(); i++) {
|
||||
Job j = (Job)justFinishedJobs.get(i);
|
||||
buf.append("<li> [finished ").append(now-j.getTiming().getActualEnd()).append("ms ago]: ");
|
||||
buf.append(j.toString()).append("</li>\n");
|
||||
}
|
||||
buf.append("</ol>\n");
|
||||
buf.append("# ready/waiting jobs: ").append(readyJobs.size()).append(" <i>(lots of these mean there's likely a big problem)</i><ol>\n");
|
||||
for (int i = 0; i < readyJobs.size(); i++) {
|
||||
Job j = (Job)readyJobs.get(i);
|
||||
buf.append("<li> [waiting ");
|
||||
buf.append(DataHelper.formatDuration(now-j.getTiming().getStartAfter()));
|
||||
buf.append("]: ");
|
||||
buf.append(j.toString()).append("</li>\n");
|
||||
}
|
||||
buf.append("</ol>\n");
|
||||
out.flush();
|
||||
|
||||
buf.append("# timed jobs: ").append(timedJobs.size()).append("<ol>\n");
|
||||
TreeMap ordered = new TreeMap();
|
||||
for (int i = 0; i < timedJobs.size(); i++) {
|
||||
Job j = (Job)timedJobs.get(i);
|
||||
ordered.put(new Long(j.getTiming().getStartAfter()), j);
|
||||
}
|
||||
for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) {
|
||||
Job j = (Job)iter.next();
|
||||
long time = j.getTiming().getStartAfter() - now;
|
||||
buf.append("<li>").append(j.getName()).append(" in ");
|
||||
buf.append(DataHelper.formatDuration(time)).append("</li>\n");
|
||||
}
|
||||
buf.append("</ol>\n");
|
||||
|
||||
out.write("<!-- jobQueue rendering: after main buffer, before stats -->\n");
|
||||
out.flush();
|
||||
|
||||
getJobStats(buf);
|
||||
|
||||
out.write("<!-- jobQueue rendering: after stats -->\n");
|
||||
out.flush();
|
||||
|
||||
out.write(buf.toString());
|
||||
private static class PoisonJob implements Job {
|
||||
public String getName() { return null; }
|
||||
public long getJobId() { return POISON_ID; }
|
||||
public JobTiming getTiming() { return null; }
|
||||
public void runJob() {}
|
||||
public Exception getAddedBy() { return null; }
|
||||
public void dropped() {}
|
||||
}
|
||||
|
||||
/** render the HTML for the job stats */
|
||||
private void getJobStats(StringBuffer buf) {
|
||||
buf.append("<table border=\"1\">\n");
|
||||
buf.append("<tr><td><b>Job</b></td><td><b>Runs</b></td>");
|
||||
buf.append("<td><b>Time</b></td><td><b><i>Avg</i></b></td><td><b><i>Max</i></b></td><td><b><i>Min</i></b></td>");
|
||||
buf.append("<td><b>Pending</b></td><td><b><i>Avg</i></b></td><td><b><i>Max</i></b></td><td><b><i>Min</i></b></td></tr>\n");
|
||||
long totRuns = 0;
|
||||
long totExecTime = 0;
|
||||
long avgExecTime = 0;
|
||||
long maxExecTime = -1;
|
||||
long minExecTime = -1;
|
||||
long totPendingTime = 0;
|
||||
long avgPendingTime = 0;
|
||||
long maxPendingTime = -1;
|
||||
long minPendingTime = -1;
|
||||
|
||||
TreeMap tstats = null;
|
||||
synchronized (_jobStats) {
|
||||
tstats = new TreeMap(_jobStats);
|
||||
}
|
||||
|
||||
for (Iterator iter = tstats.values().iterator(); iter.hasNext(); ) {
|
||||
JobStats stats = (JobStats)iter.next();
|
||||
buf.append("<tr>");
|
||||
buf.append("<td><b>").append(stats.getName()).append("</b></td>");
|
||||
buf.append("<td>").append(stats.getRuns()).append("</td>");
|
||||
buf.append("<td>").append(stats.getTotalTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getAvgTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getMaxTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getMinTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getTotalPendingTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getAvgPendingTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getMaxPendingTime()).append("</td>");
|
||||
buf.append("<td>").append(stats.getMinPendingTime()).append("</td>");
|
||||
buf.append("</tr>\n");
|
||||
totRuns += stats.getRuns();
|
||||
totExecTime += stats.getTotalTime();
|
||||
if (stats.getMaxTime() > maxExecTime)
|
||||
maxExecTime = stats.getMaxTime();
|
||||
if ( (minExecTime < 0) || (minExecTime > stats.getMinTime()) )
|
||||
minExecTime = stats.getMinTime();
|
||||
totPendingTime += stats.getTotalPendingTime();
|
||||
if (stats.getMaxPendingTime() > maxPendingTime)
|
||||
maxPendingTime = stats.getMaxPendingTime();
|
||||
if ( (minPendingTime < 0) || (minPendingTime > stats.getMinPendingTime()) )
|
||||
minPendingTime = stats.getMinPendingTime();
|
||||
/**
 * Comparator for the _timedJobs TreeSet.
 * Ensure different jobs with the same timing are different so they aren't removed.
 * @since 0.8.9
 */
private static class JobComparator implements Comparator<Job> {
    public int compare(Job l, Job r) {
        // equals first, Jobs generally don't override so this should be fast
        // And this MUST be first so we can remove a job even if its timing has changed.
        if (l.equals(r))
            return 0;
        // This is for _timedJobs, which always have a JobTiming.
        // PoisonJob only goes in _readyJobs.
        long ld = l.getTiming().getStartAfter() - r.getTiming().getStartAfter();
        if (ld < 0)
            return -1;
        if (ld > 0)
            return 1;
        ld = l.getJobId() - r.getJobId();
        if (ld < 0)
            return -1;
        if (ld > 0)
            return 1;
        return l.hashCode() - r.hashCode();
    }
}
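The job-id and hashCode tie-breakers exist because a TreeSet treats compare() == 0 as "same element": without them, two distinct jobs scheduled for the same instant would collapse into one entry. A small self-contained illustration, where Task stands in for the I2P Job class:

import java.util.Comparator;
import java.util.TreeSet;

// Demonstrates why the comparator needs a tie-breaker beyond the start time.
public class TieBreakerDemo {
    static final class Task {
        final long startAfter, id;
        Task(long startAfter, long id) { this.startAfter = startAfter; this.id = id; }
    }

    public static void main(String[] args) {
        Comparator<Task> timeOnly = Comparator.comparingLong(t -> t.startAfter);
        Comparator<Task> timeThenId = timeOnly.thenComparingLong(t -> t.id);

        Task a = new Task(5000, 1);
        Task b = new Task(5000, 2);              // different job, identical start time

        TreeSet<Task> collapsed = new TreeSet<>(timeOnly);
        collapsed.add(a);
        collapsed.add(b);                        // rejected: compares equal to a
        System.out.println(collapsed.size());    // 1 -- job b was lost

        TreeSet<Task> kept = new TreeSet<>(timeThenId);
        kept.add(a);
        kept.add(b);
        System.out.println(kept.size());         // 2 -- both jobs survive
    }
}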
|
||||
if (totRuns != 0) {
|
||||
if (totExecTime != 0)
|
||||
avgExecTime = totExecTime / totRuns;
|
||||
if (totPendingTime != 0)
|
||||
avgPendingTime = totPendingTime / totRuns;
|
||||
/**
|
||||
* Dump the current state.
|
||||
* For the router console jobs status page.
|
||||
*
|
||||
* @param readyJobs out parameter
|
||||
* @param timedJobs out parameter
|
||||
* @param activeJobs out parameter
|
||||
* @param justFinishedJobs out parameter
|
||||
* @return number of job runners
|
||||
* @since 0.8.9
|
||||
*/
|
||||
public int getJobs(Collection<Job> readyJobs, Collection<Job> timedJobs,
|
||||
Collection<Job> activeJobs, Collection<Job> justFinishedJobs) {
|
||||
for (JobQueueRunner runner :_queueRunners.values()) {
|
||||
Job job = runner.getCurrentJob();
|
||||
if (job != null) {
|
||||
activeJobs.add(job);
|
||||
} else {
|
||||
job = runner.getLastJob();
|
||||
if (job != null)
|
||||
justFinishedJobs.add(job);
|
||||
}
|
||||
}
|
||||
synchronized (_jobLock) {
|
||||
readyJobs.addAll(_readyJobs);
|
||||
timedJobs.addAll(_timedJobs);
|
||||
}
|
||||
return _queueRunners.size();
|
||||
}
|
||||
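The new getJobs() fills caller-supplied collections and returns the runner count, so the console can snapshot queue state without reaching into the queue's internals. A hedged usage sketch, assuming the net.i2p.router classes are on the classpath and ctx is an already-initialized RouterContext:

import java.util.ArrayList;
import java.util.List;

import net.i2p.router.Job;
import net.i2p.router.RouterContext;

// Snapshot the job queue the way a console page might (sketch only).
public class JobQueueSnapshot {
    public static void dump(RouterContext ctx) {
        List<Job> ready = new ArrayList<Job>();
        List<Job> timed = new ArrayList<Job>();
        List<Job> active = new ArrayList<Job>();
        List<Job> justFinished = new ArrayList<Job>();

        int runners = ctx.jobQueue().getJobs(ready, timed, active, justFinished);

        System.out.println(runners + " runners, " + active.size() + " active, "
                + ready.size() + " ready, " + timed.size() + " timed, "
                + justFinished.size() + " just finished");
    }
}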
|
||||
buf.append("<tr><td colspan=\"10\"><hr /></td><tr>");
|
||||
buf.append("<tr>");
|
||||
buf.append("<td><i><b>").append("SUMMARY").append("</b></i></td>");
|
||||
buf.append("<td><i>").append(totRuns).append("</i></td>");
|
||||
buf.append("<td><i>").append(totExecTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(avgExecTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(maxExecTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(minExecTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(totPendingTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(avgPendingTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(maxPendingTime).append("</i></td>");
|
||||
buf.append("<td><i>").append(minPendingTime).append("</i></td>");
|
||||
buf.append("</tr>\n");
|
||||
|
||||
buf.append("</table>\n");
|
||||
/**
|
||||
* Current job stats.
|
||||
* For the router console jobs status page.
|
||||
*
|
||||
* @since 0.8.9
|
||||
*/
|
||||
public Collection<JobStats> getJobStats() {
|
||||
return Collections.unmodifiableCollection(_jobStats.values());
|
||||
}
|
||||
|
||||
/** @deprecated moved to router console */
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,11 +4,10 @@ import net.i2p.util.Log;
|
||||
|
||||
/** a do run run run a do run run */
|
||||
class JobQueueRunner implements Runnable {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private boolean _keepRunning;
|
||||
private int _id;
|
||||
private long _numJobs;
|
||||
private final int _id;
|
||||
private Job _currentJob;
|
||||
private Job _lastJob;
|
||||
private long _lastBegin;
|
||||
@@ -19,16 +18,13 @@ class JobQueueRunner implements Runnable {
|
||||
_context = context;
|
||||
_id = id;
|
||||
_keepRunning = true;
|
||||
_numJobs = 0;
|
||||
_currentJob = null;
|
||||
_lastJob = null;
|
||||
_log = _context.logManager().getLog(JobQueueRunner.class);
|
||||
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_state = 1;
|
||||
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRequiredRateStat("jobQueue.jobLag", "Job run delay (ms)", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
//_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
//_state = 1;
|
||||
}
|
||||
|
||||
final int getState() { return _state; }
|
||||
@@ -41,16 +37,15 @@ class JobQueueRunner implements Runnable {
|
||||
public long getLastBegin() { return _lastBegin; }
|
||||
public long getLastEnd() { return _lastEnd; }
|
||||
public void run() {
|
||||
_state = 2;
|
||||
//_state = 2;
|
||||
long lastActive = _context.clock().now();
|
||||
long jobNum = 0;
|
||||
while ( (_keepRunning) && (_context.jobQueue().isAlive()) ) {
|
||||
_state = 3;
|
||||
//_state = 3;
|
||||
try {
|
||||
Job job = _context.jobQueue().getNext();
|
||||
_state = 4;
|
||||
//_state = 4;
|
||||
if (job == null) {
|
||||
_state = 5;
|
||||
//_state = 5;
|
||||
if (_context.router().isAlive())
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("getNext returned null - dead?");
|
||||
@@ -60,42 +55,41 @@ class JobQueueRunner implements Runnable {
|
||||
|
||||
long enqueuedTime = 0;
|
||||
if (job instanceof JobImpl) {
|
||||
_state = 6;
|
||||
//_state = 6;
|
||||
long when = ((JobImpl)job).getMadeReadyOn();
|
||||
if (when <= 0) {
|
||||
_state = 7;
|
||||
//_state = 7;
|
||||
_log.error("Job was not made ready?! " + job,
|
||||
new Exception("Not made ready?!"));
|
||||
} else {
|
||||
_state = 8;
|
||||
//_state = 8;
|
||||
enqueuedTime = now - when;
|
||||
}
|
||||
}
|
||||
|
||||
long betweenJobs = now - lastActive;
|
||||
_currentJob = job;
|
||||
_lastJob = null;
|
||||
_state = 9;
|
||||
//_state = 9;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Runner " + _id + " running job " + job.getJobId() + ": " + job.getName());
|
||||
long origStartAfter = job.getTiming().getStartAfter();
|
||||
long doStart = _context.clock().now();
|
||||
_state = 10;
|
||||
//_state = 10;
|
||||
job.getTiming().start();
|
||||
runCurrentJob();
|
||||
job.getTiming().end();
|
||||
_state = 11;
|
||||
//_state = 11;
|
||||
long duration = job.getTiming().getActualEnd() - job.getTiming().getActualStart();
|
||||
long beforeUpdate = _context.clock().now();
|
||||
_state = 12;
|
||||
//_state = 12;
|
||||
_context.jobQueue().updateStats(job, doStart, origStartAfter, duration);
|
||||
_state = 13;
|
||||
//_state = 13;
|
||||
long diff = _context.clock().now() - beforeUpdate;
|
||||
|
||||
long lag = doStart - origStartAfter;
|
||||
if (lag < 0) lag = 0;
|
||||
|
||||
_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
|
||||
//_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
|
||||
_context.statManager().addRateData("jobQueue.jobRun", duration, duration);
|
||||
_context.statManager().addRateData("jobQueue.jobLag", lag, 0);
|
||||
_context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);
|
||||
@@ -107,7 +101,7 @@ class JobQueueRunner implements Runnable {
|
||||
+ ") on job " + _currentJob);
|
||||
}
|
||||
|
||||
_state = 14;
|
||||
//_state = 14;
|
||||
|
||||
if (diff > 100) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
@@ -120,8 +114,7 @@ class JobQueueRunner implements Runnable {
|
||||
_lastJob = _currentJob;
|
||||
_currentJob = null;
|
||||
_lastEnd = lastActive;
|
||||
jobNum++;
|
||||
_state = 15;
|
||||
//_state = 15;
|
||||
|
||||
//if ( (jobNum % 10) == 0)
|
||||
// System.gc();
|
||||
@@ -130,22 +123,22 @@ class JobQueueRunner implements Runnable {
|
||||
_log.log(Log.CRIT, "WTF, error running?", t);
|
||||
}
|
||||
}
|
||||
_state = 16;
|
||||
//_state = 16;
|
||||
if (_context.router().isAlive())
|
||||
if (_log.shouldLog(Log.CRIT))
|
||||
_log.log(Log.CRIT, "Queue runner " + _id + " exiting");
|
||||
_context.jobQueue().removeRunner(_id);
|
||||
_state = 17;
|
||||
//_state = 17;
|
||||
}
|
||||
|
||||
private void runCurrentJob() {
|
||||
try {
|
||||
_state = 18;
|
||||
//_state = 18;
|
||||
_lastBegin = _context.clock().now();
|
||||
_currentJob.runJob();
|
||||
_state = 19;
|
||||
//_state = 19;
|
||||
} catch (OutOfMemoryError oom) {
|
||||
_state = 20;
|
||||
//_state = 20;
|
||||
try {
|
||||
if (_log.shouldLog(Log.CRIT))
|
||||
_log.log(Log.CRIT, "Router ran out of memory, shutting down", oom);
|
||||
@@ -157,12 +150,12 @@ class JobQueueRunner implements Runnable {
|
||||
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
|
||||
System.exit(-1);
|
||||
} catch (Throwable t) {
|
||||
_state = 21;
|
||||
//_state = 21;
|
||||
if (_log.shouldLog(Log.CRIT))
|
||||
_log.log(Log.CRIT, "Error processing job [" + _currentJob.getName()
|
||||
+ "] on thread " + _id + ": " + t.getMessage(), t);
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("The above job was enqueued by: ", _currentJob.getAddedBy());
|
||||
//if (_log.shouldLog(Log.ERROR))
|
||||
// _log.error("The above job was enqueued by: ", _currentJob.getAddedBy());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,9 +2,12 @@ package net.i2p.router;

import net.i2p.data.DataHelper;

/** glorified struct to contain basic job stats */
class JobStats {
private String _job;
/**
* Glorified struct to contain basic job stats.
* Public for router console only.
*/
public class JobStats {
private final String _job;
private volatile long _numRuns;
private volatile long _totalTime;
private volatile long _maxTime;

@@ -15,11 +18,8 @@ class JobStats {

public JobStats(String name) {
_job = name;
_numRuns = 0;
_totalTime = 0;
_maxTime = -1;
_minTime = -1;
_totalPendingTime = 0;
_maxPendingTime = -1;
_minPendingTime = -1;
}

@@ -59,7 +59,9 @@ class JobStats {
return 0;
}

@Override
public int hashCode() { return _job.hashCode(); }
@Override
public boolean equals(Object obj) {
if ( (obj != null) && (obj instanceof JobStats) ) {
JobStats stats = (JobStats)obj;

@@ -73,8 +75,9 @@ class JobStats {
}
}

@Override
public String toString() {
StringBuffer buf = new StringBuffer();
StringBuilder buf = new StringBuilder();
buf.append("Over ").append(getRuns()).append(" runs, job <b>").append(getName()).append("</b> took ");
buf.append(getTotalTime()).append("ms (").append(getAvgTime()).append("ms/").append(getMaxTime()).append("ms/");
buf.append(getMinTime()).append("ms avg/max/min) after a total lag of ");
@@ -18,13 +18,11 @@ public class JobTiming implements Clock.ClockUpdateListener {
private long _start;
private long _actualStart;
private long _actualEnd;
private RouterContext _context;
private final RouterContext _context;

public JobTiming(RouterContext context) {
_context = context;
_start = context.clock().now();
_actualStart = 0;
_actualEnd = 0;
//context.clock().addUpdateListener(this);
}

@@ -33,6 +31,11 @@ public class JobTiming implements Clock.ClockUpdateListener {
*
*/
public long getStartAfter() { return _start; }

/**
* WARNING - this does not force a resort of the job queue any more...
* ALWAYS call JobImpl.requeue() instead if job is already queued.
*/
public void setStartAfter(long startTime) {
_start = startTime;
// sure, this current job object may not already be on the queue, so
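// Illustrative sketch of the warning above (MyJob is a placeholder name; requeue(long)
// is assumed from the javadoc to be the supported way to reschedule a queued job):
//
//     Job j = new MyJob(ctx);
//     j.getTiming().setStartAfter(ctx.clock().now() + 5000); // fine only before addJob()
//     ctx.jobQueue().addJob(j);
//     ...
//     j.requeue(5000); // once the job is queued, use requeue() so the queue is re-sorted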
@@ -12,10 +12,8 @@ import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataStructure;
|
||||
@@ -27,6 +25,8 @@ import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.data.SigningPublicKey;
|
||||
import net.i2p.util.Clock;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.SecureDirectory;
|
||||
import net.i2p.util.SecureFileOutputStream;
|
||||
|
||||
/**
|
||||
* Maintain all of the key pairs for the router.
|
||||
@@ -35,14 +35,14 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class KeyManager {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private PrivateKey _privateKey;
|
||||
private PublicKey _publicKey;
|
||||
private SigningPrivateKey _signingPrivateKey;
|
||||
private SigningPublicKey _signingPublicKey;
|
||||
private Map _leaseSetKeys; // Destination --> LeaseSetKeys
|
||||
private SynchronizeKeysJob _synchronizeJob;
|
||||
private final Map<Hash, LeaseSetKeys> _leaseSetKeys; // Destination --> LeaseSetKeys
|
||||
private final SynchronizeKeysJob _synchronizeJob;
|
||||
|
||||
public final static String PROP_KEYDIR = "router.keyBackupDir";
|
||||
public final static String DEFAULT_KEYDIR = "keyBackup";
|
||||
@@ -59,11 +59,7 @@ public class KeyManager {
|
||||
_context = context;
|
||||
_log = _context.logManager().getLog(KeyManager.class);
|
||||
_synchronizeJob = new SynchronizeKeysJob();
|
||||
setPrivateKey(null);
|
||||
setPublicKey(null);
|
||||
setSigningPrivateKey(null);
|
||||
setSigningPublicKey(null);
|
||||
_leaseSetKeys = new HashMap();
|
||||
_leaseSetKeys = new ConcurrentHashMap();
|
||||
}
|
||||
|
||||
public void startup() {
|
||||
@@ -102,44 +98,32 @@ public class KeyManager {
|
||||
public void registerKeys(Destination dest, SigningPrivateKey leaseRevocationPrivateKey, PrivateKey endpointDecryptionKey) {
|
||||
_log.info("Registering keys for destination " + dest.calculateHash().toBase64());
|
||||
LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey);
|
||||
synchronized (_leaseSetKeys) {
|
||||
_leaseSetKeys.put(dest.calculateHash(), keys);
|
||||
}
|
||||
_leaseSetKeys.put(dest.calculateHash(), keys);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait one second, as this will get called 4 times in quick succession
|
||||
* There is still a race here though, if a key is set while the sync job is running
|
||||
*/
|
||||
private void queueWrite() {
|
||||
Clock cl = _context.clock();
|
||||
JobQueue q = _context.jobQueue();
|
||||
if ( (cl == null) || (q == null) ) return;
|
||||
_synchronizeJob.getTiming().setStartAfter(cl.now());
|
||||
_synchronizeJob.getTiming().setStartAfter(cl.now() + 1000);
|
||||
q.addJob(_synchronizeJob);
|
||||
}
|
||||
|
||||
public LeaseSetKeys unregisterKeys(Destination dest) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Unregistering keys for destination " + dest.calculateHash().toBase64());
|
||||
LeaseSetKeys rv = null;
|
||||
synchronized (_leaseSetKeys) {
|
||||
rv = (LeaseSetKeys)_leaseSetKeys.remove(dest.calculateHash());
|
||||
}
|
||||
return rv;
|
||||
return _leaseSetKeys.remove(dest.calculateHash());
|
||||
}
|
||||
|
||||
public LeaseSetKeys getKeys(Destination dest) {
|
||||
return getKeys(dest.calculateHash());
|
||||
}
|
||||
public LeaseSetKeys getKeys(Hash dest) {
|
||||
synchronized (_leaseSetKeys) {
|
||||
return (LeaseSetKeys)_leaseSetKeys.get(dest);
|
||||
}
|
||||
}
|
||||
|
||||
public Set getAllKeys() {
|
||||
HashSet keys = new HashSet();
|
||||
synchronized (_leaseSetKeys) {
|
||||
keys.addAll(_leaseSetKeys.values());
|
||||
}
|
||||
return keys;
|
||||
return _leaseSetKeys.get(dest);
|
||||
}
|
||||
|
||||
private class SynchronizeKeysJob extends JobImpl {
|
||||
@@ -147,10 +131,8 @@ public class KeyManager {
|
||||
super(KeyManager.this._context);
|
||||
}
|
||||
public void runJob() {
|
||||
String keyDir = getContext().getProperty(PROP_KEYDIR);
|
||||
if (keyDir == null)
|
||||
keyDir = DEFAULT_KEYDIR;
|
||||
File dir = new File(keyDir);
|
||||
String keyDir = getContext().getProperty(PROP_KEYDIR, DEFAULT_KEYDIR);
|
||||
File dir = new SecureDirectory(getContext().getRouterDir(), keyDir);
|
||||
if (!dir.exists())
|
||||
dir.mkdirs();
|
||||
if (dir.exists() && dir.isDirectory() && dir.canRead() && dir.canWrite()) {
|
||||
@@ -171,33 +153,55 @@ public class KeyManager {
|
||||
}
|
||||
|
||||
private synchronized void syncPrivateKey(File keyDir) {
|
||||
File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_ENC);
|
||||
DataStructure ds;
|
||||
File keyFile = new File(keyDir, KEYFILE_PRIVATE_ENC);
|
||||
boolean exists = (_privateKey != null);
|
||||
if (!exists)
|
||||
_privateKey = new PrivateKey();
|
||||
_privateKey = (PrivateKey)syncKey(keyFile, _privateKey, exists);
|
||||
if (exists)
|
||||
ds = _privateKey;
|
||||
else
|
||||
ds = new PrivateKey();
|
||||
DataStructure readin = syncKey(keyFile, ds, exists);
|
||||
if (readin != null && !exists)
|
||||
_privateKey = (PrivateKey) readin;
|
||||
}
|
||||
|
||||
private synchronized void syncPublicKey(File keyDir) {
|
||||
File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_ENC);
|
||||
DataStructure ds;
|
||||
File keyFile = new File(keyDir, KEYFILE_PUBLIC_ENC);
|
||||
boolean exists = (_publicKey != null);
|
||||
if (!exists)
|
||||
_publicKey = new PublicKey();
|
||||
_publicKey = (PublicKey)syncKey(keyFile, _publicKey, exists);
|
||||
if (exists)
|
||||
ds = _publicKey;
|
||||
else
|
||||
ds = new PublicKey();
|
||||
DataStructure readin = syncKey(keyFile, ds, exists);
|
||||
if (readin != null && !exists)
|
||||
_publicKey = (PublicKey) readin;
|
||||
}
|
||||
|
||||
private synchronized void syncSigningKey(File keyDir) {
|
||||
File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_SIGNING);
|
||||
DataStructure ds;
|
||||
File keyFile = new File(keyDir, KEYFILE_PRIVATE_SIGNING);
|
||||
boolean exists = (_signingPrivateKey != null);
|
||||
if (!exists)
|
||||
_signingPrivateKey = new SigningPrivateKey();
|
||||
_signingPrivateKey = (SigningPrivateKey)syncKey(keyFile, _signingPrivateKey, exists);
|
||||
if (exists)
|
||||
ds = _signingPrivateKey;
|
||||
else
|
||||
ds = new SigningPrivateKey();
|
||||
DataStructure readin = syncKey(keyFile, ds, exists);
|
||||
if (readin != null && !exists)
|
||||
_signingPrivateKey = (SigningPrivateKey) readin;
|
||||
}
|
||||
|
||||
private synchronized void syncVerificationKey(File keyDir) {
|
||||
File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_SIGNING);
|
||||
DataStructure ds;
|
||||
File keyFile = new File(keyDir, KEYFILE_PUBLIC_SIGNING);
|
||||
boolean exists = (_signingPublicKey != null);
|
||||
if (!exists)
|
||||
_signingPublicKey = new SigningPublicKey();
|
||||
_signingPublicKey = (SigningPublicKey)syncKey(keyFile, _signingPublicKey, exists);
|
||||
if (exists)
|
||||
ds = _signingPublicKey;
|
||||
else
|
||||
ds = new SigningPublicKey();
|
||||
DataStructure readin = syncKey(keyFile, ds, exists);
|
||||
if (readin != null && !exists)
|
||||
_signingPublicKey = (SigningPublicKey) readin;
|
||||
}
|
||||
|
||||
private DataStructure syncKey(File keyFile, DataStructure structure, boolean exists) {
|
||||
@@ -205,7 +209,7 @@ public class KeyManager {
|
||||
FileInputStream in = null;
|
||||
try {
|
||||
if (exists) {
|
||||
out = new FileOutputStream(keyFile);
|
||||
out = new SecureFileOutputStream(keyFile);
|
||||
structure.writeBytes(out);
|
||||
return structure;
|
||||
} else {
|
||||
|
||||
@@ -73,6 +73,7 @@ public class LeaseSetKeys extends DataStructureImpl {
_revocationKey.writeBytes(out);
}

@Override
public int hashCode() {
int rv = 0;
rv += DataHelper.hashCode(_dest);

@@ -81,6 +82,7 @@ public class LeaseSetKeys extends DataStructureImpl {
return rv;
}

@Override
public boolean equals(Object obj) {
if ( (obj != null) && (obj instanceof LeaseSetKeys) ) {
LeaseSetKeys keys = (LeaseSetKeys)obj;
@@ -1,639 +0,0 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Certificate;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.SessionTag;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2np.DataMessage;
|
||||
import net.i2p.data.i2np.DeliveryInstructions;
|
||||
import net.i2p.data.i2np.DeliveryStatusMessage;
|
||||
import net.i2p.data.i2np.GarlicMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.TunnelGatewayMessage;
|
||||
import net.i2p.router.message.GarlicMessageBuilder;
|
||||
import net.i2p.router.message.PayloadGarlicConfig;
|
||||
import net.i2p.router.tunnel.TunnelCreatorConfig;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.SimpleTimer;
|
||||
|
||||
/**
|
||||
* Coordinate some tests of peers to see how much load they can handle. If
|
||||
* TEST_LIVE_TUNNELS is set to false, it builds load test tunnels across various
|
||||
* peers in ways that are not anonymity sensitive (but may help with testing the net).
|
||||
* If it is set to true, however, it runs a few tests at a time for actual tunnels that
|
||||
* are built, to help determine whether our peer selection is insufficient.
|
||||
*
|
||||
* Load tests of fake tunnels are conducted by building a single one hop inbound
|
||||
* tunnel with the peer in question acting as the inbound gateway. We then send
|
||||
* messages directly to that gateway, which they batch up and send "down the
|
||||
* tunnel" (aka directly to us), at which point we then send another message,
|
||||
* and so on, until the tunnel expires. Along the way, we record a few vital
|
||||
* stats to the "loadtest.log" file. If we don't receive a message, we send another
|
||||
* after 10 seconds.
|
||||
*
|
||||
* If "router.loadTestSmall=true", we transmit a tiny DeliveryStatusMessage (~96 bytes
|
||||
* at the SSU level), which is sent back to us as a single TunnelDataMessage (~1KB).
|
||||
* Otherwise, we transmit a 4KB DataMessage wrapped inside a garlic message, which is
|
||||
* sent back to us as five (1KB) TunnelDataMessages. This size is chosen because the
|
||||
* streaming lib uses 4KB messages by default.
|
||||
*
|
||||
* Load tests of live tunnels pick a random tunnel from the tested pool's pair (e.g. if
|
||||
* we are testing an outbound tunnel for a particular destination, it picks an inbound
|
||||
* tunnel from that destination's inbound pool), with each message going down that one
|
||||
* randomly paired tunnel for the duration of the load test (varying the paired tunnel
|
||||
* with each message had poor results)
|
||||
*
|
||||
*/
|
||||
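// A minimal configuration sketch for the behavior described above, using only the
// property names defined in this class; note that FORCE_DISABLE below still overrides
// all of these settings:
//
//     router.enableLoadTesting=true   # opt in (DEFAULT_ENABLE is false)
//     router.loadTestLog=true         # append per-hop results to loadtest.log
//     router.loadTestSmall=false      # 4KB DataMessage payloads instead of tiny DeliveryStatusMessages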
public class LoadTestManager {
|
||||
private RouterContext _context;
|
||||
private Log _log;
|
||||
private Writer _out;
|
||||
private List _untestedPeers;
|
||||
private List _active;
|
||||
|
||||
private static final String PROP_LOG_DATA = "router.loadTestLog";
|
||||
private static final String DEFAULT_LOG_DATA = "false";
|
||||
|
||||
public LoadTestManager(RouterContext ctx) {
|
||||
_context = ctx;
|
||||
_log = ctx.logManager().getLog(LoadTestManager.class);
|
||||
_active = Collections.synchronizedList(new ArrayList());
|
||||
if (Boolean.valueOf(ctx.getProperty(PROP_LOG_DATA, DEFAULT_LOG_DATA)).booleanValue()) {
|
||||
try {
|
||||
_out = new BufferedWriter(new FileWriter("loadtest.log", true));
|
||||
_out.write("startup at " + ctx.clock().now() + "\n");
|
||||
} catch (IOException ioe) {
|
||||
_log.log(Log.CRIT, "error creating log", ioe);
|
||||
}
|
||||
}
|
||||
_context.statManager().createRateStat("test.lifetimeSuccessful", "How many messages we can pump through a load test during a tunnel's lifetime", "test", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
|
||||
_context.statManager().createRateStat("test.lifetimeFailed", "How many messages we fail to pump through (period == successful)", "test", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
|
||||
_context.statManager().createRateStat("test.timeoutAfter", "How many messages have we successfully pumped through a tunnel when one particular message times out", "test", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
|
||||
_context.statManager().createRateStat("test.rtt", "How long it takes to get a reply", "test", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
|
||||
_context.statManager().createRateStat("test.rttHigh", "How long it takes to get a reply, if it is a slow rtt", "test", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
|
||||
}
|
||||
|
||||
public static final boolean TEST_LIVE_TUNNELS = true;
|
||||
|
||||
/** 1 peer at a time */
|
||||
private static final int CONCURRENT_PEERS = 1;
|
||||
/** 4 messages per peer at a time */
|
||||
private static final int CONCURRENT_MESSAGES = 1;//4;
|
||||
|
||||
private static final boolean DEFAULT_ENABLE = false;
|
||||
|
||||
/** disable all load testing for the moment */
|
||||
public static final boolean FORCE_DISABLE = true;
|
||||
|
||||
public static boolean isEnabled(I2PAppContext ctx) {
|
||||
if (FORCE_DISABLE) return false;
|
||||
String enable = ctx.getProperty("router.enableLoadTesting");
|
||||
if ( (DEFAULT_ENABLE) && (enable != null) && (!Boolean.valueOf(enable).booleanValue()) )
|
||||
return false;
|
||||
else if ( (!DEFAULT_ENABLE) && ((enable == null) || (!Boolean.valueOf(enable).booleanValue()) ) )
|
||||
return false;
|
||||
return true;
|
||||
}
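// With DEFAULT_ENABLE == false, the branches above reduce to: load testing runs only when
// "router.enableLoadTesting" is explicitly set to true, and even then FORCE_DISABLE
// short-circuits the whole check to false in this build.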
|
||||
public static void setEnableLoadTesting(RouterContext ctx, boolean enable) {
|
||||
if (enable)
|
||||
ctx.router().setConfigSetting("router.enableLoadTesting", "true");
|
||||
else
|
||||
ctx.router().setConfigSetting("router.enableLoadTesting", "false");
|
||||
}
|
||||
|
||||
private int getConcurrency() {
|
||||
if (!isEnabled(_context)) return 0;
|
||||
|
||||
int rv = _context.getProperty("router.loadTestConcurrency", CONCURRENT_PEERS);
|
||||
if (rv < 0)
|
||||
rv = 0;
|
||||
if (rv > 50)
|
||||
rv = 50;
|
||||
return rv;
|
||||
}
|
||||
|
||||
private int getPeerMessages() {
|
||||
int rv = _context.getProperty("router.loadTestMessagesPerPeer", CONCURRENT_MESSAGES);
|
||||
if (rv < 1)
|
||||
rv = 1;
|
||||
if (rv > 50)
|
||||
rv = 50;
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* Actually send the messages through the given tunnel
|
||||
*/
|
||||
private void runTest(LoadTestTunnelConfig tunnel) {
|
||||
if (!isEnabled(_context)) return;
|
||||
log(tunnel, "start");
|
||||
int peerMessages = getPeerMessages();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Run test on " + tunnel + " with " + peerMessages + " messages");
|
||||
for (int i = 0; i < peerMessages; i++)
|
||||
sendTestMessage(tunnel);
|
||||
}
|
||||
|
||||
private void pickTunnels(LoadTestTunnelConfig tunnel) {
|
||||
TunnelInfo inbound = null;
|
||||
TunnelInfo outbound = null;
|
||||
if (tunnel.getTunnel().isInbound()) {
|
||||
inbound = _context.tunnelManager().getTunnelInfo(tunnel.getReceiveTunnelId(0));
|
||||
if ( (inbound == null) && (_log.shouldLog(Log.WARN)) )
|
||||
_log.warn("where are we? inbound tunnel isn't known: " + tunnel, new Exception("source"));
|
||||
if (tunnel.getTunnel().getDestination() != null)
|
||||
outbound = _context.tunnelManager().selectOutboundTunnel(tunnel.getTunnel().getDestination());
|
||||
else
|
||||
outbound = _context.tunnelManager().selectOutboundTunnel();
|
||||
} else {
|
||||
outbound = _context.tunnelManager().getTunnelInfo(tunnel.getSendTunnelId(0));
|
||||
if ( (outbound == null) && (_log.shouldLog(Log.WARN)) )
|
||||
_log.warn("where are we? outbound tunnel isn't known: " + tunnel, new Exception("source"));
|
||||
if (tunnel.getTunnel().getDestination() != null)
|
||||
inbound = _context.tunnelManager().selectInboundTunnel(tunnel.getTunnel().getDestination());
|
||||
else
|
||||
inbound = _context.tunnelManager().selectInboundTunnel();
|
||||
}
|
||||
tunnel.setInbound(inbound);
|
||||
tunnel.setOutbound(outbound);
|
||||
}
|
||||
|
||||
private void sendTestMessage(LoadTestTunnelConfig tunnel) {
|
||||
long now = _context.clock().now();
|
||||
if (now > tunnel.getExpiration()) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Not sending a test message to " + tunnel + " because it expired");
|
||||
tunnel.logComplete();
|
||||
_active.remove(tunnel);
|
||||
return;
|
||||
}
|
||||
|
||||
if (TEST_LIVE_TUNNELS) {
|
||||
TunnelInfo inbound = tunnel.getInbound();
|
||||
TunnelInfo outbound = tunnel.getOutbound();
|
||||
if ( (inbound == null) || (outbound == null) ) {
|
||||
pickTunnels(tunnel);
|
||||
inbound = tunnel.getInbound();
|
||||
outbound = tunnel.getOutbound();
|
||||
}
|
||||
|
||||
if (inbound == null) {
|
||||
log(tunnel, "No inbound tunnels found");
|
||||
_active.remove(tunnel);
|
||||
return;
|
||||
} else if (outbound == null) {
|
||||
log(tunnel, "No outbound tunnels found");
|
||||
tunnel.logComplete();
|
||||
_active.remove(tunnel);
|
||||
return;
|
||||
}
|
||||
|
||||
if ( (now >= inbound.getExpiration()) || (now >= outbound.getExpiration()) ) {
|
||||
tunnel.logComplete();
|
||||
_active.remove(tunnel);
|
||||
return;
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("inbound and outbound found for " + tunnel);
|
||||
|
||||
I2NPMessage payloadMessage = createPayloadMessage();
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("testing live tunnels with inbound [" + inbound + "] and outbound [" + outbound + "]");
|
||||
|
||||
// this should take into consideration both the inbound and outbound tunnels
|
||||
// ... but it doesn't, yet.
|
||||
long uniqueId = -1;
|
||||
if (payloadMessage != null) {
|
||||
uniqueId = payloadMessage.getUniqueId();
|
||||
} else {
|
||||
tunnel.logComplete();
|
||||
_active.remove(tunnel);
|
||||
return;
|
||||
}
|
||||
_context.messageRegistry().registerPending(new Selector(tunnel, uniqueId),
|
||||
new SendAgain(_context, tunnel, uniqueId, true),
|
||||
new SendAgain(_context, tunnel, uniqueId, false),
|
||||
10*1000);
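// the 10*1000 ms registration timeout is the "send another after 10 seconds" retry from
// the class javadoc: the SendAgain constructed with ok=false runs when no matching reply
// arrives in time, while the ok=true instance fires on a match and sends the next message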
|
||||
_context.tunnelDispatcher().dispatchOutbound(payloadMessage, outbound.getSendTunnelId(0),
|
||||
inbound.getReceiveTunnelId(0),
|
||||
inbound.getPeer(0));
|
||||
//log(tunnel, payloadMessage.getUniqueId() + " sent via " + inbound + " / " + outbound);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("NOT testing live tunnels for [" + tunnel + "]");
|
||||
RouterInfo target = _context.netDb().lookupRouterInfoLocally(tunnel.getPeer(0));
|
||||
if (target == null) {
|
||||
log(tunnel, "lookup failed");
|
||||
return;
|
||||
}
|
||||
|
||||
I2NPMessage payloadMessage = createPayloadMessage();
|
||||
|
||||
TunnelGatewayMessage tm = new TunnelGatewayMessage(_context);
|
||||
tm.setMessage(payloadMessage);
|
||||
tm.setTunnelId(tunnel.getReceiveTunnelId(0));
|
||||
tm.setMessageExpiration(payloadMessage.getMessageExpiration());
|
||||
|
||||
OutNetMessage om = new OutNetMessage(_context);
|
||||
om.setMessage(tm);
|
||||
SendAgain failed = new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), false);
|
||||
om.setOnFailedReplyJob(failed);
|
||||
om.setOnReplyJob(new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), true));
|
||||
//om.setOnFailedSendJob(failed);
|
||||
om.setReplySelector(new Selector(tunnel, payloadMessage.getUniqueId()));
|
||||
om.setTarget(target);
|
||||
om.setExpiration(tm.getMessageExpiration());
|
||||
om.setPriority(40);
|
||||
_context.outNetMessagePool().add(om);
|
||||
//log(tunnel, m.getMessageId() + " sent");
|
||||
}
|
||||
}
|
||||
|
||||
private static final boolean SMALL_PAYLOAD = false;
|
||||
|
||||
private boolean useSmallPayload() {
|
||||
return Boolean.valueOf(_context.getProperty("router.loadTestSmall", SMALL_PAYLOAD + "")).booleanValue();
|
||||
}
|
||||
|
||||
private I2NPMessage createPayloadMessage() {
|
||||
// doesnt matter whats in the message, as it gets dropped anyway, since we match
|
||||
// on it with the message.uniqueId
|
||||
if (useSmallPayload()) {
|
||||
DeliveryStatusMessage m = new DeliveryStatusMessage(_context);
|
||||
long now = _context.clock().now();
|
||||
m.setArrival(now);
|
||||
m.setMessageExpiration(now + 10*1000);
|
||||
m.setMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
|
||||
return m;
|
||||
} else {
|
||||
DataMessage m = new DataMessage(_context);
|
||||
byte data[] = new byte[4096];
|
||||
_context.random().nextBytes(data);
|
||||
m.setData(data);
|
||||
long now = _context.clock().now();
|
||||
m.setMessageExpiration(now + 10*1000);
|
||||
|
||||
if (true) {
|
||||
// garlic wrap the data message to ourselves so the endpoints and gateways
|
||||
// can't tell its a test, encrypting it with a random key and tag,
|
||||
// remembering that key+tag so that we can decrypt it later without any ElGamal
|
||||
DeliveryInstructions instructions = new DeliveryInstructions();
|
||||
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
|
||||
|
||||
PayloadGarlicConfig payload = new PayloadGarlicConfig();
|
||||
payload.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
|
||||
payload.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
|
||||
payload.setId(m.getUniqueId());
|
||||
payload.setPayload(m);
|
||||
payload.setRecipient(_context.router().getRouterInfo());
|
||||
payload.setDeliveryInstructions(instructions);
|
||||
payload.setRequestAck(false);
|
||||
payload.setExpiration(m.getMessageExpiration());
|
||||
|
||||
SessionKey encryptKey = _context.keyGenerator().generateSessionKey();
|
||||
SessionTag encryptTag = new SessionTag(true);
|
||||
SessionKey sentKey = new SessionKey();
|
||||
Set sentTags = null;
|
||||
GarlicMessage msg = GarlicMessageBuilder.buildMessage(_context, payload, sentKey, sentTags,
|
||||
_context.keyManager().getPublicKey(),
|
||||
encryptKey, encryptTag);
|
||||
|
||||
Set encryptTags = new HashSet(1);
|
||||
encryptTags.add(encryptTag);
|
||||
_context.sessionKeyManager().tagsReceived(encryptKey, encryptTags);
|
||||
|
||||
return msg;
|
||||
} else {
|
||||
return m;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private class SendAgain extends JobImpl implements ReplyJob {
|
||||
private LoadTestTunnelConfig _cfg;
|
||||
private long _messageId;
|
||||
private boolean _ok;
|
||||
private boolean _run;
|
||||
private long _dontStartUntil;
|
||||
public SendAgain(RouterContext ctx, LoadTestTunnelConfig cfg, long messageId, boolean ok) {
|
||||
super(ctx);
|
||||
_cfg = cfg;
|
||||
_messageId = messageId;
|
||||
_ok = ok;
|
||||
_run = false;
|
||||
_dontStartUntil = ctx.clock().now() + 10*1000;
|
||||
}
|
||||
public String getName() { return "send another load test"; }
|
||||
public void runJob() {
|
||||
if (!_ok) {
|
||||
if (!_run) {
|
||||
log(_cfg, _messageId + " " + _cfg.getFullMessageCount() + " TIMEOUT");
|
||||
getContext().statManager().addRateData("test.timeoutAfter", _cfg.getFullMessageCount(), 0);
|
||||
if (getContext().clock().now() >= _dontStartUntil) {
|
||||
sendTestMessage(_cfg);
|
||||
_cfg.incrementFailed();
|
||||
} else {
|
||||
getTiming().setStartAfter(_dontStartUntil);
|
||||
getContext().jobQueue().addJob(SendAgain.this);
|
||||
}
|
||||
}
|
||||
_run = true;
|
||||
} else {
|
||||
sendTestMessage(_cfg);
|
||||
}
|
||||
}
|
||||
|
||||
public void setMessage(I2NPMessage message) {}
|
||||
}
|
||||
|
||||
private class Selector implements MessageSelector {
|
||||
private LoadTestTunnelConfig _cfg;
|
||||
private long _messageId;
|
||||
public Selector(LoadTestTunnelConfig cfg, long messageId) {
|
||||
_cfg = cfg;
|
||||
_messageId = messageId;
|
||||
}
|
||||
public boolean continueMatching() { return false; }
|
||||
public long getExpiration() { return _cfg.getExpiration(); }
|
||||
public boolean isMatch(I2NPMessage message) {
|
||||
if (message.getUniqueId() == _messageId) {
|
||||
long count = _cfg.getFullMessageCount();
|
||||
_cfg.incrementFull();
|
||||
long period = _context.clock().now() - (message.getMessageExpiration() - 10*1000);
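// the payload's expiration was set to send-time + 10s in createPayloadMessage(), so
// subtracting that offset recovers the send time and "period" is the observed round trip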
|
||||
log(_cfg, _messageId + " " + count + " after " + period);
|
||||
_context.statManager().addRateData("test.rtt", period, count);
|
||||
if (period > 2000)
|
||||
_context.statManager().addRateData("test.rttHigh", period, count);
|
||||
TunnelInfo info = _cfg.getOutbound();
|
||||
if (info != null)
|
||||
info.incrementVerifiedBytesTransferred(5*1024);
|
||||
// the inbound tunnel is incremented by the tunnel management system itself,
|
||||
// so don't double count it here
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private void log(LoadTestTunnelConfig tunnel, String msg) {
|
||||
if (_out == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
if (tunnel.getInbound() == null) {
|
||||
for (int i = 0; i < tunnel.getLength()-1; i++) {
|
||||
Hash peer = tunnel.getPeer(i);
|
||||
if ( (peer != null) && (peer.equals(_context.routerHash())) )
|
||||
continue;
|
||||
else if (peer != null)
|
||||
buf.append(peer.toBase64());
|
||||
else
|
||||
buf.append("[unknown_peer]");
|
||||
buf.append(" ");
|
||||
TunnelId id = tunnel.getReceiveTunnelId(i);
|
||||
if (id != null)
|
||||
buf.append(id.getTunnelId());
|
||||
else
|
||||
buf.append("[unknown_tunnel]");
|
||||
buf.append(" ");
|
||||
buf.append(_context.clock().now()).append(" hop ").append(i).append(" ").append(msg).append("\n");
|
||||
}
|
||||
} else {
|
||||
int hop = 0;
|
||||
TunnelInfo info = tunnel.getOutbound();
|
||||
for (int i = 0; (info != null) && (i < info.getLength()); i++) {
|
||||
Hash peer = info.getPeer(i);
|
||||
if ( (peer != null) && (peer.equals(_context.routerHash())) )
|
||||
continue;
|
||||
else if (peer != null)
|
||||
buf.append(peer.toBase64());
|
||||
else
|
||||
buf.append("[unknown_peer]");
|
||||
buf.append(" ");
|
||||
TunnelId id = info.getReceiveTunnelId(i);
|
||||
if (id != null)
|
||||
buf.append(id.getTunnelId());
|
||||
else
|
||||
buf.append("[unknown_tunnel]");
|
||||
buf.append(" ");
|
||||
buf.append(_context.clock().now()).append(" out_hop ").append(hop).append(" ").append(msg).append("\n");
|
||||
hop++;
|
||||
}
|
||||
info = tunnel.getInbound();
|
||||
for (int i = 0; (info != null) && (i < info.getLength()); i++) {
|
||||
Hash peer = info.getPeer(i);
|
||||
if ( (peer != null) && (peer.equals(_context.routerHash())) )
|
||||
continue;
|
||||
else if (peer != null)
|
||||
buf.append(peer.toBase64());
|
||||
else
|
||||
buf.append("[unknown_peer]");
|
||||
buf.append(" ");
|
||||
TunnelId id = info.getReceiveTunnelId(i);
|
||||
if (id != null)
|
||||
buf.append(id.getTunnelId());
|
||||
else
|
||||
buf.append("[unknown_tunnel]");
|
||||
buf.append(" ");
|
||||
buf.append(_context.clock().now()).append(" in_hop ").append(hop).append(" ").append(msg).append("\n");
|
||||
hop++;
|
||||
}
|
||||
}
|
||||
try {
|
||||
synchronized (_out) {
|
||||
_out.write(buf.toString());
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
_log.error("error logging [" + msg + "]", ioe);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If we are testing live tunnels, see if we want to test the one that was just created
|
||||
* fully.
|
||||
*/
|
||||
public void addTunnelTestCandidate(TunnelCreatorConfig cfg) {
|
||||
LoadTestTunnelConfig ltCfg = new LoadTestTunnelConfig(cfg);
|
||||
if (wantToTest(ltCfg)) {
|
||||
// wait briefly so everyone has their things in order (not really necessary...)
|
||||
long delay = _context.random().nextInt(30*1000) + 30*1000;
|
||||
SimpleTimer.getInstance().addEvent(new BeginTest(ltCfg), delay);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Testing " + cfg + ", with " + _active.size() + " active");
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Not testing " + cfg + " because we have " + _active.size() + " active: " + _active);
|
||||
}
|
||||
}
|
||||
public void removeTunnelTestCandidate(TunnelCreatorConfig cfg) { _active.remove(cfg); }
|
||||
|
||||
private class BeginTest implements SimpleTimer.TimedEvent {
|
||||
private LoadTestTunnelConfig _cfg;
|
||||
public BeginTest(LoadTestTunnelConfig cfg) {
|
||||
_cfg = cfg;
|
||||
}
|
||||
public void timeReached() {
|
||||
_context.jobQueue().addJob(new Expire(_context, _cfg, false));
|
||||
runTest(_cfg);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean wantToTest(LoadTestTunnelConfig cfg) {
|
||||
// wait 10 minutes before testing anything
|
||||
if (_context.router().getUptime() <= 10*60*1000) return false;
|
||||
if (bandwidthOverloaded()) return false;
|
||||
|
||||
if (TEST_LIVE_TUNNELS && _active.size() < getConcurrency()) {
|
||||
// length == #hops+1 (as it includes the creator)
|
||||
if (cfg.getLength() < 2)
|
||||
return false;
|
||||
_active.add(cfg);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean bandwidthOverloaded() {
|
||||
int msgLoadBps = CONCURRENT_MESSAGES
|
||||
* 5 * 1024 // message size
|
||||
/ 10; // 10 seconds before timeout & retransmission
|
||||
msgLoadBps *= 2; // buffer
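// worked through with the current constants: CONCURRENT_MESSAGES = 1, so
// 1 * 5120 bytes / 10s = 512 Bps, doubled to 1024 Bps (~1KBps); the checks below then
// require at least that much spare inbound and outbound capacity, and a message delay
// of at most one second, before a load test may run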
|
||||
int curBps = getBps();
|
||||
if ((curBps + msgLoadBps)/1024 >= _context.bandwidthLimiter().getOutboundKBytesPerSecond())
|
||||
return true;
|
||||
if ((curBps + msgLoadBps)/1024 >= _context.bandwidthLimiter().getInboundKBytesPerSecond())
|
||||
return true;
|
||||
if (_context.throttle().getMessageDelay() > 1000)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
private int getBps() {
|
||||
int used1s = _context.router().get1sRate();
|
||||
int used1m = _context.router().get1mRate();
|
||||
int used5m = _context.router().get5mRate();
|
||||
return Math.max(used1s, Math.max(used1m, used5m));
|
||||
}
|
||||
|
||||
private class CreatedJob extends JobImpl {
|
||||
private LoadTestTunnelConfig _cfg;
|
||||
public CreatedJob(RouterContext ctx, LoadTestTunnelConfig cfg) {
|
||||
super(ctx);
|
||||
_cfg = cfg;
|
||||
}
|
||||
public String getName() { return "Test tunnel created"; }
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Tunnel created for testing peer " + _cfg.getPeer(0).toBase64());
|
||||
getContext().tunnelDispatcher().joinInbound(_cfg.getTunnel());
|
||||
//log(_cfg, "joined");
|
||||
_active.add(_cfg);
|
||||
Expire j = new Expire(getContext(), _cfg);
|
||||
//_cfg.setExpireJob(j);
|
||||
getContext().jobQueue().addJob(j);
|
||||
runTest(_cfg);
|
||||
}
|
||||
}
|
||||
private long TEST_PERIOD_MAX = 5*60*1000;
|
||||
private long TEST_PERIOD_MIN = 1*60*1000;
|
||||
|
||||
private class Expire extends JobImpl {
|
||||
private LoadTestTunnelConfig _cfg;
|
||||
private boolean _removeFromDispatcher;
|
||||
public Expire(RouterContext ctx, LoadTestTunnelConfig cfg) {
|
||||
this(ctx, cfg, true);
|
||||
}
|
||||
public Expire(RouterContext ctx, LoadTestTunnelConfig cfg, boolean removeFromDispatcher) {
|
||||
super(ctx);
|
||||
_cfg = cfg;
|
||||
_removeFromDispatcher = removeFromDispatcher;
|
||||
long duration = ctx.random().nextLong(TEST_PERIOD_MAX);
|
||||
if (duration < TEST_PERIOD_MIN)
|
||||
duration += TEST_PERIOD_MIN;
|
||||
long expiration = duration + ctx.clock().now();
|
||||
if (expiration > cfg.getExpiration()+60*1000)
|
||||
expiration = cfg.getExpiration()+60*1000;
|
||||
getTiming().setStartAfter(expiration);
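// net effect: this expire job fires between one and five minutes from now (durations
// under TEST_PERIOD_MIN are shifted up by a minute), but never more than a minute past
// the tunnel's own expiration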
|
||||
}
|
||||
public String getName() { return "expire test tunnel"; }
|
||||
public void runJob() {
|
||||
if (_removeFromDispatcher)
|
||||
getContext().tunnelDispatcher().remove(_cfg.getTunnel());
|
||||
_cfg.logComplete();
|
||||
TunnelInfo info = _cfg.getOutbound();
|
||||
if (info != null)
|
||||
info.incrementVerifiedBytesTransferred(0); // just to wrap up the test data
|
||||
_active.remove(_cfg);
|
||||
}
|
||||
}
|
||||
private class FailedJob extends JobImpl {
|
||||
private LoadTestTunnelConfig _cfg;
|
||||
public FailedJob(RouterContext ctx, LoadTestTunnelConfig cfg) {
|
||||
super(ctx);
|
||||
_cfg = cfg;
|
||||
}
|
||||
public String getName() { return "Test tunnel failed"; }
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Tunnel failed for testing peer " + _cfg.getPeer(0).toBase64());
|
||||
log(_cfg, "failed");
|
||||
}
|
||||
}
|
||||
|
||||
private class LoadTestTunnelConfig {
|
||||
private TunnelCreatorConfig _cfg;
|
||||
private long _failed;
|
||||
private long _fullMessages;
|
||||
private TunnelInfo _testInbound;
|
||||
private TunnelInfo _testOutbound;
|
||||
private boolean _completed;
|
||||
public LoadTestTunnelConfig(TunnelCreatorConfig cfg) {
|
||||
_cfg = cfg;
|
||||
_failed = 0;
|
||||
_fullMessages = 0;
|
||||
_completed = false;
|
||||
}
|
||||
|
||||
public long getExpiration() { return _cfg.getExpiration(); }
|
||||
public Hash getPeer(int peer) { return _cfg.getPeer(peer); }
|
||||
public TunnelId getReceiveTunnelId(int peer) { return _cfg.getReceiveTunnelId(peer); }
|
||||
public TunnelId getSendTunnelId(int peer) { return _cfg.getSendTunnelId(peer); }
|
||||
public int getLength() { return _cfg.getLength(); }
|
||||
|
||||
public void incrementFailed() { ++_failed; }
|
||||
public long getFailedMessageCount() { return _failed; }
|
||||
public void incrementFull() { ++_fullMessages; }
|
||||
public long getFullMessageCount() { return _fullMessages; }
|
||||
public TunnelCreatorConfig getTunnel() { return _cfg; }
|
||||
public void setInbound(TunnelInfo info) { _testInbound = info; }
|
||||
public void setOutbound(TunnelInfo info) { _testOutbound = info; }
|
||||
public TunnelInfo getInbound() { return _testInbound; }
|
||||
public TunnelInfo getOutbound() { return _testOutbound; }
|
||||
public String toString() { return _cfg + ": failed=" + _failed + " full=" + _fullMessages; }
|
||||
|
||||
void logComplete() {
|
||||
if (_completed) return;
|
||||
_completed = true;
|
||||
LoadTestTunnelConfig cfg = LoadTestTunnelConfig.this;
|
||||
log(cfg, "expired after sending " + cfg.getFullMessageCount() + " / " + cfg.getFailedMessageCount()
|
||||
+ " in " + (10*60*1000l - (cfg.getExpiration()-_context.clock().now())));
|
||||
_context.statManager().addRateData("test.lifetimeSuccessful", cfg.getFullMessageCount(), cfg.getFailedMessageCount());
|
||||
if (cfg.getFailedMessageCount() > 0)
|
||||
_context.statManager().addRateData("test.lifetimeFailed", cfg.getFailedMessageCount(), cfg.getFullMessageCount());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,17 +4,18 @@ import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Queue;
|
||||
import java.util.TimeZone;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.router.tunnel.HopConfig;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.SecureFileOutputStream;
|
||||
|
||||
/**
|
||||
* Simply act as a pen register of messages sent in and out of the router.
|
||||
@@ -22,26 +23,28 @@ import net.i2p.util.Log;
|
||||
* (with clock synchronization, this will generate a log that can be used to
|
||||
* analyze the entire network, if everyone provides their logs honestly)
|
||||
*
|
||||
* This is always instantiated in the context and the WriteJob runs every minute,
|
||||
* but unless router.keepHistory=true it does nothing.
|
||||
* It generates a LARGE log file.
|
||||
*/
|
||||
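// Minimal configuration sketch for enabling the history log described above, using the
// property names declared below ("router.keepHistory", "router.historyFilename"); a
// relative filename is resolved against the router's log directory:
//
//     router.keepHistory=true
//     router.historyFilename=messageHistory.txt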
public class MessageHistory {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private List _unwrittenEntries; // list of raw entries (strings) yet to be written
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private final Queue<String> _unwrittenEntries; // list of raw entries (strings) yet to be written
|
||||
private String _historyFile; // where to write
|
||||
private String _localIdent; // placed in each entry to uniquely identify the local router
|
||||
private boolean _doLog; // true == we want to log
|
||||
private boolean _doPause; // true == briefly stop writing data to the log (used while submitting it)
|
||||
private ReinitializeJob _reinitializeJob;
|
||||
private WriteJob _writeJob;
|
||||
private SubmitMessageHistoryJob _submitMessageHistoryJob;
|
||||
private final ReinitializeJob _reinitializeJob;
|
||||
private final WriteJob _writeJob;
|
||||
//private SubmitMessageHistoryJob _submitMessageHistoryJob;
|
||||
private volatile boolean _firstPass;
|
||||
|
||||
private final static byte[] NL = System.getProperty("line.separator").getBytes();
|
||||
private final static int FLUSH_SIZE = 1000; // write out at least once every 1000 entries
|
||||
|
||||
/** config property determining whether we want to debug with the message history */
|
||||
/** config property determining whether we want to debug with the message history - default false */
|
||||
public final static String PROP_KEEP_MESSAGE_HISTORY = "router.keepHistory";
|
||||
public final static boolean DEFAULT_KEEP_MESSAGE_HISTORY = false;
|
||||
/** config property determining where we want to log the message history, if we're keeping one */
|
||||
public final static String PROP_MESSAGE_HISTORY_FILENAME = "router.historyFilename";
|
||||
public final static String DEFAULT_MESSAGE_HISTORY_FILENAME = "messageHistory.txt";
|
||||
@@ -53,6 +56,7 @@ public class MessageHistory {
|
||||
_log = context.logManager().getLog(getClass());
|
||||
_fmt = new SimpleDateFormat("yy/MM/dd.HH:mm:ss.SSS");
|
||||
_fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
|
||||
_unwrittenEntries = new LinkedBlockingQueue();
|
||||
_reinitializeJob = new ReinitializeJob();
|
||||
_writeJob = new WriteJob();
|
||||
_firstPass = true;
|
||||
@@ -60,26 +64,24 @@ public class MessageHistory {
|
||||
initialize(true);
|
||||
}
|
||||
|
||||
void setDoLog(boolean log) { _doLog = log; }
|
||||
boolean getDoLog() { return _doLog; }
|
||||
/** @since 0.8.12 */
|
||||
public void shutdown() {
|
||||
if (_doLog)
|
||||
addEntry(getPrefix() + "** Router shutdown");
|
||||
_doPause = false;
|
||||
flushEntries();
|
||||
_doLog = false;
|
||||
}
|
||||
|
||||
public boolean getDoLog() { return _doLog; }
|
||||
|
||||
/** @deprecated unused */
|
||||
void setPauseFlushes(boolean doPause) { _doPause = doPause; }
|
||||
String getFilename() { return _historyFile; }
|
||||
|
||||
private void updateSettings() {
|
||||
String keepHistory = _context.router().getConfigSetting(PROP_KEEP_MESSAGE_HISTORY);
|
||||
if (keepHistory != null) {
|
||||
_doLog = Boolean.TRUE.toString().equalsIgnoreCase(keepHistory);
|
||||
} else {
|
||||
_doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
|
||||
}
|
||||
|
||||
String filename = null;
|
||||
if (_doLog) {
|
||||
filename = _context.router().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME);
|
||||
if ( (filename == null) || (filename.trim().length() <= 0) )
|
||||
filename = DEFAULT_MESSAGE_HISTORY_FILENAME;
|
||||
}
|
||||
_doLog = Boolean.valueOf(_context.getProperty(PROP_KEEP_MESSAGE_HISTORY)).booleanValue();
|
||||
_historyFile = _context.getProperty(PROP_MESSAGE_HISTORY_FILENAME, DEFAULT_MESSAGE_HISTORY_FILENAME);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -90,30 +92,25 @@ public class MessageHistory {
|
||||
public void initialize(boolean forceReinitialize) {
|
||||
if (!forceReinitialize) return;
|
||||
|
||||
if (_context.router() == null) return;
|
||||
|
||||
if (_context.router().getRouterInfo() == null) {
|
||||
_reinitializeJob.getTiming().setStartAfter(_context.clock().now()+5000);
|
||||
_reinitializeJob.getTiming().setStartAfter(_context.clock().now() + 15*1000);
|
||||
_context.jobQueue().addJob(_reinitializeJob);
|
||||
} else {
|
||||
String filename = null;
|
||||
filename = _context.router().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME);
|
||||
if ( (filename == null) || (filename.trim().length() <= 0) )
|
||||
filename = DEFAULT_MESSAGE_HISTORY_FILENAME;
|
||||
|
||||
_doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
|
||||
_historyFile = filename;
|
||||
_localIdent = getName(_context.routerHash());
|
||||
_unwrittenEntries = new ArrayList(64);
|
||||
// _unwrittenEntries = new ArrayList(64);
|
||||
updateSettings();
|
||||
// clear the history file on startup
|
||||
if (_firstPass) {
|
||||
File f = new File(_historyFile);
|
||||
if (!f.isAbsolute())
|
||||
f = new File(_context.getLogDir(), _historyFile);
|
||||
f.delete();
|
||||
_writeJob.getTiming().setStartAfter(_context.clock().now() + WRITE_DELAY);
|
||||
_context.jobQueue().addJob(_writeJob);
|
||||
_firstPass = false;
|
||||
}
|
||||
_firstPass = false;
|
||||
addEntry(getPrefix() + "** Router initialized (started up or changed identities)");
|
||||
_context.jobQueue().addJob(_writeJob);
|
||||
if (_doLog)
|
||||
addEntry(getPrefix() + "** Router initialized (started up or changed identities)");
|
||||
//_submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
|
||||
//_context.jobQueue().addJob(_submitMessageHistoryJob);
|
||||
}
|
||||
@@ -142,9 +139,10 @@ public class MessageHistory {
|
||||
* @param replyTunnel the tunnel sourceRoutePeer should forward the source routed message to
|
||||
* @param replyThrough the gateway of the tunnel that the sourceRoutePeer will be sending to
|
||||
*/
|
||||
/********
|
||||
public void requestTunnelCreate(TunnelId createTunnel, TunnelId outTunnel, Hash peerRequested, Hash nextPeer, TunnelId replyTunnel, Hash replyThrough) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("request [").append(getName(peerRequested)).append("] to create tunnel [");
|
||||
buf.append(createTunnel.getTunnelId()).append("] ");
|
||||
@@ -156,6 +154,7 @@ public class MessageHistory {
|
||||
buf.append("who forwards it through [").append(replyTunnel.getTunnelId()).append("] on [").append(getName(replyThrough)).append("]");
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
*********/
|
||||
|
||||
/**
|
||||
* The local router has received a request to join the createTunnel with the next hop being nextPeer,
|
||||
@@ -167,9 +166,10 @@ public class MessageHistory {
|
||||
* @param ok whether we will join the tunnel
|
||||
* @param sourceRoutePeer peer through whom we should send our garlic routed ok through
|
||||
*/
|
||||
/*********
|
||||
public void receiveTunnelCreate(TunnelId createTunnel, Hash nextPeer, Date expire, boolean ok, Hash sourceRoutePeer) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] ");
|
||||
if (nextPeer != null)
|
||||
@@ -177,6 +177,7 @@ public class MessageHistory {
|
||||
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire.getTime())).append("]");
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
*********/
|
||||
|
||||
/**
|
||||
* The local router has joined the given tunnel operating in the given state.
|
||||
@@ -187,7 +188,7 @@ public class MessageHistory {
|
||||
public void tunnelJoined(String state, TunnelInfo tunnel) {
|
||||
if (!_doLog) return;
|
||||
if (tunnel == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("joining as [").append(state);
|
||||
buf.append("] to tunnel: ").append(tunnel.toString());
|
||||
@@ -203,7 +204,7 @@ public class MessageHistory {
|
||||
public void tunnelJoined(String state, HopConfig tunnel) {
|
||||
if (!_doLog) return;
|
||||
if (tunnel == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("joining as [").append(state);
|
||||
buf.append("] to tunnel: ").append(tunnel.toString());
|
||||
@@ -242,7 +243,7 @@ public class MessageHistory {
|
||||
public void tunnelFailed(TunnelId tunnel) {
|
||||
if (!_doLog) return;
|
||||
if (tunnel == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("failing tunnel [").append(tunnel.getTunnelId()).append("]");
|
||||
addEntry(buf.toString());
|
||||
@@ -258,7 +259,7 @@ public class MessageHistory {
|
||||
public void tunnelValid(TunnelInfo tunnel, long timeToTest) {
|
||||
if (!_doLog) return;
|
||||
if (tunnel == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("tunnel ").append(tunnel).append(" tested ok after ").append(timeToTest).append("ms");
|
||||
addEntry(buf.toString());
|
||||
@@ -271,7 +272,7 @@ public class MessageHistory {
|
||||
public void tunnelRejected(Hash peer, TunnelId tunnel, Hash replyThrough, String reason) {
|
||||
if (!_doLog) return;
|
||||
if ( (tunnel == null) || (peer == null) ) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] was rejected by [");
|
||||
buf.append(getName(peer)).append("] for [").append(reason).append("]");
|
||||
@@ -283,7 +284,7 @@ public class MessageHistory {
|
||||
public void tunnelParticipantRejected(Hash peer, String msg) {
|
||||
if (!_doLog) return;
|
||||
if (peer == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("tunnel participation rejected by [");
|
||||
buf.append(getName(peer)).append("]: ").append(msg);
|
||||
@@ -298,7 +299,7 @@ public class MessageHistory {
|
||||
public void tunnelRequestTimedOut(Hash peer, TunnelId tunnel) {
|
||||
if (!_doLog) return;
|
||||
if ( (tunnel == null) || (peer == null) ) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] timed out on [");
|
||||
buf.append(getName(peer)).append("]");
|
||||
@@ -314,7 +315,7 @@ public class MessageHistory {
|
||||
*/
|
||||
public void droppedTunnelMessage(TunnelId id, long msgId, Date expiration, Hash from) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("dropped message ").append(msgId).append(" for unknown tunnel [").append(id.getTunnelId());
|
||||
buf.append("] from [").append(getName(from)).append("]").append(" expiring on ");
|
||||
@@ -328,7 +329,7 @@ public class MessageHistory {
|
||||
public void droppedOtherMessage(I2NPMessage message, Hash from) {
|
||||
if (!_doLog) return;
|
||||
if (message == null) return;
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
StringBuilder buf = new StringBuilder(512);
|
||||
buf.append(getPrefix());
|
||||
buf.append("dropped [").append(message.getClass().getName()).append("] ").append(message.getUniqueId());
|
||||
buf.append(" [").append(message.toString()).append("] from [");
|
||||
@@ -342,7 +343,7 @@ public class MessageHistory {
|
||||
|
||||
public void droppedInboundMessage(long messageId, Hash from, String info) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
StringBuilder buf = new StringBuilder(512);
|
||||
buf.append(getPrefix());
|
||||
buf.append("dropped inbound message ").append(messageId);
|
||||
buf.append(" from ");
|
||||
@@ -364,7 +365,7 @@ public class MessageHistory {
|
||||
public void replyTimedOut(OutNetMessage sentMessage) {
|
||||
if (!_doLog) return;
|
||||
if (sentMessage == null) return;
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
StringBuilder buf = new StringBuilder(512);
|
||||
buf.append(getPrefix());
|
||||
buf.append("timed out waiting for a reply to [").append(sentMessage.getMessageType());
|
||||
buf.append("] [").append(sentMessage.getMessageId()).append("] expiring on [");
|
||||
@@ -383,7 +384,7 @@ public class MessageHistory {
|
||||
*/
|
||||
public void messageProcessingError(long messageId, String messageType, String error) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Error processing [").append(messageType).append("] [").append(messageId).append("] failed with [").append(error).append("]");
|
||||
addEntry(buf.toString());
|
||||
@@ -420,7 +421,7 @@ public class MessageHistory {
|
||||
public void sendMessage(String messageType, long messageId, long expiration, Hash peer, boolean sentOk, String info) {
|
||||
if (!_doLog) return;
|
||||
if (false) return;
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
StringBuilder buf = new StringBuilder(256);
|
||||
buf.append(getPrefix());
|
||||
buf.append("send [").append(messageType).append("] message [").append(messageId).append("] ");
|
||||
buf.append("to [").append(getName(peer)).append("] ");
|
||||
@@ -448,7 +449,7 @@ public class MessageHistory {
|
||||
public void receiveMessage(String messageType, long messageId, long expiration, Hash from, boolean isValid) {
|
||||
if (!_doLog) return;
|
||||
if (false) return;
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
StringBuilder buf = new StringBuilder(256);
|
||||
buf.append(getPrefix());
|
||||
buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] ");
|
||||
if (from != null)
|
||||
@@ -470,7 +471,7 @@ public class MessageHistory {
|
||||
*/
|
||||
public void wrap(String bodyMessageType, long bodyMessageId, String containerMessageType, long containerMessageId) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Wrap message [").append(bodyMessageType).append("] id [").append(bodyMessageId).append("] ");
|
||||
buf.append("in [").append(containerMessageType).append("] id [").append(containerMessageId).append("]");
|
||||
@@ -483,7 +484,7 @@ public class MessageHistory {
|
||||
*/
|
||||
public void receivePayloadMessage(long messageId) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(64);
|
||||
StringBuilder buf = new StringBuilder(64);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Receive payload message [").append(messageId).append("]");
|
||||
addEntry(buf.toString());
|
||||
@@ -498,7 +499,7 @@ public class MessageHistory {
|
||||
*/
|
||||
public void sendPayloadMessage(long messageId, boolean successfullySent, long timeToSend) {
|
||||
if (!_doLog) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Send payload message in [").append(messageId).append("] in [").append(timeToSend).append("] successfully? ").append(successfullySent);
|
||||
addEntry(buf.toString());
|
||||
@@ -507,7 +508,7 @@ public class MessageHistory {
|
||||
public void receiveTunnelFragment(long messageId, int fragmentId, Object status) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Receive fragment ").append(fragmentId).append(" in ").append(messageId);
|
||||
buf.append(" status: ").append(status.toString());
|
||||
@@ -516,7 +517,7 @@ public class MessageHistory {
|
||||
public void receiveTunnelFragmentComplete(long messageId) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Receive fragmented message completely: ").append(messageId);
|
||||
addEntry(buf.toString());
|
||||
@@ -524,7 +525,7 @@ public class MessageHistory {
|
||||
public void droppedFragmentedMessage(long messageId, String status) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Fragmented message dropped: ").append(messageId);
|
||||
buf.append(" ").append(status);
|
||||
@@ -533,7 +534,7 @@ public class MessageHistory {
|
||||
public void fragmentMessage(long messageId, int numFragments, int totalLength, List messageIds, String msg) {
|
||||
if (!_doLog) return;
|
||||
//if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Break message ").append(messageId).append(" into fragments: ").append(numFragments);
|
||||
buf.append(" total size ").append(totalLength);
|
||||
@@ -545,7 +546,7 @@ public class MessageHistory {
|
||||
public void fragmentMessage(long messageId, int numFragments, int totalLength, List messageIds, Object tunnel, String msg) {
|
||||
if (!_doLog) return;
|
||||
//if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Break message ").append(messageId).append(" into fragments: ").append(numFragments);
|
||||
buf.append(" total size ").append(totalLength);
|
||||
@@ -559,7 +560,7 @@ public class MessageHistory {
|
||||
public void droppedTunnelDataMessageUnknown(long msgId, long tunnelId) {
|
||||
if (!_doLog) return;
|
||||
if (msgId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Dropped data message ").append(msgId).append(" for unknown tunnel ").append(tunnelId);
|
||||
addEntry(buf.toString());
|
||||
@@ -567,7 +568,7 @@ public class MessageHistory {
|
||||
public void droppedTunnelGatewayMessageUnknown(long msgId, long tunnelId) {
|
||||
if (!_doLog) return;
|
||||
if (msgId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Dropped gateway message ").append(msgId).append(" for unknown tunnel ").append(tunnelId);
|
||||
addEntry(buf.toString());
|
||||
@@ -585,7 +586,7 @@ public class MessageHistory {
|
||||
}
|
||||
|
||||
private final String getPrefix() {
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
StringBuilder buf = new StringBuilder(48);
|
||||
buf.append(getTime(_context.clock().now()));
|
||||
buf.append(' ').append(_localIdent).append(": ");
|
||||
return buf.toString();
|
||||
@@ -604,11 +605,8 @@ public class MessageHistory {
|
||||
*/
|
||||
private void addEntry(String entry) {
|
||||
if (entry == null) return;
|
||||
int sz = 0;
|
||||
synchronized (_unwrittenEntries) {
|
||||
_unwrittenEntries.add(entry);
|
||||
sz = _unwrittenEntries.size();
|
||||
}
|
||||
_unwrittenEntries.offer(entry);
|
||||
int sz = _unwrittenEntries.size();
|
||||
if (sz > FLUSH_SIZE)
|
||||
flushEntries();
|
||||
}
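The replacement above drops the synchronized ArrayList in favour of a lock-free queue: entries are offered without taking a lock, and a flush is triggered once the backlog passes FLUSH_SIZE. A minimal standalone sketch of the same pattern, assuming a ConcurrentLinkedQueue, an arbitrary threshold, and a PrintWriter sink (none of which are taken from MessageHistory itself):

import java.io.PrintWriter;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/** Illustrative sketch only: buffer entries lock-free, flush when the backlog grows. */
class HistoryBuffer {
    private static final int FLUSH_SIZE = 1000;                // assumed threshold
    private final Queue<String> _unwritten = new ConcurrentLinkedQueue<String>();
    private final PrintWriter _out;

    HistoryBuffer(PrintWriter out) { _out = out; }

    void addEntry(String entry) {
        if (entry == null) return;
        _unwritten.offer(entry);                               // no lock held, unlike the old synchronized list
        if (_unwritten.size() > FLUSH_SIZE)                    // note: size() walks the queue, so keep the threshold modest
            flushEntries();
    }

    private synchronized void flushEntries() {                 // one writer at a time, as in writeEntries()
        String entry;
        while ((entry = _unwritten.poll()) != null)
            _out.println(entry);
        _out.flush();
    }
}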
|
||||
@@ -617,26 +615,25 @@ public class MessageHistory {
|
||||
* Write out any unwritten entries, and clear the pending list
|
||||
*/
|
||||
private void flushEntries() {
|
||||
if (_doPause) return;
|
||||
List entries = null;
|
||||
synchronized (_unwrittenEntries) {
|
||||
entries = new ArrayList(_unwrittenEntries);
|
||||
if (!_doLog)
|
||||
_unwrittenEntries.clear();
|
||||
}
|
||||
writeEntries(entries);
|
||||
else if ((!_unwrittenEntries.isEmpty()) && !_doPause)
|
||||
writeEntries();
|
||||
}
|
||||
|
||||
/**
|
||||
* Actually write the specified entries
|
||||
*
|
||||
*/
|
||||
private void writeEntries(List entries) {
|
||||
if (!_doLog) return;
|
||||
private synchronized void writeEntries() {
|
||||
File f = new File(_historyFile);
|
||||
if (!f.isAbsolute())
|
||||
f = new File(_context.getLogDir(), _historyFile);
|
||||
FileOutputStream fos = null;
|
||||
try {
|
||||
fos = new FileOutputStream(_historyFile, true);
|
||||
for (Iterator iter = entries.iterator(); iter.hasNext(); ) {
|
||||
String entry = (String)iter.next();
|
||||
fos = new SecureFileOutputStream(f, true);
|
||||
String entry;
|
||||
while ((entry = _unwrittenEntries.poll()) != null) {
|
||||
fos.write(entry.getBytes());
|
||||
fos.write(NL);
|
||||
}
|
||||
@@ -653,7 +650,7 @@ public class MessageHistory {
|
||||
public WriteJob() {
|
||||
super(MessageHistory.this._context);
|
||||
}
|
||||
public String getName() { return "Write History Entries"; }
|
||||
public String getName() { return _doLog ? "Message debug log" : "Message debug log (disabled)"; }
|
||||
public void runJob() {
|
||||
flushEntries();
|
||||
updateSettings();
|
||||
@@ -661,6 +658,7 @@ public class MessageHistory {
|
||||
}
|
||||
}
|
||||
|
||||
/****
|
||||
public static void main(String args[]) {
|
||||
RouterContext ctx = new RouterContext(null);
|
||||
MessageHistory hist = new MessageHistory(ctx);
|
||||
@@ -673,4 +671,5 @@ public class MessageHistory {
|
||||
hist.addEntry("you smell finished");
|
||||
hist.flushEntries();
|
||||
}
|
||||
****/
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import net.i2p.data.TunnelId;
|
||||
/**
|
||||
* Wrap up the details of how a ClientMessage was received from the network
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
public class MessageReceptionInfo {
|
||||
private Hash _fromPeer;
|
||||
|
||||
@@ -16,19 +16,34 @@ import net.i2p.data.i2np.I2NPMessage;
|
||||
*
|
||||
*/
|
||||
public interface MessageSelector {
|
||||
|
||||
/**
|
||||
* Returns true if the received message matches the selector
|
||||
* Returns true if the received message matches the selector.
|
||||
* If this returns true, the job specified by OutNetMessage.getOnReplyJob()
|
||||
* will be run for every OutNetMessage associated with this selector
|
||||
* (by InNetMessagePool), after calling setMessage() for that ReplyJob.
|
||||
*
|
||||
* WARNING this is called from within OutboundMessageSelector.getOriginalMessages()
|
||||
* inside a lock and can lead to deadlocks if the selector does too much in isMatch().
|
||||
* Until the lock is removed, take care to keep it simple.
|
||||
*
|
||||
*/
|
||||
public boolean isMatch(I2NPMessage message);
|
||||
|
||||
/**
|
||||
* Returns true if the selector should still keep searching for further matches
|
||||
*
|
||||
* Returns true if the selector should still keep searching for further matches.
|
||||
* This is called only if isMatch() returns true.
|
||||
* If this returns true, isMatch() will not be called again.
|
||||
*/
|
||||
public boolean continueMatching();
|
||||
|
||||
/**
|
||||
* Returns the # of milliseconds since the epoch after which this selector should
|
||||
* stop searching for matches
|
||||
*
|
||||
* stop searching for matches.
|
||||
* At some time after expiration, if continueMatching() has not returned false,
|
||||
* the job specified by OutNetMessage.getOnFailedReplyJob()
|
||||
* will be run for every OutNetMessage associated with this selector
|
||||
* (by OutboundMessageRegistry).
|
||||
*/
|
||||
public long getExpiration();
|
||||
}
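The expanded javadoc pins down the contract: isMatch() decides whether the ReplyJob fires, continueMatching() tells the registry whether to keep the selector after a match, and getExpiration() bounds how long the search runs. A hypothetical single-reply selector, kept deliberately cheap because isMatch() can run while the registry lock is held (the class name and the ID-equality rule are illustrative only):

import net.i2p.data.i2np.I2NPMessage;

/** Illustrative only: match one expected reply by I2NP message ID, then stop. */
class SingleReplySelector implements MessageSelector {
    private final long _expectedId;
    private final long _expiration;

    SingleReplySelector(long expectedId, long timeoutMs) {
        _expectedId = expectedId;
        _expiration = System.currentTimeMillis() + timeoutMs;
    }

    // keep this cheap - it may be called with the registry lock held
    public boolean isMatch(I2NPMessage message) {
        return message.getUniqueId() == _expectedId;
    }

    // only one reply is expected, so stop searching after the first match
    public boolean continueMatching() { return false; }

    // absolute time, in milliseconds since the epoch
    public long getExpiration() { return _expiration; }
}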
|
||||
|
||||
@@ -5,22 +5,19 @@ import net.i2p.util.Log;
|
||||
/**
|
||||
* Keep track of the inbound and outbound messages in memory.
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
public class MessageStateMonitor {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private volatile int _inboundLiveCount;
|
||||
private volatile int _inboundReadCount;
|
||||
private volatile int _inboundFinalizedCount;
|
||||
private volatile int _outboundLiveCount;
|
||||
private volatile int _outboundDiscardedCount;
|
||||
|
||||
public MessageStateMonitor(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(MessageStateMonitor.class);
|
||||
_inboundLiveCount = 0;
|
||||
_inboundReadCount = 0;
|
||||
_inboundFinalizedCount = 0;
|
||||
_outboundLiveCount = 0;
|
||||
_outboundDiscardedCount = 0;
|
||||
}
|
||||
@@ -36,7 +33,6 @@ public class MessageStateMonitor {
|
||||
}
|
||||
public void inboundMessageFinalized() {
|
||||
_inboundReadCount--;
|
||||
_inboundFinalizedCount++;
|
||||
logStatus("inboundFinalized ");
|
||||
}
|
||||
|
||||
@@ -69,4 +65,4 @@ public class MessageStateMonitor {
|
||||
public int getInboundReadCount() { return _inboundReadCount; }
|
||||
public int getOutboundLiveCount() { return _outboundLiveCount; }
|
||||
public int getOutboundDiscardedCount() { return _outboundDiscardedCount; }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import net.i2p.util.DecayingBloomFilter;
|
||||
import net.i2p.router.util.DecayingBloomFilter;
|
||||
import net.i2p.router.util.DecayingHashSet;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@@ -12,14 +13,13 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class MessageValidator {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private DecayingBloomFilter _filter;
|
||||
|
||||
|
||||
public MessageValidator(RouterContext context) {
|
||||
_log = context.logManager().getLog(MessageValidator.class);
|
||||
_filter = null;
|
||||
_context = context;
|
||||
context.statManager().createRateStat("router.duplicateMessageId", "Note that a duplicate messageId was received", "Router",
|
||||
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
|
||||
@@ -95,7 +95,7 @@ public class MessageValidator {
|
||||
}
|
||||
|
||||
public void startup() {
|
||||
_filter = new DecayingBloomFilter(_context, (int)Router.CLOCK_FUDGE_FACTOR * 2, 8);
|
||||
_filter = new DecayingHashSet(_context, (int)Router.CLOCK_FUDGE_FACTOR * 2, 8, "RouterMV");
|
||||
}
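DecayingHashSet keeps the Bloom filter's decay behaviour but stores exact entries (it lives in net.i2p.router.util alongside DecayingBloomFilter), so it cannot return false positives at the small entry counts the validator handles. A rough standalone sketch of the underlying idea, rotating two plain sets once per decay period; it is illustrative only, not the real implementation:

import java.util.HashSet;
import java.util.Set;

/** Illustrative only: remembers an entry for roughly one to two decay periods. */
class SimpleDecayingSet {
    private final long _durationMs;
    private Set<Long> _current = new HashSet<Long>();
    private Set<Long> _previous = new HashSet<Long>();
    private long _lastRotate = System.currentTimeMillis();

    SimpleDecayingSet(long durationMs) { _durationMs = durationMs; }

    /** @return true if the entry was seen within the last one or two periods, i.e. a duplicate */
    synchronized boolean add(long entry) {
        long now = System.currentTimeMillis();
        if (now - _lastRotate > _durationMs) {      // rotate: entries older than about two periods age out
            _previous = _current;
            _current = new HashSet<Long>();
            _lastRotate = now;
        }
        boolean dup = _current.contains(entry) || _previous.contains(entry);
        _current.add(entry);
        return dup;
    }
}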
|
||||
|
||||
void shutdown() {
|
||||
|
||||
@@ -58,6 +58,7 @@ public class MultiRouter {
|
||||
_defaultContext.clock().setOffset(0);
|
||||
|
||||
Runtime.getRuntime().addShutdownHook(new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Router* Shutdown");
|
||||
try { Thread.sleep(120*1000); } catch (InterruptedException ie) {}
|
||||
@@ -89,13 +90,17 @@ public class MultiRouter {
|
||||
|
||||
private static Properties getEnv(String filename) {
|
||||
Properties props = new Properties();
|
||||
FileInputStream in = null;
|
||||
try {
|
||||
props.load(new FileInputStream(filename));
|
||||
in = new FileInputStream(filename);
|
||||
props.load(in);
|
||||
props.setProperty("time.disabled", "true");
|
||||
return props;
|
||||
} catch (IOException ioe) {
|
||||
ioe.printStackTrace();
|
||||
return null;
|
||||
} finally {
|
||||
if (in != null) try { in.close(); } catch (IOException ioe) {}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ public class MultiRouterBuilder {
|
||||
buildStartupScriptNix(args);
|
||||
}
|
||||
private static void buildStartupScriptNix(String args[]) {
|
||||
StringBuffer buf = new StringBuffer(4096);
|
||||
StringBuilder buf = new StringBuilder(4096);
|
||||
buf.append("#!/bin/sh\n");
|
||||
buf.append("export CP=.; for LIB in lib/* ; do export CP=$CP:$LIB ; done\n");
|
||||
buf.append("nohup java -cp $CP ");
|
||||
@@ -106,7 +106,7 @@ public class MultiRouterBuilder {
|
||||
File baseDir = new File(dir);
|
||||
baseDir.mkdirs();
|
||||
File cfgFile = new File(baseDir, "router.config");
|
||||
StringBuffer buf = new StringBuffer(8*1024);
|
||||
StringBuilder buf = new StringBuilder(8*1024);
|
||||
buf.append("router.profileDir=").append(baseDir.getPath()).append("/peerProfiles\n");
|
||||
buf.append("router.historyFilename=").append(baseDir.getPath()).append("/messageHistory.txt\n");
|
||||
buf.append("router.sessionKeys.location=").append(baseDir.getPath()).append("/sessionKeys.dat\n");
|
||||
|
||||
@@ -11,14 +11,13 @@ package net.i2p.router;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.DatabaseEntry;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.router.networkdb.reseed.ReseedChecker;
|
||||
|
||||
/**
|
||||
* Defines the mechanism for interacting with I2P's network database
|
||||
@@ -33,8 +32,13 @@ public abstract class NetworkDatabaseFacade implements Service {
|
||||
* @param maxNumRouters The maximum number of routers to return
|
||||
* @param peersToIgnore Hash of routers not to include
|
||||
*/
|
||||
public abstract Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore);
|
||||
public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore);
|
||||
|
||||
/**
|
||||
* @return RouterInfo, LeaseSet, or null
|
||||
* @since 0.8.3
|
||||
*/
|
||||
public abstract DatabaseEntry lookupLocally(Hash key);
|
||||
public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
|
||||
public abstract LeaseSet lookupLeaseSetLocally(Hash key);
|
||||
public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
|
||||
@@ -59,8 +63,18 @@ public abstract class NetworkDatabaseFacade implements Service {
|
||||
public abstract void unpublish(LeaseSet localLeaseSet);
|
||||
public abstract void fail(Hash dbEntry);
|
||||
|
||||
public abstract Set<Hash> getAllRouters();
|
||||
public int getKnownRouters() { return 0; }
|
||||
public int getKnownLeaseSets() { return 0; }
|
||||
public void renderRouterInfoHTML(Writer out, String s) throws IOException {}
|
||||
public void renderStatusHTML(Writer out, boolean b) throws IOException {}
|
||||
public boolean isInitialized() { return true; }
|
||||
public void rescan() {}
|
||||
/** @deprecated moved to router console */
|
||||
public void renderStatusHTML(Writer out) throws IOException {}
|
||||
/** public for NetDbRenderer in routerconsole */
|
||||
public Set<LeaseSet> getLeases() { return Collections.EMPTY_SET; }
|
||||
/** public for NetDbRenderer in routerconsole */
|
||||
public Set<RouterInfo> getRouters() { return Collections.EMPTY_SET; }
|
||||
|
||||
/** @since 0.9 */
|
||||
public ReseedChecker reseedChecker() { return null; };
|
||||
}
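The lookup calls above are asynchronous: the caller supplies an on-found job, an on-failed job, and a timeout, then reads the result from the local cache in the callback. A hypothetical caller, assuming the usual JobImpl base class (constructor taking the context, plus getName()/runJob()) and a 30-second timeout; both are assumptions for illustration, not taken from this file:

void requestLeaseSet(final RouterContext ctx, final Hash key) {
    Job onFound = new JobImpl(ctx) {
        public String getName() { return "LeaseSet found"; }
        public void runJob() {
            LeaseSet ls = ctx.netDb().lookupLeaseSetLocally(key);   // should now be cached locally
            // ... use the lease set ...
        }
    };
    Job onFailed = new JobImpl(ctx) {
        public String getName() { return "LeaseSet lookup failed"; }
        public void runJob() { /* retry or give up */ }
    };
    ctx.netDb().lookupLeaseSet(key, onFound, onFailed, 30*1000);
}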
|
||||
|
||||
@@ -18,7 +18,6 @@ import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.util.Log;
|
||||
@@ -29,8 +28,8 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class OutNetMessage {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private RouterInfo _target;
|
||||
private I2NPMessage _message;
|
||||
/** cached message class name, for use after we discard the message */
|
||||
@@ -46,41 +45,28 @@ public class OutNetMessage {
|
||||
private ReplyJob _onReply;
|
||||
private Job _onFailedReply;
|
||||
private MessageSelector _replySelector;
|
||||
private Set _failedTransports;
|
||||
private Set<String> _failedTransports;
|
||||
private long _sendBegin;
|
||||
private long _transmitBegin;
|
||||
private Exception _createdBy;
|
||||
private long _created;
|
||||
//private Exception _createdBy;
|
||||
private final long _created;
|
||||
/** for debugging, contains a mapping of event name to Long (e.g. "begin sending", "handleOutbound", etc) */
|
||||
private HashMap _timestamps;
|
||||
private HashMap<String, Long> _timestamps;
|
||||
/**
|
||||
* contains a list of timestamp event names in the order they were fired
|
||||
* (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order)
|
||||
*/
|
||||
private List _timestampOrder;
|
||||
private int _queueSize;
|
||||
private long _prepareBegin;
|
||||
private long _prepareEnd;
|
||||
private List<String> _timestampOrder;
|
||||
private Object _preparationBuf;
|
||||
|
||||
public OutNetMessage(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(OutNetMessage.class);
|
||||
setTarget(null);
|
||||
_message = null;
|
||||
_messageSize = 0;
|
||||
setPriority(-1);
|
||||
setExpiration(-1);
|
||||
setOnSendJob(null);
|
||||
setOnFailedSendJob(null);
|
||||
setOnReplyJob(null);
|
||||
setOnFailedReplyJob(null);
|
||||
setReplySelector(null);
|
||||
_failedTransports = null;
|
||||
_sendBegin = 0;
|
||||
_priority = -1;
|
||||
_expiration = -1;
|
||||
//_createdBy = new Exception("Created by");
|
||||
_created = context.clock().now();
|
||||
timestamp("Created");
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
timestamp("Created");
|
||||
//_context.messageStateMonitor().outboundMessageAdded();
|
||||
//_context.statManager().createRateStat("outNetMessage.timeToDiscard",
|
||||
// "How long until we discard an outbound msg?",
|
||||
@@ -88,7 +74,8 @@ public class OutNetMessage {
|
||||
}
|
||||
|
||||
/**
|
||||
* Stamp the message's progress
|
||||
* Stamp the message's progress.
|
||||
* Only useful if log level is INFO or DEBUG
|
||||
*
|
||||
* @param eventName what occurred
|
||||
* @return how long this message has been 'in flight'
|
||||
@@ -99,34 +86,39 @@ public class OutNetMessage {
|
||||
// only timestamp if we are debugging
|
||||
synchronized (this) {
|
||||
locked_initTimestamps();
|
||||
while (_timestamps.containsKey(eventName)) {
|
||||
eventName = eventName + '.';
|
||||
}
|
||||
_timestamps.put(eventName, new Long(now));
|
||||
// ???
|
||||
//while (_timestamps.containsKey(eventName)) {
|
||||
// eventName = eventName + '.';
|
||||
//}
|
||||
_timestamps.put(eventName, Long.valueOf(now));
|
||||
_timestampOrder.add(eventName);
|
||||
}
|
||||
}
|
||||
return now - _created;
|
||||
}
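Because the map is only touched at INFO/DEBUG, stamping is essentially free in normal operation. A hypothetical call site, assuming an OutNetMessage in scope and illustrative event names:

msg.timestamp("enqueued to transport");              // recorded only if the log level is INFO or DEBUG
// ... later, when the transport starts writing ...
long inFlightMs = msg.timestamp("begin sending");    // the return value is the message's age in ms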
|
||||
public Map getTimestamps() {
|
||||
|
||||
/** @deprecated unused */
|
||||
public Map<String, Long> getTimestamps() {
|
||||
if (_log.shouldLog(Log.INFO)) {
|
||||
synchronized (this) {
|
||||
locked_initTimestamps();
|
||||
return (Map)_timestamps.clone();
|
||||
return (Map<String, Long>)_timestamps.clone();
|
||||
}
|
||||
}
|
||||
return Collections.EMPTY_MAP;
|
||||
}
|
||||
|
||||
/** @deprecated unused */
|
||||
public Long getTimestamp(String eventName) {
|
||||
if (_log.shouldLog(Log.INFO)) {
|
||||
synchronized (this) {
|
||||
locked_initTimestamps();
|
||||
return (Long)_timestamps.get(eventName);
|
||||
return _timestamps.get(eventName);
|
||||
}
|
||||
}
|
||||
return ZERO;
|
||||
return Long.valueOf(0);
|
||||
}
|
||||
private static final Long ZERO = new Long(0);
|
||||
|
||||
private void locked_initTimestamps() {
|
||||
if (_timestamps == null) {
|
||||
_timestamps = new HashMap(8);
|
||||
@@ -134,7 +126,11 @@ public class OutNetMessage {
|
||||
}
|
||||
}
|
||||
|
||||
public Exception getCreatedBy() { return _createdBy; }
|
||||
/**
|
||||
* @deprecated
|
||||
* @return null always
|
||||
*/
|
||||
public Exception getCreatedBy() { return null; }
|
||||
|
||||
/**
|
||||
* Specifies the router to which the message should be delivered.
|
||||
@@ -142,22 +138,28 @@ public class OutNetMessage {
|
||||
*/
|
||||
public RouterInfo getTarget() { return _target; }
|
||||
public void setTarget(RouterInfo target) { _target = target; }
|
||||
|
||||
/**
|
||||
* Specifies the message to be sent
|
||||
*
|
||||
*/
|
||||
public I2NPMessage getMessage() { return _message; }
|
||||
|
||||
public void setMessage(I2NPMessage msg) {
|
||||
_message = msg;
|
||||
if (msg != null) {
|
||||
_messageType = msg.getClass().getName();
|
||||
_messageType = msg.getClass().getSimpleName();
|
||||
_messageTypeId = msg.getType();
|
||||
_messageId = msg.getUniqueId();
|
||||
_messageSize = _message.getMessageSize();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the simple class name
|
||||
*/
|
||||
public String getMessageType() { return _messageType; }
|
||||
|
||||
public int getMessageTypeId() { return _messageTypeId; }
|
||||
public long getMessageId() { return _messageId; }
|
||||
|
||||
@@ -230,7 +232,7 @@ public class OutNetMessage {
|
||||
|
||||
public void transportFailed(String transportStyle) {
|
||||
if (_failedTransports == null)
|
||||
_failedTransports = new HashSet(1);
|
||||
_failedTransports = new HashSet(2);
|
||||
_failedTransports.add(transportStyle);
|
||||
}
|
||||
/** not thread safe - dont fail transports and iterate over this at the same time */
|
||||
@@ -240,14 +242,13 @@ public class OutNetMessage {
|
||||
|
||||
/** when did the sending process begin */
|
||||
public long getSendBegin() { return _sendBegin; }
|
||||
|
||||
public void beginSend() { _sendBegin = _context.clock().now(); }
|
||||
public void beginTransmission() { _transmitBegin = _context.clock().now(); }
|
||||
public void beginPrepare() { _prepareBegin = _context.clock().now(); }
|
||||
public void prepared() { prepared(null); }
|
||||
|
||||
public void prepared(Object buf) {
|
||||
_prepareEnd = _context.clock().now();
|
||||
_preparationBuf = buf;
|
||||
}
|
||||
|
||||
public Object releasePreparationBuffer() {
|
||||
Object rv = _preparationBuf;
|
||||
_preparationBuf = null;
|
||||
@@ -255,29 +256,25 @@ public class OutNetMessage {
|
||||
}
|
||||
|
||||
public long getCreated() { return _created; }
|
||||
|
||||
/** time since the message was created */
|
||||
public long getLifetime() { return _context.clock().now() - _created; }
|
||||
|
||||
/** time the transport tries to send the message (including any queueing) */
|
||||
public long getSendTime() { return _context.clock().now() - _sendBegin; }
|
||||
/** time during which the i2np message is actually in flight */
|
||||
public long getTransmissionTime() { return _context.clock().now() - _transmitBegin; }
|
||||
/** how long it took to prepare the i2np message for transmission (including serialization and transport layer encryption) */
|
||||
public long getPreparationTime() { return _prepareEnd - _prepareBegin; }
|
||||
/** number of messages ahead of this one going to the targeted peer when it is first enqueued */
|
||||
public int getQueueSize() { return _queueSize; }
|
||||
public void setQueueSize(int size) { _queueSize = size; }
|
||||
|
||||
|
||||
/**
|
||||
* We've done what we need to do with the data from this message, though
|
||||
* we may keep the object around for a while to use its ID, jobs, etc.
|
||||
*/
|
||||
public void discardData() {
|
||||
long timeToDiscard = _context.clock().now() - _created;
|
||||
if ( (_message != null) && (_messageSize <= 0) )
|
||||
_messageSize = _message.getMessageSize();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
long timeToDiscard = _context.clock().now() - _created;
|
||||
_log.debug("Discard " + _messageSize + "byte " + _messageType + " message after "
|
||||
+ timeToDiscard);
|
||||
}
|
||||
_message = null;
|
||||
//_context.statManager().addRateData("outNetMessage.timeToDiscard", timeToDiscard, timeToDiscard);
|
||||
//_context.messageStateMonitor().outboundMessageDiscarded();
|
||||
@@ -287,7 +284,7 @@ public class OutNetMessage {
|
||||
public void finalize() throws Throwable {
|
||||
if (_message != null) {
|
||||
if (_log.shouldLog(Log.WARN)) {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
StringBuilder buf = new StringBuilder(1024);
|
||||
buf.append("Undiscarded ").append(_messageSize).append("byte ");
|
||||
buf.append(_messageType).append(" message created ");
|
||||
buf.append((_context.clock().now() - _created)).append("ms ago: ");
|
||||
@@ -302,14 +299,16 @@ public class OutNetMessage {
|
||||
super.finalize();
|
||||
}
|
||||
*/
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("[OutNetMessage contains ");
|
||||
StringBuilder buf = new StringBuilder(256);
|
||||
buf.append("[OutNetMessage containing ");
|
||||
if (_message == null) {
|
||||
buf.append("*no message*");
|
||||
} else {
|
||||
buf.append("a ").append(_messageSize).append(" byte ");
|
||||
buf.append(_message.getClass().getName());
|
||||
buf.append(_messageType);
|
||||
}
|
||||
buf.append(" expiring on ").append(new Date(_expiration));
|
||||
if (_failedTransports != null)
|
||||
@@ -326,20 +325,25 @@ public class OutNetMessage {
|
||||
buf.append(" with onFailedReply job: ").append(_onFailedReply);
|
||||
if (_onFailedSend != null)
|
||||
buf.append(" with onFailedSend job: ").append(_onFailedSend);
|
||||
buf.append(" {timestamps: \n");
|
||||
renderTimestamps(buf);
|
||||
buf.append("}");
|
||||
if (_timestamps != null && _timestampOrder != null && _log.shouldLog(Log.INFO)) {
|
||||
buf.append(" {timestamps: \n");
|
||||
renderTimestamps(buf);
|
||||
buf.append("}");
|
||||
}
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private void renderTimestamps(StringBuffer buf) {
|
||||
if (_log.shouldLog(Log.INFO)) {
|
||||
/**
|
||||
* Only useful if log level is INFO or DEBUG;
|
||||
* locked_initTimestamps() must have been called previously
|
||||
*/
|
||||
private void renderTimestamps(StringBuilder buf) {
|
||||
synchronized (this) {
|
||||
long lastWhen = -1;
|
||||
for (int i = 0; i < _timestampOrder.size(); i++) {
|
||||
String name = (String)_timestampOrder.get(i);
|
||||
Long when = (Long)_timestamps.get(name);
|
||||
String name = _timestampOrder.get(i);
|
||||
Long when = _timestamps.get(name);
|
||||
buf.append("\t[");
|
||||
long diff = when.longValue() - lastWhen;
|
||||
if ( (lastWhen > 0) && (diff > 500) )
|
||||
@@ -354,7 +358,6 @@ public class OutNetMessage {
|
||||
lastWhen = when.longValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final static SimpleDateFormat _fmt = new SimpleDateFormat("HH:mm:ss.SSS");
|
||||
@@ -365,15 +368,21 @@ public class OutNetMessage {
|
||||
}
|
||||
}
|
||||
|
||||
/****
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv += DataHelper.hashCode(_message);
|
||||
rv += DataHelper.hashCode(_target);
|
||||
int rv = DataHelper.hashCode(_message);
|
||||
rv ^= DataHelper.hashCode(_target);
|
||||
// the others are pretty much inconsequential
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
//if(obj == null) return false;
|
||||
//if(!(obj instanceof OutNetMessage)) return false;
|
||||
return obj == this; // two OutNetMessages are different even if they contain the same message
|
||||
}
|
||||
****/
|
||||
}
|
||||
|
||||
@@ -8,8 +8,6 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.util.Comparator;
|
||||
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@@ -18,24 +16,18 @@ import net.i2p.util.Log;
|
||||
* that wants to send a message, and the communication subsystem periodically
|
||||
* retrieves messages for delivery.
|
||||
*
|
||||
* Actually, this doesn't 'pool' anything, it calls the comm system directly.
|
||||
* Nor does it organize by priority. But perhaps it could someday.
|
||||
*/
|
||||
public class OutNetMessagePool {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
|
||||
public OutNetMessagePool(RouterContext context) {
|
||||
_context = context;
|
||||
_log = _context.logManager().getLog(OutNetMessagePool.class);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the highest priority message, or null if none are available.
|
||||
*
|
||||
*/
|
||||
public OutNetMessage getNext() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a new message to the pool
|
||||
*
|
||||
@@ -47,8 +39,8 @@ public class OutNetMessagePool {
|
||||
return;
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Adding outbound message to "
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Adding outbound message to "
|
||||
+ msg.getTarget().getIdentity().getHash().toBase64().substring(0,6)
|
||||
+ " with id " + msg.getMessage().getUniqueId()
|
||||
+ " expiring on " + msg.getMessage().getMessageExpiration()
|
||||
@@ -70,51 +62,18 @@ public class OutNetMessagePool {
|
||||
return false;
|
||||
}
|
||||
if (msg.getTarget() == null) {
|
||||
_log.error("No target in the OutNetMessage: " + msg, new Exception("Definitely a fuckup"));
|
||||
_log.error("No target in the OutNetMessage: " + msg, new Exception());
|
||||
return false;
|
||||
}
|
||||
if (msg.getPriority() < 0) {
|
||||
_log.warn("Priority less than 0? sounds like nonsense to me... " + msg, new Exception("Negative priority"));
|
||||
_log.error("Priority less than 0? sounds like nonsense to me... " + msg, new Exception());
|
||||
return false;
|
||||
}
|
||||
if (msg.getExpiration() <= _context.clock().now()) {
|
||||
_log.error("Already expired! wtf: " + msg, new Exception("Expired message"));
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping expired outbound msg: " + msg, new Exception());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
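Taken together, the checks above admit a message only when it carries a payload, a target, a non-negative priority, and an expiration still in the future. A hypothetical send path that satisfies them; the context/message/target handles, the priority value, the 10-second lifetime, and the outNetMessagePool() accessor are all assumptions for illustration:

OutNetMessage out = new OutNetMessage(ctx);
out.setMessage(i2npMessage);                        // must be non-null
out.setTarget(targetRouterInfo);                    // must be non-null
out.setPriority(400);                               // must be >= 0
out.setExpiration(ctx.clock().now() + 10*1000);     // must still be in the future when validated
ctx.outNetMessagePool().add(out);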
|
||||
|
||||
/**
|
||||
* Clear any messages that have expired, enqueuing any appropriate jobs
|
||||
*
|
||||
*/
|
||||
public void clearExpired() {
|
||||
// noop
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the number of messages, regardless of priority.
|
||||
*
|
||||
*/
|
||||
public int getCount() { return 0; }
|
||||
|
||||
/**
|
||||
* Retrieve the number of messages at the given priority. This can be used for
|
||||
* subsystems that maintain a pool of messages to be sent whenever there is spare time,
|
||||
* where all of these 'spare' messages are of the same priority.
|
||||
*
|
||||
*/
|
||||
public int getCount(int priority) { return 0; }
|
||||
|
||||
public void dumpPoolInfo() { return; }
|
||||
|
||||
private static class ReverseIntegerComparator implements Comparator {
|
||||
public int compare(Object lhs, Object rhs) {
|
||||
if ( (lhs == null) || (rhs == null) ) return 0; // invalid, but never used
|
||||
if ( !(lhs instanceof Integer) || !(rhs instanceof Integer)) return 0;
|
||||
Integer lv = (Integer)lhs;
|
||||
Integer rv = (Integer)rhs;
|
||||
return - (lv.compareTo(rv));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.Writer;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
|
||||
@@ -26,8 +26,8 @@ public interface PeerManagerFacade extends Service {
|
||||
*
|
||||
* @return List of Hash objects of the RouterIdentity for matching peers
|
||||
*/
|
||||
public List selectPeers(PeerSelectionCriteria criteria);
|
||||
public List getPeersByCapability(char capability);
|
||||
public List<Hash> selectPeers(PeerSelectionCriteria criteria);
|
||||
public Set<Hash> getPeersByCapability(char capability);
|
||||
public void setCapabilities(Hash peer, String caps);
|
||||
public void removeCapabilities(Hash peer);
|
||||
public Hash selectRandomByCapability(char capability);
|
||||
|
||||
@@ -12,6 +12,7 @@ package net.i2p.router;
|
||||
* Defines the criteria for selecting a set of peers for use when searching the
|
||||
* PeerManager
|
||||
*
|
||||
* Only used by PeerTestJob, which may not have a point.
|
||||
*/
|
||||
public class PeerSelectionCriteria {
|
||||
/** The peers will be used in a tunnel */
|
||||
|
||||
@@ -4,16 +4,12 @@ import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.router.TunnelPoolSettings;
|
||||
import net.i2p.util.KeyRing;
|
||||
|
||||
/**
|
||||
@@ -31,19 +27,18 @@ public class PersistentKeyRing extends KeyRing {
|
||||
addFromProperties();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SessionKey put(Hash h, SessionKey sk) {
|
||||
SessionKey old = super.put(h, sk);
|
||||
if (!sk.equals(old)) {
|
||||
_ctx.router().setConfigSetting(PROP_PFX + h.toBase64().replace("=", "$"),
|
||||
_ctx.router().saveConfig(PROP_PFX + h.toBase64().replace("=", "$"),
|
||||
sk.toBase64());
|
||||
_ctx.router().saveConfig();
|
||||
}
|
||||
return old;
|
||||
}
|
||||
|
||||
public SessionKey remove(Hash h) {
|
||||
_ctx.router().removeConfigSetting(PROP_PFX + h.toBase64().replace("=", "$"));
|
||||
_ctx.router().saveConfig();
|
||||
_ctx.router().saveConfig(PROP_PFX + h.toBase64().replace("=", "$"), null);
|
||||
return super.remove(h);
|
||||
}
|
||||
|
||||
@@ -56,7 +51,7 @@ public class PersistentKeyRing extends KeyRing {
|
||||
if (key == null || key.length() != 44)
|
||||
continue;
|
||||
String hb = prop.substring(PROP_PFX.length());
|
||||
hb.replace("$", "=");
|
||||
hb = hb.replace("$", "=");
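The reassignment above matters because Java strings are immutable: String.replace() returns a new string and leaves the receiver untouched, so calling it without using the result is a no-op. A two-line illustration (the value is made up):

String hb = "hash$with$dollars";
hb = hb.replace("$", "=");      // without the assignment, hb would still contain the '$' characters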
|
||||
Hash dest = new Hash();
|
||||
SessionKey sk = new SessionKey();
|
||||
try {
|
||||
@@ -67,13 +62,14 @@ public class PersistentKeyRing extends KeyRing {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
buf.append("\n<table border=\"1\"><tr><th align=\"left\">Destination Hash<th align=\"left\">Name or Dest.<th align=\"left\">Session Key</tr>");
|
||||
StringBuilder buf = new StringBuilder(1024);
|
||||
buf.append("\n<table><tr><th align=\"left\">Destination Hash<th align=\"left\">Name or Dest.<th align=\"left\">Encryption Key</tr>");
|
||||
for (Entry<Hash, SessionKey> e : entrySet()) {
|
||||
buf.append("\n<tr><td>");
|
||||
Hash h = e.getKey();
|
||||
buf.append(h.toBase64().substring(0, 6)).append("...");
|
||||
buf.append(h.toBase64().substring(0, 6)).append("…");
|
||||
buf.append("<td>");
|
||||
LeaseSet ls = _ctx.netDb().lookupLeaseSetLocally(h);
|
||||
if (ls != null) {
|
||||
@@ -83,13 +79,13 @@ public class PersistentKeyRing extends KeyRing {
|
||||
if (in != null && in.getDestinationNickname() != null)
|
||||
buf.append(in.getDestinationNickname());
|
||||
else
|
||||
buf.append(dest.toBase64().substring(0, 6)).append("...");
|
||||
buf.append(dest.toBase64().substring(0, 6)).append("…");
|
||||
} else {
|
||||
String host = _ctx.namingService().reverseLookup(dest);
|
||||
if (host != null)
|
||||
buf.append(host);
|
||||
else
|
||||
buf.append(dest.toBase64().substring(0, 6)).append("...");
|
||||
buf.append(dest.toBase64().substring(0, 6)).append("…");
|
||||
}
|
||||
}
|
||||
buf.append("<td>");
|
||||
|
||||
@@ -136,6 +136,13 @@ public interface ProfileManager {
|
||||
*/
|
||||
void dbStoreSent(Hash peer, long responseTimeMs);
|
||||
|
||||
/**
|
||||
* Note that we confirmed a successful send of db data to
|
||||
* the peer.
|
||||
*
|
||||
*/
|
||||
void dbStoreSuccessful(Hash peer);
|
||||
|
||||
/**
|
||||
* Note that we were unable to confirm a successful send of db data to
|
||||
* the peer, at least not within our timeout period
|
||||
@@ -148,6 +155,7 @@ public interface ProfileManager {
|
||||
* through an explicit dbStore or in a dbLookupReply
|
||||
*/
|
||||
void heardAbout(Hash peer);
|
||||
void heardAbout(Hash peer, long when);
|
||||
|
||||
/**
|
||||
* Note that the router received a message from the given peer on the specified
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,5 +1,11 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.router.time.RouterTimestamper;
|
||||
import net.i2p.time.Timestamper;
|
||||
import net.i2p.util.Clock;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@@ -15,21 +21,77 @@ import net.i2p.util.Log;
|
||||
*/
|
||||
public class RouterClock extends Clock {
|
||||
|
||||
RouterContext _context;
|
||||
/**
|
||||
* How often we will slew the clock
|
||||
* i.e. ppm = 1000000/MAX_SLEW
|
||||
* We should be able to slew really fast,
|
||||
* this is probably a lot faster than what NTP does
|
||||
* 1/50 is 12s in a 10m tunnel lifetime, that should be fine.
|
||||
* All of this is @since 0.7.12
|
||||
*/
|
||||
private static final long MAX_SLEW = 50;
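To put numbers on the comment above: with MAX_SLEW = 50, now() moves the offset by at most 1 ms per 50 ms of wall time, i.e. 1,000,000 / 50 = 20,000 ppm, which comes to at most 600 / 50 = 12 seconds of total correction over a 10-minute tunnel lifetime.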
|
||||
public static final int DEFAULT_STRATUM = 8;
|
||||
private static final int WORST_STRATUM = 16;
|
||||
|
||||
/** the max NTP Timestamper delay is 30m right now, make this longer than that */
|
||||
private static final long MIN_DELAY_FOR_WORSE_STRATUM = 45*60*1000;
|
||||
private volatile long _desiredOffset;
|
||||
private volatile long _lastSlewed;
|
||||
/** use system time for this */
|
||||
private long _lastChanged;
|
||||
private int _lastStratum;
|
||||
private final Timestamper _timeStamper;
|
||||
|
||||
/**
|
||||
* If the system clock shifts by this much,
|
||||
* call the callback, we probably need a soft restart.
|
||||
* @since 0.8.8
|
||||
*/
|
||||
private static final long MASSIVE_SHIFT_FORWARD = 150*1000;
|
||||
private static final long MASSIVE_SHIFT_BACKWARD = 61*1000;
|
||||
|
||||
private final Set<ClockShiftListener> _shiftListeners;
|
||||
private volatile long _lastShiftNanos;
|
||||
|
||||
public RouterClock(RouterContext context) {
|
||||
super(context);
|
||||
_context = context;
|
||||
_lastStratum = WORST_STRATUM;
|
||||
_lastSlewed = System.currentTimeMillis();
|
||||
_shiftListeners = new CopyOnWriteArraySet();
|
||||
_lastShiftNanos = System.nanoTime();
|
||||
_timeStamper = new RouterTimestamper(context, this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Specify how far away from the "correct" time the computer is - a positive
|
||||
* value means that we are slow, while a negative value means we are fast.
|
||||
*
|
||||
* The RouterTimestamper
|
||||
*/
|
||||
public void setOffset(long offsetMs, boolean force) {
|
||||
@Override
|
||||
public Timestamper getTimestamper() { return _timeStamper; }
|
||||
|
||||
if (false) return;
|
||||
/**
|
||||
* Specify how far away from the "correct" time the computer is - a positive
|
||||
* value means that the system time is slow, while a negative value means the system time is fast.
|
||||
*
|
||||
* @param offsetMs the delta from System.currentTimeMillis() (NOT the delta from now())
|
||||
*/
|
||||
@Override
|
||||
public void setOffset(long offsetMs, boolean force) {
|
||||
setOffset(offsetMs, force, DEFAULT_STRATUM);
|
||||
}
|
||||
|
||||
/**
|
||||
* @since 0.7.12
|
||||
* @param offsetMs the delta from System.currentTimeMillis() (NOT the delta from now())
|
||||
*/
|
||||
private void setOffset(long offsetMs, int stratum) {
|
||||
setOffset(offsetMs, false, stratum);
|
||||
}
|
||||
|
||||
/**
|
||||
* @since 0.7.12
|
||||
* @param offsetMs the delta from System.currentTimeMillis() (NOT the delta from now())
|
||||
*/
|
||||
private void setOffset(long offsetMs, boolean force, int stratum) {
|
||||
long delta = offsetMs - _offset;
|
||||
if (!force) {
|
||||
if ((offsetMs > MAX_OFFSET) || (offsetMs < 0 - MAX_OFFSET)) {
|
||||
@@ -46,57 +108,195 @@ public class RouterClock extends Clock {
|
||||
}
|
||||
}
|
||||
|
||||
if ((delta < MIN_OFFSET_CHANGE) && (delta > 0 - MIN_OFFSET_CHANGE)) {
|
||||
getLog().debug("Not changing offset since it is only " + delta + "ms");
|
||||
// let's be perfect
|
||||
if (delta == 0) {
|
||||
getLog().debug("Not changing offset, delta=0");
|
||||
_alreadyChanged = true;
|
||||
return;
|
||||
}
|
||||
|
||||
// only listen to a worse stratum if it's been a while
|
||||
if (_alreadyChanged && stratum > _lastStratum &&
|
||||
System.currentTimeMillis() - _lastChanged < MIN_DELAY_FOR_WORSE_STRATUM) {
|
||||
getLog().warn("Ignoring update from a stratum " + stratum +
|
||||
" clock, we recently had an update from a stratum " + _lastStratum + " clock");
|
||||
return;
|
||||
}
|
||||
|
||||
// If so configured, check sanity of proposed clock offset
|
||||
if (Boolean.valueOf(_context.getProperty("router.clockOffsetSanityCheck","true")).booleanValue() == true) {
|
||||
if (_context.getBooleanPropertyDefaultTrue("router.clockOffsetSanityCheck") &&
|
||||
_alreadyChanged) {
|
||||
|
||||
// Try calculating peer clock skew
|
||||
Long peerClockSkew = _context.commSystem().getFramedAveragePeerClockSkew(50);
|
||||
|
||||
if (peerClockSkew != null) {
|
||||
long currentPeerClockSkew = ((RouterContext)_context).commSystem().getFramedAveragePeerClockSkew(50);
|
||||
|
||||
// Predict the effect of applying the proposed clock offset
|
||||
long currentPeerClockSkew = peerClockSkew.longValue();
|
||||
long predictedPeerClockSkew = currentPeerClockSkew + (delta / 1000l);
|
||||
long predictedPeerClockSkew = currentPeerClockSkew + delta;
|
||||
|
||||
// Fail sanity check if applying the offset would increase peer clock skew
|
||||
if ((Math.abs(predictedPeerClockSkew) > (Math.abs(currentPeerClockSkew) + 5)) ||
|
||||
(Math.abs(predictedPeerClockSkew) > 20)) {
|
||||
if ((Math.abs(predictedPeerClockSkew) > (Math.abs(currentPeerClockSkew) + 5*1000)) ||
|
||||
(Math.abs(predictedPeerClockSkew) > 20*1000)) {
|
||||
|
||||
getLog().error("Ignoring clock offset " + offsetMs + "ms (current " + _offset +
|
||||
"ms) since it would increase peer clock skew from " + currentPeerClockSkew +
|
||||
"s to " + predictedPeerClockSkew + "s. Broken server in pool.ntp.org?");
|
||||
"ms to " + predictedPeerClockSkew + "ms. Bad time server?");
|
||||
return;
|
||||
} else {
|
||||
getLog().debug("Approving clock offset " + offsetMs + "ms (current " + _offset +
|
||||
"ms) since it would decrease peer clock skew from " + currentPeerClockSkew +
|
||||
"s to " + predictedPeerClockSkew + "s.");
|
||||
"ms to " + predictedPeerClockSkew + "ms.");
|
||||
}
|
||||
}
|
||||
} // check sanity
|
||||
}
|
||||
|
||||
if (_alreadyChanged) {
|
||||
// In first minute, allow a lower (better) stratum to do a step adjustment after
|
||||
// a previous step adjustment.
|
||||
// This allows NTP to trump a peer offset after a soft restart
|
||||
if (_alreadyChanged &&
|
||||
(stratum >= _lastStratum || _startedOn - System.currentTimeMillis() > 60*1000)) {
|
||||
// Update the target offset, slewing will take care of the rest
|
||||
if (delta > 15*1000)
|
||||
getLog().log(Log.CRIT, "Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
getLog().error("Warning - Updating target clock offset to " + offsetMs + "ms from " + _offset + "ms, Stratum " + stratum);
|
||||
else if (getLog().shouldLog(Log.INFO))
|
||||
getLog().info("Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
getLog().info("Updating target clock offset to " + offsetMs + "ms from " + _offset + "ms, Stratum " + stratum);
|
||||
|
||||
if (!_statCreated)
|
||||
_context.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
|
||||
if (!_statCreated) {
|
||||
_context.statManager().createRequiredRateStat("clock.skew", "Clock step adjustment (ms)", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
|
||||
_statCreated = true;
|
||||
}
|
||||
_context.statManager().addRateData("clock.skew", delta, 0);
|
||||
_desiredOffset = offsetMs;
|
||||
} else {
|
||||
getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms, Stratum " + stratum);
|
||||
_alreadyChanged = true;
|
||||
_offset = offsetMs;
|
||||
_desiredOffset = offsetMs;
|
||||
// this is used by the JobQueue
|
||||
fireOffsetChanged(delta);
|
||||
}
|
||||
_alreadyChanged = true;
|
||||
_offset = offsetMs;
|
||||
fireOffsetChanged(delta);
|
||||
_lastChanged = System.currentTimeMillis();
|
||||
_lastStratum = stratum;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* @param stratum used to determine whether we should ignore
|
||||
* @since 0.7.12
|
||||
*/
|
||||
@Override
|
||||
public void setNow(long realTime, int stratum) {
|
||||
long diff = realTime - System.currentTimeMillis();
|
||||
setOffset(diff, stratum);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the current time synchronized with whatever reference clock is in use.
|
||||
* Do really simple clock slewing, like NTP but without jitter prevention.
|
||||
* Slew the clock toward the desired offset, but only up to a maximum slew rate,
|
||||
* and never let the clock go backwards because of slewing.
|
||||
*
|
||||
* Take care to only access the volatile variables once for speed and to
|
||||
* avoid having another thread change them
|
||||
*
|
||||
* This is called about a zillion times a second, so we can do the slewing right
|
||||
* here rather than in some separate thread to keep it simple.
|
||||
* Avoiding backwards clocks when updating in a thread would be hard too.
|
||||
*/
|
||||
@Override
|
||||
public long now() {
|
||||
long systemNow = System.currentTimeMillis();
|
||||
// copy the global, so two threads don't both increment or decrement _offset
|
||||
long offset = _offset;
|
||||
long sinceLastSlewed = systemNow - _lastSlewed;
|
||||
if (sinceLastSlewed >= MASSIVE_SHIFT_FORWARD ||
|
||||
sinceLastSlewed <= 0 - MASSIVE_SHIFT_BACKWARD) {
|
||||
_lastSlewed = systemNow;
|
||||
notifyMassive(sinceLastSlewed);
|
||||
} else if (sinceLastSlewed >= MAX_SLEW) {
|
||||
// copy the global
|
||||
long desiredOffset = _desiredOffset;
|
||||
if (desiredOffset > offset) {
|
||||
// slew forward
|
||||
_offset = ++offset;
|
||||
} else if (desiredOffset < offset) {
|
||||
// slew backward, but don't let the clock go backward
|
||||
// this should be the first call since systemNow
|
||||
// was greater than lastSlewed + MAX_SLEW, i.e. different
|
||||
// from the last systemNow, thus we won't let the clock go backward,
|
||||
// no need to track when we were last called.
|
||||
_offset = --offset;
|
||||
}
|
||||
_lastSlewed = systemNow;
|
||||
}
|
||||
return offset + systemNow;
|
||||
}
|
||||
|
||||
/*
|
||||
* A large system clock shift happened. Tell people about it.
|
||||
*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
private void notifyMassive(long shift) {
|
||||
long nowNanos = System.nanoTime();
|
||||
// try to prevent dups, not guaranteed
|
||||
// nanoTime() isn't guaranteed to be monotonic either :(
|
||||
if (nowNanos < _lastShiftNanos + MASSIVE_SHIFT_FORWARD)
|
||||
return;
|
||||
_lastShiftNanos = nowNanos;
|
||||
|
||||
// reset these so the offset can be reset by the timestamper again
|
||||
_startedOn = System.currentTimeMillis();
|
||||
_alreadyChanged = false;
|
||||
getTimestamper().timestampNow();
|
||||
|
||||
if (shift > 0)
|
||||
getLog().log(Log.CRIT, "Large clock shift forward by " + DataHelper.formatDuration(shift));
|
||||
else
|
||||
getLog().log(Log.CRIT, "Large clock shift backward by " + DataHelper.formatDuration(0 - shift));
|
||||
|
||||
for (ClockShiftListener lsnr : _shiftListeners) {
|
||||
lsnr.clockShift(shift);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Get notified of massive System clock shifts, positive or negative -
|
||||
* generally a minute or more.
|
||||
* The adjusted (offset) clock changes by the same amount.
|
||||
* The offset itself did not change.
|
||||
* Warning - duplicate notifications may occur.
|
||||
*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void addShiftListener(ClockShiftListener lsnr) {
|
||||
_shiftListeners.add(lsnr);
|
||||
}
|
||||
|
||||
/*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void removeShiftListener(ClockShiftListener lsnr) {
|
||||
_shiftListeners.remove(lsnr);
|
||||
}
|
||||
|
||||
/*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public interface ClockShiftListener {
|
||||
|
||||
/**
|
||||
* @param delta The system clock and adjusted clock just changed by this much,
|
||||
* in milliseconds (approximately)
|
||||
*/
|
||||
public void clockShift(long delta);
|
||||
}
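A hypothetical listener registration; the cast of the context clock to RouterClock, the 60-second threshold, and the reaction sketched in the comments are assumptions, not code from this file:

RouterClock clock = (RouterClock) ctx.clock();       // assumes the context clock is the RouterClock
clock.addShiftListener(new RouterClock.ClockShiftListener() {
    public void clockShift(long delta) {
        // the adjusted clock just jumped by roughly delta ms along with the system clock,
        // so anything scheduled against the old time may need a soft restart
        if (Math.abs(delta) > 60*1000) {
            // ... trigger a soft restart of time-sensitive subsystems ...
        }
    }
});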
|
||||
|
||||
/*
|
||||
* How far we still have to slew, for diagnostics
|
||||
* @since 0.7.12
|
||||
* @deprecated for debugging only
|
||||
*/
|
||||
public long getDeltaOffset() {
|
||||
return _desiredOffset - _offset;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,32 +1,30 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.router.admin.AdminManager;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.internal.InternalClientManager;
|
||||
import net.i2p.router.client.ClientManagerFacadeImpl;
|
||||
import net.i2p.router.dummy.*;
|
||||
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
|
||||
import net.i2p.router.peermanager.Calculator;
|
||||
import net.i2p.router.peermanager.CapacityCalculator;
|
||||
import net.i2p.router.peermanager.IntegrationCalculator;
|
||||
import net.i2p.router.peermanager.IsFailingCalculator;
|
||||
import net.i2p.router.peermanager.PeerManagerFacadeImpl;
|
||||
import net.i2p.router.peermanager.ProfileManagerImpl;
|
||||
import net.i2p.router.peermanager.ProfileOrganizer;
|
||||
import net.i2p.router.peermanager.ReliabilityCalculator;
|
||||
import net.i2p.router.peermanager.SpeedCalculator;
|
||||
import net.i2p.router.peermanager.StrictSpeedCalculator;
|
||||
import net.i2p.router.transport.CommSystemFacadeImpl;
|
||||
import net.i2p.router.transport.FIFOBandwidthLimiter;
|
||||
import net.i2p.router.transport.OutboundMessageRegistry;
|
||||
import net.i2p.router.transport.VMCommSystem;
|
||||
import net.i2p.router.tunnel.TunnelDispatcher;
|
||||
import net.i2p.router.tunnel.pool.TunnelPoolManager;
|
||||
import net.i2p.util.Clock;
|
||||
import net.i2p.util.KeyRing;
|
||||
import net.i2p.util.I2PProperties.I2PPropertyCallback;
|
||||
|
||||
/**
|
||||
* Build off the core I2P context to provide a root for a router instance to
|
||||
@@ -37,9 +35,8 @@ import net.i2p.util.KeyRing;
|
||||
*
|
||||
*/
|
||||
public class RouterContext extends I2PAppContext {
|
||||
private Router _router;
|
||||
private AdminManager _adminManager;
|
||||
private ClientManagerFacade _clientManagerFacade;
|
||||
private final Router _router;
|
||||
private ClientManagerFacadeImpl _clientManagerFacade;
|
||||
private ClientMessagePool _clientMessagePool;
|
||||
private JobQueue _jobQueue;
|
||||
private InNetMessagePool _inNetMessagePool;
|
||||
@@ -59,52 +56,105 @@ public class RouterContext extends I2PAppContext {
|
||||
private Shitlist _shitlist;
|
||||
private Blocklist _blocklist;
|
||||
private MessageValidator _messageValidator;
|
||||
private MessageStateMonitor _messageStateMonitor;
|
||||
//private MessageStateMonitor _messageStateMonitor;
|
||||
private RouterThrottle _throttle;
|
||||
private RouterClock _clock;
|
||||
private Calculator _isFailingCalc;
|
||||
private Calculator _integrationCalc;
|
||||
private Calculator _speedCalc;
|
||||
private Calculator _reliabilityCalc;
|
||||
private Calculator _capacityCalc;
|
||||
private Calculator _oldSpeedCalc;
|
||||
private final Set<Runnable> _finalShutdownTasks;
|
||||
// split up big lock on this to avoid deadlocks
|
||||
private final Object _lock1 = new Object(), _lock2 = new Object();
|
||||
|
||||
|
||||
private static List _contexts = new ArrayList(1);
|
||||
private static final List<RouterContext> _contexts = new CopyOnWriteArrayList();
|
||||
|
||||
public RouterContext(Router router) { this(router, null); }
|
||||
|
||||
public RouterContext(Router router, Properties envProps) {
|
||||
super(filterProps(envProps));
|
||||
_router = router;
|
||||
initAll();
|
||||
// Disabled here so that the router can get a context and get the
|
||||
// directory locations from it, to do an update, without having
|
||||
// to init everything. Caller MUST call initAll() afterwards.
|
||||
// Sorry, this breaks some main() unit tests out there.
|
||||
//initAll();
|
||||
if (!_contexts.isEmpty())
|
||||
System.err.println("Warning - More than one router in this JVM");
|
||||
_finalShutdownTasks = new CopyOnWriteArraySet();
|
||||
_contexts.add(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set properties where the defaults must be different from those
|
||||
* in I2PAppContext.
|
||||
*
|
||||
* Unless we are explicitly disabling the timestamper, we want to use it.
|
||||
* We need this now as the new timestamper default is disabled (so we don't
|
||||
* have each I2PAppContext creating their own SNTP queries all the time)
|
||||
*
|
||||
* Set more PRNG buffers, as the default is now small for the I2PAppContext.
|
||||
*
|
||||
*/
|
||||
static final Properties filterProps(Properties envProps) {
|
||||
private static final Properties filterProps(Properties envProps) {
|
||||
if (envProps == null)
|
||||
envProps = new Properties();
|
||||
if (envProps.getProperty("time.disabled") == null)
|
||||
envProps.setProperty("time.disabled", "false");
|
||||
if (envProps.getProperty("prng.buffers") == null) {
|
||||
// How many of these 256 KB buffers do we need?
|
||||
// One clue: prng.bufferFillTime is ~10ms on my system,
|
||||
// and prng.bufferFillTime event count is ~30 per minute,
|
||||
// or about 2 seconds per buffer - so about 200x faster
|
||||
// to fill than to drain - so we don't need too many
|
||||
long maxMemory = Runtime.getRuntime().maxMemory();
|
||||
if (maxMemory == Long.MAX_VALUE)
|
||||
maxMemory = 96*1024*1024l;
|
||||
long buffs = Math.min(16, Math.max(2, maxMemory / (14 * 1024 * 1024)));
|
||||
envProps.setProperty("prng.buffers", "" + buffs);
|
||||
}
|
||||
return envProps;
|
||||
}
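Worked through the formula above: a JVM reporting a 96 MB heap (also the fallback when maxMemory() is unbounded) gets min(16, max(2, 96 MB / 14 MB)) = 6 of the 256 KB buffers, about 1.5 MB; a 512 MB heap hits the cap of 16 buffers, about 4 MB.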
|
||||
private void initAll() {
|
||||
_adminManager = new AdminManager(this);
|
||||
if ("false".equals(getProperty("i2p.dummyClientFacade", "false")))
|
||||
_clientManagerFacade = new ClientManagerFacadeImpl(this);
|
||||
else
|
||||
_clientManagerFacade = new DummyClientManagerFacade(this);
|
||||
|
||||
/**
|
||||
* Modify the configuration attributes of this context, changing
|
||||
* one of the properties provided during the context construction.
|
||||
*
|
||||
* @param propName The name of the property.
|
||||
* @param value The new value for the property.
|
||||
* @since 0.8.4
|
||||
* @deprecated Use Router.saveConfig()
|
||||
*/
|
||||
public void setProperty(String propName, String value) {
|
||||
_overrideProps.setProperty(propName, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a property provided during the context construction.
|
||||
* Only for use by the router. Others use Router.saveConfig()
|
||||
*
|
||||
* @param propName The name of the property.
|
||||
* @since 0.9
|
||||
*/
|
||||
void removeProperty(String propName) {
|
||||
_overrideProps.remove(propName);
|
||||
}
|
||||
|
||||
|
||||
public void addPropertyCallback(I2PPropertyCallback callback) {
|
||||
_overrideProps.addCallBack(callback);
|
||||
}
|
||||
|
||||
|
||||
public void initAll() {
|
||||
if (getBooleanProperty("i2p.dummyClientFacade"))
|
||||
System.err.println("i2p.dummyClientFacade currently unsupported");
|
||||
_clientManagerFacade = new ClientManagerFacadeImpl(this);
|
||||
// removed since it doesn't implement InternalClientManager for now
|
||||
//else
|
||||
// _clientManagerFacade = new DummyClientManagerFacade(this);
|
||||
_clientMessagePool = new ClientMessagePool(this);
|
||||
_jobQueue = new JobQueue(this);
|
||||
_inNetMessagePool = new InNetMessagePool(this);
|
||||
_outNetMessagePool = new OutNetMessagePool(this);
|
||||
_messageHistory = new MessageHistory(this);
|
||||
_messageRegistry = new OutboundMessageRegistry(this);
|
||||
_messageStateMonitor = new MessageStateMonitor(this);
|
||||
//_messageStateMonitor = new MessageStateMonitor(this);
|
||||
if ("false".equals(getProperty("i2p.dummyNetDb", "false")))
|
||||
_netDb = new FloodfillNetworkDatabaseFacade(this); // new KademliaNetworkDatabaseFacade(this);
|
||||
else
|
||||
@@ -130,35 +180,62 @@ public class RouterContext extends I2PAppContext {
|
||||
_shitlist = new Shitlist(this);
|
||||
_blocklist = new Blocklist(this);
|
||||
_messageValidator = new MessageValidator(this);
|
||||
//_throttle = new RouterThrottleImpl(this);
|
||||
_throttle = new RouterDoSThrottle(this);
|
||||
_isFailingCalc = new IsFailingCalculator(this);
|
||||
_integrationCalc = new IntegrationCalculator(this);
|
||||
_speedCalc = new SpeedCalculator(this);
|
||||
_oldSpeedCalc = new StrictSpeedCalculator(this);
|
||||
_reliabilityCalc = new ReliabilityCalculator(this);
|
||||
_capacityCalc = new CapacityCalculator(this);
|
||||
_throttle = new RouterThrottleImpl(this);
|
||||
//_throttle = new RouterDoSThrottle(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the list of router contexts currently instantiated in this JVM.
|
||||
* This will always contain only one item (except when a simulation per the
|
||||
* MultiRouter is going on), and the list should only be modified when a new
|
||||
* MultiRouter is going on).
|
||||
*
|
||||
* @return an unmodifiable list (as of 0.8.8). May be empty.
|
||||
*/
|
||||
public static List<RouterContext> listContexts() {
|
||||
return Collections.unmodifiableList(_contexts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as listContexts() but package private and modifiable.
|
||||
* The list should only be modified when a new
|
||||
* context is created or a router is shut down.
|
||||
*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public static List listContexts() { return _contexts; }
|
||||
static List<RouterContext> getContexts() {
|
||||
return _contexts;
|
||||
}
|
||||
|
||||
/**
|
||||
* Kill the global I2PAppContext, so it isn't still around
|
||||
* when we restart in the same JVM (Android).
|
||||
* Only do this if there are no other routers in the JVM.
|
||||
*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
static void killGlobalContext() {
|
||||
synchronized (I2PAppContext.class) {
|
||||
_globalAppContext = null;
|
||||
}
|
||||
}
|
||||
|
||||
/** what router is this context working for? */
|
||||
public Router router() { return _router; }
|
||||
/** convenience method for querying the router's ident */
|
||||
public Hash routerHash() { return _router.getRouterInfo().getIdentity().getHash(); }
|
||||
|
||||
/**
|
||||
* Controls a basic admin interface
|
||||
*
|
||||
* Convenience method for getting the router hash.
|
||||
* Equivalent to context.router().getRouterInfo().getIdentity().getHash()
|
||||
* @return may be null if called very early
|
||||
*/
|
||||
public AdminManager adminManager() { return _adminManager; }
|
||||
public Hash routerHash() {
|
||||
if (_router == null)
|
||||
return null;
|
||||
RouterInfo ri = _router.getRouterInfo();
|
||||
if (ri == null)
|
||||
return null;
|
||||
return ri.getIdentity().getHash();
|
||||
}
|
||||
|
||||
/**
|
||||
* How are we coordinating clients for the router?
|
||||
*/
|
||||
@@ -189,13 +266,15 @@ public class RouterContext extends I2PAppContext {
|
||||
* The registry is used by outbound messages to wait for replies.
|
||||
*/
|
||||
public OutboundMessageRegistry messageRegistry() { return _messageRegistry; }
|
||||
|
||||
/**
|
||||
* The monitor keeps track of inbound and outbound messages currently held in
|
||||
* memory / queued for processing. We'll use this to throttle the router so
|
||||
* we don't overflow.
|
||||
*
|
||||
*/
|
||||
public MessageStateMonitor messageStateMonitor() { return _messageStateMonitor; }
|
||||
//public MessageStateMonitor messageStateMonitor() { return _messageStateMonitor; }
|
||||
|
||||
/**
|
||||
* Our db cache
|
||||
*/
|
||||
@@ -264,20 +343,9 @@ public class RouterContext extends I2PAppContext {
|
||||
*/
|
||||
public RouterThrottle throttle() { return _throttle; }
|
||||
|
||||
/** how do we rank the failure of profiles? */
|
||||
public Calculator isFailingCalculator() { return _isFailingCalc; }
|
||||
/** how do we rank the integration of profiles? */
|
||||
public Calculator integrationCalculator() { return _integrationCalc; }
|
||||
/** how do we rank the speed of profiles? */
|
||||
public Calculator speedCalculator() { return _speedCalc; }
|
||||
public Calculator oldSpeedCalculator() { return _oldSpeedCalc; }
|
||||
/** how do we rank the reliability of profiles? */
|
||||
public Calculator reliabilityCalculator() { return _reliabilityCalc; }
|
||||
/** how do we rank the capacity of profiles? */
|
||||
public Calculator capacityCalculator() { return _capacityCalc; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
StringBuilder buf = new StringBuilder(512);
|
||||
buf.append("RouterContext: ").append(super.toString()).append('\n');
|
||||
buf.append(_router).append('\n');
|
||||
buf.append(_clientManagerFacade).append('\n');
|
||||
@@ -298,10 +366,6 @@ public class RouterContext extends I2PAppContext {
|
||||
buf.append(_statPublisher).append('\n');
|
||||
buf.append(_shitlist).append('\n');
|
||||
buf.append(_messageValidator).append('\n');
|
||||
buf.append(_isFailingCalc).append('\n');
|
||||
buf.append(_integrationCalc).append('\n');
|
||||
buf.append(_speedCalc).append('\n');
|
||||
buf.append(_reliabilityCalc).append('\n');
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
@@ -310,6 +374,7 @@ public class RouterContext extends I2PAppContext {
|
||||
* I2PAppContext says.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public String getProperty(String propName) {
|
||||
if (_router != null) {
|
||||
String val = _router.getConfigSetting(propName);
|
||||
@@ -322,6 +387,7 @@ public class RouterContext extends I2PAppContext {
|
||||
* I2PAppContext says.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public String getProperty(String propName, String defaultVal) {
|
||||
if (_router != null) {
|
||||
String val = _router.getConfigSetting(propName);
|
||||
@@ -333,6 +399,7 @@ public class RouterContext extends I2PAppContext {
|
||||
/**
|
||||
* Return an int with an int default
|
||||
*/
|
||||
@Override
|
||||
public int getProperty(String propName, int defaultVal) {
|
||||
if (_router != null) {
|
||||
String val = _router.getConfigSetting(propName);
|
||||
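The getProperty() overrides above consult the router's config file first and only fall back to whatever I2PAppContext provides. A hedged usage sketch, assuming a RouterContext named ctx and keys that may or may not be set in router.config:

    // Hedged sketch: a value set in router.config takes precedence over any
    // system property or context default with the same name.
    int maxTunnels = ctx.getProperty("router.maxParticipatingTunnels", 5000);
    boolean dummyNetDb = "true".equals(ctx.getProperty("i2p.dummyNetDb", "false"));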
@@ -348,19 +415,20 @@ public class RouterContext extends I2PAppContext {
|
||||
}
|
||||
|
||||
/**
|
||||
* The context's synchronized clock, which is kept context specific only to
|
||||
* enable simulators to play with clock skew among different instances.
|
||||
*
|
||||
* It wouldn't be necessary to override clock(), except for the reason
|
||||
* that it triggers initializeClock() of which we definitely
|
||||
* need the local version to run.
|
||||
* @return new Properties with system and context properties
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Clock clock() {
|
||||
if (!_clockInitialized) initializeClock();
|
||||
return _clock;
|
||||
@Override
|
||||
public Properties getProperties() {
|
||||
Properties rv = super.getProperties();
|
||||
if (_router != null)
|
||||
rv.putAll(_router.getConfigMap());
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void initializeClock() {
|
||||
synchronized (this) {
|
||||
synchronized (_lock1) {
|
||||
if (_clock == null)
|
||||
_clock = new RouterClock(this);
|
||||
_clockInitialized = true;
|
||||
@@ -377,11 +445,54 @@ public class RouterContext extends I2PAppContext {
|
||||
|
||||
@Override
|
||||
protected void initializeKeyRing() {
|
||||
synchronized (this) {
|
||||
synchronized (_lock2) {
|
||||
if (_keyRing == null)
|
||||
_keyRing = new PersistentKeyRing(this);
|
||||
_keyRingInitialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @since 0.8.8
|
||||
*/
|
||||
void removeShutdownTasks() {
|
||||
_shutdownTasks.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* The last thing to be called before router shutdown.
|
||||
* No context resources, including logging, will be available.
|
||||
* Only for external threads in the same JVM needing to know when
|
||||
* the shutdown is complete, like Android.
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void addFinalShutdownTask(Runnable task) {
|
||||
_finalShutdownTasks.add(task);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the Set
|
||||
* @since 0.8.8
|
||||
*/
|
||||
Set<Runnable> getFinalShutdownTasks() {
|
||||
return _finalShutdownTasks;
|
||||
}
|
||||
|
||||
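addFinalShutdownTask() above is aimed at embedders (e.g. Android) that need a callback after every context resource, including logging, is gone. A minimal registration sketch (the ctx variable and the notification target are hypothetical):

    // Hedged sketch: last-gasp hook for an embedding application.
    ctx.addFinalShutdownTask(new Runnable() {
        public void run() {
            // No context services are usable here - only notify the host app.
            System.out.println("Router shutdown complete");
        }
    });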
/**
|
||||
* Use this instead of context instanceof RouterContext
|
||||
* @return true
|
||||
* @since 0.7.9
|
||||
*/
|
||||
public boolean isRouterContext() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Use this to connect to the router in the same JVM.
|
||||
* @return the client manager
|
||||
* @since 0.8.3
|
||||
*/
|
||||
public InternalClientManager internalClientManager() {
|
||||
return _clientManagerFacade;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import net.i2p.data.Hash;
|
||||
* Minor extension of the router throttle to handle some DoS events and
|
||||
* throttle accordingly.
|
||||
*
|
||||
* @deprecated unused
|
||||
*/
|
||||
class RouterDoSThrottle extends RouterThrottleImpl {
|
||||
public RouterDoSThrottle(RouterContext context) {
|
||||
@@ -19,19 +20,20 @@ class RouterDoSThrottle extends RouterThrottleImpl {
|
||||
private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
|
||||
private static final long LOOKUP_THROTTLE_MAX = 20;
|
||||
|
||||
@Override
|
||||
public boolean acceptNetDbLookupRequest(Hash key) {
|
||||
// if we were going to refuse it anyway, drop it
|
||||
boolean shouldAccept = super.acceptNetDbLookupRequest(key);
|
||||
if (!shouldAccept) return false;
|
||||
|
||||
// now let's check for DoS
// now let's check for DoS
|
||||
long now = getContext().clock().now();
|
||||
long now = _context.clock().now();
|
||||
if (_currentLookupPeriod + LOOKUP_THROTTLE_PERIOD > now) {
|
||||
// same period, check for DoS
|
||||
_currentLookupCount++;
|
||||
if (_currentLookupCount >= LOOKUP_THROTTLE_MAX) {
|
||||
getContext().statManager().addRateData("router.throttleNetDbDoS", _currentLookupCount, 0);
|
||||
int rand = getContext().random().nextInt(_currentLookupCount);
|
||||
_context.statManager().addRateData("router.throttleNetDbDoS", _currentLookupCount, 0);
|
||||
int rand = _context.random().nextInt(_currentLookupCount);
|
||||
if (rand > LOOKUP_THROTTLE_MAX) {
|
||||
return false;
|
||||
} else {
|
||||
|
||||
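The (now deprecated) RouterDoSThrottle above counts netDb lookups in 10-second windows and, once the count passes LOOKUP_THROTTLE_MAX, drops a random share of the excess. A self-contained sketch of that windowed counter, with hypothetical names and no I2P dependencies:

    // Hedged sketch of a fixed-window lookup throttle, roughly mirroring the logic above.
    class WindowedLookupThrottle {
        private static final long PERIOD = 10*1000;
        private static final int  MAX    = 20;
        private long windowStart;
        private int  count;
        private final java.util.Random rand = new java.util.Random();

        synchronized boolean accept(long now) {
            if (windowStart + PERIOD > now) {
                count++;
                if (count >= MAX) {
                    // past the limit: drop with probability proportional to the overage
                    return rand.nextInt(count) <= MAX;
                }
                return true;
            }
            // new window
            windowStart = now;
            count = 1;
            return true;
        }
    }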
@@ -1,16 +1,16 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
|
||||
/**
|
||||
* This is the class called by the runplain.sh script on linux
|
||||
* and the i2p.exe launcher on Windows.
|
||||
* (i.e. no wrapper)
|
||||
*
|
||||
* Setup of wrapper.log file is moved to WorkingDir.java
|
||||
* Until WorkingDir is called, the existing stdout / stderr will be used.
|
||||
*/
|
||||
public class RouterLaunch {
|
||||
|
||||
public static void main(String args[]) {
|
||||
try {
|
||||
System.setOut(new PrintStream(new FileOutputStream("wrapper.log")));
|
||||
} catch (IOException ioe) {
|
||||
ioe.printStackTrace();
|
||||
}
|
||||
Router.main(args);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,4 +47,10 @@ public interface RouterThrottle {
|
||||
*/
|
||||
public String getTunnelStatus();
|
||||
public void setTunnelStatus(String msg);
|
||||
|
||||
/** @since 0.8.12 */
|
||||
public void setShutdownStatus();
|
||||
|
||||
/** @since 0.8.12 */
|
||||
public void cancelShutdownStatus();
|
||||
}
|
||||
|
||||
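The two methods added to the interface above let the shutdown path update the console status without knowing the throttle implementation. A hedged caller-side sketch (the graceful-shutdown hook shown is hypothetical):

    // Hedged sketch: flip the tunnel status while a graceful shutdown is pending.
    ctx.throttle().setShutdownStatus();      // "Rejecting tunnels: Shutting down"
    // ... later, if the operator cancels the shutdown:
    ctx.throttle().cancelShutdownStatus();   // back to plain "Rejecting tunnels"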
@@ -5,6 +5,8 @@ import net.i2p.router.peermanager.TunnelHistory;
|
||||
import net.i2p.stat.Rate;
|
||||
import net.i2p.stat.RateStat;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.SimpleScheduler;
|
||||
import net.i2p.util.SimpleTimer;
|
||||
|
||||
/**
|
||||
* Simple throttle that basically stops accepting messages or nontrivial
|
||||
@@ -12,57 +14,73 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
class RouterThrottleImpl implements RouterThrottle {
|
||||
private RouterContext _context;
|
||||
private Log _log;
|
||||
protected final RouterContext _context;
|
||||
private final Log _log;
|
||||
private String _tunnelStatus;
|
||||
|
||||
/**
|
||||
* arbitrary hard limit of 10 seconds - if its taking this long to get
|
||||
* arbitrary hard limit - if it's taking this long to get
|
||||
* to a job, we're congested.
|
||||
*
|
||||
*/
|
||||
private static int JOB_LAG_LIMIT = 2*1000;
|
||||
/**
|
||||
* Arbitrary hard limit - if we throttle our network connection this many
|
||||
* times in the previous 2 minute period, don't accept requests to
|
||||
* participate in tunnels.
|
||||
*
|
||||
*/
|
||||
private static int THROTTLE_EVENT_LIMIT = 30;
|
||||
|
||||
private static final String PROP_MAX_TUNNELS = "router.maxParticipatingTunnels";
|
||||
private static final int DEFAULT_MAX_TUNNELS = 2000;
|
||||
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
|
||||
private static final int DEFAULT_MAX_TUNNELS = 5000;
|
||||
private static final String PROP_MAX_PROCESSINGTIME = "router.defaultProcessingTimeThrottle";
|
||||
|
||||
/**
|
||||
* TO BE FIXED - SEE COMMENTS BELOW
|
||||
*/
|
||||
private static final int DEFAULT_MAX_PROCESSINGTIME = 2250;
|
||||
|
||||
/** tunnel acceptance */
|
||||
public static final int TUNNEL_ACCEPT = 0;
|
||||
|
||||
/** = TrivialPreprocessor.PREPROCESSED_SIZE */
|
||||
private static final int PREPROCESSED_SIZE = 1024;
|
||||
|
||||
private static final long REJECT_STARTUP_TIME = 20*60*1000;
|
||||
|
||||
public RouterThrottleImpl(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(RouterThrottleImpl.class);
|
||||
setTunnelStatus();
|
||||
_context.simpleScheduler().addEvent(new ResetStatus(), REJECT_STARTUP_TIME + 120*1000);
|
||||
_context.statManager().createRateStat("router.throttleNetworkCause", "How lagged the jobQueue was when an I2NP was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleNetDbCause", "How lagged the jobQueue was when a networkDb request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelCause", "How lagged the jobQueue was when a tunnel request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
//_context.statManager().createRateStat("router.throttleNetDbCause", "How lagged the jobQueue was when a networkDb request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
//_context.statManager().createRateStat("router.throttleTunnelCause", "How lagged the jobQueue was when a tunnel request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("tunnel.bytesAllocatedAtAccept", "How many bytes had been 'allocated' for participating tunnels when we accepted a request?", "Tunnels", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelProcessingTime1m", "How long it takes to process a message (1 minute average) when we throttle a tunnel?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelProcessingTime10m", "How long it takes to process a message (10 minute average) when we throttle a tunnel?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelMaxExceeded", "How many tunnels we are participating in when we refuse one due to excees?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelProbTooFast", "How many tunnels beyond the previous 1h average are we participating in when we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelProbTestSlow", "How slow are our tunnel tests when our average exceeds the old average and we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
//_context.statManager().createRateStat("router.throttleTunnelProbTestSlow", "How slow are our tunnel tests when our average exceeds the old average and we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelBandwidthExceeded", "How much bandwidth is allocated when we refuse due to bandwidth allocation?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelBytesAllowed", "How many bytes are allowed to be sent when we get a tunnel request (period is how many are currently allocated)?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelBytesUsed", "Used Bps at request (period = max KBps)?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("router.throttleTunnelFailCount1m", "How many messages failed to be sent in the last 2 minutes when we throttle based on a spike in failures (period = 10 minute average failure count)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
|
||||
_context.statManager().createRateStat("router.throttleTunnelQueueOverload", "How many pending tunnel request messages have we received when we reject them due to overload (period = time to process each)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
|
||||
//_context.statManager().createRateStat("router.throttleTunnelQueueOverload", "How many pending tunnel request messages have we received when we reject them due to overload (period = time to process each)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset status from starting up to not-starting up,
|
||||
* in case we don't get a tunnel request soon after the 20 minutes is up.
|
||||
*
|
||||
* @since 0.8.12
|
||||
*/
|
||||
private class ResetStatus implements SimpleTimer.TimedEvent {
|
||||
public void timeReached() {
|
||||
if (_tunnelStatus.equals(_x("Rejecting tunnels: Starting up")))
|
||||
cancelShutdownStatus();
|
||||
}
|
||||
}
|
||||
|
||||
public boolean acceptNetworkMessage() {
|
||||
//if (true) return true;
|
||||
long lag = _context.jobQueue().getMaxLag();
|
||||
if ( (lag > JOB_LAG_LIMIT) && (_context.router().getUptime() > 60*1000) ) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Throttling network reader, as the job lag is " + lag);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Throttling network reader, as the job lag is " + lag);
|
||||
_context.statManager().addRateData("router.throttleNetworkCause", lag, lag);
|
||||
return false;
|
||||
} else {
|
||||
@@ -70,6 +88,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated unused, function moved to netdb */
|
||||
public boolean acceptNetDbLookupRequest(Hash key) {
|
||||
long lag = _context.jobQueue().getMaxLag();
|
||||
if (lag > JOB_LAG_LIMIT) {
|
||||
@@ -86,34 +105,77 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
if (_context.getProperty(Router.PROP_SHUTDOWN_IN_PROGRESS) != null) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Refusing tunnel request since we are shutting down ASAP");
|
||||
setTunnelStatus("Rejecting tunnels: Shutting down");
|
||||
setShutdownStatus();
|
||||
// Don't use CRIT because this tells everybody we are shutting down
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
|
||||
// Don't use CRIT because we don't want peers to think we're failing
|
||||
if (_context.router().getUptime() < 20*60*1000)
|
||||
if (_context.router().getUptime() < REJECT_STARTUP_TIME) {
|
||||
setTunnelStatus(_x("Rejecting tunnels: Starting up"));
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
|
||||
long lag = _context.jobQueue().getMaxLag();
|
||||
// reject here if lag too high???
|
||||
//long lag = _context.jobQueue().getMaxLag();
|
||||
// reject here if lag too high???
|
||||
|
||||
// TODO
|
||||
// This stat is highly dependent on transport mix.
|
||||
// For NTCP, it is queueing delay only, ~25ms
|
||||
// For SSU it is queueing + ack time, ~1000 ms.
|
||||
// (SSU acks may be delayed so it is much more than just RTT... and the delay may
|
||||
// counterintuitively be more when there is low traffic)
|
||||
// Change the stat or pick a better stat.
|
||||
RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
|
||||
Rate r = null;
|
||||
if (rs != null)
|
||||
r = rs.getRate(60*1000);
|
||||
double processTime = (r != null ? r.getAverageValue() : 0);
|
||||
if (processTime > 5000) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Refusing tunnel request with the job lag of " + lag
|
||||
+ "since the 1 minute message processing time is too slow (" + processTime + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProcessingTime1m", (long)processTime, (long)processTime);
|
||||
setTunnelStatus("Rejecting tunnels: High message delay");
|
||||
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
|
||||
//Reject tunnels if the time to process messages and send them is too large. Too much time implies congestion.
|
||||
if(r != null) {
|
||||
double totalSendProcessingTimeEvents = r.getCurrentEventCount() + r.getLastEventCount();
|
||||
double avgSendProcessingTime = 0;
|
||||
double currentSendProcessingTime = 0;
|
||||
double lastSendProcessingTime = 0;
|
||||
|
||||
//Calculate times
|
||||
if(r.getCurrentEventCount() > 0) {
|
||||
currentSendProcessingTime = r.getCurrentTotalValue()/r.getCurrentEventCount();
|
||||
}
|
||||
if(r.getLastEventCount() > 0) {
|
||||
lastSendProcessingTime = r.getLastTotalValue()/r.getLastEventCount();
|
||||
}
|
||||
if(totalSendProcessingTimeEvents > 0) {
|
||||
avgSendProcessingTime = (r.getCurrentTotalValue() + r.getLastTotalValue())/totalSendProcessingTimeEvents;
|
||||
}
|
||||
else {
|
||||
avgSendProcessingTime = r.getAverageValue();
|
||||
if(_log.shouldLog(Log.WARN)) {
|
||||
_log.warn("No events occurred. Using 1 minute average to look at message delay.");
|
||||
}
|
||||
}
|
||||
|
||||
int maxProcessingTime = _context.getProperty(PROP_MAX_PROCESSINGTIME, DEFAULT_MAX_PROCESSINGTIME);
|
||||
|
||||
//Set throttling if necessary
|
||||
if((avgSendProcessingTime > maxProcessingTime*0.9
|
||||
|| currentSendProcessingTime > maxProcessingTime
|
||||
|| lastSendProcessingTime > maxProcessingTime)) {
|
||||
if(_log.shouldLog(Log.WARN)) {
|
||||
_log.warn("Refusing tunnel request due to sendProcessingTime of " + avgSendProcessingTime
|
||||
+ " ms over the last two minutes, which is too much.");
|
||||
}
|
||||
setTunnelStatus(_x("Rejecting tunnels: High message delay"));
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int numTunnels = _context.tunnelManager().getParticipatingCount();
|
||||
int maxTunnels = _context.getProperty(PROP_MAX_TUNNELS, DEFAULT_MAX_TUNNELS);
|
||||
|
||||
if (numTunnels > getMinThrottleTunnels()) {
|
||||
// Throttle tunnels if min. throttle level is exceeded and default max participating tunnels (or fewer) is used.
|
||||
if ((numTunnels > getMinThrottleTunnels()) && (DEFAULT_MAX_TUNNELS <= maxTunnels)) {
|
||||
double tunnelGrowthFactor = getTunnelGrowthFactor();
|
||||
Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(10*60*1000);
|
||||
if (avgTunnels != null) {
|
||||
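The congestion check above averages transport.sendProcessingTime over the current and previous rate periods and rejects once any of the three figures crosses the (configurable) limit, with the combined average tripping at 90% of it. A standalone sketch of that arithmetic, using made-up totals and event counts:

    // Hedged sketch of the send-processing-time gate, no I2P types involved.
    double curTotal = 12000, curEvents = 8;     // hypothetical current-period figures
    double lastTotal = 30000, lastEvents = 12;  // hypothetical previous-period figures
    int maxProcessingTime = 2250;               // DEFAULT_MAX_PROCESSINGTIME above

    double current = curEvents  > 0 ? curTotal  / curEvents  : 0;   // 1500 ms
    double last    = lastEvents > 0 ? lastTotal / lastEvents : 0;   // 2500 ms -> over the limit
    double avg     = (curEvents + lastEvents) > 0
                   ? (curTotal + lastTotal) / (curEvents + lastEvents)
                   : 0;                                              // 2100 ms

    boolean reject = avg > maxProcessingTime * 0.9   // 2025 ms threshold
                  || current > maxProcessingTime
                  || last    > maxProcessingTime;
    // Here both 'avg' and 'last' exceed their thresholds, so the request would be
    // rejected with "Rejecting tunnels: High message delay".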
@@ -140,7 +202,9 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_log.warn("Probabalistically refusing tunnel request (avg=" + avg
|
||||
+ " current=" + numTunnels + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProbTooFast", (long)(numTunnels-avg), 0);
|
||||
setTunnelStatus("Rejecting " + (100 - (int) probAccept*100) + "% of tunnels: High number of requests");
|
||||
// hard to do {0} from here
|
||||
//setTunnelStatus("Rejecting " + (100 - (int) probAccept*100) + "% of tunnels: High number of requests");
|
||||
setTunnelStatus(_x("Rejecting most tunnels: High number of requests"));
|
||||
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
}
|
||||
} else {
|
||||
@@ -174,13 +238,13 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Probabalistically accept tunnel request (p=" + probAccept
|
||||
+ " v=" + v + " test time avg 1m=" + avg1m + " 10m=" + avg10m + ")");
|
||||
} else if (false) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Probabalistically refusing tunnel request (test time avg 1m=" + avg1m
|
||||
+ " 10m=" + avg10m + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg1m-avg10m), 0);
|
||||
setTunnelStatus("Rejecting " + ((int) probAccept*100) + "% of tunnels: High test time");
|
||||
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
//} else if (false) {
|
||||
// if (_log.shouldLog(Log.WARN))
|
||||
// _log.warn("Probabalistically refusing tunnel request (test time avg 1m=" + avg1m
|
||||
// + " 10m=" + avg10m + ")");
|
||||
// _context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg1m-avg10m), 0);
|
||||
// setTunnelStatus("Rejecting " + ((int) probAccept*100) + "% of tunnels: High test time");
|
||||
// return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
}
|
||||
} else {
|
||||
// not yet...
|
||||
@@ -190,13 +254,12 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
}
|
||||
}
|
||||
|
||||
int max = _context.getProperty(PROP_MAX_TUNNELS, DEFAULT_MAX_TUNNELS);
|
||||
if (numTunnels >= max) {
|
||||
if (numTunnels >= maxTunnels) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Refusing tunnel request since we are already participating in "
|
||||
+ numTunnels + " (our max is " + max + ")");
|
||||
+ numTunnels + " (our max is " + maxTunnels + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels, 0);
|
||||
setTunnelStatus("Rejecting tunnels: Limit reached");
|
||||
setTunnelStatus(_x("Rejecting tunnels: Limit reached"));
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
|
||||
@@ -218,7 +281,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
|
||||
messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
|
||||
|
||||
double bytesAllocated = messagesPerTunnel * numTunnels * net.i2p.router.tunnel.TrivialPreprocessor.PREPROCESSED_SIZE;
|
||||
double bytesAllocated = messagesPerTunnel * numTunnels * PREPROCESSED_SIZE;
|
||||
|
||||
if (!allowTunnel(bytesAllocated, numTunnels)) {
|
||||
_context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0);
|
||||
@@ -262,8 +325,9 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
}
|
||||
|
||||
private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 40; // .067KBps
|
||||
/** also limited to 90% - see below */
|
||||
private static final int MIN_AVAILABLE_BPS = 4*1024; // always leave at least 4KBps free when allowing
|
||||
private static final String LIMIT_STR = "Rejecting tunnels: Bandwidth limit";
|
||||
private static final String LIMIT_STR = _x("Rejecting tunnels: Bandwidth limit");
|
||||
|
||||
/**
|
||||
* with bytesAllocated already accounted for across the numTunnels existing
|
||||
@@ -282,8 +346,11 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
int used1mOut = _context.router().get1mRate(true);
|
||||
|
||||
// Check the inbound and outbound total bw available (separately)
|
||||
int availBps = (maxKBpsIn*1024) - usedIn;
|
||||
availBps = Math.min(availBps, (maxKBpsOut*1024) - usedOut);
|
||||
// We block all tunnels when share bw is over (max * 0.9) - 4KB
|
||||
// This gives reasonable growth room for existing tunnels on both low and high
|
||||
// bandwidth routers. We want to be rejecting tunnels more aggressively than
|
||||
// dropping packets with WRED
|
||||
int availBps = Math.min((maxKBpsIn*1024*9/10) - usedIn, (maxKBpsOut*1024*9/10) - usedOut);
|
||||
if (availBps < MIN_AVAILABLE_BPS) {
|
||||
if (_log.shouldLog(Log.WARN)) _log.warn("Reject, avail (" + availBps + ") less than min");
|
||||
setTunnelStatus(LIMIT_STR);
|
||||
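The reworked bandwidth check above reserves 10% of the share limit plus a 4 KBps floor before admitting new participating tunnels. A small numeric sketch (the rates are invented):

    // Hedged sketch: headroom calculation for a hypothetical 100 KBps in / 80 KBps out router.
    int maxKBpsIn = 100, maxKBpsOut = 80;
    int usedIn = 70*1024, usedOut = 72*1024;            // current usage in Bps
    int MIN_AVAILABLE_BPS = 4*1024;

    int availBps = Math.min((maxKBpsIn*1024*9/10) - usedIn,    // 92160 - 71680 = 20480
                            (maxKBpsOut*1024*9/10) - usedOut); // 73728 - 73728 = 0
    boolean reject = availBps < MIN_AVAILABLE_BPS;  // true: the outbound side has no headroom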
@@ -303,16 +370,16 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_context.statManager().addRateData("router.throttleTunnelBytesAllowed", availBps, (long)bytesAllocated);
|
||||
|
||||
// Now see if 1m rates are too high
|
||||
long overage = used1mIn - (maxKBpsIn*1024);
|
||||
overage = Math.max(overage, used1mOut - (maxKBpsOut*1024));
|
||||
long overage = Math.max(used1mIn - (maxKBpsIn*1024), used1mOut - (maxKBpsOut*1024));
|
||||
if ( (overage > 0) &&
|
||||
((overage/(float)(maxKBps*1024f)) > _context.random().nextFloat()) ) {
|
||||
((overage/(maxKBps*1024f)) > _context.random().nextFloat()) ) {
|
||||
if (_log.shouldLog(Log.WARN)) _log.warn("Reject tunnel, 1m rate (" + overage + " over) indicates overload.");
|
||||
setTunnelStatus(LIMIT_STR);
|
||||
return false;
|
||||
}
|
||||
|
||||
float maxBps = maxKBps * 1024f;
|
||||
// limit at 90% - 4KBps (see above)
|
||||
float maxBps = (maxKBps * 1024f * 0.9f) - MIN_AVAILABLE_BPS;
|
||||
float pctFull = (maxBps - availBps) / (maxBps);
|
||||
double probReject = Math.pow(pctFull, 16); // steep curve
|
||||
double rand = _context.random().nextFloat();
|
||||
@@ -328,11 +395,15 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
if (probReject >= 0.9)
|
||||
setTunnelStatus(LIMIT_STR);
|
||||
else if (probReject >= 0.5)
|
||||
setTunnelStatus("Rejecting " + ((int)(100.0*probReject)) + "% of tunnels: Bandwidth limit");
|
||||
// hard to do {0} from here
|
||||
//setTunnelStatus("Rejecting " + ((int)(100.0*probReject)) + "% of tunnels: Bandwidth limit");
|
||||
setTunnelStatus(_x("Rejecting most tunnels: Bandwidth limit"));
|
||||
else if(probReject >= 0.1)
|
||||
setTunnelStatus("Accepting " + (100-(int)(100.0*probReject)) + "% of tunnels");
|
||||
// hard to do {0} from here
|
||||
//setTunnelStatus("Accepting " + (100-(int)(100.0*probReject)) + "% of tunnels");
|
||||
setTunnelStatus(_x("Accepting most tunnels"));
|
||||
else
|
||||
setTunnelStatus("Accepting tunnels");
|
||||
setTunnelStatus(_x("Accepting tunnels"));
|
||||
return !reject;
|
||||
|
||||
|
||||
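The pow(pctFull, 16) curve above keeps the reject probability negligible until the share bandwidth is nearly exhausted and then ramps it up sharply. A worked sketch with a few sample utilization figures:

    // Hedged sketch of the steep rejection curve used above.
    double[] pctFullSamples = { 0.50, 0.80, 0.90, 0.95, 0.99 };
    for (double pctFull : pctFullSamples) {
        double probReject = Math.pow(pctFull, 16);
        // ~0.00002, ~0.03, ~0.19, ~0.44, ~0.85 respectively
        System.out.println(pctFull + " full -> reject with p=" + probReject);
    }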
@@ -427,7 +498,8 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
if (fiveMinBps > 0) return nowBps - fiveMinBps;
|
||||
return 0;
|
||||
}
|
||||
private double getBps(Rate rate) {
|
||||
|
||||
private static double getBps(Rate rate) {
|
||||
if (rate == null) return -1;
|
||||
double bytes = rate.getLastTotalValue();
|
||||
return (bytes*1000.0d)/rate.getPeriod();
|
||||
@@ -442,12 +514,30 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
// if (_context.router().getRouterInfo().getBandwidthTier().equals("K"))
|
||||
// setTunnelStatus("Not expecting tunnel requests: Advertised bandwidth too low");
|
||||
// else
|
||||
setTunnelStatus("Rejecting tunnels");
|
||||
setTunnelStatus(_x("Rejecting tunnels: Starting up"));
|
||||
}
|
||||
|
||||
/** @since 0.8.12 */
|
||||
public void setShutdownStatus() {
|
||||
setTunnelStatus(_x("Rejecting tunnels: Shutting down"));
|
||||
}
|
||||
|
||||
/** @since 0.8.12 */
|
||||
public void cancelShutdownStatus() {
|
||||
setTunnelStatus(_x("Rejecting tunnels"));
|
||||
}
|
||||
|
||||
public void setTunnelStatus(String msg) {
|
||||
_tunnelStatus = msg;
|
||||
}
|
||||
|
||||
protected RouterContext getContext() { return _context; }
|
||||
/**
|
||||
* Mark a string for extraction by xgettext and translation.
|
||||
* Use this only in static initializers.
|
||||
* It does not translate!
|
||||
* @return s
|
||||
*/
|
||||
private static final String _x(String s) {
|
||||
return s;
|
||||
}
|
||||
}
|
||||
|
||||
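The _x() helper above only tags literals for xgettext extraction; the actual translation happens later, typically when the console renders the stored status string. A hedged sketch of that two-step pattern, with a made-up translate() stand-in for whatever bundle lookup the console performs:

    // Hedged sketch: mark at assignment time, translate at display time.
    class StatusStrings {
        static final String LIMIT_STR = _x("Rejecting tunnels: Bandwidth limit");

        // marker only - identical to the helper above, it does not translate
        static String _x(String s) { return s; }

        // hypothetical render-time lookup; the real console resolves the tagged
        // string against its resource bundle for the user's locale
        static String translate(String tagged) { return tagged; }

        static void render(java.io.Writer out) throws java.io.IOException {
            out.write(translate(LIMIT_STR));
        }
    }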
@@ -15,11 +15,16 @@ import net.i2p.CoreVersion;
|
||||
*
|
||||
*/
|
||||
public class RouterVersion {
|
||||
public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $";
|
||||
/** deprecated */
|
||||
public final static String ID = "Monotone";
|
||||
public final static String VERSION = CoreVersion.VERSION;
|
||||
public final static long BUILD = 5;
|
||||
public final static long BUILD = 9;
|
||||
|
||||
/** for example "-test" */
|
||||
public final static String EXTRA = "";
|
||||
public final static String FULL_VERSION = VERSION + "-" + BUILD + EXTRA;
|
||||
public static void main(String args[]) {
|
||||
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
|
||||
System.out.println("I2P Router version: " + FULL_VERSION);
|
||||
System.out.println("Router ID: " + RouterVersion.ID);
|
||||
System.out.println("I2P Core version: " + CoreVersion.VERSION);
|
||||
System.out.println("Core ID: " + CoreVersion.ID);
|
||||
|
||||
@@ -1,281 +0,0 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterIdentity;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.I2NPMessageException;
|
||||
import net.i2p.data.i2np.I2NPMessageImpl;
|
||||
|
||||
/**
|
||||
* Demo of a stripped down router - no tunnels, no netDb, no i2cp, no peer profiling,
|
||||
* just the SSU comm layer, crypto, and associated infrastructure, extended to handle
|
||||
* a new type of message ("FooMessage").
|
||||
*
|
||||
*/
|
||||
public class SSUDemo {
|
||||
RouterContext _us;
|
||||
|
||||
public static void main(String args[]) {
|
||||
SSUDemo demo = new SSUDemo();
|
||||
demo.run();
|
||||
}
|
||||
|
||||
public SSUDemo() {}
|
||||
public void run() {
|
||||
String cfgFile = "router.config";
|
||||
Properties envProps = getEnv();
|
||||
Router r = new Router(cfgFile, envProps);
|
||||
r.runRouter();
|
||||
_us = r.getContext();
|
||||
setupHandlers();
|
||||
// wait for it to warm up a bit
|
||||
try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
|
||||
// now write out our ident and info
|
||||
RouterInfo myInfo = _us.router().getRouterInfo();
|
||||
storeMyInfo(myInfo);
|
||||
// look for any other peers written to the same directory, and send each
|
||||
// a single Foo message (0x0123), unless they've already contacted us first.
|
||||
// this call never returns
|
||||
loadPeers();
|
||||
}
|
||||
|
||||
private Properties getEnv() {
|
||||
Properties envProps = System.getProperties();
|
||||
// disable the TCP transport, as it's deprecated
|
||||
envProps.setProperty("i2np.tcp.disable", "true");
|
||||
// we want SNTP synchronization for replay prevention
|
||||
envProps.setProperty("time.disabled", "false");
|
||||
// allow 127.0.0.1/10.0.0.1/etc (useful for testing). If this is false,
|
||||
// peers who say they're on an invalid IP are shitlisted
|
||||
envProps.setProperty("i2np.udp.allowLocal", "true");
|
||||
// explicit IP+port. at least one router on the net has to have their IP+port
|
||||
// set, since there has to be someone to detect one's IP off. most don't need
|
||||
// to set these though
|
||||
envProps.setProperty("i2np.udp.host", "127.0.0.1");
|
||||
envProps.setProperty("i2np.udp.internalPort", "12000");
|
||||
envProps.setProperty("i2np.udp.port", "12000");
|
||||
// disable I2CP, the netDb, peer testing/profile persistence, and tunnel
|
||||
// creation/management
|
||||
envProps.setProperty("i2p.dummyClientFacade", "true");
|
||||
envProps.setProperty("i2p.dummyNetDb", "true");
|
||||
envProps.setProperty("i2p.dummyPeerManager", "true");
|
||||
envProps.setProperty("i2p.dummyTunnelManager", "true");
|
||||
// set to false if you want to use HMAC-SHA256-128 instead of HMAC-MD5-128 as
|
||||
// the SSU MAC
|
||||
envProps.setProperty("i2p.HMACMD5", "true");
|
||||
// if you're using the HMAC MD5, by default it will use a 32 byte MAC field,
|
||||
// which is a bug, as it doesn't generate the same values as a 16 byte MAC field.
|
||||
// set this to false if you don't want the bug
|
||||
envProps.setProperty("i2p.HMACBrokenSize", "false");
|
||||
// no need to include any stats in the routerInfo we send to people on SSU
|
||||
// session establishment
|
||||
envProps.setProperty("router.publishPeerRankings", "false");
|
||||
// write the logs to ./logs/log-router-*.txt (logger configured with the file
|
||||
// ./logger.config, or another config file specified as
|
||||
// -Dlogger.configLocation=blah)
|
||||
envProps.setProperty("loggerFilenameOverride", "logs/log-router-@.txt");
|
||||
return envProps;
|
||||
}
|
||||
|
||||
private void setupHandlers() {
|
||||
// netDb store is sent on connection establishment, which includes contact info
|
||||
// for the peer. the DBStoreJobBuilder builds a new asynchronous Job to process
|
||||
// each one received (storing it in our in-memory, passive netDb)
|
||||
_us.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new DBStoreJobBuilder());
|
||||
// handle any Foo messages by displaying them on stdout
|
||||
_us.inNetMessagePool().registerHandlerJobBuilder(FooMessage.MESSAGE_TYPE, new FooJobBuilder());
|
||||
}
|
||||
|
||||
/** random place for storing router info files - written as $dir/base64(SHA256(info.getIdentity)) */
|
||||
private File getInfoDir() { return new File("/tmp/ssuDemoInfo/"); }
|
||||
|
||||
private void storeMyInfo(RouterInfo info) {
|
||||
File infoDir = getInfoDir();
|
||||
if (!infoDir.exists())
|
||||
infoDir.mkdirs();
|
||||
FileOutputStream fos = null;
|
||||
File infoFile = new File(infoDir, info.getIdentity().calculateHash().toBase64());
|
||||
try {
|
||||
fos = new FileOutputStream(infoFile);
|
||||
info.writeBytes(fos);
|
||||
} catch (IOException ioe) {
|
||||
ioe.printStackTrace();
|
||||
} catch (DataFormatException dfe) {
|
||||
dfe.printStackTrace();
|
||||
} finally {
|
||||
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
|
||||
}
|
||||
|
||||
System.out.println("Our info stored at: " + infoFile.getAbsolutePath());
|
||||
}
|
||||
|
||||
private void loadPeers() {
|
||||
File infoDir = getInfoDir();
|
||||
if (!infoDir.exists())
|
||||
infoDir.mkdirs();
|
||||
while (true) {
|
||||
File peerFiles[] = infoDir.listFiles();
|
||||
if ( (peerFiles != null) && (peerFiles.length > 0) ) {
|
||||
for (int i = 0; i < peerFiles.length; i++) {
|
||||
if (peerFiles[i].isFile() && !peerFiles[i].isHidden()) {
|
||||
if (!_us.routerHash().toBase64().equals(peerFiles[i].getName())) {
|
||||
System.out.println("Reading info: " + peerFiles[i].getAbsolutePath());
|
||||
try {
|
||||
FileInputStream in = new FileInputStream(peerFiles[i]);
|
||||
RouterInfo ri = new RouterInfo();
|
||||
ri.readBytes(in);
|
||||
peerRead(ri);
|
||||
} catch (IOException ioe) {
|
||||
System.err.println("Error reading " + peerFiles[i].getAbsolutePath());
|
||||
ioe.printStackTrace();
|
||||
} catch (DataFormatException dfe) {
|
||||
System.err.println("Corrupt " + peerFiles[i].getAbsolutePath());
|
||||
dfe.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
|
||||
}
|
||||
}
|
||||
|
||||
private void peerRead(RouterInfo ri) {
|
||||
RouterInfo old = _us.netDb().store(ri.getIdentity().calculateHash(), ri);
|
||||
if (old == null)
|
||||
newPeerRead(ri);
|
||||
}
|
||||
|
||||
private void newPeerRead(RouterInfo ri) {
|
||||
OutNetMessage out = new OutNetMessage(_us);
|
||||
// _us.clock() is an ntp synchronized clock. give up on sending this message
|
||||
// if it doesn't get ACKed within the next 10 seconds
|
||||
out.setExpiration(_us.clock().now() + 10*1000);
|
||||
out.setPriority(100);
|
||||
out.setTarget(ri);
|
||||
FooMessage data = new FooMessage(_us, new byte[] { 0x0, 0x1, 0x2, 0x3 });
|
||||
System.out.println("SEND: " + Base64.encode(data.getData()));
|
||||
out.setMessage(data);
|
||||
// job fired if we can't contact them, or if it takes too long to get an ACK
|
||||
out.setOnFailedSendJob(null);
|
||||
// job fired once the transport gets a full ACK of the message
|
||||
out.setOnSendJob(new AfterACK());
|
||||
// queue up the message, establishing a new SSU session if necessary, using
|
||||
// their direct SSU address if they have one, or their indirect SSU addresses
|
||||
// if they don't. If we cannot contact them, we will 'shitlist' their address,
|
||||
// during which time we will not even attempt to send messages to them. We also
|
||||
// drop their netDb info when we shitlist them, in case their info is no longer
|
||||
// correct. Since the netDb is disabled for all meaningful purposes, the SSUDemo
|
||||
// will be responsible for fetching such information.
|
||||
_us.outNetMessagePool().add(out);
|
||||
}
|
||||
|
||||
/** fired if and only if the FooMessage is ACKed before we time out */
|
||||
private class AfterACK extends JobImpl {
|
||||
public AfterACK() { super(_us); }
|
||||
public void runJob() { System.out.println("Foo message sent completely"); }
|
||||
public String getName() { return "After Foo message send"; }
|
||||
}
|
||||
|
||||
////
|
||||
// Foo and netDb store handling below
|
||||
|
||||
/**
|
||||
* Deal with an Foo message received
|
||||
*/
|
||||
private class FooJobBuilder implements HandlerJobBuilder {
|
||||
public FooJobBuilder() {
|
||||
I2NPMessageImpl.registerBuilder(new FooBuilder(), FooMessage.MESSAGE_TYPE);
|
||||
}
|
||||
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
|
||||
return new FooHandleJob(_us, receivedMessage, from, fromHash);
|
||||
}
|
||||
}
|
||||
private class FooHandleJob extends JobImpl {
|
||||
private I2NPMessage _msg;
|
||||
public FooHandleJob(RouterContext ctx, I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
|
||||
super(ctx);
|
||||
_msg = receivedMessage;
|
||||
}
|
||||
public void runJob() {
|
||||
// we know it's a FooMessage, since that's the type of message that the handler
|
||||
// is registered as
|
||||
FooMessage m = (FooMessage)_msg;
|
||||
System.out.println("RECV: " + Base64.encode(m.getData()));
|
||||
}
|
||||
public String getName() { return "Handle Foo message"; }
|
||||
}
|
||||
private class FooBuilder implements I2NPMessageImpl.Builder {
|
||||
public I2NPMessage build(I2PAppContext ctx) { return new FooMessage(ctx, null); }
|
||||
}
|
||||
|
||||
/**
|
||||
* Just carry some data...
|
||||
*/
|
||||
class FooMessage extends I2NPMessageImpl {
|
||||
private byte[] _data;
|
||||
public static final int MESSAGE_TYPE = 17;
|
||||
public FooMessage(I2PAppContext ctx, byte data[]) {
|
||||
super(ctx);
|
||||
_data = data;
|
||||
}
|
||||
/** pull the read data off */
|
||||
public byte[] getData() { return _data; }
|
||||
/** specify the payload to be sent */
|
||||
public void setData(byte data[]) { _data = data; }
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
protected int calculateWrittenLength() { return _data.length; }
|
||||
public void readMessage(byte[] data, int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
_data = new byte[dataSize];
|
||||
System.arraycopy(data, offset, _data, 0, dataSize);
|
||||
}
|
||||
|
||||
protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException {
|
||||
System.arraycopy(_data, 0, out, curIndex, _data.length);
|
||||
return curIndex + _data.length;
|
||||
}
|
||||
}
|
||||
|
||||
////
|
||||
// netDb store handling below
|
||||
|
||||
/**
|
||||
* Handle any netDb stores from the peer - they send us their netDb as part of
|
||||
* their SSU establishment (and we send them ours).
|
||||
*/
|
||||
private class DBStoreJobBuilder implements HandlerJobBuilder {
|
||||
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
|
||||
return new HandleJob(_us, receivedMessage, from, fromHash);
|
||||
}
|
||||
}
|
||||
private class HandleJob extends JobImpl {
|
||||
private I2NPMessage _msg;
|
||||
public HandleJob(RouterContext ctx, I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
|
||||
super(ctx);
|
||||
_msg = receivedMessage;
|
||||
}
|
||||
public void runJob() {
|
||||
// we know it's a DatabaseStoreMessage, since that's the type of message that the handler
|
||||
// is registered as
|
||||
DatabaseStoreMessage m = (DatabaseStoreMessage)_msg;
|
||||
try {
|
||||
_us.netDb().store(m.getKey(), m.getRouterInfo());
|
||||
} catch (IllegalArgumentException iae) {
|
||||
iae.printStackTrace();
|
||||
}
|
||||
}
|
||||
public String getName() { return "Handle netDb store"; }
|
||||
}
|
||||
}
|
||||
@@ -1,115 +0,0 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
|
||||
import net.i2p.crypto.PersistentSessionKeyManager;
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Centralize the sessionKeyManager persistence (rather than leave it to a private
|
||||
* job in the startup job)
|
||||
*
|
||||
*/
|
||||
public class SessionKeyPersistenceHelper implements Service {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private SessionKeyWriterJob _writerJob;
|
||||
private final static long PERSIST_DELAY = 3*60*1000;
|
||||
private final static String PROP_SESSION_KEY_FILE = "router.sessionKeys.location";
|
||||
private final static String DEFAULT_SESSION_KEY_FILE = "sessionKeys.dat";
|
||||
|
||||
public SessionKeyPersistenceHelper(RouterContext context) {
|
||||
_context = context;
|
||||
_log = _context.logManager().getLog(SessionKeyPersistenceHelper.class);
|
||||
_writerJob = new SessionKeyWriterJob();
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
writeState();
|
||||
}
|
||||
|
||||
public void restart() {
|
||||
writeState();
|
||||
startup();
|
||||
}
|
||||
|
||||
private String getKeyFile() {
|
||||
String val = _context.router().getConfigSetting(PROP_SESSION_KEY_FILE);
|
||||
if (val == null)
|
||||
val = DEFAULT_SESSION_KEY_FILE;
|
||||
return val;
|
||||
}
|
||||
|
||||
public void startup() {
|
||||
SessionKeyManager mgr = _context.sessionKeyManager();
|
||||
if (mgr instanceof PersistentSessionKeyManager) {
|
||||
PersistentSessionKeyManager manager = (PersistentSessionKeyManager)mgr;
|
||||
File f = new File(getKeyFile());
|
||||
if (f.exists()) {
|
||||
FileInputStream fin = null;
|
||||
try {
|
||||
fin = new FileInputStream(f);
|
||||
manager.loadState(fin);
|
||||
int expired = manager.aggressiveExpire();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Session keys loaded [not error] with " + expired
|
||||
+ " sets immediately expired");
|
||||
} catch (Throwable t) {
|
||||
_log.error("Error reading in session key data", t);
|
||||
} finally {
|
||||
if (fin != null) try { fin.close(); } catch (IOException ioe) {}
|
||||
}
|
||||
}
|
||||
_context.jobQueue().addJob(_writerJob);
|
||||
}
|
||||
}
|
||||
|
||||
private void writeState() {
|
||||
if (true) return;
|
||||
|
||||
Object o = _context.sessionKeyManager();
|
||||
if (!(o instanceof PersistentSessionKeyManager)) {
|
||||
_log.error("Unable to persist the session key state - manager is " + o.getClass().getName());
|
||||
return;
|
||||
}
|
||||
PersistentSessionKeyManager mgr = (PersistentSessionKeyManager)o;
|
||||
|
||||
// only need for synchronization is during shutdown()
|
||||
synchronized (mgr) {
|
||||
FileOutputStream fos = null;
|
||||
try {
|
||||
int expired = mgr.aggressiveExpire();
|
||||
if (expired > 0) {
|
||||
_log.info("Agressive expired " + expired + " tag sets");
|
||||
}
|
||||
fos = new FileOutputStream(getKeyFile());
|
||||
mgr.saveState(fos);
|
||||
fos.flush();
|
||||
_log.debug("Session keys written");
|
||||
} catch (Throwable t) {
|
||||
_log.debug("Error writing session key state", t);
|
||||
} finally {
|
||||
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void renderStatusHTML(Writer out) { }
|
||||
|
||||
private class SessionKeyWriterJob extends JobImpl {
|
||||
public SessionKeyWriterJob() {
|
||||
super(SessionKeyPersistenceHelper.this._context);
|
||||
getTiming().setStartAfter(PERSIST_DELAY);
|
||||
}
|
||||
public String getName() { return "Write Session Keys"; }
|
||||
public void runJob() {
|
||||
writeState();
|
||||
requeue(PERSIST_DELAY);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,18 +10,14 @@ package net.i2p.router;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.router.peermanager.PeerProfile;
|
||||
import net.i2p.util.ConcurrentHashSet;
|
||||
@@ -34,20 +30,26 @@ import net.i2p.util.Log;
|
||||
* shitlist.
|
||||
*/
|
||||
public class Shitlist {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private Map<Hash, Entry> _entries;
|
||||
private final Log _log;
|
||||
private final RouterContext _context;
|
||||
private final Map<Hash, Entry> _entries;
|
||||
|
||||
private static class Entry {
|
||||
public static class Entry {
|
||||
/** when it should expire, per the i2p clock */
|
||||
long expireOn;
|
||||
public long expireOn;
|
||||
/** why they were shitlisted */
|
||||
String cause;
|
||||
public String cause;
|
||||
/** separate code so cause can contain {0} for translation */
|
||||
public String causeCode;
|
||||
/** what transports they were shitlisted for (String), or null for all transports */
|
||||
Set<String> transports;
|
||||
public Set<String> transports;
|
||||
}
|
||||
|
||||
public final static long SHITLIST_DURATION_MS = 20*60*1000;
|
||||
/**
|
||||
* Don't make this too long as the failure may be transient
|
||||
* due to connection limits.
|
||||
*/
|
||||
public final static long SHITLIST_DURATION_MS = 7*60*1000;
|
||||
public final static long SHITLIST_DURATION_MAX = 30*60*1000;
|
||||
public final static long SHITLIST_DURATION_PARTIAL = 10*60*1000;
|
||||
public final static long SHITLIST_DURATION_FOREVER = 181l*24*60*60*1000; // will get rounded down to 180d on console
|
||||
@@ -56,7 +58,7 @@ public class Shitlist {
|
||||
public Shitlist(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(Shitlist.class);
|
||||
_entries = new ConcurrentHashMap(8);
|
||||
_entries = new ConcurrentHashMap(16);
|
||||
_context.jobQueue().addJob(new Cleanup(_context));
|
||||
}
|
||||
|
||||
@@ -67,7 +69,7 @@ public class Shitlist {
|
||||
_toUnshitlist = new ArrayList(4);
|
||||
getTiming().setStartAfter(ctx.clock().now() + SHITLIST_CLEANER_START_DELAY);
|
||||
}
|
||||
public String getName() { return "Cleanup shitlist"; }
|
||||
public String getName() { return "Expire banned peers"; }
|
||||
public void runJob() {
|
||||
_toUnshitlist.clear();
|
||||
long now = getContext().clock().now();
|
||||
@@ -97,17 +99,42 @@ public class Shitlist {
|
||||
return _entries.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* For ShitlistRenderer in router console.
|
||||
* Note - may contain expired entries.
|
||||
*/
|
||||
public Map<Hash, Entry> getEntries() {
|
||||
return Collections.unmodifiableMap(_entries);
|
||||
}
|
||||
|
||||
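getEntries() above hands the router console a read-only view, replacing the in-router renderStatusHTML() that is marked deprecated further down. A hedged sketch of the console-side iteration, assuming the context exposes a shitlist() accessor (formatting details are invented):

    // Hedged sketch: console-side rendering over the unmodifiable entry map.
    for (Map.Entry<Hash, Shitlist.Entry> e : ctx.shitlist().getEntries().entrySet()) {
        Shitlist.Entry entry = e.getValue();
        long remaining = entry.expireOn - ctx.clock().now();
        if (remaining <= 0)
            continue;   // may contain expired entries, per the javadoc above
        System.out.println(e.getKey().toBase64() + " banned for another "
                           + DataHelper.formatDuration(remaining)
                           + (entry.cause != null ? (": " + entry.cause) : ""));
    }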
public boolean shitlistRouter(Hash peer) {
|
||||
return shitlistRouter(peer, null);
|
||||
}
|
||||
|
||||
public boolean shitlistRouter(Hash peer, String reason) { return shitlistRouter(peer, reason, null); }
|
||||
|
||||
/** ick have to put the reasonCode in the front to avoid ambiguity */
|
||||
public boolean shitlistRouter(String reasonCode, Hash peer, String reason) {
|
||||
return shitlistRouter(peer, reason, reasonCode, null, false);
|
||||
}
|
||||
|
||||
public boolean shitlistRouter(Hash peer, String reason, String transport) {
|
||||
return shitlistRouter(peer, reason, transport, false);
|
||||
}
|
||||
|
||||
public boolean shitlistRouterForever(Hash peer, String reason) {
|
||||
return shitlistRouter(peer, reason, null, true);
|
||||
}
|
||||
|
||||
public boolean shitlistRouterForever(Hash peer, String reason, String reasonCode) {
|
||||
return shitlistRouter(peer, reason, reasonCode, null, true);
|
||||
}
|
||||
|
||||
public boolean shitlistRouter(Hash peer, String reason, String transport, boolean forever) {
|
||||
return shitlistRouter(peer, reason, null, transport, forever);
|
||||
}
|
||||
|
||||
private boolean shitlistRouter(Hash peer, String reason, String reasonCode, String transport, boolean forever) {
|
||||
if (peer == null) {
|
||||
_log.error("wtf, why did we try to shitlist null?", new Exception("shitfaced"));
|
||||
return false;
|
||||
@@ -127,7 +154,7 @@ public class Shitlist {
|
||||
} else if (transport != null) {
|
||||
e.expireOn = _context.clock().now() + SHITLIST_DURATION_PARTIAL;
|
||||
} else {
|
||||
long period = SHITLIST_DURATION_MS + _context.random().nextLong(SHITLIST_DURATION_MS);
|
||||
long period = SHITLIST_DURATION_MS + _context.random().nextLong(SHITLIST_DURATION_MS / 4);
|
||||
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
|
||||
if (prof != null) {
|
||||
period = SHITLIST_DURATION_MS << prof.incrementShitlists();
|
||||
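With the shortened base duration above, repeat offenders still back off exponentially via prof.incrementShitlists(). A standalone numeric sketch of that schedule; the clamp to SHITLIST_DURATION_MAX is an assumption here, since the hunk above is cut off before any such line:

    // Hedged sketch: per-peer backoff of the ban duration (clamp assumed, not shown above).
    long SHITLIST_DURATION_MS  = 7*60*1000;
    long SHITLIST_DURATION_MAX = 30*60*1000;
    for (int shitlists = 1; shitlists <= 4; shitlists++) {
        long period = Math.min(SHITLIST_DURATION_MS << shitlists, SHITLIST_DURATION_MAX);
        // 14, 28, 30, 30 minutes for the 1st through 4th offence
        System.out.println(shitlists + " -> " + (period / 60000) + " min");
    }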
@@ -139,9 +166,10 @@ public class Shitlist {
|
||||
e.expireOn = _context.clock().now() + period;
|
||||
}
|
||||
e.cause = reason;
|
||||
e.causeCode = reasonCode;
|
||||
e.transports = null;
|
||||
if (transport != null) {
|
||||
e.transports = new ConcurrentHashSet(1);
|
||||
e.transports = new ConcurrentHashSet(2);
|
||||
e.transports.add(transport);
|
||||
}
|
||||
|
||||
@@ -152,6 +180,7 @@ public class Shitlist {
|
||||
if (old.expireOn > e.expireOn) {
|
||||
e.expireOn = old.expireOn;
|
||||
e.cause = old.cause;
|
||||
e.causeCode = old.causeCode;
|
||||
}
|
||||
if (e.transports != null) {
|
||||
if (old.transports != null)
|
||||
@@ -159,6 +188,7 @@ public class Shitlist {
|
||||
else {
|
||||
e.transports = null;
|
||||
e.cause = reason;
|
||||
e.causeCode = reasonCode;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -167,6 +197,7 @@ public class Shitlist {
|
||||
if (transport == null) {
|
||||
// we hate the peer on *any* transport
|
||||
_context.netDb().fail(peer);
|
||||
_context.tunnelManager().fail(peer);
|
||||
}
|
||||
//_context.tunnelManager().peerFailed(peer);
|
||||
//_context.messageRegistry().peerFailed(peer);
|
||||
@@ -178,8 +209,11 @@ public class Shitlist {
|
||||
public void unshitlistRouter(Hash peer) {
|
||||
unshitlistRouter(peer, true);
|
||||
}
|
||||
|
||||
private void unshitlistRouter(Hash peer, boolean realUnshitlist) { unshitlistRouter(peer, realUnshitlist, null); }
|
||||
|
||||
public void unshitlistRouter(Hash peer, String transport) { unshitlistRouter(peer, true, transport); }
|
||||
|
||||
private void unshitlistRouter(Hash peer, boolean realUnshitlist, String transport) {
|
||||
if (peer == null) return;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@@ -193,7 +227,7 @@ public class Shitlist {
|
||||
fully = true;
|
||||
} else {
|
||||
e.transports.remove(transport);
|
||||
if (e.transports.size() <= 0)
|
||||
if (e.transports.isEmpty())
|
||||
fully = true;
|
||||
else
|
||||
_entries.put(peer, e);
|
||||
@@ -213,6 +247,7 @@ public class Shitlist {
|
||||
}
|
||||
|
||||
public boolean isShitlisted(Hash peer) { return isShitlisted(peer, null); }
|
||||
|
||||
public boolean isShitlisted(Hash peer, String transport) {
|
||||
boolean rv = false;
|
||||
boolean unshitlist = false;
|
||||
@@ -247,40 +282,7 @@ public class Shitlist {
|
||||
return entry != null && entry.expireOn > _context.clock().now() + SHITLIST_DURATION_MAX;
|
||||
}
|
||||
|
||||
class HashComparator implements Comparator {
|
||||
public int compare(Object l, Object r) {
|
||||
return ((Hash)l).toBase64().compareTo(((Hash)r).toBase64());
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated moved to router console */
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
buf.append("<h2>Shitlist</h2>");
|
||||
Map<Hash, Entry> entries = new TreeMap(new HashComparator());
|
||||
|
||||
entries.putAll(_entries);
|
||||
|
||||
buf.append("<ul>");
|
||||
|
||||
for (Map.Entry<Hash, Entry> e : entries.entrySet()) {
|
||||
Hash key = e.getKey();
|
||||
Entry entry = e.getValue();
|
||||
buf.append("<li><b>").append(key.toBase64()).append("</b>");
|
||||
buf.append(" (<a href=\"netdb.jsp?r=").append(key.toBase64().substring(0, 6)).append("\">netdb</a>)");
|
||||
buf.append(" expiring in ");
|
||||
buf.append(DataHelper.formatDuration(entry.expireOn-_context.clock().now()));
|
||||
Set transports = entry.transports;
|
||||
if ( (transports != null) && (transports.size() > 0) )
|
||||
buf.append(" on the following transport: ").append(transports);
|
||||
if (entry.cause != null) {
|
||||
buf.append("<br />\n");
|
||||
buf.append(entry.cause);
|
||||
}
|
||||
buf.append(" (<a href=\"configpeer.jsp?peer=").append(key.toBase64()).append("#unsh\">unshitlist now</a>)");
|
||||
buf.append("</li>\n");
|
||||
}
|
||||
buf.append("</ul>\n");
|
||||
out.write(buf.toString());
|
||||
out.flush();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,19 +22,16 @@ import net.i2p.stat.RateStat;
import net.i2p.util.Log;

/**
* Maintain the statistics about the router
* Publishes some statistics about the router in the netDB.
*
*/
public class StatisticsManager implements Service {
private Log _log;
private RouterContext _context;
private boolean _includePeerRankings;
private int _publishedStats;
private final Log _log;
private final RouterContext _context;

public final static String PROP_PUBLISH_RANKINGS = "router.publishPeerRankings";
public final static String DEFAULT_PROP_PUBLISH_RANKINGS = "true";
public final static String PROP_MAX_PUBLISHED_PEERS = "router.publishPeerMax";
public final static int DEFAULT_MAX_PUBLISHED_PEERS = 10;
/** enhance anonymity by only including build stats one out of this many times */
private static final int RANDOM_INCLUDE_STATS = 4;

private final DecimalFormat _fmt;
private final DecimalFormat _pct;
@@ -44,52 +41,16 @@ public class StatisticsManager implements Service {
_fmt = new DecimalFormat("###,##0.00", new DecimalFormatSymbols(Locale.UK));
_pct = new DecimalFormat("#0.00%", new DecimalFormatSymbols(Locale.UK));
_log = context.logManager().getLog(StatisticsManager.class);
_includePeerRankings = false;
}

/** noop */
public void shutdown() {}
public void restart() {
startup();
}
public void startup() {
String val = _context.router().getConfigSetting(PROP_PUBLISH_RANKINGS);
try {
if (val == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Peer publishing setting " + PROP_PUBLISH_RANKINGS
+ " not set - using default " + DEFAULT_PROP_PUBLISH_RANKINGS);
val = DEFAULT_PROP_PUBLISH_RANKINGS;
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer publishing setting " + PROP_PUBLISH_RANKINGS
+ " set to " + val);
}
boolean v = Boolean.TRUE.toString().equalsIgnoreCase(val);
_includePeerRankings = v;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Setting includePeerRankings = " + v);
} catch (Throwable t) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error determining whether to publish rankings ["
+ PROP_PUBLISH_RANKINGS + "=" + val
+ "], so we're defaulting to FALSE");
_includePeerRankings = false;
}
val = _context.router().getConfigSetting(PROP_MAX_PUBLISHED_PEERS);
if (val == null) {
_publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
} else {
try {
int num = Integer.parseInt(val);
_publishedStats = num;
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid max number of peers to publish [" + val
+ "], defaulting to " + DEFAULT_MAX_PUBLISHED_PEERS, nfe);
_publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
}
}
}

/** noop */
public void restart() {}

/** noop */
public void startup() {}

/** Retrieve a snapshot of the statistics that should be published */
public Properties publishStatistics() {
@@ -123,14 +84,13 @@ public class StatisticsManager implements Service {
|
||||
stats.setProperty("stat_identities", newlines+"");
|
||||
***/
|
||||
|
||||
if (_includePeerRankings) {
|
||||
if (false)
|
||||
stats.putAll(_context.profileManager().summarizePeers(_publishedStats));
|
||||
|
||||
if (_context.getBooleanPropertyDefaultTrue(PROP_PUBLISH_RANKINGS) &&
|
||||
_context.random().nextInt(RANDOM_INCLUDE_STATS) == 0) {
|
||||
long publishedUptime = _context.router().getUptime();
|
||||
// Don't publish these for first hour
|
||||
if (publishedUptime > 60*60*1000)
|
||||
includeThroughput(stats);
|
||||
// Disabled in 0.9
|
||||
//if (publishedUptime > 62*60*1000)
|
||||
// includeAverageThroughput(stats);
|
||||
//includeRate("router.invalidMessageTime", stats, new long[] { 10*60*1000 });
|
||||
//includeRate("router.duplicateMessageId", stats, new long[] { 24*60*60*1000 });
|
||||
//includeRate("tunnel.duplicateIV", stats, new long[] { 24*60*60*1000 });
|
||||
@@ -162,7 +122,8 @@ public class StatisticsManager implements Service {
|
||||
//includeRate("transport.sendProcessingTime", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l });
|
||||
//includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
|
||||
includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
// total event count can be used to track uptime
|
||||
includeRate("tunnel.participatingTunnels", stats, new long[] { 60*60*1000 }, true);
|
||||
//includeRate("tunnel.testSuccessTime", stats, new long[] { 10*60*1000l });
|
||||
//includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
|
||||
//includeRate("udp.sendConfirmTime", stats, new long[] { 10*60*1000 });
|
||||
@@ -172,12 +133,6 @@ public class StatisticsManager implements Service {
|
||||
//includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
|
||||
|
||||
// Round smaller uptimes to 1 hour, to frustrate uptime tracking
|
||||
// Round 2nd hour to 90m since peers use 2h minimum to route
|
||||
if (publishedUptime < 60*60*1000) publishedUptime = 60*60*1000;
|
||||
else if (publishedUptime < 2*60*60*1000) publishedUptime = 90*60*1000;
|
||||
|
||||
stats.setProperty("stat_uptime", DataHelper.formatDuration(publishedUptime));
|
||||
//stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
|
||||
|
||||
//includeRate("tunnel.decryptRequestTime", stats, new long[] { 60*1000, 10*60*1000 });
|
||||
@@ -185,35 +140,30 @@ public class StatisticsManager implements Service {
|
||||
//includeRate("udp.packetVerifyTime", stats, new long[] { 60*1000 });
|
||||
|
||||
//includeRate("tunnel.buildRequestTime", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.buildClientExpire", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.buildClientReject", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.buildClientSuccess", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.buildExploratoryExpire", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.buildExploratoryReject", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.buildExploratorySuccess", stats, new long[] { 10*60*1000 });
|
||||
long rate = 60*60*1000;
|
||||
includeTunnelRates("Client", stats, rate);
|
||||
includeTunnelRates("Exploratory", stats, rate);
|
||||
//includeRate("tunnel.rejectTimeout", stats, new long[] { 10*60*1000 });
|
||||
//includeRate("tunnel.rejectOverloaded", stats, new long[] { 10*60*1000 });
|
||||
//includeRate("tunnel.acceptLoad", stats, new long[] { 10*60*1000 });
|
||||
|
||||
_log.debug("Publishing peer rankings");
|
||||
} else {
|
||||
// So that we will still get build requests
|
||||
stats.setProperty("stat_uptime", "90m");
|
||||
_log.debug("Not publishing peer rankings");
|
||||
}
|
||||
|
||||
// So that we will still get build requests
|
||||
stats.setProperty("stat_uptime", "90m");
|
||||
if (FloodfillNetworkDatabaseFacade.isFloodfill(_context.router().getRouterInfo())) {
|
||||
stats.setProperty("netdb.knownRouters", ""+_context.netDb().getKnownRouters());
|
||||
stats.setProperty("netdb.knownLeaseSets", ""+_context.netDb().getKnownLeaseSets());
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Building status: " + stats);
|
||||
return stats;
|
||||
}
|
||||
|
||||
/*****
|
||||
private void includeRate(String rateName, Properties stats, long selectedPeriods[]) {
|
||||
includeRate(rateName, stats, selectedPeriods, false);
|
||||
}
|
||||
*****/
|
||||
|
||||
/**
|
||||
* @param fudgeQuantity the data being published in this stat is too sensitive to, uh
|
||||
* publish, so we're kludge the quantity (allowing the fairly safe
|
||||
@@ -245,7 +195,7 @@ public class StatisticsManager implements Service {
|
||||
}
|
||||
|
||||
private String renderRate(Rate rate, boolean fudgeQuantity) {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(num(rate.getAverageValue())).append(';');
|
||||
buf.append(num(rate.getExtremeAverageValue())).append(';');
|
||||
buf.append(pct(rate.getPercentageOfLifetimeValue())).append(';');
|
||||
@@ -257,10 +207,9 @@ public class StatisticsManager implements Service {
|
||||
}
|
||||
long numPeriods = rate.getLifetimePeriods();
|
||||
if (fudgeQuantity) {
|
||||
buf.append("666").append(';');
|
||||
buf.append("555;");
|
||||
if (numPeriods > 0) {
|
||||
buf.append("666").append(';');
|
||||
buf.append("666").append(';');
|
||||
buf.append("555;555;");
|
||||
}
|
||||
} else {
|
||||
buf.append(num(rate.getLastEventCount())).append(';');
|
||||
@@ -268,53 +217,78 @@ public class StatisticsManager implements Service {
|
||||
double avgFrequency = rate.getLifetimeEventCount() / (double)numPeriods;
|
||||
buf.append(num(avgFrequency)).append(';');
|
||||
buf.append(num(rate.getExtremeEventCount())).append(';');
|
||||
buf.append(num((double)rate.getLifetimeEventCount())).append(';');
|
||||
buf.append(num(rate.getLifetimeEventCount())).append(';');
|
||||
}
|
||||
}
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private void includeThroughput(Properties stats) {
|
||||
RateStat sendRate = _context.statManager().getRate("bw.sendRate");
|
||||
if (sendRate != null) {
|
||||
/****
|
||||
if (_context.router().getUptime() > 5*60*1000) {
|
||||
Rate r = sendRate.getRate(5*60*1000);
|
||||
if (r != null)
|
||||
stats.setProperty("stat_bandwidthSendBps.5m", num(r.getAverageValue()) + ';' + num(r.getExtremeAverageValue()) + ";0;0;");
|
||||
}
|
||||
****/
|
||||
if (_context.router().getUptime() > 60*60*1000) {
|
||||
Rate r = sendRate.getRate(60*60*1000);
|
||||
if (r != null)
|
||||
stats.setProperty("stat_bandwidthSendBps.60m", num(r.getAverageValue()) + ';' + num(r.getExtremeAverageValue()) + ";0;0;");
|
||||
}
|
||||
private static final String[] tunnelStats = { "Expire", "Reject", "Success" };

/**
* Add tunnel build rates with some mods to hide absolute quantities
* In particular, report counts normalized to 100 (i.e. a percentage)
*/
private void includeTunnelRates(String tunnelType, Properties stats, long selectedPeriod) {
long totalEvents = 0;
for (String tunnelStat : tunnelStats) {
String rateName = "tunnel.build" + tunnelType + tunnelStat;
RateStat stat = _context.statManager().getRate(rateName);
if (stat == null) continue;
Rate curRate = stat.getRate(selectedPeriod);
if (curRate == null) continue;
totalEvents += curRate.getLastEventCount();
}

RateStat recvRate = _context.statManager().getRate("bw.recvRate");
if (recvRate != null) {
/****
if (_context.router().getUptime() > 5*60*1000) {
Rate r = recvRate.getRate(5*60*1000);
if (r != null)
stats.setProperty("stat_bandwidthReceiveBps.5m", num(r.getAverageValue()) + ';' + num(r.getExtremeAverageValue()) + ";0;0;");
}
****/
if (_context.router().getUptime() > 60*60*1000) {
Rate r = recvRate.getRate(60*60*1000);
if (r != null)
stats.setProperty("stat_bandwidthReceiveBps.60m", num(r.getAverageValue()) + ';' + num(r.getExtremeAverageValue()) + ";0;0;");
}
if (totalEvents <= 0)
return;
for (String tunnelStat : tunnelStats) {
String rateName = "tunnel.build" + tunnelType + tunnelStat;
RateStat stat = _context.statManager().getRate(rateName);
if (stat == null) continue;
Rate curRate = stat.getRate(selectedPeriod);
if (curRate == null) continue;
double fudgeQuantity = 100.0d * curRate.getLastEventCount() / totalEvents;
stats.setProperty("stat_" + rateName + '.' + getPeriod(curRate), renderRate(curRate, fudgeQuantity));
}
}

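The javadoc on includeTunnelRates() above says build counts are normalized to 100 before publication, so only the success/reject/expire ratio leaks, not the absolute tunnel volume. A minimal standalone sketch of that normalization, using a plain Map of counts in place of the router's RateStat plumbing (names are illustrative only):

import java.util.Map;
import java.util.Properties;

class TunnelRateNormalizerSketch {
    /** Turn absolute per-stat event counts into percentages of the total. */
    static Properties normalizeTo100(Map<String, Long> counts) {
        Properties out = new Properties();
        long total = 0;
        for (Long c : counts.values())
            total += c.longValue();
        if (total <= 0)
            return out;                           // nothing to publish
        for (Map.Entry<String, Long> e : counts.entrySet()) {
            double pct = 100.0d * e.getValue().longValue() / total;
            out.setProperty("stat_" + e.getKey(), String.valueOf(pct));
        }
        return out;
    }
}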
private String getPeriod(Rate rate) { return DataHelper.formatDuration(rate.getPeriod()); }
|
||||
private String renderRate(Rate rate, double fudgeQuantity) {
|
||||
StringBuilder buf = new StringBuilder(128);
|
||||
buf.append(num(rate.getAverageValue())).append(';');
|
||||
buf.append(num(rate.getExtremeAverageValue())).append(';');
|
||||
buf.append(pct(rate.getPercentageOfLifetimeValue())).append(';');
|
||||
if (rate.getLifetimeTotalEventTime() > 0) {
|
||||
// bah saturation
|
||||
buf.append("0;0;0;0;");
|
||||
}
|
||||
buf.append(num(fudgeQuantity)).append(';');
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
/* report the same data for tx and rx, for enhanced anonymity */
|
||||
private void includeAverageThroughput(Properties stats) {
|
||||
RateStat sendRate = _context.statManager().getRate("bw.sendRate");
|
||||
RateStat recvRate = _context.statManager().getRate("bw.recvRate");
|
||||
if (sendRate == null || recvRate == null)
|
||||
return;
|
||||
Rate s = sendRate.getRate(60*60*1000);
|
||||
Rate r = recvRate.getRate(60*60*1000);
|
||||
if (s == null || r == null)
|
||||
return;
|
||||
double speed = (s.getAverageValue() + r.getAverageValue()) / 2;
|
||||
double max = Math.max(s.getExtremeAverageValue(), r.getExtremeAverageValue());
|
||||
String str = num(speed) + ';' + num(max) + ";0;0;";
|
||||
stats.setProperty("stat_bandwidthSendBps.60m", str);
|
||||
stats.setProperty("stat_bandwidthReceiveBps.60m", str);
|
||||
}
|
||||
|
||||
private static String getPeriod(Rate rate) { return DataHelper.formatDuration(rate.getPeriod()); }
|
||||
|
||||
private final String num(double num) {
|
||||
if (num < 0) num = 0;
|
||||
synchronized (_fmt) { return _fmt.format(num); }
|
||||
}
|
||||
|
||||
private final String pct(double num) {
|
||||
if (num < 0) num = 0;
|
||||
synchronized (_pct) { return _pct.format(num); }
|
||||
|
||||
@@ -1,118 +0,0 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
import net.i2p.router.transport.BandwidthLimitedInputStream;
|
||||
import net.i2p.util.HTTPSendData;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Job that, if its allowed to, will submit the data gathered by the MessageHistory
|
||||
* component to some URL so that the network can be debugged more easily. By default
|
||||
* it does not submit any data or touch the message history file, but if the router
|
||||
* has the line "router.submitHistory=true", it will send the file that the
|
||||
* MessageHistory component is configured to write to once an hour, post it to
|
||||
* http://i2p.net/cgi-bin/submitMessageHistory, and then delete that file
|
||||
* locally. This should only be used if the MessageHistory component is configured to
|
||||
* gather data (via "router.keepHistory=true").
|
||||
*
|
||||
*/
|
||||
public class SubmitMessageHistoryJob extends JobImpl {
|
||||
private Log _log;
|
||||
|
||||
/** default submitting data every hour */
|
||||
private final static long DEFAULT_REQUEUE_DELAY = 60*60*1000;
|
||||
/**
|
||||
* router config param for whether we want to autosubmit (and delete) the
|
||||
* history data managed by MessageHistory
|
||||
*/
|
||||
public final static String PARAM_SUBMIT_DATA = "router.submitHistory";
|
||||
/** default value for whether we autosubmit the data */
|
||||
public final static boolean DEFAULT_SUBMIT_DATA = true;
|
||||
/** where the data should be submitted to (via HTTP POST) */
|
||||
public final static String PARAM_SUBMIT_URL = "router.submitHistoryURL";
|
||||
/** default location */
|
||||
public final static String DEFAULT_SUBMIT_URL = "http://i2p.net/cgi-bin/submitMessageHistory";
|
||||
|
||||
public SubmitMessageHistoryJob(RouterContext context) {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(SubmitMessageHistoryJob.class);
|
||||
}
|
||||
|
||||
public void runJob() {
|
||||
if (shouldSubmit()) {
|
||||
submit();
|
||||
} else {
|
||||
_log.debug("Not submitting data");
|
||||
// if we didn't submit we can just requeue
|
||||
requeue(getRequeueDelay());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* We don't want this to be run within the jobqueue itself, so fire off a new thread
|
||||
* to do the actual submission, enqueueing a new submit job when its done
|
||||
*/
|
||||
private void submit() {
|
||||
I2PThread t = new I2PThread(new Runnable() {
|
||||
public void run() {
|
||||
_log.debug("Submitting data");
|
||||
getContext().messageHistory().setPauseFlushes(true);
|
||||
String filename = getContext().messageHistory().getFilename();
|
||||
send(filename);
|
||||
getContext().messageHistory().setPauseFlushes(false);
|
||||
Job job = new SubmitMessageHistoryJob(getContext());
|
||||
job.getTiming().setStartAfter(getContext().clock().now() + getRequeueDelay());
|
||||
getContext().jobQueue().addJob(job);
|
||||
}
|
||||
});
|
||||
t.setName("SubmitData");
|
||||
t.setPriority(I2PThread.MIN_PRIORITY);
|
||||
t.setDaemon(true);
|
||||
t.start();
|
||||
}
|
||||
|
||||
private void send(String filename) {
|
||||
String url = getURL();
|
||||
try {
|
||||
File dataFile = new File(filename);
|
||||
if (!dataFile.exists() || !dataFile.canRead()) {
|
||||
_log.warn("Unable to read the message data file [" + dataFile.getAbsolutePath() + "]");
|
||||
return;
|
||||
}
|
||||
long size = dataFile.length();
|
||||
FileInputStream fin = new FileInputStream(dataFile);
|
||||
BandwidthLimitedInputStream in = new BandwidthLimitedInputStream(getContext(), fin, null, true);
|
||||
boolean sent = HTTPSendData.postData(url, size, in);
|
||||
fin.close();
|
||||
boolean deleted = dataFile.delete();
|
||||
_log.debug("Submitted " + size + " bytes? " + sent + " and deleted? " + deleted);
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error sending the data", ioe);
|
||||
}
|
||||
}
|
||||
|
||||
private String getURL() {
|
||||
String str = getContext().router().getConfigSetting(PARAM_SUBMIT_URL);
|
||||
if ( (str == null) || (str.trim().length() <= 0) )
|
||||
return DEFAULT_SUBMIT_URL;
|
||||
else
|
||||
return str.trim();
|
||||
}
|
||||
|
||||
private boolean shouldSubmit() {
|
||||
String str = getContext().router().getConfigSetting(PARAM_SUBMIT_DATA);
|
||||
if (str == null) {
|
||||
_log.debug("History submit config not specified [" + PARAM_SUBMIT_DATA + "], default = " + DEFAULT_SUBMIT_DATA);
|
||||
return DEFAULT_SUBMIT_DATA;
|
||||
} else {
|
||||
_log.debug("History submit config specified [" + str + "]");
|
||||
}
|
||||
return Boolean.TRUE.toString().equals(str);
|
||||
}
|
||||
private long getRequeueDelay() { return DEFAULT_REQUEUE_DELAY; }
|
||||
public String getName() { return "Submit Message History"; }
|
||||
}
|
||||
@@ -34,6 +34,27 @@ public interface TunnelInfo {
/** retrieve the peer at the given hop. the gateway is hop 0 */
public Hash getPeer(int hop);

/**
* For convenience
* @return getPeer(0)
* @since 0.8.9
*/
public Hash getGateway();

/**
* For convenience
* @return getPeer(getLength() - 1)
* @since 0.8.9
*/
public Hash getEndpoint();

/**
* For convenience
* @return isInbound() ? getGateway() : getEndpoint()
* @since 0.8.9
*/
public Hash getFarEnd();

/** is this an inbound tunnel? */
public boolean isInbound();

@@ -47,10 +68,23 @@ public interface TunnelInfo {
*/
public void testSuccessful(int responseTime);

public long getProcessedMessagesCount();
public int getProcessedMessagesCount();

/** we know for sure that this many bytes travelled through the tunnel in its lifetime */
public long getVerifiedBytesTransferred();

/** we know for sure that the given number of bytes were sent down the tunnel fully */
public void incrementVerifiedBytesTransferred(int numBytes);

/**
* Did we reuse this tunnel?
* @since 0.8.11
*/
public boolean wasReused();

/**
* Note that we reused this tunnel
* @since 0.8.11
*/
public void setReused();
}

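The convenience accessors added to TunnelInfo above are specified purely in terms of getPeer(), getLength() and isInbound(); a sketch of how an implementing class could derive them (illustrative only, not the project's actual implementation):

import net.i2p.data.Hash;

abstract class TunnelInfoSketch implements TunnelInfo {
    public Hash getGateway() {
        return getPeer(0);                        // hop 0 is the gateway
    }
    public Hash getEndpoint() {
        return getPeer(getLength() - 1);          // last hop is the endpoint
    }
    public Hash getFarEnd() {
        // the far side is the gateway of an inbound tunnel, the endpoint of an outbound one
        return isInbound() ? getGateway() : getEndpoint();
    }
}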
@@ -8,18 +8,21 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.router.tunnel.pool.TunnelPool;
|
||||
|
||||
/**
|
||||
* Build and maintain tunnels throughout the network.
|
||||
*
|
||||
*/
|
||||
public interface TunnelManagerFacade extends Service {
|
||||
|
||||
/**
|
||||
* Retrieve the information related to a particular tunnel
|
||||
*
|
||||
@@ -27,20 +30,88 @@ public interface TunnelManagerFacade extends Service {
|
||||
*
|
||||
*/
|
||||
TunnelInfo getTunnelInfo(TunnelId id);
|
||||
/** pick an inbound tunnel not bound to a particular destination */
|
||||
|
||||
/**
|
||||
* Pick a random inbound exploratory tunnel
|
||||
*
|
||||
* @return null if none
|
||||
*/
|
||||
TunnelInfo selectInboundTunnel();
|
||||
/** pick an inbound tunnel bound to the given destination */
|
||||
|
||||
/**
|
||||
* Pick a random inbound tunnel from the given destination's pool
|
||||
*
|
||||
* @param destination if null, returns inbound exploratory tunnel
|
||||
* @return null if none
|
||||
*/
|
||||
TunnelInfo selectInboundTunnel(Hash destination);
|
||||
/** pick an outbound tunnel not bound to a particular destination */
|
||||
|
||||
/**
|
||||
* Pick a random outbound exploratory tunnel
|
||||
*
|
||||
* @return null if none
|
||||
*/
|
||||
TunnelInfo selectOutboundTunnel();
|
||||
/** pick an outbound tunnel bound to the given destination */
|
||||
|
||||
/**
|
||||
* Pick a random outbound tunnel from the given destination's pool
|
||||
*
|
||||
* @param destination if null, returns outbound exploratory tunnel
|
||||
* @return null if none
|
||||
*/
|
||||
TunnelInfo selectOutboundTunnel(Hash destination);
|
||||
|
||||
/**
* True if the peer currently part of a tunnel
* Pick the inbound exploratory tunnel with the gateway closest to the given hash.
* By using this instead of the random selectTunnel(),
* we force some locality in OBEP-IBGW connections to minimize
* those connections network-wide.
*
* @param closestTo non-null
* @return null if none
* @since 0.8.10
*/
boolean isInUse(Hash peer);
public TunnelInfo selectInboundExploratoryTunnel(Hash closestTo);

/**
* Pick the inbound tunnel with the gateway closest to the given hash
* from the given destination's pool.
* By using this instead of the random selectTunnel(),
* we force some locality in OBEP-IBGW connections to minimize
* those connections network-wide.
*
* @param destination if null, returns inbound exploratory tunnel
* @param closestTo non-null
* @return null if none
* @since 0.8.10
*/
public TunnelInfo selectInboundTunnel(Hash destination, Hash closestTo);

/**
* Pick the outbound exploratory tunnel with the endpoint closest to the given hash.
* By using this instead of the random selectTunnel(),
* we force some locality in OBEP-IBGW connections to minimize
* those connections network-wide.
*
* @param closestTo non-null
* @return null if none
* @since 0.8.10
*/
public TunnelInfo selectOutboundExploratoryTunnel(Hash closestTo);

/**
* Pick the outbound tunnel with the endpoint closest to the given hash
* from the given destination's pool.
* By using this instead of the random selectTunnel(),
* we force some locality in OBEP-IBGW connections to minimize
* those connections network-wide.
*
* @param destination if null, returns outbound exploratory tunnel
* @param closestTo non-null
* @return null if none
* @since 0.8.10
*/
public TunnelInfo selectOutboundTunnel(Hash destination, Hash closestTo);

/** Is a tunnel a valid member of the pool? */
public boolean isValidTunnel(Hash client, TunnelInfo tunnel);
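The "closest to the given hash" selection described above is what forces the OBEP-IBGW locality. As a rough sketch, closeness can be measured as XOR distance between hashes; this is illustrative only, the router's actual pool-selection code may rank candidates differently:

import java.util.List;
import net.i2p.data.Hash;

class ClosestTunnelSketch {
    /**
     * Pick the candidate whose gateway hash has the smallest XOR distance
     * to the target, comparing the XOR bytes as an unsigned big-endian value.
     */
    static Hash closestGateway(List<Hash> gateways, Hash closestTo) {
        Hash best = null;
        byte[] target = closestTo.getData();
        byte[] bestXor = null;
        for (Hash gw : gateways) {
            byte[] xor = xor(gw.getData(), target);
            if (bestXor == null || compareUnsigned(xor, bestXor) < 0) {
                best = gw;
                bestXor = xor;
            }
        }
        return best;                              // null if no candidates
    }

    private static byte[] xor(byte[] a, byte[] b) {
        byte[] out = new byte[a.length];
        for (int i = 0; i < a.length; i++)
            out[i] = (byte) (a[i] ^ b[i]);
        return out;
    }

    private static int compareUnsigned(byte[] a, byte[] b) {
        for (int i = 0; i < a.length; i++) {
            int ai = a[i] & 0xff, bi = b[i] & 0xff;
            if (ai != bi)
                return ai < bi ? -1 : 1;
        }
        return 0;
    }
}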
@@ -55,6 +126,9 @@ public interface TunnelManagerFacade extends Service {
|
||||
public int getInboundClientTunnelCount();
|
||||
/** how many outbound client tunnels do we have available? */
|
||||
public int getOutboundClientTunnelCount();
|
||||
/** how many outbound client tunnels in this pool? */
|
||||
public int getOutboundClientTunnelCount(Hash destination);
|
||||
public double getShareRatio();
|
||||
|
||||
/** When does the last tunnel we are participating in expire? */
|
||||
public long getLastParticipatingExpiration();
|
||||
@@ -62,6 +136,9 @@ public interface TunnelManagerFacade extends Service {
|
||||
/** count how many inbound tunnel requests we have received but not yet processed */
|
||||
public int getInboundBuildQueueSize();
|
||||
|
||||
/** @return Set of peers that should not be allowed to be in another tunnel */
|
||||
public Set<Hash> selectPeersInTooManyTunnels();
|
||||
|
||||
/**
|
||||
* the client connected (or updated their settings), so make sure we have
|
||||
* the tunnels for them, and whenever necessary, ask them to authorize
|
||||
@@ -78,4 +155,17 @@ public interface TunnelManagerFacade extends Service {
|
||||
public void setOutboundSettings(TunnelPoolSettings settings);
|
||||
public void setInboundSettings(Hash client, TunnelPoolSettings settings);
|
||||
public void setOutboundSettings(Hash client, TunnelPoolSettings settings);
|
||||
/** for TunnelRenderer in router console */
|
||||
public void listPools(List<TunnelPool> out);
|
||||
/** for TunnelRenderer in router console */
|
||||
public Map<Hash, TunnelPool> getInboundClientPools();
|
||||
/** for TunnelRenderer in router console */
|
||||
public Map<Hash, TunnelPool> getOutboundClientPools();
|
||||
/** for TunnelRenderer in router console */
|
||||
public TunnelPool getInboundExploratoryPool();
|
||||
/** for TunnelRenderer in router console */
|
||||
public TunnelPool getOutboundExploratoryPool();
|
||||
|
||||
/** @since 0.8.13 */
|
||||
public void fail(Hash peer);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
package net.i2p.router;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.util.RandomSource;
|
||||
|
||||
/**
|
||||
* Wrap up the settings for a pool of tunnels (duh)
|
||||
* Wrap up the settings for a pool of tunnels.
|
||||
*
|
||||
*/
|
||||
public class TunnelPoolSettings {
|
||||
@@ -16,7 +17,7 @@ public class TunnelPoolSettings {
|
||||
private int _quantity;
|
||||
private int _backupQuantity;
|
||||
// private int _rebuildPeriod;
|
||||
private int _duration;
|
||||
//private int _duration;
|
||||
private int _length;
|
||||
private int _lengthVariance;
|
||||
private int _lengthOverride;
|
||||
@@ -24,8 +25,8 @@ public class TunnelPoolSettings {
|
||||
private boolean _isExploratory;
|
||||
private boolean _allowZeroHop;
|
||||
private int _IPRestriction;
|
||||
private Properties _unknownOptions;
|
||||
private Hash _randomKey;
|
||||
private final Properties _unknownOptions;
|
||||
private final Hash _randomKey;
|
||||
|
||||
/** prefix used to override the router's defaults for clients */
|
||||
public static final String PREFIX_DEFAULT = "router.defaultPool.";
|
||||
@@ -49,7 +50,7 @@ public class TunnelPoolSettings {
|
||||
// public static final int DEFAULT_REBUILD_PERIOD = 60*1000;
|
||||
public static final int DEFAULT_DURATION = 10*60*1000;
|
||||
public static final int DEFAULT_LENGTH = 2;
|
||||
public static final int DEFAULT_LENGTH_VARIANCE = 1;
|
||||
public static final int DEFAULT_LENGTH_VARIANCE = 0;
|
||||
public static final boolean DEFAULT_ALLOW_ZERO_HOP = true;
|
||||
public static final int DEFAULT_IP_RESTRICTION = 2; // class B (/16)
|
||||
|
||||
@@ -57,15 +58,11 @@ public class TunnelPoolSettings {
|
||||
_quantity = DEFAULT_QUANTITY;
|
||||
_backupQuantity = DEFAULT_BACKUP_QUANTITY;
|
||||
// _rebuildPeriod = DEFAULT_REBUILD_PERIOD;
|
||||
_duration = DEFAULT_DURATION;
|
||||
//_duration = DEFAULT_DURATION;
|
||||
_length = DEFAULT_LENGTH;
|
||||
_lengthVariance = DEFAULT_LENGTH_VARIANCE;
|
||||
_lengthOverride = 0;
|
||||
_lengthOverride = -1;
|
||||
_allowZeroHop = DEFAULT_ALLOW_ZERO_HOP;
|
||||
_isInbound = false;
|
||||
_isExploratory = false;
|
||||
_destination = null;
|
||||
_destinationNickname = null;
|
||||
_IPRestriction = DEFAULT_IP_RESTRICTION;
|
||||
_unknownOptions = new Properties();
|
||||
_randomKey = generateRandomKey();
|
||||
@@ -79,12 +76,29 @@ public class TunnelPoolSettings {
|
||||
public int getBackupQuantity() { return _backupQuantity; }
|
||||
public void setBackupQuantity(int quantity) { _backupQuantity = quantity; }
|
||||
|
||||
/**
|
||||
* Convenience
|
||||
* @return getQuantity() + getBackupQuantity()
|
||||
* @since 0.8.11
|
||||
*/
|
||||
public int getTotalQuantity() {
|
||||
return _quantity + _backupQuantity;
|
||||
}
|
||||
|
||||
/** how long before tunnel expiration should new tunnels be built */
|
||||
// public int getRebuildPeriod() { return _rebuildPeriod; }
|
||||
// public void setRebuildPeriod(int periodMs) { _rebuildPeriod = periodMs; }
|
||||
|
||||
/** how many remote hops should be in the tunnel */
|
||||
/**
|
||||
* How many remote hops should be in the tunnel NOT including us
|
||||
* @return 0 to 7
|
||||
*/
|
||||
public int getLength() { return _length; }
|
||||
|
||||
/**
|
||||
* How many remote hops should be in the tunnel NOT including us
|
||||
* @param length 0 to 7 (not enforced here)
|
||||
*/
|
||||
public void setLength(int length) { _length = length; }
|
||||
|
||||
/** if there are no tunnels to build with, will this pool allow 0 hop tunnels? */
|
||||
@@ -100,10 +114,20 @@ public class TunnelPoolSettings {
|
||||
public int getLengthVariance() { return _lengthVariance; }
|
||||
public void setLengthVariance(int variance) { _lengthVariance = variance; }
|
||||
|
||||
/* Set to a nonzero value to override the length setting */
|
||||
/**
|
||||
* A temporary length to be used due to network conditions.
|
||||
* If less than zero, the standard length should be used.
|
||||
* Unused until 0.8.11
|
||||
*/
|
||||
public int getLengthOverride() { return _lengthOverride; }
|
||||
public void setLengthOverride(int variance) { _lengthOverride = variance; }
|
||||
|
||||
|
||||
/**
|
||||
* A temporary length to be used due to network conditions.
|
||||
* If less than zero, the standard length will be used.
|
||||
* Unused until 0.8.11
|
||||
*/
|
||||
public void setLengthOverride(int length) { _lengthOverride = length; }
|
||||
|
||||
/** is this an inbound tunnel? */
|
||||
public boolean isInbound() { return _isInbound; }
|
||||
public void setIsInbound(boolean isInbound) { _isInbound = isInbound; }
|
||||
@@ -112,8 +136,9 @@ public class TunnelPoolSettings {
|
||||
public boolean isExploratory() { return _isExploratory; }
|
||||
public void setIsExploratory(boolean isExploratory) { _isExploratory = isExploratory; }
|
||||
|
||||
public int getDuration() { return _duration; }
|
||||
public void setDuration(int ms) { _duration = ms; }
|
||||
// Duration is hardcoded
|
||||
//public int getDuration() { return _duration; }
|
||||
//public void setDuration(int ms) { _duration = ms; }
|
||||
|
||||
/** what destination is this a tunnel for (or null if none) */
|
||||
public Hash getDestination() { return _destination; }
|
||||
@@ -137,17 +162,17 @@ public class TunnelPoolSettings {
|
||||
|
||||
public Properties getUnknownOptions() { return _unknownOptions; }
|
||||
|
||||
public void readFromProperties(String prefix, Properties props) {
|
||||
for (Iterator iter = props.keySet().iterator(); iter.hasNext(); ) {
|
||||
String name = (String)iter.next();
|
||||
String value = props.getProperty(name);
|
||||
public void readFromProperties(String prefix, Map<Object, Object> props) {
|
||||
for (Map.Entry e : props.entrySet()) {
|
||||
String name = (String) e.getKey();
|
||||
String value = (String) e.getValue();
|
||||
if (name.startsWith(prefix)) {
|
||||
if (name.equalsIgnoreCase(prefix + PROP_ALLOW_ZERO_HOP))
|
||||
_allowZeroHop = getBoolean(value, DEFAULT_ALLOW_ZERO_HOP);
|
||||
else if (name.equalsIgnoreCase(prefix + PROP_BACKUP_QUANTITY))
|
||||
_backupQuantity = getInt(value, DEFAULT_BACKUP_QUANTITY);
|
||||
else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
|
||||
_duration = getInt(value, DEFAULT_DURATION);
|
||||
//else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
|
||||
// _duration = getInt(value, DEFAULT_DURATION);
|
||||
else if (name.equalsIgnoreCase(prefix + PROP_LENGTH))
|
||||
_length = getInt(value, DEFAULT_LENGTH);
|
||||
else if (name.equalsIgnoreCase(prefix + PROP_LENGTH_VARIANCE))
|
||||
@@ -170,7 +195,7 @@ public class TunnelPoolSettings {
|
||||
if (props == null) return;
|
||||
props.setProperty(prefix + PROP_ALLOW_ZERO_HOP, ""+_allowZeroHop);
|
||||
props.setProperty(prefix + PROP_BACKUP_QUANTITY, ""+_backupQuantity);
|
||||
props.setProperty(prefix + PROP_DURATION, ""+_duration);
|
||||
//props.setProperty(prefix + PROP_DURATION, ""+_duration);
|
||||
props.setProperty(prefix + PROP_LENGTH, ""+_length);
|
||||
props.setProperty(prefix + PROP_LENGTH_VARIANCE, ""+_lengthVariance);
|
||||
if (_destinationNickname != null)
|
||||
@@ -178,22 +203,23 @@ public class TunnelPoolSettings {
|
||||
props.setProperty(prefix + PROP_QUANTITY, ""+_quantity);
|
||||
// props.setProperty(prefix + PROP_REBUILD_PERIOD, ""+_rebuildPeriod);
|
||||
props.setProperty(prefix + PROP_IP_RESTRICTION, ""+_IPRestriction);
|
||||
for (Iterator iter = _unknownOptions.keySet().iterator(); iter.hasNext(); ) {
|
||||
String name = (String)iter.next();
|
||||
String val = _unknownOptions.getProperty(name);
|
||||
for (Map.Entry e : _unknownOptions.entrySet()) {
|
||||
String name = (String) e.getKey();
|
||||
String val = (String) e.getValue();
|
||||
props.setProperty(prefix + name, val);
|
||||
}
|
||||
}
|
||||
|
||||
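readFromProperties()/writeToProperties() above scope every tunnel-pool option under a prefix such as router.defaultPool. and keep unrecognized keys around. A minimal sketch of that round-trip pattern, with hypothetical option names rather than the actual TunnelPoolSettings keys:

import java.util.Map;
import java.util.Properties;

class PrefixedSettingsSketch {
    int quantity = 2;                             // hypothetical defaults
    boolean allowZeroHop = true;
    final Properties unknown = new Properties();

    /** Read only the entries whose names start with the given prefix. */
    void readFromProperties(String prefix, Map<Object, Object> props) {
        for (Map.Entry<Object, Object> e : props.entrySet()) {
            String name = (String) e.getKey();
            String value = (String) e.getValue();
            if (!name.startsWith(prefix))
                continue;
            String key = name.substring(prefix.length());
            if (key.equalsIgnoreCase("quantity"))
                quantity = parseInt(value, quantity);
            else if (key.equalsIgnoreCase("allowZeroHop"))
                allowZeroHop = Boolean.parseBoolean(value);
            else
                unknown.setProperty(key, value);  // preserve unrecognized options
        }
    }

    /** Write the settings back out under the same prefix. */
    void writeToProperties(String prefix, Properties props) {
        props.setProperty(prefix + "quantity", Integer.toString(quantity));
        props.setProperty(prefix + "allowZeroHop", Boolean.toString(allowZeroHop));
        for (Map.Entry<Object, Object> e : unknown.entrySet())
            props.setProperty(prefix + (String) e.getKey(), (String) e.getValue());
    }

    private static int parseInt(String s, int dflt) {
        try { return Integer.parseInt(s); } catch (NumberFormatException nfe) { return dflt; }
    }
}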
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
StringBuilder buf = new StringBuilder();
|
||||
Properties p = new Properties();
|
||||
writeToProperties("", p);
|
||||
buf.append("Tunnel pool settings:\n");
|
||||
buf.append("====================================\n");
|
||||
for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) {
|
||||
String name = (String)iter.next();
|
||||
String val = p.getProperty(name);
|
||||
for (Map.Entry e : p.entrySet()) {
|
||||
String name = (String) e.getKey();
|
||||
String val = (String) e.getValue();
|
||||
buf.append(name).append(" = [").append(val).append("]\n");
|
||||
}
|
||||
buf.append("is inbound? ").append(_isInbound).append("\n");
|
||||
@@ -203,7 +229,7 @@ public class TunnelPoolSettings {
|
||||
}
|
||||
|
||||
// used for strict peer ordering
|
||||
private Hash generateRandomKey() {
|
||||
private static Hash generateRandomKey() {
|
||||
byte hash[] = new byte[Hash.HASH_LENGTH];
|
||||
RandomSource.getInstance().nextBytes(hash);
|
||||
return new Hash(hash);
|
||||
@@ -211,10 +237,13 @@ public class TunnelPoolSettings {
|
||||
|
||||
private static final boolean getBoolean(String str, boolean defaultValue) {
|
||||
if (str == null) return defaultValue;
|
||||
boolean v = "TRUE".equalsIgnoreCase(str) || "YES".equalsIgnoreCase(str);
|
||||
boolean v = Boolean.valueOf(str).booleanValue() ||
|
||||
(str != null && "YES".equals(str.toUpperCase(Locale.US)));
|
||||
return v;
|
||||
}
|
||||
|
||||
private static final int getInt(String str, int defaultValue) { return (int)getLong(str, defaultValue); }
|
||||
|
||||
private static final long getLong(String str, long defaultValue) {
|
||||
if (str == null) return defaultValue;
|
||||
try {
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
package net.i2p.router;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* Set of criteria for finding a tunnel from the Tunnel Manager
|
||||
*
|
||||
*/
|
||||
public class TunnelSelectionCriteria {
|
||||
public final static int MAX_PRIORITY = 100;
|
||||
public final static int MIN_PRIORITY = 0;
|
||||
private int _latencyPriority;
|
||||
private int _anonymityPriority;
|
||||
private int _reliabilityPriority;
|
||||
private int _maxNeeded;
|
||||
private int _minNeeded;
|
||||
|
||||
public TunnelSelectionCriteria() {
|
||||
setLatencyPriority(0);
|
||||
setAnonymityPriority(0);
|
||||
setReliabilityPriority(0);
|
||||
setMinimumTunnelsRequired(0);
|
||||
setMaximumTunnelsRequired(0);
|
||||
}
|
||||
|
||||
/** priority of the latency for the tunnel */
|
||||
public int getLatencyPriority() { return _latencyPriority; }
|
||||
public void setLatencyPriority(int latencyPriority) { _latencyPriority = latencyPriority; }
|
||||
/** priority of the anonymity for the tunnel */
|
||||
public int getAnonymityPriority() { return _anonymityPriority; }
|
||||
public void setAnonymityPriority(int anonPriority) { _anonymityPriority = anonPriority; }
|
||||
/** priority of the reliability for the tunnel */
|
||||
public int getReliabilityPriority() { return _reliabilityPriority; }
|
||||
public void setReliabilityPriority(int reliabilityPriority) { _reliabilityPriority = reliabilityPriority; }
|
||||
/** max # of tunnels to return */
|
||||
public int getMaximumTunnelsRequired() { return _maxNeeded; }
|
||||
public void setMaximumTunnelsRequired(int maxNeeded) { _maxNeeded = maxNeeded; }
|
||||
/** minimum # of tunnels to return */
|
||||
public int getMinimumTunnelsRequired() { return _minNeeded; }
|
||||
public void setMinimumTunnelsRequired(int minNeeded) { _minNeeded = minNeeded; }
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
package net.i2p.router;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Date;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
|
||||
/**
|
||||
* Wrap up the settings specified for a particular tunnel
|
||||
*
|
||||
*/
|
||||
public class TunnelSettings extends DataStructureImpl {
|
||||
private I2PAppContext _context;
|
||||
private int _depth;
|
||||
private long _msgsPerMinuteAvg;
|
||||
private long _bytesPerMinuteAvg;
|
||||
private long _msgsPerMinutePeak;
|
||||
private long _bytesPerMinutePeak;
|
||||
private boolean _includeDummy;
|
||||
private boolean _reorder;
|
||||
private long _expiration;
|
||||
private long _created;
|
||||
|
||||
public TunnelSettings(I2PAppContext context) {
|
||||
_context = context;
|
||||
_depth = 0;
|
||||
_msgsPerMinuteAvg = 0;
|
||||
_msgsPerMinutePeak = 0;
|
||||
_bytesPerMinuteAvg = 0;
|
||||
_bytesPerMinutePeak = 0;
|
||||
_includeDummy = false;
|
||||
_reorder = false;
|
||||
_expiration = 0;
|
||||
_created = _context.clock().now();
|
||||
}
|
||||
|
||||
public int getDepth() { return _depth; }
|
||||
public void setDepth(int depth) { _depth = depth; }
|
||||
public long getMessagesPerMinuteAverage() { return _msgsPerMinuteAvg; }
|
||||
public long getMessagesPerMinutePeak() { return _msgsPerMinutePeak; }
|
||||
public long getBytesPerMinuteAverage() { return _bytesPerMinuteAvg; }
|
||||
public long getBytesPerMinutePeak() { return _bytesPerMinutePeak; }
|
||||
public void setMessagesPerMinuteAverage(long msgs) { _msgsPerMinuteAvg = msgs; }
|
||||
public void setMessagesPerMinutePeak(long msgs) { _msgsPerMinutePeak = msgs; }
|
||||
public void setBytesPerMinuteAverage(long bytes) { _bytesPerMinuteAvg = bytes; }
|
||||
public void setBytesPerMinutePeak(long bytes) { _bytesPerMinutePeak = bytes; }
|
||||
public boolean getIncludeDummy() { return _includeDummy; }
|
||||
public void setIncludeDummy(boolean include) { _includeDummy = include; }
|
||||
public boolean getReorder() { return _reorder; }
|
||||
public void setReorder(boolean reorder) { _reorder = reorder; }
|
||||
public long getExpiration() { return _expiration; }
|
||||
public void setExpiration(long expiration) { _expiration = expiration; }
|
||||
public long getCreated() { return _created; }
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
Boolean b = DataHelper.readBoolean(in);
|
||||
if (b == null) throw new DataFormatException("Null includeDummy boolean value");
|
||||
_includeDummy = b.booleanValue();
|
||||
b = DataHelper.readBoolean(in);
|
||||
if (b == null) throw new DataFormatException("Null reorder boolean value");
|
||||
_reorder = b.booleanValue();
|
||||
_depth = (int)DataHelper.readLong(in, 1);
|
||||
_bytesPerMinuteAvg = DataHelper.readLong(in, 4);
|
||||
_bytesPerMinutePeak = DataHelper.readLong(in, 4);
|
||||
Date exp = DataHelper.readDate(in);
|
||||
if (exp == null)
|
||||
_expiration = 0;
|
||||
else
|
||||
_expiration = exp.getTime();
|
||||
_msgsPerMinuteAvg = DataHelper.readLong(in, 4);
|
||||
_msgsPerMinutePeak = DataHelper.readLong(in, 4);
|
||||
Date created = DataHelper.readDate(in);
|
||||
if (created != null)
|
||||
_created = created.getTime();
|
||||
else
|
||||
_created = _context.clock().now();
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
DataHelper.writeBoolean(out, _includeDummy ? Boolean.TRUE : Boolean.FALSE);
|
||||
DataHelper.writeBoolean(out, _reorder ? Boolean.TRUE : Boolean.FALSE);
|
||||
DataHelper.writeLong(out, 1, _depth);
|
||||
DataHelper.writeLong(out, 4, _bytesPerMinuteAvg);
|
||||
DataHelper.writeLong(out, 4, _bytesPerMinutePeak);
|
||||
if (_expiration <= 0)
|
||||
DataHelper.writeDate(out, new Date(0));
|
||||
else
|
||||
DataHelper.writeDate(out, new Date(_expiration));
|
||||
DataHelper.writeLong(out, 4, _msgsPerMinuteAvg);
|
||||
DataHelper.writeLong(out, 4, _msgsPerMinutePeak);
|
||||
DataHelper.writeDate(out, new Date(_created));
|
||||
}
|
||||
|
||||
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv += _includeDummy ? 100 : 0;
|
||||
rv += _reorder ? 50 : 0;
|
||||
rv += _depth;
|
||||
rv += _bytesPerMinuteAvg;
|
||||
rv += _bytesPerMinutePeak;
|
||||
rv += _expiration;
|
||||
rv += _msgsPerMinuteAvg;
|
||||
rv += _msgsPerMinutePeak;
|
||||
return rv;
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj != null) && (obj instanceof TunnelSettings) ) {
|
||||
TunnelSettings settings = (TunnelSettings)obj;
|
||||
return settings.getBytesPerMinuteAverage() == getBytesPerMinuteAverage() &&
|
||||
settings.getBytesPerMinutePeak() == getBytesPerMinutePeak() &&
|
||||
settings.getDepth() == getDepth() &&
|
||||
settings.getExpiration() == getExpiration() &&
|
||||
settings.getIncludeDummy() == getIncludeDummy() &&
|
||||
settings.getMessagesPerMinuteAverage() == getMessagesPerMinuteAverage() &&
|
||||
settings.getMessagesPerMinutePeak() == getMessagesPerMinutePeak() &&
|
||||
settings.getReorder() == getReorder();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,126 +0,0 @@
|
||||
package net.i2p.router.admin;
|
||||
/*
|
||||
* free (adj.): unencumbered; not under the control of others
|
||||
* Written by jrandom in 2003 and released into the public domain
|
||||
* with no warranty of any kind, either expressed or implied.
|
||||
* It probably won't make your computer catch on fire, or eat
|
||||
* your children, but it might. Use at your own risk.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Listen for connections on the specified port, and toss them onto the client manager's
|
||||
* set of connections once they are established.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class AdminListener implements Runnable {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private ServerSocket _socket;
|
||||
private int _port;
|
||||
private boolean _running;
|
||||
private long _nextFailDelay = 1000;
|
||||
|
||||
public AdminListener(RouterContext context, int port) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(AdminListener.class);
|
||||
_port = port;
|
||||
_running = false;
|
||||
}
|
||||
|
||||
public void restart() {
|
||||
// this works by taking advantage of the auto-retry mechanism in the
|
||||
// startup() loop (which we reset to wait 1s). by failing the socket
|
||||
// (through close()) and nulling it, we will have to try to build a new
|
||||
// serverSocket (using the *new* _port)
|
||||
_nextFailDelay = 1000;
|
||||
ServerSocket s = _socket;
|
||||
try {
|
||||
_socket = null;
|
||||
s.close();
|
||||
} catch (IOException ioe) {}
|
||||
}
|
||||
|
||||
public void setPort(int port) { _port = port; }
|
||||
public int getPort() { return _port; }
|
||||
|
||||
/** max time to bind */
|
||||
private final static int MAX_FAIL_DELAY = 5*60*1000;
|
||||
|
||||
/**
|
||||
* Start up the socket listener, listens for connections, and
|
||||
* fires those connections off via {@link #runConnection runConnection}.
|
||||
* This only returns if the socket cannot be opened or there is a catastrophic
|
||||
* failure.
|
||||
*
|
||||
*/
|
||||
public void startup() {
|
||||
_running = true;
|
||||
int curDelay = 0;
|
||||
while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) {
|
||||
try {
|
||||
_log.info("Starting up listening for connections on port " + _port);
|
||||
_socket = new ServerSocket(_port);
|
||||
curDelay = 0;
|
||||
while (_running && (_socket != null) ) {
|
||||
try {
|
||||
Socket socket = _socket.accept();
|
||||
_log.debug("Connection received");
|
||||
runConnection(socket);
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Server error accepting", ioe);
|
||||
} catch (Throwable t) {
|
||||
_log.error("Fatal error running client listener - killing the thread!", t);
|
||||
return;
|
||||
}
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error listening on port " + _port, ioe);
|
||||
}
|
||||
|
||||
if (_socket != null) {
|
||||
try { _socket.close(); } catch (IOException ioe) {}
|
||||
_socket = null;
|
||||
}
|
||||
|
||||
_log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
|
||||
try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
|
||||
curDelay += _nextFailDelay;
|
||||
_nextFailDelay *= 5;
|
||||
}
|
||||
|
||||
_log.error("CANCELING ADMIN LISTENER. delay = " + curDelay, new Exception("ADMIN LISTENER cancelled!!!"));
|
||||
_running = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the connection by passing it off to an AdminRunner
|
||||
*
|
||||
*/
|
||||
protected void runConnection(Socket socket) throws IOException {
|
||||
AdminRunner runner = new AdminRunner(_context, socket);
|
||||
I2PThread t = new I2PThread(runner);
|
||||
t.setName("Admin Runner");
|
||||
//t.setPriority(Thread.MIN_PRIORITY);
|
||||
t.setDaemon(true);
|
||||
t.start();
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
_running = false;
|
||||
if (_socket != null) try {
|
||||
_socket.close();
|
||||
_socket = null;
|
||||
} catch (IOException ioe) {}
|
||||
}
|
||||
public void run() { startup(); }
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
package net.i2p.router.admin;
|
||||
|
||||
import java.io.Writer;
|
||||
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.Service;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
public class AdminManager implements Service {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
public final static String PARAM_ADMIN_PORT = "router.adminPort";
|
||||
public final static int DEFAULT_ADMIN_PORT = 7655;
|
||||
|
||||
private AdminListener _listener;
|
||||
|
||||
public AdminManager(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(AdminManager.class);
|
||||
}
|
||||
|
||||
public void renderStatusHTML(Writer out) { }
|
||||
|
||||
public void shutdown() {
|
||||
if (_listener != null) {
|
||||
_log.info("Shutting down admin listener");
|
||||
_listener.shutdown();
|
||||
_listener = null;
|
||||
}
|
||||
}
|
||||
|
||||
public void restart() {
|
||||
startup();
|
||||
}
|
||||
|
||||
public void startup() {
|
||||
int port = DEFAULT_ADMIN_PORT;
|
||||
String str = _context.router().getConfigSetting(PARAM_ADMIN_PORT);
|
||||
if (str != null) {
|
||||
try {
|
||||
int val = Integer.parseInt(str);
|
||||
port = val;
|
||||
_log.info("Starting up admin listener on port " + port);
|
||||
} catch (NumberFormatException nfe) {
|
||||
_log.warn("Invalid admin port specified [" + str + "], using the default " + DEFAULT_ADMIN_PORT, nfe);
|
||||
}
|
||||
} else {
|
||||
_log.warn("Router admin port not specified, using the default " + DEFAULT_ADMIN_PORT);
|
||||
}
|
||||
startup(port);
|
||||
}
|
||||
|
||||
private void startup(int port) {
|
||||
if (_listener == null) {
|
||||
_listener = new AdminListener(_context, port);
|
||||
I2PThread t = new I2PThread(_listener);
|
||||
t.setName("Admin Listener:" + port);
|
||||
t.setDaemon(true);
|
||||
//t.setPriority(Thread.MIN_PRIORITY);
|
||||
t.start();
|
||||
} else {
|
||||
_listener.setPort(port);
|
||||
_listener.restart();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,150 +0,0 @@
|
||||
package net.i2p.router.admin;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStream;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.net.Socket;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.router.Router;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
class AdminRunner implements Runnable {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private Socket _socket;
|
||||
private StatsGenerator _generator;
|
||||
|
||||
public AdminRunner(RouterContext context, Socket socket) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(AdminRunner.class);
|
||||
_socket = socket;
|
||||
_generator = new StatsGenerator(context);
|
||||
}
|
||||
|
||||
public void run() {
|
||||
try {
|
||||
BufferedReader in = new BufferedReader(new InputStreamReader(_socket.getInputStream()));
|
||||
OutputStream out = _socket.getOutputStream();
|
||||
|
||||
String command = in.readLine();
|
||||
runCommand(command, out);
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error running admin command", ioe);
|
||||
}
|
||||
}
|
||||
|
||||
private void runCommand(String command, OutputStream out) throws IOException {
|
||||
_log.debug("Command [" + command + "]");
|
||||
if (command.indexOf("favicon") >= 0) {
|
||||
reply(out, "this is not a website");
|
||||
} else if ( (command.indexOf("routerStats.html") >= 0) || (command.indexOf("oldstats.jsp") >= 0) ) {
|
||||
try {
|
||||
out.write("HTTP/1.1 200 OK\nConnection: close\nCache-control: no-cache\nContent-type: text/html\n\n".getBytes());
|
||||
_generator.generateStatsPage(new OutputStreamWriter(out));
|
||||
out.close();
|
||||
} catch (IOException ioe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error writing out the admin reply");
|
||||
throw ioe;
|
||||
}
|
||||
} else if (command.indexOf("/profile/") >= 0) {
|
||||
replyText(out, getProfile(command));
|
||||
} else if (command.indexOf("/shutdown") >= 0) {
|
||||
reply(out, shutdown(command));
|
||||
} else if (true || command.indexOf("routerConsole.html") > 0) {
|
||||
try {
|
||||
out.write("HTTP/1.1 200 OK\nConnection: close\nCache-control: no-cache\nContent-type: text/html\n\n".getBytes());
|
||||
_context.router().renderStatusHTML(new OutputStreamWriter(out));
|
||||
out.close();
|
||||
} catch (IOException ioe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error writing out the admin reply");
|
||||
throw ioe;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void reply(OutputStream out, String content) throws IOException {
|
||||
StringBuffer reply = new StringBuffer(10240);
|
||||
reply.append("HTTP/1.1 200 OK\n");
|
||||
reply.append("Connection: close\n");
|
||||
reply.append("Cache-control: no-cache\n");
|
||||
reply.append("Content-type: text/html\n\n");
|
||||
reply.append(content);
|
||||
try {
|
||||
out.write(reply.toString().getBytes());
|
||||
out.close();
|
||||
} catch (IOException ioe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error writing out the admin reply:\n" + content);
|
||||
throw ioe;
|
||||
}
|
||||
}
|
||||
|
||||
private void replyText(OutputStream out, String content) throws IOException {
|
||||
StringBuffer reply = new StringBuffer(10240);
|
||||
reply.append("HTTP/1.1 200 OK\n");
|
||||
reply.append("Connection: close\n");
|
||||
reply.append("Cache-control: no-cache\n");
|
||||
reply.append("Content-type: text/plain\n\n");
|
||||
reply.append(content);
|
||||
try {
|
||||
out.write(reply.toString().getBytes());
|
||||
out.close();
|
||||
} catch (IOException ioe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error writing out the admin reply:\n" + content);
|
||||
throw ioe;
|
||||
}
|
||||
}
|
||||
|
||||
private String getProfile(String cmd) {
|
||||
Set peers = _context.profileOrganizer().selectAllPeers();
|
||||
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
|
||||
Hash peer = (Hash)iter.next();
|
||||
if (cmd.indexOf(peer.toBase64().substring(0,10)) >= 0) {
|
||||
try {
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(64*1024);
|
||||
_context.profileOrganizer().exportProfile(peer, baos);
|
||||
return new String(baos.toByteArray());
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error exporting the profile", ioe);
|
||||
return "Error exporting the peer profile\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "No such peer is being profiled\n";
|
||||
}
|
||||
|
||||
private static final String SHUTDOWN_PASSWORD_PROP = "router.shutdownPassword";
|
||||
private String shutdown(String cmd) {
|
||||
String password = _context.router().getConfigSetting(SHUTDOWN_PASSWORD_PROP);
|
||||
if (password == null)
|
||||
password = _context.getProperty(SHUTDOWN_PASSWORD_PROP);
|
||||
if (password == null)
|
||||
return "No shutdown password specified in the config or context - <b>REFUSING SHUTDOWN</b>." +
|
||||
"<a href=\"/routerConsole.html\">back</a>";
|
||||
if (cmd.indexOf(password) > 0) {
|
||||
I2PThread t = new I2PThread(new Runnable() {
|
||||
public void run() {
|
||||
try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
|
||||
_context.router().shutdown(Router.EXIT_HARD);
|
||||
}
|
||||
});
|
||||
t.start();
|
||||
return "Shutdown request accepted. Killing the router in 30 seconds";
|
||||
} else {
|
||||
return "Incorrect shutdown password specified. Please edit your router.config appropriately." +
|
||||
"<a href=\"/routerConsole.html\">back</a>";
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,239 +0,0 @@
|
||||
package net.i2p.router.admin;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.text.DecimalFormat;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.stat.Frequency;
|
||||
import net.i2p.stat.FrequencyStat;
|
||||
import net.i2p.stat.Rate;
|
||||
import net.i2p.stat.RateStat;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Dump the stats to the web admin interface
|
||||
*/
|
||||
public class StatsGenerator {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
public StatsGenerator(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(StatsGenerator.class);
|
||||
}
|
||||
|
||||
public void generateStatsPage(Writer out) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(16*1024);
|
||||
buf.append("<h1>Router statistics</h1><hr />");
|
||||
buf.append("<form action=\"/oldstats.jsp\">");
|
||||
buf.append("<select name=\"go\" onChange='location.href=this.value'>");
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
|
||||
Map groups = _context.statManager().getStatsByGroup();
|
||||
for (Iterator iter = groups.entrySet().iterator(); iter.hasNext(); ) {
|
||||
Map.Entry entry = (Map.Entry)iter.next();
|
||||
String group = (String)entry.getKey();
|
||||
Set stats = (Set)entry.getValue();
|
||||
buf.append("<option value=\"/oldstats.jsp#").append(group).append("\">");
|
||||
buf.append(group).append("</option>\n");
|
||||
for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) {
|
||||
String stat = (String)statIter.next();
|
||||
buf.append("<option value=\"/oldstats.jsp#");
|
||||
buf.append(stat);
|
||||
buf.append("\">...");
|
||||
buf.append(stat);
|
||||
buf.append("</option>\n");
|
||||
}
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
}
|
||||
buf.append("</select> <input type=\"submit\" value=\"GO\" />");
|
||||
buf.append("</form>");
|
||||
|
||||
buf.append("Statistics gathered during this router's uptime (");
|
||||
long uptime = _context.router().getUptime();
|
||||
buf.append(DataHelper.formatDuration(uptime));
|
||||
buf.append("). The data gathered is quantized over a 1 minute period, so should just be used as an estimate<p />");
|
||||
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
|
||||
for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) {
|
||||
String group = (String)iter.next();
|
||||
Set stats = (Set)groups.get(group);
|
||||
buf.append("<h2><a name=\"");
|
||||
buf.append(group);
|
||||
buf.append("\">");
|
||||
buf.append(group);
|
||||
buf.append("</a></h2>");
|
||||
buf.append("<ul>");
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) {
|
||||
String stat = (String)statIter.next();
|
||||
buf.append("<li><b><a name=\"");
|
||||
buf.append(stat);
|
||||
buf.append("\">");
|
||||
buf.append(stat);
|
||||
buf.append("</a></b><br />");
|
||||
if (_context.statManager().isFrequency(stat))
|
||||
renderFrequency(stat, buf);
|
||||
else
|
||||
renderRate(stat, buf);
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
}
|
||||
out.write("</ul><hr />");
|
||||
}
|
||||
out.flush();
|
||||
}
|
||||
|
||||
private void renderFrequency(String name, StringBuffer buf) {
|
||||
FrequencyStat freq = _context.statManager().getFrequency(name);
|
||||
buf.append("<i>");
|
||||
buf.append(freq.getDescription());
|
||||
buf.append("</i><br />");
|
||||
long uptime = _context.router().getUptime();
|
||||
long periods[] = freq.getPeriods();
|
||||
Arrays.sort(periods);
|
||||
for (int i = 0; i < periods.length; i++) {
|
||||
if (periods[i] > uptime)
|
||||
break;
|
||||
renderPeriod(buf, periods[i], "frequency");
|
||||
Frequency curFreq = freq.getFrequency(periods[i]);
|
||||
buf.append(" <i>avg per period:</i> (");
|
||||
buf.append(num(curFreq.getAverageEventsPerPeriod()));
|
||||
buf.append(", max ");
|
||||
buf.append(num(curFreq.getMaxAverageEventsPerPeriod()));
|
||||
if ( (curFreq.getMaxAverageEventsPerPeriod() > 0) && (curFreq.getAverageEventsPerPeriod() > 0) ) {
|
||||
buf.append(", current is ");
|
||||
buf.append(pct(curFreq.getAverageEventsPerPeriod()/curFreq.getMaxAverageEventsPerPeriod()));
|
||||
buf.append(" of max");
|
||||
}
|
||||
buf.append(")");
|
||||
//buf.append(" <i>avg interval between updates:</i> (").append(num(curFreq.getAverageInterval())).append("ms, min ");
|
||||
//buf.append(num(curFreq.getMinAverageInterval())).append("ms)");
|
||||
buf.append(" <i>strict average per period:</i> ");
|
||||
buf.append(num(curFreq.getStrictAverageEventsPerPeriod()));
|
||||
buf.append(" events (averaged ");
|
||||
buf.append(" using the lifetime of ");
|
||||
buf.append(curFreq.getEventCount());
|
||||
buf.append(" events)");
|
||||
buf.append("<br />");
|
||||
}
|
||||
buf.append("<br />");
|
||||
}
|
||||
|
||||
private void renderRate(String name, StringBuffer buf) {
|
||||
RateStat rate = _context.statManager().getRate(name);
|
||||
String d = rate.getDescription();
|
||||
if (! "".equals(d)) {
|
||||
buf.append("<i>");
|
||||
buf.append(d);
|
||||
buf.append("</i><br />");
|
||||
}
|
||||
if (rate.getLifetimeEventCount() <= 0) {
|
||||
buf.append("No lifetime events<br /> <br />");
|
||||
return;
|
||||
}
|
||||
long now = _context.clock().now();
|
||||
long periods[] = rate.getPeriods();
|
||||
Arrays.sort(periods);
|
||||
buf.append("<ul>");
|
||||
for (int i = 0; i < periods.length; i++) {
|
||||
Rate curRate = rate.getRate(periods[i]);
|
||||
if (curRate.getLastCoalesceDate() <= curRate.getCreationDate())
|
||||
break;
|
||||
buf.append("<li>");
|
||||
renderPeriod(buf, periods[i], "rate");
|
||||
if (curRate.getLastEventCount() > 0) {
|
||||
buf.append( "<i>avg value:</i> (");
|
||||
buf.append(num(curRate.getAverageValue()));
|
||||
buf.append(" peak ");
|
||||
buf.append(num(curRate.getExtremeAverageValue()));
|
||||
buf.append(", [");
|
||||
buf.append(pct(curRate.getPercentageOfExtremeValue()));
|
||||
buf.append(" of max");
|
||||
buf.append(", and ");
|
||||
buf.append(pct(curRate.getPercentageOfLifetimeValue()));
|
||||
buf.append(" of lifetime average]");
|
||||
|
||||
buf.append(")");
|
||||
buf.append(" <i>highest total period value:</i> (");
|
||||
buf.append(num(curRate.getExtremeTotalValue()));
|
||||
buf.append(")");
|
||||
if (curRate.getLifetimeTotalEventTime() > 0) {
|
||||
buf.append(" <i>saturation:</i> (");
|
||||
buf.append(pct(curRate.getLastEventSaturation()));
|
||||
buf.append(")");
|
||||
buf.append(" <i>saturated limit:</i> (");
|
||||
buf.append(num(curRate.getLastSaturationLimit()));
|
||||
buf.append(")");
|
||||
buf.append(" <i>peak saturation:</i> (");
|
||||
buf.append(pct(curRate.getExtremeEventSaturation()));
|
||||
buf.append(")");
|
||||
buf.append(" <i>peak saturated limit:</i> (");
|
||||
buf.append(num(curRate.getExtremeSaturationLimit()));
|
||||
buf.append(")");
|
||||
}
|
||||
buf.append(" <i>events:</i> ");
|
||||
buf.append(curRate.getLastEventCount());
|
||||
buf.append(" <i>in this period which ended:</i> ");
|
||||
buf.append(DataHelper.formatDuration(now - curRate.getLastCoalesceDate()));
|
||||
buf.append(" ago ");
|
||||
} else {
|
||||
buf.append(" <i>No events</i> ");
|
||||
}
|
||||
long numPeriods = curRate.getLifetimePeriods();
|
||||
if (numPeriods > 0) {
|
||||
double avgFrequency = curRate.getLifetimeEventCount() / (double)numPeriods;
|
||||
double peakFrequency = curRate.getExtremeEventCount();
|
||||
buf.append(" (lifetime average: ");
|
||||
buf.append(num(avgFrequency));
|
||||
buf.append(", peak average: ");
|
||||
buf.append(curRate.getExtremeEventCount());
|
||||
buf.append(")");
|
||||
}
|
||||
if (curRate.getSummaryListener() != null) {
|
||||
buf.append(" <a href=\"viewstat.jsp?stat=").append(name);
|
||||
buf.append("&period=").append(periods[i]);
|
||||
buf.append("\" title=\"Render summarized data\">render</a>");
|
||||
buf.append(" <a href=\"viewstat.jsp?stat=").append(name);
|
||||
buf.append("&period=").append(periods[i]).append("&showEvents=true\" title=\"Render summarized event counts\">events</a>");
|
||||
buf.append(" (as <a href=\"viewstat.jsp?stat=").append(name);
|
||||
buf.append("&period=").append(periods[i]);
|
||||
buf.append("&format=xml\" title=\"Dump stat history as XML\">XML</a>");
|
||||
buf.append(" in a format <a href=\"http://people.ee.ethz.ch/~oetiker/webtools/rrdtool\">RRDTool</a> understands)");
|
||||
}
|
||||
buf.append("</li>");
|
||||
}
|
||||
// Display the strict average
|
||||
buf.append("<li><b>lifetime average value:</b> ");
|
||||
buf.append(num(rate.getLifetimeAverageValue()));
|
||||
buf.append(" over ");
|
||||
buf.append(rate.getLifetimeEventCount());
|
||||
buf.append(" events<br /></li>");
|
||||
buf.append("</ul>");
|
||||
buf.append("<br />");
|
||||
}
|
||||
|
||||
private static void renderPeriod(StringBuffer buf, long period, String name) {
|
||||
buf.append("<b>");
|
||||
buf.append(DataHelper.formatDuration(period));
|
||||
buf.append(" ");
|
||||
buf.append(name);
|
||||
buf.append(":</b> ");
|
||||
}
|
||||
|
||||
private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00");
|
||||
private final static String num(double num) { synchronized (_fmt) { return _fmt.format(num); } }
|
||||
|
||||
private final static DecimalFormat _pct = new DecimalFormat("#0.00%");
|
||||
private final static String pct(double num) { synchronized (_pct) { return _pct.format(num); } }
|
||||
}
|
||||
@@ -8,16 +8,23 @@ package net.i2p.router.client;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.net.Socket;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import net.i2p.client.I2PClient;
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.crypto.TransientSessionKeyManager;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
@@ -47,12 +54,12 @@ import net.i2p.util.SimpleTimer;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class ClientConnectionRunner {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private ClientManager _manager;
|
||||
class ClientConnectionRunner {
|
||||
private final Log _log;
|
||||
protected final RouterContext _context;
|
||||
private final ClientManager _manager;
|
||||
/** socket for this particular peer connection */
|
||||
private Socket _socket;
|
||||
private final Socket _socket;
|
||||
/** output stream of the socket that I2CP messages bound to the client should be written to */
|
||||
private OutputStream _out;
|
||||
/** session ID of the current client */
|
||||
@@ -60,25 +67,42 @@ public class ClientConnectionRunner {
|
||||
/** user's config */
|
||||
private SessionConfig _config;
|
||||
/** static mapping of MessageId to Payload, storing messages for retrieval */
|
||||
private Map<MessageId, Payload> _messages;
|
||||
private final Map<MessageId, Payload> _messages;
|
||||
/** lease set request state, or null if there is no request pending on at the moment */
|
||||
private LeaseRequestState _leaseRequest;
|
||||
/** currently allocated leaseSet, or null if none is allocated */
|
||||
private LeaseSet _currentLeaseSet;
|
||||
/** set of messageIds created but not yet ACCEPTED */
|
||||
private Set<MessageId> _acceptedPending;
|
||||
private final Set<MessageId> _acceptedPending;
|
||||
/** thingy that does stuff */
|
||||
private I2CPMessageReader _reader;
|
||||
protected I2CPMessageReader _reader;
|
||||
/** just for this destination */
|
||||
private SessionKeyManager _sessionKeyManager;
|
||||
/**
|
||||
* This contains the last 10 MessageIds that have had their (non-ack) status
|
||||
* delivered to the client (so that we can be sure only to update when necessary)
|
||||
*/
|
||||
private List _alreadyProcessed;
|
||||
private final List<MessageId> _alreadyProcessed;
|
||||
private ClientWriterRunner _writer;
|
||||
private Hash _destHashCache;
|
||||
/** are we, uh, dead */
|
||||
private boolean _dead;
|
||||
/** For outbound traffic. true if i2cp.messageReliability = "none"; @since 0.8.1 */
|
||||
private boolean _dontSendMSM;
|
||||
private final AtomicInteger _messageId; // messageId counter
|
||||
|
||||
// Was 32767 since the beginning (04-2004).
|
||||
// But it's 4 bytes in the I2CP spec and stored as a long in MessageID....
|
||||
// If this is too low and wraps around, I2CP VerifyUsage could delete the wrong message,
|
||||
// e.g. on local access
|
||||
private static final int MAX_MESSAGE_ID = 0x4000000;
|
||||
|
||||
private static final int BUF_SIZE = 32*1024;
|
||||
|
||||
/** @since 0.9.2 */
|
||||
private static final String PROP_TAGS = "crypto.tagsToSend";
|
||||
private static final String PROP_THRESH = "crypto.lowTagThreshold";
|
||||
|
||||
/**
|
||||
* Create a new runner against the given socket
|
||||
*
|
||||
@@ -88,11 +112,10 @@ public class ClientConnectionRunner {
|
||||
_log = _context.logManager().getLog(ClientConnectionRunner.class);
|
||||
_manager = manager;
|
||||
_socket = socket;
|
||||
_config = null;
|
||||
_messages = new ConcurrentHashMap();
|
||||
_alreadyProcessed = new ArrayList();
|
||||
_acceptedPending = new ConcurrentHashSet();
|
||||
_dead = false;
|
||||
_messageId = new AtomicInteger(_context.random().nextInt());
|
||||
}
|
||||
|
||||
private static volatile int __id = 0;
|
||||
@@ -104,14 +127,15 @@ public class ClientConnectionRunner {
|
||||
*/
|
||||
public void startRunning() {
|
||||
try {
|
||||
_reader = new I2CPMessageReader(_socket.getInputStream(), new ClientMessageEventListener(_context, this));
|
||||
_reader = new I2CPMessageReader(new BufferedInputStream(_socket.getInputStream(), BUF_SIZE),
|
||||
new ClientMessageEventListener(_context, this, true));
|
||||
_writer = new ClientWriterRunner(_context, this);
|
||||
I2PThread t = new I2PThread(_writer);
|
||||
t.setName("I2CP Writer " + ++__id);
|
||||
t.setDaemon(true);
|
||||
t.setPriority(I2PThread.MAX_PRIORITY);
|
||||
t.start();
|
||||
_out = _socket.getOutputStream();
|
||||
_out = _socket.getOutputStream(); // FIXME OWCH! needs a better way so it can be final. FIXME
|
||||
_reader.startReading();
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error starting up the runner", ioe);
|
||||
@@ -130,20 +154,23 @@ public class ClientConnectionRunner {
|
||||
if (_writer != null) _writer.stopWriting();
|
||||
if (_socket != null) try { _socket.close(); } catch (IOException ioe) { }
|
||||
_messages.clear();
|
||||
if (_manager != null)
|
||||
_manager.unregisterConnection(this);
|
||||
if (_sessionKeyManager != null)
|
||||
_sessionKeyManager.shutdown();
|
||||
_manager.unregisterConnection(this);
|
||||
if (_currentLeaseSet != null)
|
||||
_context.netDb().unpublish(_currentLeaseSet);
|
||||
_leaseRequest = null;
|
||||
synchronized (_alreadyProcessed) {
|
||||
_alreadyProcessed.clear();
|
||||
}
|
||||
_config = null;
|
||||
//_config = null;
|
||||
//_manager = null;
|
||||
}
|
||||
|
||||
/** current client's config */
|
||||
public SessionConfig getConfig() { return _config; }
|
||||
/** current client's sessionkeymanager */
|
||||
public SessionKeyManager getSessionKeyManager() { return _sessionKeyManager; }
|
||||
/** currently allocated leaseSet */
|
||||
public LeaseSet getLeaseSet() { return _currentLeaseSet; }
|
||||
void setLeaseSet(LeaseSet ls) { _currentLeaseSet = ls; }
|
||||
@@ -182,13 +209,44 @@ public class ClientConnectionRunner {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("SessionEstablished called for destination " + _destHashCache.toBase64());
|
||||
_config = config;
|
||||
// We process a few options here, but most are handled by the tunnel manager.
|
||||
// The ones here can't be changed later.
|
||||
Properties opts = config.getOptions();
|
||||
if (opts != null)
|
||||
_dontSendMSM = "none".equals(config.getOptions().getProperty(I2PClient.PROP_RELIABILITY, "").toLowerCase(Locale.US));
|
||||
// per-destination session key manager to prevent rather easy correlation
|
||||
if (_sessionKeyManager == null) {
|
||||
int tags = TransientSessionKeyManager.DEFAULT_TAGS;
|
||||
int thresh = TransientSessionKeyManager.LOW_THRESHOLD;
|
||||
if (opts != null) {
|
||||
String ptags = opts.getProperty(PROP_TAGS);
|
||||
if (ptags != null) {
|
||||
try { tags = Integer.parseInt(ptags); } catch (NumberFormatException nfe) {}
|
||||
}
|
||||
String pthresh = opts.getProperty(PROP_THRESH);
|
||||
if (pthresh != null) {
|
||||
try { thresh = Integer.parseInt(pthresh); } catch (NumberFormatException nfe) {}
|
||||
}
|
||||
}
|
||||
_sessionKeyManager = new TransientSessionKeyManager(_context, tags, thresh);
|
||||
} else {
|
||||
_log.error("SessionEstablished called for twice for destination " + _destHashCache.toBase64().substring(0,4));
|
||||
}
|
||||
_manager.destinationEstablished(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a notification to the client that their message (id specified) was
|
||||
* delivered (or failed delivery)
|
||||
* Note that this sends the Guaranteed status codes, even though we only support best effort.
|
||||
* Doesn't do anything if i2cp.messageReliability = "none"
|
||||
*/
|
||||
void updateMessageDeliveryStatus(MessageId id, boolean delivered) {
|
||||
if (_dead) return;
|
||||
if (_dead || _dontSendMSM)
|
||||
return;
|
||||
_context.jobQueue().addJob(new MessageDeliveryStatusUpdate(id, delivered));
|
||||
}
|
||||
|
||||
/**
|
||||
* called after a new leaseSet is granted by the client, the NetworkDb has been
|
||||
* updated. This takes care of all the LeaseRequestState stuff (including firing any jobs)
|
||||
@@ -213,17 +271,33 @@ public class ClientConnectionRunner {
|
||||
_context.jobQueue().addJob(state.getOnGranted());
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a DisconnectMessage and log with level Log.ERROR.
|
||||
* This is always bad.
|
||||
* See ClientMessageEventListener.handleCreateSession()
|
||||
* for why we don't send a SessionStatusMessage when we do this.
|
||||
*/
|
||||
void disconnectClient(String reason) {
|
||||
if (_log.shouldLog(Log.CRIT))
|
||||
_log.log(Log.CRIT, "Disconnecting the client ("
|
||||
+ _config
|
||||
+ ": " + reason);
|
||||
disconnectClient(reason, Log.ERROR);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param logLevel e.g. Log.WARN
|
||||
* @since 0.8.2
|
||||
*/
|
||||
void disconnectClient(String reason, int logLevel) {
|
||||
if (_log.shouldLog(logLevel))
|
||||
_log.log(logLevel, "Disconnecting the client - "
|
||||
+ reason
|
||||
+ " config: "
|
||||
+ _config);
|
||||
DisconnectMessage msg = new DisconnectMessage();
|
||||
msg.setReason(reason);
|
||||
try {
|
||||
doSend(msg);
|
||||
} catch (I2CPMessageException ime) {
|
||||
_log.error("Error writing out the disconnect message: " + ime);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error writing out the disconnect message: " + ime);
|
||||
}
|
||||
stopRunning();
|
||||
}
|
||||
@@ -240,33 +314,41 @@ public class ClientConnectionRunner {
|
||||
MessageId id = new MessageId();
|
||||
id.setMessageId(getNextMessageId());
|
||||
long expiration = 0;
|
||||
if (message instanceof SendMessageExpiresMessage)
|
||||
expiration = ((SendMessageExpiresMessage) message).getExpiration().getTime();
|
||||
_acceptedPending.add(id);
|
||||
int flags = 0;
|
||||
if (message.getType() == SendMessageExpiresMessage.MESSAGE_TYPE) {
|
||||
SendMessageExpiresMessage msg = (SendMessageExpiresMessage) message;
|
||||
expiration = msg.getExpirationTime();
|
||||
flags = msg.getFlags();
|
||||
}
|
||||
if (!_dontSendMSM)
|
||||
_acceptedPending.add(id);
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("** Receiving message [" + id.getMessageId() + "] with payload of size ["
|
||||
+ payload.getSize() + "]" + " for session [" + _sessionId.getSessionId()
|
||||
+ "]");
|
||||
long beforeDistribute = _context.clock().now();
|
||||
//long beforeDistribute = _context.clock().now();
|
||||
// the following blocks as described above
|
||||
SessionConfig cfg = _config;
|
||||
if (cfg != null)
|
||||
_manager.distributeMessage(cfg.getDestination(), dest, payload, id, expiration);
|
||||
long timeToDistribute = _context.clock().now() - beforeDistribute;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.warn("Time to distribute in the manager to "
|
||||
+ dest.calculateHash().toBase64() + ": "
|
||||
+ timeToDistribute);
|
||||
_manager.distributeMessage(cfg.getDestination(), dest, payload, id, expiration, flags);
|
||||
// else log error?
|
||||
//long timeToDistribute = _context.clock().now() - beforeDistribute;
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.warn("Time to distribute in the manager to "
|
||||
// + dest.calculateHash().toBase64() + ": "
|
||||
// + timeToDistribute);
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a notification to the client that their message (id specified) was accepted
|
||||
* for delivery (but not necessarily delivered)
|
||||
*
|
||||
* Doesn't do anything if i2cp.messageReliability = "none"
|
||||
*/
|
||||
void ackSendMessage(MessageId id, long nonce) {
|
||||
if (_dontSendMSM)
|
||||
return;
|
||||
SessionId sid = _sessionId;
|
||||
if (sid == null) return;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@@ -334,18 +416,21 @@ public class ClientConnectionRunner {
|
||||
// TunnelPool.locked_buildNewLeaseSet() ensures that leases are sorted,
|
||||
// so the comparison will always work.
|
||||
int leases = set.getLeaseCount();
|
||||
if (_currentLeaseSet != null && _currentLeaseSet.getLeaseCount() == leases) {
|
||||
for (int i = 0; i < leases; i++) {
|
||||
if (! _currentLeaseSet.getLease(i).getTunnelId().equals(set.getLease(i).getTunnelId()))
|
||||
break;
|
||||
if (! _currentLeaseSet.getLease(i).getGateway().equals(set.getLease(i).getGateway()))
|
||||
break;
|
||||
if (i == leases - 1) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Requested leaseSet hasn't changed");
|
||||
if (onCreateJob != null)
|
||||
_context.jobQueue().addJob(onCreateJob);
|
||||
return; // no change
|
||||
// synch so _currentLeaseSet isn't changed out from under us
|
||||
synchronized (this) {
|
||||
if (_currentLeaseSet != null && _currentLeaseSet.getLeaseCount() == leases) {
|
||||
for (int i = 0; i < leases; i++) {
|
||||
if (! _currentLeaseSet.getLease(i).getTunnelId().equals(set.getLease(i).getTunnelId()))
|
||||
break;
|
||||
if (! _currentLeaseSet.getLease(i).getGateway().equals(set.getLease(i).getGateway()))
|
||||
break;
|
||||
if (i == leases - 1) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Requested leaseSet hasn't changed");
|
||||
if (onCreateJob != null)
|
||||
_context.jobQueue().addJob(onCreateJob);
|
||||
return; // no change
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -365,7 +450,7 @@ public class ClientConnectionRunner {
|
||||
// theirs is newer
|
||||
} else {
|
||||
// ours is newer, so wait a few secs and retry
|
||||
SimpleScheduler.getInstance().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000);
|
||||
_context.simpleScheduler().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000);
|
||||
}
|
||||
// fire onCreated?
|
||||
return; // already requesting
|
||||
@@ -406,21 +491,28 @@ public class ClientConnectionRunner {
|
||||
void writeMessage(I2CPMessage msg) {
|
||||
long before = _context.clock().now();
|
||||
try {
|
||||
// We don't still need synchronization here? isn't ClientWriterRunner the only writer?
|
||||
synchronized (_out) {
|
||||
msg.writeMessage(_out);
|
||||
_out.flush();
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("after writeMessage("+ msg.getClass().getName() + "): "
|
||||
+ (_context.clock().now()-before) + "ms");;
|
||||
+ (_context.clock().now()-before) + "ms");
|
||||
} catch (I2CPMessageException ime) {
|
||||
_log.error("Message exception sending I2CP message: " + ime);
|
||||
_log.error("Error sending I2CP message to client", ime);
|
||||
stopRunning();
|
||||
} catch (EOFException eofe) {
|
||||
// only warn if client went away
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error sending I2CP message - client went away", eofe);
|
||||
stopRunning();
|
||||
} catch (IOException ioe) {
|
||||
_log.error("IO exception sending I2CP message: " + ioe);
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("IO Error sending I2CP message to client", ioe);
|
||||
stopRunning();
|
||||
} catch (Throwable t) {
|
||||
_log.log(Log.CRIT, "Unhandled exception sending I2CP message", t);
|
||||
_log.log(Log.CRIT, "Unhandled exception sending I2CP message to client", t);
|
||||
stopRunning();
|
||||
} finally {
|
||||
long after = _context.clock().now();
|
||||
@@ -461,18 +553,9 @@ public class ClientConnectionRunner {
|
||||
        }
    }

    // this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
    private final static int MAX_MESSAGE_ID = 32767;
    private static volatile int _messageId = RandomSource.getInstance().nextInt(MAX_MESSAGE_ID); // messageId counter
    private static Object _messageIdLock = new Object();

    static int getNextMessageId() {
        synchronized (_messageIdLock) {
            int messageId = (++_messageId)%MAX_MESSAGE_ID;
            if (_messageId >= MAX_MESSAGE_ID)
                _messageId = 0;
            return messageId;
        }
    public int getNextMessageId() {
        // Don't % so we don't get negative IDs
        return _messageId.incrementAndGet() & (MAX_MESSAGE_ID - 1);
    }
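// Illustrative sketch, not part of this patch (class and variable names here are
// made up; the 0x4000000 constant comes from the MAX_MESSAGE_ID declared earlier
// in this file). It shows why the new getNextMessageId() masks with
// (MAX_MESSAGE_ID - 1) rather than using %: the limit is a power of two, so the
// mask always lands in [0, MAX_MESSAGE_ID) even after the AtomicInteger counter
// overflows to a negative value, whereas Java's % operator returns a negative
// remainder in that case.
class MessageIdMaskDemo {
    private static final int MAX_MESSAGE_ID = 0x4000000;

    public static void main(String[] args) {
        int overflowed = Integer.MIN_VALUE + 5;                 // counter value after wrap-around
        System.out.println(overflowed % MAX_MESSAGE_ID);        // -67108859: negative, unusable as an ID
        System.out.println(overflowed & (MAX_MESSAGE_ID - 1));  // 5: always in range
    }
}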
|
||||
/**
|
||||
@@ -505,12 +588,17 @@ public class ClientConnectionRunner {
|
||||
}
|
||||
|
||||
public String getName() { return "Update Delivery Status"; }
|
||||
|
||||
/**
|
||||
* Note that this sends the Guaranteed status codes, even though we only support best effort.
|
||||
*/
|
||||
public void runJob() {
|
||||
if (_dead) return;
|
||||
|
||||
MessageStatusMessage msg = new MessageStatusMessage();
|
||||
msg.setMessageId(_messageId.getMessageId());
|
||||
msg.setSessionId(_sessionId.getSessionId());
|
||||
// has to be >= 0, it is initialized to -1
|
||||
msg.setNonce(2);
|
||||
msg.setSize(0);
|
||||
if (_success)
|
||||
@@ -558,7 +646,7 @@ public class ClientConnectionRunner {
|
||||
+ " for session [" + _sessionId.getSessionId()
|
||||
+ "] (with nonce=2), retrying after ["
|
||||
+ (_context.clock().now() - _lastTried)
|
||||
+ "]", getAddedBy());
|
||||
+ "]");
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Updating message status for message " + _messageId + " to "
|
||||
|
||||
@@ -9,6 +9,7 @@ package net.i2p.router.client;
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
@@ -23,32 +24,47 @@ import net.i2p.util.Log;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class ClientListenerRunner implements Runnable {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private ClientManager _manager;
|
||||
private ServerSocket _socket;
|
||||
private int _port;
|
||||
private boolean _bindAllInterfaces;
|
||||
private boolean _running;
|
||||
private long _nextFailDelay = 1000;
|
||||
class ClientListenerRunner implements Runnable {
|
||||
protected final Log _log;
|
||||
protected final RouterContext _context;
|
||||
protected final ClientManager _manager;
|
||||
protected ServerSocket _socket;
|
||||
protected final int _port;
|
||||
protected final boolean _bindAllInterfaces;
|
||||
protected boolean _running;
|
||||
protected boolean _listening;
|
||||
|
||||
public static final String BIND_ALL_INTERFACES = "i2cp.tcp.bindAllInterfaces";
|
||||
|
||||
public ClientListenerRunner(RouterContext context, ClientManager manager, int port) {
|
||||
_context = context;
|
||||
_log = _context.logManager().getLog(ClientListenerRunner.class);
|
||||
_log = _context.logManager().getLog(getClass());
|
||||
_manager = manager;
|
||||
_port = port;
|
||||
_running = false;
|
||||
|
||||
String val = context.getProperty(BIND_ALL_INTERFACES, "False");
|
||||
_bindAllInterfaces = Boolean.valueOf(val).booleanValue();
|
||||
_bindAllInterfaces = context.getBooleanProperty(BIND_ALL_INTERFACES);
|
||||
}
|
||||
|
||||
public void setPort(int port) { _port = port; }
|
||||
public int getPort() { return _port; }
|
||||
public boolean isListening() { return _running && _listening; }
|
||||
|
||||
/**
|
||||
* Get a ServerSocket.
|
||||
* Split out so it can be overridden for SSL.
|
||||
* @since 0.8.3
|
||||
*/
|
||||
protected ServerSocket getServerSocket() throws IOException {
|
||||
if (_bindAllInterfaces) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Listening on port " + _port + " on all interfaces");
|
||||
return new ServerSocket(_port);
|
||||
} else {
|
||||
String listenInterface = _context.getProperty(ClientManagerFacadeImpl.PROP_CLIENT_HOST,
|
||||
ClientManagerFacadeImpl.DEFAULT_HOST);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Listening on port " + _port + " of the specific interface: " + listenInterface);
|
||||
return new ServerSocket(_port, 0, InetAddress.getByName(listenInterface));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start up the socket listener, listens for connections, and
|
||||
* fires those connections off via {@link #runConnection runConnection}.
|
||||
@@ -58,26 +74,16 @@ public class ClientListenerRunner implements Runnable {
|
||||
*/
|
||||
public void runServer() {
|
||||
_running = true;
|
||||
int curDelay = 0;
|
||||
int curDelay = 1000;
|
||||
while (_running) {
|
||||
try {
|
||||
if (_bindAllInterfaces) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Listening on port " + _port + " on all interfaces");
|
||||
_socket = new ServerSocket(_port);
|
||||
} else {
|
||||
String listenInterface = _context.getProperty(ClientManagerFacadeImpl.PROP_CLIENT_HOST,
|
||||
ClientManagerFacadeImpl.DEFAULT_HOST);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Listening on port " + _port + " of the specific interface: " + listenInterface);
|
||||
_socket = new ServerSocket(_port, 0, InetAddress.getByName(listenInterface));
|
||||
}
|
||||
|
||||
_socket = getServerSocket();
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("ServerSocket created, before accept: " + _socket);
|
||||
|
||||
curDelay = 0;
|
||||
curDelay = 1000;
|
||||
_listening = true;
|
||||
while (_running) {
|
||||
try {
|
||||
Socket socket = _socket.accept();
|
||||
@@ -88,7 +94,9 @@ public class ClientListenerRunner implements Runnable {
|
||||
} else {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Refused connection from " + socket.getInetAddress());
|
||||
socket.close();
|
||||
try {
|
||||
socket.close();
|
||||
} catch (IOException ioe) {}
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
if (_context.router().isAlive())
|
||||
@@ -96,6 +104,7 @@ public class ClientListenerRunner implements Runnable {
|
||||
} catch (Throwable t) {
|
||||
if (_context.router().isAlive())
|
||||
_log.error("Fatal error running client listener - killing the thread!", t);
|
||||
_listening = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -104,6 +113,7 @@ public class ClientListenerRunner implements Runnable {
|
||||
_log.error("Error listening on port " + _port, ioe);
|
||||
}
|
||||
|
||||
_listening = false;
|
||||
if (_socket != null) {
|
||||
try { _socket.close(); } catch (IOException ioe) {}
|
||||
_socket = null;
|
||||
@@ -111,39 +121,47 @@ public class ClientListenerRunner implements Runnable {
|
||||
|
||||
            if (!_context.router().isAlive()) break;

            _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
            try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
            curDelay += _nextFailDelay;
            _nextFailDelay *= 5;
            if (curDelay < 60*1000)
                _log.error("Error listening, waiting " + (curDelay/1000) + "s before we try again");
            else
                _log.log(Log.CRIT, "I2CP error listening to port " + _port + " - is another I2P instance running? Resolve conflicts and restart");
            try { Thread.sleep(curDelay); } catch (InterruptedException ie) {}
            curDelay = Math.min(curDelay*3, 60*1000);
        }
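// Illustrative sketch, not part of this patch (the class name is made up; the
// 1000 ms starting value, the *3 growth and the 60 s cap come from runServer()
// above). It just prints the retry delays the new capped backoff produces,
// replacing the old unbounded _nextFailDelay *= 5 growth.
class ListenRetryDelayDemo {
    public static void main(String[] args) {
        int curDelay = 1000;                                // initial delay used in runServer()
        for (int attempt = 1; attempt <= 8; attempt++) {
            System.out.println("attempt " + attempt + ": wait " + curDelay + " ms");
            curDelay = Math.min(curDelay * 3, 60 * 1000);   // 1s, 3s, 9s, 27s, then 60s from there on
        }
    }
}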
|
||||
if (_context.router().isAlive())
|
||||
_log.error("CANCELING I2CP LISTEN. delay = " + curDelay, new Exception("I2CP Listen cancelled!!!"));
|
||||
_log.error("CANCELING I2CP LISTEN", new Exception("I2CP Listen cancelled!!!"));
|
||||
_running = false;
|
||||
}
|
||||
|
||||
    /** give the i2cp client 5 seconds to show that they're really i2cp clients */
    private final static int CONNECT_TIMEOUT = 5*1000;

    private boolean validate(Socket socket) {
    protected final static int CONNECT_TIMEOUT = 5*1000;
    private final static int LOOP_DELAY = 250;

    /**
     * Verify the first byte.
     * The InternalSocket doesn't support SoTimeout, so use available()
     * instead to prevent hanging.
     */
    protected boolean validate(Socket socket) {
        try {
            socket.setSoTimeout(CONNECT_TIMEOUT);
            int read = socket.getInputStream().read();
            if (read != I2PClient.PROTOCOL_BYTE)
                return false;
            socket.setSoTimeout(0);
            return true;
        } catch (IOException ioe) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping");
            return false;
        }
            InputStream is = socket.getInputStream();
            for (int i = 0; i < CONNECT_TIMEOUT / LOOP_DELAY; i++) {
                if (is.available() > 0)
                    return is.read() == I2PClient.PROTOCOL_BYTE;
                try { Thread.sleep(LOOP_DELAY); } catch (InterruptedException ie) {}
            }
        } catch (IOException ioe) {}
        if (_log.shouldLog(Log.WARN))
            _log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping");
        return false;
    }
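// Illustrative sketch, not part of this patch (class and method names are made up;
// the 5 s timeout and 250 ms poll interval are CONNECT_TIMEOUT and LOOP_DELAY from
// the code above). It isolates the bounded-wait pattern the reworked validate()
// uses because the router's InternalSocket does not honor setSoTimeout(): poll
// available() until the first byte arrives or the time budget runs out.
class FirstByteProbe {
    static boolean readFirstByteWithTimeout(java.io.InputStream in, int expected)
            throws java.io.IOException {
        final int timeout = 5 * 1000;       // CONNECT_TIMEOUT
        final int delay = 250;              // LOOP_DELAY
        for (int i = 0; i < timeout / delay; i++) {
            if (in.available() > 0)
                return in.read() == expected;   // compare against the expected protocol byte
            try { Thread.sleep(delay); } catch (InterruptedException ie) {}
        }
        return false;                       // nothing arrived within the time budget
    }
}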
|
||||
/**
|
||||
* Handle the connection by passing it off to a {@link ClientConnectionRunner ClientConnectionRunner}
|
||||
*
|
||||
*/
|
||||
protected void runConnection(Socket socket) throws IOException {
|
||||
protected void runConnection(Socket socket) {
|
||||
ClientConnectionRunner runner = new ClientConnectionRunner(_context, _manager, socket);
|
||||
_manager.registerConnection(runner);
|
||||
}
|
||||
|
||||
@@ -13,23 +13,24 @@ import java.io.Writer;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.client.I2PSessionException;
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.Payload;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2cp.I2CPMessage;
|
||||
import net.i2p.data.i2cp.MessageId;
|
||||
import net.i2p.data.i2cp.SessionConfig;
|
||||
import net.i2p.internal.I2CPMessageQueue;
|
||||
import net.i2p.router.ClientManagerFacade;
|
||||
import net.i2p.router.ClientMessage;
|
||||
import net.i2p.router.Job;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@@ -38,76 +39,106 @@ import net.i2p.util.Log;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class ClientManager {
|
||||
private Log _log;
|
||||
class ClientManager {
|
||||
private final Log _log;
|
||||
private ClientListenerRunner _listener;
|
||||
private HashMap _runners; // Destination --> ClientConnectionRunner
|
||||
private Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
|
||||
private RouterContext _ctx;
|
||||
private final HashMap<Destination, ClientConnectionRunner> _runners; // Destination --> ClientConnectionRunner
|
||||
private final Set<ClientConnectionRunner> _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
|
||||
private final RouterContext _ctx;
|
||||
private boolean _isStarted;
|
||||
|
||||
/** Disable external interface, allow internal clients only @since 0.8.3 */
|
||||
private static final String PROP_DISABLE_EXTERNAL = "i2cp.disableInterface";
|
||||
/** SSL interface (only) @since 0.8.3 */
|
||||
private static final String PROP_ENABLE_SSL = "i2cp.SSL";
|
||||
|
||||
/** ms to wait before rechecking for inbound messages to deliver to clients */
|
||||
private final static int INBOUND_POLL_INTERVAL = 300;
|
||||
|
||||
public ClientManager(RouterContext context, int port) {
|
||||
_ctx = context;
|
||||
_log = context.logManager().getLog(ClientManager.class);
|
||||
_ctx.statManager().createRateStat("client.receiveMessageSize",
|
||||
"How large are messages received by the client?",
|
||||
"ClientMessages",
|
||||
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
//_ctx.statManager().createRateStat("client.receiveMessageSize",
|
||||
// "How large are messages received by the client?",
|
||||
// "ClientMessages",
|
||||
// new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_runners = new HashMap();
|
||||
_pendingRunners = new HashSet();
|
||||
_listener = new ClientListenerRunner(_ctx, this, port);
|
||||
Thread t = new I2PThread(_listener);
|
||||
t.setName("ClientListener:" + port);
|
||||
t.setDaemon(true);
|
||||
t.start();
|
||||
startListeners(port);
|
||||
}
|
||||
|
||||
/** Todo: Start a 3rd listener for IPV6? */
|
||||
private void startListeners(int port) {
|
||||
if (!_ctx.getBooleanProperty(PROP_DISABLE_EXTERNAL)) {
|
||||
// there's no option to start both an SSL and non-SSL listener
|
||||
if (_ctx.getBooleanProperty(PROP_ENABLE_SSL))
|
||||
_listener = new SSLClientListenerRunner(_ctx, this, port);
|
||||
else
|
||||
_listener = new ClientListenerRunner(_ctx, this, port);
|
||||
Thread t = new I2PThread(_listener, "ClientListener:" + port, true);
|
||||
t.start();
|
||||
}
|
||||
_isStarted = true;
|
||||
}
|
||||
|
||||
public void restart() {
|
||||
shutdown();
|
||||
shutdown("Router restart");
|
||||
|
||||
// to let the old listener die
|
||||
try { Thread.sleep(2*1000); } catch (InterruptedException ie) {}
|
||||
|
||||
int port = ClientManagerFacadeImpl.DEFAULT_PORT;
|
||||
String portStr = _ctx.router().getConfigSetting(ClientManagerFacadeImpl.PROP_CLIENT_PORT);
|
||||
if (portStr != null) {
|
||||
try {
|
||||
port = Integer.parseInt(portStr);
|
||||
} catch (NumberFormatException nfe) {
|
||||
_log.error("Error setting the port: " + portStr + " is not valid", nfe);
|
||||
}
|
||||
}
|
||||
_listener = new ClientListenerRunner(_ctx, this, port);
|
||||
Thread t = new I2PThread(_listener);
|
||||
t.setName("ClientListener:" + port);
|
||||
t.setDaemon(true);
|
||||
t.start();
|
||||
int port = _ctx.getProperty(ClientManagerFacadeImpl.PROP_CLIENT_PORT,
|
||||
ClientManagerFacadeImpl.DEFAULT_PORT);
|
||||
startListeners(port);
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
/**
|
||||
* @param msg message to send to the clients
|
||||
*/
|
||||
public void shutdown(String msg) {
|
||||
_isStarted = false;
|
||||
_log.info("Shutting down the ClientManager");
|
||||
_listener.stopListening();
|
||||
Set runners = new HashSet();
|
||||
if (_listener != null)
|
||||
_listener.stopListening();
|
||||
Set<ClientConnectionRunner> runners = new HashSet();
|
||||
synchronized (_runners) {
|
||||
for (Iterator iter = _runners.values().iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = iter.next();
|
||||
runners.add(runner);
|
||||
}
|
||||
}
|
||||
synchronized (_pendingRunners) {
|
||||
for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = _pendingRunners.iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = iter.next();
|
||||
runners.add(runner);
|
||||
}
|
||||
}
|
||||
for (Iterator iter = runners.iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
|
||||
runner.stopRunning();
|
||||
for (Iterator<ClientConnectionRunner> iter = runners.iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner runner = iter.next();
|
||||
runner.disconnectClient(msg, Log.WARN);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The InternalClientManager interface.
|
||||
* Connects to the router, receiving a message queue to talk to the router with.
|
||||
* @throws I2PSessionException if the router isn't ready
|
||||
* @since 0.8.3
|
||||
*/
|
||||
public I2CPMessageQueue internalConnect() throws I2PSessionException {
|
||||
if (!_isStarted)
|
||||
throw new I2PSessionException("Router client manager is shut down");
|
||||
// for now we make these unlimited size
|
||||
LinkedBlockingQueue<I2CPMessage> in = new LinkedBlockingQueue();
|
||||
LinkedBlockingQueue<I2CPMessage> out = new LinkedBlockingQueue();
|
||||
I2CPMessageQueue myQueue = new I2CPMessageQueueImpl(in, out);
|
||||
I2CPMessageQueue hisQueue = new I2CPMessageQueueImpl(out, in);
|
||||
ClientConnectionRunner runner = new QueuedClientConnectionRunner(_ctx, this, myQueue);
|
||||
registerConnection(runner);
|
||||
return hisQueue;
|
||||
}
|
||||
|
||||
public boolean isAlive() {
|
||||
return _isStarted && (_listener == null || _listener.isListening());
|
||||
}
|
||||
|
||||
public void registerConnection(ClientConnectionRunner runner) {
|
||||
synchronized (_pendingRunners) {
|
||||
_pendingRunners.add(runner);
|
||||
@@ -128,19 +159,34 @@ public class ClientManager {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to the clients list. Check for a dup destination.
|
||||
*/
|
||||
public void destinationEstablished(ClientConnectionRunner runner) {
|
||||
Destination dest = runner.getConfig().getDestination();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("DestinationEstablished called for destination " + runner.getConfig().getDestination().calculateHash().toBase64());
|
||||
_log.debug("DestinationEstablished called for destination " + dest.calculateHash().toBase64());
|
||||
|
||||
synchronized (_pendingRunners) {
|
||||
_pendingRunners.remove(runner);
|
||||
}
|
||||
boolean fail = false;
|
||||
synchronized (_runners) {
|
||||
_runners.put(runner.getConfig().getDestination(), runner);
|
||||
fail = _runners.containsKey(dest);
|
||||
if (!fail)
|
||||
_runners.put(dest, runner);
|
||||
}
|
||||
if (fail) {
|
||||
_log.log(Log.CRIT, "Client attempted to register duplicate destination " + dest.calculateHash().toBase64());
|
||||
runner.disconnectClient("Duplicate destination");
|
||||
}
|
||||
}
|
||||
|
||||
void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId, long expiration) {
|
||||
/**
|
||||
* Distribute message to a local or remote destination.
|
||||
* @param flags ignored for local
|
||||
*/
|
||||
void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId, long expiration, int flags) {
|
||||
// check if there is a runner for it
|
||||
ClientConnectionRunner runner = getRunner(toDest);
|
||||
if (runner != null) {
|
||||
@@ -151,6 +197,7 @@ public class ClientManager {
|
||||
// sender went away
|
||||
return;
|
||||
}
|
||||
// TODO can we just run this inline instead?
|
||||
_ctx.jobQueue().addJob(new DistributeLocal(toDest, runner, sender, fromDest, payload, msgId));
|
||||
} else {
|
||||
// remote. w00t
|
||||
@@ -164,22 +211,22 @@ public class ClientManager {
|
||||
ClientMessage msg = new ClientMessage();
|
||||
msg.setDestination(toDest);
|
||||
msg.setPayload(payload);
|
||||
msg.setReceptionInfo(null);
|
||||
msg.setSenderConfig(runner.getConfig());
|
||||
msg.setFromDestination(runner.getConfig().getDestination());
|
||||
msg.setMessageId(msgId);
|
||||
msg.setExpiration(expiration);
|
||||
msg.setFlags(flags);
|
||||
_ctx.clientMessagePool().add(msg, true);
|
||||
}
|
||||
}
|
||||
|
||||
private class DistributeLocal extends JobImpl {
|
||||
private Destination _toDest;
|
||||
private ClientConnectionRunner _to;
|
||||
private ClientConnectionRunner _from;
|
||||
private Destination _fromDest;
|
||||
private Payload _payload;
|
||||
private MessageId _msgId;
|
||||
private final Destination _toDest;
|
||||
private final ClientConnectionRunner _to;
|
||||
private final ClientConnectionRunner _from;
|
||||
private final Destination _fromDest;
|
||||
private final Payload _payload;
|
||||
private final MessageId _msgId;
|
||||
|
||||
public DistributeLocal(Destination toDest, ClientConnectionRunner to, ClientConnectionRunner from, Destination fromDest, Payload payload, MessageId id) {
|
||||
super(_ctx);
|
||||
@@ -261,22 +308,18 @@ public class ClientManager {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if we don't know about this destination at all
|
||||
*/
|
||||
public boolean shouldPublishLeaseSet(Hash destHash) {
|
||||
if (false) return true;
|
||||
if (destHash == null) return true;
|
||||
ClientConnectionRunner runner = getRunner(destHash);
|
||||
if (runner == null) return true;
|
||||
String dontPublish = runner.getConfig().getOptions().getProperty(ClientManagerFacade.PROP_CLIENT_ONLY);
|
||||
if ( (dontPublish != null) && ("true".equals(dontPublish)) ) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Not publishing the leaseSet for " + destHash.toBase64());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
return !Boolean.valueOf(runner.getConfig().getOptions().getProperty(ClientManagerFacade.PROP_CLIENT_ONLY)).booleanValue();
|
||||
}
|
||||
|
||||
public Set listClients() {
|
||||
Set rv = new HashSet();
|
||||
public Set<Destination> listClients() {
|
||||
Set<Destination> rv = new HashSet();
|
||||
synchronized (_runners) {
|
||||
rv.addAll(_runners.keySet());
|
||||
}
|
||||
@@ -290,7 +333,7 @@ public class ClientManager {
|
||||
long inLock = 0;
|
||||
synchronized (_runners) {
|
||||
inLock = _ctx.clock().now();
|
||||
rv = (ClientConnectionRunner)_runners.get(dest);
|
||||
rv = _runners.get(dest);
|
||||
}
|
||||
long afterLock = _ctx.clock().now();
|
||||
if (afterLock - beforeLock > 50) {
|
||||
@@ -312,12 +355,25 @@ public class ClientManager {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the client's SessionKeyManager
|
||||
* Use this instead of the RouterContext.sessionKeyManager()
|
||||
* to prevent correlation attacks across destinations
|
||||
*/
|
||||
public SessionKeyManager getClientSessionKeyManager(Hash dest) {
|
||||
ClientConnectionRunner runner = getRunner(dest);
|
||||
if (runner != null)
|
||||
return runner.getSessionKeyManager();
|
||||
else
|
||||
return null;
|
||||
}
|
||||
|
||||
private ClientConnectionRunner getRunner(Hash destHash) {
|
||||
if (destHash == null)
|
||||
return null;
|
||||
synchronized (_runners) {
|
||||
for (Iterator iter = _runners.values().iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner cur = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner cur = iter.next();
|
||||
if (cur.getDestHash().equals(destHash))
|
||||
return cur;
|
||||
}
|
||||
@@ -339,8 +395,8 @@ public class ClientManager {
|
||||
}
|
||||
}
|
||||
|
||||
Set getRunnerDestinations() {
|
||||
Set dests = new HashSet();
|
||||
Set<Destination> getRunnerDestinations() {
|
||||
Set<Destination> dests = new HashSet();
|
||||
long beforeLock = _ctx.clock().now();
|
||||
long inLock = 0;
|
||||
synchronized (_runners) {
|
||||
@@ -371,52 +427,55 @@ public class ClientManager {
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated unused */
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(8*1024);
|
||||
buf.append("<u><b>Local destinations</b></u><br />");
|
||||
/******
|
||||
StringBuilder buf = new StringBuilder(8*1024);
|
||||
buf.append("<u><b>Local destinations</b></u><br>");
|
||||
|
||||
Map runners = null;
|
||||
Map<Destination, ClientConnectionRunner> runners = null;
|
||||
synchronized (_runners) {
|
||||
runners = (Map)_runners.clone();
|
||||
}
|
||||
for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) {
|
||||
Destination dest = (Destination)iter.next();
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest);
|
||||
buf.append("<b>*</b> ").append(dest.calculateHash().toBase64().substring(0,6)).append("<br />\n");
|
||||
for (Iterator<Destination> iter = runners.keySet().iterator(); iter.hasNext(); ) {
|
||||
Destination dest = iter.next();
|
||||
ClientConnectionRunner runner = runners.get(dest);
|
||||
buf.append("<b>*</b> ").append(dest.calculateHash().toBase64().substring(0,6)).append("<br>\n");
|
||||
LeaseSet ls = runner.getLeaseSet();
|
||||
if (ls == null) {
|
||||
buf.append("<font color=\"red\"><i>No lease</i></font><br />\n");
|
||||
buf.append("<font color=\"red\"><i>No lease</i></font><br>\n");
|
||||
} else {
|
||||
long leaseAge = ls.getEarliestLeaseDate() - _ctx.clock().now();
|
||||
if (leaseAge <= 0) {
|
||||
buf.append("<font color=\"red\"><i>Lease expired ");
|
||||
buf.append(DataHelper.formatDuration(0-leaseAge)).append(" ago</i></font><br />\n");
|
||||
buf.append(DataHelper.formatDuration(0-leaseAge)).append(" ago</i></font><br>\n");
|
||||
} else {
|
||||
int count = ls.getLeaseCount();
|
||||
if (count <= 0) {
|
||||
buf.append("<font color=\"red\"><i>No tunnels</i></font><br />\n");
|
||||
buf.append("<font color=\"red\"><i>No tunnels</i></font><br>\n");
|
||||
} else {
|
||||
TunnelId id = ls.getLease(0).getTunnelId();
|
||||
TunnelInfo info = _ctx.tunnelManager().getTunnelInfo(id);
|
||||
if (info == null) {
|
||||
buf.append("<font color=\"red\"><i>Failed tunnels</i></font><br />\n");
|
||||
buf.append("<font color=\"red\"><i>Failed tunnels</i></font><br>\n");
|
||||
} else {
|
||||
buf.append(count).append(" x ");
|
||||
buf.append(info.getLength() - 1).append(" hop tunnel");
|
||||
if (count != 1)
|
||||
buf.append('s');
|
||||
buf.append("<br />\n");
|
||||
buf.append("<br>\n");
|
||||
buf.append("Expiring in ").append(DataHelper.formatDuration(leaseAge));
|
||||
buf.append("<br />\n");
|
||||
buf.append("<br>\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buf.append("\n<hr />\n");
|
||||
buf.append("\n<hr>\n");
|
||||
out.write(buf.toString());
|
||||
out.flush();
|
||||
******/
|
||||
}
|
||||
|
||||
public void messageReceived(ClientMessage msg) {
|
||||
@@ -438,8 +497,8 @@ public class ClientManager {
|
||||
runner = getRunner(_msg.getDestinationHash());
|
||||
|
||||
if (runner != null) {
|
||||
_ctx.statManager().addRateData("client.receiveMessageSize",
|
||||
_msg.getPayload().getSize(), 0);
|
||||
//_ctx.statManager().addRateData("client.receiveMessageSize",
|
||||
// _msg.getPayload().getSize(), 0);
|
||||
runner.receiveMessage(_msg.getDestination(), null, _msg.getPayload());
|
||||
} else {
|
||||
// no client connection...
|
||||
|
||||
@@ -14,12 +14,16 @@ import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.client.I2PSessionException;
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.i2cp.MessageId;
|
||||
import net.i2p.data.i2cp.SessionConfig;
|
||||
import net.i2p.internal.I2CPMessageQueue;
|
||||
import net.i2p.internal.InternalClientManager;
|
||||
import net.i2p.router.ClientManagerFacade;
|
||||
import net.i2p.router.ClientMessage;
|
||||
import net.i2p.router.Job;
|
||||
@@ -31,40 +35,40 @@ import net.i2p.util.Log;
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
private final static Log _log = new Log(ClientManagerFacadeImpl.class);
|
||||
public class ClientManagerFacadeImpl extends ClientManagerFacade implements InternalClientManager {
|
||||
private final Log _log;
|
||||
private ClientManager _manager;
|
||||
private RouterContext _context;
|
||||
/** note that this is different than the property the client side uses, i2cp.tcp.port */
|
||||
public final static String PROP_CLIENT_PORT = "i2cp.port";
|
||||
public final static int DEFAULT_PORT = 7654;
|
||||
/** note that this is different than the property the client side uses, i2cp.tcp.host */
|
||||
public final static String PROP_CLIENT_HOST = "i2cp.hostname";
|
||||
public final static String DEFAULT_HOST = "127.0.0.1";
|
||||
|
||||
public ClientManagerFacadeImpl(RouterContext context) {
|
||||
_context = context;
|
||||
_manager = null;
|
||||
_log.debug("Client manager facade created");
|
||||
_log = _context.logManager().getLog(ClientManagerFacadeImpl.class);
|
||||
//_log.debug("Client manager facade created");
|
||||
}
|
||||
|
||||
public void startup() {
|
||||
_log.info("Starting up the client subsystem");
|
||||
String portStr = _context.router().getConfigSetting(PROP_CLIENT_PORT);
|
||||
if (portStr != null) {
|
||||
try {
|
||||
int port = Integer.parseInt(portStr);
|
||||
_manager = new ClientManager(_context, port);
|
||||
} catch (NumberFormatException nfe) {
|
||||
_log.error("Error setting the port: " + portStr + " is not valid", nfe);
|
||||
_manager = new ClientManager(_context, DEFAULT_PORT);
|
||||
}
|
||||
} else {
|
||||
_manager = new ClientManager(_context, DEFAULT_PORT);
|
||||
}
|
||||
int port = _context.getProperty(PROP_CLIENT_PORT, DEFAULT_PORT);
|
||||
_manager = new ClientManager(_context, port);
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
shutdown("Router shutdown");
|
||||
}
|
||||
|
||||
/**
|
||||
* @param msg message to send to the clients
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void shutdown(String msg) {
|
||||
if (_manager != null)
|
||||
_manager.shutdown();
|
||||
_manager.shutdown(msg);
|
||||
}
|
||||
|
||||
public void restart() {
|
||||
@@ -74,7 +78,11 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
startup();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlive() { return _manager != null && _manager.isAlive(); }
|
||||
|
||||
private static final long MAX_TIME_TO_REBUILD = 10*60*1000;
|
||||
@Override
|
||||
public boolean verifyClientLiveliness() {
|
||||
if (_manager == null) return true;
|
||||
boolean lively = true;
|
||||
@@ -165,6 +173,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean shouldPublishLeaseSet(Hash destinationHash) { return _manager.shouldPublishLeaseSet(destinationHash); }
|
||||
|
||||
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {
|
||||
@@ -194,6 +203,21 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the client's current manager or null if not connected
|
||||
*
|
||||
*/
|
||||
public SessionKeyManager getClientSessionKeyManager(Hash dest) {
|
||||
if (_manager != null)
|
||||
return _manager.getClientSessionKeyManager(dest);
|
||||
else {
|
||||
_log.error("Null manager on getClientSessionKeyManager!");
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated unused */
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
if (_manager != null)
|
||||
_manager.renderStatusHTML(out);
|
||||
@@ -204,10 +228,23 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
*
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
public Set listClients() {
|
||||
@Override
|
||||
public Set<Destination> listClients() {
|
||||
if (_manager != null)
|
||||
return _manager.listClients();
|
||||
else
|
||||
return Collections.EMPTY_SET;
|
||||
}
|
||||
|
||||
/**
|
||||
* The InternalClientManager interface.
|
||||
* Connect to the router, receiving a message queue to talk to the router with.
|
||||
* @throws I2PSessionException if the router isn't ready
|
||||
* @since 0.8.3
|
||||
*/
|
||||
public I2CPMessageQueue connect() throws I2PSessionException {
|
||||
if (_manager != null)
|
||||
return _manager.internalConnect();
|
||||
throw new I2PSessionException("No manager yet");
|
||||
}
|
||||
}

@@ -10,11 +10,14 @@ package net.i2p.router.client;

import java.util.Properties;

import net.i2p.CoreVersion;
import net.i2p.data.Payload;
import net.i2p.data.i2cp.BandwidthLimitsMessage;
import net.i2p.data.i2cp.CreateLeaseSetMessage;
import net.i2p.data.i2cp.CreateSessionMessage;
import net.i2p.data.i2cp.DestLookupMessage;
import net.i2p.data.i2cp.DestroySessionMessage;
import net.i2p.data.i2cp.GetBandwidthLimitsMessage;
import net.i2p.data.i2cp.GetDateMessage;
import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.data.i2cp.I2CPMessageException;
@@ -26,6 +29,7 @@ import net.i2p.data.i2cp.ReceiveMessageEndMessage;
import net.i2p.data.i2cp.ReconfigureSessionMessage;
import net.i2p.data.i2cp.SendMessageMessage;
import net.i2p.data.i2cp.SendMessageExpiresMessage;
import net.i2p.data.i2cp.SessionConfig;
import net.i2p.data.i2cp.SessionId;
import net.i2p.data.i2cp.SessionStatusMessage;
import net.i2p.data.i2cp.SetDateMessage;
@@ -40,14 +44,19 @@ import net.i2p.util.RandomSource;
*
*/
class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener {
private Log _log;
private RouterContext _context;
private ClientConnectionRunner _runner;
private final Log _log;
private final RouterContext _context;
private final ClientConnectionRunner _runner;
private final boolean _enforceAuth;

public ClientMessageEventListener(RouterContext context, ClientConnectionRunner runner) {
/**
* @param enforceAuth set false for in-JVM, true for socket access
*/
public ClientMessageEventListener(RouterContext context, ClientConnectionRunner runner, boolean enforceAuth) {
_context = context;
_log = _context.logManager().getLog(ClientMessageEventListener.class);
_runner = runner;
_enforceAuth = enforceAuth;
_context.statManager().createRateStat("client.distributeTime", "How long it took to inject the client message into the router", "ClientMessages", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
}

@@ -93,6 +102,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
case ReconfigureSessionMessage.MESSAGE_TYPE:
handleReconfigureSession(reader, (ReconfigureSessionMessage)message);
break;
case GetBandwidthLimitsMessage.MESSAGE_TYPE:
handleGetBWLimits(reader, (GetBandwidthLimitsMessage)message);
break;
default:
if (_log.shouldLog(Log.ERROR))
_log.error("Unhandled I2CP type received: " + message.getType());
@@ -110,31 +122,43 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
// Is this is a little drastic for an unknown message type?
_runner.stopRunning();
}


public void disconnected(I2CPMessageReader reader) {
if (_runner.isDead()) return;
_runner.disconnected();
}

private void handleGetDate(I2CPMessageReader reader, GetDateMessage message) {
// sent by clients >= 0.8.7
String clientVersion = message.getVersion();
// TODO - save client's version string for future reference
try {
_runner.doSend(new SetDateMessage());
// only send version if the client can handle it (0.8.7 or greater)
_runner.doSend(new SetDateMessage(clientVersion != null ? CoreVersion.VERSION : null));
} catch (I2CPMessageException ime) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the setDate message", ime);
}
}

/**
* As of 0.8.7, does nothing. Do not allow a client to set the router's clock.
*/
private void handleSetDate(I2CPMessageReader reader, SetDateMessage message) {
_context.clock().setNow(message.getDate().getTime());
//_context.clock().setNow(message.getDate().getTime());
}


/**
* Handle a CreateSessionMessage
*
* Handle a CreateSessionMessage.
* On errors, we could perhaps send a SessionStatusMessage with STATUS_INVALID before
* sending the DisconnectMessage... but right now the client will send _us_ a
* DisconnectMessage in return, and not wait around for our DisconnectMessage.
* So keep it simple.
*/
private void handleCreateSession(I2CPMessageReader reader, CreateSessionMessage message) {
if (message.getSessionConfig().verifySignature()) {
SessionConfig in = message.getSessionConfig();
if (in.verifySignature()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Signature verified correctly on create session message");
} else {
@@ -143,12 +167,45 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
_runner.disconnectClient("Invalid signature on CreateSessionMessage");
return;
}


// Auth, since 0.8.2
if (_enforceAuth && Boolean.valueOf(_context.getProperty("i2cp.auth")).booleanValue()) {
String configUser = _context.getProperty("i2cp.username");
String configPW = _context.getProperty("i2cp.password");
if (configUser != null && configPW != null) {
Properties props = in.getOptions();
String user = props.getProperty("i2cp.username");
String pw = props.getProperty("i2cp.password");
if (user == null || pw == null) {
_log.error("I2CP auth failed for client: " + props.getProperty("inbound.nickname"));
_runner.disconnectClient("Authorization required to create session, specify i2cp.username and i2cp.password in session options");
return;
}
if ((!user.equals(configUser)) || (!pw.equals(configPW))) {
_log.error("I2CP auth failed for client: " + props.getProperty("inbound.nickname") + " user: " + user);
_runner.disconnectClient("Authorization failed for Create Session, user = " + user);
return;
}
if (_log.shouldLog(Log.INFO))
_log.info("I2CP auth success for client: " + props.getProperty("inbound.nickname") + " user: " + user);
}
}

SessionId sessionId = new SessionId();
sessionId.setSessionId(getNextSessionId());
_runner.setSessionId(sessionId);
sendStatusMessage(SessionStatusMessage.STATUS_CREATED);
_runner.sessionEstablished(message.getSessionConfig());

// Copy over the whole config structure so we don't later corrupt it on
// the client side if we change settings or later get a
// ReconfigureSessionMessage
SessionConfig cfg = new SessionConfig(in.getDestination());
cfg.setSignature(in.getSignature());
Properties props = new Properties();
props.putAll(in.getOptions());
cfg.setOptions(props);
_runner.sessionEstablished(cfg);

if (_log.shouldLog(Log.DEBUG))
_log.debug("after sessionEstablished for " + message.getSessionConfig().getDestination().calculateHash().toBase64());

@@ -240,7 +297,10 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
/**
* Message's Session ID ignored. This doesn't support removing previously set options.
* Nor do we bother with message.getSessionConfig().verifySignature() ... should we?
* Nor is the Date checked.
*
* Note that this does NOT update the few options handled in
* ClientConnectionRunner.sessionEstablished(). Those can't be changed later.
*/
private void handleReconfigureSession(I2CPMessageReader reader, ReconfigureSessionMessage message) {
if (_log.shouldLog(Log.INFO))
@@ -274,6 +334,24 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
}
}

/**
* Divide router limit by 1.75 for overhead.
* This could someday give a different answer to each client.
* But it's not enforced anywhere.
*/
private void handleGetBWLimits(I2CPMessageReader reader, GetBandwidthLimitsMessage message) {
if (_log.shouldLog(Log.INFO))
_log.info("Got BW Limits request");
int in = _context.bandwidthLimiter().getInboundKBytesPerSecond() * 4 / 7;
int out = _context.bandwidthLimiter().getOutboundKBytesPerSecond() * 4 / 7;
BandwidthLimitsMessage msg = new BandwidthLimitsMessage(in, out);
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error writing out the session status message", ime);
}
}
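
The "* 4 / 7" above is the same as dividing by 1.75 (since 1.75 = 7/4), so the limits advertised to clients are about 57% of the router's configured rates, leaving the rest as tunnel and protocol overhead. A tiny worked check with made-up numbers:

    int routerInKBps = 700;                    // hypothetical configured inbound limit
    int advertisedIn = routerInKBps * 4 / 7;   // = 400, i.e. 700 / 1.75
    int routerOutKBps = 350;
    int advertisedOut = routerOutKBps * 4 / 7; // = 200, i.e. 350 / 1.75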

// this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
private final static int MAX_SESSION_ID = 32767;


@@ -1,13 +1,10 @@
package net.i2p.router.client;

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.data.i2cp.I2CPMessageImpl;
import net.i2p.data.i2cp.I2CPMessageException;
import net.i2p.internal.PoisonI2CPMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;

@@ -52,7 +49,7 @@ class ClientWriterRunner implements Runnable {
public void stopWriting() {
_messagesToWrite.clear();
try {
_messagesToWrite.put(new PoisonMessage());
_messagesToWrite.put(new PoisonI2CPMessage());
} catch (InterruptedException ie) {}
}

@@ -64,23 +61,9 @@ class ClientWriterRunner implements Runnable {
} catch (InterruptedException ie) {
continue;
}
if (msg.getType() == PoisonMessage.MESSAGE_TYPE)
if (msg.getType() == PoisonI2CPMessage.MESSAGE_TYPE)
break;
_runner.writeMessage(msg);
}
}

/**
* End-of-stream msg used to stop the concurrent queue
* See http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/BlockingQueue.html
*
*/
private static class PoisonMessage extends I2CPMessageImpl {
public static final int MESSAGE_TYPE = 999999;
public int getType() {
return MESSAGE_TYPE;
}
public void doReadMessage(InputStream buf, int size) throws I2CPMessageException, IOException {}
public byte[] doWriteMessage() throws I2CPMessageException, IOException { return null; }
}
}
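
The private PoisonMessage removed above is the standard BlockingQueue shutdown idiom that the shared net.i2p.internal.PoisonI2CPMessage now provides: the producer enqueues a sentinel and the consumer loop exits when it dequeues it. A generic, self-contained sketch of the same idiom (all names here are illustrative, not I2P APIs):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class WriterLoop implements Runnable {
        private static final String POISON = "__POISON__";   // sentinel value
        private final BlockingQueue<String> queue = new LinkedBlockingQueue<String>();

        public void send(String msg) throws InterruptedException {
            queue.put(msg);
        }

        public void stopWriting() throws InterruptedException {
            queue.clear();       // drop anything still pending, as stopWriting() does above
            queue.put(POISON);   // wake the consumer so it can exit
        }

        public void run() {
            while (true) {
                String msg;
                try {
                    msg = queue.take();
                } catch (InterruptedException ie) {
                    continue;
                }
                if (POISON.equals(msg))
                    break;       // end-of-stream marker, mirror of PoisonI2CPMessage.MESSAGE_TYPE
                System.out.println("writing " + msg);   // stand-in for _runner.writeMessage(msg)
            }
        }
    }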

@@ -59,10 +59,10 @@ class CreateSessionJob extends JobImpl {
// XXX props.putAll(Router.getInstance().getConfigMap());

// override them by the client's settings
props.putAll(_runner.getConfig().getOptions());
props.putAll(cfg.getOptions());

// and load 'em up (using anything not yet set as the software defaults)
settings.readFromProperties(props);
getContext().tunnelManager().buildTunnels(_runner.getConfig().getDestination(), settings);
getContext().tunnelManager().buildTunnels(cfg.getDestination(), settings);
}
}

@@ -0,0 +1,57 @@
package net.i2p.router.client;

import java.util.concurrent.BlockingQueue;

import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.internal.I2CPMessageQueue;

/**
* Contains the methods to talk to a router or client via I2CP,
* when both are in the same JVM.
* This interface contains methods to access two queues,
* one for transmission and one for receiving.
* The methods are identical to those in java.util.concurrent.BlockingQueue
*
* @author zzz
* @since 0.8.3
*/
class I2CPMessageQueueImpl extends I2CPMessageQueue {
private final BlockingQueue<I2CPMessage> _in;
private final BlockingQueue<I2CPMessage> _out;

public I2CPMessageQueueImpl(BlockingQueue<I2CPMessage> in, BlockingQueue<I2CPMessage> out) {
_in = in;
_out = out;
}

/**
* Send a message, nonblocking
* @return success (false if no space available)
*/
public boolean offer(I2CPMessage msg) {
return _out.offer(msg);
}

/**
* Receive a message, nonblocking
* @return message or null if none available
*/
public I2CPMessage poll() {
return _in.poll();
}

/**
* Send a message, blocking until space is available
*/
public void put(I2CPMessage msg) throws InterruptedException {
_out.put(msg);
}

/**
* Receive a message, blocking until one is available
* @return message
*/
public I2CPMessage take() throws InterruptedException {
return _in.take();
}
}
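
Since I2CPMessageQueueImpl is just a thin view over two BlockingQueues, an in-JVM "connection" amounts to two LinkedBlockingQueues shared in mirrored order: the router's out queue is the client's in queue and vice versa. A wiring sketch under that assumption (not taken from the patch itself):

    // toRouter carries client -> router messages, toClient the reverse.
    BlockingQueue<I2CPMessage> toRouter = new LinkedBlockingQueue<I2CPMessage>();
    BlockingQueue<I2CPMessage> toClient = new LinkedBlockingQueue<I2CPMessage>();

    // The two endpoints see the same queues in opposite roles.
    I2CPMessageQueue clientSide = new I2CPMessageQueueImpl(toClient, toRouter); // in = toClient, out = toRouter
    I2CPMessageQueue routerSide = new I2CPMessageQueueImpl(toRouter, toClient); // in = toRouter, out = toClient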
@@ -33,7 +33,6 @@ class LeaseRequestState {
_onFailed = onFailed;
_expiration = expiration;
_requestedLeaseSet = requested;
_successful = false;
}

/** created lease set from client */
@@ -60,6 +59,7 @@ class LeaseRequestState {
public boolean getIsSuccessful() { return _successful; }
public void setIsSuccessful(boolean is) { _successful = is; }

@Override
public String toString() {
return "leaseSet request asking for " + _requestedLeaseSet
+ " having received " + _grantedLeaseSet

@@ -28,7 +28,8 @@ class LookupDestJob extends JobImpl {
public String getName() { return "LeaseSet Lookup for Client"; }
public void runJob() {
DoneJob done = new DoneJob(getContext());
getContext().netDb().lookupLeaseSet(_hash, done, done, 10*1000);
// TODO add support for specifying the timeout in the lookup message
getContext().netDb().lookupLeaseSet(_hash, done, done, 15*1000);
}

private class DoneJob extends JobImpl {
@@ -41,7 +42,7 @@ class LookupDestJob extends JobImpl {
if (ls != null)
returnDest(ls.getDestination());
else
returnDest(null);
returnHash(_hash);
}
}

@@ -51,4 +52,15 @@ class LookupDestJob extends JobImpl {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {}
}

/**
* Return the failed hash so the client can correlate replies with requests
* @since 0.8.3
*/
private void returnHash(Hash h) {
DestReplyMessage msg = new DestReplyMessage(h);
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {}
}
}

@@ -24,15 +24,11 @@ import net.i2p.util.Log;
class MessageReceivedJob extends JobImpl {
private Log _log;
private ClientConnectionRunner _runner;
private Destination _to;
private Destination _from;
private Payload _payload;
public MessageReceivedJob(RouterContext ctx, ClientConnectionRunner runner, Destination toDest, Destination fromDest, Payload payload) {
super(ctx);
_log = ctx.logManager().getLog(MessageReceivedJob.class);
_runner = runner;
_to = toDest;
_from = fromDest;
_payload = payload;
}

@@ -40,16 +36,13 @@ class MessageReceivedJob extends JobImpl {
public void runJob() {
if (_runner.isDead()) return;
MessageId id = new MessageId();
id.setMessageId(ClientConnectionRunner.getNextMessageId());
id.setMessageId(_runner.getNextMessageId());
_runner.setPayload(id, _payload);
messageAvailable(id, _payload.getSize());
}

/**
* Deliver notification to the client that the given message is available.
* This is synchronous and returns true if the notification was sent safely,
* otherwise it returns false
*
*/
public void messageAvailable(MessageId id, long size) {
if (_log.shouldLog(Log.DEBUG))
@@ -59,6 +52,7 @@ class MessageReceivedJob extends JobImpl {
msg.setMessageId(id.getMessageId());
msg.setSessionId(_runner.getSessionId().getSessionId());
msg.setSize(size);
// has to be >= 0, it is initialized to -1
msg.setNonce(1);
msg.setStatus(MessageStatusMessage.STATUS_AVAILABLE);
try {

@@ -0,0 +1,74 @@
package net.i2p.router.client;

import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.data.i2cp.I2CPMessageException;
import net.i2p.internal.I2CPMessageQueue;
import net.i2p.internal.QueuedI2CPMessageReader;
import net.i2p.router.RouterContext;

/**
* Zero-copy in-JVM.
* While super() starts both a reader and a writer thread, we only need a reader thread here.
*
* @author zzz
* @since 0.8.3
*/
class QueuedClientConnectionRunner extends ClientConnectionRunner {
private final I2CPMessageQueue queue;

/**
* Create a new runner with the given queues
*
*/
public QueuedClientConnectionRunner(RouterContext context, ClientManager manager, I2CPMessageQueue queue) {
super(context, manager, null);
this.queue = queue;
}


/**
* Starts the reader thread. Does not call super().
*/
@Override
public void startRunning() {
_reader = new QueuedI2CPMessageReader(this.queue, new ClientMessageEventListener(_context, this, false));
_reader.startReading();
}

/**
* Calls super() to stop the reader, and sends a poison message to the client.
*/
@Override
void stopRunning() {
super.stopRunning();
queue.close();
// queue = null;
}

/**
* In super(), doSend queues it to the writer thread and
* the writer thread calls writeMessage() to write to the output stream.
* Since we have no writer thread this shouldn't happen.
*/
@Override
void writeMessage(I2CPMessage msg) {
throw new RuntimeException("huh?");
}

/**
* Actually send the I2CPMessage to the client.
* Nonblocking.
*/
@Override
void doSend(I2CPMessage msg) throws I2CPMessageException {
// This will never fail, for now, as the router uses unbounded queues
// Perhaps in the future we may want to use bounded queues,
// with non-blocking writes for the router
// and blocking writes for the client?
boolean success = queue.offer(msg);
if (!success)
throw new I2CPMessageException("I2CP write to queue failed");
}

}
@@ -27,26 +27,18 @@ import net.i2p.util.Log;
*
*/
class RequestLeaseSetJob extends JobImpl {
private Log _log;
private ClientConnectionRunner _runner;
private LeaseSet _ls;
private long _expiration;
private Job _onCreate;
private Job _onFail;
private LeaseRequestState _requestState;
private final Log _log;
private final ClientConnectionRunner _runner;
private final LeaseRequestState _requestState;

public RequestLeaseSetJob(RouterContext ctx, ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail, LeaseRequestState state) {
super(ctx);
_log = ctx.logManager().getLog(RequestLeaseSetJob.class);
_runner = runner;
_ls = set;
_expiration = expiration;
_onCreate = onCreate;
_onFail = onFail;
_requestState = state;
ctx.statManager().createRateStat("client.requestLeaseSetSuccess", "How frequently the router requests successfully a new leaseSet?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetTimeout", "How frequently the router requests a new leaseSet but gets no reply?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetDropped", "How frequently the router requests a new leaseSet but the client drops?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetSuccess", "How frequently the router requests successfully a new leaseSet?", "ClientMessages", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetTimeout", "How frequently the router requests a new leaseSet but gets no reply?", "ClientMessages", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("client.requestLeaseSetDropped", "How frequently the router requests a new leaseSet but the client drops?", "ClientMessages", new long[] { 60*60*1000 });
}

public String getName() { return "Request Lease Set"; }
@@ -54,13 +46,15 @@ class RequestLeaseSetJob extends JobImpl {
if (_runner.isDead()) return;

RequestLeaseSetMessage msg = new RequestLeaseSetMessage();
Date end = null;
// get the earliest end date
for (int i = 0; i < _requestState.getRequested().getLeaseCount(); i++) {
if ( (end == null) || (end.getTime() > _requestState.getRequested().getLease(i).getEndDate().getTime()) )
end = _requestState.getRequested().getLease(i).getEndDate();
}

long endTime = _requestState.getRequested().getEarliestLeaseDate();
// Add a small number of ms (0-300) that increases as we approach the expire time.
// Since the earliest date functions as a version number,
// this will force the floodfill to flood each new version;
// otherwise it won't if the earliest time hasn't changed.
long fudge = 300 - ((endTime - getContext().clock().now()) / 2000);
endTime += fudge;
Date end = new Date(endTime);

msg.setEndDate(end);
msg.setSessionId(_runner.getSessionId());
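
The fudge term above grows from 0 toward 300 ms as the earliest lease end approaches, which nudges the "version number" forward even when the lease end time itself has not changed. A tiny check of that arithmetic (times are made up):

    long now = 0L;                                   // treat "now" as t = 0 for the example
    long[] remainingMs = { 10*60*1000, 5*60*1000, 0 };
    for (long remaining : remainingMs) {
        long endTime = now + remaining;
        long fudge = 300 - ((endTime - now) / 2000);
        System.out.println(remaining + " ms left -> fudge " + fudge + " ms");  // prints 0, 150, 300
    }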

@@ -90,8 +84,8 @@ class RequestLeaseSetJob extends JobImpl {
*
*/
private class CheckLeaseRequestStatus extends JobImpl {
private LeaseRequestState _req;
private long _start;
private final LeaseRequestState _req;
private final long _start;

public CheckLeaseRequestStatus(RouterContext enclosingContext, LeaseRequestState state) {
super(enclosingContext);
@@ -112,9 +106,9 @@ class RequestLeaseSetJob extends JobImpl {
return;
} else {
RequestLeaseSetJob.CheckLeaseRequestStatus.this.getContext().statManager().addRateData("client.requestLeaseSetTimeout", 1, 0);
if (_log.shouldLog(Log.CRIT)) {
if (_log.shouldLog(Log.ERROR)) {
long waited = System.currentTimeMillis() - _start;
_log.log(Log.CRIT, "Failed to receive a leaseSet in the time allotted (" + waited + "): " + _req + " for "
_log.error("Failed to receive a leaseSet in the time allotted (" + waited + "): " + _req + " for "
+ _runner.getConfig().getDestination().calculateHash().toBase64());
}
_runner.disconnectClient("Took too long to request leaseSet");

@@ -0,0 +1,287 @@
package net.i2p.router.client;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.Socket;
import java.net.ServerSocket;
import java.security.KeyStore;
import java.security.GeneralSecurityException;
import java.security.cert.Certificate;
import java.security.cert.CertificateEncodingException;
import java.util.HashMap;
import java.util.Map;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLServerSocketFactory;
import javax.net.ssl.SSLContext;

import net.i2p.client.I2PClient;
import net.i2p.data.Base32;
import net.i2p.data.Base64;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
import net.i2p.util.SecureDirectory;
import net.i2p.util.SecureFileOutputStream;
import net.i2p.util.ShellCommand;

/**
* SSL version of ClientListenerRunner
*
* @since 0.8.3
* @author zzz
*/
class SSLClientListenerRunner extends ClientListenerRunner {

private SSLServerSocketFactory _factory;

private static final String PROP_KEYSTORE_PASSWORD = "i2cp.keystorePassword";
private static final String DEFAULT_KEYSTORE_PASSWORD = "changeit";
private static final String PROP_KEY_PASSWORD = "i2cp.keyPassword";
private static final String KEY_ALIAS = "i2cp";
private static final String ASCII_KEYFILE = "i2cp.local.crt";

public SSLClientListenerRunner(RouterContext context, ClientManager manager, int port) {
super(context, manager, port);
}

/**
* @return success if it exists and we have a password, or it was created successfully.
*/
private boolean verifyKeyStore(File ks) {
if (ks.exists()) {
boolean rv = _context.getProperty(PROP_KEY_PASSWORD) != null;
if (!rv)
_log.error("I2CP SSL error, must set " + PROP_KEY_PASSWORD + " in " +
(new File(_context.getConfigDir(), "router.config")).getAbsolutePath());
return rv;
}
File dir = ks.getParentFile();
if (!dir.exists()) {
File sdir = new SecureDirectory(dir.getAbsolutePath());
if (!sdir.mkdir())
return false;
}
boolean rv = createKeyStore(ks);

// Now read it back out of the new keystore and save it in ascii form
// where the clients can get to it.
// Failure of this part is not fatal.
if (rv)
exportCert(ks);
return rv;
}


/**
* Call out to keytool to create a new keystore with a keypair in it.
* Trying to do this programatically is a nightmare, requiring either BouncyCastle
* libs or using proprietary Sun libs, and it's a huge mess.
* If successful, stores the keystore password and key password in router.config.
*
* @return success
*/
private boolean createKeyStore(File ks) {
// make a random 48 character password (30 * 8 / 5)
byte[] rand = new byte[30];
_context.random().nextBytes(rand);
String keyPassword = Base32.encode(rand);
// and one for the cname
_context.random().nextBytes(rand);
String cname = Base32.encode(rand) + ".i2cp.i2p.net";

String keytool = (new File(System.getProperty("java.home"), "bin/keytool")).getAbsolutePath();
String[] args = new String[] {
keytool,
"-genkey", // -genkeypair preferred in newer keytools, but this works with more
"-storetype", KeyStore.getDefaultType(),
"-keystore", ks.getAbsolutePath(),
"-storepass", DEFAULT_KEYSTORE_PASSWORD,
"-alias", KEY_ALIAS,
"-dname", "CN=" + cname + ",OU=I2CP,O=I2P Anonymous Network,L=XX,ST=XX,C=XX",
"-validity", "3652", // 10 years
"-keyalg", "DSA",
"-keysize", "1024",
"-keypass", keyPassword};
boolean success = (new ShellCommand()).executeSilentAndWaitTimed(args, 30); // 30 secs
if (success) {
success = ks.exists();
if (success) {
SecureFileOutputStream.setPerms(ks);
Map<String, String> changes = new HashMap();
changes.put(PROP_KEYSTORE_PASSWORD, DEFAULT_KEYSTORE_PASSWORD);
changes.put(PROP_KEY_PASSWORD, keyPassword);
_context.router().saveConfig(changes, null);
}
}
if (success) {
_log.logAlways(Log.INFO, "Created self-signed certificate for " + cname + " in keystore: " + ks.getAbsolutePath() + "\n" +
"The certificate name was generated randomly, and is not associated with your " +
"IP address, host name, router identity, or destination keys.");
} else {
_log.error("Failed to create I2CP SSL keystore using command line:");
StringBuilder buf = new StringBuilder(256);
for (int i = 0; i < args.length; i++) {
buf.append('"').append(args[i]).append("\" ");
}
_log.error(buf.toString());
_log.error("This is for the Sun/Oracle keytool, others may be incompatible.\n" +
"If you create the keystore manually, you must add " + PROP_KEYSTORE_PASSWORD + " and " + PROP_KEY_PASSWORD +
" to " + (new File(_context.getConfigDir(), "router.config")).getAbsolutePath());
}
return success;
}
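
createKeyStore() shells out to the JRE's keytool through I2P's ShellCommand helper. For anyone generating the keystore by hand, the equivalent invocation using only the standard JDK would look roughly like this (paths and passwords are placeholders, and the call belongs inside a method declared to throw IOException and InterruptedException):

    ProcessBuilder pb = new ProcessBuilder(
            new java.io.File(System.getProperty("java.home"), "bin/keytool").getAbsolutePath(),
            "-genkey",
            "-storetype", java.security.KeyStore.getDefaultType(),
            "-keystore", "/path/to/keystore/i2cp.ks",       // placeholder path
            "-storepass", "changeit",
            "-alias", "i2cp",
            "-dname", "CN=example.i2cp.i2p.net,OU=I2CP,O=I2P Anonymous Network,L=XX,ST=XX,C=XX",
            "-validity", "3652",
            "-keyalg", "DSA",
            "-keysize", "1024",
            "-keypass", "chooseARandomKeyPassword");        // placeholder; the patch uses 48 random Base32 chars
    pb.inheritIO();
    int exit = pb.start().waitFor();                        // 0 on success

As the error text above notes, a manually created keystore also needs i2cp.keystorePassword and i2cp.keyPassword added to router.config.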

/**
* Pull the cert back OUT of the keystore and save it as ascii
* so the clients can get to it.
*/
private void exportCert(File ks) {
File sdir = new SecureDirectory(_context.getConfigDir(), "certificates");
if (sdir.exists() || sdir.mkdir()) {
InputStream fis = null;
try {
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
fis = new FileInputStream(ks);
String ksPass = _context.getProperty(PROP_KEYSTORE_PASSWORD, DEFAULT_KEYSTORE_PASSWORD);
keyStore.load(fis, ksPass.toCharArray());
Certificate cert = keyStore.getCertificate(KEY_ALIAS);
if (cert != null) {
File certFile = new File(sdir, ASCII_KEYFILE);
saveCert(cert, certFile);
} else {
_log.error("Error getting SSL cert to save as ASCII");
}
} catch (GeneralSecurityException gse) {
_log.error("Error saving ASCII SSL keys", gse);
} catch (IOException ioe) {
_log.error("Error saving ASCII SSL keys", ioe);
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
} else {
_log.error("Error saving ASCII SSL keys");
}
}

private static final int LINE_LENGTH = 64;

/**
* Modified from:
* http://www.exampledepot.com/egs/java.security.cert/ExportCert.html
*
* Write a certificate to a file in base64 format.
*/
private void saveCert(Certificate cert, File file) {
OutputStream os = null;
try {
// Get the encoded form which is suitable for exporting
byte[] buf = cert.getEncoded();
os = new SecureFileOutputStream(file);
PrintWriter wr = new PrintWriter(os);
wr.println("-----BEGIN CERTIFICATE-----");
String b64 = Base64.encode(buf, true); // true = use standard alphabet
for (int i = 0; i < b64.length(); i += LINE_LENGTH) {
wr.println(b64.substring(i, Math.min(i + LINE_LENGTH, b64.length())));
}
wr.println("-----END CERTIFICATE-----");
wr.flush();
} catch (CertificateEncodingException cee) {
_log.error("Error writing X509 Certificate " + file.getAbsolutePath(), cee);
} catch (IOException ioe) {
_log.error("Error writing X509 Certificate " + file.getAbsolutePath(), ioe);
} finally {
try { if (os != null) os.close(); } catch (IOException foo) {}
}
}
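
saveCert() does the PEM line-wrapping by hand with I2P's Base64 class. On a modern JDK the same 64-column format can be produced with java.util.Base64's MIME encoder; a sketch under that assumption (not what the patch uses):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.security.cert.Certificate;
    import java.security.cert.CertificateEncodingException;
    import java.util.Base64;

    static void savePem(Certificate cert, String path) throws IOException, CertificateEncodingException {
        // 64-character lines, matching LINE_LENGTH above
        String b64 = Base64.getMimeEncoder(64, "\n".getBytes(StandardCharsets.US_ASCII))
                           .encodeToString(cert.getEncoded());
        String pem = "-----BEGIN CERTIFICATE-----\n" + b64 + "\n-----END CERTIFICATE-----\n";
        Files.write(Paths.get(path), pem.getBytes(StandardCharsets.US_ASCII));
    }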

/**
* Sets up the SSLContext and sets the socket factory.
* @return success
*/
private boolean initializeFactory(File ks) {
String ksPass = _context.getProperty(PROP_KEYSTORE_PASSWORD, DEFAULT_KEYSTORE_PASSWORD);
String keyPass = _context.getProperty(PROP_KEY_PASSWORD);
if (keyPass == null) {
_log.error("No key password, set " + PROP_KEY_PASSWORD +
" in " + (new File(_context.getConfigDir(), "router.config")).getAbsolutePath());
return false;
}
InputStream fis = null;
try {
SSLContext sslc = SSLContext.getInstance("TLS");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
fis = new FileInputStream(ks);
keyStore.load(fis, ksPass.toCharArray());
KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
kmf.init(keyStore, keyPass.toCharArray());
sslc.init(kmf.getKeyManagers(), null, _context.random());
_factory = sslc.getServerSocketFactory();
return true;
} catch (GeneralSecurityException gse) {
_log.error("Error loading SSL keys", gse);
} catch (IOException ioe) {
_log.error("Error loading SSL keys", ioe);
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
return false;
}

/**
* Get a SSLServerSocket.
*/
@Override
protected ServerSocket getServerSocket() throws IOException {
ServerSocket rv;
if (_bindAllInterfaces) {
if (_log.shouldLog(Log.INFO))
_log.info("Listening on port " + _port + " on all interfaces");
rv = _factory.createServerSocket(_port);
} else {
String listenInterface = _context.getProperty(ClientManagerFacadeImpl.PROP_CLIENT_HOST,
ClientManagerFacadeImpl.DEFAULT_HOST);
if (_log.shouldLog(Log.INFO))
_log.info("Listening on port " + _port + " of the specific interface: " + listenInterface);
rv = _factory.createServerSocket(_port, 0, InetAddress.getByName(listenInterface));
}
return rv;
}

/**
* Create (if necessary) and load the key store, then run.
*/
@Override
public void runServer() {
File keyStore = new File(_context.getConfigDir(), "keystore/i2cp.ks");
if (verifyKeyStore(keyStore) && initializeFactory(keyStore)) {
super.runServer();
} else {
_log.error("SSL I2CP server error - Failed to create or open key store");
}
}

/**
* Overridden because SSL handshake may need more time,
* and available() in super doesn't work.
* The handshake doesn't start until a read().
*/
@Override
protected boolean validate(Socket socket) {
try {
InputStream is = socket.getInputStream();
int oldTimeout = socket.getSoTimeout();
socket.setSoTimeout(4 * CONNECT_TIMEOUT);
boolean rv = is.read() == I2PClient.PROTOCOL_BYTE;
socket.setSoTimeout(oldTimeout);
return rv;
} catch (IOException ioe) {}
if (_log.shouldLog(Log.WARN))
_log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping");
return false;
}
}
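
The i2cp.local.crt file exported above is what an external client needs in order to trust the router's self-signed certificate. A rough client-side sketch using only standard JSSE (the file location and method name are assumptions, not part of the patch):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.KeyStore;
    import java.security.cert.CertificateFactory;
    import javax.net.ssl.SSLContext;
    import javax.net.ssl.SSLSocketFactory;
    import javax.net.ssl.TrustManagerFactory;

    static SSLSocketFactory trustRouterCert(String certPath) throws Exception {
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        InputStream in = new FileInputStream(certPath);          // e.g. <config dir>/certificates/i2cp.local.crt
        KeyStore ts = KeyStore.getInstance(KeyStore.getDefaultType());
        ts.load(null, null);                                     // empty in-memory trust store
        ts.setCertificateEntry("i2cp", cf.generateCertificate(in));
        in.close();
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(ts);
        SSLContext ctx = SSLContext.getInstance("TLS");
        ctx.init(null, tmf.getTrustManagers(), null);
        return ctx.getSocketFactory();                           // connect to the I2CP SSL port with this
    }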
@@ -1,4 +1,4 @@
package net.i2p.router;
package net.i2p.router.dummy;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
@@ -8,11 +8,16 @@ package net.i2p.router;
*
*/

import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.i2cp.MessageId;
import net.i2p.data.i2cp.SessionConfig;
import net.i2p.router.ClientManagerFacade;
import net.i2p.router.ClientMessage;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;

/**
* Manage all interactions with clients
@@ -35,11 +40,13 @@ public class DummyClientManagerFacade extends ClientManagerFacade {
public void startup() {}
public void stopAcceptingClients() { }
public void shutdown() {}
public void shutdown(String msg) {}
public void restart() {}

public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {}

public SessionConfig getClientSessionConfig(Destination _dest) { return null; }
public SessionKeyManager getClientSessionKeyManager(Hash _dest) { return null; }

public void requestLeaseSet(Hash dest, LeaseSet set) {}

Some files were not shown because too many files have changed in this diff.