propagate from branch 'i2p.i2p.zzz.confsplit' (head 324249e53469a81e66f9d1c1989d9f53817868f4)

to branch 'i2p.i2p' (head cf6476e03a43a35fea6697b29f9ff43f77875100)
This commit is contained in:
zzz
2019-07-26 12:53:03 +00:00
349 changed files with 41339 additions and 32780 deletions

View File

@@ -13,6 +13,7 @@ import java.io.Writer;
import java.util.Collections;
import java.util.Set;
import net.i2p.client.I2PSessionException;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
@@ -104,4 +105,20 @@ public abstract class ClientManagerFacade implements Service {
/** @since 0.8.8 */
public abstract void shutdown(String msg);
/**
* Declare that we're going to publish a meta LS for this destination.
* Must be called before publishing the leaseset.
*
* @throws I2PSessionException on duplicate dest
* @since 0.9.41
*/
public void registerMetaDest(Destination dest) throws I2PSessionException {}
/**
* Declare that we're no longer going to publish a meta LS for this destination.
*
* @since 0.9.41
*/
public void unregisterMetaDest(Destination dest) {}
}

View File

@@ -86,7 +86,6 @@ public class InNetMessagePool implements Service {
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*60*1000l });
_context.statManager().createRateStat("inNetPool.droppedDeliveryStatusDelay", "How long after a delivery status message is created do we receive it back again (for messages that are too slow to be handled)", "InNetPool", new long[] { 60*60*1000l });
_context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*60*1000l });
//_context.statManager().createRateStat("inNetPool.droppedTunnelCreateStatusMessage", "How often we drop a slow-to-arrive tunnel request response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("inNetPool.droppedDbLookupResponseMessage", "How often we drop a slow-to-arrive db search response", "InNetPool", new long[] { 60*60*1000l });
}
@@ -175,13 +174,18 @@ public class InNetMessagePool implements Service {
boolean jobFound = false;
boolean allowMatches = true;
if (type == TunnelGatewayMessage.MESSAGE_TYPE) {
switch (type) {
case TunnelGatewayMessage.MESSAGE_TYPE:
shortCircuitTunnelGateway(messageBody);
allowMatches = false;
} else if (type == TunnelDataMessage.MESSAGE_TYPE) {
break;
case TunnelDataMessage.MESSAGE_TYPE:
shortCircuitTunnelData(messageBody, fromRouterHash);
allowMatches = false;
} else {
break;
default:
// why don't we allow type 0? There used to be a message of type 0 long ago...
if ( (type > 0) && (type < _handlerJobBuilders.length) ) {
HandlerJobBuilder builder = _handlerJobBuilders[type];
@@ -203,7 +207,8 @@ public class InNetMessagePool implements Service {
}
}
}
}
break;
} // switch
if (allowMatches) {
int replies = handleReplies(messageBody);
@@ -213,7 +218,9 @@ public class InNetMessagePool implements Service {
if (!jobFound) {
// was not handled via HandlerJobBuilder
_context.messageHistory().droppedOtherMessage(messageBody, (fromRouter != null ? fromRouter.calculateHash() : fromRouterHash));
if (type == DeliveryStatusMessage.MESSAGE_TYPE) {
switch (type) {
case DeliveryStatusMessage.MESSAGE_TYPE:
// Avoid logging side effect from a horrible UDP EstablishmentManager hack
// We could set up a separate stat for it but don't bother for now
long arr = ((DeliveryStatusMessage)messageBody).getArrival();
@@ -223,25 +230,28 @@ public class InNetMessagePool implements Service {
_log.warn("Dropping unhandled delivery status message created " + timeSinceSent + "ms ago: " + messageBody);
_context.statManager().addRateData("inNetPool.droppedDeliveryStatusDelay", timeSinceSent);
}
//} else if (type == TunnelCreateStatusMessage.MESSAGE_TYPE) {
// if (_log.shouldLog(Log.INFO))
// _log.info("Dropping slow tunnel create request response: " + messageBody);
// _context.statManager().addRateData("inNetPool.droppedTunnelCreateStatusMessage", 1, 0);
} else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
break;
case DatabaseSearchReplyMessage.MESSAGE_TYPE:
if (_log.shouldLog(Log.INFO))
_log.info("Dropping slow db lookup response: " + messageBody);
_context.statManager().addRateData("inNetPool.droppedDbLookupResponseMessage", 1);
} else if (type == DatabaseLookupMessage.MESSAGE_TYPE) {
break;
case DatabaseLookupMessage.MESSAGE_TYPE:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Dropping netDb lookup due to throttling");
} else {
break;
default:
if (_log.shouldLog(Log.WARN))
_log.warn("Message expiring on "
+ messageBody.getMessageExpiration()
+ " was not handled by a HandlerJobBuilder - DROPPING: " + messageBody,
new Exception("f00!"));
_context.statManager().addRateData("inNetPool.dropped", 1);
}
break;
} // switch
} else {
String mtype = messageBody.getClass().getName();
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),

View File

@@ -30,6 +30,11 @@ public interface Job {
*/
public void runJob();
/**
* @deprecated
* @return null always
*/
@Deprecated
public Exception getAddedBy();
/**

View File

@@ -59,7 +59,9 @@ public abstract class JobImpl implements Job {
* @return null always
*/
@Deprecated
@SuppressWarnings("deprecation")
public Exception getAddedBy() { return null; }
public long getMadeReadyOn() { return _madeReadyOn; }
public void madeReady() { _madeReadyOn = _context.clock().now(); }
public void dropped() {}

View File

@@ -728,7 +728,10 @@ public class JobQueue {
public long getJobId() { return POISON_ID; }
public JobTiming getTiming() { return null; }
public void runJob() {}
@SuppressWarnings("deprecation")
public Exception getAddedBy() { return null; }
public void dropped() {}
}

View File

@@ -144,9 +144,7 @@ class JobQueueRunner extends I2PThread {
} catch (Throwable t) {
//_state = 21;
_log.log(Log.CRIT, "Error processing job [" + _currentJob.getName()
+ "] on thread " + _id + ": " + t.getMessage(), t);
//if (_log.shouldLog(Log.ERROR))
// _log.error("The above job was enqueued by: ", _currentJob.getAddedBy());
+ "] on thread " + _id + ": " + t, t);
}
}
}

View File

@@ -17,6 +17,7 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.util.Log;
import net.i2p.util.SecureFileOutputStream;
import net.i2p.util.SystemVersion;
/**
* Simply act as a pen register of messages sent in and out of the router.
@@ -25,6 +26,7 @@ import net.i2p.util.SecureFileOutputStream;
* analyze the entire network, if everyone provides their logs honestly)
*
* This is always instantiated in the context and the WriteJob runs every minute,
* (except on Android, we don't set up the WriteJob)
* but unless router.keepHistory=true it does nothing.
* It generates a LARGE log file.
*/
@@ -92,6 +94,8 @@ public class MessageHistory {
*
*/
public synchronized void initialize(boolean forceReinitialize) {
if (SystemVersion.isAndroid())
return;
if (!forceReinitialize) return;
Router router = _context.router();
if (router == null) {

View File

@@ -11,6 +11,7 @@ package net.i2p.router;
import java.io.IOException;
import java.io.Writer;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import net.i2p.data.BlindData;
@@ -178,4 +179,21 @@ public abstract class NetworkDatabaseFacade implements Service {
* @since 0.9.40
*/
public void setBlindData(BlindData bd) {}
/**
* For console ConfigKeyringHelper
* @since 0.9.41
*/
public List<BlindData> getBlindData() {
return null;
}
/**
* For console ConfigKeyringHelper
* @return true if removed
* @since 0.9.41
*/
public boolean removeBlindData(SigningPublicKey spk) {
return false;
}
}

View File

@@ -52,6 +52,7 @@ import net.i2p.router.startup.StartupJob;
import net.i2p.router.startup.WorkingDir;
import net.i2p.router.tasks.*;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.router.transport.UPnPScannerCallback;
import net.i2p.router.transport.ntcp.NTCPTransport;
import net.i2p.router.transport.udp.UDPTransport;
import net.i2p.router.util.EventLog;
@@ -103,6 +104,7 @@ public class Router implements RouterClock.ClockShiftListener {
private FamilyKeyCrypto _familyKeyCrypto;
private boolean _familyKeyCryptoFail;
public final Object _familyKeyLock = new Object();
private UPnPScannerCallback _upnpScannerCallback;
public final static String PROP_CONFIG_FILE = "router.configLocation";
@@ -336,33 +338,35 @@ public class Router implements RouterClock.ClockShiftListener {
// for the ping file
// Check for other router but do not start a thread yet so the update doesn't cause
// a NCDFE
for (int i = 0; i < 14; i++) {
// Wrapper can start us up too quickly after a crash, the ping file
// may still be less than LIVELINESS_DELAY (60s) old.
// So wait at least 60s to be sure.
if (isOnlyRouterRunning()) {
if (i > 0)
System.err.println("INFO: No, there wasn't another router already running. Proceeding with startup.");
break;
}
if (i < 13) {
if (i == 0)
System.err.println("WARN: There may be another router already running. Waiting a while to be sure...");
// yes this is ugly to sleep in the constructor.
try { Thread.sleep(5000); } catch (InterruptedException ie) {}
} else {
_eventLog.addEvent(EventLog.ABORTED, "Another router running");
System.err.println("ERROR: There appears to be another router already running!");
System.err.println(" Please make sure to shut down old instances before starting up");
System.err.println(" a new one. If you are positive that no other instance is running,");
System.err.println(" please delete the file " + getPingFile().getAbsolutePath());
//System.exit(-1);
// throw exception instead, for embedded
throw new IllegalStateException(
"ERROR: There appears to be another router already running!" +
" Please make sure to shut down old instances before starting up" +
" a new one. If you are positive that no other instance is running," +
" please delete the file " + getPingFile().getAbsolutePath());
if (!SystemVersion.isAndroid()) {
for (int i = 0; i < 14; i++) {
// Wrapper can start us up too quickly after a crash, the ping file
// may still be less than LIVELINESS_DELAY (60s) old.
// So wait at least 60s to be sure.
if (isOnlyRouterRunning()) {
if (i > 0)
System.err.println("INFO: No, there wasn't another router already running. Proceeding with startup.");
break;
}
if (i < 13) {
if (i == 0)
System.err.println("WARN: There may be another router already running. Waiting a while to be sure...");
// yes this is ugly to sleep in the constructor.
try { Thread.sleep(5000); } catch (InterruptedException ie) {}
} else {
_eventLog.addEvent(EventLog.ABORTED, "Another router running");
System.err.println("ERROR: There appears to be another router already running!");
System.err.println(" Please make sure to shut down old instances before starting up");
System.err.println(" a new one. If you are positive that no other instance is running,");
System.err.println(" please delete the file " + getPingFile().getAbsolutePath());
//System.exit(-1);
// throw exception instead, for embedded
throw new IllegalStateException(
"ERROR: There appears to be another router already running!" +
" Please make sure to shut down old instances before starting up" +
" a new one. If you are positive that no other instance is running," +
" please delete the file " + getPingFile().getAbsolutePath());
}
}
}
@@ -384,6 +388,8 @@ public class Router implements RouterClock.ClockShiftListener {
} catch (NumberFormatException nfe) {}
}
_networkID = id;
// for testing
setUPnPScannerCallback(new LoggerCallback());
changeState(State.INITIALIZED);
// ********* Start no threads before here ********* //
}
@@ -606,6 +612,32 @@ public class Router implements RouterClock.ClockShiftListener {
*/
public RouterContext getContext() { return _context; }
private class LoggerCallback implements UPnPScannerCallback {
public void beforeScan() { _log.info("SSDP beforeScan()"); }
public void afterScan() { _log.info("SSDP afterScan()"); }
}
/**
* For Android only.
* MUST be set before runRouter() is called.
*
* @param callback the callback or null to clear it
* @since 0.9.41
*/
public synchronized void setUPnPScannerCallback(UPnPScannerCallback callback) {
_upnpScannerCallback = callback;
}
/**
* For Android only.
*
* @return the callback or null if none
* @since 0.9.41
*/
public synchronized UPnPScannerCallback getUPnPScannerCallback() {
return _upnpScannerCallback;
}
/**
* This must be called after instantiation.
* Starts the threads. Does not install updates.
@@ -865,6 +897,8 @@ public class Router implements RouterClock.ClockShiftListener {
} else if (_state == State.EXPL_TUNNELS_READY) {
changeState(State.RUNNING);
changed = true;
} else {
_log.warn("Invalid state " + _state + " for setNetDbReady()");
}
}
if (changed) {
@@ -890,6 +924,8 @@ public class Router implements RouterClock.ClockShiftListener {
changeState(State.EXPL_TUNNELS_READY);
else if (_state == State.NETDB_READY)
changeState(State.RUNNING);
else
_log.warn("Invalid state " + _state + " for setExplTunnelsReady()");
}
}

View File

@@ -189,7 +189,7 @@ public class RouterClock extends Clock {
getLog().info("Updating target clock offset to " + offsetMs + "ms from " + _offset + "ms, Stratum " + stratum);
if (!_statCreated) {
_context.statManager().createRequiredRateStat("clock.skew", "Clock step adjustment (ms)", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("clock.skew", "Clock step adjustment (ms)", "Clock", new long[] { 60*60*1000 });
_statCreated = true;
}
_context.statManager().addRateData("clock.skew", delta);

View File

@@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 1;
public final static long BUILD = 3;
/** for example "-test" */
public final static String EXTRA = "";

View File

@@ -43,6 +43,7 @@ import net.i2p.router.ClientMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer2;
@@ -67,6 +68,8 @@ class ClientManager {
// ClientConnectionRunner for clients w/out a Dest yet
private final Set<ClientConnectionRunner> _pendingRunners;
private final Set<SessionId> _runnerSessionIds;
private final Set<Destination> _metaDests;
private final Set<Hash> _metaHashes;
protected final RouterContext _ctx;
protected final int _port;
protected volatile boolean _isStarted;
@@ -100,11 +103,13 @@ class ClientManager {
// "How large are messages received by the client?",
// "ClientMessages",
// new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_listeners = new ArrayList<ClientListenerRunner>();
_runners = new ConcurrentHashMap<Destination, ClientConnectionRunner>();
_runnersByHash = new ConcurrentHashMap<Hash, ClientConnectionRunner>();
_pendingRunners = new HashSet<ClientConnectionRunner>();
_runnerSessionIds = new HashSet<SessionId>();
_listeners = new ArrayList<ClientListenerRunner>(4);
_runners = new ConcurrentHashMap<Destination, ClientConnectionRunner>(4);
_runnersByHash = new ConcurrentHashMap<Hash, ClientConnectionRunner>(4);
_pendingRunners = new HashSet<ClientConnectionRunner>(4);
_runnerSessionIds = new HashSet<SessionId>(4);
_metaDests = new ConcurrentHashSet<Destination>(4);
_metaHashes = new ConcurrentHashSet<Hash>(4);
_port = port;
_clientTimestamper = new ClientTimestamper();
// following are for RequestLeaseSetJob
@@ -367,6 +372,37 @@ class ClientManager {
return rv;
}
/**
* Declare that we're going to publish a meta LS for this destination.
* Must be called before publishing the leaseset.
*
* @throws I2PSessionException on duplicate dest
* @since 0.9.41
*/
public void registerMetaDest(Destination dest) throws I2PSessionException {
synchronized (_runners) {
if (_runners.containsKey(dest) || _metaDests.contains(dest)) {
String msg = "Client attempted to register duplicate destination " + dest.toBase32();
_log.error(msg);
throw new I2PSessionException(msg);
}
_metaDests.add(dest);
_metaHashes.add(dest.calculateHash());
}
}
/**
* Declare that we're no longer going to publish a meta LS for this destination.
*
* @since 0.9.41
*/
public void unregisterMetaDest(Destination dest) {
synchronized (_runners) {
_metaDests.remove(dest);
_metaHashes.remove(dest.calculateHash());
}
}
/**
* Generate a new random, unused sessionId. Caller must synch on _runners.
* @return null on failure
@@ -409,6 +445,15 @@ class ClientManager {
Job j = new DistributeLocal(toDest, runner, sender, fromDest, payload, msgId, messageNonce);
//_ctx.jobQueue().addJob(j);
j.runJob();
} else if (!_metaDests.isEmpty() && _metaDests.contains(toDest)) {
// meta dests don't have runners but are local, and you can't send to them
ClientConnectionRunner sender = getRunner(fromDest);
if (sender == null) {
// sender went away
return;
}
int rc = MessageStatusMessage.STATUS_SEND_FAILURE_BAD_LEASESET;
sender.updateMessageDeliveryStatus(fromDest, msgId, messageNonce, rc);
} else {
// remote. w00t
if (_log.shouldLog(Log.DEBUG))
@@ -515,18 +560,20 @@ class ClientManager {
}
/**
* Unsynchronized
* Unsynchronized.
* DOES contain meta destinations.
*/
public boolean isLocal(Destination dest) {
return _runners.containsKey(dest);
return _runners.containsKey(dest) || _metaDests.contains(dest);
}
/**
* Unsynchronized
* Unsynchronized.
* DOES contain meta destinations.
*/
public boolean isLocal(Hash destHash) {
if (destHash == null) return false;
return _runnersByHash.containsKey(destHash);
return _runnersByHash.containsKey(destHash) || _metaHashes.contains(destHash);
}
/**
@@ -542,7 +589,8 @@ class ClientManager {
}
/**
* Unsynchronized
* Unsynchronized.
* Does NOT contain meta destinations.
*/
public Set<Destination> listClients() {
Set<Destination> rv = new HashSet<Destination>();
@@ -725,7 +773,7 @@ class ClientManager {
if (_log.shouldLog(Log.WARN))
_log.warn("Message received but we don't have a connection to "
+ dest + "/" + _msg.getDestinationHash()
+ " currently. DROPPED");
+ " currently. DROPPED", new Exception());
}
}
}

View File

@@ -266,4 +266,28 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade implements Inte
return _manager.internalConnect();
throw new I2PSessionException("No manager yet");
}
/**
* Declare that we're going to publish a meta LS for this destination.
* Must be called before publishing the leaseset.
*
* @throws I2PSessionException on duplicate dest
* @since 0.9.41
*/
@Override
public void registerMetaDest(Destination dest) throws I2PSessionException {
if (_manager != null)
_manager.registerMetaDest(dest);
}
/**
* Declare that we're no longer going to publish a meta LS for this destination.
*
* @since 0.9.41
*/
@Override
public void unregisterMetaDest(Destination dest) {
if (_manager != null)
_manager.unregisterMetaDest(dest);
}
}

View File

@@ -14,6 +14,7 @@ import java.util.Properties;
import net.i2p.CoreVersion;
import net.i2p.crypto.EncType;
import net.i2p.crypto.SigType;
import net.i2p.data.Base64;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
@@ -550,6 +551,13 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
Destination dest = cfg.getDestination();
if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
// so we can decrypt it
// secret must be set before destination
String secret = cfg.getOptions().getProperty("i2cp.leaseSetSecret");
if (secret != null) {
EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;
secret = DataHelper.getUTF8(Base64.decode(secret));
encls.setSecret(secret);
}
try {
ls.setDestination(dest);
} catch (RuntimeException re) {
@@ -558,6 +566,15 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
_runner.disconnectClient(re.toString());
return;
}
// per-client auth
// we have to do this before verifySignature()
String pk = cfg.getOptions().getProperty("i2cp.leaseSetPrivKey");
if (pk != null) {
byte[] priv = Base64.decode(pk);
PrivateKey privkey = new PrivateKey(EncType.ECIES_X25519, priv);
EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;
encls.setClientPrivateKey(privkey);
}
// we have to do this before checking encryption keys below
if (!ls.verifySignature()) {
if (_log.shouldError())
@@ -642,11 +659,6 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
_runner.disconnectClient("Duplicate hash of encrypted LS2");
return;
}
String secret = cfg.getOptions().getProperty("i2cp.leaseSetSecret");
if (secret != null) {
EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;
encls.setSecret(secret);
}
}
if (_log.shouldDebug())
_log.debug("Publishing: " + ls);

View File

@@ -99,7 +99,8 @@ class LookupDestJob extends JobImpl {
}
h = bd.getBlindedHash();
if (_log.shouldDebug())
_log.debug("Converting name lookup " + name + " to blinded " + h);
_log.debug("Converting name lookup " + name + " to blinded " + h +
" using BlindData:\n" + bd);
name = null;
} catch (RuntimeException re) {
if (_log.shouldWarn())

View File

@@ -17,6 +17,7 @@ import java.util.Set;
import net.i2p.I2PAppContext;
import net.i2p.crypto.AESEngine;
import net.i2p.crypto.EncType;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
@@ -97,6 +98,8 @@ public final class ElGamalAESEngine {
_log.error("Data is less than the minimum size (" + data.length + " < " + MIN_ENCRYPTED_SIZE + ")");
return null;
}
if (targetPrivateKey.getType() != EncType.ELGAMAL_2048)
return null;
byte tag[] = new byte[32];
System.arraycopy(data, 0, tag, 0, 32);
@@ -399,7 +402,8 @@ public final class ElGamalAESEngine {
* no less than the paddedSize parameter, but may be more. This method uses the
* ElGamal+AES algorithm in the data structure spec.
*
* @param target public key to which the data should be encrypted.
* @param target public key to which the data should be encrypted, must be ELGAMAL_2048.
* May be null if key and currentTag are non-null.
* @param key session key to use during encryption
* @param tagsForDelivery session tags to be associated with the key (or newKey if specified), or null;
* 200 max enforced at receiver
@@ -407,11 +411,17 @@ public final class ElGamalAESEngine {
* @param newKey key to be delivered to the target, with which the tagsForDelivery should be associated, or null
* @param paddedSize minimum size in bytes of the body after padding it (if less than the
* body's real size, no bytes are appended but the body is not truncated)
* @throws IllegalArgumentException on bad target EncType
*
* Unused externally, only called by below (i.e. newKey is always null)
*/
public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set<SessionTag> tagsForDelivery,
SessionTag currentTag, SessionKey newKey, long paddedSize) {
if (target != null) {
EncType type = target.getType();
if (type != EncType.ELGAMAL_2048)
throw new IllegalArgumentException("Bad public key type " + type);
}
if (currentTag == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Current tag is null, encrypting as new session");
@@ -420,8 +430,9 @@ public final class ElGamalAESEngine {
}
//if (_log.shouldLog(Log.INFO))
// _log.info("Current tag is NOT null, encrypting as existing session");
// target unused, using key and tag only
_context.statManager().updateFrequency("crypto.elGamalAES.encryptExistingSession");
byte rv[] = encryptExistingSession(data, target, key, tagsForDelivery, currentTag, newKey, paddedSize);
byte rv[] = encryptExistingSession(data, key, tagsForDelivery, currentTag, newKey, paddedSize);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Existing session encrypted with tag: " + currentTag.toString() + ": " + rv.length + " bytes and key: " + key.toBase64() /* + ": " + Base64.encode(rv, 0, 64) */);
return rv;
@@ -447,13 +458,15 @@ public final class ElGamalAESEngine {
* or a 514-byte ElGamal block and several 32-byte session tags for a new session.
* So the returned encrypted data will be at least 32 bytes larger than paddedSize.
*
* @param target public key to which the data should be encrypted.
* @param target public key to which the data should be encrypted, must be ELGAMAL_2048.
* May be null if key and currentTag are non-null.
* @param key session key to use during encryption
* @param tagsForDelivery session tags to be associated with the key or null;
* 200 max enforced at receiver
* @param currentTag sessionTag to use, or null if it should use ElG (i.e. new session)
* @param paddedSize minimum size in bytes of the body after padding it (if less than the
* body's real size, no bytes are appended but the body is not truncated)
* @throws IllegalArgumentException on bad target EncType
*
*/
public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set<SessionTag> tagsForDelivery,
@@ -468,6 +481,7 @@ public final class ElGamalAESEngine {
*
* @param tagsForDelivery session tags to be associated with the key or null;
* 200 max enforced at receiver
* @throws IllegalArgumentException on bad target EncType
* @deprecated unused
*/
public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set<SessionTag> tagsForDelivery, long paddedSize) {
@@ -479,6 +493,7 @@ public final class ElGamalAESEngine {
* No new session key
* No current tag (encrypt as new session)
*
* @throws IllegalArgumentException on bad target EncType
* @deprecated unused
*/
public byte[] encrypt(byte data[], PublicKey target, SessionKey key, long paddedSize) {
@@ -573,11 +588,10 @@ public final class ElGamalAESEngine {
* - random bytes, padding the total size to greater than paddedSize with a mod 16 = 0
* </pre>
*
* @param target unused, this is AES encrypt only using the session key and tag
* @param tagsForDelivery session tags to be associated with the key or null;
* 200 max enforced at receiver
*/
private byte[] encryptExistingSession(byte data[], PublicKey target, SessionKey key, Set<SessionTag> tagsForDelivery,
private byte[] encryptExistingSession(byte data[], SessionKey key, Set<SessionTag> tagsForDelivery,
SessionTag currentTag, SessionKey newKey, long paddedSize) {
//_log.debug("Encrypting to an EXISTING session");
byte rawTag[] = currentTag.getData();

View File

@@ -25,6 +25,7 @@ import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
import net.i2p.I2PAppContext;
import net.i2p.crypto.EncType;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.crypto.TagSetHandle;
import net.i2p.data.DataHelper;
@@ -283,6 +284,8 @@ public class TransientSessionKeyManager extends SessionKeyManager {
* Retrieve the session key currently associated with encryption to the target.
* Generates a new session and session key if not previously exising.
*
* @param target public key to which the data should be encrypted, must be ELGAMAL_2048.
* @throws IllegalArgumentException on bad target EncType
* @return non-null
* @since 0.9
*/
@@ -310,6 +313,9 @@ public class TransientSessionKeyManager extends SessionKeyManager {
*
* Racy if called after getCurrentKey() to check for a current session;
* use getCurrentOrNewKey() in that case.
*
* @param target public key to which the data should be encrypted, must be ELGAMAL_2048.
* @throws IllegalArgumentException on bad target EncType
*/
@Override
public void createSession(PublicKey target, SessionKey key) {
@@ -322,6 +328,9 @@ public class TransientSessionKeyManager extends SessionKeyManager {
*
*/
private OutboundSession createAndReturnSession(PublicKey target, SessionKey key) {
EncType type = target.getType();
if (type != EncType.ELGAMAL_2048)
throw new IllegalArgumentException("Bad public key type " + type);
if (_log.shouldLog(Log.INFO))
_log.info("New OB session, sesskey: " + key + " target: " + toString(target));
OutboundSession sess = new OutboundSession(_context, _log, target, key);

View File

@@ -8,6 +8,8 @@ import java.io.InputStreamReader;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.crypto.Blinding;
@@ -166,7 +168,19 @@ class BlindCache {
}
}
/**
* Persists immediately if secret or privkey is non-null
*/
public void addToCache(BlindData bd) {
storeInCache(bd);
if (bd.getSecret() != null || bd.getAuthPrivKey() != null)
store();
}
/**
* @since 0.9.41 from addToCache()
*/
private void storeInCache(BlindData bd) {
_cache.put(bd.getUnblindedPubKey(), bd);
_reverseCache.put(bd.getBlindedPubKey(), bd);
Destination dest = bd.getDestination();
@@ -225,6 +239,39 @@ class BlindCache {
}
}
/**
* For console ConfigKeyringHelper
* @return list is copied
* @since 0.9.41
*/
public synchronized List<BlindData> getData() {
List<BlindData> rv = new ArrayList<BlindData>(_cache.size());
rv.addAll(_cache.values());
return rv;
}
/**
* For console ConfigKeyringHelper.
* Persists immediately if removed.
*
* @param spk the unblinded public key
* @return true if removed
* @since 0.9.41
*/
public boolean removeBlindData(SigningPublicKey spk) {
boolean rv = false;
BlindData bd = _cache.remove(spk);
if (bd != null) {
rv = true;
_reverseCache.remove(bd.getBlindedPubKey());
Hash h = bd.getDestHash();
if (h != null)
_hashCache.remove(h);
store();
}
return rv;
}
/**
* Load from file.
* Format:
@@ -245,14 +292,14 @@ class BlindCache {
if (line.startsWith("#"))
continue;
try {
addToCache(fromPersistentString(line));
storeInCache(fromPersistentString(line));
count++;
} catch (IllegalArgumentException iae) {
if (log.shouldLog(Log.WARN))
log.warn("Error reading cache entry", iae);
log.warn("Error reading cache entry: " + line, iae);
} catch (DataFormatException dfe) {
if (log.shouldLog(Log.WARN))
log.warn("Error reading cache entry", dfe);
log.warn("Error reading cache entry: " + line, dfe);
}
}
} catch (IOException ioe) {
@@ -334,15 +381,15 @@ class BlindCache {
privkey = null;
}
BlindData rv;
// TODO pass privkey
if (ss[7].length() > 0) {
Destination dest = new Destination(ss[7]);
if (!spk.equals(dest.getSigningPublicKey()))
throw new DataFormatException("spk mismatch");
rv = new BlindData(_context, dest, st2, secret);
rv = new BlindData(_context, dest, st2, secret, auth, privkey);
} else {
rv = new BlindData(_context, spk, st2, secret);
rv = new BlindData(_context, spk, st2, secret, auth, privkey);
}
rv.setDate(time);
return rv;
}
@@ -356,8 +403,7 @@ class BlindCache {
buf.append(spk.getType().getCode()).append(',');
buf.append(bd.getBlindedSigType().getCode()).append(',');
buf.append(bd.getAuthType()).append(',');
// timestamp todo
buf.append('0').append(',');
buf.append(bd.getDate()).append(',');
buf.append(spk.toBase64()).append(',');
String secret = bd.getSecret();
if (secret != null && secret.length() > 0)

View File

@@ -103,7 +103,8 @@ class FloodfillVerifyStoreJob extends JobImpl {
boolean isInboundExploratory;
TunnelInfo replyTunnelInfo;
if (_isRouterInfo || getContext().keyRing().get(_key) != null) {
if (_isRouterInfo || getContext().keyRing().get(_key) != null ||
_type == DatabaseEntry.KEY_TYPE_META_LS2) {
replyTunnelInfo = getContext().tunnelManager().selectInboundExploratoryTunnel(_target);
isInboundExploratory = true;
} else {
@@ -122,10 +123,12 @@ class FloodfillVerifyStoreJob extends JobImpl {
// to avoid association by the exploratory tunnel OBEP.
// Unless it is an encrypted leaseset.
TunnelInfo outTunnel;
if (_isRouterInfo || getContext().keyRing().get(_key) != null)
if (_isRouterInfo || getContext().keyRing().get(_key) != null ||
_type == DatabaseEntry.KEY_TYPE_META_LS2) {
outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(_target);
else
} else {
outTunnel = getContext().tunnelManager().selectOutboundTunnel(_client, _target);
}
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnels to verify a store");

View File

@@ -13,6 +13,7 @@ import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.LeaseSet2;
import net.i2p.data.TunnelId;
@@ -21,6 +22,7 @@ import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
@@ -293,14 +295,93 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
tgm2.setMessageExpiration(msg.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(tgm2);
}
} else if (toUs || getContext().commSystem().isEstablished(toPeer)) {
return;
}
if (toUs) {
Job send = new SendMessageDirectJob(getContext(), msg, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), msg2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send2.runJob();
}
} else {
return;
}
boolean isEstab = getContext().commSystem().isEstablished(toPeer);
if (!isEstab && replyTunnel != null) {
DatabaseEntry entry = _message.getEntry();
int type = entry.getType();
if (type == DatabaseEntry.KEY_TYPE_LEASESET || type == DatabaseEntry.KEY_TYPE_LS2) {
// As of 0.9.42,
// if reply GW and tunnel are in the LS, we can pick a different one from the LS,
// so look for one that's connected to reduce connections
LeaseSet ls = (LeaseSet) entry;
int count = ls.getLeaseCount();
if (count > 1) {
boolean found = false;
for (int i = 0; i < count; i++) {
Lease lease = ls.getLease(i);
if (lease.getGateway().equals(toPeer) && lease.getTunnelId().equals(replyTunnel)) {
found = true;
break;
}
}
if (found) {
//_log.warn("Looking for alternate to " + toPeer + " reply gw in LS with " + count + " leases");
for (int i = 0; i < count; i++) {
Lease lease = ls.getLease(i);
Hash gw = lease.getGateway();
if (gw.equals(toPeer))
continue;
if (lease.isExpired())
continue;
if (getContext().commSystem().isEstablished(gw)) {
// switch to use this lease instead
toPeer = gw;
replyTunnel = lease.getTunnelId();
isEstab = true;
break;
}
}
if (_log.shouldWarn()) {
if (isEstab)
_log.warn("Switched to alt connected peer " + toPeer + " in LS with " + count + " leases");
else
_log.warn("Alt connected peer not found in LS with " + count + " leases");
}
} else {
if (_log.shouldWarn())
_log.warn("Reply gw not found in LS with " + count + " leases");
}
}
}
}
if (isEstab) {
I2NPMessage out1 = msg;
I2NPMessage out2 = msg2;
if (replyTunnel != null) {
// wrap reply in a TGM
TunnelGatewayMessage tgm = new TunnelGatewayMessage(getContext());
tgm.setMessage(msg);
tgm.setTunnelId(replyTunnel);
tgm.setMessageExpiration(msg.getMessageExpiration());
out1 = tgm;
if (out2 != null) {
TunnelGatewayMessage tgm2 = new TunnelGatewayMessage(getContext());
tgm2.setMessage(msg2);
tgm2.setTunnelId(replyTunnel);
tgm2.setMessageExpiration(msg.getMessageExpiration());
out2 = tgm2;
}
}
Job send = new SendMessageDirectJob(getContext(), out1, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), out2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send2.runJob();
}
return;
}
// pick tunnel with endpoint closest to toPeer
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(toPeer);
if (outTunnel == null) {
@@ -313,7 +394,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (msg2 != null)
getContext().tunnelDispatcher().dispatchOutbound(msg2, outTunnel.getSendTunnelId(0),
replyTunnel, toPeer);
}
}
public String getName() { return "Handle Database Store Message"; }

View File

@@ -10,6 +10,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.crypto.EncType;
import net.i2p.crypto.SigType;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
@@ -425,6 +426,13 @@ public class IterativeSearchJob extends FloodSearchJob {
// request encrypted reply
// now covered by version check above, which is more recent
//if (DatabaseLookupMessage.supportsEncryptedReplies(ri)) {
EncType type = ri.getIdentity().getPublicKey().getType();
if (type != EncType.ELGAMAL_2048) {
failed(peer, false);
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Can't do encrypted lookup to " + peer + " with EncType " + type);
return;
}
if (true) {
MessageWrapper.OneTimeSession sess;
if (isClientReplyTunnel)

View File

@@ -16,6 +16,7 @@ import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -492,6 +493,24 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
_log.warn("Adding to blind cache: " + bd);
_blindCache.addToCache(bd);
}
/**
* For console ConfigKeyringHelper
* @since 0.9.41
*/
public List<BlindData> getBlindData() {
return _blindCache.getData();
}
/**
* For console ConfigKeyringHelper
* @param spk the unblinded public key
* @return true if removed
* @since 0.9.41
*/
public boolean removeBlindData(SigningPublicKey spk) {
return _blindCache.removeBlindData(spk);
}
/**
* @return RouterInfo, LeaseSet, or null, validated
@@ -925,27 +944,40 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
// spoof / hash collision detection
// todo allow non-exp to overwrite exp
if (rv != null && !leaseSet.getDestination().equals(rv.getDestination()))
throw new IllegalArgumentException("LS Hash collision");
if (rv != null) {
Destination d1 = leaseSet.getDestination();
Destination d2 = rv.getDestination();
if (d1 != null && d2 != null && !d1.equals(d2))
throw new IllegalArgumentException("LS Hash collision");
}
EncryptedLeaseSet encls = null;
if (leaseSet.getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
int type = leaseSet.getType();
if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
// set dest or key before validate() calls verifySignature() which
// will do the decryption
encls = (EncryptedLeaseSet) leaseSet;
BlindData bd = _blindCache.getReverseData(leaseSet.getSigningKey());
if (bd != null) {
if (_log.shouldWarn())
_log.warn("Found blind data for encls: " + bd);
encls = (EncryptedLeaseSet) leaseSet;
// secret must be set before destination
String secret = bd.getSecret();
if (secret != null)
encls.setSecret(secret);
Destination dest = bd.getDestination();
if (dest != null) {
encls.setDestination(dest);
} else {
encls.setSigningKey(bd.getUnblindedPubKey());
}
// per-client auth
if (bd.getAuthType() != BlindData.AUTH_NONE)
encls.setClientPrivateKey(bd.getAuthPrivKey());
} else {
if (_log.shouldWarn())
_log.warn("No blind data found for encls: " + encls);
// if we created it, there's no blind data, but it's still decrypted
if (encls.getDecryptedLeaseSet() == null && _log.shouldWarn())
_log.warn("No blind data found for encls: " + leaseSet);
}
}
@@ -967,6 +999,14 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
store(dest.getHash(), decls);
_blindCache.setBlinded(dest);
}
} else if (type == DatabaseEntry.KEY_TYPE_LS2 || type == DatabaseEntry.KEY_TYPE_META_LS2) {
// if it came in via garlic
LeaseSet2 ls2 = (LeaseSet2) leaseSet;
if (ls2.isBlindedWhenPublished()) {
Destination dest = leaseSet.getDestination();
if (dest != null)
_blindCache.setBlinded(dest, null, null);
}
}
// Iterate through the old failure / success count, copying over the old
@@ -1185,19 +1225,22 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
if (DatabaseEntry.isLeaseSet(etype)) {
LeaseSet ls = (LeaseSet) entry;
Destination d = ls.getDestination();
Certificate c = d.getCertificate();
if (c.getCertificateType() == Certificate.CERTIFICATE_TYPE_KEY) {
try {
KeyCertificate kc = c.toKeyCertificate();
SigType type = kc.getSigType();
if (type == null || !type.isAvailable() || type.getBaseAlgorithm() == SigAlgo.RSA) {
failPermanently(d);
String stype = (type != null) ? type.toString() : Integer.toString(kc.getSigTypeCode());
if (_log.shouldLog(Log.WARN))
_log.warn("Unsupported sig type " + stype + " for destination " + h);
throw new UnsupportedCryptoException("Sig type " + stype);
}
} catch (DataFormatException dfe) {}
// will be null for encrypted LS
if (d != null) {
Certificate c = d.getCertificate();
if (c.getCertificateType() == Certificate.CERTIFICATE_TYPE_KEY) {
try {
KeyCertificate kc = c.toKeyCertificate();
SigType type = kc.getSigType();
if (type == null || !type.isAvailable() || type.getBaseAlgorithm() == SigAlgo.RSA) {
failPermanently(d);
String stype = (type != null) ? type.toString() : Integer.toString(kc.getSigTypeCode());
if (_log.shouldLog(Log.WARN))
_log.warn("Unsupported sig type " + stype + " for destination " + h);
throw new UnsupportedCryptoException("Sig type " + stype);
}
} catch (DataFormatException dfe) {}
}
}
} else if (etype == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;

View File

@@ -3,6 +3,7 @@ package net.i2p.router.networkdb.kademlia;
import java.util.HashSet;
import java.util.Set;
import net.i2p.crypto.EncType;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.crypto.TagSetHandle;
import net.i2p.data.Certificate;
@@ -40,9 +41,14 @@ public class MessageWrapper {
*
* @param from must be a local client with a session key manager,
* or null to use the router's session key manager
* @param to must be ELGAMAL_2048 EncType
* @return null on encrypt failure
*/
static WrappedMessage wrap(RouterContext ctx, I2NPMessage m, Hash from, RouterInfo to) {
PublicKey sentTo = to.getIdentity().getPublicKey();
if (sentTo.getType() != EncType.ELGAMAL_2048)
return null;
PayloadGarlicConfig payload = new PayloadGarlicConfig(Certificate.NULL_CERT,
ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE),
m.getMessageExpiration(),
@@ -63,7 +69,6 @@ public class MessageWrapper {
if (msg == null)
return null;
TagSetHandle tsh = null;
PublicKey sentTo = to.getIdentity().getPublicKey();
if (!sentTags.isEmpty())
tsh = skm.tagsDelivered(sentTo, sentKey, sentTags);
//if (_log.shouldLog(Log.DEBUG))
@@ -118,10 +123,15 @@ public class MessageWrapper {
* to hide the contents from the OBEP.
* Forces ElGamal.
*
* @param to must be ELGAMAL_2048 EncType
* @return null on encrypt failure
* @since 0.9.5
*/
static GarlicMessage wrap(RouterContext ctx, I2NPMessage m, RouterInfo to) {
PublicKey key = to.getIdentity().getPublicKey();
if (key.getType() != EncType.ELGAMAL_2048)
return null;
PayloadGarlicConfig payload = new PayloadGarlicConfig(Certificate.NULL_CERT,
ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE),
m.getMessageExpiration(),
@@ -129,7 +139,6 @@ public class MessageWrapper {
payload.setRecipient(to);
SessionKey sentKey = ctx.keyGenerator().generateSessionKey();
PublicKey key = to.getIdentity().getPublicKey();
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, payload, null, null,
key, sentKey, null);
return msg;

View File

@@ -40,6 +40,7 @@ import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.util.SecureDirectory;
import net.i2p.util.SecureFileOutputStream;
import net.i2p.util.SystemVersion;
/**
* Write out keys to disk when we get them and periodically read ones we don't know
@@ -342,6 +343,7 @@ public class PersistentDataStore extends TransientDataStore {
private class ReadJob extends JobImpl {
private volatile long _lastModified;
private volatile long _lastReseed;
private volatile boolean _setNetDbReady;
private static final int MIN_ROUTERS = KademliaNetworkDatabaseFacade.MIN_RESEED;
private static final long MIN_RESEED_INTERVAL = 90*60*1000;
@@ -434,10 +436,24 @@ public class PersistentDataStore extends TransientDataStore {
}
}
Collections.shuffle(toRead, _context.random());
int i = 0;
for (File file : toRead) {
Hash key = getRouterInfoHash(file.getName());
if (key != null && !isKnown(key))
if (key != null && !isKnown(key)) {
(new ReadRouterJob(file, key)).runJob();
if (i++ == 150 && SystemVersion.isSlow() && !_initialized) {
// Can take 2 minutes to load them all on Android,
// after we have already built expl. tunnels.
// This is enough to let i2ptunnel get started.
// Do not set _initialized yet so we don't start rescanning.
_setNetDbReady = true;
_context.router().setNetDbReady();
} else if (i == 500 && !_setNetDbReady) {
// do this for faster systems also at 500
_setNetDbReady = true;
_context.router().setNetDbReady();
}
}
}
}
@@ -447,6 +463,7 @@ public class PersistentDataStore extends TransientDataStore {
_lastReseed = _context.clock().now();
// checkReseed will call wakeup() when done and we will run again
} else {
_setNetDbReady = true;
_context.router().setNetDbReady();
}
} else if (_lastReseed < _context.clock().now() - MIN_RESEED_INTERVAL) {
@@ -456,7 +473,19 @@ public class PersistentDataStore extends TransientDataStore {
_lastReseed = _context.clock().now();
// checkReseed will call wakeup() when done and we will run again
} else {
_context.router().setNetDbReady();
if (!_setNetDbReady) {
_setNetDbReady = true;
_context.router().setNetDbReady();
}
}
} else {
// second time through, reseed called wakeup()
if (!_setNetDbReady) {
int count = Math.min(routerCount, size());
if (count >= MIN_ROUTERS) {
_setNetDbReady = true;
_context.router().setNetDbReady();
}
}
}
}

View File

@@ -346,7 +346,9 @@ abstract class StoreJob extends JobImpl {
getContext().statManager().addRateData("netDb.storeLeaseSetSent", 1);
// if it is an encrypted leaseset...
if (getContext().keyRing().get(msg.getKey()) != null)
sendStoreThroughGarlic(msg, peer, expiration);
sendStoreThroughExploratory(msg, peer, expiration);
else if (msg.getEntry().getType() == DatabaseEntry.KEY_TYPE_META_LS2)
sendWrappedStoreThroughExploratory(msg, peer, expiration);
else
sendStoreThroughClient(msg, peer, expiration);
} else {
@@ -355,7 +357,7 @@ abstract class StoreJob extends JobImpl {
if (_connectChecker.canConnect(_connectMask, peer))
sendDirect(msg, peer, expiration);
else
sendStoreThroughGarlic(msg, peer, expiration);
sendStoreThroughExploratory(msg, peer, expiration);
}
}
@@ -387,12 +389,13 @@ abstract class StoreJob extends JobImpl {
}
/**
* This is misnamed, it means sending it out through an exploratory tunnel,
* Send it out through an exploratory tunnel,
* with the reply to come back through an exploratory tunnel.
* There is no garlic encryption added.
*
* @since 0.9.41 renamed from sendStoreThroughGarlic()
*/
private void sendStoreThroughGarlic(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
private void sendStoreThroughExploratory(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
long token = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
Hash to = peer.getIdentity().getHash();
@@ -514,6 +517,76 @@ abstract class StoreJob extends JobImpl {
}
}
/**
* Send a leaseset store message out an exploratory tunnel,
* with the reply to come back through a exploratory tunnel.
* Stores are garlic encrypted to hide the identity from the OBEP.
*
* Only for Meta LS2, for now.
*
* @param msg must contain a leaseset
* @since 0.9.41
*/
private void sendWrappedStoreThroughExploratory(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
long token = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
Hash to = peer.getIdentity().getHash();
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
if (replyTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No inbound expl. tunnels for reply - delaying...");
// continueSending() above did an addPending() so remove it here.
// This means we will skip the peer next time, can't be helped for now
// without modding StoreState
_state.replyTimeout(to);
Job waiter = new WaitJob(getContext());
waiter.getTiming().setStartAfter(getContext().clock().now() + 3*1000);
getContext().jobQueue().addJob(waiter);
return;
}
TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
msg.setReplyToken(token);
msg.setReplyTunnel(replyTunnelId);
msg.setReplyGateway(replyTunnel.getPeer(0));
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": send(dbStore) w/ token expected " + token);
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
if (outTunnel != null) {
I2NPMessage sent;
// garlic encrypt using router SKM
MessageWrapper.WrappedMessage wm = MessageWrapper.wrap(getContext(), msg, null, peer);
if (wm == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Fail garlic encrypting");
fail();
return;
}
sent = wm.getMessage();
_state.addPending(to, wm);
SendSuccessJob onReply = new SendSuccessJob(getContext(), peer, outTunnel, sent.getMessageSize());
FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
if (_log.shouldLog(Log.DEBUG)) {
_log.debug(getJobId() + ": sending encrypted store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent);
}
getContext().messageRegistry().registerPending(selector, onReply, onFail);
getContext().tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound expl. tunnels to send a dbStore out - delaying...");
// continueSending() above did an addPending() so remove it here.
// This means we will skip the peer next time, can't be helped for now
// without modding StoreState
_state.replyTimeout(to);
Job waiter = new WaitJob(getContext());
waiter.getTiming().setStartAfter(getContext().clock().now() + 3*1000);
getContext().jobQueue().addJob(waiter);
}
}
/**
* Called to wait a little while
* @since 0.7.10

View File

@@ -39,9 +39,9 @@ class StoreMessageSelector implements MessageSelector {
public long getExpiration() { return _expiration; }
public boolean isMatch(I2NPMessage message) {
if (_log.shouldDebug())
_log.debug(_storeJobId + ": isMatch(" + message.getClass().getSimpleName() + ") [want DSM from "
+ _peer + ']');
//if (_log.shouldDebug())
// _log.debug(_storeJobId + ": isMatch(" + message.getClass().getSimpleName() + ") [want DSM from "
// + _peer + ']');
if (message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
DeliveryStatusMessage msg = (DeliveryStatusMessage)message;
if (msg.getMessageId() == _waitingForId) {
@@ -50,9 +50,9 @@ class StoreMessageSelector implements MessageSelector {
_found = true;
return true;
} else {
if (_log.shouldDebug())
_log.debug(_storeJobId + ": DeliveryStatusMessage of " + msg.getMessageId() +
" but waiting for " + _waitingForId);
//if (_log.shouldDebug())
// _log.debug(_storeJobId + ": DeliveryStatusMessage of " + msg.getMessageId() +
// " but waiting for " + _waitingForId);
return false;
}
} else {

View File

@@ -112,7 +112,7 @@ public class Reseeder {
"https://reseed.i2p.net.in/" + ',' + // reseedi2pnetin_at_mail.i2p.crt // CA // Java 8+ only, TLS 1.2 only
"https://i2p.novg.net/" + ',' + // igor_at_novg.net.crt // CA // Java 8+ only
"https://i2pseed.creativecowpat.net:8443/" + ',' + // creativecowpat_at_mail.i2p.crt // i2pseed.creativecowpat.net.crt // Java 7+
"https://itoopie.atomike.ninja/" + ',' + // atomike_at_mail.i2p.crt // CA // Java 8+ only
//"https://itoopie.atomike.ninja/" + ',' + // atomike_at_mail.i2p.crt // CA // Java 8+ only
"https://reseed.onion.im/" + ',' + // lazygravy_at_mail.i2p // reseed.onion.im.crt // Java 8+ only
"https://reseed.memcpy.io/" + ',' + // hottuna_at_mail.i2p.crt // CA // SNI required
//"https://reseed.atomike.ninja/" + ',' + // atomike_at_mail.i2p.crt // CA // SNI required, Java 8+ only
@@ -798,6 +798,11 @@ public class Reseeder {
stats = extractZip(contentRaw);
fetched = stats[0];
errors = stats[1];
if (fetched == 0) {
s = "Reseed got no router infos " + s;
System.err.println(s);
_log.error(s);
}
} catch (Throwable t) {
String s = getDisplayString(seedURL);
System.err.println("Error reseeding " + s + ": " + t);

View File

@@ -237,12 +237,27 @@ public class DBHistory {
private final static String NL = System.getProperty("line.separator");
/**
* write out the data from the profile to the stream
* includes comments
*/
public void store(OutputStream out) throws IOException {
store(out, true);
}
/**
* write out the data from the profile to the stream
* @param addComments add comment lines to the output
* @since 0.9.41
*/
public void store(OutputStream out, boolean addComments) throws IOException {
StringBuilder buf = new StringBuilder(512);
buf.append(NL);
buf.append("#################").append(NL);
buf.append("# DB history").append(NL);
buf.append("###").append(NL);
if (addComments) {
buf.append(NL);
buf.append("#################").append(NL);
buf.append("# DB history").append(NL);
buf.append("###").append(NL);
}
//add(buf, "successfulLookups", _successfulLookups, "How many times have they successfully given us what we wanted when looking for it?");
//add(buf, "failedLookups", _failedLookups, "How many times have we sent them a db lookup and they didn't reply?");
//add(buf, "lookupsReceived", _lookupsReceived, "How many lookups have they sent us?");
@@ -250,23 +265,26 @@ public class DBHistory {
//add(buf, "lookupReplyInvalid", _lookupReplyInvalid, "How many of their reply values to our lookups were invalid (expired, forged, corrupted)?");
//add(buf, "lookupReplyNew", _lookupReplyNew, "How many of their reply values to our lookups were brand new to us?");
//add(buf, "lookupReplyOld", _lookupReplyOld, "How many of their reply values to our lookups were something we had seen before?");
add(buf, "unpromptedDbStoreNew", _unpromptedDbStoreNew, "How times have they sent us something we didn't ask for and hadn't seen before?");
add(buf, "unpromptedDbStoreOld", _unpromptedDbStoreOld, "How times have they sent us something we didn't ask for but have seen before?");
add(buf, addComments, "unpromptedDbStoreNew", _unpromptedDbStoreNew, "How times have they sent us something we didn't ask for and hadn't seen before?");
add(buf, addComments, "unpromptedDbStoreOld", _unpromptedDbStoreOld, "How times have they sent us something we didn't ask for but have seen before?");
//add(buf, "lastLookupReceived", _lastLookupReceived, "When was the last time they send us a lookup? (milliseconds since the epoch)");
//add(buf, "avgDelayBetweenLookupsReceived", _avgDelayBetweenLookupsReceived, "How long is it typically between each db lookup they send us? (in milliseconds)");
// following 4 weren't persisted until 0.9.24
add(buf, "lastLookupSuccessful", _lastLookupSuccessful, "When was the last time a lookup from them succeeded? (milliseconds since the epoch)");
add(buf, "lastLookupFailed", _lastLookupFailed, "When was the last time a lookup from them failed? (milliseconds since the epoch)");
add(buf, "lastStoreSuccessful", _lastStoreSuccessful, "When was the last time a store to them succeeded? (milliseconds since the epoch)");
add(buf, "lastStoreFailed", _lastStoreFailed, "When was the last time a store to them failed? (milliseconds since the epoch)");
add(buf, addComments, "lastLookupSuccessful", _lastLookupSuccessful, "When was the last time a lookup from them succeeded? (milliseconds since the epoch)");
add(buf, addComments, "lastLookupFailed", _lastLookupFailed, "When was the last time a lookup from them failed? (milliseconds since the epoch)");
add(buf, addComments, "lastStoreSuccessful", _lastStoreSuccessful, "When was the last time a store to them succeeded? (milliseconds since the epoch)");
add(buf, addComments, "lastStoreFailed", _lastStoreFailed, "When was the last time a store to them failed? (milliseconds since the epoch)");
out.write(buf.toString().getBytes("UTF-8"));
_failedLookupRate.store(out, "dbHistory.failedLookupRate");
_invalidReplyRate.store(out, "dbHistory.invalidReplyRate");
_failedLookupRate.store(out, "dbHistory.failedLookupRate", addComments);
_invalidReplyRate.store(out, "dbHistory.invalidReplyRate", addComments);
}
private static void add(StringBuilder buf, String name, long val, String description) {
buf.append("# ").append(name.toUpperCase(Locale.US)).append(NL).append("# ").append(description).append(NL);
buf.append("dbHistory.").append(name).append('=').append(val).append(NL).append(NL);
private static void add(StringBuilder buf, boolean addComments, String name, long val, String description) {
if (addComments)
buf.append("# ").append(name.toUpperCase(Locale.US)).append(NL).append("# ").append(description).append(NL);
buf.append("dbHistory.").append(name).append('=').append(val).append(NL);
if (addComments)
buf.append(NL);
}
@@ -307,8 +325,6 @@ public class DBHistory {
_failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", statGroup, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_invalidReplyRate == null)
_invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", statGroup, new long[] { 30*60*1000l });
_failedLookupRate.setStatLog(_context.statManager().getStatLog());
_invalidReplyRate.setStatLog(_context.statManager().getStatLog());
}
private final static long getLong(Properties props, String key) {

View File

@@ -482,10 +482,6 @@ public class PeerProfile {
if (_tunnelHistory == null)
_tunnelHistory = new TunnelHistory(_context, group);
//_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
//_receiveSize.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_expanded = true;
}
@@ -502,8 +498,6 @@ public class PeerProfile {
if (_dbHistory == null)
_dbHistory = new DBHistory(_context, group);
_dbResponseTime.setStatLog(_context.statManager().getStatLog());
_dbIntroduction.setStatLog(_context.statManager().getStatLog());
_expandedDB = true;
}

View File

@@ -75,7 +75,9 @@ class ProfilePersistenceHelper {
public void setUs(Hash routerIdentHash) { _us = routerIdentHash; }
/** write out the data from the profile to the stream */
/**
* write out the data from the profile to the file
*/
public void writeProfile(PeerProfile profile) {
if (isExpired(profile.getLastSendSuccessful()))
return;
@@ -85,7 +87,7 @@ class ProfilePersistenceHelper {
OutputStream fos = null;
try {
fos = new BufferedOutputStream(new GZIPOutputStream(new SecureFileOutputStream(f)));
writeProfile(profile, fos);
writeProfile(profile, fos, false);
} catch (IOException ioe) {
_log.error("Error writing profile to " + f);
} finally {
@@ -96,8 +98,20 @@ class ProfilePersistenceHelper {
_log.debug("Writing the profile to " + f.getName() + " took " + delay + "ms");
}
/** write out the data from the profile to the stream */
/**
* write out the data from the profile to the stream
* includes comments
*/
public void writeProfile(PeerProfile profile, OutputStream out) throws IOException {
writeProfile(profile, out, true);
}
/**
* write out the data from the profile to the stream
* @param addComments add comment lines to the output
* @since 0.9.41
*/
public void writeProfile(PeerProfile profile, OutputStream out, boolean addComments) throws IOException {
String groups = null;
if (_context.profileOrganizer().isFailing(profile.getPeer())) {
groups = "Failing";
@@ -114,66 +128,79 @@ class ProfilePersistenceHelper {
}
StringBuilder buf = new StringBuilder(512);
buf.append("########################################################################").append(NL);
buf.append("# Profile for peer ").append(profile.getPeer().toBase64()).append(NL);
if (_us != null)
buf.append("# as calculated by ").append(_us.toBase64()).append(NL);
buf.append("#").append(NL);
buf.append("# Speed: ").append(profile.getSpeedValue()).append(NL);
buf.append("# Capacity: ").append(profile.getCapacityValue()).append(NL);
buf.append("# Integration: ").append(profile.getIntegrationValue()).append(NL);
buf.append("# Groups: ").append(groups).append(NL);
buf.append("#").append(NL);
buf.append("########################################################################").append(NL);
buf.append("##").append(NL);
add(buf, "speedBonus", profile.getSpeedBonus(), "Manual adjustment to the speed score");
add(buf, "capacityBonus", profile.getCapacityBonus(), "Manual adjustment to the capacity score");
add(buf, "integrationBonus", profile.getIntegrationBonus(), "Manual adjustment to the integration score");
addDate(buf, "firstHeardAbout", profile.getFirstHeardAbout(), "When did we first get a reference to this peer?");
addDate(buf, "lastHeardAbout", profile.getLastHeardAbout(), "When did we last get a reference to this peer?");
addDate(buf, "lastHeardFrom", profile.getLastHeardFrom(), "When did we last get a message from the peer?");
addDate(buf, "lastSentToSuccessfully", profile.getLastSendSuccessful(), "When did we last send the peer a message successfully?");
addDate(buf, "lastFailedSend", profile.getLastSendFailed(), "When did we last fail to send a message to the peer?");
add(buf, "tunnelTestTimeAverage", profile.getTunnelTestTimeAverage(), "Moving average as to how fast the peer replies");
add(buf, "tunnelPeakThroughput", profile.getPeakThroughputKBps(), "KBytes/sec");
add(buf, "tunnelPeakTunnelThroughput", profile.getPeakTunnelThroughputKBps(), "KBytes/sec");
add(buf, "tunnelPeakTunnel1mThroughput", profile.getPeakTunnel1mThroughputKBps(), "KBytes/sec");
buf.append(NL);
if (addComments) {
buf.append("########################################################################").append(NL);
buf.append("# Profile for peer ").append(profile.getPeer().toBase64()).append(NL);
if (_us != null)
buf.append("# as calculated by ").append(_us.toBase64()).append(NL);
buf.append("#").append(NL);
buf.append("# Speed: ").append(profile.getSpeedValue()).append(NL);
buf.append("# Capacity: ").append(profile.getCapacityValue()).append(NL);
buf.append("# Integration: ").append(profile.getIntegrationValue()).append(NL);
buf.append("# Groups: ").append(groups).append(NL);
buf.append("#").append(NL);
buf.append("########################################################################").append(NL);
buf.append("##").append(NL);
}
add(buf, addComments, "speedBonus", profile.getSpeedBonus(), "Manual adjustment to the speed score");
add(buf, addComments, "capacityBonus", profile.getCapacityBonus(), "Manual adjustment to the capacity score");
add(buf, addComments, "integrationBonus", profile.getIntegrationBonus(), "Manual adjustment to the integration score");
addDate(buf, addComments, "firstHeardAbout", profile.getFirstHeardAbout(), "When did we first get a reference to this peer?");
addDate(buf, addComments, "lastHeardAbout", profile.getLastHeardAbout(), "When did we last get a reference to this peer?");
addDate(buf, addComments, "lastHeardFrom", profile.getLastHeardFrom(), "When did we last get a message from the peer?");
addDate(buf, addComments, "lastSentToSuccessfully", profile.getLastSendSuccessful(), "When did we last send the peer a message successfully?");
addDate(buf, addComments, "lastFailedSend", profile.getLastSendFailed(), "When did we last fail to send a message to the peer?");
add(buf, addComments, "tunnelTestTimeAverage", profile.getTunnelTestTimeAverage(), "Moving average as to how fast the peer replies");
add(buf, addComments, "tunnelPeakThroughput", profile.getPeakThroughputKBps(), "KBytes/sec");
add(buf, addComments, "tunnelPeakTunnelThroughput", profile.getPeakTunnelThroughputKBps(), "KBytes/sec");
add(buf, addComments, "tunnelPeakTunnel1mThroughput", profile.getPeakTunnel1mThroughputKBps(), "KBytes/sec");
if (addComments)
buf.append(NL);
out.write(buf.toString().getBytes("UTF-8"));
if (profile.getIsExpanded()) {
// only write out expanded data if, uh, we've got it
profile.getTunnelHistory().store(out);
profile.getTunnelHistory().store(out, addComments);
//profile.getReceiveSize().store(out, "receiveSize");
//profile.getSendSuccessSize().store(out, "sendSuccessSize");
profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime");
profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime");
profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime", addComments);
profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime", addComments);
}
if (profile.getIsExpandedDB()) {
profile.getDBHistory().store(out);
profile.getDbIntroduction().store(out, "dbIntroduction");
profile.getDbResponseTime().store(out, "dbResponseTime");
profile.getDBHistory().store(out, addComments);
profile.getDbIntroduction().store(out, "dbIntroduction", addComments);
profile.getDbResponseTime().store(out, "dbResponseTime", addComments);
}
}
/** @since 0.8.5 */
private static void addDate(StringBuilder buf, String name, long val, String description) {
String when = val > 0 ? (new Date(val)).toString() : "Never";
add(buf, name, val, description + ' ' + when);
private static void addDate(StringBuilder buf, boolean addComments, String name, long val, String description) {
if (addComments) {
String when = val > 0 ? (new Date(val)).toString() : "Never";
add(buf, true, name, val, description + ' ' + when);
} else {
add(buf, false, name, val, description);
}
}
/** @since 0.8.5 */
private static void add(StringBuilder buf, String name, long val, String description) {
buf.append("# ").append(name).append(NL).append("# ").append(description).append(NL);
buf.append(name).append('=').append(val).append(NL).append(NL);
private static void add(StringBuilder buf, boolean addComments, String name, long val, String description) {
if (addComments)
buf.append("# ").append(name).append(NL).append("# ").append(description).append(NL);
buf.append(name).append('=').append(val).append(NL);
if (addComments)
buf.append(NL);
}
/** @since 0.8.5 */
private static void add(StringBuilder buf, String name, float val, String description) {
buf.append("# ").append(name).append(NL).append("# ").append(description).append(NL);
buf.append(name).append('=').append(val).append(NL).append(NL);
private static void add(StringBuilder buf, boolean addComments, String name, float val, String description) {
if (addComments)
buf.append("# ").append(name).append(NL).append("# ").append(description).append(NL);
buf.append(name).append('=').append(val).append(NL);
if (addComments)
buf.append(NL);
}
public Set<PeerProfile> readProfiles() {

View File

@@ -49,8 +49,6 @@ public class TunnelHistory {
private void createRates(String statGroup) {
_rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_rejectRate.setStatLog(_context.statManager().getStatLog());
_failRate.setStatLog(_context.statManager().getStatLog());
}
/** total tunnels the peer has agreed to participate in */
@@ -137,33 +135,51 @@ public class TunnelHistory {
private final static String NL = System.getProperty("line.separator");
public void store(OutputStream out) throws IOException {
store(out, true);
}
/**
* write out the data from the profile to the stream
* @param addComments add comment lines to the output
* @since 0.9.41
*/
public void store(OutputStream out, boolean addComments) throws IOException {
StringBuilder buf = new StringBuilder(512);
buf.append(NL);
buf.append("#################").append(NL);
buf.append("# Tunnel history").append(NL);
buf.append("###").append(NL);
addDate(buf, "lastAgreedTo", _lastAgreedTo, "When did the peer last agree to participate in a tunnel?");
addDate(buf, "lastFailed", _lastFailed, "When was the last time a tunnel that the peer agreed to participate failed?");
addDate(buf, "lastRejectedCritical", _lastRejectedCritical, "When was the last time the peer refused to participate in a tunnel (Critical response code)?");
addDate(buf, "lastRejectedBandwidth", _lastRejectedBandwidth, "When was the last time the peer refused to participate in a tunnel (Bandwidth response code)?");
addDate(buf, "lastRejectedTransient", _lastRejectedTransient, "When was the last time the peer refused to participate in a tunnel (Transient load response code)?");
addDate(buf, "lastRejectedProbabalistic", _lastRejectedProbabalistic, "When was the last time the peer refused to participate in a tunnel (Probabalistic response code)?");
add(buf, "lifetimeAgreedTo", _lifetimeAgreedTo.get(), "How many tunnels has the peer ever agreed to participate in?");
add(buf, "lifetimeFailed", _lifetimeFailed.get(), "How many tunnels has the peer ever agreed to participate in that failed prematurely?");
add(buf, "lifetimeRejected", _lifetimeRejected.get(), "How many tunnels has the peer ever refused to participate in?");
if (addComments) {
buf.append(NL);
buf.append("#################").append(NL);
buf.append("# Tunnel history").append(NL);
buf.append("###").append(NL);
}
addDate(buf, addComments, "lastAgreedTo", _lastAgreedTo, "When did the peer last agree to participate in a tunnel?");
addDate(buf, addComments, "lastFailed", _lastFailed, "When was the last time a tunnel that the peer agreed to participate failed?");
addDate(buf, addComments, "lastRejectedCritical", _lastRejectedCritical, "When was the last time the peer refused to participate in a tunnel (Critical response code)?");
addDate(buf, addComments, "lastRejectedBandwidth", _lastRejectedBandwidth, "When was the last time the peer refused to participate in a tunnel (Bandwidth response code)?");
addDate(buf, addComments, "lastRejectedTransient", _lastRejectedTransient, "When was the last time the peer refused to participate in a tunnel (Transient load response code)?");
addDate(buf, addComments, "lastRejectedProbabalistic", _lastRejectedProbabalistic, "When was the last time the peer refused to participate in a tunnel (Probabalistic response code)?");
add(buf, addComments, "lifetimeAgreedTo", _lifetimeAgreedTo.get(), "How many tunnels has the peer ever agreed to participate in?");
add(buf, addComments, "lifetimeFailed", _lifetimeFailed.get(), "How many tunnels has the peer ever agreed to participate in that failed prematurely?");
add(buf, addComments, "lifetimeRejected", _lifetimeRejected.get(), "How many tunnels has the peer ever refused to participate in?");
out.write(buf.toString().getBytes("UTF-8"));
_rejectRate.store(out, "tunnelHistory.rejectRate");
_failRate.store(out, "tunnelHistory.failRate");
_rejectRate.store(out, "tunnelHistory.rejectRate", addComments);
_failRate.store(out, "tunnelHistory.failRate", addComments);
}
private static void addDate(StringBuilder buf, String name, long val, String description) {
String when = val > 0 ? (new Date(val)).toString() : "Never";
add(buf, name, val, description + ' ' + when);
private static void addDate(StringBuilder buf, boolean addComments, String name, long val, String description) {
if (addComments) {
String when = val > 0 ? (new Date(val)).toString() : "Never";
add(buf, true, name, val, description + ' ' + when);
} else {
add(buf, false, name, val, description);
}
}
private static void add(StringBuilder buf, String name, long val, String description) {
buf.append("# ").append(name).append(NL).append("# ").append(description).append(NL);
buf.append("tunnels.").append(name).append('=').append(val).append(NL).append(NL);
private static void add(StringBuilder buf, boolean addComments, String name, long val, String description) {
if (addComments)
buf.append("# ").append(name).append(NL).append("# ").append(description).append(NL);
buf.append("tunnels.").append(name).append('=').append(val).append(NL);
if (addComments)
buf.append(NL);
}
public void load(Properties props) {

View File

@@ -17,6 +17,8 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import net.i2p.crypto.EncType;
import net.i2p.crypto.KeyPair;
import net.i2p.crypto.SigType;
import net.i2p.data.Base64;
import net.i2p.data.Certificate;
@@ -57,6 +59,7 @@ public class CreateRouterInfoJob extends JobImpl {
/** TODO make everybody Ed */
private static final SigType DEFAULT_SIGTYPE = SystemVersion.isAndroid() ?
SigType.DSA_SHA1 : SigType.EdDSA_SHA512_Ed25519;
private static final EncType DEFAULT_ENCTYPE = EncType.ELGAMAL_2048;
CreateRouterInfoJob(RouterContext ctx, Job next) {
super(ctx);
@@ -96,36 +99,40 @@ public class CreateRouterInfoJob extends JobImpl {
* Caller must hold Router.routerInfoFileLock.
*/
RouterInfo createRouterInfo() {
SigType type = getSigTypeConfig(getContext());
RouterContext ctx = getContext();
SigType type = getSigTypeConfig(ctx);
RouterInfo info = new RouterInfo();
OutputStream fos1 = null;
try {
info.setAddresses(getContext().commSystem().createAddresses());
info.setAddresses(ctx.commSystem().createAddresses());
// not necessary, in constructor
//info.setPeers(new HashSet());
info.setPublished(getCurrentPublishDate(getContext()));
Object keypair[] = getContext().keyGenerator().generatePKIKeypair();
PublicKey pubkey = (PublicKey)keypair[0];
PrivateKey privkey = (PrivateKey)keypair[1];
SimpleDataStructure signingKeypair[] = getContext().keyGenerator().generateSigningKeys(type);
info.setPublished(getCurrentPublishDate(ctx));
// TODO
EncType etype = DEFAULT_ENCTYPE;
KeyPair keypair = ctx.keyGenerator().generatePKIKeys(etype);
PublicKey pubkey = keypair.getPublic();
PrivateKey privkey = keypair.getPrivate();
SimpleDataStructure signingKeypair[] = ctx.keyGenerator().generateSigningKeys(type);
SigningPublicKey signingPubKey = (SigningPublicKey)signingKeypair[0];
SigningPrivateKey signingPrivKey = (SigningPrivateKey)signingKeypair[1];
RouterIdentity ident = new RouterIdentity();
Certificate cert = createCertificate(getContext(), signingPubKey);
Certificate cert = createCertificate(ctx, signingPubKey, pubkey);
ident.setCertificate(cert);
ident.setPublicKey(pubkey);
ident.setSigningPublicKey(signingPubKey);
byte[] padding;
int padLen = SigningPublicKey.KEYSIZE_BYTES - signingPubKey.length();
int padLen = (SigningPublicKey.KEYSIZE_BYTES - signingPubKey.length()) +
(PublicKey.KEYSIZE_BYTES - pubkey.length());
if (padLen > 0) {
padding = new byte[padLen];
getContext().random().nextBytes(padding);
ctx.random().nextBytes(padding);
ident.setPadding(padding);
} else {
padding = null;
}
info.setIdentity(ident);
Properties stats = getContext().statPublisher().publishStatistics(ident.getHash());
Properties stats = ctx.statPublisher().publishStatistics(ident.getHash());
info.setOptions(stats);
info.sign(signingPrivKey);
@@ -134,15 +141,15 @@ public class CreateRouterInfoJob extends JobImpl {
throw new DataFormatException("RouterInfo we just built is invalid: " + info);
// remove router.keys
(new File(getContext().getRouterDir(), KEYS_FILENAME)).delete();
(new File(ctx.getRouterDir(), KEYS_FILENAME)).delete();
// write router.info
File ifile = new File(getContext().getRouterDir(), INFO_FILENAME);
File ifile = new File(ctx.getRouterDir(), INFO_FILENAME);
fos1 = new BufferedOutputStream(new SecureFileOutputStream(ifile));
info.writeBytes(fos1);
// write router.keys.dat
File kfile = new File(getContext().getRouterDir(), KEYS2_FILENAME);
File kfile = new File(ctx.getRouterDir(), KEYS2_FILENAME);
PrivateKeyFile pkf = new PrivateKeyFile(kfile, pubkey, signingPubKey, cert,
privkey, signingPrivKey, padding);
pkf.write();
@@ -150,17 +157,17 @@ public class CreateRouterInfoJob extends JobImpl {
// set or overwrite old random keys
Map<String, String> map = new HashMap<String, String>(2);
byte rk[] = new byte[32];
getContext().random().nextBytes(rk);
ctx.random().nextBytes(rk);
map.put(Router.PROP_IB_RANDOM_KEY, Base64.encode(rk));
getContext().random().nextBytes(rk);
ctx.random().nextBytes(rk);
map.put(Router.PROP_OB_RANDOM_KEY, Base64.encode(rk));
getContext().router().saveConfig(map, null);
ctx.router().saveConfig(map, null);
getContext().keyManager().setKeys(pubkey, privkey, signingPubKey, signingPrivKey);
ctx.keyManager().setKeys(pubkey, privkey, signingPubKey, signingPrivKey);
if (_log.shouldLog(Log.INFO))
_log.info("Router info created and stored at " + ifile.getAbsolutePath() + " with private keys stored at " + kfile.getAbsolutePath() + " [" + info + "]");
getContext().router().eventLog().addEvent(EventLog.REKEYED, ident.calculateHash().toBase64());
ctx.router().eventLog().addEvent(EventLog.REKEYED, ident.calculateHash().toBase64());
} catch (GeneralSecurityException gse) {
_log.log(Log.CRIT, "Error building the new router information", gse);
} catch (DataFormatException dfe) {
@@ -211,9 +218,9 @@ public class CreateRouterInfoJob extends JobImpl {
* @return the certificate for a new RouterInfo - probably a null cert.
* @since 0.9.16 moved from Router
*/
static Certificate createCertificate(RouterContext ctx, SigningPublicKey spk) {
if (spk.getType() != SigType.DSA_SHA1)
return new KeyCertificate(spk);
private static Certificate createCertificate(RouterContext ctx, SigningPublicKey spk, PublicKey pk) {
if (spk.getType() != SigType.DSA_SHA1 || pk.getType() != EncType.ELGAMAL_2048)
return new KeyCertificate(spk, pk);
if (ctx.getBooleanProperty(Router.PROP_HIDDEN))
return new Certificate(Certificate.CERTIFICATE_TYPE_HIDDEN, null);
return Certificate.NULL_CERT;

View File

@@ -129,7 +129,10 @@ public class RouterAppManager extends ClientAppManagerImpl {
case STOPPING:
case STOPPED:
_clients.remove(app);
_registered.remove(app.getName(), app);
boolean removed = _registered.remove(app.getName(), app);
if (removed && _log.shouldInfo()) {
_log.info("Client " + app.getDisplayName() + " UNREGISTERED AS " + app.getName());
}
if (message == null)
message = "";
if (_log.shouldLog(Log.INFO))
@@ -140,7 +143,10 @@ public class RouterAppManager extends ClientAppManagerImpl {
case CRASHED:
case START_FAILED:
_clients.remove(app);
_registered.remove(app.getName(), app);
boolean removed2 = _registered.remove(app.getName(), app);
if (removed2 && _log.shouldInfo()) {
_log.info("Client " + app.getDisplayName() + " UNREGISTERED AS " + app.getName());
}
if (message == null)
message = "";
_log.log(Log.CRIT, "Client " + app.getDisplayName() + ' ' + state +
@@ -171,6 +177,22 @@ public class RouterAppManager extends ClientAppManagerImpl {
// TODO if old app in there is not running and != this app, allow replacement
return super.register(app);
}
/**
* Unregister with the manager. Name must be the same as that from register().
* Only required for apps used by other apps.
*
* @param app non-null
* @since 0.9.41 overridden for logging only
*/
@Override
public void unregister(ClientApp app) {
if (_log.shouldInfo()) {
if (getRegisteredApp(app.getName()) != null)
_log.info("Client " + app.getDisplayName() + " UNREGISTERED AS " + app.getName());
}
super.unregister(app);
}
/// end ClientAppManager interface

View File

@@ -426,6 +426,9 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}
}
/**
* @param ip ipv4 or ipv6
*/
@Override
public void queueLookup(byte[] ip) {
_geoIP.add(ip);
@@ -441,17 +444,20 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}
/**
* Are we in a bad place
* Are we in a strict country
* @since 0.8.13
*/
@Override
public boolean isInStrictCountry() {
String us = getOurCountry();
return (us != null && StrictCountries.contains(us)) || _context.getBooleanProperty("router.forceBadCountry");
return (us != null && StrictCountries.contains(us)) || _context.getBooleanProperty("router.forceStrictCountry");
}
/**
* Are they in a bad place
* Are they in a strict country.
     * Not recommended for our local router hash, as we may not be in either the cache or the netdb,
     * or we may not be publishing an IP.
*
* @param peer non-null
* @since 0.9.16
*/
@@ -462,7 +468,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}
/**
* Are they in a bad place
* Are they in a strict country
* @param ri non-null
* @since 0.9.16
*/
@@ -478,6 +484,8 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
/**
* Uses the transport IP first because that lookup is fast,
* then the IP from the netDb.
     * Not recommended for our local router hash, as we may not be in either the cache or the netdb,
     * or we may not be publishing an IP.
*
* As of 0.9.32, works only for literal IPs, returns null for host names.
*

View File

@@ -24,8 +24,11 @@ import com.maxmind.geoip2.DatabaseReader;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.udp.UDPTransport;
import net.i2p.util.Addresses;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
@@ -160,6 +163,14 @@ public class GeoIP {
// clear the negative cache every few runs, to prevent it from getting too big
if (((++_lookupRunCount) % CLEAR) == 0)
_notFound.clear();
// add our detected addresses
Set<String> addrs = Addresses.getAddresses(false, true);
for (String ip : addrs) {
add(ip);
}
String lastIP = _context.getProperty(UDPTransport.PROP_IP);
if (lastIP != null)
add(lastIP);
// IPv4
Long[] search = _pendingSearch.toArray(new Long[_pendingSearch.size()]);
_pendingSearch.clear();
@@ -477,23 +488,53 @@ public class GeoIP {
return;
RouterContext ctx = (RouterContext) _context;
String oldCountry = ctx.router().getConfigSetting(PROP_IP_COUNTRY);
Hash ourHash = ctx.routerHash();
RouterInfo us = ctx.router().getRouterInfo();
String country = null;
// we should always have a RouterInfo by now, but we had one report of an NPE here
if (ourHash == null)
return;
String country = ctx.commSystem().getCountry(ourHash);
if (us != null) {
// try our published addresses
for (RouterAddress ra : us.getAddresses()) {
byte[] ip = ra.getIP();
if (ip != null) {
country = get(ip);
if (country != null)
break;
}
}
}
if (country == null) {
// try our detected addresses
Set<String> addrs = Addresses.getAddresses(false, true);
for (String ip : addrs) {
country = get(ip);
if (country != null)
break;
}
if (country == null) {
String lastIP = _context.getProperty(UDPTransport.PROP_IP);
if (lastIP != null)
country = get(lastIP);
}
}
if (_log.shouldInfo())
_log.info("Old country was " + oldCountry + " new country is " + country);
if (country != null && !country.equals(oldCountry)) {
boolean wasStrict = ctx.commSystem().isInStrictCountry();
ctx.router().saveConfig(PROP_IP_COUNTRY, country);
if (ctx.commSystem().isInStrictCountry() && ctx.getProperty(Router.PROP_HIDDEN_HIDDEN) == null) {
String name = fullName(country);
if (name == null)
name = country;
_log.logAlways(Log.WARN, "Setting hidden mode to protect you in " + name +
", you may override on the network configuration page");
boolean isStrict = ctx.commSystem().isInStrictCountry();
if (_log.shouldInfo())
_log.info("Old country was strict? " + wasStrict + "; new country is strict? " + isStrict);
if (wasStrict != isStrict && ctx.getProperty(Router.PROP_HIDDEN_HIDDEN) == null) {
if (isStrict) {
String name = fullName(country);
if (name == null)
name = country;
_log.logAlways(Log.WARN, "Setting hidden mode to protect you in " + name +
", you may override on the network configuration page");
}
ctx.router().rebuildRouterInfo();
}
}
/****/
}
/**

View File

@@ -14,47 +14,48 @@ public abstract class StrictCountries {
private static final Set<String> _countries;
// zzz.i2p/topics/969
// List created based on the Press Freedom Index. Those countries with a score of higher than 50 are included:
// http://en.wikipedia.org/wiki/Press_Freedom_Index
// Except (quote):
// I don't really think that is usage of I2P is dangerous in countries from CIS
// General situation is really bad (like in Russia) but people here doesn't have problems with Ecnryption usage.
// List updated using the Freedom in the World Index 2019
// https://freedomhouse.org/report/countries-world-freedom-2019
// General guidance: Include countries with a Civil Liberties (CL) score of 6 or 7.
static {
String[] c = {
/* Afghanistan */ "AF",
/* Azerbaijan */ "AZ",
/* Bahrain */ "BH",
/* Belarus */ "BY",
/* Brunei */ "BN",
/* Burma */ "MM",
/* Burundi */ "BI",
/* Cameroon */ "CM",
/* Central African Republic */ "CF",
/* Chad */ "TD",
/* China */ "CN",
/* Colombia */ "CO",
/* Cuba */ "CU",
/* Democratic Republic of the Congo */ "CD",
/* Egypt */ "EG",
/* Equatorial Guinea */ "GQ",
/* Eritrea */ "ER",
/* Ethiopia */ "ET",
/* Fiji */ "FJ",
/* Honduras */ "HN",
/* Iran */ "IR",
/* Laos */ "LA",
/* Libya */ "LY",
/* Malaysia */ "MY",
/* Nigeria */ "NG",
/* Myanmar */ "MM",
/* North Korea */ "KP",
/* Pakistan */ "PK",
/* Palestinian Territories */ "PS",
/* Philippines */ "PH",
/* Rwanda */ "RW",
/* Saudi Arabia */ "SA",
/* Somalia */ "SO",
/* Sri Lanka */ "LK",
/* South Sudan */ "SS",
/* Sudan */ "SD",
/* Swaziland */ "SZ",
/* Eswatini (Swaziland) */ "SZ",
/* Syria */ "SY",
/* Tajikistan */ "TJ",
/* Thailand */ "TH",
/* Tunisia */ "TN",
/* Vietnam */ "VN",
/* Turkmenistan */ "TM",
/* Venezuela */ "VE",
/* United Arab Emirates */ "AE",
/* Uzbekistan */ "UZ",
/* Western Sahara */ "EH",
/* Yemen */ "YE"
};
_countries = new HashSet<String>(Arrays.asList(c));

View File

@@ -341,7 +341,10 @@ public class TransportManager implements TransportEventListener {
// Maybe we need a config option to force on? Probably not.
// What firewall supports UPnP and is configured with a public address on the LAN side?
// Unlikely.
if (_upnpManager != null && Addresses.getAnyAddress() == null)
// Always start on Android, as we may have a cellular IPv4 address but
// are routing all traffic through WiFi.
// Also, conditions may change rapidly.
if (_upnpManager != null && (SystemVersion.isAndroid() || Addresses.getAnyAddress() == null))
_upnpManager.start();
configTransports();
_log.debug("Starting up the transport manager");

View File

@@ -237,7 +237,7 @@ public abstract class TransportUtil {
* @since 0.9.17 moved from logic in individual transports
*/
public static boolean isValidPort(int port) {
// update log message in UDPEndpoint if you update this list
// update log message below if you update this list
return port >= 1024 &&
port <= 65535 &&
port != 1900 && // UPnP SSDP
@@ -257,7 +257,7 @@ public abstract class TransportUtil {
port != 9050 && // Tor
port != 9100 && // network printer
port != 9150 && // Tor browser
// do not block anything in 9111 - 30777, this is the standard random selection range
// do not block anything in 9151 - 30777, this is the standard random selection range
port != 31000 && // Wrapper
port != 32000; // Wrapper
}

View File

@@ -910,7 +910,9 @@ public class UPnP extends ControlPoint implements DeviceChangeListener, EventLis
add.setArgumentValue("NewProtocol", protocol);
add.setArgumentValue("NewPortMappingDescription", description);
add.setArgumentValue("NewEnabled","1");
add.setArgumentValue("NewLeaseDuration", 0);
// 3 hours
// MUST be longer than max RI republish which is 52 minutes
add.setArgumentValue("NewLeaseDuration", 3*60*60);
boolean rv = add.postControlAction();
if(rv) {
@@ -1085,17 +1087,19 @@ public class UPnP extends ControlPoint implements DeviceChangeListener, EventLis
// Ports in ports but not in portsToForwardNow we must forward
// Ports in portsToForwardNow but not in ports we must dump
for(ForwardPort port: ports) {
// Always add, since we now have a 3 hour lease duration,
// so we have to keep refreshing the lease.
//if(portsToForward.contains(port)) {
// If not in portsForwarded, it wasn't successful, try again
if(portsForwarded.contains(port)) {
//if(portsForwarded.contains(port)) {
// We have forwarded it, and it should be forwarded, cool.
// Big problem here, if firewall resets, we don't know it.
// Do we need to re-forward anyway? or poll the router?
} else {
//} else {
// Needs forwarding
if(portsToForwardNow == null) portsToForwardNow = new HashSet<ForwardPort>();
portsToForwardNow.add(port);
}
//}
}
for(ForwardPort port : portsToForward) {
if(ports.contains(port)) {

View File

@@ -34,6 +34,8 @@ class UPnPManager {
private final RouterContext _context;
private final UPnP _upnp;
private final UPnPCallback _upnpCallback;
private final UPnPScannerCallback _scannerCallback;
private final DelayedCallback _delayedCallback;
private volatile boolean _isRunning;
private volatile boolean _shouldBeRunning;
private volatile long _lastRescan;
@@ -73,6 +75,8 @@ class UPnPManager {
Debug.initialize(context);
_upnp = new UPnP(context);
_upnpCallback = new UPnPCallback();
_scannerCallback = _context.router().getUPnPScannerCallback();
_delayedCallback = (_scannerCallback != null) ? new DelayedCallback() : null;
_rescanner = new Rescanner();
}
@@ -91,7 +95,16 @@ class UPnPManager {
// and will eventually hit 1024 and then negative
_upnp.setHTTPPort(_context.getProperty(PROP_HTTP_PORT, DEFAULT_HTTP_PORT));
_upnp.setSSDPPort(_context.getProperty(PROP_SSDP_PORT, DEFAULT_SSDP_PORT));
if (_scannerCallback != null) {
_scannerCallback.beforeScan();
}
_isRunning = _upnp.runPlugin();
if (_scannerCallback != null) {
if (_isRunning)
_delayedCallback.reschedule();
else
_scannerCallback.afterScan();
}
if (_log.shouldDebug())
_log.info("UPnP runPlugin took " + (_context.clock().now() - b));
} catch (RuntimeException e) {
@@ -100,6 +113,9 @@ class UPnPManager {
_log.error("UPnP error, please report", e);
_errorLogged = true;
}
if (_scannerCallback != null) {
_scannerCallback.afterScan();
}
}
}
if (_isRunning) {
@@ -156,6 +172,8 @@ class UPnPManager {
return false;
_lastRescan = now;
if (_isRunning) {
if (_scannerCallback != null)
_scannerCallback.beforeScan();
if (_log.shouldLog(Log.DEBUG))
_log.debug("UPnP Rescan");
// TODO default search MX (jitter) is 3 seconds... reduce?
@@ -163,6 +181,8 @@ class UPnPManager {
// Adaptive Jitter Control for UPnP M-Search
// Kevin Mills and Christopher Dabrowski
_upnp.search();
if (_scannerCallback != null)
_delayedCallback.reschedule();
} else {
start();
}
@@ -188,6 +208,33 @@ class UPnPManager {
}
}
}
/**
* Delayed Callback
*
* @since 0.9.41
*/
private class DelayedCallback extends SimpleTimer2.TimedEvent {
/** caller must reschedule() */
public DelayedCallback() {
super(_context.simpleTimer2());
}
public void timeReached() {
_scannerCallback.afterScan();
}
/**
* Pushes out.
* We do it this way because we may have two scans running concurrently,
* we only want to call afterScan() once.
*/
void reschedule() {
// false == use latest time
reschedule((_upnp.getSearchMx() * 1000) + 500, false);
}
}
/**
* Call when the ports might have changed

View File

@@ -0,0 +1,22 @@
package net.i2p.router.transport;
/**
* For Android MulticastLock.
*
* @since 0.9.41
*/
public interface UPnPScannerCallback {
/**
* Called before a SSDP search begins.
* This may be called more than once before afterScan()
* if there are multiple searches in parallel.
*/
public void beforeScan();
/**
* Called after a SSDP search ends.
* This will only be called once after the last scan ends.
*/
public void afterScan();
}

View File

@@ -2,8 +2,6 @@ package net.i2p.router.transport.crypto;
import java.util.concurrent.LinkedBlockingQueue;
import com.southernstorm.noise.crypto.x25519.Curve25519;
import net.i2p.I2PAppContext;
import net.i2p.crypto.EncType;
import net.i2p.crypto.KeyPair;
@@ -139,15 +137,7 @@ public class X25519KeyFactory extends I2PThread {
private KeyPair precalc() {
long start = System.currentTimeMillis();
byte[] priv = new byte[32];
do {
_context.random().nextBytes(priv);
// little endian, loop if too small
// worth doing?
} while (priv[31] == 0);
byte[] pub = new byte[32];
Curve25519.eval(pub, 0, priv, null);
KeyPair rv = new KeyPair(new PublicKey(EncType.ECIES_X25519, pub), new PrivateKey(EncType.ECIES_X25519, priv));
KeyPair rv = _context.keyGenerator().generatePKIKeys(EncType.ECIES_X25519);
long end = System.currentTimeMillis();
long diff = end - start;
_context.statManager().addRateData("crypto.XDHGenerateTime", diff);

View File

@@ -1619,7 +1619,7 @@ class PacketBuilder {
int hmacLen = totalSize + UDPPacket.IV_SIZE + 2;
//Hash hmac = _context.hmac().calculate(macKey, data, hmacOff, hmacLen);
byte[] ba = SimpleByteCache.acquire(Hash.HASH_LENGTH);
_context.hmac().calculate(macKey, data, hmacOff, hmacLen, ba, 0);
_transport.getHMAC().calculate(macKey, data, hmacOff, hmacLen, ba, 0);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Authenticating " + pkt.getLength() +

View File

@@ -10,6 +10,7 @@ import net.i2p.router.RouterContext;
import net.i2p.router.util.CoDelBlockingQueue;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.SessionKey;
import net.i2p.util.I2PThread;
import net.i2p.util.LHMCache;
import net.i2p.util.Log;
@@ -181,6 +182,13 @@ class PacketHandler {
return rv;
}
/**
* @since 0.9.42
*/
private boolean validate(UDPPacket packet, SessionKey key) {
return packet.validate(key, _transport.getHMAC());
}
/** the packet is from a peer we are establishing an outbound con to, but failed validation, so fallback */
private static final short OUTBOUND_FALLBACK = 1;
/** the packet is from a peer we are establishing an inbound con to, but failed validation, so fallback */
@@ -325,17 +333,17 @@ class PacketHandler {
private void receivePacket(UDPPacketReader reader, UDPPacket packet, PeerState state) {
_state = 17;
AuthType auth = AuthType.NONE;
boolean isValid = packet.validate(state.getCurrentMACKey());
boolean isValid = validate(packet, state.getCurrentMACKey());
if (!isValid) {
_state = 18;
if (state.getNextMACKey() != null)
isValid = packet.validate(state.getNextMACKey());
isValid = validate(packet, state.getNextMACKey());
if (!isValid) {
_state = 19;
if (_log.shouldLog(Log.INFO))
_log.info("Failed validation with existing con, trying as new con: " + packet);
isValid = packet.validate(_transport.getIntroKey());
isValid = validate(packet, _transport.getIntroKey());
if (isValid) {
_state = 20;
// this is a stray packet from an inbound establishment
@@ -388,7 +396,7 @@ class PacketHandler {
*/
private void receivePacket(UDPPacketReader reader, UDPPacket packet, short peerType) {
_state = 27;
boolean isValid = packet.validate(_transport.getIntroKey());
boolean isValid = validate(packet, _transport.getIntroKey());
if (!isValid) {
// Note that the vast majority of these are NOT corrupted packets, but
// packets for which we don't have the PeerState (i.e. SessionKey)
@@ -425,7 +433,7 @@ class PacketHandler {
}
if (ps.getRemotePort() == newPort) {
foundSamePort = true;
} else if (packet.validate(ps.getCurrentMACKey())) {
} else if (validate(packet, ps.getCurrentMACKey())) {
packet.decrypt(ps.getCurrentCipherKey());
reader.initialize(packet);
if (_log.shouldLog(Log.WARN))
@@ -513,7 +521,7 @@ class PacketHandler {
}
boolean isValid = false;
if (state.getMACKey() != null) {
isValid = packet.validate(state.getMACKey());
isValid = validate(packet, state.getMACKey());
if (isValid) {
if (_log.shouldLog(Log.INFO))
_log.info("Valid introduction packet received for inbound con: " + packet);
@@ -558,7 +566,7 @@ class PacketHandler {
boolean isValid = false;
if (state.getMACKey() != null) {
_state = 36;
isValid = packet.validate(state.getMACKey());
isValid = validate(packet, state.getMACKey());
if (isValid) {
// this should be the Session Confirmed packet
if (_log.shouldLog(Log.INFO))
@@ -572,7 +580,7 @@ class PacketHandler {
}
// keys not yet exchanged, lets try it with the peer's intro key
isValid = packet.validate(state.getIntroKey());
isValid = validate(packet, state.getIntroKey());
if (isValid) {
if (_log.shouldLog(Log.INFO))
_log.info("Valid packet received for " + state + " with Bob's intro key: " + packet);

View File

@@ -478,7 +478,11 @@ public class PeerState {
*/
//public boolean getRemoteWantsPreviousACKs() { return _remoteWantsPreviousACKs; }
/** how many bytes should we send to the peer in a second */
/**
* how many bytes should we send to the peer in a second
* 1st stat in CWND column, otherwise unused,
* candidate for removal
*/
public int getSendWindowBytes() {
synchronized(_outboundMessages) {
return _sendWindowBytes;
@@ -682,11 +686,6 @@ public class PeerState {
*/
//public void remoteDoesNotWantPreviousACKs() { _remoteWantsPreviousACKs = false; }
/** should we ignore the peer state's congestion window, and let anything through? */
private static final boolean IGNORE_CWIN = false;
/** should we ignore the congestion window on the first push of every message? */
private static final boolean ALWAYS_ALLOW_FIRST_PUSH = false;
/**
* Decrement the remaining bytes in the current period's window,
* returning true if the full size can be decremented, false if it
@@ -717,8 +716,16 @@ public class PeerState {
//_sendACKBytes = 0;
_lastSendRefill = now;
}
//if (true) return true;
if (IGNORE_CWIN || size <= _sendWindowBytesRemaining || (ALWAYS_ALLOW_FIRST_PUSH && messagePushCount == 0)) {
// Ticket 2505
// We always send all unacked fragments for a message,
// because we don't have any mechanism in OutboundMessageFragments
// to track the next send time for fragments individually.
// Huge messages that are larger than the window size could never
// get sent and block the outbound queue forever.
// So we let it through when the window is empty (full window remaining).
if (size <= _sendWindowBytesRemaining ||
(size > _sendWindowBytes && _sendWindowBytesRemaining >= _sendWindowBytes)) {
if ( (messagePushCount == 0) && (_concurrentMessagesActive > _concurrentMessagesAllowed) ) {
_consecutiveRejections++;
_context.statManager().addRateData("udp.rejectConcurrentActive", _concurrentMessagesActive, _consecutiveRejections);
@@ -731,6 +738,8 @@ public class PeerState {
_consecutiveRejections = 0;
}
_sendWindowBytesRemaining -= size;
if (_sendWindowBytesRemaining < 0)
_sendWindowBytesRemaining = 0;
_sendBytes += size;
_lastSendTime = now;
//if (isForACK)
@@ -767,20 +776,36 @@ public class PeerState {
}
****/
/**
* stat in SST column, otherwise unused,
* candidate for removal
*/
public int getSlowStartThreshold() { return _slowStartThreshold; }
/**
* 2nd stat in CWND column, otherwise unused,
* candidate for removal
*/
public int getConcurrentSends() {
synchronized(_outboundMessages) {
return _concurrentMessagesActive;
}
}
/**
* 3rd stat in CWND column, otherwise unused,
* candidate for removal
*/
public int getConcurrentSendWindow() {
synchronized(_outboundMessages) {
return _concurrentMessagesAllowed;
}
}
/**
* 4th stat in CWND column, otherwise unused,
* candidate for removal
*/
public int getConsecutiveSendRejections() {
synchronized(_outboundMessages) {
return _consecutiveRejections;

View File

@@ -0,0 +1,185 @@
package net.i2p.router.transport.udp;

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.concurrent.LinkedBlockingQueue;

// following are for main() tests
//import java.security.InvalidKeyException;
//import java.security.Key;
//import java.security.NoSuchAlgorithmException;
//import javax.crypto.spec.SecretKeySpec;
//import net.i2p.data.Base64;

import net.i2p.crypto.HMACGenerator;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.util.SimpleByteCache;

import org.bouncycastle.oldcrypto.macs.I2PHMac;

/**
 * Calculate the HMAC-MD5-128 of a key+message. All the good stuff occurs
 * in {@link org.bouncycastle.oldcrypto.macs.I2PHMac}
 *
 * Keys are always 32 bytes.
 * This is used only by UDP.
 * Use deprecated outside the router, this may move to router.jar.
 *
 * NOTE THIS IS NOT COMPATIBLE with javax.crypto.Mac.getInstance("HmacMD5")
 * as we tell I2PHMac that the digest length is 32 bytes, so it generates
 * a different result.
 *
 * Quote jrandom:
 * "The HMAC is hardcoded to use SHA256 digest size
 * for backwards compatability. next time we have a backwards
 * incompatible change, we should update this."
 *
 * Does this mean he intended it to be compatible with MD5?
 * See also 2005-07-05 status notes.
 *
 * @since 0.9.42 moved from net.i2p.crypto.HMACGenerator
 */
class SSUHMACGenerator extends HMACGenerator {
    /**
     * Pool of idle I2PHMac instances for calculate()/verify().
     * Bounded at 32; overflow instances are simply discarded in release().
     */
    private final LinkedBlockingQueue<I2PHMac> _available;

    public SSUHMACGenerator() {
        super();
        _available = new LinkedBlockingQueue<I2PHMac>(32);
    }

    /**
     * Calculate the HMAC of the data with the given key
     *
     * @param key session key to MAC with; must have non-null data
     * @param data the data to MAC
     * @param offset offset into data
     * @param length how many bytes of data to MAC
     * @param target out parameter the first 16 bytes contain the HMAC, the last 16 bytes are zero
     * @param targetOffset offset into target to put the hmac
     * @throws NullPointerException for null key, key data, or data
     * @throws IllegalArgumentException for bad key or target too small
     */
    public void calculate(SessionKey key, byte data[], int offset, int length, byte target[], int targetOffset) {
        if ((key == null) || (key.getData() == null) || (data == null))
            throw new NullPointerException("Null arguments for HMAC");

        I2PHMac mac = acquire();
        try {
            mac.init(key.getData());
            mac.update(data, offset, length);
            mac.doFinal(target, targetOffset);
        } finally {
            // Always return the instance to the pool, even if doFinal()
            // threw (e.g. target too small). The next init() resets the
            // underlying digest state, so a partially-updated mac is safe
            // to reuse.
            release(mac);
        }
    }

    /**
     * Verify the MAC inline, reducing some unnecessary memory churn.
     * Uses a constant-time comparison (DataHelper.eqCT).
     *
     * @param key session key to verify the MAC with
     * @param curData MAC to verify
     * @param curOffset index into curData to MAC
     * @param curLength how much data in curData do we want to run the HMAC over
     * @param origMAC what do we expect the MAC of curData to equal
     * @param origMACOffset index into origMAC
     * @param origMACLength how much of the MAC do we want to verify
     * @return true if the computed MAC matches origMAC
     * @throws NullPointerException for null key, key data, or curData
     * @throws IllegalArgumentException for bad key
     */
    public boolean verify(SessionKey key, byte curData[], int curOffset, int curLength,
                          byte origMAC[], int origMACOffset, int origMACLength) {
        if ((key == null) || (key.getData() == null) || (curData == null))
            throw new NullPointerException("Null arguments for HMAC");

        I2PHMac mac = acquire();
        byte rv[] = null;
        try {
            mac.init(key.getData());
            mac.update(curData, curOffset, curLength);
            rv = acquireTmp();
            mac.doFinal(rv, 0);
            // constant-time compare of the first origMACLength bytes
            return DataHelper.eqCT(rv, 0, origMAC, origMACOffset, origMACLength);
        } finally {
            // return both the mac and the tmp buffer to their pools,
            // even if init()/update() threw
            release(mac);
            if (rv != null)
                releaseTmp(rv);
        }
    }

    /**
     * Get a pooled I2PHMac, or construct a new one if the pool is empty.
     */
    private I2PHMac acquire() {
        I2PHMac rv = _available.poll();
        if (rv != null)
            return rv;
        // the HMAC is hardcoded to use SHA256 digest size
        // for backwards compatibility. next time we have a backwards
        // incompatible change, we should update this by removing ", 32"
        // SEE NOTES ABOVE
        try {
            MessageDigest md = MessageDigest.getInstance("MD5");
            return new I2PHMac(md, 32);
        } catch (NoSuchAlgorithmException nsae) {
            // MD5 is required by the JCA spec, so this should never happen;
            // preserve the cause for diagnosis if it somehow does
            throw new UnsupportedOperationException("MD5", nsae);
        }
    }

    /**
     * Return a mac to the pool; silently dropped if the pool is full.
     */
    private void release(I2PHMac mac) {
        _available.offer(mac);
    }

    /**
     * Drop all pooled I2PHMac instances, e.g. on transport shutdown.
     *
     * @since 0.9.42
     */
    public void clearCache() {
        _available.clear();
    }

    //private static final int RUNS = 100000;

    /**
     * Test the BC and the JVM's implementations for speed
     */
    /**** All this did was prove that we aren't compatible with standard HmacMD5
    public static void main(String args[]) {
        if (args.length != 2) {
            System.err.println("Usage: HMACGenerator keySeedString dataString");
            return;
        }
        byte[] rand = SHA256Generator.getInstance().calculateHash(args[0].getBytes()).getData();
        byte[] data = args[1].getBytes();
        Key keyObj = new SecretKeySpec(rand, "HmacMD5");
        byte[] keyBytes = keyObj.getEncoded();
        System.out.println("key bytes (" + keyBytes.length + ") is [" + Base64.encode(keyBytes) + "]");
        SessionKey key = new SessionKey(keyBytes);
        System.out.println("session key is [" + key);
        System.out.println("key object is [" + keyObj);

        HMACGenerator gen = new HMACGenerator(I2PAppContext.getGlobalContext());
        byte[] result = new byte[16];
        long start = System.currentTimeMillis();
        for (int i = 0; i < RUNS; i++) {
            gen.calculate(key, data, 0, data.length, result, 0);
            if (i == 0)
                System.out.println("MAC [" + Base64.encode(result) + "]");
        }
        long time = System.currentTimeMillis() - start;
        System.out.println("Time for " + RUNS + " HMAC-MD5 computations:");
        System.out.println("BC time (ms): " + time);

        start = System.currentTimeMillis();
        javax.crypto.Mac mac;
        try {
            mac = javax.crypto.Mac.getInstance("HmacMD5");
        } catch (NoSuchAlgorithmException e) {
            System.err.println("Fatal: " + e);
            return;
        }
        for (int i = 0; i < RUNS; i++) {
            try {
                mac.init(keyObj);
            } catch (InvalidKeyException e) {
                System.err.println("Fatal: " + e);
            }
            byte[] sha = mac.doFinal(data);
            if (i == 0)
                System.out.println("MAC [" + Base64.encode(sha) + "]");
        }
        time = System.currentTimeMillis() - start;
        System.out.println("JVM time (ms): " + time);
    }
    ****/
}

View File

@@ -6,6 +6,7 @@ import java.util.Arrays;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.crypto.HMACGenerator;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
import net.i2p.data.SessionKey;
@@ -225,7 +226,7 @@ class UDPPacket implements CDQEntry {
* MAC matches, false otherwise.
*
*/
public synchronized boolean validate(SessionKey macKey) {
public synchronized boolean validate(SessionKey macKey, HMACGenerator hmac) {
verifyNotReleased();
//_beforeValidate = _context.clock().now();
boolean eq = false;
@@ -244,14 +245,14 @@ class UDPPacket implements CDQEntry {
DataHelper.toLong(_validateBuf, off, 2, payloadLength /* ^ PacketBuilder.PROTOCOL_VERSION */ );
off += 2;
eq = _context.hmac().verify(macKey, _validateBuf, 0, off, _data, _packet.getOffset(), MAC_SIZE);
eq = hmac.verify(macKey, _validateBuf, 0, off, _data, _packet.getOffset(), MAC_SIZE);
if (!eq) {
// this is relatively frequent, as you can get old keys in PacketHandler.
Log log = _context.logManager().getLog(UDPPacket.class);
if (log.shouldLog(Log.INFO)) {
byte[] calc = new byte[32];
_context.hmac().calculate(macKey, _validateBuf, 0, off, calc, 0);
hmac.calculate(macKey, _validateBuf, 0, off, calc, 0);
StringBuilder str = new StringBuilder(512);
str.append("Bad HMAC:\n\t");
str.append(_packet.getLength()).append(" byte pkt, ");

View File

@@ -20,6 +20,7 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import net.i2p.crypto.HMACGenerator;
import net.i2p.crypto.SigType;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
@@ -51,6 +52,7 @@ import net.i2p.util.Log;
import net.i2p.util.OrderedProperties;
import net.i2p.util.SimpleTimer;
import net.i2p.util.SimpleTimer2;
import net.i2p.util.SystemVersion;
import net.i2p.util.VersionComparator;
/**
@@ -86,6 +88,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
private long _introducersSelectedOn;
private long _lastInboundReceivedOn;
private final DHSessionKeyBuilder.Factory _dhFactory;
private final SSUHMACGenerator _hmac;
private int _mtu;
private int _mtu_ipv6;
private boolean _mismatchLogged;
@@ -271,6 +274,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_introManager = new IntroductionManager(_context, this);
_introducersSelectedOn = -1;
_lastInboundReceivedOn = -1;
_hmac = new SSUHMACGenerator();
_mtu = PeerState.LARGE_MTU;
_mtu_ipv6 = PeerState.MIN_IPV6_MTU;
setupPort();
@@ -613,6 +617,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
UDPPacket.clearCache();
UDPAddress.clearCache();
_lastInboundIPv6 = 0;
_hmac.clearCache();
}
/**
@@ -1043,8 +1048,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
if (ourIP.length == 4 && !fixedPort)
changes.put(PROP_EXTERNAL_PORT, Integer.toString(ourPort));
// queue a country code lookup of the new IP
if (ourIP.length == 4)
_context.commSystem().queueLookup(ourIP);
_context.commSystem().queueLookup(ourIP);
// store these for laptop-mode (change ident on restart... or every time... when IP changes)
// IPV4 ONLY
String oldIP = _context.getProperty(PROP_IP);
@@ -1070,7 +1074,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
// laptop mode
// For now, only do this at startup
if (oldIP != null &&
System.getProperty("wrapper.version") != null &&
SystemVersion.hasWrapper() &&
_context.getBooleanProperty(PROP_LAPTOP_MODE) &&
now - lastChanged > 10*60*1000 &&
_context.router().getUptime() < 10*60*1000) {
@@ -2748,6 +2752,14 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
return _dhFactory;
}
/**
* @return the SSU HMAC
* @since 0.9.42
*/
HMACGenerator getHMAC() {
return _hmac;
}
/**
* Does nothing
* @deprecated as of 0.9.31

View File

@@ -16,8 +16,9 @@ import net.i2p.router.TunnelInfo;
* Coordinate the info that the tunnel creator keeps track of, including what
* peers are in the tunnel and what their configuration is
*
* See PooledTunnelCreatorConfig for the non-abstract class
*/
public class TunnelCreatorConfig implements TunnelInfo {
public abstract class TunnelCreatorConfig implements TunnelInfo {
protected final RouterContext _context;
/** only necessary for client tunnels */
private final Hash _destination;

View File

@@ -6,6 +6,8 @@ import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import net.i2p.router.RouterContext;
import net.i2p.util.I2PThread;
import net.i2p.util.SimpleTimer;
@@ -23,6 +25,7 @@ class TunnelGatewayPumper implements Runnable {
private final RouterContext _context;
private final Set<PumpedTunnelGateway> _wantsPumping;
private final Set<PumpedTunnelGateway> _backlogged;
private final List<Thread> _threads;
private volatile boolean _stop;
private static final int MIN_PUMPERS = 1;
private static final int MAX_PUMPERS = 4;
@@ -39,14 +42,18 @@ class TunnelGatewayPumper implements Runnable {
_context = ctx;
_wantsPumping = new LinkedHashSet<PumpedTunnelGateway>(16);
_backlogged = new HashSet<PumpedTunnelGateway>(16);
_threads = new CopyOnWriteArrayList<Thread>();
if (ctx.getBooleanProperty("i2p.dummyTunnelManager")) {
_pumpers = 1;
} else {
long maxMemory = SystemVersion.getMaxMemory();
_pumpers = (int) Math.max(MIN_PUMPERS, Math.min(MAX_PUMPERS, 1 + (maxMemory / (32*1024*1024))));
}
for (int i = 0; i < _pumpers; i++)
new I2PThread(this, "Tunnel GW pumper " + (i+1) + '/' + _pumpers, true).start();
for (int i = 0; i < _pumpers; i++) {
Thread t = new I2PThread(this, "Tunnel GW pumper " + (i+1) + '/' + _pumpers, true);
_threads.add(t);
t.start();
}
}
public void stopPumping() {
@@ -61,6 +68,10 @@ class TunnelGatewayPumper implements Runnable {
Thread.sleep(i * 50);
} catch (InterruptedException ie) {}
}
for (Thread t : _threads) {
t.interrupt();
}
_threads.clear();
_wantsPumping.clear();
}
@@ -74,6 +85,14 @@ class TunnelGatewayPumper implements Runnable {
}
public void run() {
try {
run2();
} finally {
_threads.remove(Thread.currentThread());
}
}
private void run2() {
PumpedTunnelGateway gw = null;
List<PendingGatewayMessage> queueBuf = new ArrayList<PendingGatewayMessage>(32);
boolean requeue = false;

View File

@@ -415,7 +415,7 @@ class BuildExecutor implements Runnable {
_context.statManager().addRateData("tunnel.buildConfigTime", pTime, 0);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Configuring new tunnel " + i + " for " + pool + ": " + cfg);
buildTunnel(pool, cfg);
buildTunnel(cfg);
//realBuilt++;
} else {
i--;
@@ -507,7 +507,7 @@ class BuildExecutor implements Runnable {
if (cfg != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Configuring short tunnel " + i + " for " + pool + ": " + cfg);
buildTunnel(pool, cfg);
buildTunnel(cfg);
if (cfg.getLength() > 1) {
allowed--; // oops... shouldn't have done that, but hey, its not that bad...
}
@@ -524,7 +524,7 @@ class BuildExecutor implements Runnable {
public boolean isRunning() { return _isRunning; }
void buildTunnel(TunnelPool pool, PooledTunnelCreatorConfig cfg) {
void buildTunnel(PooledTunnelCreatorConfig cfg) {
long beforeBuild = System.currentTimeMillis();
if (cfg.getLength() > 1) {
do {
@@ -532,7 +532,7 @@ class BuildExecutor implements Runnable {
cfg.setReplyMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
} while (addToBuilding(cfg)); // if a dup, go araound again
}
boolean ok = BuildRequestor.request(_context, pool, cfg, this);
boolean ok = BuildRequestor.request(_context, cfg, this);
if (!ok)
return;
if (cfg.getLength() > 1) {
@@ -556,10 +556,10 @@ class BuildExecutor implements Runnable {
* This wakes up the executor, so call this after TunnelPool.addTunnel()
* so we don't build too many.
*/
public void buildComplete(PooledTunnelCreatorConfig cfg, TunnelPool pool) {
public void buildComplete(PooledTunnelCreatorConfig cfg) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Build complete for " + cfg, new Exception());
pool.buildComplete(cfg);
cfg.getTunnelPool().buildComplete(cfg);
if (cfg.getLength() > 1)
removeFromBuilding(cfg.getReplyMessageId());
// Only wake up the build thread if it took a reasonable amount of time -

View File

@@ -322,7 +322,7 @@ class BuildHandler implements Runnable {
if (record < 0) {
_log.error("Bad status index " + i);
// don't leak
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildComplete(cfg);
return;
}
@@ -379,14 +379,14 @@ class BuildHandler implements Runnable {
// This will happen very rarely. We check for dups when
// creating the config, but we don't track IDs for builds in progress.
_context.statManager().addRateData("tunnel.ownDupID", 1);
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildComplete(cfg);
if (_log.shouldLog(Log.WARN))
_log.warn("Dup ID for our own tunnel " + cfg);
return;
}
cfg.getTunnelPool().addTunnel(cfg); // self.self.self.foo!
// call buildComplete() after addTunnel() so we don't try another build.
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildComplete(cfg);
_exec.buildSuccessful(cfg);
if (cfg.getTunnelPool().getSettings().isExploratory()) {
@@ -421,8 +421,7 @@ class BuildHandler implements Runnable {
}
}
ExpireJob expireJob = new ExpireJob(_context, cfg, cfg.getTunnelPool());
cfg.setExpireJob(expireJob);
ExpireJob expireJob = new ExpireJob(_context, cfg);
_context.jobQueue().addJob(expireJob);
if (cfg.getDestination() == null)
_context.statManager().addRateData("tunnel.buildExploratorySuccess", rtt);
@@ -430,7 +429,7 @@ class BuildHandler implements Runnable {
_context.statManager().addRateData("tunnel.buildClientSuccess", rtt);
} else {
// someone is no fun
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildComplete(cfg);
if (cfg.getDestination() == null)
_context.statManager().addRateData("tunnel.buildExploratoryReject", rtt);
else
@@ -441,7 +440,7 @@ class BuildHandler implements Runnable {
_log.warn(msg.getUniqueId() + ": Tunnel reply could not be decrypted for tunnel " + cfg);
_context.statManager().addRateData("tunnel.corruptBuildReply", 1);
// don't leak
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildComplete(cfg);
}
}
@@ -723,8 +722,9 @@ class BuildHandler implements Runnable {
// tunnel-alt-creation.html specifies that this is enforced +/- 1 hour but it was not.
// As of 0.9.16, allow + 5 minutes to - 65 minutes.
long time = req.readRequestTime();
long now = (_context.clock().now() / (60l*60l*1000l)) * (60*60*1000);
long timeDiff = now - time;
long now = _context.clock().now();
long roundedNow = (now / (60l*60l*1000l)) * (60*60*1000);
long timeDiff = roundedNow - time;
if (timeDiff > MAX_REQUEST_AGE) {
_context.statManager().addRateData("tunnel.rejectTooOld", 1);
if (_log.shouldLog(Log.WARN))
@@ -763,7 +763,7 @@ class BuildHandler implements Runnable {
//if ( (response == 0) && (_context.random().nextInt(50) <= 1) )
// response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
long recvDelay = _context.clock().now()-state.recvTime;
long recvDelay = now - state.recvTime;
if (response == 0) {
// unused
@@ -833,8 +833,8 @@ class BuildHandler implements Runnable {
HopConfig cfg = null;
if (response == 0) {
cfg = new HopConfig();
cfg.setCreation(_context.clock().now());
cfg.setExpiration(_context.clock().now() + 10*60*1000);
cfg.setCreation(now);
cfg.setExpiration(now + 10*60*1000);
cfg.setIVKey(req.readIVKey());
cfg.setLayerKey(req.readLayerKey());
if (isInGW) {
@@ -935,7 +935,7 @@ class BuildHandler implements Runnable {
+ " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId());
// now actually send the response
long expires = _context.clock().now() + NEXT_HOP_SEND_TIMEOUT;
long expires = now + NEXT_HOP_SEND_TIMEOUT;
if (!isOutEnd) {
state.msg.setUniqueId(req.readReplyMessageId());
state.msg.setMessageExpiration(expires);

View File

@@ -16,6 +16,7 @@ import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelManagerFacade;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.tunnel.BuildMessageGenerator;
import net.i2p.util.Log;
import net.i2p.util.VersionComparator;
@@ -117,24 +118,25 @@ abstract class BuildRequestor {
* @param cfg ReplyMessageId must be set
* @return success
*/
public static boolean request(RouterContext ctx, TunnelPool pool,
public static boolean request(RouterContext ctx,
PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
// new style crypto fills in all the blanks, while the old style waits for replies to fill in the next hop, etc
prepare(ctx, cfg);
if (cfg.getLength() <= 1) {
buildZeroHop(ctx, pool, cfg, exec);
buildZeroHop(ctx, cfg, exec);
return true;
}
Log log = ctx.logManager().getLog(BuildRequestor.class);
cfg.setTunnelPool(pool);
final TunnelPool pool = cfg.getTunnelPool();
final TunnelPoolSettings settings = pool.getSettings();
TunnelInfo pairedTunnel = null;
Hash farEnd = cfg.getFarEnd();
TunnelManagerFacade mgr = ctx.tunnelManager();
boolean isInbound = pool.getSettings().isInbound();
if (pool.getSettings().isExploratory() || !usePairedTunnels(ctx)) {
boolean isInbound = settings.isInbound();
if (settings.isExploratory() || !usePairedTunnels(ctx)) {
if (isInbound)
pairedTunnel = mgr.selectOutboundExploratoryTunnel(farEnd);
else
@@ -142,9 +144,9 @@ abstract class BuildRequestor {
} else {
// building a client tunnel
if (isInbound)
pairedTunnel = mgr.selectOutboundTunnel(pool.getSettings().getDestination(), farEnd);
pairedTunnel = mgr.selectOutboundTunnel(settings.getDestination(), farEnd);
else
pairedTunnel = mgr.selectInboundTunnel(pool.getSettings().getDestination(), farEnd);
pairedTunnel = mgr.selectInboundTunnel(settings.getDestination(), farEnd);
if (pairedTunnel == null) {
if (isInbound) {
// random more reliable than closest ??
@@ -178,12 +180,12 @@ abstract class BuildRequestor {
if (pairedTunnel == null) {
if (log.shouldLog(Log.WARN))
log.warn("Tunnel build failed, as we couldn't find a paired tunnel for " + cfg);
exec.buildComplete(cfg, pool);
exec.buildComplete(cfg);
// Not even an exploratory tunnel? We are in big trouble.
// Let's not spin through here too fast.
// But don't let a client tunnel waiting for exploratories slow things down too much,
// as there may be other tunnel pools who can build
int ms = pool.getSettings().isExploratory() ? 250 : 25;
int ms = settings.isExploratory() ? 250 : 25;
try { Thread.sleep(ms); } catch (InterruptedException ie) {}
return false;
}
@@ -194,7 +196,7 @@ abstract class BuildRequestor {
if (msg == null) {
if (log.shouldLog(Log.WARN))
log.warn("Tunnel build failed, as we couldn't create the tunnel build message for " + cfg);
exec.buildComplete(cfg, pool);
exec.buildComplete(cfg);
return false;
}
@@ -225,11 +227,11 @@ abstract class BuildRequestor {
if (peer == null) {
if (log.shouldLog(Log.WARN))
log.warn("Could not find the next hop to send the outbound request to: " + cfg);
exec.buildComplete(cfg, pool);
exec.buildComplete(cfg);
return false;
}
OutNetMessage outMsg = new OutNetMessage(ctx, msg, ctx.clock().now() + FIRST_HOP_TIMEOUT, PRIORITY, peer);
outMsg.setOnFailedSendJob(new TunnelBuildFirstHopFailJob(ctx, pool, cfg, exec));
outMsg.setOnFailedSendJob(new TunnelBuildFirstHopFailJob(ctx, cfg, exec));
try {
ctx.outNetMessagePool().add(outMsg);
} catch (RuntimeException re) {
@@ -365,20 +367,19 @@ keep this here for the next time we change the build protocol
return msg;
}
private static void buildZeroHop(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
private static void buildZeroHop(RouterContext ctx, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
Log log = ctx.logManager().getLog(BuildRequestor.class);
if (log.shouldLog(Log.DEBUG))
log.debug("Build zero hop tunnel " + cfg);
exec.buildComplete(cfg, pool);
exec.buildComplete(cfg);
if (cfg.isInbound())
ctx.tunnelDispatcher().joinInbound(cfg);
else
ctx.tunnelDispatcher().joinOutbound(cfg);
pool.addTunnel(cfg);
cfg.getTunnelPool().addTunnel(cfg);
exec.buildSuccessful(cfg);
ExpireJob expireJob = new ExpireJob(ctx, cfg, pool);
cfg.setExpireJob(expireJob);
ExpireJob expireJob = new ExpireJob(ctx, cfg);
ctx.jobQueue().addJob(expireJob);
// can it get much easier?
}
@@ -393,18 +394,16 @@ keep this here for the next time we change the build protocol
* Can't do this for inbound tunnels since the msg goes out an expl. tunnel.
*/
private static class TunnelBuildFirstHopFailJob extends JobImpl {
private final TunnelPool _pool;
private final PooledTunnelCreatorConfig _cfg;
private final BuildExecutor _exec;
private TunnelBuildFirstHopFailJob(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
private TunnelBuildFirstHopFailJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
super(ctx);
_cfg = cfg;
_exec = exec;
_pool = pool;
}
public String getName() { return "Timeout contacting first peer for OB tunnel"; }
public void runJob() {
_exec.buildComplete(_cfg, _pool);
_exec.buildComplete(_cfg);
getContext().profileManager().tunnelTimedOut(_cfg.getPeer(1));
getContext().statManager().addRateData("tunnel.buildFailFirstHop", 1, 0);
// static, no _log

View File

@@ -298,7 +298,7 @@ public class ConnectChecker {
for (RouterAddress ra : addrs) {
String style = ra.getTransportStyle();
String host = ra.getHost();
if ("NTCP".equals(style)) {
if ("NTCP".equals(style) || "NTCP2".equals(style)) {
if (host != null) {
if (host.contains(":"))
rv |= NTCP_V6;

View File

@@ -5,7 +5,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.tunnel.TunnelCreatorConfig;
/**
* This runs twice for each tunnel.
@@ -13,17 +12,15 @@ import net.i2p.router.tunnel.TunnelCreatorConfig;
* The second time, stop accepting data for it.
*/
class ExpireJob extends JobImpl {
private final TunnelPool _pool;
private final TunnelCreatorConfig _cfg;
private final PooledTunnelCreatorConfig _cfg;
private final AtomicBoolean _leaseUpdated = new AtomicBoolean(false);
private final long _dropAfter;
private static final long OB_EARLY_EXPIRE = 30*1000;
private static final long IB_EARLY_EXPIRE = OB_EARLY_EXPIRE + 7500;
public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
public ExpireJob(RouterContext ctx, PooledTunnelCreatorConfig cfg) {
super(ctx);
_pool = pool;
_cfg = cfg;
// we act as if this tunnel expires a random skew before it actually does
// so we rebuild out of sync. otoh, we will honor tunnel messages on it
@@ -31,7 +28,7 @@ class ExpireJob extends JobImpl {
// others may be sending to the published lease expirations
// Also skew the inbound away from the outbound
long expire = cfg.getExpiration();
if (_pool.getSettings().isInbound()) {
if (cfg.getTunnelPool().getSettings().isInbound()) {
// wait extra long for IB so we don't drop msgs that
// got all the way to us.
_dropAfter = expire + (2 * Router.CLOCK_FUDGE_FACTOR);
@@ -51,10 +48,11 @@ class ExpireJob extends JobImpl {
public void runJob() {
if (_leaseUpdated.compareAndSet(false,true)) {
TunnelPool pool = _cfg.getTunnelPool();
// First run
_pool.removeTunnel(_cfg);
pool.removeTunnel(_cfg);
// noop for outbound
_pool.refreshLeaseSet();
pool.refreshLeaseSet();
long timeToDrop = _dropAfter - getContext().clock().now();
requeue(timeToDrop);
} else {

View File

@@ -3,38 +3,30 @@ package net.i2p.router.tunnel.pool;
import java.util.Properties;
import net.i2p.data.Hash;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.tunnel.TunnelCreatorConfig;
import net.i2p.util.Log;
/**
* Data about a tunnel we created
*/
class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
private TunnelPool _pool;
private TestJob _testJob;
/** Creates a new instance of PooledTunnelCreatorConfig */
public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound) {
this(ctx, length, isInbound, null);
}
public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound, Hash destination) {
private final TunnelPool _pool;
/**
* Creates a new instance of PooledTunnelCreatorConfig
*
* @param destination may be null
* @param pool non-null
*/
public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound,
Hash destination, TunnelPool pool) {
super(ctx, length, isInbound, destination);
}
/** calls TestJob */
@Override
public void testSuccessful(int ms) {
if (_testJob != null)
_testJob.testSuccessful(ms);
super.testSuccessful(ms);
_pool = pool;
}
/** called from TestJob */
public void testJobSuccessful(int ms) {
super.testSuccessful(ms);
testSuccessful(ms);
}
/**
@@ -51,39 +43,17 @@ class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
// Todo: Maybe delay or prevent failing if we are near tunnel build capacity,
// to prevent collapse (loss of all tunnels)
_pool.tunnelFailed(this);
if (_testJob != null) // just in case...
_context.jobQueue().removeJob(_testJob);
}
return rv;
}
@Override
public Properties getOptions() {
if (_pool == null) return null;
return _pool.getSettings().getUnknownOptions();
}
public void setTunnelPool(TunnelPool pool) {
if (pool != null) {
_pool = pool;
} else {
Log log = _context.logManager().getLog(getClass());
log.error("Null tunnel pool?", new Exception("foo"));
}
}
public TunnelPool getTunnelPool() { return _pool; }
/** @deprecated unused, which makes _testJob unused - why is it here */
@Deprecated
void setTestJob(TestJob job) { _testJob = job; }
/** does nothing, to be deprecated */
public void setExpireJob(Job job) { /* _expireJob = job; */ }
/**
* @deprecated Fix memory leaks caused by references if you need to use pairedTunnel
* @return non-null
*/
@Deprecated
public void setPairedTunnel(TunnelInfo tunnel) { /* _pairedTunnel = tunnel; */}
// public TunnelInfo getPairedTunnel() { return _pairedTunnel; }
public TunnelPool getTunnelPool() { return _pool; }
}

View File

@@ -12,6 +12,7 @@ import java.util.Properties;
import java.util.Set;
import java.util.StringTokenizer;
import net.i2p.crypto.EncType;
import net.i2p.crypto.SHA256Generator;
import net.i2p.crypto.SigType;
import net.i2p.data.DataFormatException;
@@ -469,6 +470,9 @@ public abstract class TunnelPeerSelector extends ConnectChecker {
maxLen++;
if (cap.length() <= maxLen)
return true;
if (peer.getIdentity().getPublicKey().getType() != EncType.ELGAMAL_2048)
return true;
// otherwise, it contains flags we aren't trying to focus on,
// so don't exclude it based on published capacity

View File

@@ -661,7 +661,7 @@ public class TunnelPool {
_log.info(toString() + ": building a fallback tunnel (usable: " + usable + " needed: " + quantity + ")");
// runs inline, since its 0hop
_manager.getExecutor().buildTunnel(this, configureNewTunnel(true));
_manager.getExecutor().buildTunnel(configureNewTunnel(true));
return true;
}
return false;
@@ -1131,7 +1131,7 @@ public class TunnelPool {
int len = settings.getLengthOverride();
if (len < 0)
len = settings.getLength();
if (len > 0 && (!settings.isExploratory()) && _context.random().nextBoolean()) {
if (len > 0 && (!settings.isExploratory()) && _context.random().nextInt(4) < 3) { // 75%
// look for a tunnel to reuse, if the right length and expiring soon
// ignore variance for now.
len++; // us
@@ -1172,8 +1172,8 @@ public class TunnelPool {
}
PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, peers.size(),
settings.isInbound(), settings.getDestination());
cfg.setTunnelPool(this);
settings.isInbound(), settings.getDestination(),
this);
// peers list is ordered endpoint first, but cfg.getPeer() is ordered gateway first
for (int i = 0; i < peers.size(); i++) {
int j = peers.size() - 1 - i;
@@ -1204,7 +1204,6 @@ public class TunnelPool {
*/
void buildComplete(PooledTunnelCreatorConfig cfg) {
synchronized (_inProgress) { _inProgress.remove(cfg); }
cfg.setTunnelPool(this);
//_manager.buildComplete(cfg);
}

View File

@@ -129,7 +129,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
if (_log.shouldLog(Log.ERROR))
_log.error("Want the inbound tunnel for " + destination.toBase32() +
" but there isn't a pool?");
" but there isn't a pool?", new Exception());
return null;
}
@@ -205,7 +205,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
if (_log.shouldLog(Log.ERROR))
_log.error("Want the inbound tunnel for " + destination.toBase32() +
" but there isn't a pool?");
" but there isn't a pool?", new Exception());
return null;
}
@@ -561,25 +561,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
_context.router().isHidden() ||
_context.router().getRouterInfo().getAddressCount() <= 0)) {
TunnelPool pool = cfg.getTunnelPool();
if (pool == null) {
// never seen this before, do we reallly need to bother
// trying so hard to find his pool?
_log.error("How does this not have a pool? " + cfg, new Exception("baf"));
if (cfg.getDestination() != null) {
if (cfg.isInbound()) {
pool = _clientInboundPools.get(cfg.getDestination());
} else {
pool = _clientOutboundPools.get(cfg.getDestination());
}
} else {
if (cfg.isInbound()) {
pool = _inboundExploratory;
} else {
pool = _outboundExploratory;
}
}
cfg.setTunnelPool(pool);
}
_context.jobQueue().addJob(new TestJob(_context, cfg, pool));
}
}

View File

@@ -0,0 +1,99 @@
package org.bouncycastle.oldcrypto;
/*
* Copyright (c) 2000 - 2004 The Legion Of The Bouncy Castle
* (http://www.bouncycastle.org)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
/**
 * The base interface for implementations of message authentication codes (MACs).
 *
 * modified by jrandom to use the session key byte array directly
 * (instead of the original Bouncy Castle CipherParameters argument)
 *
 * Not a public API - Not for external use!
 */
public interface Mac
{
    /**
     * Initialise the MAC with the given key.
     *
     * @param key the key required by the MAC.
     * @throws IllegalArgumentException if the key argument is
     * inappropriate.
     */
    public void init(byte key[])
        throws IllegalArgumentException;

    /**
     * Return the name of the algorithm the MAC implements.
     *
     * @return the name of the algorithm the MAC implements.
     */
    public String getAlgorithmName();

    /**
     * Return the size of the MAC this generates (in bytes).
     *
     * @return the size of the MAC produced by doFinal(), in bytes.
     */
    public int getMacSize();

    /**
     * Add a single byte to the MAC for processing.
     *
     * @param in the byte to be processed.
     * @throws IllegalStateException if the MAC is not initialised.
     */
    public void update(byte in)
        throws IllegalStateException;

    /**
     * Add a range of bytes to the MAC for processing.
     *
     * @param in the array containing the input.
     * @param inOff the index in the array the data begins at.
     * @param len the length of the input starting at inOff.
     * @throws IllegalStateException if the MAC is not initialised.
     */
    public void update(byte[] in, int inOff, int len)
        throws IllegalStateException;

    /**
     * Compute the final stage of the MAC writing the output to the out
     * parameter.
     * <p>
     * doFinal leaves the MAC in the same state it was after the last init.
     *
     * @param out the array the MAC is to be output to.
     * @param outOff the offset into the out buffer the output is to start at.
     * @return the number of bytes written to out
     * @throws IllegalStateException if the MAC is not initialised.
     */
    public int doFinal(byte[] out, int outOff)
        throws IllegalStateException;

    /**
     * Reset the MAC. At the end of resetting the MAC should be in the
     * same state it was after the last init (if there was one).
     */
    public void reset();
}

View File

@@ -0,0 +1,200 @@
package org.bouncycastle.oldcrypto.macs;
/*
* Copyright (c) 2000 - 2004 The Legion Of The Bouncy Castle
* (http://www.bouncycastle.org)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
//import org.bouncycastle.crypto.CipherParameters;
import java.security.DigestException;
import java.security.MessageDigest;
import java.util.Arrays;
import net.i2p.util.SimpleByteCache;
import org.bouncycastle.oldcrypto.Mac;
/**
* HMAC implementation based on RFC2104
*
* H(K XOR opad, H(K XOR ipad, text))
*
* modified by jrandom to use the session key byte array directly and to cache
* a frequently used buffer (called on doFinal). changes released into the public
* domain in 2005.
*
* This is renamed from HMac because the constructor HMac(digest, sz) does not exist
* in the standard bouncycastle library, thus it conflicts in JVMs that contain the
* standard library (Android).
*
* As of 0.9.12, refactored to use standard MessageDigest.
*
* Deprecated - Do not use outside of router or Syndie.
* Not a public API - Not for external use!
*/
public class I2PHMac
    implements Mac
{
    /** HMAC block length in bytes (matches the 64-byte block of MD5/SHA-1/SHA-256) */
    private final static int BLOCK_LENGTH = 64;

    /** RFC 2104 inner padding byte */
    private final static byte IPAD = (byte)0x36;
    /** RFC 2104 outer padding byte */
    private final static byte OPAD = (byte)0x5C;

    private final MessageDigest digest;
    // output size in bytes; may differ from the digest's natural length (nonstandard)
    private final int digestSize;
    // holds (K XOR ipad) after init(); also used to re-prime the digest in reset()
    private final byte[] inputPad = new byte[BLOCK_LENGTH];
    // holds (K XOR opad) after init()
    private final byte[] outputPad = new byte[BLOCK_LENGTH];

    /**
     * Standard HMAC, size == digest size.
     * @deprecated Use javax.crypto.Mac
     */
    @Deprecated
    public I2PHMac(MessageDigest digest) {
        this(digest, digest.getDigestLength());
    }

    /**
     * @param sz override the digest's size, nonstandard if different.
     * SEE NOTES in HMACGenerator about why this isn't compatible with standard HmacMD5
     */
    public I2PHMac(MessageDigest digest, int sz) {
        this.digest = digest;
        this.digestSize = sz;
    }

    public String getAlgorithmName()
    {
        return digest.getAlgorithm() + "/HMAC";
    }

    public MessageDigest getUnderlyingDigest()
    {
        return digest;
    }

    /**
     * Initialise with the raw key and prime the digest with (K XOR ipad),
     * so that subsequent update() calls feed the inner hash directly.
     *
     * @param key the MAC key; keys longer than BLOCK_LENGTH are first hashed
     *            down (to digestSize bytes) then zero-padded, per RFC 2104
     * @throws IllegalArgumentException if the digest fails while shortening a long key
     */
    //public void init(
    //    CipherParameters params)
    //{
    public void init(byte key[])
    {
        digest.reset();
        //byte[] key = ((KeyParameter)params).getKey();

        if (key.length > BLOCK_LENGTH)
        {
            // over-long keys are replaced by their hash, zero-padded to the block length
            digest.update(key, 0, key.length);
            try {
                digest.digest(inputPad, 0, digestSize);
            } catch (DigestException de) {
                digest.reset();
                throw new IllegalArgumentException(de);
            }

            for (int i = digestSize; i < inputPad.length; i++)
            {
                inputPad[i] = 0;
            }
        }
        else
        {
            // short keys are simply zero-padded to the block length
            System.arraycopy(key, 0, inputPad, 0, key.length);

            for (int i = key.length; i < inputPad.length; i++)
            {
                inputPad[i] = 0;
            }
        }

        // why reallocate?  it hasn't changed sizes, and the arraycopy
        // below fills it completely...
        //outputPad = new byte[inputPad.length];
        System.arraycopy(inputPad, 0, outputPad, 0, inputPad.length);

        // turn the padded key into (K XOR ipad) and (K XOR opad)
        for (int i = 0; i < inputPad.length; i++)
        {
            inputPad[i] ^= IPAD;
        }

        for (int i = 0; i < outputPad.length; i++)
        {
            outputPad[i] ^= OPAD;
        }

        // start the inner hash: H(K XOR ipad, ...)
        digest.update(inputPad, 0, inputPad.length);
    }

    public int getMacSize() {
        return digestSize;
    }

    public void update(byte in) {
        digest.update(in);
    }

    public void update(byte[] in, int inOff, int len) {
        digest.update(in, inOff, len);
    }

    /**
     * Finish the MAC: completes the inner hash, then computes
     * H(K XOR opad, innerHash) into out.  Always leaves this MAC reset
     * to the state it was in immediately after the last init().
     *
     * @param out destination for digestSize bytes of MAC
     * @param outOff offset into out
     * @return the number of bytes written to out
     * @throws IllegalArgumentException wrapping any DigestException from the digest
     */
    public int doFinal(byte[] out, int outOff) {
        // scratch buffer for the inner hash result, reused via SimpleByteCache
        byte[] tmp = acquireTmp(digestSize);
        //byte[] tmp = new byte[digestSize];
        try {
            // inner hash result
            digest.digest(tmp, 0, digestSize);
            // outer hash: H(K XOR opad, innerHash)
            digest.update(outputPad, 0, outputPad.length);
            digest.update(tmp, 0, tmp.length);
            return digest.digest(out, outOff, digestSize);
        } catch (DigestException de) {
            throw new IllegalArgumentException(de);
        } finally {
            // return the scratch buffer to the cache and re-prime the digest
            releaseTmp(tmp);
            reset();
        }
    }

    /** get a zeroed buffer of the given size from the byte cache */
    private static byte[] acquireTmp(int sz) {
        byte[] rv = SimpleByteCache.acquire(sz);
        Arrays.fill(rv, (byte)0x0);
        return rv;
    }

    /** return a buffer obtained from acquireTmp() to the byte cache */
    private static void releaseTmp(byte buf[]) {
        SimpleByteCache.release(buf);
    }

    /**
     * Reset the mac generator.
     */
    public void reset()
    {
        /*
         * reset the underlying digest.
         */
        digest.reset();

        /*
         * reinitialize the digest with (K XOR ipad), ready for the next message.
         */
        digest.update(inputPad, 0, inputPad.length);
    }
}

View File

@@ -0,0 +1,17 @@
<html>
<body>
<p>
This is from some very old version of bouncycastle, part of package org.bouncycastle.crypto.
Android bundled something similar in pre-Gingerbread, but upgraded to a later, incompatible version
in Gingerbread. As of Java 1.4 these are in javax.crypto - more or less.
To avoid having to make two different versions of our Android app, we rename to org.bouncycastle.oldcrypto.
</p><p>
Ref: <a href="http://docs.oracle.com/javase/1.5.0/docs/api/javax/crypto/package-summary.html">javax.crypto</a>
and
<a href="http://code.google.com/p/android/issues/detail?id=3280">this android issue</a>.
</p><p>
Moved from i2p.jar to router.jar as of 0.9.42.
Not a public API! Not for external use!
</p>
</body>
</html>

View File

@@ -0,0 +1,17 @@
<html>
<body>
<p>
This is from some very old version of bouncycastle, part of package org.bouncycastle.crypto.
Android bundled something similar in pre-Gingerbread, but upgraded to a later, incompatible version
in Gingerbread. As of Java 1.4 these are in javax.crypto - more or less.
To avoid having to make two different versions of our Android app, we rename to org.bouncycastle.oldcrypto.
</p><p>
Ref: <a href="http://docs.oracle.com/javase/1.5.0/docs/api/javax/crypto/package-summary.html">javax.crypto</a>
and
<a href="http://code.google.com/p/android/issues/detail?id=3280">this android issue</a>.
</p><p>
Moved from i2p.jar to router.jar as of 0.9.42.
Not a public API! Not for external use!
</p>
</body>
</html>