propagate from branch 'i2p.i2p.zzz.multisess' (head 655a0c2bbd50625c804b8de8c809b40ed63f53f4)

to branch 'i2p.i2p' (head b977ab50209475c0e74825f361924e05dbd470c7)
This commit is contained in:
zzz
2015-06-17 16:00:53 +00:00
59 changed files with 572 additions and 224 deletions

View File

@@ -49,7 +49,7 @@ public class DataMessage extends FastI2NPMessageImpl {
long size = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if (size > MAX_SIZE)
throw new I2NPMessageException("wtf, size=" + size);
throw new I2NPMessageException("too large msg, size=" + size);
_data = new byte[(int)size];
System.arraycopy(data, curIndex, _data, 0, (int)size);
}

View File

@@ -357,8 +357,8 @@ public class DeliveryInstructions extends DataStructureImpl {
int offset = 0;
offset += getAdditionalInfo(rv, offset);
if (offset != additionalSize)
//_log.log(Log.CRIT, "wtf, additionalSize = " + additionalSize + ", offset = " + offset);
throw new IllegalStateException("wtf, additionalSize = " + additionalSize + ", offset = " + offset);
//_log.log(Log.CRIT, "size mismatch, additionalSize = " + additionalSize + ", offset = " + offset);
throw new IllegalStateException("size mismatch, additionalSize = " + additionalSize + ", offset = " + offset);
return rv;
}

View File

@@ -164,7 +164,7 @@ public class I2NPMessageReader {
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();
} catch (Exception e) {
_log.log(Log.CRIT, "wtf, error reading", e);
_log.log(Log.CRIT, "error reading msg!", e);
_listener.readError(I2NPMessageReader.this, e);
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();

View File

@@ -62,7 +62,7 @@ public class TunnelGatewayMessage extends FastI2NPMessageImpl {
if (_msg != null)
throw new IllegalStateException();
if (msg == null)
throw new IllegalArgumentException("wtf, dont set me to null");
throw new IllegalArgumentException("dont set me to null!");
_msg = msg;
}
@@ -137,7 +137,7 @@ public class TunnelGatewayMessage extends FastI2NPMessageImpl {
//handler.readMessage(data, curIndex);
//_msg = handler.lastRead();
//if (_msg == null)
// throw new I2NPMessageException("wtf, message read has no payload?");
// throw new I2NPMessageException("impossible? message read has no payload?!");
// NEW WAY save lots of effort at the IBGW by reading as an UnknownI2NPMessage instead
// This will save a lot of object churn and processing,

View File

@@ -48,7 +48,7 @@ public class UnknownI2NPMessage extends FastI2NPMessageImpl {
throw new IllegalStateException();
if (type != _type) throw new I2NPMessageException("Message type is incorrect for this message");
if (dataSize > MAX_SIZE)
throw new I2NPMessageException("wtf, size=" + dataSize);
throw new I2NPMessageException("size mismatch, too big, size=" + dataSize);
_data = new byte[dataSize];
System.arraycopy(data, offset, _data, 0, dataSize);
}

View File

@@ -308,7 +308,7 @@ public class RouterInfo extends DatabaseEntry {
*/
protected byte[] getBytes() throws DataFormatException {
if (_byteified != null) return _byteified;
if (_identity == null) throw new DataFormatException("Router identity isn't set? wtf!");
if (_identity == null) throw new DataFormatException("Router identity isn't set?!");
//long before = Clock.getInstance().now();
ByteArrayOutputStream out = new ByteArrayOutputStream(2*1024);

View File

@@ -162,11 +162,11 @@ public class Banlist {
*/
public boolean banlistRouter(Hash peer, String reason, String reasonCode, String transport, long expireOn) {
if (peer == null) {
_log.error("wtf, why did we try to banlist null?", new Exception("banfaced"));
_log.error("why did we try to banlist null?", new Exception("banfaced"));
return false;
}
if (peer.equals(_context.routerHash())) {
_log.error("wtf, why did we try to banlist ourselves?", new Exception("banfaced"));
_log.error("why did we try to banlist ourselves?", new Exception("banfaced"));
return false;
}
boolean wasAlready = false;

View File

@@ -591,7 +591,7 @@ public class JobQueue {
} catch (Throwable t) {
_context.clock().removeUpdateListener(this);
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, pumper killed", t);
_log.error("pumper killed?!", t);
}
}

View File

@@ -117,7 +117,7 @@ class JobQueueRunner extends I2PThread {
//if ( (jobNum % 10) == 0)
// System.gc();
} catch (Throwable t) {
_log.log(Log.CRIT, "WTF, error running?", t);
_log.log(Log.CRIT, "error running?", t);
}
}
//_state = 16;

View File

@@ -852,7 +852,7 @@ public class Router implements RouterClock.ClockShiftListener {
addCapabilities(ri);
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
if (key == null) {
_log.log(Log.CRIT, "Internal error - signing private key not known? wtf");
_log.log(Log.CRIT, "Internal error - signing private key not known? Impossible?");
return;
}
ri.sign(key);

View File

@@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 2;
public final static long BUILD = 5;
/** for example "-test" */
public final static String EXTRA = "";

View File

@@ -122,7 +122,7 @@ public class VMCommSystem extends CommSystemFacade {
_ctx.inNetMessagePool().add(msg, null, _from);
} catch (Exception e) {
_log.error("wtf, error reading/formatting a VM message?", e);
_log.error("Error reading/formatting a VM message? Something is not right...", e);
}
}
public String getName() { return "Receive Message"; }

View File

@@ -43,22 +43,40 @@ public class SendMessageDirectJob extends JobImpl {
private boolean _sent;
private long _searchOn;
/**
* @param toPeer may be ourselves
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int timeoutMs, int priority) {
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority);
}
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
/**
* @param toPeer may be ourselves
* @param onSuccess may be null
* @param onFail may be null
     * @param selector may be null
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority) {
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority);
}
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
/**
* @param toPeer may be ourselves
* @param onSend may be null
* @param onSuccess may be null
* @param onFail may be null
     * @param selector may be null
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority) {
super(ctx);
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
_message = message;
_targetHash = toPeer;
if (timeoutMs < 10*1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy bastard"));
_log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy caller!"));
_expiration = ctx.clock().now() + 10*1000;
} else {
_expiration = timeoutMs + ctx.clock().now();

View File

@@ -28,7 +28,7 @@ public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
_context = context;
_facade = facade;
// following are for HFDSMJ
context.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 60*1000, 60*60*1000l });
context.statManager().createRateStat("netDb.storeLeaseSetHandled", "How many leaseSet store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeRouterInfoHandled", "How many routerInfo store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeRecvTime", "How long it takes to handle the local store part of a dbStore?", "NetworkDatabase", new long[] { 60*60*1000l });

View File

@@ -6,6 +6,7 @@ import net.i2p.crypto.SigType;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
@@ -55,13 +56,27 @@ class FloodfillMonitorJob extends JobImpl {
} else {
getContext().router().eventLog().addEvent(EventLog.NOT_FLOODFILL);
}
getContext().router().rebuildRouterInfo();
getContext().router().rebuildRouterInfo(true);
Job routerInfoFlood = new FloodfillRouterInfoFloodJob(getContext(), _facade);
if(getContext().router().getUptime() < 5*60*1000) {
// Needed to prevent race if router.floodfillParticipant=true (not auto)
routerInfoFlood.getTiming().setStartAfter(getContext().clock().now() + 5*60*1000);
getContext().jobQueue().addJob(routerInfoFlood);
if(_log.shouldLog(Log.DEBUG)) {
_log.logAlways(Log.DEBUG, "Deferring our FloodfillRouterInfoFloodJob run because of low uptime.");
}
} else {
routerInfoFlood.runJob();
if(_log.shouldLog(Log.DEBUG)) {
_log.logAlways(Log.DEBUG, "Running FloodfillRouterInfoFloodJob");
}
}
}
if (_log.shouldLog(Log.INFO))
_log.info("Should we be floodfill? " + ff);
int delay = (REQUEUE_DELAY / 2) + getContext().random().nextInt(REQUEUE_DELAY);
// there's a lot of eligible non-floodfills, keep them from all jumping in at once
// To do: somehow assess the size of the network to make this adaptive?
// TODO: somehow assess the size of the network to make this adaptive?
if (!ff)
delay *= 4; // this was 7, reduced for moar FFs --zab
requeue(delay);

View File

@@ -40,7 +40,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
* Was 7 through release 0.9; 5 for 0.9.1.
* 4 as of 0.9.2; 3 as of 0.9.9
*/
private static final int MAX_TO_FLOOD = 3;
public static final int MAX_TO_FLOOD = 3;
private static final int FLOOD_PRIORITY = OutNetMessage.PRIORITY_NETDB_FLOOD;
private static final int FLOOD_TIMEOUT = 30*1000;
@@ -129,7 +129,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
*/
@Override
public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException {
if (localRouterInfo == null) throw new IllegalArgumentException("wtf, null localRouterInfo?");
if (localRouterInfo == null) throw new IllegalArgumentException("impossible: null localRouterInfo?");
// should this be after super? why not publish locally?
if (_context.router().isHidden()) return; // DE-nied!
super.publish(localRouterInfo);

View File

@@ -0,0 +1,63 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Collections;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
/**
* Job to flood nearby floodfill routers with our RI.
* Speeds up integration of new ffs. Created for #1195.
* Also called when opting out of ff to call off the hounds ASAP.
* Currently floods FNDF.MAX_TO_FLOOD * 2 routers nearest to us.
*
*/
class FloodfillRouterInfoFloodJob extends JobImpl {
private final Log _log;
private final FloodfillNetworkDatabaseFacade _facade;
private static final int FLOOD_PEERS = 2 * FloodfillNetworkDatabaseFacade.MAX_TO_FLOOD;
public FloodfillRouterInfoFloodJob(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
super(context);
_facade = facade;
_log = context.logManager().getLog(FloodfillRouterInfoFloodJob.class);
}
public String getName() { return "Flood our RouterInfo to nearby floodfills"; }
public void runJob() {
FloodfillPeerSelector sel = (FloodfillPeerSelector)_facade.getPeerSelector();
DatabaseStoreMessage dsm;
OutNetMessage outMsg;
RouterInfo nextPeerInfo;
List<Hash> peers = sel.selectFloodfillParticipants(getContext().routerHash(), FLOOD_PEERS, null);
for(Hash ri: peers) {
// Iterate through list of nearby (ff) peers
dsm = new DatabaseStoreMessage(getContext());
dsm.setMessageExpiration(getContext().clock().now() + 10*1000);
dsm.setEntry(getContext().router().getRouterInfo());
nextPeerInfo = getContext().netDb().lookupRouterInfoLocally(ri);
if(nextPeerInfo == null) {
continue;
}
outMsg = new OutNetMessage(getContext(), dsm, getContext().clock().now()+10*1000, OutNetMessage.PRIORITY_MY_NETDB_STORE, nextPeerInfo);
getContext().outNetMessagePool().add(outMsg); // Whoosh!
if(_log.shouldLog(Log.DEBUG)) {
_log.logAlways(Log.DEBUG, "Sending our RI to: " + nextPeerInfo.getHash());
}
}
}
}

View File

@@ -14,14 +14,19 @@ import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.TunnelId;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.util.Log;
/**
@@ -34,8 +39,15 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
private final RouterIdentity _from;
private Hash _fromHash;
private final FloodfillNetworkDatabaseFacade _facade;
private final static int REPLY_TIMEOUT = 60*1000;
private final static int MESSAGE_PRIORITY = OutNetMessage.PRIORITY_NETDB_REPLY;
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash, FloodfillNetworkDatabaseFacade facade) {
/**
* @param receivedMessage must never have reply token set if it came down a tunnel
*/
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage,
RouterIdentity from, Hash fromHash,
FloodfillNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_message = receivedMessage;
@@ -136,6 +148,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// somebody has our keys...
if (getContext().routerHash().equals(key)) {
//getContext().statManager().addRateData("netDb.storeLocalRouterInfoAttempt", 1, 0);
// This is initiated by PeerTestJob from another peer
// throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
@@ -170,15 +183,18 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message);
// don't ack or flood
return;
}
long recvEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeRecvTime", recvEnd-recvBegin);
// ack even if invalid or unsupported
// ack even if invalid
// in particular, ack our own RI (from PeerTestJob)
// TODO any cases where we shouldn't?
if (_message.getReplyToken() > 0)
sendAck();
sendAck(key);
long ackEnd = System.currentTimeMillis();
if (_from != null)
@@ -215,7 +231,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// ERR: see comment in HandleDatabaseLookupMessageJob regarding hidden mode
//else if (!_message.getRouterInfo().isHidden())
long floodEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin);
getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin, 60*1000);
} else {
// don't flood it *again*
getContext().statManager().addRateData("netDb.storeFloodOld", 1);
@@ -223,7 +239,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
}
}
private void sendAck() {
private void sendAck(Hash storedKey) {
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setMessageId(_message.getReplyToken());
// Randomize for a little protection against clock-skew fingerprinting.
@@ -231,31 +247,62 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// TODO just set to 0?
// TODO we have no session to garlic wrap this with, needs new message
msg.setArrival(getContext().clock().now() - getContext().random().nextInt(3*1000));
/*
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) {
// no need to do anything but send it where they ask
// may be null
TunnelId replyTunnel = _message.getReplyTunnel();
// A store of our own RI, only if we are not FF
DatabaseStoreMessage msg2;
if (getContext().netDb().floodfillEnabled() ||
storedKey.equals(getContext().routerHash())) {
// don't send our RI if the store was our RI (from PeerTestJob)
msg2 = null;
} else {
// we aren't ff, send a go-away message
msg2 = new DatabaseStoreMessage(getContext());
RouterInfo me = getContext().router().getRouterInfo();
msg2.setEntry(me);
if (_log.shouldWarn())
_log.warn("Got a store w/ reply token, but we aren't ff: from: " + _from +
" fromHash: " + _fromHash + " msg: " + _message, new Exception());
}
Hash toPeer = _message.getReplyGateway();
boolean toUs = getContext().routerHash().equals(toPeer);
// to reduce connection congestion, send directly if connected already,
// else through an exploratory tunnel.
if (toUs && replyTunnel != null) {
// if we are the gateway, act as if we received it
TunnelGatewayMessage tgm = new TunnelGatewayMessage(getContext());
tgm.setMessage(msg);
tgm.setTunnelId(_message.getReplyTunnel());
tgm.setTunnelId(replyTunnel);
tgm.setMessageExpiration(msg.getMessageExpiration());
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), tgm, _message.getReplyGateway(), 10*1000, 200));
getContext().tunnelDispatcher().dispatch(tgm);
if (msg2 != null) {
TunnelGatewayMessage tgm2 = new TunnelGatewayMessage(getContext());
tgm2.setMessage(msg2);
tgm2.setTunnelId(replyTunnel);
tgm2.setMessageExpiration(msg.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(tgm2);
}
} else if (toUs || getContext().commSystem().isEstablished(toPeer)) {
Job send = new SendMessageDirectJob(getContext(), msg, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), msg2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send2.runJob();
}
} else {
*/
TunnelInfo outTunnel = selectOutboundTunnel();
// pick tunnel with endpoint closest to toPeer
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(toPeer);
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnel could be found");
return;
} else {
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0),
_message.getReplyTunnel(), _message.getReplyGateway());
}
//}
}
private TunnelInfo selectOutboundTunnel() {
return getContext().tunnelManager().selectOutboundTunnel();
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0),
replyTunnel, toPeer);
if (msg2 != null)
getContext().tunnelDispatcher().dispatchOutbound(msg2, outTunnel.getSendTunnelId(0),
replyTunnel, toPeer);
}
}
public String getName() { return "Handle Database Store Message"; }

View File

@@ -58,7 +58,7 @@ class IterativeLookupJob extends JobImpl {
continue;
}
if (peer.equals(from)) {
// wtf
// unusual
invalidPeers++;
continue;
}

View File

@@ -646,7 +646,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
try {
store(h, localLeaseSet);
} catch (IllegalArgumentException iae) {
_log.error("wtf, locally published leaseSet is not valid?", iae);
_log.error("locally published leaseSet is not valid?", iae);
throw iae;
}
if (!_context.clientManager().shouldPublishLeaseSet(h))

View File

@@ -484,7 +484,7 @@ class PersistentDataStore extends TransientDataStore {
// don't overwrite recent netdb RIs with reseed data
return fileDate > _knownDate + (60*60*1000);
} else {
// wtf - prevent injection from reseeding
// safety measure - prevent injection from reseeding
_log.error("Prevented LS overwrite by RI " + _key + " from " + _routerFile);
return false;
}

View File

@@ -94,7 +94,7 @@ class SearchJob extends JobImpl {
Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
super(context);
if ( (key == null) || (key.getData() == null) )
throw new IllegalArgumentException("Search for null key? wtf");
throw new IllegalArgumentException("Search for null key?");
_log = getContext().logManager().getLog(getClass());
_facade = facade;
_state = new SearchState(getContext(), key);
@@ -425,7 +425,7 @@ class SearchJob extends JobImpl {
Hash to = router.getIdentity().getHash();
TunnelInfo inTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
if (inTunnel == null) {
_log.warn("No tunnels to get search replies through! wtf!");
_log.warn("No tunnels to get search replies through!");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}
@@ -436,7 +436,7 @@ class SearchJob extends JobImpl {
//RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
//if (inGateway == null) {
// _log.error("We can't find the gateway to our inbound tunnel?! wtf");
// _log.error("We can't find the gateway to our inbound tunnel?!");
// getContext().jobQueue().addJob(new FailedJob(getContext(), router));
// return;
//}
@@ -448,7 +448,7 @@ class SearchJob extends JobImpl {
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
if (outTunnel == null) {
_log.warn("No tunnels to send search out through! wtf!");
_log.warn("No tunnels to send search out through! Impossible?");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}

View File

@@ -101,7 +101,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
_job.replyFound((DatabaseSearchReplyMessage)message, _peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": WTF, reply job matched a strange message: " + message);
_log.error(getJobId() + ": What?! Reply job matched a strange message: " + message);
return;
}

View File

@@ -42,7 +42,7 @@ class SingleLookupJob extends JobImpl {
Hash peer = _dsrm.getReply(i);
if (peer.equals(getContext().routerHash())) // us
continue;
if (peer.equals(from)) // wtf
if (peer.equals(from)) // unusual?
continue;
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
if (ri == null)

View File

@@ -173,7 +173,7 @@ class CapacityCalculator {
case 30*60*1000: return .3;
case 60*60*1000: return .2;
case 24*60*60*1000: return .1;
default: throw new IllegalArgumentException("wtf, period [" + period + "]???");
default: throw new IllegalArgumentException("undefined period passed, period [" + period + "]???");
}
}
}

View File

@@ -24,7 +24,8 @@ import net.i2p.util.Log;
* selection to the peer manager and tests the peer by sending it a useless
* database store message
*
* TODO - What's the point? Disable this? See also notes in PeerManager.selectPeers()
* TODO - What's the point? Disable this? See also notes in PeerManager.selectPeers().
* TODO - Use something besides sending the peer's RI to itself?
*/
public class PeerTestJob extends JobImpl {
private final Log _log;
@@ -82,6 +83,7 @@ public class PeerTestJob extends JobImpl {
/**
* Retrieve a group of 0 or more peers that we want to test.
* Returned list will not include ourselves.
*
* @return set of RouterInfo structures
*/
@@ -110,12 +112,13 @@ public class PeerTestJob extends JobImpl {
/**
* Fire off the necessary jobs and messages to test the given peer
*
* The message is a store of the peer's RI to itself,
* with a reply token.
*/
private void testPeer(RouterInfo peer) {
TunnelInfo inTunnel = getInboundTunnelId();
if (inTunnel == null) {
_log.warn("No tunnels to get peer test replies through! wtf!");
_log.warn("No tunnels to get peer test replies through!");
return;
}
TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
@@ -123,19 +126,19 @@ public class PeerTestJob extends JobImpl {
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
if (inGateway == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("We can't find the gateway to our inbound tunnel?! wtf");
_log.warn("We can't find the gateway to our inbound tunnel?! Impossible?");
return;
}
int timeoutMs = getTestTimeout();
long expiration = getContext().clock().now() + timeoutMs;
long nonce = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
long nonce = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE - 1);
DatabaseStoreMessage msg = buildMessage(peer, inTunnelId, inGateway.getIdentity().getHash(), nonce, expiration);
TunnelInfo outTunnel = getOutboundTunnelId();
if (outTunnel == null) {
_log.warn("No tunnels to send search out through! wtf!");
_log.warn("No tunnels to send search out through! Something is wrong...");
return;
}
@@ -172,7 +175,9 @@ public class PeerTestJob extends JobImpl {
}
/**
* Build a message to test the peer with
* Build a message to test the peer with.
* The message is a store of the peer's RI to itself,
* with a reply token.
*/
private DatabaseStoreMessage buildMessage(RouterInfo peer, TunnelId replyTunnel, Hash replyGateway, long nonce, long expiration) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());

View File

@@ -83,7 +83,7 @@ class LoadRouterInfoJob extends JobImpl {
try {
// if we have a routerinfo but no keys, things go bad in a hurry:
// CRIT ...rkdb.PublishLocalRouterInfoJob: Internal error - signing private key not known? rescheduling publish for 30s
// CRIT net.i2p.router.Router : Internal error - signing private key not known? wtf
// CRIT net.i2p.router.Router : Internal error - signing private key not known? Impossible?
// CRIT ...sport.udp.EstablishmentManager: Error in the establisher java.lang.NullPointerException
// at net.i2p.router.transport.udp.PacketBuilder.buildSessionConfirmedPacket(PacketBuilder.java:574)
// so pretend the RI isn't there if there is no keyfile

View File

@@ -55,7 +55,8 @@ class NtpClient {
/** difference between the unix epoch and jan 1 1900 (NTP uses that) */
private final static double SECONDS_1900_TO_EPOCH = 2208988800.0;
private final static int NTP_PORT = 123;
private static final int DEFAULT_TIMEOUT = 10*1000;
/**
* Query the ntp servers, returning the current time from first one we find
*
@@ -84,7 +85,7 @@ class NtpClient {
* @throws IllegalArgumentException if none of the servers are reachable
* @since 0.7.12
*/
public static long[] currentTimeAndStratum(String serverNames[]) {
public static long[] currentTimeAndStratum(String serverNames[], int perServerTimeout) {
if (serverNames == null)
throw new IllegalArgumentException("No NTP servers specified");
ArrayList<String> names = new ArrayList<String>(serverNames.length);
@@ -92,7 +93,7 @@ class NtpClient {
names.add(serverNames[i]);
Collections.shuffle(names);
for (int i = 0; i < names.size(); i++) {
long[] rv = currentTimeAndStratum(names.get(i));
long[] rv = currentTimeAndStratum(names.get(i), perServerTimeout);
if (rv != null && rv[0] > 0)
return rv;
}
@@ -105,7 +106,7 @@ class NtpClient {
* @return milliseconds since january 1, 1970 (UTC), or -1 on error
*/
public static long currentTime(String serverName) {
long[] la = currentTimeAndStratum(serverName);
long[] la = currentTimeAndStratum(serverName, DEFAULT_TIMEOUT);
if (la != null)
return la[0];
return -1;
@@ -116,7 +117,7 @@ class NtpClient {
* @return time in rv[0] and stratum in rv[1], or null for error
* @since 0.7.12
*/
private static long[] currentTimeAndStratum(String serverName) {
private static long[] currentTimeAndStratum(String serverName, int timeout) {
DatagramSocket socket = null;
try {
// Send request
@@ -135,7 +136,7 @@ class NtpClient {
// Get response
packet = new DatagramPacket(buf, buf.length);
socket.setSoTimeout(10*1000);
socket.setSoTimeout(timeout);
socket.receive(packet);
// Immediately record the incoming timestamp

View File

@@ -43,6 +43,8 @@ public class RouterTimestamper extends Timestamper {
/** how many times do we have to query if we are changing the clock? */
private static final int DEFAULT_CONCURRING_SERVERS = 3;
private static final int MAX_CONSECUTIVE_FAILS = 10;
private static final int DEFAULT_TIMEOUT = 10*1000;
private static final int SHORT_TIMEOUT = 5*1000;
public static final String PROP_QUERY_FREQUENCY = "time.queryFrequencyMs";
public static final String PROP_SERVER_LIST = "time.sntpServerList";
@@ -177,7 +179,7 @@ public class RouterTimestamper extends Timestamper {
if (_log != null && _log.shouldDebug())
_log.debug("Querying servers " + servers);
try {
lastFailed = !queryTime(servers.toArray(new String[servers.size()]));
lastFailed = !queryTime(servers.toArray(new String[servers.size()]), SHORT_TIMEOUT);
} catch (IllegalArgumentException iae) {
if (!lastFailed && _log != null && _log.shouldWarn())
_log.warn("Unable to reach any regional NTP servers: " + servers);
@@ -192,7 +194,7 @@ public class RouterTimestamper extends Timestamper {
if (_log != null && _log.shouldDebug())
_log.debug("Querying servers " + _servers);
try {
lastFailed = !queryTime(_servers.toArray(new String[_servers.size()]));
lastFailed = !queryTime(_servers.toArray(new String[_servers.size()]), DEFAULT_TIMEOUT);
} catch (IllegalArgumentException iae) {
lastFailed = true;
}
@@ -259,18 +261,18 @@ public class RouterTimestamper extends Timestamper {
/**
* True if the time was queried successfully, false if it couldn't be
*/
private boolean queryTime(String serverList[]) throws IllegalArgumentException {
private boolean queryTime(String serverList[], int perServerTimeout) throws IllegalArgumentException {
long found[] = new long[_concurringServers];
long now = -1;
int stratum = -1;
long expectedDelta = 0;
_wellSynced = false;
for (int i = 0; i < _concurringServers; i++) {
if (i > 0) {
// this delays startup when net is disconnected or the timeserver list is bad, don't make it too long
try { Thread.sleep(2*1000); } catch (InterruptedException ie) {}
}
long[] timeAndStratum = NtpClient.currentTimeAndStratum(serverList);
//if (i > 0) {
// // this delays startup when net is disconnected or the timeserver list is bad, don't make it too long
// try { Thread.sleep(2*1000); } catch (InterruptedException ie) {}
//}
long[] timeAndStratum = NtpClient.currentTimeAndStratum(serverList, perServerTimeout);
now = timeAndStratum[0];
stratum = (int) timeAndStratum[1];
long delta = now - _context.clock().now();

View File

@@ -58,7 +58,7 @@ class GetBidsJob extends JobImpl {
Hash us = context.routerHash();
if (to.equals(us)) {
if (log.shouldLog(Log.ERROR))
log.error("wtf, send a message to ourselves? nuh uh. msg = " + msg);
log.error("send a message to ourselves? nuh uh. msg = " + msg);
context.statManager().addRateData("transport.bidFailSelf", msg.getLifetime());
fail(context, msg);
return;

View File

@@ -195,9 +195,9 @@ public class OutboundMessageRegistry {
*/
private void registerPending(OutNetMessage msg, boolean allowEmpty) {
if ( (!allowEmpty) && (msg.getMessage() == null) )
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf");
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? Impossible?");
MessageSelector sel = msg.getReplySelector();
if (sel == null) throw new IllegalArgumentException("No reply selector? wtf");
if (sel == null) throw new IllegalArgumentException("No reply selector? Impossible?");
if (!_activeMessages.add(msg))
return; // dont add dups

View File

@@ -373,9 +373,9 @@ public abstract class TransportImpl implements Transport {
+ "): " + allTime + "ms/" + sendTime + "ms after failing on: "
+ msg.getFailedTransports() + " and succeeding on " + getStyle());
if ( (allTime > 60*1000) && (sendSuccessful) ) {
// WTF!!@#
// VERY slow
if (_log.shouldLog(Log.WARN))
_log.warn("WTF, more than a minute slow? " + msg.getMessageType()
_log.warn("Severe latency? More than a minute slow? " + msg.getMessageType()
+ " of id " + msg.getMessageId() + " (send begin on "
+ new Date(msg.getSendBegin()) + " / created on "
+ new Date(msg.getCreated()) + "): " + msg);
@@ -497,7 +497,7 @@ public abstract class TransportImpl implements Transport {
_listener.messageReceived(inMsg, remoteIdent, remoteIdentHash);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("WTF! Null listener! this = " + toString(), new Exception("Null listener"));
_log.error("Null listener! this = " + toString(), new Exception("Null listener"));
}
}

View File

@@ -530,7 +530,7 @@ public class TransportManager implements TransportEventListener {
if (msg == null)
throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!");
if (_context.router().getRouterInfo().equals(msg.getTarget()))
throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?");
throw new IllegalArgumentException("Bids for a message bound to ourselves?");
List<TransportBid> rv = new ArrayList<TransportBid>(_transports.size());
Set<String> failedTransports = msg.getFailedTransports();

View File

@@ -100,7 +100,7 @@ class NTCPSendFinisher {
// appx 0.1 ms
//_context.statManager().addRateData("ntcp.sendFinishTime", _context.clock().now() - _queued, 0);
} catch (Throwable t) {
_log.log(Log.CRIT, " wtf, afterSend borked", t);
_log.log(Log.CRIT, " afterSend broken?", t);
}
}
}

View File

@@ -168,7 +168,7 @@ class ACKSender implements Runnable {
if (wanted < 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("wtf, why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
_log.warn("why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
continue;
}

View File

@@ -178,7 +178,7 @@ class OutboundMessageFragments {
public void add(OutboundMessageState state) {
PeerState peer = state.getPeer();
if (peer == null)
throw new RuntimeException("wtf, null peer for " + state);
throw new RuntimeException("null peer for " + state);
peer.add(state);
add(peer);
//_context.statManager().addRateData("udp.outboundActiveCount", active, 0);

View File

@@ -370,7 +370,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
if (offset <= 0) {
StringBuilder buf = new StringBuilder(128);
buf.append("wtf, written offset is ").append(offset);
buf.append("uh? written offset is ").append(offset);
buf.append(" for ").append(startAt).append(" through ").append(sendThrough);
for (int i = startAt; i <= sendThrough; i++) {
buf.append(" ").append(pending.get(i).toString());

View File

@@ -164,7 +164,7 @@ class FragmentHandler {
if (_log.shouldLog(Log.ERROR))
_log.error("Corrupt fragment received: offset = " + offset, e);
_context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
// java.lang.IllegalStateException: wtf, don't get the completed size when we're not complete - null fragment i=0 of 1
// java.lang.IllegalStateException: don't get the completed size when we're not complete - null fragment i=0 of 1
// at net.i2p.router.tunnel.FragmentedMessage.getCompleteSize(FragmentedMessage.java:194)
// at net.i2p.router.tunnel.FragmentedMessage.toByteArray(FragmentedMessage.java:223)
// at net.i2p.router.tunnel.FragmentHandler.receiveComplete(FragmentHandler.java:380)

View File

@@ -164,7 +164,7 @@ class FragmentedMessage {
}
public int getCompleteSize() {
if (!_lastReceived)
throw new IllegalStateException("wtf, don't get the completed size when we're not complete");
throw new IllegalStateException("don't get the completed size when we're not complete!");
if (_releasedAfter > 0) {
RuntimeException e = new RuntimeException("use after free in FragmentedMessage");
_log.error("FM completeSize()", e);
@@ -175,7 +175,7 @@ class FragmentedMessage {
ByteArray ba = _fragments[i];
// NPE seen here, root cause unknown
if (ba == null)
throw new IllegalStateException("wtf, don't get the completed size when we're not complete - null fragment i=" + i + " of " + _highFragmentNum);
throw new IllegalStateException("don't get the completed size when we're not complete! - null fragment i=" + i + " of " + _highFragmentNum);
size += ba.getValid();
}
return size;

View File

@@ -20,6 +20,6 @@ class InboundGatewayProcessor extends HopProcessor {
public void process(byte orig[], int offset, int length) {
boolean ok = super.process(orig, offset, length, null);
if (!ok)
throw new RuntimeException("wtf, we are the gateway, how did it fail?");
throw new RuntimeException("we are the gateway, how did it fail?");
}
}

View File

@@ -118,8 +118,8 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
}
return;
} else if (dsm.getReplyToken() != 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping LS DSM w/ reply token down a tunnel for " + _client + ": " + msg);
_context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);
_log.error("Dropping LS DSM w/ reply token down a tunnel for " + _client + ": " + msg);
return;
} else {
// allow DSM of our own key (used by FloodfillVerifyStoreJob)
@@ -144,6 +144,33 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
return;
} // switch
} else {
// expl. tunnel
switch (type) {
case DatabaseStoreMessage.MESSAGE_TYPE:
DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
if (dsm.getReplyToken() != 0) {
_context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
_log.error("Dropping DSM w/ reply token down a expl. tunnel: " + msg);
return;
}
if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET)
((LeaseSet)dsm.getEntry()).setReceivedAsReply();
break;
case DatabaseSearchReplyMessage.MESSAGE_TYPE:
case DeliveryStatusMessage.MESSAGE_TYPE:
case GarlicMessage.MESSAGE_TYPE:
case TunnelBuildReplyMessage.MESSAGE_TYPE:
case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
// these are safe, handled below
break;
default:
_context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
_log.error("Dropped dangerous message down expl tunnel: " + msg, new Exception("cause"));
return;
} // switch
} // client != null
if ( (target == null) || ( (tunnel == null) && (_context.routerHash().equals(target) ) ) ) {
@@ -189,7 +216,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
TunnelId outId = out.getSendTunnelId(0);
if (outId == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, outbound tunnel has no outboundId? " + out
_log.error("strange? outbound tunnel has no outboundId? " + out
+ " failing to distribute " + msg);
return;
}

View File

@@ -211,7 +211,8 @@ public class TunnelDispatcher implements Service {
ctx.statManager().createRequiredRateStat("tunnel.corruptMessage", "Corrupt messages received",
"Tunnels", RATES);
// following are for InboundMessageDistributor
ctx.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "(lifetime is the I2NP type)", "Tunnels", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("tunnel.dropDangerousExplTunnelMessage", "(lifetime is the I2NP type)", "Tunnels", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*60*1000 });
// following is for PumpedTunnelGateway
ctx.statManager().createRateStat("tunnel.dropGatewayOverflow", "Dropped message at GW, queue full", "Tunnels", new long[] { 60*60*1000 });
@@ -630,7 +631,7 @@ public class TunnelDispatcher implements Service {
* @param targetPeer gateway to the tunnel to receive the message
*/
public void dispatchOutbound(I2NPMessage msg, TunnelId outboundTunnel, TunnelId targetTunnel, Hash targetPeer) {
if (outboundTunnel == null) throw new IllegalArgumentException("wtf, null outbound tunnel?");
if (outboundTunnel == null) throw new IllegalArgumentException("null outbound tunnel?");
long before = _context.clock().now();
TunnelGateway gw = _outboundGateways.get(outboundTunnel);
if (gw != null) {
@@ -677,7 +678,7 @@ public class TunnelDispatcher implements Service {
//long dispatchTime = _context.clock().now() - before;
//if (dispatchTime > 1000) {
// if (_log.shouldLog(Log.WARN))
// _log.warn("wtf, took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
// _log.warn("slow? took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
//}
//if (gw instanceof TunnelGatewayZeroHop)
// _context.statManager().addRateData("tunnel.dispatchOutboundZeroHopTime", dispatchTime, dispatchTime);

View File

@@ -352,7 +352,7 @@ class BuildHandler implements Runnable {
default:
_context.statManager().addRateData("tunnel.receiveRejectionCritical", 1);
}
// penalize peer based on their bitchiness level
// penalize peer based on their reported error level
_context.profileManager().tunnelRejected(peer, rtt, howBad);
_context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString());
}

View File

@@ -756,7 +756,7 @@ public class TunnelPool {
TunnelId inId = tunnel.getReceiveTunnelId(0);
Hash gw = tunnel.getPeer(0);
if ( (inId == null) || (gw == null) ) {
_log.error(toString() + ": wtf, tunnel has no inbound gateway/tunnelId? " + tunnel);
_log.error(toString() + ": broken? tunnel has no inbound gateway/tunnelId? " + tunnel);
continue;
}
Lease lease = new Lease();