forked from I2P_Developers/i2p.i2p
cleanups, remove commented-out linear average bw code
replace magic numbers with statics, create new SREDQ on config change (untested), change default thresholds from 100-500 ms to 77-333 ms
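The 77-333 ms figures in the message follow from the new denominator constants introduced below (DEFAULT_LOW_THRESHOLD = 13, DEFAULT_HIGH_THRESHOLD = 3): the SyntheticREDQueue default constructor computes its thresholds as bwBps / denominator, so each denominator is the fraction of a second of traffic the queue may hold. A standalone arithmetic sketch, not part of the commit, using a hypothetical 100 KBps share bandwidth:

    // Illustration only; the class name and the 100 KBps figure are hypothetical.
    public class RedThresholdMath {
        static final int DEFAULT_LOW_THRESHOLD = 13;    // ~1/13 s of traffic
        static final int DEFAULT_HIGH_THRESHOLD = 3;    // ~1/3 s of traffic

        public static void main(String[] args) {
            int bwBps = 100 * 1024;                      // hypothetical share bandwidth, bytes/sec
            int minth = bwBps / DEFAULT_LOW_THRESHOLD;   // 7876 B  = ~77 ms of traffic at bwBps
            int maxth = bwBps / DEFAULT_HIGH_THRESHOLD;  // 34133 B = ~333 ms of traffic at bwBps
            int oldMin = bwBps / 10;                     // 10240 B = 100 ms (old default)
            int oldMax = bwBps / 2;                      // 51200 B = 500 ms (old default)
            System.out.println(minth + "/" + maxth + " B vs old " + oldMin + "/" + oldMax + " B");
        }
    }

Following the usual RED convention, the low threshold is presumably where probabilistic dropping starts and the high threshold is where everything is dropped, so the queue now reacts at roughly 77 ms of backlog instead of 100 ms and gives up at roughly 333 ms instead of 500 ms.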
@@ -24,7 +24,8 @@ public class FIFOBandwidthRefiller implements Runnable {
private final Log _log;
private final RouterContext _context;
private final FIFOBandwidthLimiter _limiter;
private final SyntheticREDQueue _partBWE;
// This is only changed if the config changes
private volatile SyntheticREDQueue _partBWE;

/** how many KBps do we want to allow? */
private int _inboundKBytesPerSecond;
@@ -78,6 +79,9 @@ public class FIFOBandwidthRefiller implements Runnable {
* See util/DecayingBloomFilter and tunnel/BloomFilterIVValidator.
*/
public static final int MAX_OUTBOUND_BANDWIDTH = 16384;

private static final float MAX_SHARE_PERCENTAGE = 0.90f;
private static final float SHARE_LIMIT_FACTOR = 0.95f;

/**
* how often we replenish the queues.
@@ -91,12 +95,6 @@ public class FIFOBandwidthRefiller implements Runnable {
_log = context.logManager().getLog(FIFOBandwidthRefiller.class);
_context.statManager().createRateStat("bwLimiter.participatingBandwidthQueue", "size in bytes", "BandwidthLimiter", new long[] { 5*60*1000l, 60*60*1000l });
reinitialize();
// todo config changes
int maxKBps = Math.min(_inboundKBytesPerSecond, _outboundKBytesPerSecond);
// limit to 90% so it doesn't clog up at the transport bandwidth limiter
float share = Math.min((float) _context.router().getSharePercentage(), 0.90f);
float maxBps = maxKBps * share * 1024f * 0.95f;
_partBWE = new SyntheticREDQueue(context, (int) maxBps);
_isRunning = true;
}

@@ -183,6 +181,16 @@ public class FIFOBandwidthRefiller implements Runnable {
return false;
}
}

/**
* In Bytes per second
*/
private int getShareBandwidth() {
int maxKBps = Math.min(_inboundKBytesPerSecond, _outboundKBytesPerSecond);
// limit to 90% so it doesn't clog up at the transport bandwidth limiter
float share = Math.min((float) _context.router().getSharePercentage(), MAX_SHARE_PERCENTAGE);
return (int) (maxKBps * share * 1024f * SHARE_LIMIT_FACTOR);
}

private void checkConfig() {
updateInboundRate();
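To make the arithmetic in the new getShareBandwidth() helper above concrete, a worked example with hypothetical rate settings (the class name and all input numbers are illustrative):

    // Mirrors getShareBandwidth(): min of the two configured rates, capped share, 5% trim.
    public class ShareBandwidthMath {
        public static void main(String[] args) {
            int inboundKBps = 2048, outboundKBps = 1024;        // hypothetical configured rates
            float sharePercentage = 0.80f;                       // hypothetical router share setting
            int maxKBps = Math.min(inboundKBps, outboundKBps);   // 1024 KBps
            float share = Math.min(sharePercentage, 0.90f);      // MAX_SHARE_PERCENTAGE cap
            int shareBps = (int) (maxKBps * share * 1024f * 0.95f);  // SHARE_LIMIT_FACTOR
            System.out.println(shareBps + " Bps");               // ~796,917 Bps (~778 KBps)
        }
    }

checkConfig() below compares this value against _partBWE.getMaxBandwidth() and rebuilds the SyntheticREDQueue only when the configured share bandwidth actually changes.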
@@ -191,7 +199,13 @@ public class FIFOBandwidthRefiller implements Runnable {
updateOutboundBurstRate();
updateInboundPeak();
updateOutboundPeak();


// if share bandwidth config changed, throw out the SyntheticREDQueue and make a new one
int maxBps = getShareBandwidth();
if (_partBWE == null || maxBps != _partBWE.getMaxBandwidth()) {
_partBWE = new SyntheticREDQueue(_context, maxBps);
}

// We are always limited for now
//_limiter.setInboundUnlimited(_inboundKBytesPerSecond <= 0);
//_limiter.setOutboundUnlimited(_outboundKBytesPerSecond <= 0);
@@ -311,26 +325,6 @@ public class FIFOBandwidthRefiller implements Runnable {
int getOutboundBurstKBytesPerSecond() { return _outboundBurstKBytesPerSecond; }
int getInboundBurstKBytesPerSecond() { return _inboundBurstKBytesPerSecond; }

/**
* Participating counter stuff below here
* TOTAL_TIME needs to be high enough to get a burst without dropping
* @since 0.8.12
*/
private static final int TOTAL_TIME = 4000;
private static final int PERIODS = TOTAL_TIME / (int) REPLENISH_FREQUENCY;
/** count in current replenish period */
private final AtomicInteger _currentParticipating = new AtomicInteger();
private long _lastPartUpdateTime;
private int _lastTotal;
/** the actual length of last total period as coalesced (nominally TOTAL_TIME) */
private long _lastTotalTime;
private int _lastIndex;
/** buffer of count per replenish period, last is at _lastIndex, older at higher indexes (wraps) */
private final int[] _counts = new int[PERIODS];
/** the actual length of the period (nominally REPLENISH_FREQUENCY) */
private final long[] _times = new long[PERIODS];
private final ReentrantReadWriteLock _updateLock = new ReentrantReadWriteLock(false);

/**
* We sent a message.
*
@@ -338,7 +332,6 @@ public class FIFOBandwidthRefiller implements Runnable {
* @since 0.8.12
*/
boolean incrementParticipatingMessageBytes(int size, float factor) {
//_currentParticipating.addAndGet(size);
return _partBWE.offer(size, factor);
}

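incrementParticipatingMessageBytes() is now a straight pass-through to SyntheticREDQueue.offer(size, factor), returning its accept/drop decision. A hedged sketch of how a caller could act on that boolean; the interface and class names below are stand-ins, not the actual I2P call site:

    // Illustration only: PartBwLimiter stands in for the refiller's
    // incrementParticipatingMessageBytes(size, factor) contract.
    interface PartBwLimiter {
        /** @return true to accept (forward), false to drop */
        boolean offer(int sizeBytes, float factor);
    }

    class PartTrafficGate {
        static boolean tryForward(PartBwLimiter limiter, byte[] msg) {
            // size and weighting factor are passed through to the estimator
            if (!limiter.offer(msg.length, 1.0f))
                return false;              // the RED queue chose to drop this message
            // ... forward the participating message here ...
            return true;
        }

        public static void main(String[] args) {
            PartBwLimiter acceptAll = (size, factor) -> true;          // trivial stub
            System.out.println(tryForward(acceptAll, new byte[1024])); // true
        }
    }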
@@ -350,30 +343,8 @@ public class FIFOBandwidthRefiller implements Runnable {
*/
int getCurrentParticipatingBandwidth() {
return (int) (_partBWE.getBandwidthEstimate() * 1000f);
/*
_updateLock.readLock().lock();
try {
return locked_getCurrentParticipatingBandwidth();
} finally {
_updateLock.readLock().unlock();
}
*/
}

/*
private int locked_getCurrentParticipatingBandwidth() {
int current = _currentParticipating.get();
long totalTime = (_limiter.now() - _lastPartUpdateTime) + _lastTotalTime;
if (totalTime <= 0)
return 0;
// 1000 for ms->seconds in denominator
long bw = 1000l * (current + _lastTotal) / totalTime;
if (bw > Integer.MAX_VALUE)
return 0;
return (int) bw;
}
*/

/**
* Run once every replenish period
*
@@ -382,46 +353,5 @@ public class FIFOBandwidthRefiller implements Runnable {
private void updateParticipating(long now) {
_context.statManager().addRateData("tunnel.participatingBandwidthOut", getCurrentParticipatingBandwidth());
_context.statManager().addRateData("bwLimiter.participatingBandwidthQueue", (long) _partBWE.getQueueSizeEstimate());
/*
_updateLock.writeLock().lock();
try {
locked_updateParticipating(now);
} finally {
_updateLock.writeLock().unlock();
}
*/
}

/*
private void locked_updateParticipating(long now) {
long elapsed = now - _lastPartUpdateTime;
if (elapsed <= 0) {
// glitch in the matrix
_lastPartUpdateTime = now;
return;
}
_lastPartUpdateTime = now;
if (--_lastIndex < 0)
_lastIndex = PERIODS - 1;
_counts[_lastIndex] = _currentParticipating.getAndSet(0);
_times[_lastIndex] = elapsed;
_lastTotal = 0;
_lastTotalTime = 0;
// add up total counts and times
for (int i = 0; i < PERIODS; i++) {
int idx = (_lastIndex + i) % PERIODS;
_lastTotal += _counts[idx];
_lastTotalTime += _times[idx];
if (_lastTotalTime >= TOTAL_TIME)
break;
}
if (_lastIndex == 0 && _lastTotalTime > 0) {
long bw = 1000l * _lastTotal / _lastTotalTime;
_context.statManager().addRateData("tunnel.participatingBandwidthOut", bw);
if (_lastTotal > 0 && _log.shouldLog(Log.INFO))
_log.info(DataHelper.formatSize(_lastTotal) + " bytes out part. tunnels in last " + _lastTotalTime + " ms: " +
DataHelper.formatSize(bw) + " Bps");
}
}
*/
}

@@ -73,6 +73,8 @@ class SyntheticREDQueue implements BandwidthEstimator {
private final int _minth;
// max queue size, in bytes, before dropping everything
private final int _maxth;
// bandwidth in bytes per second, as passed to the constructor.
private final int _bwBps;
// bandwidth in bytes per ms. The queue is drained at this rate.
private final float _bwBpms;
// As in RED paper
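Judging by the field comments, the _minth/_maxth pair implements the classic RED thresholds: no drops while the queue estimate is below the low threshold, certain drop above the high one, and a rising drop probability in between. A simplified, self-contained sketch of that textbook rule (the constants, including MAX_P, are illustrative and not taken from SyntheticREDQueue):

    import java.util.concurrent.ThreadLocalRandom;

    // Textbook RED drop decision, simplified (no count-since-last-drop correction).
    public class RedDropRule {
        static final float MAX_P = 0.02f;   // hypothetical drop probability at maxth

        /** @param avgQueueBytes smoothed queue-size estimate, in bytes */
        static boolean shouldDrop(float avgQueueBytes, int minth, int maxth) {
            if (avgQueueBytes < minth)
                return false;               // below the low threshold: never drop
            if (avgQueueBytes >= maxth)
                return true;                // above the high threshold: always drop
            float p = MAX_P * (avgQueueBytes - minth) / (maxth - minth);
            return ThreadLocalRandom.current().nextFloat() < p;   // linear ramp in between
        }

        public static void main(String[] args) {
            int bwBps = 100 * 1024;
            int minth = bwBps / 13, maxth = bwBps / 3;             // the new default thresholds
            System.out.println(shouldDrop(5000f, minth, maxth));   // false (below 7876)
            System.out.println(shouldDrop(40000f, minth, maxth));  // true (above 34133)
        }
    }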
@@ -82,6 +84,10 @@ class SyntheticREDQueue implements BandwidthEstimator {
// Should probably match ConnectionOptions.TCP_ALPHA
private static final int DECAY_FACTOR = 8;
private static final int WESTWOOD_RTT_MIN = 500;
// denominator of time, 1/x seconds of traffic in the queue
private static final int DEFAULT_LOW_THRESHOLD = 13;
// denominator of time, 1/x seconds of traffic in the queue
private static final int DEFAULT_HIGH_THRESHOLD = 3;

/**
* Default thresholds.
@@ -95,7 +101,7 @@ class SyntheticREDQueue implements BandwidthEstimator {
// when we're at the default 80% share or below.
// That CoDel starts dropping when above 100 ms latency for 500 ms.
// let's try the same 100 ms of traffic here.
this(ctx, bwBps, bwBps / 10, bwBps / 2);
this(ctx, bwBps, bwBps / DEFAULT_LOW_THRESHOLD, bwBps / DEFAULT_HIGH_THRESHOLD);
}

/**
@@ -115,12 +121,22 @@ class SyntheticREDQueue implements BandwidthEstimator {
_acked = -1;
_minth = minThB;
_maxth = maxThB;
_bwBps = bwBps;
_bwBpms = bwBps / 1000f;
_tQSize = _tAck;
if (_log.shouldDebug())
_log.debug("Configured " + bwBps + " BPS, min: " + minThB + " B, max: " + maxThB + " B");
}

/**
*
* Nominal bandwidth limit in bytes per second, as passed to the constructor.
*
*/
public int getMaxBandwidth() {
return _bwBps;
}

/**
* Unconditional, never drop.
* The queue size and bandwidth estimates will be updated.