Compare commits

...

11 Commits

Author SHA1 Message Date
zzz
c04e1d6387 bump -7 2025-08-17 13:09:53 -04:00
zzz
79860f017f Router: Fix PublishRouterInfoJob sometimes not getting started
if setNetDbReady() is called when netDb().isInitialized() is false.
This can happen if the router has a few RIs at startup but
not enough and it reseeds, or perhaps if reseed is very slow.
If PLRIJ does not run, the local RI doesn't get updated
and most connections fail due to RI clock skew in the handshake.
The router is then essentially stuck.
2025-08-17 13:07:17 -04:00
zzz
383494fbf6 SSU2: Fix last receive time tracking 2025-08-17 12:02:36 -04:00
zzz
31ce28621d NetDB: Exploration improvements, mostly for hidden mode
- Increase low and min floodfill and router thresholds
- Fix non-exploratory mode calculation when under min threshold
- Log tweaks
2025-08-17 11:00:19 -04:00
zzz
a36f1d2bba bump -6 2025-08-12 10:00:55 -04:00
zzz
f5db530c0e Console: Don't hide SSU2 peers with long idle time
because it makes the total look wrong.
2025-08-12 10:00:05 -04:00
zzz
502efa8349 i2psnark: Handle UDP trackers in magnet links
- Fix NPE announcing magnet to UDP tracker
- Prep for handling multiple trackers in magnet links
2025-08-10 11:49:46 -04:00
zzz
d4c660d863 Console: Hide b32 link on netdb LS tabs for encrypted LS
- Fix layout for final LS data section on netdb LS debug tab
2025-08-10 10:54:55 -04:00
zzz
ec550dce0b Console: Fix NPE on netdb LS debug tab rendering encrypted LS 2025-08-09 11:34:34 -04:00
eyedeekay
d2c3034a47 Add GHCR builder from github instructions, also force gitea to update repo information 2025-07-30 12:54:51 -04:00
eyedeekay
2778bfd7c2 Add GHCR builder from github instructions, also force gitea to update repo information 2025-07-30 12:54:51 -04:00
11 changed files with 145 additions and 40 deletions

.github/workflows/ghcr.yml (vendored, new file, +63 lines)

@@ -0,0 +1,63 @@
#
name: Create and publish a Docker image
# Cited: https://docs.github.com/en/actions/tutorials/publish-packages/publish-docker-images

# Configures this workflow to run every time a change is pushed to the branch called `master`.
on:
  push:
    branches:
      - master

# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
    #
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see [Usage](https://github.com/docker/build-push-action#usage) in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      - name: Build and push Docker image
        id: push
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
      # This step generates an artifact attestation for the image, which is an unforgeable statement about where and how it was built. It increases supply chain security for people who consume the image. For more information, see [Using artifact attestations to establish provenance for builds](/actions/security-guides/using-artifact-attestations-to-establish-provenance-for-builds).
      - name: Generate artifact attestation
        uses: actions/attest-build-provenance@v2
        with:
          subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          subject-digest: ${{ steps.push.outputs.digest }}
          push-to-registry: true

MagnetURI.java

@@ -15,7 +15,7 @@ import net.i2p.data.Base32;
  */
 public class MagnetURI {
-    private final String _tracker;
+    private final List<String> _trackers;
     private final String _name;
     private final byte[] _ih;
@@ -38,7 +38,7 @@ public class MagnetURI {
     public MagnetURI(I2PSnarkUtil util, String url) throws IllegalArgumentException {
         String ihash;
         String name;
-        String trackerURL = null;
+        List<String> trackerURLs = null;
         if (url.startsWith(MAGNET)) {
             // magnet:?xt=urn:btih:0691e40aae02e552cfcb57af1dca56214680c0c5&tr=http://tracker2.postman.i2p/announce.php
             String xt = getParam("xt", url);
@@ -46,7 +46,7 @@ public class MagnetURI {
             if (xt == null || !xt.startsWith("urn:btih:"))
                 throw new IllegalArgumentException();
             ihash = xt.substring("urn:btih:".length());
-            trackerURL = getTrackerParam(url);
+            trackerURLs = getTrackerParam(url);
             name = util.getString("Magnet") + ' ' + ihash;
             String dn = getParam("dn", url);
             if (dn != null)
@@ -79,7 +79,7 @@ public class MagnetURI {
             throw new IllegalArgumentException();
         _ih = ih;
         _name = name;
-        _tracker = trackerURL;
+        _trackers = trackerURLs;
     }

     /**
@@ -97,10 +97,18 @@ public class MagnetURI {
     }

     /**
-     * @return tracker url or null
+     * @return first valid tracker url or null
      */
     public String getTrackerURL() {
-        return _tracker;
+        return _trackers != null ? _trackers.get(0) : null;
     }

+    /**
+     * @return all valid tracker urls or null if none
+     * @since 0.9.67 TODO to be hooked in via SnarkManager.addMagnet() and new Snark()
+     */
+    public List<String> getTrackerURLs() {
+        return _trackers;
+    }
+
     /**
@@ -160,26 +168,29 @@ public class MagnetURI {
     }

     /**
-     * @return first valid I2P tracker or null
+     * @return all valid I2P trackers or null if none
      * @since 0.9.1
      */
-    private static String getTrackerParam(String uri) {
+    private static List<String> getTrackerParam(String uri) {
         List<String> trackers = getMultiParam("tr", uri);
         if (trackers == null)
             return null;
+        List<String> rv = new ArrayList<String>(trackers.size());
         for (String t : trackers) {
             try {
                 URI u = new URI(t);
                 String protocol = u.getScheme();
                 String host = u.getHost();
-                if (protocol == null || host == null ||
-                    !protocol.toLowerCase(Locale.US).equals("http") ||
+                if (protocol == null || host == null)
+                    continue;
+                protocol = protocol.toLowerCase(Locale.US);
+                if (!(protocol.equals("http") || protocol.equals("udp")) ||
                     !host.toLowerCase(Locale.US).endsWith(".i2p"))
                     continue;
-                return t;
+                rv.add(t);
             } catch(URISyntaxException use) {}
         }
-        return null;
+        return rv.isEmpty() ? null : rv;
     }

     /**
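
To make the new filtering concrete: a self-contained sketch (class name and sample URLs are illustrative, not part of the commit) of the rule getTrackerParam() now applies, keeping every tr= value whose scheme is http or udp and whose host ends in .i2p:

import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

public class TrackerFilterDemo {
    // Same filter as the new getTrackerParam(): keep http/udp trackers on .i2p hosts.
    static List<String> filterTrackers(List<String> trackers) {
        List<String> rv = new ArrayList<String>(trackers.size());
        for (String t : trackers) {
            try {
                URI u = new URI(t);
                String protocol = u.getScheme();
                String host = u.getHost();
                if (protocol == null || host == null)
                    continue;
                protocol = protocol.toLowerCase(Locale.US);
                if (!(protocol.equals("http") || protocol.equals("udp")) ||
                    !host.toLowerCase(Locale.US).endsWith(".i2p"))
                    continue;
                rv.add(t);
            } catch (URISyntaxException use) {}
        }
        return rv.isEmpty() ? null : rv;
    }

    public static void main(String[] args) {
        List<String> in = new ArrayList<String>();
        in.add("http://tracker2.postman.i2p/announce.php"); // kept: http + .i2p
        in.add("udp://tracker.example.i2p:6969/announce");  // kept: udp is now accepted
        in.add("https://tracker.example.org/announce");     // dropped: not an .i2p host
        System.out.println(filterTrackers(in));             // prints the first two
    }
}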

TrackerClient.java

@@ -955,7 +955,7 @@ public class TrackerClient implements Runnable {
             long maxWait = fast ? 5*1000 : 60*1000;
             boolean small = left == 0 || event == UDPTrackerClient.EVENT_STOPPED || !coordinator.needOutboundPeers();
             int numWant = small ? 0 : _util.getMaxConnections();
-            UDPTrackerClient.TrackerResponse fetched = udptc.announce(meta.getInfoHash(), snark.getID(), numWant,
+            UDPTrackerClient.TrackerResponse fetched = udptc.announce(snark.getInfoHash(), snark.getID(), numWant,
                                                                       maxWait, tr.host, tr.port,
                                                                       downloaded, left, uploaded, event, fast);
             if (fast)
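
The one-word change above is the NPE fix: a magnet download has no MetaInfo until its metadata has been fetched, while the Snark object always carries the info hash parsed from the link's btih parameter. A minimal self-contained sketch with stub classes (hypothetical stand-ins, not the real i2psnark API):

public class MagnetNpeDemo {
    static class MetaInfo {
        byte[] getInfoHash() { return new byte[20]; }
    }
    static class Snark {
        MetaInfo meta;                   // stays null while a magnet fetches metadata
        byte[] infoHash = new byte[20];  // parsed from the magnet's btih, always set
        MetaInfo getMetaInfo() { return meta; }
        byte[] getInfoHash() { return infoHash; }
    }

    public static void main(String[] args) {
        Snark snark = new Snark();  // a magnet announce before metadata arrives
        System.out.println(snark.getInfoHash().length);  // new path: works, prints 20
        try {
            snark.getMetaInfo().getInfoHash();           // old path
        } catch (NullPointerException npe) {
            System.out.println("old path throws NPE for magnets");
        }
    }
}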

NetDbRenderer.java

@@ -794,6 +794,7 @@ class NetDbRenderer {
                 buf.setLength(0);
             }
         }  // for each
+        buf.append("</div>");
         if (debug) {
             buf.append("<table id=\"leasesetdebug\"><tr><td><b>Network data (only valid if floodfill):</b></td><td colspan=\"3\">");
             //buf.append("</b></p><p><b>Center of Key Space (router hash): " + ourRKey.toBase64());
@@ -811,7 +812,6 @@ class NetDbRenderer {
             }
             buf.append("</td></tr></table>\n");
         }  // median table
-        buf.append("</div>");
     }  // !empty
     out.append(buf);
     out.flush();
@@ -919,15 +919,17 @@ class NetDbRenderer {
             buf.append(dest.toBase64(), 0, 6);
         else
             buf.append("n/a");
-        buf.append("</code></th>" +
-                   "</tr>\n<tr><td");
-        if (!linkSusi)
-            buf.append(" colspan=\"2\"");
-        buf.append("><a href=\"http://").append(b32).append("\">").append(b32).append("</a></td>\n");
-        if (linkSusi && dest != null) {
-            buf.append("<td class=\"addtobook\"><a title=\"").append(_t("Add to address book"))
-               .append("\" href=\"/dns?book=private&amp;destination=")
-               .append(dest.toBase64()).append("#add\">").append(_t("Add to local address book")).append("</a></td>");
+        buf.append("</code></th>");
+        if (dest != null) {
+            buf.append("</tr>\n<tr><td");
+            if (!linkSusi)
+                buf.append(" colspan=\"2\"");
+            buf.append("><a href=\"http://").append(b32).append("\">").append(b32).append("</a></td>\n");
+            if (linkSusi) {
+                buf.append("<td class=\"addtobook\"><a title=\"").append(_t("Add to address book"))
+                   .append("\" href=\"/dns?book=private&amp;destination=")
+                   .append(dest.toBase64()).append("#add\">").append(_t("Add to local address book")).append("</a></td>");
+            }
         }
     }
 }
@@ -954,7 +956,7 @@ class NetDbRenderer {
             buf.append("&nbsp;&nbsp;<b>RAR?</b> ").append(ls.getReceivedAsReply());
             buf.append("&nbsp;&nbsp;<b>Distance: </b>").append(distance);
             buf.append("&nbsp;&nbsp;<b>").append(_t("Type")).append(": </b>").append(type);
-            if (dest.isCompressible()) {
+            if (dest != null && dest.isCompressible()) {
                 buf.append("&nbsp;&nbsp;<b>Compressible?</b> true");
             }
             if (type != DatabaseEntry.KEY_TYPE_LEASESET) {

PeerHelper.java

@@ -644,8 +644,8 @@ public class PeerHelper extends HelperBase {
         buf.setLength(0);
         long now = _context.clock().now();
         for (PeerState peer : peers) {
-            if (now-peer.getLastReceiveTime() > 60*60*1000)
-                continue; // don't include old peers
+            //if (now-peer.getLastReceiveTime() > 60*60*1000)
+            //    continue; // don't include old peers
             buf.append("<tr><td class=\"cells\" align=\"left\" nowrap>");
             buf.append(_context.commSystem().renderPeerHTML(peer.getRemotePeer()));

history.txt

@@ -1,3 +1,26 @@
+2025-08-17 zzz
+ * NetDB: Exploration improvements and fixes
+ * Router: Fix PublishRouterInfoJob sometimes not getting started
+ * SSU2: Fix last receive time tracking
+
+2025-08-10 zzz
+ * Console:
+   - Hide b32 link on netdb LS tabs for encrypted LS
+   - Fix layout for final LS data section on netdb LS debug tab
+ * i2psnark: Handle UDP trackers in magnet links
+
+2025-08-09 zzz
+ * Console: Fix NPE on netdb LS debug tab rendering encrypted LS
+
+2025-07-27 zzz
+ * Crypto: Reduce YK precalc pool size
+ * I2CP: Stub out new HostLookup types for service records (proposal 167)
+ * i2ptunnel: Expose 6,4 option to non-advanced-config
+ * Tomcat 9.0.107
+
+2025-07-17 zzz
+ * I2CP: Client-side destroy session fixes
+
 2025-07-04 zzz
  * i2psnark: UDP announce fixes
  * SSU: Increase inbound ban time

Router.java

@@ -787,7 +787,7 @@ public class Router implements RouterClock.ClockShiftListener {
         synchronized(_configFileLock) {
             String f = getConfigFilename();
             Properties config = getConfig(_context, f);
-            // to avoid compiler errror
+            // to avoid compiler error
             Map foo = _config;
             foo.putAll(config);
         }
@@ -956,23 +956,23 @@ public class Router implements RouterClock.ClockShiftListener {
                 changed = true;
             }
         }
-        if (changed && _context.netDb().isInitialized()) {
+        if (changed) {
             if (_log.shouldWarn())
-                _log.warn("NetDB ready, publishing RI");
+                _log.warn("NetDB ready, initialized? " + _context.netDb().isInitialized());
             // any previous calls to netdb().publish() did not
             // actually publish, because netdb init was not complete
             Republish r = new Republish(_context);
             // this is called from PersistentDataStore.ReadJob,
             // so we probably don't need to throw it to the timer queue,
             // but just to be safe
-            _context.simpleTimer2().addEvent(r, 0);
+            long delay = _context.netDb().isInitialized() ? 0 : 60*1000;
+            _context.simpleTimer2().addEvent(r, delay);
             // periodically update our RI and republish it to the flooodfills
             PublishLocalRouterInfoJob plrij = new PublishLocalRouterInfoJob(_context);
             plrij.getTiming().setStartAfter(_context.clock().now() + plrij.getDelay());
             _context.jobQueue().addJob(plrij);
         }
         if (changed) {
             _context.commSystem().initGeoIP();
             if (!SystemVersion.isSlow() &&

RouterVersion.java

@@ -20,7 +20,7 @@ public class RouterVersion {
     public final static String VERSION = CoreVersion.VERSION;
     /** for example: "beta", "alpha", "rc" */
     public final static String QUALIFIER = "";
-    public final static long BUILD = 5;
+    public final static long BUILD = 7;
     /** for example "-test" */
     public final static String EXTRA = "";
     public final static String FULL_VERSION = VERSION + "-" + BUILD + QUALIFIER + EXTRA;

ExploreJob.java

@@ -131,7 +131,7 @@ class ExploreJob extends SearchJob {
         }
         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Peers we don't want to hear about: " + dontIncludePeers);
+            _log.debug("Search type: " + msg.getSearchType() + " exclude peers: " + dontIncludePeers);
         msg.setDontIncludePeers(dontIncludePeers);
@@ -202,7 +202,6 @@ class ExploreJob extends SearchJob {
      * searchNext
      *
      */
     @Override
     public String getName() { return "Kademlia NetDb Explore"; }
 }

StartExplorersJob.java

@@ -46,7 +46,7 @@ class StartExplorersJob extends JobImpl {
        The goal here is to avoid reseeding.
      */
     /** very aggressively explore if we have less than this many routers */
-    private static final int MIN_ROUTERS = 3 * KademliaNetworkDatabaseFacade.MIN_RESEED;
+    private static final int MIN_ROUTERS = 5 * KademliaNetworkDatabaseFacade.MIN_RESEED;
     /** aggressively explore if we have less than this many routers */
     private static final int LOW_ROUTERS = 2 * MIN_ROUTERS;
     /** explore slowly if we have more than this many routers */
@@ -54,7 +54,7 @@ class StartExplorersJob extends JobImpl {
     // must be lower than LIMIT_ROUTERS in HandleFloodfillDatabaseStoreMessageJob
     // because exploration does not register a reply job
     private static final int LIMIT_ROUTERS = SystemVersion.isSlow() ? 800 : 3000;
-    private static final int MIN_FFS = 50;
+    private static final int MIN_FFS = 100;
     static final int LOW_FFS = 2 * MIN_FFS;
     private static final long MAX_LAG = 100;
@@ -100,10 +100,16 @@ class StartExplorersJob extends JobImpl {
         boolean needffs = ffs < MIN_FFS;
         boolean lowffs = ffs < LOW_FFS;
         for (Hash key : toExplore) {
-            // Last param false means get floodfills (non-explore)
+            // false means get floodfills (non-explore)
             // This is very effective so we don't need to do it often
-            boolean realexpl = !((needffs && getContext().random().nextInt(2) == 0) ||
-                (lowffs && getContext().random().nextInt(4) == 0));
+            boolean realexpl;
+            if (needffs) {
+                realexpl = getContext().random().nextInt(2) != 0;
+            } else if (lowffs) {
+                realexpl = getContext().random().nextInt(4) != 0;
+            } else {
+                realexpl = true;
+            }
             ExploreJob j = new ExploreJob(getContext(), _facade, key, realexpl, _msgIDBloomXor);
             if (delay > 0)
                 j.getTiming().setStartAfter(getContext().clock().now() + delay);
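
A note on the realexpl rewrite above: under the min floodfill threshold needffs implies lowffs, so the old short-circuit expression required two independent draws to both miss, giving P(realexpl) = (1/2)(3/4) = 3/8 instead of the intended 1/2. A throwaway standalone simulation (a sketch, not router code) showing both odds:

import java.util.Random;

public class RealExplOdds {
    public static void main(String[] args) {
        Random rand = new Random();
        int n = 1_000_000, oldReal = 0, newReal = 0;
        for (int i = 0; i < n; i++) {
            // old expression with needffs == lowffs == true: two compounding rolls
            if (!((rand.nextInt(2) == 0) || (rand.nextInt(4) == 0)))
                oldReal++;
            // new logic when needffs: a single roll decides
            if (rand.nextInt(2) != 0)
                newReal++;
        }
        // prints approximately 0.375 vs 0.500
        System.out.printf("old %.3f  new %.3f%n", (double) oldReal / n, (double) newReal / n);
    }
}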

PeerState2.java

@@ -226,6 +226,7 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback
      */
     @Override
     protected synchronized void messagePartiallyReceived(long now) {
+        setLastReceiveTime(now);
         if (_wantACKSendSince <= 0) {
             _wantACKSendSince = now;
             _ackTimer.schedule();