diff --git a/apps/i2pcontrol/java/net/i2p/i2pcontrol/servlets/jsonrpc2handlers/RouterInfoHandler.java b/apps/i2pcontrol/java/net/i2p/i2pcontrol/servlets/jsonrpc2handlers/RouterInfoHandler.java index 87a68e779..2bd0d688d 100644 --- a/apps/i2pcontrol/java/net/i2p/i2pcontrol/servlets/jsonrpc2handlers/RouterInfoHandler.java +++ b/apps/i2pcontrol/java/net/i2p/i2pcontrol/servlets/jsonrpc2handlers/RouterInfoHandler.java @@ -125,7 +125,7 @@ public class RouterInfoHandler implements RequestHandler { if (inParams.containsKey("i2p.router.netdb.knownpeers")) { // Why max(-1, 0) is used I don't know, it is the implementation used in the router console. - outParams.put("i2p.router.netdb.knownpeers", Math.max(_context.netDbSegmentor().getKnownRouters(null) - 1, 0)); + outParams.put("i2p.router.netdb.knownpeers", Math.max(_context.mainNetDb().getKnownRouters() - 1, 0)); } if (inParams.containsKey("i2p.router.netdb.activepeers")) { @@ -200,7 +200,7 @@ public class RouterInfoHandler implements RequestHandler { case CommSystemFacade.STATUS_IPV4_DISABLED_IPV6_FIREWALLED: if (_context.router().getRouterInfo().getTargetAddress("NTCP2") != null) return NETWORK_STATUS.WARN_FIREWALLED_WITH_INBOUND_TCP; - if (_context.netDbSegmentor().floodfillEnabled()) + if (_context.mainNetDb().floodfillEnabled()) return NETWORK_STATUS.WARN_FIREWALLED_AND_FLOODFILL; if (_context.router().getRouterInfo().getCapabilities().indexOf('O') >= 0) return NETWORK_STATUS.WARN_FIREWALLED_AND_FAST; diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigAdvancedHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigAdvancedHelper.java index 5b444b54f..9c173b240 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigAdvancedHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigAdvancedHelper.java @@ -66,6 +66,6 @@ public class ConfigAdvancedHelper extends HelperBase { /** @since 0.9.21 */ public boolean isFloodfill() { - return _context.netDbSegmentor().floodfillEnabled(); + return _context.mainNetDb().floodfillEnabled(); } } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java index 18ed3b38b..bf304bb15 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/ConfigKeyringHandler.java @@ -95,7 +95,7 @@ public class ConfigKeyringHandler extends FormHandler { // This is probably not ideal, with some social-engineering a service operator who owns an encrypted destination could associate 2 tunnels. // How realistic is it? Maybe not very, but I don't like it. Still, this is better than nothing. 
for (String clientBase32 : clientBase32s) { - BlindData bdold = _context.netDbSegmentor().getBlindData(spk, clientBase32); + BlindData bdold = _context.clientNetDb(clientBase32).getBlindData(spk); if (bdold != null && d == null) d = bdold.getDestination(); if (d != null && _context.clientManager().isLocal(d)) { @@ -164,7 +164,7 @@ public class ConfigKeyringHandler extends FormHandler { _log.debug("already cached: " + bdold); } try { - _context.netDbSegmentor().setBlindData(bdout, clientBase32); + _context.clientNetDb(clientBase32).setBlindData(bdout); addFormNotice(_t("Key for {0} added to keyring", bdout.toBase32())); if (_mode == 6 || _mode == 7) { addFormNotice(_t("Send key to server operator.") + ' ' + pk.toPublic().toBase64()); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java index 455b783cf..085419e4b 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/NetDbRenderer.java @@ -747,17 +747,17 @@ class NetDbRenderer { buf.append(hostname); buf.append(""); } else { - LeaseSet ls = _context.netDbSegmentor().lookupLeaseSetLocally(hash); + LeaseSet ls = _context.mainNetDb().lookupLeaseSetLocally(hash); if (ls == null) { // remote lookup LookupWaiter lw = new LookupWaiter(); // use-case for the exploratory netDb here? - _context.exploratoryNetDb().lookupLeaseSetRemotely(hash, lw, lw, 8*1000, null); + _context.mainNetDb().lookupLeaseSetRemotely(hash, lw, lw, 8*1000, null); // just wait right here in the middle of the rendering, sure synchronized(lw) { try { lw.wait(9*1000); } catch (InterruptedException ie) {} } - ls = _context.exploratoryNetDb().lookupLeaseSetLocally(hash); + ls = _context.mainNetDb().lookupLeaseSetLocally(hash); } if (ls != null) { BigInteger dist = HashDistance.getDistance(_context.routerHash(), ls.getRoutingKey()); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/SybilRenderer.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/SybilRenderer.java index f216130a6..b80e515fd 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/SybilRenderer.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/SybilRenderer.java @@ -493,7 +493,7 @@ public class SybilRenderer { Hash client = iter.next(); if (!_context.clientManager().isLocal(client) || !_context.clientManager().shouldPublishLeaseSet(client) || - _context.netDbSegmentor().lookupLeaseSetLocally(client) == null) { + _context.mainNetDb().lookupLeaseSetLocally(client) == null) { iter.remove(); } } @@ -503,7 +503,7 @@ public class SybilRenderer { return; } for (Hash client : destinations) { - LeaseSet ls = _context.netDbSegmentor().lookupLeaseSetLocally(client); + LeaseSet ls = _context.mainNetDb().lookupLeaseSetLocally(client); if (ls == null) continue; Hash rkey = ls.getRoutingKey(); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/helpers/TunnelRenderer.java b/apps/routerconsole/java/src/net/i2p/router/web/helpers/TunnelRenderer.java index 97524c721..ec10a45ea 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/helpers/TunnelRenderer.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/helpers/TunnelRenderer.java @@ -559,7 +559,7 @@ class TunnelRenderer { /** @return cap char or ' ' */ private char getCapacity(Hash peer) { - RouterInfo info = (RouterInfo) _context.netDbSegmentor().lookupLocallyWithoutValidation(peer, null); + RouterInfo info = 
(RouterInfo) _context.mainNetDb().lookupLocallyWithoutValidation(peer); if (info != null) { String caps = info.getCapabilities(); for (int i = 0; i < RouterInfo.BW_CAPABILITY_CHARS.length(); i++) { diff --git a/core/java/src/net/i2p/client/impl/I2PSessionImpl.java b/core/java/src/net/i2p/client/impl/I2PSessionImpl.java index 598b95bde..07bde4a1a 100644 --- a/core/java/src/net/i2p/client/impl/I2PSessionImpl.java +++ b/core/java/src/net/i2p/client/impl/I2PSessionImpl.java @@ -1335,31 +1335,9 @@ public abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2 _availabilityNotifier.stopNotifying(); closeSocket(); _subsessionMap.clear(); - clearOldNetDB(); if (_sessionListener != null) _sessionListener.disconnected(this); } - private void clearOldNetDB() { - Destination myDest = getMyDestination(); - if (myDest != null) { - String base32 = myDest.toBase32(); - if (base32 != null) { - String dbid = "clients_"+base32; - // get the netDb directory - File netDbDir = new File(_context.getConfigDir(), "netDb"); - File subNetDbDir = new File(netDbDir, dbid); - if (subNetDbDir.exists()) { - subNetDbDir.delete(); - } - File baseNetDbDir = new File(_context.getConfigDir(), "netDb"); - File baseSubNetDbDir = new File(baseNetDbDir, dbid); - if (baseSubNetDbDir.exists()) { - baseSubNetDbDir.delete(); - } - } - } - } - /** * Close the socket carefully. */ diff --git a/router/java/src/net/i2p/router/Banlist.java b/router/java/src/net/i2p/router/Banlist.java index 21e6e69b9..7003a0298 100644 --- a/router/java/src/net/i2p/router/Banlist.java +++ b/router/java/src/net/i2p/router/Banlist.java @@ -269,7 +269,7 @@ public class Banlist { if (transport == null) { // we hate the peer on *any* transport - _context.netDbSegmentor().fail(peer); + _context.mainNetDb().fail(peer); _context.tunnelManager().fail(peer); } //_context.tunnelManager().peerFailed(peer); diff --git a/router/java/src/net/i2p/router/Blocklist.java b/router/java/src/net/i2p/router/Blocklist.java index c18de20b8..a4e6e930e 100644 --- a/router/java/src/net/i2p/router/Blocklist.java +++ b/router/java/src/net/i2p/router/Blocklist.java @@ -942,7 +942,7 @@ public class Blocklist { * Will not contain duplicates. 
*/ private List getAddresses(Hash peer) { - RouterInfo pinfo = _context.netDbSegmentor().lookupRouterInfoLocally(peer, null); + RouterInfo pinfo = _context.mainNetDb().lookupRouterInfoLocally(peer); if (pinfo == null) return Collections.emptyList(); return getAddresses(pinfo); diff --git a/router/java/src/net/i2p/router/MultiRouter.java b/router/java/src/net/i2p/router/MultiRouter.java index 791cfcdb5..4e9519c93 100644 --- a/router/java/src/net/i2p/router/MultiRouter.java +++ b/router/java/src/net/i2p/router/MultiRouter.java @@ -144,7 +144,7 @@ public class MultiRouter { } for(Router r : _routers) { for(RouterInfo ri : riSet){ - r.getContext().netDbSegmentor().publish(ri); + r.getContext().mainNetDb().publish(ri); } } _out.println(riSet.size() + " RouterInfos were reseeded"); diff --git a/router/java/src/net/i2p/router/Router.java b/router/java/src/net/i2p/router/Router.java index e599dc014..f4b6aa76b 100644 --- a/router/java/src/net/i2p/router/Router.java +++ b/router/java/src/net/i2p/router/Router.java @@ -1188,7 +1188,7 @@ public class Router implements RouterClock.ClockShiftListener { // rv.append(CAPABILITY_BW256); // if prop set to true, don't tell people we are ff even if we are - if (_context.netDbSegmentor().floodfillEnabled() && + if (_context.mainNetDb().floodfillEnabled() && !_context.getBooleanProperty("router.hideFloodfillParticipant")) rv.append(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL); diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java index 1d81c384b..89152e52b 100644 --- a/router/java/src/net/i2p/router/RouterContext.java +++ b/router/java/src/net/i2p/router/RouterContext.java @@ -375,8 +375,8 @@ public class RouterContext extends I2PAppContext { public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; } public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); } public FloodfillNetworkDatabaseFacade mainNetDb() { return _netDb.mainNetDB(); } - public FloodfillNetworkDatabaseFacade exploratoryNetDb() { return _netDb.exploratoryNetDB(); } public FloodfillNetworkDatabaseFacade clientNetDb(String id) { return _netDb.clientNetDB(id); } + public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); } /** * The actual driver of the router, where all jobs are enqueued and processed. 
*/ diff --git a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java index df0f85a28..963b112f4 100644 --- a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java +++ b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java @@ -211,6 +211,7 @@ class ClientConnectionRunner { _manager.unregisterEncryptedDestination(this, _encryptedLSHash); _manager.unregisterConnection(this); // netdb may be null in unit tests + Hash dbid = getDestHash(); if (_context.netDbSegmentor() != null) { // Note that if the client sent us a destroy message, // removeSession() was called just before this, and @@ -218,11 +219,11 @@ class ClientConnectionRunner { for (SessionParams sp : _sessions.values()) { LeaseSet ls = sp.currentLeaseSet; if (ls != null) - _context.netDbSegmentor().unpublish(ls); + _context.clientNetDb(dbid).unpublish(ls); // unpublish encrypted LS also ls = sp.currentEncryptedLeaseSet; if (ls != null) - _context.netDbSegmentor().unpublish(ls); + _context.clientNetDb(dbid).unpublish(ls); if (!sp.isPrimary) _context.tunnelManager().removeAlias(sp.dest); } @@ -448,6 +449,7 @@ class ClientConnectionRunner { if (id == null) return; boolean isPrimary = false; + Hash dbid = getDestHash(); for (Iterator iter = _sessions.values().iterator(); iter.hasNext(); ) { SessionParams sp = iter.next(); if (id.equals(sp.sessionId)) { @@ -458,11 +460,11 @@ class ClientConnectionRunner { _manager.unregisterSession(id, sp.dest); LeaseSet ls = sp.currentLeaseSet; if (ls != null) - _context.netDbSegmentor().unpublish(ls); + _context.clientNetDb(dbid).unpublish(ls); // unpublish encrypted LS also ls = sp.currentEncryptedLeaseSet; if (ls != null) - _context.netDbSegmentor().unpublish(ls); + _context.clientNetDb(dbid).unpublish(ls); isPrimary = sp.isPrimary; if (isPrimary) _context.tunnelManager().removeTunnels(sp.dest); @@ -483,11 +485,11 @@ class ClientConnectionRunner { _manager.unregisterSession(sp.sessionId, sp.dest); LeaseSet ls = sp.currentLeaseSet; if (ls != null) - _context.netDbSegmentor().unpublish(ls); + _context.clientNetDb(dbid).unpublish(ls); // unpublish encrypted LS also ls = sp.currentEncryptedLeaseSet; if (ls != null) - _context.netDbSegmentor().unpublish(ls); + _context.clientNetDb(dbid).unpublish(ls); _context.tunnelManager().removeAlias(sp.dest); synchronized(this) { if (sp.rerequestTimer != null) diff --git a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java index f1df9ea19..9d9fb4c82 100644 --- a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java +++ b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java @@ -711,13 +711,13 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi } if (_log.shouldDebug()) _log.debug("Publishing: " + ls); - _context.netDbSegmentor().publish(ls, _runner.getDestHash().toBase32()); + _context.clientNetDb(_runner.getDestHash()).publish(ls); if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) { // store the decrypted ls also EncryptedLeaseSet encls = (EncryptedLeaseSet) ls; if (_log.shouldDebug()) _log.debug("Storing decrypted: " + encls.getDecryptedLeaseSet()); - _context.netDbSegmentor().store(dest.getHash(), encls.getDecryptedLeaseSet()); + _context.clientNetDb(dest.getHash()).store(dest.getHash(), encls.getDecryptedLeaseSet()); } } catch (IllegalArgumentException iae) { if (_log.shouldLog(Log.ERROR)) @@ -861,9 +861,9 @@ 
class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi _log.warn("Unsupported BlindingInfo type: " + message); return; } - BlindData obd = _context.netDbSegmentor().getBlindData(spk); + BlindData obd = _context.clientNetDb(_runner.getDestHash()).getBlindData(spk); if (obd == null) { - _context.netDbSegmentor().setBlindData(bd, _runner.getDestHash().toBase32()); + _context.clientNetDb(_runner.getDestHash()).setBlindData(bd); if (_log.shouldWarn()) _log.warn("New: " + bd); } else { @@ -884,7 +884,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi return; } } - _context.netDbSegmentor().setBlindData(bd, _runner.getDestHash().toBase32()); + _context.clientNetDb(_runner.getDestHash()).setBlindData(bd); if (_log.shouldWarn()) _log.warn("Updated: " + bd); } else { @@ -893,7 +893,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi if (nexp > oexp) { obd.setExpiration(nexp); // to force save at shutdown - _context.netDbSegmentor().setBlindData(obd, _runner.getDestHash().toBase32()); + _context.clientNetDb(_runner.getDestHash()).setBlindData(obd); if (_log.shouldWarn()) _log.warn("Updated expiration: " + obd); } else { diff --git a/router/java/src/net/i2p/router/client/LookupDestJob.java b/router/java/src/net/i2p/router/client/LookupDestJob.java index c2f37a8d4..ad6bd3c8e 100644 --- a/router/java/src/net/i2p/router/client/LookupDestJob.java +++ b/router/java/src/net/i2p/router/client/LookupDestJob.java @@ -91,7 +91,11 @@ class LookupDestJob extends JobImpl { try { bd = Blinding.decode(context, b); SigningPublicKey spk = bd.getUnblindedPubKey(); - BlindData bd2 = getContext().netDbSegmentor().getBlindData(spk); + BlindData bd2; + if (_fromLocalDest == null) + bd2 = getContext().mainNetDb().getBlindData(spk); + else + bd2 = getContext().clientNetDb(_fromLocalDest).getBlindData(spk); if (bd2 != null) { // BlindData from database may have privkey or secret // check if we need it but don't have it @@ -110,7 +114,7 @@ class LookupDestJob extends JobImpl { long exp = now + ((bd.getAuthRequired() || bd.getSecretRequired()) ? 365*24*60*60*1000L : 90*24*68*60*1000L); bd.setExpiration(exp); - getContext().netDbSegmentor().setBlindData(bd, toBase32()); + getContext().clientNetDb(_fromLocalDest).setBlindData(bd); } h = bd.getBlindedHash(); if (_log.shouldDebug()) @@ -185,7 +189,7 @@ class LookupDestJob extends JobImpl { if (timeout > 1500) timeout -= 500; // TODO tell router this is an encrypted lookup, skip 38 or earlier ffs? 
- getContext().netDbSegmentor().lookupDestination(_hash, done, timeout, _fromLocalDest, toBase32()); + getContext().clientNetDb(_fromLocalDest).lookupDestination(_hash, done, timeout, _fromLocalDest); } else { // blinding decode fail returnFail(HostReplyMessage.RESULT_DECRYPTION_FAILURE); @@ -204,10 +208,10 @@ class LookupDestJob extends JobImpl { } public String getName() { return "LeaseSet Lookup Reply to Client"; } public void runJob() { - Destination dest = getContext().netDbSegmentor().lookupDestinationLocally(_hash, toBase32()); + Destination dest = getContext().clientNetDb(_fromLocalDest).lookupDestinationLocally(_hash); if (dest == null && _blindData != null) { // TODO store and lookup original hash instead - LeaseSet ls = getContext().netDbSegmentor().lookupLeaseSetLocally(_hash, toBase32()); + LeaseSet ls = getContext().clientNetDb(_fromLocalDest).lookupLeaseSetLocally(_hash); if (ls != null && ls.getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) { // already decrypted EncryptedLeaseSet encls = (EncryptedLeaseSet) ls; diff --git a/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java index c65fcf665..c18298e1d 100644 --- a/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/dummy/DummyNetworkDatabaseFacade.java @@ -11,6 +11,7 @@ package net.i2p.router.dummy; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -42,6 +43,10 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade { return null; } + public FloodfillNetworkDatabaseFacade getSubNetDB(Hash dbid){ + return null; + } + public void restart() {} public void shutdown() {} public void startup() { @@ -88,32 +93,6 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade { public Set getAllRouters() { return new HashSet(_routers.keySet()); } public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) { return getAllRouters(); } - @Override - public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore, String dbid) { - return findNearestRouters(key, maxNumRouters, peersToIgnore); - } - - @Override - public DatabaseEntry lookupLocally(Hash key, String dbid) { - return lookupLocally(key); - } - - @Override - public DatabaseEntry lookupLocallyWithoutValidation(Hash key, String dbid) { - return lookupLocallyWithoutValidation(key); - } - - @Override - public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid) { - lookupLeaseSet(key, onFindJob, onFailedLookupJob, timeoutMs); - } - - @Override - public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, Hash fromLocalDest, - String dbid) { - lookupLeaseSet(key, onFindJob, onFailedLookupJob, timeoutMs, fromLocalDest); - } - @Override public LeaseSet lookupLeaseSetHashIsClient(Hash key) { throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetHashIsClient'"); @@ -124,67 +103,6 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade { throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetLocally'"); } - @Override - public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid) { - lookupRouterInfo(key, onFindJob, onFailedLookupJob, timeoutMs); - } - - @Override - public RouterInfo 
lookupRouterInfoLocally(Hash key, String dbid) { - return lookupRouterInfoLocally(key); - } - - @Override - public void lookupLeaseSetRemotely(Hash key, Hash fromLocalDest, String dbid) { - lookupLeaseSetRemotely(key, fromLocalDest); - } - - @Override - public void lookupLeaseSetRemotely(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, - Hash fromLocalDest, String dbid) { - lookupLeaseSetRemotely(key, onFindJob, onFailedLookupJob, timeoutMs, fromLocalDest); - } - - @Override - public void lookupDestination(Hash key, Job onFinishedJob, long timeoutMs, Hash fromLocalDest, String dbid) { - lookupDestination(key, onFinishedJob, timeoutMs, fromLocalDest); - } - - @Override - public Destination lookupDestinationLocally(Hash key, String dbid) { - return lookupDestinationLocally(key); - } - - @Override - public LeaseSet store(Hash key, LeaseSet leaseSet, String dbid) throws IllegalArgumentException { - return _fndb.store(key, leaseSet); - } - - @Override - public RouterInfo store(Hash key, RouterInfo routerInfo, String dbid) throws IllegalArgumentException { - return _fndb.store(key, routerInfo); - } - - @Override - public void publish(LeaseSet localLeaseSet, String dbid) { - _fndb.publish(localLeaseSet); - } - - @Override - public void unpublish(LeaseSet localLeaseSet, String dbid) { - _fndb.unpublish(localLeaseSet); - } - - @Override - public void fail(Hash dbEntry, String dbid) { - _fndb.fail(dbEntry); - } - - @Override - public Set getAllRouters(String dbid) { - return _fndb.getAllRouters(); - } - @Override public FloodfillNetworkDatabaseFacade mainNetDB() { return _fndb; @@ -201,12 +119,7 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade { } @Override - public FloodfillNetworkDatabaseFacade exploratoryNetDB() { - return _fndb; - } - - @Override - public FloodfillNetworkDatabaseFacade localNetDB() { + public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) { return _fndb; } @@ -214,4 +127,14 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade { public String getDbidByHash(Hash clientKey) { throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetHashIsClient'"); } + + @Override + public List getClients() { + throw new UnsupportedOperationException("Unimplemented method 'getClients'"); + } + + @Override + public Set getSubNetDBs(){ + throw new UnsupportedOperationException("Unimplemented method 'getSubNetDBs'"); + } } diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java index 0d5713851..ae4064013 100644 --- a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java +++ b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java @@ -209,7 +209,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { _hashPair = new OutboundCache.HashPair(_from.calculateHash(), toHash); _toString = toHash.toBase32(); // we look up here rather than runJob() so we may adjust the timeout - _leaseSet = ctx.netDbSegmentor().lookupLeaseSetLocally(toHash, _from.calculateHash().toBase32()); + _leaseSet = ctx.clientNetDb(_from.calculateHash()).lookupLeaseSetLocally(toHash); // use expiration requested by client if available, otherwise session config, // otherwise router config, otherwise default @@ -307,8 +307,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { if (_log.shouldInfo()) _log.info(getJobId() + ": RAP LS, firing search: " + 
_leaseSet.getHash().toBase32()); LookupLeaseSetFailedJob failed = new LookupLeaseSetFailedJob(getContext()); - getContext().netDbSegmentor().lookupLeaseSetRemotely(_leaseSet.getHash(), success, failed, - LS_LOOKUP_TIMEOUT, _from.calculateHash(), _from.calculateHash().toBase32()); + getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetRemotely(_leaseSet.getHash(), success, failed, + LS_LOOKUP_TIMEOUT, _from.calculateHash()); } else { dieFatal(MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET); } @@ -330,7 +330,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { long exp = now - _leaseSet.getLatestLeaseDate(); _log.info(getJobId() + ": leaseSet expired " + DataHelper.formatDuration(exp) + " ago, firing search: " + _leaseSet.getHash().toBase32()); } - getContext().netDbSegmentor().lookupLeaseSetRemotely(_leaseSet.getHash(), _from.calculateHash(), _from.calculateHash().toBase32()); + getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetRemotely(_leaseSet.getHash(), _from.calculateHash()); } } success.runJob(); @@ -340,7 +340,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { _log.debug(getJobId() + ": Send outbound client message - sending off leaseSet lookup job for " + _toString + " from client " + _from.calculateHash().toBase32()); LookupLeaseSetFailedJob failed = new LookupLeaseSetFailedJob(getContext()); Hash key = _to.calculateHash(); - getContext().netDbSegmentor().lookupLeaseSet(key, success, failed, LS_LOOKUP_TIMEOUT, _from.calculateHash(), _from.calculateHash().toBase32()); + getContext().clientNetDb(_from.calculateHash()).lookupLeaseSet(key, success, failed, LS_LOOKUP_TIMEOUT, _from.calculateHash()); } } @@ -349,7 +349,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * @return lease set or null if we should not send the lease set */ private LeaseSet getReplyLeaseSet(boolean force) { - LeaseSet newLS = getContext().netDbSegmentor().lookupLeaseSetLocally(_from.calculateHash(), _from.calculateHash().toBase32()); + LeaseSet newLS = getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetLocally(_from.calculateHash()); if (newLS == null) return null; // punt @@ -423,7 +423,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { private int getNextLease() { // set in runJob if found locally if (_leaseSet == null || !_leaseSet.getReceivedAsReply()) { - _leaseSet = getContext().netDbSegmentor().lookupLeaseSetLocally(_to.calculateHash(), _from.calculateHash().toBase32()); + _leaseSet = getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetLocally(_to.calculateHash()); if (_leaseSet == null) { // shouldn't happen if (_log.shouldLog(Log.WARN)) @@ -550,7 +550,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { *** unfortunately the "U" is rarely seen. 
if (!getContext().commSystem().wasUnreachable(l.getGateway())) { ***/ - RouterInfo ri = getContext().netDbSegmentor().lookupRouterInfoLocally(l.getGateway(), null); + RouterInfo ri = getContext().mainNetDb().lookupRouterInfoLocally(l.getGateway()); if (ri == null || ri.getCapabilities().indexOf(Router.CAPABILITY_UNREACHABLE) < 0) { _lease = l; break; @@ -587,7 +587,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { int cause; - if (getContext().netDbSegmentor().isNegativeCachedForever(_to.calculateHash(), _from.calculateHash().toBase32())) { + if (getContext().clientNetDb(_from.calculateHash()).isNegativeCachedForever(_to.calculateHash())) { if (_log.shouldLog(Log.WARN)) _log.warn("Unable to send to " + _toString + " because the sig type is unsupported"); cause = MessageStatusMessage.STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION; diff --git a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java index 42315a535..96ec1c18c 100644 --- a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java +++ b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java @@ -76,7 +76,7 @@ public class PublishLocalRouterInfoJob extends JobImpl { requeue(100); return; } - long last = getContext().netDbSegmentor().getLastRouterInfoPublishTime(); + long last = getContext().mainNetDb().getLastRouterInfoPublishTime(); long now = getContext().clock().now(); if (last + MIN_PUBLISH_DELAY > now) { long delay = getDelay(); @@ -141,7 +141,7 @@ public class PublishLocalRouterInfoJob extends JobImpl { + new Date(ri.getPublished())); try { // This won't really publish until the netdb is initialized. - getContext().netDbSegmentor().publish(ri); + getContext().mainNetDb().publish(ri); } catch (IllegalArgumentException iae) { _log.log(Log.CRIT, "Error publishing our identity - corrupt? Restart required", iae); getContext().router().rebuildNewIdentity(); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java index a4c9bb349..b45f77909 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java @@ -300,32 +300,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad return mfp; } - public List pickRandomFloodfillPeers() { - List list = new ArrayList(); - // In normal operation, client subDb do not need RI. - // pickRandomFloodfillPeers() is provided for future use cases. - - // get the total number of known routers - int count = getFloodfillPeers().size(); - if ((count == 0) || (minFloodfillPeers() == 0)) - return list; // Return empty list. - // pick a random number of routers between 4 and 4+1% of the total routers we know about. - int max = minFloodfillPeers() + (count / 100); - while (list.size() < max) { - int randVal = new RandomSource(_context).nextInt(count); - RouterInfo ri = lookupRouterInfoLocally(getFloodfillPeers().get(randVal)); - if (ri != null) { - if (!list.contains(ri)) { - if (validate(ri) == null) { - list.add(ri); - } - } - } - - } - return list; - } - /** * Send to a subset of all floodfill peers. * We do this to implement Kademlia within the floodfills, i.e. 
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java index 73eba0e0a..beb9b6f8b 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseSegmentor.java @@ -22,249 +22,163 @@ import net.i2p.router.RouterContext; import net.i2p.router.networkdb.reseed.ReseedChecker; import net.i2p.util.Log; +/** + * FloodfillNetworkDatabaseSegmentor + * + * Default implementation of the SegmentedNetworkDatabaseFacade. + * + * This is a data structure which manages (3+Clients) "sub-netDbs" on behalf of an + * I2P router, each representing its own view of the network. Normally, these sub-netDbs + * are identified by the hash of the primary session belonging to the client who "owns" + * a particular sub-netDb. + * + * There are 3 "Special" netDbs which have non-hash names: + * + * - Main NetDB: This is the netDb we use if or when we become a floodfill, and for + * direct interaction with other routers on the network, such as when we are communicating + * with a floodfill. + * - Multihome NetDB: This is used to stash leaseSets for our own sites when they are + * sent to us by a floodfill, so that we can reply when they are requested back from us + * regardless of our closeness to them in the routing table. + * - Exploratory NetDB: This is used when we want to stash a DatabaseEntry for a key + * during exploration but don't want it to go into the Main NetDB until we do something + * else with it. + * + * And there are an unlimited number of "Client" netDbs. These sub-netDbs are + * intended to contain only the information required to operate them, and as such + * most of them are very small, containing only a few LeaseSets belonging to clients. + * Each one corresponds to a Destination which can receive information from the + * netDb, and can be indexed either by its hash or by its base32 address. This index + * is known as the 'dbid' or database id. + * + * Users of this class should strive to always access their sub-netDbs via the + * explicit DBID of the destination recipient, or via the DBID of the special + * netDb when it's appropriate to route the netDb entry to one of the special tables. + * + * @author idk + * @since 0.9.60 + */ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade { protected final Log _log; private RouterContext _context; private Map _subDBs = new HashMap(); public static final String MAIN_DBID = "main"; private static final String MULTIHOME_DBID = "multihome"; + private static final String EXPLORATORY_DBID = "exploratory"; + private final FloodfillNetworkDatabaseFacade _mainDbid; + private final FloodfillNetworkDatabaseFacade _multihomeDbid; + private final FloodfillNetworkDatabaseFacade _exploratoryDbid; + /** + * Construct a new FloodfillNetworkDatabaseSegmentor with the given + * RouterContext, containing a default main netDb and a multihome netDb, + * and which is prepared to add client netDbs.
+ * + * @since 0.9.60 + */ public FloodfillNetworkDatabaseSegmentor(RouterContext context) { super(context); _log = context.logManager().getLog(getClass()); if (_context == null) _context = context; - FloodfillNetworkDatabaseFacade subdb = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID); - _subDBs.put(MAIN_DBID, subdb); + _mainDbid = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID); + _multihomeDbid = new FloodfillNetworkDatabaseFacade(_context, MULTIHOME_DBID); + _exploratoryDbid = new FloodfillNetworkDatabaseFacade(_context, EXPLORATORY_DBID); } - /* - * public FloodfillNetworkDatabaseFacade getSubNetDB() { - * return this; - * } + /** + * Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID. + * If the ID is null, the main database is returned. + * + * @param id the ID of the FloodfillNetworkDatabaseFacade object to retrieve + * @return the FloodfillNetworkDatabaseFacade object corresponding to the ID */ @Override - public FloodfillNetworkDatabaseFacade getSubNetDB(String id) { - return GetSubNetDB(id); + protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) { + if (id == null) + return getSubNetDB(MAIN_DBID); + return getSubNetDB(id.toBase32()); } - private FloodfillNetworkDatabaseFacade GetSubNetDB(String id) { - if (id == null || id.isEmpty()) { - return GetSubNetDB(MAIN_DBID); - } + /** + * Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID string. + * + * @param id the ID of the FloodfillNetworkDatabaseFacade object to retrieve + * @return the FloodfillNetworkDatabaseFacade object for the specified ID + * + */ + @Override + protected FloodfillNetworkDatabaseFacade getSubNetDB(String id) { + if (id == null || id.isEmpty() || id.equals(MAIN_DBID)) + return mainNetDB(); + if (id.equals(MULTIHOME_DBID)) + return multiHomeNetDB(); + if (id.equals(EXPLORATORY_DBID)) + return clientNetDB(); + if (id.endsWith(".i2p")) { if (!id.startsWith("clients_")) id = "clients_" + id; } + FloodfillNetworkDatabaseFacade subdb = _subDBs.get(id); if (subdb == null) { subdb = new FloodfillNetworkDatabaseFacade(_context, id); _subDBs.put(id, subdb); subdb.startup(); subdb.createHandlers(); - if (subdb.getFloodfillPeers().size() == 0) { - List ris = mainNetDB().pickRandomFloodfillPeers(); - for (RouterInfo ri : ris) { - if (_log.shouldLog(_log.DEBUG)) - _log.debug("Seeding: " + id + " with " + ris.size() + " peers " + ri.getHash()); - subdb.store(ri.getIdentity().getHash(), ri); - } - } } return subdb; } - public synchronized void startup() { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - // if (!subdb.isInitialized()){ - subdb.startup(); - // } - } - } - - protected void createHandlers() { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - subdb.createHandlers(); - } - } - /** * If we are floodfill, turn it off and tell everybody. + * Shut down all known subDbs. 
+ * + * @since 0.9.60 * - * @since 0.8.9 */ public synchronized void shutdown() { + _mainDbid.shutdown(); + _multihomeDbid.shutdown(); // shut down every entry in _subDBs - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", + + ") Shutting down all remaining sub-netDbs", new Exception()); subdb.shutdown(); } } /** - * This maybe could be shorter than - * RepublishLeaseSetJob.REPUBLISH_LEASESET_TIMEOUT, - * because we are sending direct, but unresponsive floodfills may take a while - * due to timeouts. - */ - static final long PUBLISH_TIMEOUT = 90 * 1000; - - /** - * Send our RI to the closest floodfill. This should always be called from the - * floodFillNetDB context. - * The caller context cannot be determined from here, so the caller will be - * relied on to insure this is only called in the floodfill context. - * - * @throws IllegalArgumentException if the local router info is invalid - */ - public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException { - if (localRouterInfo == null) - throw new IllegalArgumentException("localRouterInfo must not be null"); - if (localRouterInfo.getReceivedBy() == null) - mainNetDB().publish(localRouterInfo); - } - - /** - * @param type database store type - * @param lsSigType may be null - * @since 0.9.39 - */ - /* - * private boolean shouldFloodTo(Hash key, int type, SigType lsSigType, Hash - * peer, RouterInfo target) { - * return subdb.shouldFloodTo(key, type, lsSigType, peer, - * target); - * } - */ - - protected PeerSelector createPeerSelector(String dbid) { - // for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - // return subdb.createPeerSelector(); - // } - return this.getSubNetDB(dbid).createPeerSelector(); - } - - /** - * Public, called from console. This wakes up the floodfill monitor, - * which will rebuild the RI and log in the event log, - * and call setFloodfillEnabledFromMonitor which really sets it. - */ - public synchronized void setFloodfillEnabled(boolean yes) { - mainNetDB().setFloodfillEnabled(yes); - } - - /** - * Package private, called from FloodfillMonitorJob. This does not wake up the - * floodfill monitor. + * list of the RouterInfo objects for all known peers; + * + * @since 0.9.60 * - * @since 0.9.34 */ - synchronized void setFloodfillEnabledFromMonitor(boolean yes) { - mainNetDB().setFloodfillEnabledFromMonitor(yes); - } - - public boolean floodfillEnabled() { - return mainNetDB().floodfillEnabled(); - } - - /** - * @param peer may be null, returns false if null - */ - public boolean isFloodfill(RouterInfo peer) { - return mainNetDB().isFloodfill(peer); - } - public List getKnownRouterData() { List rv = new ArrayList(); - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", + + ") Called from FNDS, will be combined with all other subDbs", new Exception()); rv.addAll(subdb.getKnownRouterData()); } return rv; } - /** - * Lookup using exploratory tunnels. - * - * Caller should check negative cache and/or banlist before calling. 
- * - * Begin a kademlia style search for the key specified, which can take up to - * timeoutMs and - * will fire the appropriate jobs on success or timeout (or if the kademlia - * search completes - * without any match) - * - * @return null always - */ - - SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - return subdb.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease); - } - return null; - } - - /** - * Lookup using the client's tunnels. - * - * Caller should check negative cache and/or banlist before calling. - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @return null always - * @since 0.9.10 - */ - SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease, - Hash fromLocalDest) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - return subdb.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease, fromLocalDest); - } - return null; - } - - /** - * Must be called by the search job queued by search() on success or failure - */ - void complete(Hash key) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - subdb.complete(key); - } - } - /** * list of the Hashes of currently known floodfill peers; * Returned list will not include our own hash. * List is not sorted and not shuffled. + * + * @since 0.9.60 */ public List getFloodfillPeers() { List peers = new ArrayList(); - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("(dbid: " + subdb._dbid + ") Deprecated! Arbitrary selection of this subDb", @@ -274,143 +188,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF return peers; } - /** @since 0.7.10 */ - boolean isVerifyInProgress(Hash h) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - return subdb.isVerifyInProgress(h); - } - return false; - } - - /** @since 0.7.10 */ - void verifyStarted(Hash h) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - subdb.verifyStarted(h); - } - } - - /** @since 0.7.10 */ - void verifyFinished(Hash h) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - subdb.verifyFinished(h); - } - } - - /** - * Search for a newer router info, drop it from the db if the search fails, - * unless just started up or have bigger problems. 
- */ - - protected void lookupBeforeDropping(Hash peer, RouterInfo info) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - subdb.lookupBeforeDropping(peer, info); - } - } - - /** - * Return the RouterInfo structures for the routers closest to the given key. - * At most maxNumRouters will be returned - * - * @param key The key - * @param maxNumRouters The maximum number of routers to return - * @param peersToIgnore Hash of routers not to include - */ - @Override - public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore, String dbid) { - return getSubNetDB(dbid).findNearestRouters(key, maxNumRouters, peersToIgnore); - } - - /** - * @return RouterInfo, LeaseSet, or null - * @since 0.8.3 - */ - @Override - public DatabaseEntry lookupLocally(Hash key, String dbid) { - if (dbid == null || dbid.isEmpty()) { - DatabaseEntry rv = null; - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - rv = subdb.lookupLocally(key); - if (rv != null) { - return rv; - } - } - rv = this.lookupLocally(key, MAIN_DBID); - if (rv != null) { - return rv; - } - } - return this.getSubNetDB(dbid).lookupLocally(key); - } - - /** - * Not for use without validation - * - * @return RouterInfo, LeaseSet, or null, NOT validated - * @since 0.9.38 - */ - @Override - public DatabaseEntry lookupLocallyWithoutValidation(Hash key, String dbid) { - if (dbid == null || dbid.isEmpty()) { - DatabaseEntry rv = null; - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - rv = subdb.lookupLocallyWithoutValidation(key); - if (rv != null) { - return rv; - } - } - rv = this.lookupLocallyWithoutValidation(key, MAIN_DBID); - if (rv != null) { - return rv; - } - } - return this.getSubNetDB(dbid).lookupLocallyWithoutValidation(key); - } - - @Override - public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid) { - this.getSubNetDB(dbid).lookupLeaseSet(key, onFindJob, onFailedLookupJob, timeoutMs); - } - - /** - * Lookup using the client's tunnels - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @since 0.9.10 - */ - @Override - public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, Hash fromLocalDest, - String dbid) { - if (dbid == null || dbid.isEmpty()) { - dbid = fromLocalDest.toBase32(); - } - this.getSubNetDB(dbid).lookupLeaseSet(key, onFindJob, onFailedLookupJob, timeoutMs, fromLocalDest); - } - /** * Lookup using the client's tunnels when the client LS key is know * but the client dbid is not. @@ -424,11 +201,19 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF return lookupLeaseSetLocally(key, dbid); } + /** + * Lookup using the client's tunnels when the client LS key is known. + * if a DBID is not provided, the clients will all be checked, and the + * first value will be used. 
+ * + * @since 0.9.60 + * + */ @Override - public LeaseSet lookupLeaseSetLocally(Hash key, String dbid) { + protected LeaseSet lookupLeaseSetLocally(Hash key, String dbid) { if (dbid == null || dbid.isEmpty()) { LeaseSet rv = null; - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("(dbid: " + subdb._dbid + ") Deprecated! Arbitrary selection of this subDb", @@ -446,264 +231,15 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF return this.getSubNetDB(dbid).lookupLeaseSetLocally(key); } - public LeaseSet lookupLeaseSetLocally(Hash key) { - return lookupLeaseSetLocally(key, null); - } - - @Override - public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid) { - this.getSubNetDB(dbid).lookupRouterInfo(key, onFindJob, onFailedLookupJob, timeoutMs); - } - - @Override - public RouterInfo lookupRouterInfoLocally(Hash key, String dbid) { - if (dbid == null || dbid.isEmpty()) { - RouterInfo ri = mainNetDB().lookupRouterInfoLocally(key); - if (ri != null) { - return ri; - } - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - ri = subdb.lookupRouterInfoLocally(key); - if (ri != null) { - return ri; - } - } - } - return this.getSubNetDB(dbid).lookupRouterInfoLocally(key); - } - /** - * Unconditionally lookup using the client's tunnels. - * No success or failed jobs, no local lookup, no checks. - * Use this to refresh a leaseset before expiration. - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @since 0.9.25 - */ - @Override - public void lookupLeaseSetRemotely(Hash key, Hash fromLocalDest, String dbid) { - this.getSubNetDB(dbid).lookupLeaseSetRemotely(key, fromLocalDest); - } - - /** - * Unconditionally lookup using the client's tunnels. 
- * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @param onFindJob may be null - * @param onFailedLookupJob may be null - * @since 0.9.47 - */ - @Override - public void lookupLeaseSetRemotely(Hash key, Job onFindJob, Job onFailedLookupJob, - long timeoutMs, Hash fromLocalDest, String dbid) { - this.getSubNetDB(dbid).lookupLeaseSetRemotely(key, onFindJob, onFailedLookupJob, timeoutMs, fromLocalDest); - } - - /** - * Lookup using the client's tunnels - * Succeeds even if LS validation fails due to unsupported sig type - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @since 0.9.16 - */ - @Override - public void lookupDestination(Hash key, Job onFinishedJob, long timeoutMs, Hash fromLocalDest, String dbid) { - if (dbid == null || dbid.isEmpty()) { - if (fromLocalDest != null) - dbid = fromLocalDest.toBase32(); - else - dbid = null; - } - this.getSubNetDB(dbid).lookupDestination(key, onFinishedJob, timeoutMs, fromLocalDest); - } - - /** - * Lookup locally in netDB and in badDest cache - * Succeeds even if LS validation failed due to unsupported sig type - * - * @since 0.9.16 - */ - @Override - public Destination lookupDestinationLocally(Hash key, String dbid) { - return this.getSubNetDB(dbid).lookupDestinationLocally(key); - } - - /** - * @return the leaseSet if another leaseSet already existed at that key - * - * @throws IllegalArgumentException if the data is not valid - */ - @Override - public LeaseSet store(Hash key, LeaseSet leaseSet, String dbid) throws IllegalArgumentException { - if (dbid == null || dbid.isEmpty()) { - if (key != null) - dbid = key.toBase32(); - } - return getSubNetDB(dbid).store(key, leaseSet); - } - - public LeaseSet store(Hash key, LeaseSet leaseSet) { - if (leaseSet == null) { - return null; - } - Hash to = leaseSet.getReceivedBy(); - if (to != null) { - String b32 = to.toBase32(); - FloodfillNetworkDatabaseFacade cndb = _context.clientNetDb(b32); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("store " + key.toBase32() + " to client " + b32); - if (b32 != null) - return cndb.store(key, leaseSet); - } - FloodfillNetworkDatabaseFacade fndb = _context.mainNetDb(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("store " + key.toBase32() + " to main"); - return fndb.store(key, leaseSet); - } - - public RouterInfo store(Hash key, RouterInfo routerInfo) { - Hash to = routerInfo.getReceivedBy(); - if (to != null) { - String b32 = to.toBase32(); - FloodfillNetworkDatabaseFacade cndb = _context.clientNetDb(b32); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("store " + key.toBase32() + " to client " + b32); - if (b32 != null) - return cndb.store(key, routerInfo); - } - FloodfillNetworkDatabaseFacade fndb = _context.mainNetDb(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("store " + key.toBase32() + " to main"); - return fndb.store(key, routerInfo); - } - - /** - * @return the routerInfo if another router already existed at that key - * - * @throws IllegalArgumentException if the data is not valid - */ - @Override - public RouterInfo store(Hash key, RouterInfo routerInfo, String dbid) throws IllegalArgumentException { - return getSubNetDB(dbid).store(key, routerInfo); - } - - /** - * @return the old entry if it already existed at that key - * @throws IllegalArgumentException if the data is not valid - * @since 0.9.16 - */ - @Override - public DatabaseEntry store(Hash key, DatabaseEntry entry, String dbid) throws IllegalArgumentException { - if (entry.getType() == 
DatabaseEntry.KEY_TYPE_ROUTERINFO) - return store(key, (RouterInfo) entry, dbid); - if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) - return store(key, (LeaseSet) entry, dbid); - throw new IllegalArgumentException("unknown type"); - } - - @Override - public void publish(LeaseSet localLeaseSet, String dbid) { - this.getSubNetDB(dbid).publish(localLeaseSet); - } - - @Override - public void unpublish(LeaseSet localLeaseSet, String dbid) { - this.getSubNetDB(dbid).unpublish(localLeaseSet); - } - - @Override - public void unpublish(LeaseSet localLeaseSet) { - if (localLeaseSet == null) { - return; - } - Hash client = localLeaseSet.getReceivedBy(); - if (client != null) - this.getSubNetDB(client.toBase32()).unpublish(localLeaseSet); - this.getSubNetDB(null).unpublish(localLeaseSet); - } - - @Override - public void fail(Hash dbEntry, String dbid) { - this.getSubNetDB(dbid).fail(dbEntry); - } - - @Override - public void fail(Hash dbEntry) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - subdb.fail(dbEntry); - } - } - - /** - * The last time we successfully published our RI. + * Check if all of the known subDbs are initialized + * + * @since 0.9.60 * - * @since 0.9.9 */ - @Override - public long getLastRouterInfoPublishTime(String dbid) { - return this.getSubNetDB(dbid).getLastRouterInfoPublishTime(); - } - - public long getLastRouterInfoPublishTime() { - return this.mainNetDB().getLastRouterInfoPublishTime(); - } - - @Override - public Set getAllRouters(String dbid) { - if (dbid == null || dbid.isEmpty()) { - return getAllRouters(); - } - return this.getSubNetDB(dbid).getAllRouters(); - } - - public Set getAllRouters() { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - return subdb.getAllRouters(); - } - return null; - } - - @Override - public int getKnownRouters(String dbid) { - return this.getSubNetDB(dbid).getKnownRouters(); - } - - public int getKnownRouters() { - int total = 0; - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! 
Arbitrary selection of this subDb", - new Exception()); - total += subdb.getKnownRouters(); - } - return total; - } - - @Override - public int getKnownLeaseSets(String dbid) { - return this.getSubNetDB(dbid).getKnownLeaseSets(); - } - - @Override - public boolean isInitialized(String dbid) { - return this.getSubNetDB(dbid).isInitialized(); - } - public boolean isInitialized() { - boolean rv = false; - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + boolean rv = mainNetDB().isInitialized(); + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { rv = subdb.isInitialized(); if (!rv) { break; @@ -712,46 +248,16 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF return rv; } - @Override - public void rescan(String dbid) { - this.getSubNetDB(dbid).rescan(); - } - - /** Debug only - all user info moved to NetDbRenderer in router console */ - @Override - public void renderStatusHTML(Writer out) throws IOException { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - subdb.renderStatusHTML(out); - } - } - - /** public for NetDbRenderer in routerconsole */ - @Override - public Set getLeases(String dbid) { - return this.getSubNetDB(dbid).getLeases(); - } - - /** public for NetDbRenderer in routerconsole */ - @Override - public Set getRouters(String dbid) { - if (dbid == null || dbid.isEmpty()) { - Set rv = new HashSet<>(); - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("(dbid: " + subdb._dbid - + ") Deprecated! Arbitrary selection of this subDb", - new Exception()); - rv.addAll(subdb.getRouters()); - } - return rv; - } - return this.getSubNetDB(dbid).getRouters(); - } - + /** + * list of the RouterInfo objects for all known peers + * + * @since 0.9.60 + * + */ @Override public Set getRouters() { Set rv = new HashSet<>(); - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("(dbid: " + subdb._dbid + ") Deprecated! 
Arbitrary selection of this subDb", @@ -761,28 +267,42 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF return rv; } + + + /** + * list of the RouterInfo objects for all known peers known to clients(in subDbs) only + * + * @since 0.9.60 + * + */ public Set getRoutersKnownToClients() { Set rv = new HashSet<>(); - for (String key : _subDBs.keySet()) { - if (key != null && !key.isEmpty()) { - if (key.startsWith("client")) - rv.addAll(this.getSubNetDB(key).getRouters()); - } + for (String key : getClients()) { + rv.addAll(this.getSubNetDB(key).getRouters()); } return rv; } + /** + * list of the LeaseSet objects for all known peers known to clients(in subDbs) only + * + * @since 0.9.60 + * + */ public Set getLeasesKnownToClients() { Set rv = new HashSet<>(); - for (String key : _subDBs.keySet()) { - if (key != null && !key.isEmpty()) { - if (key.startsWith("client")) - rv.addAll(this.getSubNetDB(key).getLeases()); - } + for (String key : getClients()) { + rv.addAll(this.getSubNetDB(key).getLeases()); } return rv; } + /** + * list all of the dbids of all known client subDbs + * + * @since 0.9.60 + * + */ public List getClients() { List rv = new ArrayList(); for (String key : _subDBs.keySet()) { @@ -794,130 +314,78 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF return rv; } - /** @since 0.9 */ - @Override - public ReseedChecker reseedChecker() { - return mainNetDB().reseedChecker(); - }; - /** - * Is it permanently negative cached? - * - * @param key only for Destinations; for RouterIdentities, see Banlist - * @since 0.9.16 - */ - @Override - public boolean isNegativeCachedForever(Hash key, String dbid) { - return this.getSubNetDB(dbid).isNegativeCached(key); - } - - /** - * @param spk unblinded key - * @return BlindData or null - * @since 0.9.40 - */ - public BlindData getBlindData(SigningPublicKey spk, String dbid) { - return this.getSubNetDB(dbid).getBlindData(spk); - } - - /** - * @param bd new BlindData to put in the cache - * @since 0.9.40 - */ - @Override - public void setBlindData(BlindData bd, String dbid) { - this.getSubNetDB(dbid).setBlindData(bd); - } - - /** - * For console ConfigKeyringHelper + * get the main netDb, which is the one we will use if we are a floodfill * - * @since 0.9.41 - */ - @Override - public List getBlindData(String dbid) { - return this.getSubNetDB(dbid).getBlindData(); - } - - /** - * For console ConfigKeyringHelper + * @since 0.9.60 * - * @return true if removed - * @since 0.9.41 */ @Override - public boolean removeBlindData(SigningPublicKey spk, String dbid) { - return this.getSubNetDB(dbid).removeBlindData(spk); - } - - /** - * Notify the netDB that the routing key changed at midnight UTC - * - * @since 0.9.50 - */ - @Override - public void routingKeyChanged() { - this.mainNetDB().routingKeyChanged(); - } - - // @Override - /* - * public void restart() { - * for (String dbid : this._subDBs.keySet()) { - * this.getSubNetDB(dbid).restart(); - * } - * } - */ - - @Override public FloodfillNetworkDatabaseFacade mainNetDB() { - return this.getSubNetDB(MAIN_DBID); + return _mainDbid; } + /** + * get the multiHome netDb, which is especially for handling multihomes + * + * @since 0.9.60 + * + */ @Override public FloodfillNetworkDatabaseFacade multiHomeNetDB() { - return this.getSubNetDB(MULTIHOME_DBID); + return _multihomeDbid; } + /** + * get the client netDb for the given id. + * Will return the "exploratory(default client)" netDb if + * the dbid is null. 
+ * + * @since 0.9.60 + * + */ @Override public FloodfillNetworkDatabaseFacade clientNetDB(String id) { if (id == null || id.isEmpty()) - return exploratoryNetDB(); + return clientNetDB(); return this.getSubNetDB(id); } + /** + * get the client netDb for the given id + * Will return the "exploratory(default client)" netDb if + * the dbid is null. + * + * @since 0.9.60 + * + */ + @Override + public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) { + if (id != null) + return getSubNetDB(id.toBase32()); + return clientNetDB(); + } + + /** + * get the default client(exploratory) netDb + * + * @since 0.9.60 + * + */ public FloodfillNetworkDatabaseFacade clientNetDB() { - return clientNetDB(null); - } - - @Override - public FloodfillNetworkDatabaseFacade exploratoryNetDB() { - return this.getSubNetDB("exploratory"); - } - - @Override - public FloodfillNetworkDatabaseFacade localNetDB() { - return this.getSubNetDB("local"); - } - - @Override - public List getLocalClientsBlindData() { - ArrayList rv = new ArrayList<>(); - for (String subdb : _subDBs.keySet()) { - // if (subdb.startsWith("clients_")) - // TODO: see if we can access only one subDb at a time when we need - // to look up a client by SPK. We mostly need this for managing blinded - // and encrypted keys in the Keyring Config UI page. See also - // ConfigKeyringHelper - rv.addAll(_subDBs.get(subdb).getBlindData()); - } - return rv; + return _exploratoryDbid; } + /** + * look up the dbid of the client with the given signing public key + * + * @since 0.9.60 + * + */ @Override public List lookupClientBySigningPublicKey(SigningPublicKey spk) { List rv = new ArrayList<>(); - for (String subdb : _subDBs.keySet()) { + for (String subdb : getClients()) { // if (subdb.startsWith("clients_")) // TODO: see if we can access only one subDb at a time when we need // to look up a client by SPK. We mostly need this for managing blinded @@ -950,10 +418,41 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF * @since 0.9.60 */ private String matchDbid(Hash clientKey) { - for (FloodfillNetworkDatabaseFacade subdb : _subDBs.values()) { + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { if (subdb.matchClientKey(clientKey)) return subdb._dbid; } return null; } + + /** + * get all the subDbs and return them in a Set. + * + * @since 0.9.60 + * + */ + @Override + public Set getSubNetDBs() { + Set rv = new HashSet<>(); + rv.add(mainNetDB()); + rv.add(multiHomeNetDB()); + rv.add(clientNetDB()); + rv.addAll(_subDBs.values()); + return rv; + } + + /** + * list of the BlindData objects for all known clients + * + * @since 0.9.60 + * + */ + @Override + public List getLocalClientsBlindData() { + List rv = new ArrayList<>(); + for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) { + rv.addAll(subdb.getBlindData()); + } + return rv; + } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java index edc7c7ee8..b3815d306 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java @@ -460,10 +460,10 @@ class FloodfillVerifyStoreJob extends JobImpl { private void resend() { // It's safe to check the default netDb first, but if the lookup is for // a client, nearly all RI is expected to be found in the FF netDb. 
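For reference, the lookup-fallback rule described in the comment above (and implemented in the resend() hunk that follows) can be expressed on its own. This is a sketch only, assuming the lookupLocally(), isClientDb() and mainNetDb() calls used elsewhere in this patch; the helper class and method names are illustrative and not part of the change:

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;

/** Illustrative helper only, not part of this patch. */
class LocalLookupSketch {
    /**
     * Check the facade that owns the store first; for RouterInfos only,
     * fall back to the main (floodfill) netDb. Client LeaseSets are never
     * looked up across the segment boundary.
     */
    static DatabaseEntry lookupWithFallback(RouterContext ctx,
                                            FloodfillNetworkDatabaseFacade facade,
                                            Hash key, boolean isRouterInfo) {
        DatabaseEntry ds = facade.lookupLocally(key);
        if (ds == null && facade.isClientDb() && isRouterInfo)
            ds = ctx.mainNetDb().lookupLocally(key);  // RI is safe to share across segments
        return ds;
    }
}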
- DatabaseEntry ds = getContext().netDbSegmentor().lookupLocally(_key, _facade._dbid); + DatabaseEntry ds = getContext().netDbSegmentor().getSubNetDB(_facade._dbid).lookupLocally(_key); if ((ds == null) && _facade.isClientDb() && _isRouterInfo) // It's safe to check the floodfill netDb for RI - ds = getContext().netDbSegmentor().lookupLocally(_key, FloodfillNetworkDatabaseSegmentor.MAIN_DBID); + ds = getContext().mainNetDb().lookupLocally(_key); if (ds != null) { // By the time we get here, a minute or more after the store started, // we may have already started a new store diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java index bea1398e7..f092fffc2 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java @@ -38,7 +38,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku */ @Override protected boolean answerAllQueries() { - if (!getContext().netDbSegmentor().floodfillEnabled()) return false; + if (!getContext().mainNetDb().floodfillEnabled()) return false; return FloodfillNetworkDatabaseFacade.isFloodfill(getContext().router().getRouterInfo()); } @@ -52,7 +52,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku super.sendClosest(key, routerInfoSet, toPeer, replyTunnel); // go away, you got the wrong guy, send our RI back unsolicited - if (!getContext().netDbSegmentor().floodfillEnabled()) { + if (!getContext().mainNetDb().floodfillEnabled()) { // We could just call sendData(myhash, myri, toPeer, replyTunnel) but // that would increment the netDb.lookupsHandled and netDb.lookupsMatched stats DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java index 1f65f01cb..9a76366cb 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java @@ -44,7 +44,7 @@ class RepublishLeaseSetJob extends JobImpl { try { if (getContext().clientManager().isLocal(_dest)) { - LeaseSet ls = getContext().netDbSegmentor().lookupLeaseSetLocally(_dest, _dest.toBase32()); + LeaseSet ls = getContext().clientNetDb(_dest).lookupLeaseSetLocally(_dest); if (ls != null) { if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { if (_log.shouldLog(Log.WARN)) @@ -103,9 +103,9 @@ class RepublishLeaseSetJob extends JobImpl { // Don't requeue if there's a newer LS, KNDF will have already done that LeaseSet ls = null; if (_dest != null) - ls = getContext().netDbSegmentor().lookupLeaseSetLocally(_ls.getHash(), _dest.toBase32()); + ls = getContext().clientNetDb(_dest).lookupLeaseSetLocally(_ls.getHash()); else - getContext().netDbSegmentor().lookupLeaseSetLocally(_ls.getHash(), null); + getContext().mainNetDb().lookupLeaseSetLocally(_ls.getHash()); // ^ _dest should never be null here, right? So maybe instead we return immediately? 
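The trailing comment above asks whether _dest can ever be null at this point. One way the check could be written to make that expectation explicit is sketched below; the class and method are hypothetical and reuse only calls that appear in this hunk (clientNetDb(), lookupLeaseSetLocally(), getEarliestLeaseDate()):

import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.RouterContext;

/** Illustrative helper only, not part of this patch. */
class RepublishCheckSketch {
    /**
     * @param dest      hash of the local destination being republished, expected non-null
     * @param published the LeaseSet we attempted to publish
     * @return true if the job should simply requeue because the locally-known
     *         LeaseSet is still the one that was published
     */
    static boolean shouldRequeue(RouterContext ctx, Hash dest, LeaseSet published) {
        if (dest == null)
            return false;  // not expected here; give up instead of guessing a netDb
        LeaseSet ls = ctx.clientNetDb(dest).lookupLeaseSetLocally(published.getHash());
        return ls != null &&
               ls.getEarliestLeaseDate() == published.getEarliestLeaseDate();
    }
}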
if (ls != null && ls.getEarliestLeaseDate() == _ls.getEarliestLeaseDate()) { requeueRepublish(); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java index 1cc309075..9baf130bb 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java @@ -143,7 +143,7 @@ class SearchJob extends JobImpl { // The other two places this was called (one below and one in FNDF) // have been commented out. // Returning false essentially enables kademlia as a backup to floodfill for search responses. - if (ctx.netDbSegmentor().floodfillEnabled()) + if (ctx.mainNetDb().floodfillEnabled()) return false; return ctx.getProperty("netDb.floodfillOnly", DEFAULT_FLOODFILL_ONLY); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java index 8bd2f8fbf..b20157efc 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java @@ -85,17 +85,13 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob { DatabaseStoreMessage msg = (DatabaseStoreMessage)message; DatabaseEntry entry = msg.getEntry(); try { - switch (entry.getType()) { - case DatabaseEntry.KEY_TYPE_ROUTERINFO: - RouterInfo ri = (RouterInfo) entry; - getContext().netDbSegmentor().store(ri.getHash(), ri); - break; - case DatabaseEntry.KEY_TYPE_LEASESET: - LeaseSet ls = (LeaseSet) entry; - getContext().netDbSegmentor().store(ls.getHash(), ls); - break; - default: - break; + if (entry.isRouterInfo()) { + RouterInfo ri = (RouterInfo) entry; + getContext().netDbSegmentor().getSubNetDB(_facade._dbid).store(ri.getHash(), ri); + } + if (entry.isLeaseSet()) { + LeaseSet ls = (LeaseSet) entry; + getContext().netDbSegmentor().getSubNetDB(_facade._dbid).store(ls.getHash(), ls); } } catch (UnsupportedCryptoException iae) { // don't blame the peer diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java index 4f1a49a86..e973c2c80 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SegmentedNetworkDatabaseFacade.java @@ -5,6 +5,7 @@ import java.io.Writer; import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.HashSet; import net.i2p.data.BlindData; import net.i2p.data.DatabaseEntry; @@ -18,318 +19,188 @@ import net.i2p.router.NetworkDatabaseFacade; import net.i2p.router.RouterContext; import net.i2p.router.networkdb.reseed.ReseedChecker; -public abstract class SegmentedNetworkDatabaseFacade { // extends FloodfillNetworkDatabaseFacade { +/** + * SegmentedNetworkDatabaseFacade + * + * This class implements an interface for managing many netDbs as part of a + * single I2P instance, each representing its own view of the network. This + * allows the I2P clients to operate independently of what information the other + * clients are obtaining from the network database, and prevents information + * intended for clients from entering the table used by the router for + * Floodfill operations.
+ * + * The benefit of this is that we can use this to provide an effective barrier + * against "Context-Confusion" attacks which exploit the fact that messages sent + * to clients can update the routing table used by a floodfill, providing + * evidence that the floodfill hosts the corresponding client. When applied + * correctly, so that every client uses a unique subDb, the entire class of attack + * should be neutralized. + * + * The drawback of this is that it makes the netDb less efficient. Clients and + * Floodfills who share a netDb can update the tables used by those netDbs when + * a Client encounters an entry obtained by a Floodfill or vice-versa. Clients also + * must sometimes search the netDb for keys that are owned by other clients or by + * a co-located Floodfill, if one exists. + * + * In some contexts, it makes sense to view all the tables at once, especially when + * viewing information from the UI. The functions 'getLeases*', 'getRouters*', and + * 'getLocalClient*' are provided for this purpose. + * + * In the future, this could also be extended to provide Whanau-like functionality + * by determining when clients and the local floodfill disagree on the content of a + * leaseSet. + * + * See implementation: FloodfillNetworkDatabaseSegmentor + * + * @author idk + * @since 0.9.60 + */ +public abstract class SegmentedNetworkDatabaseFacade { public SegmentedNetworkDatabaseFacade(RouterContext context) { // super(context, null); } - public abstract FloodfillNetworkDatabaseFacade getSubNetDB(String id); - + /** + * Get a sub-netDb using a string identifier + * + * @since 0.9.60 + */ + protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(String dbid); + /** + * Get a sub-netDb using a Hash identifier + * + * @since 0.9.60 + */ + protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(Hash dbid); + /** + * Get the main netDb, the one which is used if we're a floodfill + * + * @since 0.9.60 + */ public abstract FloodfillNetworkDatabaseFacade mainNetDB(); - + /** + * Get the multihome netDb, the one which is used if we're a floodfill AND we + * have a multihome address sent to us + * + * @since 0.9.60 + */ public abstract FloodfillNetworkDatabaseFacade multiHomeNetDB(); - - public abstract FloodfillNetworkDatabaseFacade clientNetDB(String id); - - public abstract FloodfillNetworkDatabaseFacade exploratoryNetDB(); - - public abstract FloodfillNetworkDatabaseFacade localNetDB(); - - public abstract void startup(); - + /** + * Get a client netDb for a given client string identifier. Will never + * return the mainNetDB. + * + * @since 0.9.60 + */ + public abstract FloodfillNetworkDatabaseFacade clientNetDB(String dbid); + /** + * Get a client netDb for a given client Hash identifier. Will never + * return the mainNetDB. + * + * @since 0.9.60 + */ + public abstract FloodfillNetworkDatabaseFacade clientNetDB(Hash dbid); + /** + * Shut down the network database and all subDbs. + * + * @since 0.9.60 + */ public abstract void shutdown(); - /** - * Return the RouterInfo structures for the routers closest to the given key. 
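As a concrete illustration of the separation the class comment above describes, a LeaseSet that arrives for a client is stored only through that client's facade, while the floodfill role only ever touches the main netDb. This is a sketch, assuming the FloodfillNetworkDatabaseFacade.store(Hash, LeaseSet) call used elsewhere in this patch and the mainNetDb()/clientNetDb() accessors on RouterContext; the class and method names are illustrative:

import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.RouterContext;

/** Illustrative only: which table a LeaseSet lands in after segmentation. */
class SegmentationSketch {
    /**
     * A LeaseSet delivered down a client tunnel goes into that client's subDb,
     * so it can never serve as evidence that the local floodfill hosts the client.
     */
    static void storeForClient(RouterContext ctx, Hash client, LeaseSet ls) {
        ctx.clientNetDb(client).store(ls.getHash(), ls);
    }

    /** A LeaseSet accepted in the floodfill role goes to the main netDb only. */
    static void storeAsFloodfill(RouterContext ctx, LeaseSet ls) {
        ctx.mainNetDb().store(ls.getHash(), ls);
    }
}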
- * At most maxNumRouters will be returned - * - * @param key The key - * @param maxNumRouters The maximum number of routers to return - * @param peersToIgnore Hash of routers not to include - */ - public abstract Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore, String dbid); - - /** - * @return RouterInfo, LeaseSet, or null - * @since 0.9.59 - */ - public abstract DatabaseEntry lookupLocally(Hash key, String dbid); - - /** - * Not for use without validation + * Lookup the leaseSet for a given key in only client dbs. * - * @return RouterInfo, LeaseSet, or null, NOT validated - * @since 0.9.59 + * @since 0.9.60 */ - public abstract DatabaseEntry lookupLocallyWithoutValidation(Hash key, String dbid); - - public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid); - - /** - * Lookup using the client's tunnels - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @since 0.9.59 - */ - public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, - Hash fromLocalDest, String dbid); - public abstract LeaseSet lookupLeaseSetHashIsClient(Hash key); - - public abstract LeaseSet lookupLeaseSetLocally(Hash key, String dbid); - - public abstract LeaseSet lookupLeaseSetLocally(Hash key); - - public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid); - - public abstract RouterInfo lookupRouterInfoLocally(Hash key, String dbid); - /** - * Unconditionally lookup using the client's tunnels. - * No success or failed jobs, no local lookup, no checks. - * Use this to refresh a leaseset before expiration. - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @since 0.9.59 - */ - public abstract void lookupLeaseSetRemotely(Hash key, Hash fromLocalDest, String dbid); - - /** - * Unconditionally lookup using the client's tunnels. 
- * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @param onFindJob may be null - * @param onFailedLookupJob may be null - * @since 0.9.59 - */ - public abstract void lookupLeaseSetRemotely(Hash key, Job onFindJob, Job onFailedLookupJob, - long timeoutMs, Hash fromLocalDest, String dbid); - - /** - * Lookup using the client's tunnels - * Succeeds even if LS validation fails due to unsupported sig type - * - * @param fromLocalDest use these tunnels for the lookup, or null for - * exploratory - * @since 0.9.59 - */ - public abstract void lookupDestination(Hash key, Job onFinishedJob, long timeoutMs, Hash fromLocalDest, - String dbid); - - /** - * Lookup locally in netDB and in badDest cache - * Succeeds even if LS validation failed due to unsupported sig type - * - * @since 0.9.59 - */ - public abstract Destination lookupDestinationLocally(Hash key, String dbid); - - /** - * @return the leaseSet if another leaseSet already existed at that key - * - * @throws IllegalArgumentException if the data is not valid - */ - public abstract LeaseSet store(Hash key, LeaseSet leaseSet, String dbid) throws IllegalArgumentException; - - /** - * @return the routerInfo if another router already existed at that key - * - * @throws IllegalArgumentException if the data is not valid - */ - public abstract RouterInfo store(Hash key, RouterInfo routerInfo, String dbid) throws IllegalArgumentException; - - /** - * @return the old entry if it already existed at that key - * @throws IllegalArgumentException if the data is not valid - * @since 0.9.59 - */ - public DatabaseEntry store(Hash key, DatabaseEntry entry, String dbid) throws IllegalArgumentException { - if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) - return getSubNetDB(dbid).store(key, (RouterInfo) entry); - if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) - return getSubNetDB(dbid).store(key, (LeaseSet) entry); - throw new IllegalArgumentException("unknown type"); - } - - public LeaseSet store(Hash key, LeaseSet leaseSet) { - return store(key, leaseSet, null); - } - - public RouterInfo store(Hash key, RouterInfo routerInfo) { - return store(key, routerInfo, null); - } - - /** - * @throws IllegalArgumentException if the local router is not valid - */ - public abstract void publish(RouterInfo localRouterInfo) throws IllegalArgumentException; - - public abstract void publish(LeaseSet localLeaseSet, String dbid); - - public abstract void unpublish(LeaseSet localLeaseSet, String dbid); - - public abstract void unpublish(LeaseSet localLeaseSet); - - public abstract void fail(Hash dbEntry, String dbid); - - public abstract void fail(Hash dbEntry); - - /** - * The last time we successfully published our RI. + * Lookup the leaseSet for a given key locally across all dbs if dbid is + * null, or locally for the given dbid if it is not null. Use carefully, + * this function crosses db boundaries and is intended only for local use. * - * @since 0.9.59 + * @since 0.9.60 + */ + protected abstract LeaseSet lookupLeaseSetLocally(Hash key, String dbid); + /** + * Lookup the dbid for a given hash. + * + * @since 0.9.60 + */ + public abstract String getDbidByHash(Hash clientKey); + /** + * Get a set of all sub-netDbs. 
+ * + * @since 0.9.60 + */ + public abstract Set getSubNetDBs(); + /** + * Get a set of all client dbid strings + * + * @since 0.9.60 + */ + public abstract List getClients(); + /** + * Make sure the SNDF is initialized */ - public long getLastRouterInfoPublishTime(String dbid) { - return 0; - } - - public long getLastRouterInfoPublishTime() { - return 0; - } - - public abstract Set getAllRouters(String dbid); - public abstract Set getAllRouters(); - - public int getKnownRouters(String dbid) { - return 0; - } - - public int getKnownRouters() { - return 0; - } - - public int getKnownLeaseSets(String dbid) { - return 0; - } - - public boolean isInitialized(String dbid) { - return true; - } - public boolean isInitialized() { - return true; + return mainNetDB().isInitialized(); } - - public void rescan(String dbid) { - } - - /** Debug only - all user info moved to NetDbRenderer in router console */ - public void renderStatusHTML(Writer out) throws IOException { - } - - /** public for NetDbRenderer in routerconsole */ - public Set getLeases(String dbid) { - return Collections.emptySet(); - } - - /** public for NetDbRenderer in routerconsole */ - public Set getRouters(String dbid) { - return Collections.emptySet(); - } - + /** + * Get a set of all routers + * + * @since 0.9.60 + */ public Set getRouters() { - return Collections.emptySet(); + return mainNetDB().getRouters(); } + /** + * Get a set of all routers known to clients, which should always be zero. + * + * @since 0.9.60 + */ public Set getRoutersKnownToClients() { - return Collections.emptySet(); + Set ris = new HashSet<>(); + Set fndfs = getSubNetDBs(); + for (FloodfillNetworkDatabaseFacade fndf : fndfs) { + ris.addAll(fndf.getRouters()); + } + return ris; } + /** + * Get a set of all leases known to all clients. + * + * @since 0.9.60 + */ public Set getLeasesKnownToClients() { - return Collections.emptySet(); + Set lss = new HashSet<>(); + Set fndfs = getSubNetDBs(); + for (FloodfillNetworkDatabaseFacade fndf : fndfs) { + lss.addAll(fndf.getLeases()); + } + return lss; } - - public List getClients() { - return Collections.emptyList(); - } - - /** @since 0.9.59 */ + /** + * Check if the mainNetDB needs to reseed + * + * @since 0.9.60 + * */ public ReseedChecker reseedChecker() { return mainNetDB().reseedChecker(); }; - - /** - * For convenience, so users don't have to cast to FNDF, and unit tests using - * Dummy NDF will work. - * - * @return false; FNDF overrides to return actual setting - * @since IPv6 - */ - public boolean floodfillEnabled() { - return mainNetDB().floodfillEnabled(); - }; - - /** - * Is it permanently negative cached? 
- * - * @param key only for Destinations; for RouterIdentities, see Banlist - * @since 0.9.59 - */ - public boolean isNegativeCachedForever(Hash key, String dbid) { - return mainNetDB().isNegativeCachedForever(key); - } - - /** - * @param spk unblinded key - * @return BlindData or null - * @since 0.9.59 - */ - public BlindData getBlindData(SigningPublicKey spk) { - return mainNetDB().getBlindData(spk); - } - - public List getLocalClientsBlindData() { - return mainNetDB().getBlindData(); - } - - /** - * @param bd new BlindData to put in the cache - * @since 0.9.59 - */ - public void setBlindData(BlindData bd, String dbid) { - mainNetDB().setBlindData(bd); - } - /** * For console ConfigKeyringHelper * - * @since 0.9.59 + * @since 0.9.60 */ - public List getBlindData(String dbid) { - return mainNetDB().getBlindData(); - } - - /** - * For console ConfigKeyringHelper - * - * @return true if removed - * @since 0.9.59 - */ - public boolean removeBlindData(SigningPublicKey spk, String dbid) { - return mainNetDB().removeBlindData(spk); - } - - /** - * Notify the netDB that the routing key changed at midnight UTC - * - * @since 0.9.59 - */ - public void routingKeyChanged() { - mainNetDB().routingKeyChanged(); - } - - public void lookupLeaseSetRemotely(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, String dbid) { - mainNetDB().lookupLeaseSetRemotely(key, onFindJob, onFailedLookupJob, timeoutMs, key); - } public List lookupClientBySigningPublicKey(SigningPublicKey spk) { return Collections.emptyList(); } - public BlindData getBlindData(SigningPublicKey spk, String dbid) { - return mainNetDB().getBlindData(spk); + /** + * For console ConfigKeyringHelper + * + * @since 0.9.60 + */ + public List getLocalClientsBlindData() { + return Collections.emptyList(); } - - public abstract String getDbidByHash(Hash clientKey); } diff --git a/router/java/src/net/i2p/router/sybil/Analysis.java b/router/java/src/net/i2p/router/sybil/Analysis.java index 732e70c83..4e32daab8 100644 --- a/router/java/src/net/i2p/router/sybil/Analysis.java +++ b/router/java/src/net/i2p/router/sybil/Analysis.java @@ -429,7 +429,7 @@ public class Analysis extends JobImpl implements RouterApp, Runnable { continue; if (! _context.clientManager().shouldPublishLeaseSet(client)) continue; - LeaseSet ls = _context.netDbSegmentor().lookupLeaseSetLocally(client); + LeaseSet ls = _context.mainNetDb().lookupLeaseSetLocally(client); if (ls == null) continue; Hash rkey = ls.getRoutingKey(); diff --git a/router/java/src/net/i2p/router/tasks/Republish.java b/router/java/src/net/i2p/router/tasks/Republish.java index 0e0535405..8b7e5ed7b 100644 --- a/router/java/src/net/i2p/router/tasks/Republish.java +++ b/router/java/src/net/i2p/router/tasks/Republish.java @@ -32,7 +32,7 @@ public class Republish implements SimpleTimer.TimedEvent { try { ri = _context.router().getRouterInfo(); if (ri != null) - _context.netDbSegmentor().publish(ri); + _context.mainNetDb().publish(ri); } catch (IllegalArgumentException iae) { Log log = _context.logManager().getLog(Router.class); // clock skew / shift race? 
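The getRoutersKnownToClients()/getLeasesKnownToClients() defaults added above fold every subDb into one view, which is mainly useful for console and diagnostic output. A sketch of how such a summary might be produced, assuming only accessors that appear in this patch (netDbSegmentor(), mainNetDb(), getKnownRouters()); the class name is illustrative:

import net.i2p.router.RouterContext;

/** Illustrative only: a console-style summary of the segmented netDb views. */
class NetDbSummarySketch {
    static String summarize(RouterContext ctx) {
        // The floodfill view lives in the main netDb ...
        int ffRouters = ctx.mainNetDb().getKnownRouters();
        // ... while the aggregate views fold in every client subDb.
        int clientRouters = ctx.netDbSegmentor().getRoutersKnownToClients().size();
        int clientLeases  = ctx.netDbSegmentor().getLeasesKnownToClients().size();
        return "floodfill view: " + ffRouters + " routers; client subDbs: " +
               clientRouters + " routers, " + clientLeases + " leaseSets";
    }
}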
diff --git a/router/java/src/net/i2p/router/tasks/UpdateRoutingKeyModifierJob.java b/router/java/src/net/i2p/router/tasks/UpdateRoutingKeyModifierJob.java index 1cac4ef4f..dfda4a1d0 100644 --- a/router/java/src/net/i2p/router/tasks/UpdateRoutingKeyModifierJob.java +++ b/router/java/src/net/i2p/router/tasks/UpdateRoutingKeyModifierJob.java @@ -39,7 +39,7 @@ public class UpdateRoutingKeyModifierJob extends JobImpl { // tell netdb if mod data changed boolean changed = gen.generateDateBasedModData(); if (changed) - getContext().netDbSegmentor().routingKeyChanged(); + getContext().mainNetDb().routingKeyChanged(); requeue(delay); } } diff --git a/router/java/src/net/i2p/router/transport/GeoIP.java b/router/java/src/net/i2p/router/transport/GeoIP.java index d1b989c57..9a808cf87 100644 --- a/router/java/src/net/i2p/router/transport/GeoIP.java +++ b/router/java/src/net/i2p/router/transport/GeoIP.java @@ -755,7 +755,7 @@ public class GeoIP { * @since 0.9.48 */ private static void banCountry(RouterContext ctx, String country) { - for (Hash h : ctx.netDbSegmentor().getAllRouters()) { + for (Hash h : ctx.mainNetDb().getAllRouters()) { String hisCountry = ctx.commSystem().getCountry(h); if (country.equals(hisCountry)) { ctx.banlist().banlistRouterHard(h, "In our country"); diff --git a/router/java/src/net/i2p/router/transport/TransportImpl.java b/router/java/src/net/i2p/router/transport/TransportImpl.java index 478b61bdb..fec52f03a 100644 --- a/router/java/src/net/i2p/router/transport/TransportImpl.java +++ b/router/java/src/net/i2p/router/transport/TransportImpl.java @@ -199,7 +199,7 @@ public abstract class TransportImpl implements Transport { break; } - if (_context.netDbSegmentor().floodfillEnabled()) { + if (_context.mainNetDb().floodfillEnabled()) { // && !SystemVersion.isWindows()) { def *= 17; def /= 10; } diff --git a/router/java/src/net/i2p/router/transport/ntcp/InboundEstablishState.java b/router/java/src/net/i2p/router/transport/ntcp/InboundEstablishState.java index b9f52291e..d332428dc 100644 --- a/router/java/src/net/i2p/router/transport/ntcp/InboundEstablishState.java +++ b/router/java/src/net/i2p/router/transport/ntcp/InboundEstablishState.java @@ -689,7 +689,7 @@ class InboundEstablishState extends EstablishBase implements NTCP2Payload.Payloa } try { - RouterInfo old = _context.netDbSegmentor().store(h, ri); + RouterInfo old = _context.mainNetDb().store(h, ri); if (flood && !ri.equals(old)) { FloodfillNetworkDatabaseFacade fndf = (FloodfillNetworkDatabaseFacade) _context.mainNetDb(); if (fndf.floodConditional(ri)) { diff --git a/router/java/src/net/i2p/router/transport/ntcp/NTCPConnection.java b/router/java/src/net/i2p/router/transport/ntcp/NTCPConnection.java index 6d437dd30..e1639736e 100644 --- a/router/java/src/net/i2p/router/transport/ntcp/NTCPConnection.java +++ b/router/java/src/net/i2p/router/transport/ntcp/NTCPConnection.java @@ -1668,7 +1668,7 @@ public class NTCPConnection implements Closeable { try { if (h.equals(_context.routerHash())) return; - RouterInfo old = _context.netDbSegmentor().store(h, ri); + RouterInfo old = _context.mainNetDb().store(h, ri); if (flood && !ri.equals(old)) { FloodfillNetworkDatabaseFacade fndf = (FloodfillNetworkDatabaseFacade) _context.mainNetDb(); if ((old == null || ri.getPublished() > old.getPublished()) && diff --git a/router/java/src/net/i2p/router/transport/udp/InboundEstablishState2.java b/router/java/src/net/i2p/router/transport/udp/InboundEstablishState2.java index 4190eaeb6..4bea8ca62 100644 --- 
a/router/java/src/net/i2p/router/transport/udp/InboundEstablishState2.java +++ b/router/java/src/net/i2p/router/transport/udp/InboundEstablishState2.java @@ -374,7 +374,7 @@ class InboundEstablishState2 extends InboundEstablishState implements SSU2Payloa _mtu = mtu; try { - RouterInfo old = _context.netDbSegmentor().store(h, ri); + RouterInfo old = _context.mainNetDb().store(h, ri); if (flood && !ri.equals(old)) { FloodfillNetworkDatabaseFacade fndf = (FloodfillNetworkDatabaseFacade) _context.mainNetDb(); if (fndf.floodConditional(ri)) { diff --git a/router/java/src/net/i2p/router/transport/udp/PeerState2.java b/router/java/src/net/i2p/router/transport/udp/PeerState2.java index 392952d11..649774938 100644 --- a/router/java/src/net/i2p/router/transport/udp/PeerState2.java +++ b/router/java/src/net/i2p/router/transport/udp/PeerState2.java @@ -632,7 +632,7 @@ public class PeerState2 extends PeerState implements SSU2Payload.PayloadCallback Hash h = ri.getHash(); if (h.equals(_context.routerHash())) return; - RouterInfo old = _context.netDbSegmentor().store(h, ri); + RouterInfo old = _context.mainNetDb().store(h, ri); if (flood && !ri.equals(old)) { FloodfillNetworkDatabaseFacade fndf = (FloodfillNetworkDatabaseFacade) _context.mainNetDb(); if ((old == null || ri.getPublished() > old.getPublished()) && diff --git a/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java b/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java index 130560069..14774fa6e 100644 --- a/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java +++ b/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java @@ -2568,7 +2568,7 @@ class PeerTestManager { Hash h = ri.getHash(); if (h.equals(_context.routerHash())) return; - _context.netDbSegmentor().store(h, ri); + _context.mainNetDb().store(h, ri); // ignore flood request } catch (IllegalArgumentException iae) { if (_log.shouldWarn()) diff --git a/router/java/src/net/i2p/router/transport/udp/UDPTransport.java b/router/java/src/net/i2p/router/transport/udp/UDPTransport.java index f85dda8c2..1675ce4ab 100644 --- a/router/java/src/net/i2p/router/transport/udp/UDPTransport.java +++ b/router/java/src/net/i2p/router/transport/udp/UDPTransport.java @@ -3459,7 +3459,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority (!_context.router().isHidden()) && (!introducersRequired(ipv6)) && haveCapacity() && - (!_context.netDbSegmentor().floodfillEnabled()) && + (!_context.mainNetDb().floodfillEnabled()) && (!ipv6 || _haveIPv6Address) && ((!ipv6 && getIPv6Config() != IPV6_ONLY) || (ipv6 && getIPv6Config() != IPV6_DISABLED)) && @@ -3849,7 +3849,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority boolean shouldPingFirewall = !STATUS_OK.contains(_reachabilityStatus); int currentListenPort = getListenPort(false); boolean pingOneOnly = shouldPingFirewall && getExternalPort(false) == currentListenPort; - boolean shortLoop = shouldPingFirewall || !haveCap || _context.netDbSegmentor().floodfillEnabled(); + boolean shortLoop = shouldPingFirewall || !haveCap || _context.mainNetDb().floodfillEnabled(); long loopTime = shortLoop ? 
SHORT_LOOP_TIME : LONG_LOOP_TIME; _lastLoopShort = shortLoop; _expireBuffer.clear(); diff --git a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java index 76a86c06b..1586bd004 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java +++ b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java @@ -140,9 +140,9 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver { return; RouterInfo oldri = null; if (_client != null) - oldri = _context.netDbSegmentor().lookupRouterInfoLocally(key, _client.toBase32()); + oldri = _context.clientNetDb(_client).lookupRouterInfoLocally(key); else - oldri = _context.netDbSegmentor().lookupRouterInfoLocally(key, FloodfillNetworkDatabaseSegmentor.MAIN_DBID); + oldri = _context.mainNetDb().lookupRouterInfoLocally(key); // only update if RI is newer and non-ff if (oldri != null && oldri.getPublished() < ri.getPublished() && !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) { @@ -271,7 +271,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver { if (dsm.getEntry().isLeaseSet()) { if (_log.shouldLog(Log.INFO)) _log.info("[client: " + _clientNickname + "] Saving LS DSM from client tunnel."); - FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.netDbSegmentor().getSubNetDB(dbid)); + FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_client)); Job j = _FDSMH.createJob(msg, null, null); j.runJob(); if (sz > 0) { @@ -391,11 +391,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver { _log.info("Storing garlic LS down tunnel for: " + dsm.getKey() + " sent to: " + _clientNickname + " (" + (_client != null ? _client.toBase32() : ") router")); - - String dbid = null; - if (_client != null) - dbid = _context.netDbSegmentor().getDbidByHash(_client); - if (dbid != null) { + if (_client != null) { // We need to replicate some of the handling that was previously // performed when these types of messages were passed back to // the inNetMessagePool. @@ -406,8 +402,8 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver { dsm.setReceivedAsReply(); // ToDo: This should actually have a try and catch.
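One possible shape for the try/catch that the ToDo above asks for, sketched against the calls used in this hunk and reusing the surrounding method's fields (_context, _client, _log, data); catching RuntimeException is an assumption, not something this patch specifies:

// Sketch only, not part of this patch: wrap the handler job as the ToDo suggests.
FloodfillDatabaseStoreMessageHandler fdsmh =
    new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_client));
Job j = fdsmh.createJob(data, null, null);
try {
    j.runJob();
} catch (RuntimeException re) {
    // Assumed failure mode; log and drop rather than killing the distributor.
    if (_log.shouldLog(Log.WARN))
        _log.warn("Failed to store garlic LS in client subDb for " + _client.toBase32(), re);
}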
if (_log.shouldLog(Log.INFO)) - _log.info("Store the LS in the correct dbid subDb: " + dbid); - FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.netDbSegmentor().getSubNetDB(dbid)); + _log.info("Store the LS in the correct dbid subDb: " + _client.toBase32()); + FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_client)); Job j = _FDSMH.createJob(data, null, null); j.runJob(); if (sz > 0) { diff --git a/router/java/src/net/i2p/router/tunnel/pool/AliasedTunnelPool.java b/router/java/src/net/i2p/router/tunnel/pool/AliasedTunnelPool.java index 204245e78..8e092d8d3 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/AliasedTunnelPool.java +++ b/router/java/src/net/i2p/router/tunnel/pool/AliasedTunnelPool.java @@ -115,8 +115,7 @@ public class AliasedTunnelPool extends TunnelPool { @Override protected LeaseSet locked_buildNewLeaseSet() { - String base32 = _aliasOf.getSettings().getDestination().toBase32(); - LeaseSet ls = _context.netDbSegmentor().lookupLeaseSetLocally(_aliasOf.getSettings().getDestination(), base32); + LeaseSet ls = _context.clientNetDb(_aliasOf.getSettings().getDestination()).lookupLeaseSetLocally(_aliasOf.getSettings().getDestination()); if (ls == null) return null; // copy everything so it isn't corrupted diff --git a/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java b/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java index 4b21c6feb..e7e36d291 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java +++ b/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java @@ -280,7 +280,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector { } else { // If well connected or ff, don't pick from high cap // even during congestion, because congestion starts from the top - if (active > 500 || ctx.netDbSegmentor().floodfillEnabled()) + if (active > 500 || ctx.mainNetDb().floodfillEnabled()) return false; failPct = getExploratoryFailPercentage(); diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java index c7da42ef2..db3ab8994 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java @@ -357,7 +357,7 @@ public class TunnelPool { return rv; } // TODO high-bw non-ff also - if (_context.netDbSegmentor().floodfillEnabled() && + if (_context.mainNetDb().floodfillEnabled() && _context.router().getUptime() > 5*60*1000) { rv += 2; }
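The TunnelPool change above gates an extra allotment of tunnels on the router acting as a floodfill. The same rule, pulled out as a standalone sketch; names are illustrative, and only the mainNetDb().floodfillEnabled() and router().getUptime() calls from this hunk are assumed:

import net.i2p.router.RouterContext;

/** Illustrative only: the floodfill-aware sizing rule used in TunnelPool above. */
class FloodfillTunnelBonusSketch {
    private static final long WARMUP = 5*60*1000;

    /** @return extra tunnels to build once a floodfill router has warmed up */
    static int floodfillBonus(RouterContext ctx) {
        // Floodfills carry extra lookup/store traffic, so add capacity, but
        // only after the router has been up long enough to be well-integrated.
        if (ctx.mainNetDb().floodfillEnabled() && ctx.router().getUptime() > WARMUP)
            return 2;
        return 0;
    }
}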