From 2033712a91ea4b7f246107bd907e84e3fc642439 Mon Sep 17 00:00:00 2001
From: Tanguy
Date: Tue, 22 Nov 2022 19:01:30 +0100
Subject: [PATCH 001/112] [skip ci] Start of upgrade refacto

---
 config.nims                          |   1 +
 libp2p/builders.nim                  |   4 +-
 libp2p/connmanager.nim               |  55 +++++-----
 libp2p/dialer.nim                    |  77 ++++++-------
 libp2p/multistream.nim               | 110 ++++++++++---------
 libp2p/muxers/muxer.nim              |  39 +------
 libp2p/peerstore.nim                 |  31 ++++++
 libp2p/switch.nim                    |  63 +++--------
 libp2p/transports/tortransport.nim   |   1 +
 libp2p/transports/transport.nim      |   5 +-
 libp2p/upgrademngrs/muxedupgrade.nim | 158 ++++-----------------------
 libp2p/upgrademngrs/upgrade.nim      |  39 ++-----
 tests/testnoise.nim                  |   2 +-
 13 files changed, 216 insertions(+), 369 deletions(-)

diff --git a/config.nims b/config.nims
index da72649d44..4412457b67 100644
--- a/config.nims
+++ b/config.nims
@@ -10,6 +10,7 @@ switch("warning", "LockLevel:off")
 if (NimMajor, NimMinor) < (1, 6):
   --styleCheck:hint
 else:
+  switch("warningAsError", "UseBase:on")
   --styleCheck:error
 
 # Avoid some rare stack corruption while using exceptions with a SEH-enabled
diff --git a/libp2p/builders.nim b/libp2p/builders.nim
index fdff75ba0a..479e9aec2c 100644
--- a/libp2p/builders.nim
+++ b/libp2p/builders.nim
@@ -225,7 +225,7 @@ proc build*(b: SwitchBuilder): Switch
     identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
     connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
     ms = MultistreamSelect.new()
-    muxedUpgrade = MuxedUpgrade.new(identify, b.muxers, secureManagerInstances, connManager, ms)
+    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
 
   let
     transports = block:
@@ -249,7 +249,7 @@ proc build*(b: SwitchBuilder): Switch
   let switch = newSwitch(
     peerInfo = peerInfo,
     transports = transports,
-    identity = identify,
+    identify = identify,
     secureManagers = secureManagerInstances,
     connManager = connManager,
     ms = ms,
diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim
index b7ac3d87aa..2fbac126e4 100644
--- a/libp2p/connmanager.nim
+++ b/libp2p/connmanager.nim
@@ -67,16 +67,12 @@ type
   PeerEventHandler* =
     proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
 
-  MuxerHolder = object
-    muxer: Muxer
-    handle: Future[void]
-
   ConnManager* = ref object of RootObj
     maxConnsPerPeer: int
     inSema*: AsyncSemaphore
     outSema*: AsyncSemaphore
     conns: Table[PeerId, HashSet[Connection]]
-    muxed: Table[Connection, MuxerHolder]
+    muxed: Table[Connection, Muxer]
     connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
     peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
     peerStore*: PeerStore
@@ -241,18 +237,18 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
   if conn notin c.muxed:
     return
 
-  return muxer == c.muxed.getOrDefault(conn).muxer
+  return muxer == c.muxed.getOrDefault(conn)
 
-proc closeMuxerHolder(muxerHolder: MuxerHolder) {.async.} =
-  trace "Cleaning up muxer", m = muxerHolder.muxer
+proc closeMuxer(muxer: Muxer) {.async.} =
+  trace "Cleaning up muxer", m = muxer
 
-  await muxerHolder.muxer.close()
-  if not(isNil(muxerHolder.handle)):
+  await muxer.close()
+  if not(isNil(muxer.handle)):
     try:
-      await muxerHolder.handle # TODO noraises?
+      await muxer.handle # TODO noraises?
     except CatchableError as exc:
       trace "Exception in close muxer handler", exc = exc.msg
-  trace "Cleaned up muxer", m = muxerHolder.muxer
+  trace "Cleaned up muxer", m = muxer
 
 proc delConn(c: ConnManager, conn: Connection) =
   let peerId = conn.peerId
@@ -273,15 +269,13 @@ proc cleanupConn(c: ConnManager, conn: Connection) {.async.} =
     return
 
   # Remove connection from all tables without async breaks
-  var muxer = some(MuxerHolder())
-  if not c.muxed.pop(conn, muxer.get()):
-    muxer = none(MuxerHolder)
-
+  var muxer = c.muxed.getOrDefault(conn)
+  c.muxed.del(conn)
   delConn(c, conn)
 
   try:
-    if muxer.isSome:
-      await closeMuxerHolder(muxer.get())
+    if not muxer.isNil:
+      await closeMuxer(muxer)
   finally:
     await conn.close()
 
@@ -373,7 +367,7 @@ proc selectMuxer*(c: ConnManager, conn: Connection): Muxer =
     return
 
   if conn in c.muxed:
-    return c.muxed.getOrDefault(conn).muxer
+    return c.muxed.getOrDefault(conn)
   else:
     debug "no muxer for connection", conn
 
@@ -440,8 +434,7 @@ proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
   asyncSpawn semaphoreMonitor()
 
 proc storeMuxer*(c: ConnManager,
-                 muxer: Muxer,
-                 handle: Future[void] = nil)
+                 muxer: Muxer)
                  {.raises: [Defect, CatchableError].} =
   ## store the connection and muxer
   ##
@@ -455,12 +448,10 @@ proc storeMuxer*(c: ConnManager,
   if muxer.connection notin c:
     raise newException(CatchableError, "cant add muxer for untracked connection")
 
-  c.muxed[muxer.connection] = MuxerHolder(
-    muxer: muxer,
-    handle: handle)
+  c.muxed[muxer.connection] = muxer
 
   trace "Stored muxer",
-    muxer, handle = not handle.isNil, connections = c.conns.len
+    muxer, connections = c.conns.len
 
   asyncSpawn c.onConnUpgraded(muxer.connection)
 
@@ -493,6 +484,14 @@ proc getStream*(c: ConnManager,
   if not(isNil(muxer)):
     return await muxer.newStream()
 
+proc getStream*(c: ConnManager,
+                muxer: Muxer): Future[Connection] {.async, gcsafe.} =
+  ## get a muxed stream for the passed muxer
+  ##
+
+  if not(isNil(muxer)):
+    return await muxer.newStream()
+
 proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
   ## drop connections and cleanup resources for peer
   ##
@@ -502,14 +501,14 @@ proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
     trace "Removing connection", conn
     delConn(c, conn)
 
-  var muxers: seq[MuxerHolder]
+  var muxers: seq[Muxer]
   for conn in conns:
     if conn in c.muxed:
       muxers.add c.muxed[conn]
       c.muxed.del(conn)
 
   for muxer in muxers:
-    await closeMuxerHolder(muxer)
+    await closeMuxer(muxer)
 
   for conn in conns:
     await conn.close()
@@ -530,7 +529,7 @@ proc close*(c: ConnManager) {.async.} =
   c.muxed.clear()
 
   for _, muxer in muxed:
-    await closeMuxerHolder(muxer)
+    await closeMuxer(muxer)
 
   for _, conns2 in conns:
     for conn in conns2:
diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim
index d3c0826ddf..6b40c162a7 100644
--- a/libp2p/dialer.nim
+++ b/libp2p/dialer.nim
@@ -18,6 +18,7 @@ import dial,
        peerid,
        peerinfo,
        multicodec,
+       muxers/muxer,
       multistream,
       connmanager,
       stream/connection,
@@ -52,7 +53,7 @@ proc dialAndUpgrade(
   peerId: Opt[PeerId],
   hostname: string,
   address: MultiAddress):
-  Future[Connection] {.async.} =
+  Future[Muxer] {.async.} =
 
   for transport in self.transports: # for each transport
     if transport.handles(address): # check if it can dial it
@@ -75,7 +76,7 @@ proc dialAndUpgrade(
 
       libp2p_successful_dials.inc()
 
-      let conn =
+      let mux =
         try:
           await transport.upgradeOutgoing(dialed, peerId)
         except CatchableError as exc:
@@ -89,9 +90,9 @@ proc dialAndUpgrade(
           # Try other address
           return nil
 
-      doAssert not isNil(conn), "connection died after upgradeOutgoing"
-      debug "Dial successful", conn, peerId = conn.peerId
-      return conn
+      doAssert not isNil(mux), "connection died after upgradeOutgoing"
+      debug "Dial successful", peerId = mux.connection.peerId
+      return mux
   return nil
 
 proc expandDnsAddr(
@@ -126,7 +127,7 @@ proc dialAndUpgrade(
   self: Dialer,
   peerId: Opt[PeerId],
   addrs: seq[MultiAddress]):
-  Future[Connection] {.async.} =
+  Future[Muxer] {.async.} =
 
   debug "Dialing peer", peerId
 
@@ -152,7 +153,7 @@ proc internalConnect(
   peerId: Opt[PeerId],
   addrs: seq[MultiAddress],
   forceDial: bool):
-  Future[Connection] {.async.} =
+  Future[Muxer] {.async.} =
   if Opt.some(self.localPeerId) == peerId:
     raise newException(CatchableError, "can't dial self!")
 
@@ -162,43 +163,45 @@ proc internalConnect(
     await lock.acquire()
 
     # Check if we have a connection already and try to reuse it
-    var conn =
-      if peerId.isSome: self.connManager.selectConn(peerId.get())
+    var mux =
+      if peerId.isSome: self.connManager.selectMuxer(self.connManager.selectConn(peerId.get()))
       else: nil
-    if conn != nil:
-      if conn.atEof or conn.closed:
-        # This connection should already have been removed from the connection
-        # manager - it's essentially a bug that we end up here - we'll fail
-        # for now, hoping that this will clean themselves up later...
-        warn "dead connection in connection manager", conn
-        await conn.close()
-        raise newException(DialFailedError, "Zombie connection encountered")
-
-      trace "Reusing existing connection", conn, direction = $conn.dir
-      return conn
+    if mux != nil:
+      #if mux.atEof or mux.closed:
+      #  # This connection should already have been removed from the connection
+      #  # manager - it's essentially a bug that we end up here - we'll fail
+      #  # for now, hoping that this will clean themselves up later...
+      #  warn "dead connection in connection manager", conn
+      #  await conn.close()
+      #  raise newException(DialFailedError, "Zombie connection encountered")
+
+      trace "Reusing existing connection", direction = $mux.connection.dir
+      return mux
 
     let slot = await self.connManager.getOutgoingSlot(forceDial)
-    conn =
+    mux =
       try:
         await self.dialAndUpgrade(peerId, addrs)
       except CatchableError as exc:
        slot.release()
        raise exc
-    slot.trackConnection(conn)
-    if isNil(conn): # None of the addresses connected
+    #TODO
+    #slot.trackConnection(conn)
+    if isNil(mux): # None of the addresses connected
       raise newException(DialFailedError, "Unable to establish outgoing link")
 
-    # A disconnect could have happened right after
-    # we've added the connection so we check again
-    # to prevent races due to that.
-    if conn.closed() or conn.atEof():
-      # This can happen when the other ends drops us
-      # before we get a chance to return the connection
-      # back to the dialer.
-      trace "Connection dead on arrival", conn
-      raise newLPStreamClosedError()
-
-    return conn
+    # A disconnect could have happened right after
+    # we've added the connection so we check again
+    # to prevent races due to that.
+    # TODO
+    #if conn.closed() or conn.atEof():
+    #  # This can happen when the other ends drops us
+    #  # before we get a chance to return the connection
+    #  # back to the dialer.
+    #  trace "Connection dead on arrival", conn
+    #  raise newLPStreamClosedError()
+
+    return mux
   finally:
     if lock.locked():
       lock.release()
@@ -223,7 +226,7 @@ method connect*(
   ): Future[PeerId] {.async.} =
   ## Connects to a peer and retrieve its PeerId
 
-  return (await self.internalConnect(Opt.none(PeerId), addrs, false)).peerId
+  return (await self.internalConnect(Opt.none(PeerId), addrs, false)).connection.peerId
 
 proc negotiateStream(
   self: Dialer,
@@ -248,11 +251,11 @@ method tryDial*(
   trace "Check if it can dial", peerId, addrs
   try:
-    let conn = await self.dialAndUpgrade(Opt.some(peerId), addrs)
-    if conn.isNil():
+    let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
+    if mux.isNil():
       raise newException(DialFailedError, "No valid multiaddress")
-    await conn.close()
-    return conn.observedAddr
+    await mux.close()
+    return mux.connection.observedAddr
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
@@ -284,7 +287,7 @@ method dial*(
   ##
 
   var
-    conn: Connection
+    conn: Muxer
    stream: Connection
 
   proc cleanup() {.async.} =
diff --git a/libp2p/multistream.nim b/libp2p/multistream.nim
index e2797a6a36..6e22da9991 100644
--- a/libp2p/multistream.nim
+++ b/libp2p/multistream.nim
@@ -24,9 +24,8 @@ const
   MsgSize* = 64*1024
   Codec* = "/multistream/1.0.0"
 
-  MSCodec* = "\x13" & Codec & "\n"
-  Na* = "\x03na\n"
-  Ls* = "\x03ls\n"
+  Na* = "na\n"
+  Ls* = "ls\n"
 
 type
   Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
@@ -43,7 +42,7 @@ type
     codec*: string
 
 proc new*(T: typedesc[MultistreamSelect]): T =
-  T(codec: MSCodec)
+  T(codec: Codec)
 
 template validateSuffix(str: string): untyped =
   if str.endsWith("\n"):
@@ -57,7 +56,7 @@ proc select*(m: MultistreamSelect,
              Future[string] {.async.} =
   trace "initiating handshake", conn, codec = m.codec
   ## select a remote protocol
-  await conn.write(m.codec) # write handshake
+  await conn.writeLp(Codec & "\n") # write handshake
   if proto.len() > 0:
     trace "selecting proto", conn, proto = proto[0]
     await conn.writeLp((proto[0] & "\n")) # select proto
@@ -116,7 +115,7 @@ proc list*(m: MultistreamSelect,
   if not await m.select(conn):
     return
 
-  await conn.write(Ls) # send ls
+  await conn.writeLp(Ls) # send ls
 
   var list = newSeq[string]()
   let ms = string.fromBytes(await conn.readLp(MsgSize))
@@ -126,55 +125,64 @@ proc list*(m: MultistreamSelect,
 
   result = list
 
+proc handle*(
+  _: type MultistreamSelect,
+  conn: Connection,
+  protos: seq[string],
+  active: bool = false,
+  ): Future[string] {.async, gcsafe.} =
+  trace "Starting multistream negotiation", conn, handshaked = active
+  var handshaked = active
+  while not conn.atEof:
+    var ms = string.fromBytes(await conn.readLp(MsgSize))
+    validateSuffix(ms)
+
+    if not handshaked and ms != Codec:
+      debug "expected handshake message", conn, instead=ms
+      raise newException(CatchableError,
+                         "MultistreamSelect handling failed, invalid first message")
+
+    trace "handle: got request", conn, ms
+    if ms.len() <= 0:
+      trace "handle: invalid proto", conn
+      await conn.writeLp(Na)
+
+    case ms:
+    of "ls":
+      trace "handle: listing protos", conn
+      await conn.writeLp(protos.join("\n"))
+    of Codec:
+      if not handshaked:
+        await conn.writeLp(Codec & "\n")
+        handshaked = true
+      else:
+        trace "handle: sending `na` for duplicate handshake while handshaked",
+          conn
+        await conn.writeLp(Na)
+    else:
+      if ms in protos:
+        trace "found handler", conn, protocol = ms
+        await conn.writeLp(ms & "\n")
+        conn.protocol = ms
+        return ms
+      trace "no handlers", conn, protocol = ms
+      await conn.writeLp(Na)
+
 proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
   trace "Starting multistream handler", conn, handshaked = active
   var handshaked = active
+  var protos: seq[string]
+  for h in m.handlers:
+    for proto in h.protos:
+      protos.add(proto)
+
   try:
-    while not conn.atEof:
-      var ms = string.fromBytes(await conn.readLp(MsgSize))
-      validateSuffix(ms)
-
-      if not handshaked and ms != Codec:
-        notice "expected handshake message", conn, instead=ms
-        raise newException(CatchableError,
-                           "MultistreamSelect handling failed, invalid first message")
-
-      trace "handle: got request", conn, ms
-      if ms.len() <= 0:
-        trace "handle: invalid proto", conn
-        await conn.write(Na)
-
-      if m.handlers.len() == 0:
-        trace "handle: sending `na` for protocol", conn, protocol = ms
-        await conn.write(Na)
-        continue
-
-      case ms:
-      of "ls":
-        trace "handle: listing protos", conn
-        var protos = ""
-        for h in m.handlers:
-          for proto in h.protos:
-            protos &= (proto & "\n")
-        await conn.writeLp(protos)
-      of Codec:
-        if not handshaked:
-          await conn.write(m.codec)
-          handshaked = true
-        else:
-          trace "handle: sending `na` for duplicate handshake while handshaked",
-            conn
-          await conn.write(Na)
-      else:
-        for h in m.handlers:
-          if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
-            trace "found handler", conn, protocol = ms
-            await conn.writeLp(ms & "\n")
-            conn.protocol = ms
-            await h.protocol.handler(conn, ms)
-            return
-        debug "no handlers", conn, protocol = ms
-        await conn.write(Na)
+    let negotiated = await MultistreamSelect.handle(conn, protos, active)
+    for h in m.handlers:
+      if h.protos.contains(negotiated):
+        await h.protocol.handler(conn, negotiated)
+        return
+    debug "no handlers", conn, negotiated
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
diff --git a/libp2p/muxers/muxer.nim b/libp2p/muxers/muxer.nim
index 69e57e131b..6934c900e9 100644
--- a/libp2p/muxers/muxer.nim
+++ b/libp2p/muxers/muxer.nim
@@ -32,16 +32,16 @@ type
   Muxer* = ref object of RootObj
     streamHandler*: StreamHandler
+    handler*: Future[void]
    connection*: Connection
 
   # user provider proc that returns a constructed Muxer
   MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
 
   # this wraps a creator proc that knows how to make muxers
-  MuxerProvider* = ref object of LPProtocol
+  MuxerProvider* = ref object
     newMuxer*: MuxerConstructor
-    streamHandler*: StreamHandler # triggered every time there is a new stream, called for any muxer instance
-    muxerHandler*: MuxerHandler # triggered every time there is a new muxed connection created
+    codec*: string
 
 func shortLog*(m: Muxer): auto = shortLog(m.connection)
 chronicles.formatIt(Muxer): shortLog(it)
@@ -57,36 +57,5 @@ proc new*(
   creator: MuxerConstructor,
   codec: string): T {.gcsafe.} =
 
-  let muxerProvider = T(newMuxer: creator)
-  muxerProvider.codec = codec
-  muxerProvider.init()
+  let muxerProvider = T(newMuxer: creator, codec: codec)
   muxerProvider
-
-method init(c: MuxerProvider) =
-  proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
-    trace "starting muxer handler", proto=proto, conn
-    try:
-      let
-        muxer = c.newMuxer(conn)
-
-      if not isNil(c.streamHandler):
-        muxer.streamHandler = c.streamHandler
-
-      var futs = newSeq[Future[void]]()
-      futs &= muxer.handle()
-
-      # finally await both the futures
-      if not isNil(c.muxerHandler):
-        await c.muxerHandler(muxer)
-        when defined(libp2p_agents_metrics):
-          conn.shortAgent = muxer.connection.shortAgent
-
-      checkFutures(await allFinished(futs))
-    except CancelledError as exc:
-      raise exc
-    except CatchableError as exc:
-      trace "exception in muxer handler", exc = exc.msg, conn, proto
-    finally:
-      await conn.close()
-
-  c.handler = handler
diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim
index 0084a90acd..205ac96a1b 100644
--- a/libp2p/peerstore.nim
+++ b/libp2p/peerstore.nim
@@ -28,11 +28,16 @@ else:
 
 import
   std/[tables, sets, options, macros],
+  chronos,
  ./crypto/crypto,
  ./protocols/identify,
+  ./protocols/protocol,
  ./peerid, ./peerinfo,
  ./routing_record,
  ./multiaddress,
+  ./stream/connection,
+  ./multistream,
+  ./muxers/muxer,
  utility
 
 type
@@ -186,3 +191,29 @@ proc cleanup*(
   while peerStore.toClean.len > peerStore.capacity:
     peerStore.del(peerStore.toClean[0])
     peerStore.toClean.delete(0)
+
+proc identify*(
+  peerStore: PeerStore,
+  ms: MultistreamSelect,
+  identify: Identify,
+  muxer: Muxer) {.async.} =
+
+  # new stream for identify
+  var stream = await muxer.newStream()
+  if stream == nil:
+    return
+
+  try:
+    if (await ms.select(stream, identify.codec())):
+      let info = await identify.identify(stream, stream.peerId)
+
+      when defined(libp2p_agents_metrics):
+        muxer.connection.shortAgent = "unknown"
+        if info.agentVersion.isSome and info.agentVersion.get().len > 0:
+          let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
+          if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
+            muxer.connection.shortAgent = shortAgent.get()
+
+      peerStore.updatePeerInfo(info)
+  finally:
+    await stream.closeWithEOF()
diff --git a/libp2p/switch.nim b/libp2p/switch.nim
index b6fc22fecb..374c0c2d40 100644
--- a/libp2p/switch.nim
+++ b/libp2p/switch.nim
@@ -60,12 +60,10 @@ logScope:
 
 declareCounter(libp2p_failed_upgrades_incoming, "incoming connections failed upgrades")
 
-const
-  ConcurrentUpgrades* = 4
-
 type
   Switch* {.public.} = ref object of Dial
     peerInfo*: PeerInfo
+    identify*: Identify
    connManager*: ConnManager
    transports*: seq[Transport]
    ms*: MultistreamSelect
@@ -208,56 +206,23 @@ proc upgradeMonitor(conn: Connection, upgrades: AsyncSemaphore) {.async.} =
   finally:
     upgrades.release() # don't forget to release the slot!
 
+
 proc accept(s: Switch, transport: Transport) {.async.} = # noraises
   ## switch accept loop, ran for every transport
   ##
 
-  let upgrades = newAsyncSemaphore(ConcurrentUpgrades)
+  let upgrades = newAsyncSemaphore(4)
   while transport.running:
-    var conn: Connection
+    let
+      conn = await transport.accept()
     try:
-      debug "About to accept incoming connection"
-      # remember to always release the slot when
-      # the upgrade succeeds or fails, this is
-      # currently done by the `upgradeMonitor`
-      await upgrades.acquire()    # first wait for an upgrade slot to become available
-      let slot = await s.connManager.getIncomingSlot()
-      conn =
-        try:
-          await transport.accept()
-        except CatchableError as exc:
-          slot.release()
-          raise exc
-      slot.trackConnection(conn)
-      if isNil(conn):
-        # A nil connection means that we might have hit a
-        # file-handle limit (or another non-fatal error),
-        # we can get one on the next try, but we should
-        # be careful to not end up in a thigh loop that
-        # will starve the main event loop, thus we sleep
-        # here before retrying.
-        trace "Unable to get a connection, sleeping"
-        await sleepAsync(100.millis) # TODO: should be configurable?
-        upgrades.release()
-        continue
-
-      # set the direction of this bottom level transport
-      # in order to be able to consume this information in gossipsub if required
-      # gossipsub gives priority to connections we make
-      conn.transportDir = Direction.In
-
-      debug "Accepted an incoming connection", conn
-      asyncSpawn upgradeMonitor(conn, upgrades)
-      asyncSpawn transport.upgradeIncoming(conn)
-    except CancelledError as exc:
-      trace "releasing semaphore on cancellation"
-      upgrades.release() # always release the slot
+      let muxed = await transport.upgradeIncoming(conn)
+      await s.peerStore.identify(s.ms, s.identify, muxed)
+      s.connManager.storeConn(muxed.connection)
+      s.connManager.storeMuxer(muxed)
     except CatchableError as exc:
-      debug "Exception in accept loop, exiting", exc = exc.msg
-      upgrades.release() # always release the slot
-      if not isNil(conn):
-        await conn.close()
-      return
+      debug "Failed to store connection", msg=exc.msg
+      await conn.close()
 
 proc stop*(s: Switch) {.async, public.} =
   ## Stop listening on every transport, and
@@ -332,7 +297,7 @@ proc start*(s: Switch) {.async, gcsafe, public.} =
 
 proc newSwitch*(peerInfo: PeerInfo,
                 transports: seq[Transport],
-                identity: Identify,
+                identify: Identify,
                 secureManagers: openArray[Secure] = [],
                 connManager: ConnManager,
                 ms: MultistreamSelect,
@@ -348,9 +313,11 @@ proc newSwitch*(peerInfo: PeerInfo,
     transports: transports,
     connManager: connManager,
     peerStore: peerStore,
+    identify: identify,
    dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
    nameResolver: nameResolver)
 
   switch.connManager.peerStore = peerStore
-  switch.mount(identity)
+  #TODO identify could be part of the PeerStore instead
+  switch.mount(identify)
   return switch
diff --git a/libp2p/transports/tortransport.nim b/libp2p/transports/tortransport.nim
index e978be90c0..095c1089af 100644
--- a/libp2p/transports/tortransport.nim
+++ b/libp2p/transports/tortransport.nim
@@ -265,6 +265,7 @@ proc new*(
   let torSwitch = T(
     peerInfo: switch.peerInfo,
     ms: switch.ms,
+    identify: switch.identify,
    transports: switch.transports,
    connManager: switch.connManager,
    peerStore: switch.peerStore,
diff --git a/libp2p/transports/transport.nim b/libp2p/transports/transport.nim
index 12d1a08033..ac92b7a9e0 100644
--- a/libp2p/transports/transport.nim
+++ b/libp2p/transports/transport.nim
@@ -18,6 +18,7 @@ import chronos, chronicles
 import ../stream/connection,
        ../multiaddress,
        ../multicodec,
+       ../muxers/muxer,
       ../upgrademngrs/upgrade
 
 logScope:
@@ -78,7 +79,7 @@ proc dial*(
 
 method upgradeIncoming*(
   self: Transport,
-  conn: Connection): Future[void] {.base, gcsafe.} =
+  conn: Connection): Future[Muxer] {.base, gcsafe.} =
   ## base upgrade method that the transport uses to perform
   ## transport specific upgrades
   ##
@@ -88,7 +89,7 @@ method upgradeIncoming*(
 method upgradeOutgoing*(
   self: Transport,
   conn: Connection,
-  peerId: Opt[PeerId]): Future[Connection] {.base, gcsafe.} =
+  peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
   ## base upgrade method that the transport uses to perform
   ## transport specific upgrades
   ##
diff --git a/libp2p/upgrademngrs/muxedupgrade.nim b/libp2p/upgrademngrs/muxedupgrade.nim
index 030508c10f..79a4c8baf7 100644
--- a/libp2p/upgrademngrs/muxedupgrade.nim
+++ b/libp2p/upgrademngrs/muxedupgrade.nim
@@ -30,35 +30,24 @@ type
 
 proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
   for m in self.muxers:
-    if muxerName in m.codecs:
+    if muxerName == m.codec:
       return m
 
-proc identify*(
-  self: MuxedUpgrade,
-  muxer: Muxer) {.async, gcsafe.} =
-  # new stream for identify
-  var stream = await muxer.newStream()
-  if stream == nil:
-    return
-
-  try:
-    await self.identify(stream)
-    when defined(libp2p_agents_metrics):
-      muxer.connection.shortAgent = stream.shortAgent
-  finally:
-    await stream.closeWithEOF()
-
 proc mux*(
   self: MuxedUpgrade,
-  conn: Connection): Future[Muxer] {.async, gcsafe.} =
-  ## mux outgoing connection
+  conn: Connection,
+  direction: Direction): Future[Muxer] {.async, gcsafe.} =
+  ## mux connection
 
   trace "Muxing connection", conn
   if self.muxers.len == 0:
     warn "no muxers registered, skipping upgrade flow", conn
     return
 
-  let muxerName = await self.ms.select(conn, self.muxers.mapIt(it.codec))
+  let muxerName =
+    if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
+    else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
+
   if muxerName.len == 0 or muxerName == "na":
     debug "no muxer available, early exit", conn
     return
@@ -70,36 +59,23 @@ proc mux*(
 
   # install stream handler
   muxer.streamHandler = self.streamHandler
-
-  self.connManager.storeConn(conn)
-
-  # store it in muxed connections if we have a peer for it
-  self.connManager.storeMuxer(muxer, muxer.handle()) # store muxer and start read loop
-
-  try:
-    await self.identify(muxer)
-  except CatchableError as exc:
-    # Identify is non-essential, though if it fails, it might indicate that
-    # the connection was closed already - this will be picked up by the read
-    # loop
-    debug "Could not identify connection", conn, msg = exc.msg
-
+  muxer.handler = muxer.handle()
   return muxer
 
-method upgradeOutgoing*(
+proc upgrade(
   self: MuxedUpgrade,
   conn: Connection,
-  peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
-  trace "Upgrading outgoing connection", conn
+  direction: Direction,
+  peerId: Opt[PeerId]): Future[Muxer] {.async.} =
+  trace "Upgrading connection", conn, direction
 
-  let sconn = await self.secure(conn, peerId) # secure the connection
+  let sconn = await self.secure(conn, direction, peerId) # secure the connection
   if isNil(sconn):
     raise newException(UpgradeFailedError,
                       "unable to secure connection, stopping upgrade")
 
-  let muxer = await self.mux(sconn) # mux it if possible
+  let muxer = await self.mux(sconn, direction) # mux it if possible
   if muxer == nil:
-    # TODO this might be relaxed in the future
     raise newException(UpgradeFailedError,
                       "a muxer is required for outgoing connections")
 
@@ -111,108 +87,28 @@ method upgradeOutgoing*(
     raise newException(UpgradeFailedError,
                       "Connection closed or missing peer info, stopping upgrade")
 
-  trace "Upgraded outgoing connection", conn, sconn
-
-  return sconn
+  trace "Upgraded connection", conn, sconn, direction
+  return muxer
 
-method upgradeIncoming*(
+method upgradeOutgoing*(
   self: MuxedUpgrade,
-  incomingConn: Connection) {.async, gcsafe.} = # noraises
-  trace "Upgrading incoming connection", incomingConn
-  let ms = MultistreamSelect.new()
-
-  # secure incoming connections
-  proc securedHandler(conn: Connection,
-                      proto: string)
-                      {.async, gcsafe, closure.} =
-    trace "Starting secure handler", conn
-    let secure = self.secureManagers.filterIt(it.codec == proto)[0]
-
-    var cconn = conn
-    try:
-      var sconn = await secure.secure(cconn, false, Opt.none(PeerId))
-      if isNil(sconn):
-        return
-
-      cconn = sconn
-      # add the muxer
-      for muxer in self.muxers:
-        ms.addHandler(muxer.codecs, muxer)
-
-      # handle subsequent secure requests
-      await ms.handle(cconn)
-    except CatchableError as exc:
-      debug "Exception in secure handler during incoming upgrade", msg = exc.msg, conn
-      if not cconn.isUpgraded:
-        cconn.upgrade(exc)
-    finally:
-      if not isNil(cconn):
-        await cconn.close()
-
-    trace "Stopped secure handler", conn
-
-  try:
-    if (await ms.select(incomingConn)): # just handshake
-      # add the secure handlers
-      for k in self.secureManagers:
-        ms.addHandler(k.codec, securedHandler)
-
-    # handle un-secured connections
-    # we handshaked above, set this ms handler as active
-    await ms.handle(incomingConn, active = true)
-  except CatchableError as exc:
-    debug "Exception upgrading incoming", exc = exc.msg
-    if not incomingConn.isUpgraded:
-      incomingConn.upgrade(exc)
-  finally:
-    if not isNil(incomingConn):
-      await incomingConn.close()
-
-proc muxerHandler(
+  conn: Connection,
+  peerId: Opt[PeerId]): Future[Muxer] {.async, gcsafe.} =
+  return await self.upgrade(conn, Out, peerId)
+
+method upgradeIncoming*(
   self: MuxedUpgrade,
-  muxer: Muxer) {.async, gcsafe.} =
-  let
-    conn = muxer.connection
-
-  # store incoming connection
-  self.connManager.storeConn(conn)
-
-  # store muxer and muxed connection
-  self.connManager.storeMuxer(muxer)
-
-  try:
-    await self.identify(muxer)
-    when defined(libp2p_agents_metrics):
-      #TODO Passing data between layers is a pain
-      if muxer.connection of SecureConn:
-        let secureConn = (SecureConn)muxer.connection
-        secureConn.stream.shortAgent = muxer.connection.shortAgent
-  except IdentifyError as exc:
-    # Identify is non-essential, though if it fails, it might indicate that
-    # the connection was closed already - this will be picked up by the read
-    # loop
-    debug "Could not identify connection", conn, msg = exc.msg
-  except LPStreamClosedError as exc:
-    debug "Identify stream closed", conn, msg = exc.msg
-  except LPStreamEOFError as exc:
-    debug "Identify stream EOF", conn, msg = exc.msg
-  except CancelledError as exc:
-    await muxer.close()
-    raise exc
-  except CatchableError as exc:
-    await muxer.close()
-    trace "Exception in muxer handler", conn, msg = exc.msg
+  conn: Connection): Future[Muxer] {.async, gcsafe.} =
+  return await self.upgrade(conn, In, Opt.none(PeerId))
 
 proc new*(
   T: type MuxedUpgrade,
-  identity: Identify,
   muxers: seq[MuxerProvider],
   secureManagers: openArray[Secure] = [],
   connManager: ConnManager,
   ms: MultistreamSelect): T =
 
   let upgrader = T(
-    identity: identity,
    muxers: muxers,
    secureManagers: @secureManagers,
    connManager: connManager,
@@ -231,10 +127,4 @@ proc new*(
       await conn.closeWithEOF()
     trace "Stream handler done", conn
 
-  for _, val in muxers:
-    val.streamHandler = upgrader.streamHandler
-    val.muxerHandler = proc(muxer: Muxer): Future[void]
-      {.raises: [Defect].} =
-      upgrader.muxerHandler(muxer)
-
   return upgrader
diff --git a/libp2p/upgrademngrs/upgrade.nim b/libp2p/upgrademngrs/upgrade.nim
index c087bedba9..c6a2f176cb 100644
--- a/libp2p/upgrademngrs/upgrade.nim
+++ b/libp2p/upgrademngrs/upgrade.nim
@@ -19,6 +19,7 @@ import pkg/[chronos, chronicles, metrics]
 import ../stream/connection,
       ../protocols/secure/secure,
       ../protocols/identify,
+      ../muxers/muxer,
      ../multistream,
      ../peerstore,
      ../connmanager,
@@ -37,29 +38,31 @@ type
   Upgrade* = ref object of RootObj
     ms*: MultistreamSelect
-    identity*: Identify
    connManager*: ConnManager
    secureManagers*: seq[Secure]
 
 method upgradeIncoming*(
   self: Upgrade,
-  conn: Connection): Future[void] {.base.} =
+  conn: Connection): Future[Muxer] {.base.} =
   doAssert(false, "Not implemented!")
 
 method upgradeOutgoing*(
   self: Upgrade,
   conn: Connection,
-  peerId: Opt[PeerId]): Future[Connection] {.base.} =
+  peerId: Opt[PeerId]): Future[Muxer] {.base.} =
   doAssert(false, "Not implemented!")
 
 proc secure*(
   self: Upgrade,
   conn: Connection,
+  direction: Direction,
   peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
   if self.secureManagers.len <= 0:
     raise newException(UpgradeFailedError, "No secure managers registered!")
 
-  let codec = await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
+  let codec =
+    if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
+    else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
   if codec.len == 0:
     raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
 
@@ -70,30 +73,4 @@ proc secure*(
   # let's avoid duplicating checks but detect if it fails to do it properly
   doAssert(secureProtocol.len > 0)
 
-  return await secureProtocol[0].secure(conn, true, peerId)
-
-proc identify*(
-  self: Upgrade,
-  conn: Connection) {.async, gcsafe.} =
-  ## identify the connection
-
-  if (await self.ms.select(conn, self.identity.codec)):
-    let
-      info = await self.identity.identify(conn, conn.peerId)
-      peerStore = self.connManager.peerStore
-
-    if info.pubkey.isNone and isNil(conn):
-      raise newException(UpgradeFailedError,
-        "no public key provided and no existing peer identity found")
-
-    conn.peerId = info.peerId
-
-    when defined(libp2p_agents_metrics):
-      conn.shortAgent = "unknown"
-      if info.agentVersion.isSome and info.agentVersion.get().len > 0:
-        let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
-        if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
-          conn.shortAgent = shortAgent.get()
-
-    peerStore.updatePeerInfo(info)
-    trace "identified remote peer", conn, peerId = shortLog(conn.peerId)
+  return await secureProtocol[0].secure(conn, direction == Out, peerId)
diff --git a/tests/testnoise.nim b/tests/testnoise.nim
index f715a7b33b..e0189c71c0 100644
--- a/tests/testnoise.nim
+++ b/tests/testnoise.nim
@@ -75,7 +75,7 @@ proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switc
       [Secure(Noise.new(rng, privateKey, outgoing = outgoing))]
     connManager = ConnManager.new()
     ms = MultistreamSelect.new()
-    muxedUpgrade = MuxedUpgrade.new(identify, muxers, secureManagers, connManager, ms)
+    muxedUpgrade = MuxedUpgrade.new(muxers, secureManagers, connManager, ms)
    transports = @[Transport(TcpTransport.new(upgrade = muxedUpgrade))]
 
   let switch = newSwitch(

From df973147d2c35b6eb402bfb4f7d11675b207e57a Mon Sep 17 00:00:00 2001
From: Tanguy
Date: Wed, 23 Nov 2022 11:59:53 +0100
Subject: [PATCH 002/112] kinda working

---
 libp2p/builders.nim                |  7 ++-
 libp2p/connmanager.nim             | 10 +++-
 libp2p/dialer.nim                  | 46 +++++++--------
 libp2p/multistream.nim             | 23 ++++----
 libp2p/muxers/muxer.nim            |  4 +-
 libp2p/peerstore.nim               | 14 +++--
 libp2p/switch.nim                  | 94 +++++++++++++++++++++---------
 libp2p/transports/tortransport.nim |  3 +-
 tests/testidentify.nim             |  2 +-
 tests/testmultistream.nim          |  3 +-
 tests/testnoise.nim                |  5 +-
 tests/testpeerstore.nim            |  8 +--
 tests/testswitch.nim               | 89 ++++++++++++++--------------
 13 files changed, 178 insertions(+), 130 deletions(-)

diff --git a/libp2p/builders.nim b/libp2p/builders.nim
index 479e9aec2c..c8d25742f0 100644
--- a/libp2p/builders.nim
+++ b/libp2p/builders.nim
@@ -242,20 +242,21 @@ proc build*(b: SwitchBuilder): Switch
   let peerStore =
     if isSome(b.peerStoreCapacity):
-      PeerStore.new(b.peerStoreCapacity.get())
+      PeerStore.new(identify, b.peerStoreCapacity.get())
     else:
-      PeerStore.new()
+      PeerStore.new(identify)
 
   let switch = newSwitch(
     peerInfo = peerInfo,
     transports = transports,
-    identify = identify,
     secureManagers = secureManagerInstances,
     connManager = connManager,
     ms = ms,
     nameResolver = b.nameResolver,
     peerStore = peerStore)
 
+  switch.mount(identify)
+
   if b.autonat:
     let autonat = Autonat.new(switch)
     switch.mount(autonat)
diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim
index 2fbac126e4..697ae0ab72 100644
--- a/libp2p/connmanager.nim
+++ b/libp2p/connmanager.nim
@@ -243,9 +243,9 @@ proc closeMuxer(muxer: Muxer) {.async.} =
   trace "Cleaning up muxer", m = muxer
 
   await muxer.close()
-  if not(isNil(muxer.handle)):
+  if not(isNil(muxer.handler)):
     try:
-      await muxer.handle # TODO noraises?
+      await muxer.handler # TODO noraises?
     except CatchableError as exc:
       trace "Exception in close muxer handler", exc = exc.msg
   trace "Cleaned up muxer", m = muxer
@@ -433,6 +433,12 @@ proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
 
   asyncSpawn semaphoreMonitor()
 
+proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
+  if isNil(mux):
+    cs.release()
+    return
+  cs.trackConnection(mux.connection)
+
 proc storeMuxer*(c: ConnManager,
                  muxer: Muxer)
                  {.raises: [Defect, CatchableError].} =
diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim
index 6b40c162a7..2ece45c34c 100644
--- a/libp2p/dialer.nim
+++ b/libp2p/dialer.nim
@@ -17,6 +17,7 @@ import pkg/[chronos,
 import dial,
        peerid,
        peerinfo,
+       peerstore,
       multicodec,
       muxers/muxer,
       multistream,
@@ -42,10 +43,10 @@ type
   Dialer* = ref object of Dial
     localPeerId*: PeerId
-    ms: MultistreamSelect
    connManager: ConnManager
    dialLock: Table[PeerId, AsyncLock]
    transports: seq[Transport]
+    peerStore: PeerStore
    nameResolver: NameResolver
 
 proc dialAndUpgrade(
@@ -163,10 +164,10 @@ proc internalConnect(
     await lock.acquire()
 
     # Check if we have a connection already and try to reuse it
-    var mux =
+    var muxed =
       if peerId.isSome: self.connManager.selectMuxer(self.connManager.selectConn(peerId.get()))
       else: nil
-    if mux != nil:
+    if muxed != nil:
       #if mux.atEof or mux.closed:
       #  # This connection should already have been removed from the connection
       #  # manager - it's essentially a bug that we end up here - we'll fail
       #  # for now, hoping that this will clean themselves up later...
       #  warn "dead connection in connection manager", conn
       #  await conn.close()
       #  raise newException(DialFailedError, "Zombie connection encountered")
 
-      trace "Reusing existing connection", direction = $mux.connection.dir
-      return mux
+      trace "Reusing existing connection", direction = $muxed.connection.dir
+      return muxed
 
     let slot = await self.connManager.getOutgoingSlot(forceDial)
-    mux =
+    muxed =
       try:
         await self.dialAndUpgrade(peerId, addrs)
       except CatchableError as exc:
        slot.release()
        raise exc
-    #TODO
-    #slot.trackConnection(conn)
-    if isNil(mux): # None of the addresses connected
+    slot.trackMuxer(muxed)
+    if isNil(muxed): # None of the addresses connected
       raise newException(DialFailedError, "Unable to establish outgoing link")
 
-    # A disconnect could have happened right after
-    # we've added the connection so we check again
-    # to prevent races due to that.
-    # TODO
-    #if conn.closed() or conn.atEof():
-    #  # This can happen when the other ends drops us
-    #  # before we get a chance to return the connection
-    #  # back to the dialer.
-    #  trace "Connection dead on arrival", conn
-    #  raise newLPStreamClosedError()
-
-    return mux
+    try:
+      await self.peerStore.identify(muxed)
+      self.connManager.storeConn(muxed.connection)
+      self.connManager.storeMuxer(muxed)
+    except CatchableError as exc:
+      trace "Failed to finish outgoing upgrade", err=exc.msg
+      await muxed.close()
+      raise exc
+
+    return muxed
   finally:
     if lock.locked():
       lock.release()
@@ -233,7 +231,7 @@ proc negotiateStream(
   conn: Connection,
   protos: seq[string]): Future[Connection] {.async.} =
   trace "Negotiating stream", conn, protos
-  let selected = await self.ms.select(conn, protos)
+  let selected = await MultistreamSelect.select(conn, protos)
   if not protos.contains(selected):
     await conn.closeWithEOF()
     raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
@@ -324,12 +322,12 @@ proc new*(
   T: type Dialer,
   localPeerId: PeerId,
   connManager: ConnManager,
+  peerStore: PeerStore,
   transports: seq[Transport],
-  ms: MultistreamSelect,
   nameResolver: NameResolver = nil): Dialer =
 
   T(localPeerId: localPeerId,
    connManager: connManager,
    transports: transports,
-   ms: ms,
+   peerStore: peerStore,
    nameResolver: nameResolver)
diff --git a/libp2p/multistream.nim b/libp2p/multistream.nim
index 6e22da9991..8ebe09d951 100644
--- a/libp2p/multistream.nim
+++ b/libp2p/multistream.nim
@@ -21,11 +21,11 @@ logScope:
   topics = "libp2p multistream"
 
 const
-  MsgSize* = 64*1024
-  Codec* = "/multistream/1.0.0"
+  MsgSize = 64*1024
+  Codec = "/multistream/1.0.0"
 
-  Na* = "na\n"
-  Ls* = "ls\n"
+  Na = "na\n"
+  Ls = "ls\n"
 
 type
   Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
@@ -50,11 +50,11 @@ template validateSuffix(str: string): untyped =
     else:
       raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
 
-proc select*(m: MultistreamSelect,
+proc select*(_: MultistreamSelect | type MultistreamSelect,
             conn: Connection,
             proto: seq[string]):
             Future[string] {.async.} =
-  trace "initiating handshake", conn, codec = m.codec
+  trace "initiating handshake", conn, codec = Codec
   ## select a remote protocol
   await conn.writeLp(Codec & "\n") # write handshake
   if proto.len() > 0:
@@ -98,13 +98,13 @@ proc select*(m: MultistreamSelect,
     # No alternatives, fail
     return ""
 
-proc select*(m: MultistreamSelect,
+proc select*(_: MultistreamSelect | type MultistreamSelect,
             conn: Connection,
             proto: string): Future[bool] {.async.} =
   if proto.len > 0:
-    return (await m.select(conn, @[proto])) == proto
+    return (await MultistreamSelect.select(conn, @[proto])) == proto
   else:
-    return (await m.select(conn, @[])) == Codec
+    return (await MultistreamSelect.select(conn, @[])) == Codec
 
 proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
   m.select(conn, "")
@@ -150,7 +150,10 @@ proc handle*(
     case ms:
     of "ls":
       trace "handle: listing protos", conn
-      await conn.writeLp(protos.join("\n"))
+      #TODO this doesn't seem to follow spec, each protocol
+      # should be length prefixed. Not very important
+      # since LS is getting deprecated
+      await conn.writeLp(protos.join("\n") & "\n")
     of Codec:
       if not handshaked:
         await conn.writeLp(Codec & "\n")
diff --git a/libp2p/muxers/muxer.nim b/libp2p/muxers/muxer.nim
index 6934c900e9..eeaa59b0f6 100644
--- a/libp2p/muxers/muxer.nim
+++ b/libp2p/muxers/muxer.nim
@@ -43,7 +43,9 @@ type
     newMuxer*: MuxerConstructor
     codec*: string
 
-func shortLog*(m: Muxer): auto = shortLog(m.connection)
+func shortLog*(m: Muxer): auto =
+  if isNil(m): "nil"
+  else: shortLog(m.connection)
 chronicles.formatIt(Muxer): shortLog(it)
 
 # muxer interface
diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim
index 205ac96a1b..250ef6c733 100644
--- a/libp2p/peerstore.nim
+++ b/libp2p/peerstore.nim
@@ -75,11 +75,15 @@ type
   PeerStore* {.public.} = ref object
     books: Table[string, BasePeerBook]
+    identify: Identify
    capacity*: int
    toClean*: seq[PeerId]
 
-proc new*(T: type PeerStore, capacity = 1000): PeerStore {.public.} =
-  T(capacity: capacity)
+proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} =
+  T(
+    identify: identify,
+    capacity: capacity
+  )
 
 #########################
 # Generic Peer Book API #
@@ -194,8 +198,6 @@ proc cleanup*(
 
 proc identify*(
   peerStore: PeerStore,
-  ms: MultistreamSelect,
-  identify: Identify,
   muxer: Muxer) {.async.} =
 
   # new stream for identify
   var stream = await muxer.newStream()
   if stream == nil:
     return
 
   try:
-    if (await ms.select(stream, identify.codec())):
-      let info = await identify.identify(stream, stream.peerId)
+    if (await MultistreamSelect.select(stream, peerStore.identify.codec())):
+      let info = await peerStore.identify.identify(stream, stream.peerId)
 
       when defined(libp2p_agents_metrics):
         muxer.connection.shortAgent = "unknown"
diff --git a/libp2p/switch.nim b/libp2p/switch.nim
index 374c0c2d40..dabd31126e 100644
--- a/libp2p/switch.nim
+++ b/libp2p/switch.nim
@@ -60,10 +60,12 @@ logScope:
 
 declareCounter(libp2p_failed_upgrades_incoming, "incoming connections failed upgrades")
 
+const
+  ConcurrentUpgrades* = 4
+
 type
   Switch* {.public.} = ref object of Dial
     peerInfo*: PeerInfo
-    identify*: Identify
    connManager*: ConnManager
    transports*: seq[Transport]
    ms*: MultistreamSelect
@@ -187,42 +189,80 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
   s.ms.addHandler(proto.codecs, proto, matcher)
   s.peerInfo.protocols.add(proto.codec)
 
-proc upgradeMonitor(conn: Connection, upgrades: AsyncSemaphore) {.async.} =
-  ## monitor connection for upgrades
-  ##
+proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
   try:
-    # Since we don't control the flow of the
-    # upgrade, this timeout guarantees that a
-    # "hanged" remote doesn't hold the upgrade
-    # forever
-    await conn.onUpgrade.wait(30.seconds) # wait for connection to be upgraded
+    let muxed = await trans.upgradeIncoming(conn)
+    await switch.peerStore.identify(muxed)
+    switch.connManager.storeConn(muxed.connection)
+    switch.connManager.storeMuxer(muxed)
     trace "Connection upgrade succeeded"
   except CatchableError as exc:
-    libp2p_failed_upgrades_incoming.inc()
+    if exc isnot CancelledError:
+      libp2p_failed_upgrades_incoming.inc()
     if not isNil(conn):
       await conn.close()
 
     trace "Exception awaiting connection upgrade", exc = exc.msg, conn
-  finally:
-    upgrades.release() # don't forget to release the slot!
 
+proc upgradeMonitor(
+  switch: Switch,
+  trans: Transport,
+  conn: Connection,
+  upgrades: AsyncSemaphore) {.async.} =
+  try:
+    await switch.upgrader(trans, conn).wait(30.seconds)
+  finally:
+    upgrades.release()
 
 proc accept(s: Switch, transport: Transport) {.async.} = # noraises
   ## switch accept loop, ran for every transport
   ##
 
-  let upgrades = newAsyncSemaphore(4)
+  let upgrades = newAsyncSemaphore(ConcurrentUpgrades)
   while transport.running:
-    let
-      conn = await transport.accept()
+    var conn: Connection
     try:
-      let muxed = await transport.upgradeIncoming(conn)
-      await s.peerStore.identify(s.ms, s.identify, muxed)
-      s.connManager.storeConn(muxed.connection)
-      s.connManager.storeMuxer(muxed)
+      debug "About to accept incoming connection"
+      # remember to always release the slot when
+      # the upgrade succeeds or fails, this is
+      # currently done by the `upgradeMonitor`
+      await upgrades.acquire()    # first wait for an upgrade slot to become available
+      let slot = await s.connManager.getIncomingSlot()
+      conn =
+        try:
+          await transport.accept()
+        except CatchableError as exc:
+          slot.release()
+          raise exc
+      slot.trackConnection(conn)
+      if isNil(conn):
+        # A nil connection means that we might have hit a
+        # file-handle limit (or another non-fatal error),
+        # we can get one on the next try, but we should
+        # be careful to not end up in a tight loop that
+        # will starve the main event loop, thus we sleep
+        # here before retrying.
+        trace "Unable to get a connection, sleeping"
+        await sleepAsync(100.millis) # TODO: should be configurable?
+        upgrades.release()
+        continue
+
+      # set the direction of this bottom level transport
+      # in order to be able to consume this information in gossipsub if required
+      # gossipsub gives priority to connections we make
+      conn.transportDir = Direction.In
+
+      debug "Accepted an incoming connection", conn
+      asyncSpawn s.upgradeMonitor(transport, conn, upgrades)
+    except CancelledError as exc:
+      trace "releasing semaphore on cancellation"
+      upgrades.release() # always release the slot
     except CatchableError as exc:
-      debug "Failed to store connection", msg=exc.msg
-      await conn.close()
+      debug "Exception in accept loop, exiting", exc = exc.msg
+      upgrades.release() # always release the slot
+      if not isNil(conn):
+        await conn.close()
+      return
 
 proc stop*(s: Switch) {.async, public.} =
   ## Stop listening on every transport, and
@@ -297,13 +337,12 @@ proc start*(s: Switch) {.async, gcsafe, public.} =
 
 proc newSwitch*(peerInfo: PeerInfo,
                 transports: seq[Transport],
                 secureManagers: openArray[Secure] = [],
                 connManager: ConnManager,
                 ms: MultistreamSelect,
-                nameResolver: NameResolver = nil,
-                peerStore = PeerStore.new()): Switch
-                {.raises: [Defect, LPError], public.} =
+                peerStore: PeerStore,
+                nameResolver: NameResolver = nil): Switch
+                {.raises: [Defect, LPError].} =
   if secureManagers.len == 0:
     raise newException(LPError, "Provide at least one secure manager")
 
@@ -313,11 +352,8 @@ proc newSwitch*(peerInfo: PeerInfo,
     transports: transports,
     connManager: connManager,
     peerStore: peerStore,
-    identify: identify,
-    dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
+    dialer: Dialer.new(peerInfo.peerId, connManager, peerStore, transports, nameResolver),
    nameResolver: nameResolver)
 
   switch.connManager.peerStore = peerStore
-  #TODO identify could be part of the PeerStore instead
-  switch.mount(identify)
   return switch
diff --git a/libp2p/transports/tortransport.nim b/libp2p/transports/tortransport.nim
index 095c1089af..726c141d6c 100644
--- a/libp2p/transports/tortransport.nim
+++ b/libp2p/transports/tortransport.nim
@@ -265,11 +265,10 @@ proc new*(
   let torSwitch = T(
     peerInfo: switch.peerInfo,
     ms: switch.ms,
-    identify: switch.identify,
    transports: switch.transports,
    connManager: switch.connManager,
    peerStore: switch.peerStore,
-    dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.transports, switch.ms, nil),
+    dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.peerStore, switch.transports, nil),
    nameResolver: nil)
 
   torSwitch.connManager.peerStore = switch.peerStore
diff --git a/tests/testidentify.nim b/tests/testidentify.nim
index 9fa1f4ddd0..8b43237f1d 100644
--- a/tests/testidentify.nim
+++ b/tests/testidentify.nim
@@ -177,7 +177,7 @@ suite "Identify":
     check:
       switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
       switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs
-      
+
       switch1.peerStore[KeyBook][switch2.peerInfo.peerId] == switch2.peerInfo.publicKey
       switch2.peerStore[KeyBook][switch1.peerInfo.peerId] == switch1.peerInfo.publicKey
 
diff --git a/tests/testmultistream.nim b/tests/testmultistream.nim
index 6bdf1aa40a..aa1b7ba410 100644
--- a/tests/testmultistream.nim
+++ b/tests/testmultistream.nim
@@ -224,8 +224,7 @@ suite "Multistream select":
     var conn: Connection = nil
 
     proc testNaHandler(msg: string): Future[void] {.async, gcsafe.} =
-      echo msg
-      check msg == Na
+      check msg == "\x03na\n"
       await conn.close()
     conn = newTestNaStream(testNaHandler)
 
diff --git a/tests/testnoise.nim b/tests/testnoise.nim
index e0189c71c0..3ac69db84b 100644
--- a/tests/testnoise.nim
+++ b/tests/testnoise.nim
@@ -67,6 +67,7 @@ proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switc
   let
     identify = Identify.new(peerInfo)
+    peerStore = PeerStore.new(identify)
    mplexProvider = MuxerProvider.new(createMplex, MplexCodec)
    muxers = @[mplexProvider]
    secureManagers = if secio:
@@ -81,10 +82,10 @@ proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switc
   let switch = newSwitch(
     peerInfo,
     transports,
-    identify,
     secureManagers,
     connManager,
-    ms)
+    ms,
+    peerStore)
   result = (switch, peerInfo)
 
 suite "Noise":
diff --git a/tests/testpeerstore.nim b/tests/testpeerstore.nim
index 74477319bf..b6ce7cdd5f 100644
--- a/tests/testpeerstore.nim
+++ b/tests/testpeerstore.nim
@@ -96,7 +96,7 @@ suite "PeerStore":
       toSeq(values(addressBook.book))[0] == @[multiaddr1, multiaddr2]
 
   test "Pruner - no capacity":
-    let peerStore = PeerStore.new(capacity = 0)
+    let peerStore = PeerStore.new(nil, capacity = 0)
     peerStore[AgentBook][peerId1] = "gds"
 
     peerStore.cleanup(peerId1)
@@ -104,7 +104,7 @@ suite "PeerStore":
     check peerId1 notin peerStore[AgentBook]
 
   test "Pruner - FIFO":
-    let peerStore = PeerStore.new(capacity = 1)
+    let peerStore = PeerStore.new(nil, capacity = 1)
     peerStore[AgentBook][peerId1] = "gds"
     peerStore[AgentBook][peerId2] = "gds"
     peerStore.cleanup(peerId2)
@@ -114,7 +114,7 @@ suite "PeerStore":
       peerId2 notin peerStore[AgentBook]
 
   test "Pruner - regular capacity":
-    var peerStore = PeerStore.new(capacity = 20)
+    var peerStore = PeerStore.new(nil, capacity = 20)
 
     for i in 0..<30:
       let randomPeerId = PeerId.init(KeyPair.random(ECDSA, rng[]).get().pubkey).get()
@@ -124,7 +124,7 @@ suite "PeerStore":
     check peerStore[AgentBook].len == 20
 
   test "Pruner - infinite capacity":
-    var peerStore = PeerStore.new(capacity = -1)
+    var peerStore = PeerStore.new(nil, capacity = -1)
 
     for i in 0..<30:
       let randomPeerId = PeerId.init(KeyPair.random(ECDSA, rng[]).get().pubkey).get()
diff --git a/tests/testswitch.nim b/tests/testswitch.nim
index 1e0db7e154..93274bbfc4 100644
--- a/tests/testswitch.nim
+++ b/tests/testswitch.nim
@@ -76,50 +76,51 @@ suite "Switch":
     check not switch1.isConnected(switch2.peerInfo.peerId)
     check not switch2.isConnected(switch1.peerInfo.peerId)
 
-  asyncTest "e2e use switch dial proto string with custom matcher":
-    let done = newFuture[void]()
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
-      try:
-        let msg = string.fromBytes(await conn.readLp(1024))
-        check "Hello!" == msg
-        await conn.writeLp("Hello!")
-      finally:
-        await conn.close()
-        done.complete()
-
-    let testProto = new TestProto
-    testProto.codec = TestCodec
-    testProto.handler = handle
-
-    let callProto = TestCodec & "/pew"
-
-    proc match(proto: string): bool {.gcsafe.} =
-      return proto == callProto
-
-    let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
-    switch1.mount(testProto, match)
-
-    let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
-    await switch1.start()
-    await switch2.start()
-
-    let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, callProto)
-
-    check switch1.isConnected(switch2.peerInfo.peerId)
-    check switch2.isConnected(switch1.peerInfo.peerId)
-
-    await conn.writeLp("Hello!")
-    let msg = string.fromBytes(await conn.readLp(1024))
-    check "Hello!" == msg
-    await conn.close()
-
-    await allFuturesThrowing(
-      done.wait(5.seconds),
-      switch1.stop(),
-      switch2.stop())
-
-    check not switch1.isConnected(switch2.peerInfo.peerId)
-    check not switch2.isConnected(switch1.peerInfo.peerId)
+#TODO
+#  asyncTest "e2e use switch dial proto string with custom matcher":
+#    let done = newFuture[void]()
+#    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+#      try:
+#        let msg = string.fromBytes(await conn.readLp(1024))
+#        check "Hello!" == msg
+#        await conn.writeLp("Hello!")
+#      finally:
+#        await conn.close()
+#        done.complete()
+#
+#    let testProto = new TestProto
+#    testProto.codec = TestCodec
+#    testProto.handler = handle
+#
+#    let callProto = TestCodec & "/pew"
+#
+#    proc match(proto: string): bool {.gcsafe.} =
+#      return proto == callProto
+#
+#    let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
+#    switch1.mount(testProto, match)
+#
+#    let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
+#    await switch1.start()
+#    await switch2.start()
+#
+#    let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, callProto)
+#
+#    check switch1.isConnected(switch2.peerInfo.peerId)
+#    check switch2.isConnected(switch1.peerInfo.peerId)
+#
+#    await conn.writeLp("Hello!")
+#    let msg = string.fromBytes(await conn.readLp(1024))
+#    check "Hello!" == msg
+#    await conn.close()
+#
+#    await allFuturesThrowing(
+#      done.wait(5.seconds),
+#      switch1.stop(),
+#      switch2.stop())
+#
+#    check not switch1.isConnected(switch2.peerInfo.peerId)
+#    check not switch2.isConnected(switch1.peerInfo.peerId)
 
   asyncTest "e2e should not leak bufferstreams and connections on channel close":
     let done = newFuture[void]()

From 1ab9cc9cc2200770beb1ae2c58cb9bb3c549c72f Mon Sep 17 00:00:00 2001
From: Tanguy
Date: Wed, 23 Nov 2022 12:03:38 +0100
Subject: [PATCH 003/112] remove upgraded system

---
 libp2p/connmanager.nim             |  1 -
 libp2p/muxers/muxer.nim            |  2 +-
 libp2p/protocols/secure/secure.nim |  1 -
 libp2p/stream/connection.nim       | 24 ------------------------
 4 files changed, 1 insertion(+), 27 deletions(-)

diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim
index 697ae0ab72..fbb05b17b3 100644
--- a/libp2p/connmanager.nim
+++ b/libp2p/connmanager.nim
@@ -284,7 +284,6 @@ proc cleanupConn(c: ConnManager, conn: Connection) {.async.} =
 proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} =
   try:
     trace "Triggering connect events", conn
-    conn.upgrade()
 
     let peerId = conn.peerId
     await c.triggerPeerEvents(
diff --git a/libp2p/muxers/muxer.nim b/libp2p/muxers/muxer.nim
index eeaa59b0f6..4f053f459b 100644
--- a/libp2p/muxers/muxer.nim
+++ b/libp2p/muxers/muxer.nim
@@ -39,7 +39,7 @@ type
   MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
 
   # this wraps a creator proc that knows how to make muxers
-  MuxerProvider* = ref object
+  MuxerProvider* = object
     newMuxer*: MuxerConstructor
     codec*: string
 
diff --git a/libp2p/protocols/secure/secure.nim b/libp2p/protocols/secure/secure.nim
index 124e1e9b53..cb9f824885 100644
--- a/libp2p/protocols/secure/secure.nim
+++ b/libp2p/protocols/secure/secure.nim
@@ -56,7 +56,6 @@ proc new*(T: type SecureConn,
     peerId: peerId,
     observedAddr: observedAddr,
     closeEvent: conn.closeEvent,
-    upgraded: conn.upgraded,
    timeout: timeout,
    dir: conn.dir)
   result.initStream()
diff --git a/libp2p/stream/connection.nim b/libp2p/stream/connection.nim
index a4f52deaeb..bf4b3de17f 100644
--- a/libp2p/stream/connection.nim
+++ b/libp2p/stream/connection.nim
@@ -39,7 +39,6 @@ type
     timeoutHandler*: TimeoutHandler # timeout handler
     peerId*: PeerId
     observedAddr*: Opt[MultiAddress]
-    upgraded*: Future[void]
    protocol*: string # protocol used by the connection, used as tag for metrics
    transportDir*: Direction # The bottom level transport (generally the socket) direction
    when defined(libp2p_agents_metrics):
@@ -47,22 +46,6 @@ type
 
 proc timeoutMonitor(s: Connection) {.async, gcsafe.}
 
-proc isUpgraded*(s: Connection): bool =
-  if not isNil(s.upgraded):
-    return s.upgraded.finished
-
-proc upgrade*(s: Connection, failed: ref CatchableError = nil) =
-  if not isNil(s.upgraded):
-    if not isNil(failed):
-      s.upgraded.fail(failed)
-      return
-
-    s.upgraded.complete()
-
-proc onUpgrade*(s: Connection) {.async.} =
-  if not isNil(s.upgraded):
-    await s.upgraded
-
 func shortLog*(conn: Connection): string =
   try:
     if conn.isNil: "Connection(nil)"
@@ -80,9 +63,6 @@ method initStream*(s: Connection) =
 
   doAssert(isNil(s.timerTaskFut))
 
-  if isNil(s.upgraded):
-    s.upgraded = newFuture[void]()
-
   if s.timeout > 0.millis:
     trace "Monitoring for timeout", s, timeout = s.timeout
 
@@ -100,10 +80,6 @@ method closeImpl*(s: Connection): Future[void] =
     s.timerTaskFut.cancel()
     s.timerTaskFut = nil
 
-  if not isNil(s.upgraded) and not s.upgraded.finished:
-    s.upgraded.cancel()
-    s.upgraded = nil
-
   trace "Closed connection", s
 
   procCall LPStream(s).closeImpl()

From 8b7688930799d0483856445a8e7d037f8c7ec8bc Mon Sep 17 00:00:00 2001
From: Tanguy
Date: Wed, 23 Nov 2022 12:27:17 +0100
Subject: [PATCH 004/112] re-add custom matcher

---
 libp2p/multistream.nim | 19 ++++++---
 tests/testswitch.nim   | 89 +++++++++++++++++++++---------------------
 2 files changed, 58 insertions(+), 50 deletions(-)

diff --git a/libp2p/multistream.nim b/libp2p/multistream.nim
index 8ebe09d951..225c9f0566 100644
--- a/libp2p/multistream.nim
+++ b/libp2p/multistream.nim
@@ -129,6 +129,7 @@ proc handle*(
   _: type MultistreamSelect,
   conn: Connection,
   protos: seq[string],
+  matchers = newSeq[Matcher](),
   active: bool = false,
   ): Future[string] {.async, gcsafe.} =
   trace "Starting multistream negotiation", conn, handshaked = active
@@ -163,7 +164,11 @@ proc handle*(
           conn
         await conn.writeLp(Na)
     else:
-      if ms in protos:
+      var found = ms in protos
+      if not found:
+        for matcher in matchers:
+          if matcher(ms): found = true
+      if found:
         trace "found handler", conn, protocol = ms
         await conn.writeLp(ms & "\n")
         conn.protocol = ms
@@ -173,16 +178,20 @@ proc handle*(
 proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
   trace "Starting multistream handler", conn, handshaked = active
-  var handshaked = active
-  var protos: seq[string]
+  var
+    handshaked = active
+    protos: seq[string]
+    matchers: seq[Matcher]
   for h in m.handlers:
+    if not isNil(h.match):
+      matchers.add(h.match)
     for proto in h.protos:
       protos.add(proto)
 
   try:
-    let negotiated = await MultistreamSelect.handle(conn, protos, active)
+    let negotiated = await MultistreamSelect.handle(conn, protos, matchers, active)
     for h in m.handlers:
-      if h.protos.contains(negotiated):
+      if h.protos.contains(negotiated) or (not isNil(h.match) and h.match(negotiated)):
         await h.protocol.handler(conn, negotiated)
         return
     debug "no handlers", conn, negotiated
diff --git a/tests/testswitch.nim b/tests/testswitch.nim
index 93274bbfc4..1e0db7e154 100644
--- a/tests/testswitch.nim
+++ b/tests/testswitch.nim
@@ -76,51 +76,50 @@ suite "Switch":
     check not switch1.isConnected(switch2.peerInfo.peerId)
     check not switch2.isConnected(switch1.peerInfo.peerId)
 
-#TODO
-#  asyncTest "e2e use switch dial proto string with custom matcher":
-#    let done = newFuture[void]()
-#    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
-#      try:
-#        let msg = string.fromBytes(await conn.readLp(1024))
-#        check "Hello!" == msg
-#        await conn.writeLp("Hello!")
-#      finally:
-#        await conn.close()
-#        done.complete()
-#
-#    let testProto = new TestProto
-#    testProto.codec = TestCodec
-#    testProto.handler = handle
-#
-#    let callProto = TestCodec & "/pew"
-#
-#    proc match(proto: string): bool {.gcsafe.} =
-#      return proto == callProto
-#
-#    let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
-#    switch1.mount(testProto, match)
-#
-#    let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
-#    await switch1.start()
-#    await switch2.start()
-#
-#    let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, callProto)
-#
-#    check switch1.isConnected(switch2.peerInfo.peerId)
-#    check switch2.isConnected(switch1.peerInfo.peerId)
-#
-#    await conn.writeLp("Hello!")
-#    let msg = string.fromBytes(await conn.readLp(1024))
-#    check "Hello!" == msg
-#    await conn.close()
-#
-#    await allFuturesThrowing(
-#      done.wait(5.seconds),
-#      switch1.stop(),
-#      switch2.stop())
-#
-#    check not switch1.isConnected(switch2.peerInfo.peerId)
-#    check not switch2.isConnected(switch1.peerInfo.peerId)
+  asyncTest "e2e use switch dial proto string with custom matcher":
+    let done = newFuture[void]()
+    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+      try:
+        let msg = string.fromBytes(await conn.readLp(1024))
+        check "Hello!" == msg
+        await conn.writeLp("Hello!")
+      finally:
+        await conn.close()
+        done.complete()
+
+    let testProto = new TestProto
+    testProto.codec = TestCodec
+    testProto.handler = handle
+
+    let callProto = TestCodec & "/pew"
+
+    proc match(proto: string): bool {.gcsafe.} =
+      return proto == callProto
+
+    let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
+    switch1.mount(testProto, match)
+
+    let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
+    await switch1.start()
+    await switch2.start()
+
+    let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, callProto)
+
+    check switch1.isConnected(switch2.peerInfo.peerId)
+    check switch2.isConnected(switch1.peerInfo.peerId)
+
+    await conn.writeLp("Hello!")
+    let msg = string.fromBytes(await conn.readLp(1024))
+    check "Hello!" == msg
+    await conn.close()
+
+    await allFuturesThrowing(
+      done.wait(5.seconds),
+      switch1.stop(),
+      switch2.stop())
+
+    check not switch1.isConnected(switch2.peerInfo.peerId)
+    check not switch2.isConnected(switch1.peerInfo.peerId)
 
   asyncTest "e2e should not leak bufferstreams and connections on channel close":
     let done = newFuture[void]()

From b954dad10bd282cff94ccf3a8309811dc3974288 Mon Sep 17 00:00:00 2001
From: Tanguy
Date: Wed, 23 Nov 2022 16:27:23 +0100
Subject: [PATCH 005/112] Simplify connManager

---
 libp2p/connmanager.nim | 273 +++++++++++-----------------------------
 libp2p/dialer.nim      |  11 +-
 libp2p/muxers/muxer.nim |   4 +-
 libp2p/switch.nim      |   1 -
 tests/testconnmngr.nim | 165 +++++++++---------------
 5 files changed, 139 insertions(+), 315 deletions(-)

diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim
index fbb05b17b3..1d9dbf7f5a 100644
--- a/libp2p/connmanager.nim
+++ b/libp2p/connmanager.nim
@@ -54,7 +54,6 @@ type
   PeerEventKind* {.pure.} = enum
     Left,
-    Identified,
     Joined
 
   PeerEvent* = object
@@ -71,8 +70,7 @@ type
     maxConnsPerPeer: int
     inSema*: AsyncSemaphore
     outSema*: AsyncSemaphore
-    conns: Table[PeerId, HashSet[Connection]]
-    muxed: Table[Connection, Muxer]
+    muxed: Table[PeerId, seq[Muxer]]
    connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
    peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
    peerStore*: PeerStore
@@ -104,7 +102,7 @@ proc new*(C: type ConnManager,
    outSema: outSema)
 
 proc connCount*(c: ConnManager, peerId: PeerId): int =
-  c.conns.getOrDefault(peerId).len
+  c.muxed.getOrDefault(peerId).len
 
 proc addConnEventHandler*(c: ConnManager,
                           handler: ConnEventHandler,
@@ -189,14 +187,6 @@ proc triggerPeerEvents*(c: ConnManager,
     return
 
   try:
-    let count = c.connCount(peerId)
-    if event.kind == PeerEventKind.Joined and count != 1:
-      trace "peer already joined", peer = peerId, event = $event
-      return
-    elif event.kind == PeerEventKind.Left and count != 0:
-      trace "peer still connected or already left", peer = peerId, event = $event
-      return
-
     trace "triggering peer events", peer = peerId, event = $event
 
     var peerEvents: seq[Future[void]]
@@ -209,18 +199,8 @@ proc triggerPeerEvents*(c: ConnManager,
   except CatchableError as exc: # handlers should
not raise! warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId -proc contains*(c: ConnManager, conn: Connection): bool = - ## checks if a connection is being tracked by the - ## connection manager - ## - - if isNil(conn): - return - - return conn in c.conns.getOrDefault(conn.peerId) - proc contains*(c: ConnManager, peerId: PeerId): bool = - peerId in c.conns + peerId in c.muxed proc contains*(c: ConnManager, muxer: Muxer): bool = ## checks if a muxer is being tracked by the connection @@ -228,16 +208,10 @@ proc contains*(c: ConnManager, muxer: Muxer): bool = ## if isNil(muxer): - return + return false let conn = muxer.connection - if conn notin c: - return - - if conn notin c.muxed: - return - - return muxer == c.muxed.getOrDefault(conn) + return muxer in c.muxed.getOrDefault(conn.peerId) proc closeMuxer(muxer: Muxer) {.async.} = trace "Cleaning up muxer", m = muxer @@ -250,153 +224,108 @@ proc closeMuxer(muxer: Muxer) {.async.} = trace "Exception in close muxer handler", exc = exc.msg trace "Cleaned up muxer", m = muxer -proc delConn(c: ConnManager, conn: Connection) = - let peerId = conn.peerId - c.conns.withValue(peerId, peerConns): - peerConns[].excl(conn) - - if peerConns[].len == 0: - c.conns.del(peerId) # invalidates `peerConns` - - libp2p_peers.set(c.conns.len.int64) - trace "Removed connection", conn - -proc cleanupConn(c: ConnManager, conn: Connection) {.async.} = - ## clean connection's resources such as muxers and streams - - if isNil(conn): - trace "Wont cleanup a nil connection" - return - - # Remove connection from all tables without async breaks - var muxer = c.muxed.getOrDefault(conn) - c.muxed.del(conn) - delConn(c, conn) - - try: - if not muxer.isNil: - await closeMuxer(muxer) - finally: - await conn.close() - - trace "Connection cleaned up", conn - -proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} = +proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} = try: - trace "Triggering connect events", conn - - let peerId = conn.peerId - await c.triggerPeerEvents( - peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: conn.dir == Direction.Out)) - - await c.triggerConnEvent( - peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: conn.dir == Direction.In)) - except CatchableError as exc: - # This is top-level procedure which will work as separate task, so it - # do not need to propagate CancelledError and should handle other errors - warn "Unexpected exception in switch peer connection cleanup", - conn, msg = exc.msg - -proc peerCleanup(c: ConnManager, conn: Connection) {.async.} = - try: - trace "Triggering disconnect events", conn - let peerId = conn.peerId + trace "Triggering disconnect events", mux + let peerId = mux.connection.peerId await c.triggerConnEvent( peerId, ConnEvent(kind: ConnEventKind.Disconnected)) - await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left)) - if not(c.peerStore.isNil): - c.peerStore.cleanup(peerId) + let muxers = c.muxed.getOrDefault(peerId).filterIt(it != mux) + if muxers.len > 0: + c.muxed[peerId] = muxers + else: + await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left)) + + if not(c.peerStore.isNil): + c.peerStore.cleanup(peerId) + c.muxed.del(peerId) except CatchableError as exc: # This is top-level procedure which will work as separate task, so it # do not need to propagate CancelledError and should handle other errors warn "Unexpected exception peer cleanup handler", - conn, msg = exc.msg + mux, msg = exc.msg -proc onClose(c: ConnManager, conn: Connection) {.async.} = 
+proc onClose(c: ConnManager, mux: Muxer) {.async.} = ## connection close even handler ## ## triggers the connections resource cleanup ## try: - await conn.join() - trace "Connection closed, cleaning up", conn - await c.cleanupConn(conn) - except CancelledError: - # This is top-level procedure which will work as separate task, so it - # do not need to propagate CancelledError. - debug "Unexpected cancellation in connection manager's cleanup", conn + await mux.connection.join() + trace "Connection closed, cleaning up", mux except CatchableError as exc: debug "Unexpected exception in connection manager's cleanup", - errMsg = exc.msg, conn + errMsg = exc.msg, mux finally: - trace "Triggering peerCleanup", conn - asyncSpawn c.peerCleanup(conn) + await c.muxCleanup(mux) -proc selectConn*(c: ConnManager, +proc selectMuxer*(c: ConnManager, peerId: PeerId, - dir: Direction): Connection = + dir: Direction): Muxer = ## Select a connection for the provided peer and direction ## let conns = toSeq( - c.conns.getOrDefault(peerId)) - .filterIt( it.dir == dir ) + c.muxed.getOrDefault(peerId)) + .filterIt( it.connection.dir == dir ) if conns.len > 0: return conns[0] -proc selectConn*(c: ConnManager, peerId: PeerId): Connection = +proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer = ## Select a connection for the provided giving priority ## to outgoing connections ## - var conn = c.selectConn(peerId, Direction.Out) - if isNil(conn): - conn = c.selectConn(peerId, Direction.In) - if isNil(conn): + var mux = c.selectMuxer(peerId, Direction.Out) + if isNil(mux): + mux = c.selectMuxer(peerId, Direction.In) + if isNil(mux): trace "connection not found", peerId + return mux - return conn - -proc selectMuxer*(c: ConnManager, conn: Connection): Muxer = - ## select the muxer for the provided connection +proc storeMuxer*(c: ConnManager, + muxer: Muxer) + {.raises: [Defect, CatchableError].} = + ## store the connection and muxer ## - if isNil(conn): - return - - if conn in c.muxed: - return c.muxed.getOrDefault(conn) - else: - debug "no muxer for connection", conn - -proc storeConn*(c: ConnManager, conn: Connection) - {.raises: [Defect, LPError].} = - ## store a connection - ## + if isNil(muxer): + raise newException(LPError, "muxer cannot be nil") - if isNil(conn): - raise newException(LPError, "Connection cannot be nil") + if isNil(muxer.connection): + raise newException(LPError, "muxer's connection cannot be nil") - if conn.closed or conn.atEof: + if muxer.connection.closed or muxer.connection.atEof: raise newException(LPError, "Connection closed or EOF") - let peerId = conn.peerId - if c.conns.getOrDefault(peerId).len > c.maxConnsPerPeer: + let peerId = muxer.connection.peerId + if c.muxed.getOrDefault(peerId).len > c.maxConnsPerPeer: debug "Too many connections for peer", - conn, conns = c.conns.getOrDefault(peerId).len + conns = c.muxed.getOrDefault(peerId).len raise newTooManyConnectionsError() - c.conns.mgetOrPut(peerId, HashSet[Connection]()).incl(conn) - libp2p_peers.set(c.conns.len.int64) + assert muxer notin c.muxed.getOrDefault(peerId) + + let + newPeer = peerId in c.muxed + dir = muxer.connection.dir + c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer) + libp2p_peers.set(c.muxed.len.int64) - # Launch on close listener - # All the errors are handled inside `onClose()` procedure. 
- asyncSpawn c.onClose(conn) - trace "Stored connection", - conn, direction = $conn.dir, connections = c.conns.len + if newPeer: + asyncSpawn c.triggerPeerEvents( + peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out)) + + asyncSpawn c.triggerConnEvent( + peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In)) + + asyncSpawn c.onClose(muxer) + + trace "Stored muxer", + muxer, direction = $muxer.connection.dir, peers = c.muxed.len proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} = await c.inSema.acquire() @@ -438,36 +367,11 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) = return cs.trackConnection(mux.connection) -proc storeMuxer*(c: ConnManager, - muxer: Muxer) - {.raises: [Defect, CatchableError].} = - ## store the connection and muxer - ## - - if isNil(muxer): - raise newException(CatchableError, "muxer cannot be nil") - - if isNil(muxer.connection): - raise newException(CatchableError, "muxer's connection cannot be nil") - - if muxer.connection notin c: - raise newException(CatchableError, "cant add muxer for untracked connection") - - c.muxed[muxer.connection] = muxer - - trace "Stored muxer", - muxer, connections = c.conns.len - - asyncSpawn c.onConnUpgraded(muxer.connection) - proc getStream*(c: ConnManager, - peerId: PeerId, - dir: Direction): Future[Connection] {.async, gcsafe.} = - ## get a muxed stream for the provided peer - ## with the given direction + muxer: Muxer): Future[Connection] {.async, gcsafe.} = + ## get a muxed stream for the passed muxer ## - let muxer = c.selectMuxer(c.selectConn(peerId, dir)) if not(isNil(muxer)): return await muxer.newStream() @@ -476,49 +380,26 @@ proc getStream*(c: ConnManager, ## get a muxed stream for the passed peer from any connection ## - let muxer = c.selectMuxer(c.selectConn(peerId)) - if not(isNil(muxer)): - return await muxer.newStream() + return await c.getStream(c.selectMuxer(peerId)) proc getStream*(c: ConnManager, - conn: Connection): Future[Connection] {.async, gcsafe.} = - ## get a muxed stream for the passed connection + peerId: PeerId, + dir: Direction): Future[Connection] {.async, gcsafe.} = + ## get a muxed stream for the passed peer from a connection with `dir` ## - let muxer = c.selectMuxer(conn) - if not(isNil(muxer)): - return await muxer.newStream() + return await c.getStream(c.selectMuxer(peerId, dir)) -proc getStream*(c: ConnManager, - muxer: Muxer): Future[Connection] {.async, gcsafe.} = - ## get a muxed stream for the passed muxer - ## - - if not(isNil(muxer)): - return await muxer.newStream() proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} = ## drop connections and cleanup resources for peer ## trace "Dropping peer", peerId - let conns = c.conns.getOrDefault(peerId) - for conn in conns: - trace "Removing connection", conn - delConn(c, conn) - - var muxers: seq[Muxer] - for conn in conns: - if conn in c.muxed: - muxers.add c.muxed[conn] - c.muxed.del(conn) + let muxers = c.muxed.getOrDefault(peerId) for muxer in muxers: await closeMuxer(muxer) - for conn in conns: - await conn.close() - trace "Dropped peer", peerId - trace "Peer dropped", peerId proc close*(c: ConnManager) {.async.} = @@ -527,17 +408,11 @@ proc close*(c: ConnManager) {.async.} = ## trace "Closing ConnManager" - let conns = c.conns - c.conns.clear() - let muxed = c.muxed c.muxed.clear() - for _, muxer in muxed: - await closeMuxer(muxer) - - for _, conns2 in conns: - for conn in conns2: - await conn.close() + for _, muxers in muxed: + for mux in muxers: + await 
closeMuxer(mux) trace "Closed ConnManager" diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index 2ece45c34c..10ea537164 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -165,17 +165,9 @@ proc internalConnect( # Check if we have a connection already and try to reuse it var muxed = - if peerId.isSome: self.connManager.selectMuxer(self.connManager.selectConn(peerId.get())) + if peerId.isSome: self.connManager.selectMuxer(peerId.get()) else: nil if muxed != nil: - #if mux.atEof or mux.closed: - # # This connection should already have been removed from the connection - # # manager - it's essentially a bug that we end up here - we'll fail - # # for now, hoping that this will clean themselves up later... - # warn "dead connection in connection manager", conn - # await conn.close() - # raise newException(DialFailedError, "Zombie connection encountered") - trace "Reusing existing connection", direction = $muxed.connection.dir return muxed @@ -192,7 +184,6 @@ proc internalConnect( try: await self.peerStore.identify(muxed) - self.connManager.storeConn(muxed.connection) self.connManager.storeMuxer(muxed) except CatchableError as exc: trace "Failed to finish outgoung upgrade", err=exc.msg diff --git a/libp2p/muxers/muxer.nim b/libp2p/muxers/muxer.nim index 4f053f459b..afe5ffb80d 100644 --- a/libp2p/muxers/muxer.nim +++ b/libp2p/muxers/muxer.nim @@ -51,7 +51,9 @@ chronicles.formatIt(Muxer): shortLog(it) # muxer interface method newStream*(m: Muxer, name: string = "", lazy: bool = false): Future[Connection] {.base, async, gcsafe.} = discard -method close*(m: Muxer) {.base, async, gcsafe.} = discard +method close*(m: Muxer) {.base, async, gcsafe.} = + if not isNil(m.connection): + await m.connection.close() method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard proc new*( diff --git a/libp2p/switch.nim b/libp2p/switch.nim index dabd31126e..3c07468a63 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -193,7 +193,6 @@ proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} = try: let muxed = await trans.upgradeIncoming(conn) await switch.peerStore.identify(muxed) - switch.connManager.storeConn(muxed.connection) switch.connManager.storeMuxer(muxed) trace "Connection upgrade succeeded" except CatchableError as exc: diff --git a/tests/testconnmngr.nim b/tests/testconnmngr.nim index dda2e3dc19..51785cb5b0 100644 --- a/tests/testconnmngr.nim +++ b/tests/testconnmngr.nim @@ -10,8 +10,8 @@ import ../libp2p/[connmanager, import helpers -proc getConnection(peerId: PeerId, dir: Direction = Direction.In): Connection = - return Connection.new(peerId, dir, Opt.none(MultiAddress)) +proc getMuxer(peerId: PeerId, dir: Direction = Direction.In): Muxer = + return Muxer(connection: Connection.new(peerId, dir, Opt.none(MultiAddress))) type TestMuxer = ref object of Muxer @@ -22,71 +22,55 @@ method newStream*( name: string = "", lazy: bool = false): Future[Connection] {.async, gcsafe.} = - result = getConnection(m.peerId, Direction.Out) + result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress)) suite "Connection Manager": teardown: checkTrackers() - asyncTest "add and retrieve a connection": + asyncTest "add and retrieve a muxer": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) + let mux = getMuxer(peerId) - connMngr.storeConn(conn) - check conn in connMngr + connMngr.storeMuxer(mux) + check mux in connMngr - let peerConn = connMngr.selectConn(peerId) - 
check peerConn == conn - check peerConn.dir == Direction.In + let peerMux = connMngr.selectMuxer(peerId) + check peerMux == mux + check peerMux.connection.dir == Direction.In await connMngr.close() asyncTest "shouldn't allow a closed connection": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) - await conn.close() + let mux = getMuxer(peerId) + await mux.connection.close() expect CatchableError: - connMngr.storeConn(conn) + connMngr.storeMuxer(mux) await connMngr.close() asyncTest "shouldn't allow an EOFed connection": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) - conn.isEof = true + let mux = getMuxer(peerId) + mux.connection.isEof = true expect CatchableError: - connMngr.storeConn(conn) + connMngr.storeMuxer(mux) - await conn.close() + await mux.close() await connMngr.close() - asyncTest "add and retrieve a muxer": + asyncTest "shouldn't allow a muxer with no connection": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) - let muxer = new Muxer - muxer.connection = conn - - connMngr.storeConn(conn) - connMngr.storeMuxer(muxer) - check muxer in connMngr - - let peerMuxer = connMngr.selectMuxer(conn) - check peerMuxer == muxer - - await connMngr.close() - - asyncTest "shouldn't allow a muxer for an untracked connection": - let connMngr = ConnManager.new() - let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) - let muxer = new Muxer - muxer.connection = conn + let muxer = getMuxer(peerId) + let conn = muxer.connection + muxer.connection = nil expect CatchableError: connMngr.storeMuxer(muxer) @@ -98,33 +82,34 @@ suite "Connection Manager": asyncTest "get conn with direction": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn1 = getConnection(peerId, Direction.Out) - let conn2 = getConnection(peerId) + let mux1 = getMuxer(peerId, Direction.Out) + let mux2 = getMuxer(peerId) - connMngr.storeConn(conn1) - connMngr.storeConn(conn2) - check conn1 in connMngr - check conn2 in connMngr + connMngr.storeMuxer(mux1) + connMngr.storeMuxer(mux2) + check mux1 in connMngr + check mux2 in connMngr - let outConn = connMngr.selectConn(peerId, Direction.Out) - let inConn = connMngr.selectConn(peerId, Direction.In) + let outMux = connMngr.selectMuxer(peerId, Direction.Out) + let inMux = connMngr.selectMuxer(peerId, Direction.In) - check outConn != inConn - check outConn.dir == Direction.Out - check inConn.dir == Direction.In + check outMux != inMux + check outMux == mux1 + check inMux == mux2 + check outMux.connection.dir == Direction.Out + check inMux.connection.dir == Direction.In await connMngr.close() asyncTest "get muxed stream for peer": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) let muxer = new TestMuxer + let connection = Connection.new(peerId, Direction.In, Opt.none(MultiAddress)) muxer.peerId = peerId - muxer.connection = conn + muxer.connection = connection - connMngr.storeConn(conn) connMngr.storeMuxer(muxer) check muxer in connMngr @@ -133,18 +118,18 @@ suite "Connection Manager": check stream.peerId == peerId await connMngr.close() + await 
connection.close() await stream.close() asyncTest "get stream from directed connection": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) let muxer = new TestMuxer + let connection = Connection.new(peerId, Direction.In, Opt.none(MultiAddress)) muxer.peerId = peerId - muxer.connection = conn + muxer.connection = connection - connMngr.storeConn(conn) connMngr.storeMuxer(muxer) check muxer in connMngr @@ -155,63 +140,39 @@ suite "Connection Manager": await connMngr.close() await stream1.close() - - asyncTest "get stream from any connection": - let connMngr = ConnManager.new() - let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) - - let muxer = new TestMuxer - muxer.peerId = peerId - muxer.connection = conn - - connMngr.storeConn(conn) - connMngr.storeMuxer(muxer) - check muxer in connMngr - - let stream = await connMngr.getStream(conn) - check not(isNil(stream)) - - await connMngr.close() - await stream.close() + await connection.close() asyncTest "should raise on too many connections": let connMngr = ConnManager.new(maxConnsPerPeer = 1) let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - connMngr.storeConn(getConnection(peerId)) + connMngr.storeMuxer(getMuxer(peerId)) - let conns = @[ - getConnection(peerId), - getConnection(peerId)] + let muxs = @[ + getMuxer(peerId), + getMuxer(peerId)] expect TooManyConnectionsError: - connMngr.storeConn(conns[0]) - connMngr.storeConn(conns[1]) + connMngr.storeMuxer(muxs[0]) + connMngr.storeMuxer(muxs[1]) await connMngr.close() await allFuturesThrowing( - allFutures(conns.mapIt( it.close() ))) + allFutures(muxs.mapIt( it.close() ))) asyncTest "cleanup on connection close": let connMngr = ConnManager.new() let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet() - let conn = getConnection(peerId) - let muxer = new Muxer + let muxer = getMuxer(peerId) - muxer.connection = conn - connMngr.storeConn(conn) connMngr.storeMuxer(muxer) - check conn in connMngr check muxer in connMngr - await conn.close() - await sleepAsync(10.millis) + await muxer.close() - check conn notin connMngr - check muxer notin connMngr + checkExpiring: muxer notin connMngr await connMngr.close() @@ -224,23 +185,19 @@ suite "Connection Manager": Direction.In else: Direction.Out - let conn = getConnection(peerId, dir) - let muxer = new Muxer - muxer.connection = conn + let muxer = getMuxer(peerId, dir) - connMngr.storeConn(conn) connMngr.storeMuxer(muxer) - check conn in connMngr check muxer in connMngr - check not(isNil(connMngr.selectConn(peerId, dir))) + check not(isNil(connMngr.selectMuxer(peerId, dir))) check peerId in connMngr await connMngr.dropPeer(peerId) - check peerId notin connMngr - check isNil(connMngr.selectConn(peerId, Direction.In)) - check isNil(connMngr.selectConn(peerId, Direction.Out)) + checkExpiring: peerId notin connMngr + check isNil(connMngr.selectMuxer(peerId, Direction.In)) + check isNil(connMngr.selectMuxer(peerId, Direction.Out)) await connMngr.close() @@ -352,17 +309,17 @@ suite "Connection Manager": asyncTest "release slot on connection end": let connMngr = ConnManager.new(maxConnections = 3) - var conns: seq[Connection] + var muxs: seq[Muxer] for i in 0..<3: let slot = await ((connMngr.getOutgoingSlot()).wait(10.millis)) - let conn = - getConnection( + let muxer = + getMuxer( PeerId.init(PrivateKey.random(ECDSA, 
(newRng())[]).tryGet()).tryGet(), Direction.In) - slot.trackConnection(conn) - conns.add(conn) + slot.trackMuxer(muxer) + muxs.add(muxer) # should be full now let incomingSlot = connMngr.getIncomingSlot() @@ -370,7 +327,7 @@ suite "Connection Manager": check (await incomingSlot.withTimeout(10.millis)) == false await allFuturesThrowing( - allFutures(conns.mapIt( it.close() ))) + allFutures(muxs.mapIt( it.close() ))) check await incomingSlot.withTimeout(10.millis) From c0316e30b61a8305cd58dd40d68123760fc68025 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Wed, 23 Nov 2022 17:15:42 +0100 Subject: [PATCH 006/112] fix --- libp2p/switch.nim | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 3c07468a63..491312afc8 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -190,18 +190,10 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil) s.peerInfo.protocols.add(proto.codec) proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} = - try: - let muxed = await trans.upgradeIncoming(conn) - await switch.peerStore.identify(muxed) - switch.connManager.storeMuxer(muxed) - trace "Connection upgrade succeeded" - except CatchableError as exc: - if exc isnot CancelledError: - libp2p_failed_upgrades_incoming.inc() - if not isNil(conn): - await conn.close() - - trace "Exception awaiting connection upgrade", exc = exc.msg, conn + let muxed = await trans.upgradeIncoming(conn) + await switch.peerStore.identify(muxed) + switch.connManager.storeMuxer(muxed) + trace "Connection upgrade succeeded" proc upgradeMonitor( switch: Switch, @@ -210,6 +202,12 @@ proc upgradeMonitor( upgrades: AsyncSemaphore) {.async.} = try: await switch.upgrader(trans, conn).wait(30.seconds) + except CatchableError as exc: + if exc isnot CancelledError: + libp2p_failed_upgrades_incoming.inc() + if not isNil(conn): + await conn.close() + trace "Exception awaiting connection upgrade", exc = exc.msg, conn finally: upgrades.release() From 12ac83f25f763b3c2b81bc1411cde296ea9b5218 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Thu, 24 Nov 2022 17:08:38 +0100 Subject: [PATCH 007/112] fix tests --- libp2p.nimble | 2 +- libp2p/connmanager.nim | 16 +++++++++------- tests/pubsub/testgossipinternal.nim | 3 ++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/libp2p.nimble b/libp2p.nimble index ac1407905a..47e92dccc3 100644 --- a/libp2p.nimble +++ b/libp2p.nimble @@ -22,7 +22,7 @@ requires "nim >= 1.2.0", import hashes proc runTest(filename: string, verify: bool = true, sign: bool = true, moreoptions: string = "") = - var excstr = "nim c --skipParentCfg --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics " + var excstr = "nim c --skipParentCfg --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics -d:unittestPrintTime " excstr.add(" -d:chronicles_sinks=textlines[stdout],json[dynamic] -d:chronicles_log_level=TRACE ") excstr.add(" -d:chronicles_runtime_filtering=TRUE ") excstr.add(" " & getEnv("NIMFLAGS") & " ") diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim index 1d9dbf7f5a..2aa80702ab 100644 --- a/libp2p/connmanager.nim +++ b/libp2p/connmanager.nim @@ -228,18 +228,20 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} = try: trace "Triggering disconnect events", mux let peerId = mux.connection.peerId - await c.triggerConnEvent( - peerId, 
ConnEvent(kind: ConnEventKind.Disconnected)) let muxers = c.muxed.getOrDefault(peerId).filterIt(it != mux) if muxers.len > 0: c.muxed[peerId] = muxers else: + c.muxed.del(peerId) + libp2p_peers.set(c.muxed.len.int64) await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left)) if not(c.peerStore.isNil): c.peerStore.cleanup(peerId) - c.muxed.del(peerId) + + await c.triggerConnEvent( + peerId, ConnEvent(kind: ConnEventKind.Disconnected)) except CatchableError as exc: # This is top-level procedure which will work as separate task, so it # do not need to propagate CancelledError and should handle other errors @@ -309,19 +311,19 @@ proc storeMuxer*(c: ConnManager, assert muxer notin c.muxed.getOrDefault(peerId) let - newPeer = peerId in c.muxed + newPeer = peerId notin c.muxed dir = muxer.connection.dir + assert newPeer or c.muxed[peerId].len > 0 c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer) libp2p_peers.set(c.muxed.len.int64) + asyncSpawn c.triggerConnEvent( + peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In)) if newPeer: asyncSpawn c.triggerPeerEvents( peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out)) - asyncSpawn c.triggerConnEvent( - peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In)) - asyncSpawn c.onClose(muxer) trace "Stored muxer", diff --git a/tests/pubsub/testgossipinternal.nim b/tests/pubsub/testgossipinternal.nim index 91ad4c0f55..87cc7b3f67 100644 --- a/tests/pubsub/testgossipinternal.nim +++ b/tests/pubsub/testgossipinternal.nim @@ -10,6 +10,7 @@ import ../../libp2p/errors import ../../libp2p/crypto/crypto import ../../libp2p/stream/bufferstream import ../../libp2p/switch +import ../../libp2p/muxers/muxer import ../helpers @@ -496,7 +497,7 @@ suite "GossipSub internal": peer.handler = handler peer.appScore = gossipSub.parameters.graylistThreshold - 1 gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer) - gossipSub.switch.connManager.storeConn(conn) + gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn)) gossipSub.updateScores() From 7ad1ea812078ee38b6c89665c6c861379ece5c0a Mon Sep 17 00:00:00 2001 From: Tanguy Date: Fri, 25 Nov 2022 14:21:45 +0100 Subject: [PATCH 008/112] try fix test --- tests/pubsub/testgossipsub.nim | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/pubsub/testgossipsub.nim b/tests/pubsub/testgossipsub.nim index ab6df3af1b..2d2600a79b 100644 --- a/tests/pubsub/testgossipsub.nim +++ b/tests/pubsub/testgossipsub.nim @@ -107,11 +107,7 @@ suite "GossipSub": nodes[0].subscribe("foobar", handler) nodes[1].subscribe("foobar", handler) - var subs: seq[Future[void]] - subs &= waitSub(nodes[1], nodes[0], "foobar") - subs &= waitSub(nodes[0], nodes[1], "foobar") - - await allFuturesThrowing(subs) + await waitSubGraph(nodes, "foobar") let gossip1 = GossipSub(nodes[0]) let gossip2 = GossipSub(nodes[1]) From 263b9e8e2eebc721e59aa6b3a1af322eb1d2c85c Mon Sep 17 00:00:00 2001 From: Tanguy Date: Fri, 25 Nov 2022 16:50:41 +0100 Subject: [PATCH 009/112] Fix GossipSub race condition --- libp2p/protocols/pubsub/pubsubpeer.nim | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim index 5a1afedbd1..815a681ea7 100644 --- a/libp2p/protocols/pubsub/pubsubpeer.nim +++ b/libp2p/protocols/pubsub/pubsubpeer.nim @@ -55,6 +55,7 @@ type onEvent*: OnEvent # Connectivity updates for peer codec*: string # the protocol that this peer joined from 
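    # Note on the flow this patch introduces: `connectedFut` is completed by
    # `connectOnce` once a send connection has been obtained (and failed in its
    # `finally` if the attempt did not succeed), while `handle` awaits it before
    # reading, so subscriptions are only exchanged once the link is bidirectional.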
sendConn*: Connection # cached send connection + connectedFut: Future[void] address*: Option[MultiAddress] peerId*: PeerId handler*: RPCHandler @@ -120,6 +121,8 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} = conn, peer = p, closed = conn.closed try: try: + # wait for bidirectional connection + await p.connectedFut while not conn.atEof: trace "waiting for data", conn, peer = p, closed = conn.closed @@ -174,6 +177,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = # stop working so we make an effort to only keep a single channel alive trace "Get new send connection", p, newConn + p.connectedFut.complete() p.sendConn = newConn p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress) @@ -182,6 +186,8 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = await handle(p, newConn) finally: + if not p.connectedFut.finished(): + p.connectedFut.fail(newException(LPError, "can't establish conn")) if p.sendConn != nil: trace "Removing send connection", p, conn = p.sendConn await p.sendConn.close() @@ -292,5 +298,6 @@ proc new*( onEvent: onEvent, codec: codec, peerId: peerId, + connectedFut: newFuture[void](), maxMessageSize: maxMessageSize ) From 7b98af68055894b3573134f473a2a8b72f664306 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Fri, 25 Nov 2022 16:54:33 +0100 Subject: [PATCH 010/112] fix more race conditions --- tests/pubsub/testgossipsub.nim | 47 ++++++++++++++++------------------ tests/pubsub/utils.nim | 18 ++++++++----- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/tests/pubsub/testgossipsub.nim b/tests/pubsub/testgossipsub.nim index 2d2600a79b..924bdf825e 100644 --- a/tests/pubsub/testgossipsub.nim +++ b/tests/pubsub/testgossipsub.nim @@ -153,11 +153,7 @@ suite "GossipSub": nodes[0].subscribe("foobar", handler) nodes[1].subscribe("foobar", handler) - var subs: seq[Future[void]] - subs &= waitSub(nodes[1], nodes[0], "foobar") - subs &= waitSub(nodes[0], nodes[1], "foobar") - - await allFuturesThrowing(subs) + await waitSubGraph(nodes, "foobar") let gossip1 = GossipSub(nodes[0]) let gossip2 = GossipSub(nodes[1]) @@ -420,8 +416,6 @@ suite "GossipSub": await passed.wait(2.seconds) - trace "test done, stopping..." 
- await allFuturesThrowing( nodes[0].switch.stop(), nodes[1].switch.stop() @@ -448,21 +442,23 @@ suite "GossipSub": nodes[1].switch.start(), ) + GossipSub(nodes[1]).parameters.d = 0 + GossipSub(nodes[1]).parameters.dHigh = 0 + GossipSub(nodes[1]).parameters.dLow = 0 + await subscribeNodes(nodes) - nodes[1].subscribe("foobar", handler) nodes[0].subscribe("foobar", handler) - await waitSub(nodes[0], nodes[1], "foobar") - await waitSub(nodes[1], nodes[0], "foobar") - - nodes[0].unsubscribe("foobar", handler) + nodes[1].subscribe("foobar", handler) let gsNode = GossipSub(nodes[1]) - checkExpiring: gsNode.mesh.getOrDefault("foobar").len == 0 - - nodes[0].subscribe("foobar", handler) - - check GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0 + checkExpiring: + gsNode.mesh.getOrDefault("foobar").len == 0 and + GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0 and + ( + GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or + GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1 + ) tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1 @@ -528,8 +524,8 @@ suite "GossipSub": asyncTest "e2e - GossipSub should not send to source & peers who already seen": # 3 nodes: A, B, C - # A publishes, B relays, C is having a long validation - # so C should not send to anyone + # A publishes, C relays, B is having a long validation + # so B should not send to anyone let nodes = generateNodes( @@ -562,10 +558,7 @@ suite "GossipSub": nodes[0].subscribe("foobar", handlerA) nodes[1].subscribe("foobar", handlerB) nodes[2].subscribe("foobar", handlerC) - await waitSub(nodes[0], nodes[1], "foobar") - await waitSub(nodes[0], nodes[2], "foobar") - await waitSub(nodes[2], nodes[1], "foobar") - await waitSub(nodes[1], nodes[2], "foobar") + await waitSubGraph(nodes, "foobar") var gossip1: GossipSub = GossipSub(nodes[0]) var gossip2: GossipSub = GossipSub(nodes[1]) @@ -583,7 +576,11 @@ suite "GossipSub": nodes[1].addValidator("foobar", slowValidator) - tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1 + checkExpiring( + gossip1.mesh.getOrDefault("foobar").len == 2 and + gossip2.mesh.getOrDefault("foobar").len == 2 and + gossip3.mesh.getOrDefault("foobar").len == 2) + tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2 await bFinished @@ -625,7 +622,7 @@ suite "GossipSub": tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1 - check await passed + check await passed.wait(10.seconds) check: "foobar" in gossip1.gossipsub diff --git a/tests/pubsub/utils.nim b/tests/pubsub/utils.nim index 095c68c454..6ac49b9b8b 100644 --- a/tests/pubsub/utils.nim +++ b/tests/pubsub/utils.nim @@ -132,13 +132,17 @@ proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} = seen: HashSet[PeerId] for n in nodes: nodesMesh[n.peerInfo.peerId] = toSeq(GossipSub(n).mesh.getOrDefault(key).items()).mapIt(it.peerId) - proc explore(p: PeerId) = - if p in seen: return - seen.incl(p) - for peer in nodesMesh.getOrDefault(p): - explore(peer) - explore(nodes[0].peerInfo.peerId) - if seen.len == nodes.len: return + var ok = 0 + for n in nodes: + seen.clear() + proc explore(p: PeerId) = + if p in seen: return + seen.incl(p) + for peer in nodesMesh.getOrDefault(p): + explore(peer) + explore(n.peerInfo.peerId) + if seen.len == nodes.len: ok.inc() + if ok == nodes.len: return trace "waitSubGraph sleeping..." 
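    # The pass above only succeeds once *every* node can reach all of `nodes`
    # through its own mesh for `key` (ok == nodes.len), rather than merely
    # checking reachability from nodes[0]; until then we fall through and poll.
    # Hypothetical usage sketch, assuming a test `handler` like the ones above:
    #   let nodes = generateNodes(3, gossip = true)
    #   await subscribeNodes(nodes)
    #   for n in nodes: n.subscribe("foobar", handler)
    #   await waitSubGraph(nodes, "foobar")  # returns once the mesh graph is connected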
await sleepAsync(5.milliseconds) From 99bc8ee88a1bde448dcec6a84bfad11b8a35db23 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Fri, 25 Nov 2022 16:50:41 +0100 Subject: [PATCH 011/112] Fix GossipSub race condition --- libp2p/protocols/pubsub/pubsubpeer.nim | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim index 5a1afedbd1..815a681ea7 100644 --- a/libp2p/protocols/pubsub/pubsubpeer.nim +++ b/libp2p/protocols/pubsub/pubsubpeer.nim @@ -55,6 +55,7 @@ type onEvent*: OnEvent # Connectivity updates for peer codec*: string # the protocol that this peer joined from sendConn*: Connection # cached send connection + connectedFut: Future[void] address*: Option[MultiAddress] peerId*: PeerId handler*: RPCHandler @@ -120,6 +121,8 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} = conn, peer = p, closed = conn.closed try: try: + # wait for bidirectional connection + await p.connectedFut while not conn.atEof: trace "waiting for data", conn, peer = p, closed = conn.closed @@ -174,6 +177,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = # stop working so we make an effort to only keep a single channel alive trace "Get new send connection", p, newConn + p.connectedFut.complete() p.sendConn = newConn p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress) @@ -182,6 +186,8 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = await handle(p, newConn) finally: + if not p.connectedFut.finished(): + p.connectedFut.fail(newException(LPError, "can't establish conn")) if p.sendConn != nil: trace "Removing send connection", p, conn = p.sendConn await p.sendConn.close() @@ -292,5 +298,6 @@ proc new*( onEvent: onEvent, codec: codec, peerId: peerId, + connectedFut: newFuture[void](), maxMessageSize: maxMessageSize ) From a6cec9ff5e549ece3ae8824cc958c4172f014691 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Mon, 2 Jan 2023 18:24:21 +0100 Subject: [PATCH 012/112] Better fix --- libp2p/protocols/pubsub/pubsub.nim | 7 ++-- libp2p/protocols/pubsub/pubsubpeer.nim | 56 ++++++++++++-------------- 2 files changed, 28 insertions(+), 35 deletions(-) diff --git a/libp2p/protocols/pubsub/pubsub.nim b/libp2p/protocols/pubsub/pubsub.nim index ab010a3664..2fed1ce0cc 100644 --- a/libp2p/protocols/pubsub/pubsub.nim +++ b/libp2p/protocols/pubsub/pubsub.nim @@ -202,7 +202,7 @@ proc broadcast*( # Fast path that only encodes message once let encoded = encodeRpcMsg(msg, p.anonymize) for peer in sendPeers: - peer.sendEncoded(encoded) + asyncSpawn peer.sendEncoded(encoded) proc sendSubs*(p: PubSub, peer: PubSubPeer, @@ -307,8 +307,6 @@ proc getOrCreatePeer*( # metrics libp2p_pubsub_peers.set(p.peers.len.int64) - pubSubPeer.connect() - return pubSubPeer proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] = @@ -382,7 +380,8 @@ method subscribePeer*(p: PubSub, peer: PeerId) {.base, gcsafe.} = ## messages ## - discard p.getOrCreatePeer(peer, p.codecs) + let pubSubPeer = p.getOrCreatePeer(peer, p.codecs) + pubSubPeer.connect() proc updateTopicMetrics(p: PubSub, topic: string) = # metrics diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim index 815a681ea7..eba4e06de8 100644 --- a/libp2p/protocols/pubsub/pubsubpeer.nim +++ b/libp2p/protocols/pubsub/pubsubpeer.nim @@ -121,8 +121,6 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} = conn, peer = p, closed = conn.closed try: try: - # wait for bidirectional connection - await 
p.connectedFut while not conn.atEof: trace "waiting for data", conn, peer = p, closed = conn.closed @@ -168,6 +166,8 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} = proc connectOnce(p: PubSubPeer): Future[void] {.async.} = try: + if p.connectedFut.finished: + p.connectedFut = newFuture[void]() let newConn = await p.getConn() if newConn.isNil: raise (ref LPError)(msg: "Cannot establish send connection") @@ -186,8 +186,6 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = await handle(p, newConn) finally: - if not p.connectedFut.finished(): - p.connectedFut.fail(newException(LPError, "can't establish conn")) if p.sendConn != nil: trace "Removing send connection", p, conn = p.sendConn await p.sendConn.close() @@ -214,27 +212,10 @@ proc connectImpl(p: PubSubPeer) {.async.} = debug "Could not establish send connection", msg = exc.msg proc connect*(p: PubSubPeer) = - asyncSpawn connectImpl(p) - -proc sendImpl(conn: Connection, encoded: seq[byte]): Future[void] {.raises: [Defect].} = - trace "sending encoded msgs to peer", conn, encoded = shortLog(encoded) - - let fut = conn.writeLp(encoded) # Avoid copying `encoded` into future - proc sendWaiter(): Future[void] {.async.} = - try: - await fut - trace "sent pubsub message to remote", conn - - except CatchableError as exc: # never cancelled - # Because we detach the send call from the currently executing task using - # asyncSpawn, no exceptions may leak out of it - trace "Unable to send to remote", conn, msg = exc.msg - # Next time sendConn is used, it will be have its close flag set and thus - # will be recycled - - await conn.close() # This will clean up the send connection + if p.connected: + return - return sendWaiter() + asyncSpawn connectImpl(p) template sendMetrics(msg: RPCMsg): untyped = when defined(libp2p_expensive_metrics): @@ -243,7 +224,7 @@ template sendMetrics(msg: RPCMsg): untyped = # metrics libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t]) -proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} = +proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} = doAssert(not isNil(p), "pubsubpeer nil!") if msg.len <= 0: @@ -254,14 +235,27 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} = info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len return - let conn = p.sendConn + if p.sendConn == nil: + discard await p.connectedFut.withTimeout(1.seconds) + + var conn = p.sendConn if conn == nil or conn.closed(): - trace "No send connection, skipping message", p, msg = shortLog(msg) + debug "No send connection, skipping message", p, msg = shortLog(msg) return - # To limit the size of the closure, we only pass the encoded message and - # connection to the spawned send task - asyncSpawn sendImpl(conn, msg) + trace "sending encoded msgs to peer", conn, encoded = shortLog(encoded) + + try: + await conn.writeLp(msg) + trace "sent pubsub message to remote", conn + except CatchableError as exc: # never cancelled + # Because we detach the send call from the currently executing task using + # asyncSpawn, no exceptions may leak out of it + trace "Unable to send to remote", conn, msg = exc.msg + # Next time sendConn is used, it will be have its close flag set and thus + # will be recycled + + await conn.close() # This will clean up the send connection proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} = trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg) @@ -283,7 +277,7 @@ proc send*(p: 
PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} = sendMetrics(msg) encodeRpcMsg(msg, anonymize) - p.sendEncoded(encoded) + asyncSpawn p.sendEncoded(encoded) proc new*( T: typedesc[PubSubPeer], From 92a425cc0cd5a8fa130f41f49b212c00f24cb926 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Tue, 3 Jan 2023 10:44:12 +0100 Subject: [PATCH 013/112] Fix typo --- libp2p/protocols/pubsub/pubsubpeer.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim index eba4e06de8..a40332de47 100644 --- a/libp2p/protocols/pubsub/pubsubpeer.nim +++ b/libp2p/protocols/pubsub/pubsubpeer.nim @@ -243,7 +243,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} = debug "No send connection, skipping message", p, msg = shortLog(msg) return - trace "sending encoded msgs to peer", conn, encoded = shortLog(encoded) + trace "sending encoded msgs to peer", conn, encoded = shortLog(msg) try: await conn.writeLp(msg) From 7a74dd3d9a49240f3d5c919d6ab07d90b0172228 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Tue, 3 Jan 2023 15:56:36 +0100 Subject: [PATCH 014/112] fix ci --- libp2p/protocols/pubsub/pubsubpeer.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim index c905f6ae51..270526db4e 100644 --- a/libp2p/protocols/pubsub/pubsubpeer.nim +++ b/libp2p/protocols/pubsub/pubsubpeer.nim @@ -106,14 +106,14 @@ proc recvObservers(p: PubSubPeer, msg: var RPCMsg) = # trigger hooks if not(isNil(p.observers)) and p.observers[].len > 0: for obs in p.observers[]: - if not(isNil(obs)): # TODO: should never be nil, but... + if not(isNil(obs.onRecv)): obs.onRecv(p, msg) proc sendObservers(p: PubSubPeer, msg: var RPCMsg) = # trigger hooks if not(isNil(p.observers)) and p.observers[].len > 0: for obs in p.observers[]: - if not(isNil(obs)): # TODO: should never be nil, but... + if not(isNil(obs.onSend)): obs.onSend(p, msg) proc handle*(p: PubSubPeer, conn: Connection) {.async.} = From c279e5ad05bf4a324f2aa60e7cfc6005a7ab464c Mon Sep 17 00:00:00 2001 From: Tanguy Date: Wed, 25 Jan 2023 17:40:22 +0100 Subject: [PATCH 015/112] Fix pubsub --- libp2p/protocols/pubsub/pubsub.nim | 6 +++++- libp2p/protocols/pubsub/pubsubpeer.nim | 15 +++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/libp2p/protocols/pubsub/pubsub.nim b/libp2p/protocols/pubsub/pubsub.nim index 1cc8d9810f..2804bafa13 100644 --- a/libp2p/protocols/pubsub/pubsub.nim +++ b/libp2p/protocols/pubsub/pubsub.nim @@ -405,7 +405,11 @@ method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base, # Notify others that we are no longer interested in the topic for _, peer in p.peers: - p.sendSubs(peer, [topic], subscribed) + # If we don't have a sendConn yet, we will + # send the full sub list when we get the sendConn, + # so no need to send it here + if peer.hasSendConn: + p.sendSubs(peer, [topic], subscribed) if subscribed: libp2p_pubsub_subscriptions.inc() diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim index aaf4173914..b925b23788 100644 --- a/libp2p/protocols/pubsub/pubsubpeer.nim +++ b/libp2p/protocols/pubsub/pubsubpeer.nim @@ -106,14 +106,14 @@ proc recvObservers(p: PubSubPeer, msg: var RPCMsg) = # trigger hooks if not(isNil(p.observers)) and p.observers[].len > 0: for obs in p.observers[]: - if not(isNil(obs.onRecv)): + if not(isNil(obs)): # TODO: should never be nil, but... 
obs.onRecv(p, msg) proc sendObservers(p: PubSubPeer, msg: var RPCMsg) = # trigger hooks if not(isNil(p.observers)) and p.observers[].len > 0: for obs in p.observers[]: - if not(isNil(obs.onSend)): + if not(isNil(obs)): # TODO: should never be nil, but... obs.onSend(p, msg) proc handle*(p: PubSubPeer, conn: Connection) {.async.} = @@ -121,8 +121,6 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} = conn, peer = p, closed = conn.closed try: try: - # wait for bidirectional connection - await p.connectedFut while not conn.atEof: trace "waiting for data", conn, peer = p, closed = conn.closed @@ -179,6 +177,10 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = # stop working so we make an effort to only keep a single channel alive trace "Get new send connection", p, newConn + + # Careful to race conditions here. + # Topic subscription relies on either connectedFut + # to be completed, or onEvent to be called later p.connectedFut.complete() p.sendConn = newConn p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress) @@ -188,8 +190,6 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} = await handle(p, newConn) finally: - if not p.connectedFut.finished(): - p.connectedFut.fail(newException(LPError, "can't establish conn")) if p.sendConn != nil: trace "Removing send connection", p, conn = p.sendConn await p.sendConn.close() @@ -221,6 +221,9 @@ proc connect*(p: PubSubPeer) = asyncSpawn connectImpl(p) +proc hasSendConn*(p: PubSubPeer): bool = + p.sendConn != nil + template sendMetrics(msg: RPCMsg): untyped = when defined(libp2p_expensive_metrics): for x in msg.messages: From 5c6fe92337c49bf2a5f0f3a2b2def17ea0fe8369 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Mon, 30 Jan 2023 15:00:01 +0100 Subject: [PATCH 016/112] fix short agent --- libp2p/peerstore.nim | 5 +++-- libp2p/stream/connection.nim | 7 +++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 7c18c437cd..5cb1df44b0 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -210,11 +210,12 @@ proc identify*( let info = await peerStore.identify.identify(stream, stream.peerId) when defined(libp2p_agents_metrics): - muxer.connection.shortAgent = "unknown" + var knownAgent = "unknown" if info.agentVersion.isSome and info.agentVersion.get().len > 0: let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii() if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()): - muxer.connection.shortAgent = shortAgent.get() + knownAgent = shortAgent.get() + muxer.connection.setShortAgent(knownAgent) peerStore.updatePeerInfo(info) finally: diff --git a/libp2p/stream/connection.nim b/libp2p/stream/connection.nim index a69b9c3355..86539a0588 100644 --- a/libp2p/stream/connection.nim +++ b/libp2p/stream/connection.nim @@ -134,6 +134,13 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} = method getWrapped*(s: Connection): Connection {.base.} = doAssert(false, "not implemented!") +when defined(libp2p_agents_metrics): + proc setShortAgent*(s: Connection, shortAgent: string) = + var conn = s + while not isNil(conn): + conn.shortAgent = shortAgent + conn = conn.getWrapped() + proc new*(C: type Connection, peerId: PeerId, dir: Direction, From 5dc290bc43cf63fa3a1e47adaa747c53df9bf0cf Mon Sep 17 00:00:00 2001 From: Tanguy Date: Mon, 30 Jan 2023 17:46:08 +0100 Subject: [PATCH 017/112] Update libp2p/multistream.nim Co-authored-by: diegomrsantos --- libp2p/multistream.nim | 14 +++++--------- 1 
file changed, 5 insertions(+), 9 deletions(-) diff --git a/libp2p/multistream.nim b/libp2p/multistream.nim index 7cc1b923df..9f9e75f1b9 100644 --- a/libp2p/multistream.nim +++ b/libp2p/multistream.nim @@ -166,16 +166,12 @@ proc handle*( trace "handle: sending `na` for duplicate handshake while handshaked", conn await conn.writeLp(Na) + elif ms in protos or matchers.anyIt(it(ms)): + trace "found handler", conn, protocol = ms + await conn.writeLp(ms & "\n") + conn.protocol = ms + return ms else: - var found = ms in protos - if not found: - for matcher in matchers: - if matcher(ms): found = true - if found: - trace "found handler", conn, protocol = ms - await conn.writeLp(ms & "\n") - conn.protocol = ms - return ms trace "no handlers", conn, protocol = ms await conn.writeLp(Na) From de06b46b36818176ffeabfdc166faa6934668ef6 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Tue, 21 Feb 2023 11:30:38 +0100 Subject: [PATCH 018/112] Add getWrapped to YamuxChannel --- libp2p/muxers/yamux/yamux.nim | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libp2p/muxers/yamux/yamux.nim b/libp2p/muxers/yamux/yamux.nim index f60cf72f9e..8d94b719d1 100644 --- a/libp2p/muxers/yamux/yamux.nim +++ b/libp2p/muxers/yamux/yamux.nim @@ -356,6 +356,8 @@ proc open*(channel: YamuxChannel) {.async, gcsafe.} = channel.opened = true await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack})) +method getWrapped*(channel: YamuxChannel): Connection = channel.conn + type Yamux* = ref object of Muxer channels: Table[uint32, YamuxChannel] From 685966c531ca739ca05a3d212430920fd4b95dc8 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Tue, 21 Feb 2023 13:48:58 +0100 Subject: [PATCH 019/112] fix autonat --- libp2p/protocols/connectivity/autonat/service.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index afbf3ca817..398feb3ced 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -152,7 +152,7 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = if hasBeenSetup: if self.askNewConnectedPeers: self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} = - if switch.connManager.selectConn(peerId, In) != nil: # no need to ask an incoming peer + if switch.connManager.selectMuxer(peerId, In) != nil: # no need to ask an incoming peer return discard askPeer(self, switch, peerId) await self.callHandler() From c9cc2bc2b605f5facb57b25c127164fd708e21fb Mon Sep 17 00:00:00 2001 From: Tanguy Date: Thu, 2 Mar 2023 15:44:33 +0100 Subject: [PATCH 020/112] Trigger events before identify --- libp2p/dialer.nim | 2 +- libp2p/switch.nim | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index d18432509f..a5e7e0c270 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -189,8 +189,8 @@ proc internalConnect( raise newException(DialFailedError, "Unable to establish outgoing link") try: - await self.peerStore.identify(muxed) self.connManager.storeMuxer(muxed) + await self.peerStore.identify(muxed) except CatchableError as exc: trace "Failed to finish outgoung upgrade", err=exc.msg await muxed.close() diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 2aa42a484d..672eca123d 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -222,8 +222,8 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil) 
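# As in the dialer.nim hunk above, the muxer is now stored (firing the
# Joined/Connected events) before identify runs; an identify failure then
# surfaces in `upgradeMonitor`, which closes the connection.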
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} = let muxed = await trans.upgradeIncoming(conn) - await switch.peerStore.identify(muxed) switch.connManager.storeMuxer(muxed) + await switch.peerStore.identify(muxed) trace "Connection upgrade succeeded" proc upgradeMonitor( From 1fee13cb51cfdeba584d3f2982121ecc6bbb9779 Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 2 Mar 2023 21:45:31 +0100 Subject: [PATCH 021/112] ObservedMAManager --- .pinned | 2 +- libp2p/peerstore.nim | 85 ++++++++++++++++++- .../connectivity/autonat/service.nim | 65 +++++++++++++- 3 files changed, 148 insertions(+), 4 deletions(-) diff --git a/.pinned b/.pinned index a9b5e63f5a..501285242a 100644 --- a/.pinned +++ b/.pinned @@ -1,6 +1,6 @@ bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a -chronos;https://github.com/status-im/nim-chronos@#5d3da66e563d21277b57a9b601744273c083a01b +chronos;https://github.com/status-im/nim-chronos@#8ed5307544540d04a8caa666c0703de9e6b06d58 dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823 faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 5cb1df44b0..03a8745155 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -27,7 +27,7 @@ else: {.push raises: [].} import - std/[tables, sets, options, macros], + std/[tables, sets, options, macros, heapqueue], chronos, ./crypto/crypto, ./protocols/identify, @@ -40,6 +40,82 @@ import ./muxers/muxer, utility +type + ObservedMA = object + ma: MultiAddress + count: int + + ObservedMAManager* = ref object of RootObj + observedIPs: HeapQueue[ObservedMA] + observedIPsAndPorts: HeapQueue[ObservedMA] + maxSize: int + minCount: int + +proc `<`(a, b: ObservedMA): bool = a.count < b.count +proc `==`(a, b: ObservedMA): bool = a.ma == b.ma + +proc add*(self:ObservedMAManager, heap: var HeapQueue[ObservedMA], observedMA: MultiAddress) = + if heap.len >= self.maxSize: + discard heap.pop() + + let idx = heap.find(ObservedMA(ma: observedMA, count: 0)) + if idx >= 0: + let observedMA = heap[idx] + heap.del(idx) + heap.push(ObservedMA(ma: observedMA.ma, count: observedMA.count + 1)) + else: + heap.push(ObservedMA(ma: observedMA, count: 1)) + +proc add*(self:ObservedMAManager, observedMA: MultiAddress) = + self.add(self.observedIPs, observedMA[0].get()) + self.add(self.observedIPsAndPorts, observedMA) + +proc getIP(self: ObservedMAManager, heap: HeapQueue[ObservedMA], ipVersion: MaPattern): Opt[MultiAddress] = + var i = 1 + while heap.len - i >= 0: + let observedMA = heap[heap.len - i] + if ipVersion.match(observedMA.ma[0].get()) and observedMA.count >= self.minCount: + return Opt.some(observedMA.ma) + else: + i = i + 1 + return Opt.none(MultiAddress) + +proc getIP6*(self: ObservedMAManager): Opt[MultiAddress] = + ## Returns the most observed IP6 address + return self.getIP(self.observedIPs, IP6) + +proc getIP4*(self: ObservedMAManager): Opt[MultiAddress] = + ## Returns the most observed IP4 address + return self.getIP(self.observedIPs, IP4) + +proc getIP6AndPort*(self: ObservedMAManager): Opt[MultiAddress] = + ## Returns the most observed IP6 address + return self.getIP(self.observedIPsAndPorts, IP6) + +proc getIP4AndPort*(self: ObservedMAManager): 
Opt[MultiAddress] = + ## Returns the most observed IP4 address + return self.getIP(self.observedIPsAndPorts, IP4) + +proc getIPsAndPorts*(self: ObservedMAManager): seq[MultiAddress] = + ## Returns the most observed IP4 and IP6 address + var res: seq[MultiAddress] + if self.getIP4().isSome(): + res.add(self.getIP4().get()) + if self.getIP6().isSome(): + res.add(self.getIP6().get()) + return res + +proc `$`*(self: ObservedMAManager): string = + return "IPs: " & $self.observedIPs & "; IPs and Ports: " & $self.observedIPsAndPorts + +proc new*( + T: typedesc[ObservedMAManager]): T = + return T( + observedIPs: initHeapQueue[ObservedMA](), + observedIPsAndPorts: initHeapQueue[ObservedMA](), + maxSize: 10, + minCount: 3) + type ################# # Handler types # @@ -78,11 +154,13 @@ type identify: Identify capacity*: int toClean*: seq[PeerId] + observedMAManager*: ObservedMAManager proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} = T( identify: identify, - capacity: capacity + capacity: capacity, + observedMAManager: ObservedMAManager.new(), ) ######################### @@ -209,6 +287,9 @@ proc identify*( if (await MultistreamSelect.select(stream, peerStore.identify.codec())): let info = await peerStore.identify.identify(stream, stream.peerId) + if info.observedAddr.isSome: + peerStore.observedMAManager.add(info.observedAddr.get()) + when defined(libp2p_agents_metrics): var knownAgent = "unknown" if info.agentVersion.isSome and info.agentVersion.get().len > 0: diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index f75907d333..857658306a 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -133,6 +133,7 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo await self.handleAnswer(ans) if not isNil(self.statusAndConfidenceHandler): await self.statusAndConfidenceHandler(self.networkReachability, self.confidence) + await switch.peerInfo.update() return ans proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} = @@ -153,7 +154,69 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy heartbeat "Scheduling AutonatService run", interval: await service.run(switch) +proc handleManualPortForwarding( + observedMAManager: ObservedMAManager, + listenAddr: MultiAddress, + isIP4: bool): Opt[MultiAddress] = + try: + let maFirst = listenAddr[0] + let maEnd = listenAddr[1..^1] + + if maEnd.isErr(): + return Opt.none(MultiAddress) + + let observedIP = + if isIP4: + observedMAManager.getIP4() + else: + observedMAManager.getIP6() + + let newMA = + if observedIP.isNone() or maFirst.get() == observedIP.get(): + listenAddr + else: + observedIP.get() & maEnd.get() + + return Opt.some(newMA) + except CatchableError as error: + debug "Error while handling manual port forwarding", msg = error.msg + return Opt.none(MultiAddress) + +proc addressMapper( + self: AutonatService, + observedMAManager: ObservedMAManager, + listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = + + var addrs = newSeq[MultiAddress]() + for listenAddr in listenAddrs: + try: + let maFirst = listenAddr[0] + if maFirst.isErr(): + continue + var isIP4 = true + let hostIP = + if IP4.match(maFirst.get()): + getBestRoute(initTAddress("8.8.8.8:0")).source + elif IP6.match(maFirst.get()): + isIP4 = false + getBestRoute(initTAddress("2600:::0")).source + else: + continue + if not 
hostIP.isGlobal(): + if self.networkReachability == NetworkReachability.Reachable: + let newMA = handleManualPortForwarding(observedMAManager, listenAddr, isIP4) + if newMA.isSome(): + addrs.add(newMA.get()) + continue + addrs.add(listenAddr) # do nothing + except CatchableError as exc: + continue + return addrs + method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = + proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = + return await self.addressMapper(switch.peerStore.observedMAManager, listenAddrs) + info "Setting up AutonatService" let hasBeenSetup = await procCall Service(self).setup(switch) if hasBeenSetup: @@ -163,6 +226,7 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) if self.scheduleInterval.isSome(): self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get()) + switch.peerInfo.addressMappers.add(addressMapper) return hasBeenSetup method run*(self: AutonatService, switch: Switch) {.async, public.} = @@ -170,7 +234,6 @@ method run*(self: AutonatService, switch: Switch) {.async, public.} = await askConnectedPeers(self, switch) await self.callHandler() - method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} = info "Stopping AutonatService" let hasBeenStopped = await procCall Service(self).stop(switch) From 19de3d790a922ea102ea48b2fe2239fbda1a5c26 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 3 Mar 2023 15:06:27 +0100 Subject: [PATCH 022/112] improvements --- libp2p/observedaddrmanager.nim | 110 ++++++++++++++++++ libp2p/peerstore.nim | 93 +++------------ .../connectivity/autonat/service.nim | 12 +- tests/asyncunit.nim | 4 +- tests/testobservedaddrmanager.nim | 48 ++++++++ 5 files changed, 179 insertions(+), 88 deletions(-) create mode 100644 libp2p/observedaddrmanager.nim create mode 100644 tests/testobservedaddrmanager.nim diff --git a/libp2p/observedaddrmanager.nim b/libp2p/observedaddrmanager.nim new file mode 100644 index 0000000000..2a1e3ed480 --- /dev/null +++ b/libp2p/observedaddrmanager.nim @@ -0,0 +1,110 @@ +# Nim-LibP2P +# Copyright (c) 2023 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[sets, options, heapqueue], + chronos, + ./crypto/crypto, + ./protocols/identify, + ./protocols/protocol, + ./peerid, ./peerinfo, + ./routing_record, + ./multiaddress, + ./stream/connection, + ./multistream, + ./muxers/muxer, + utility + +type + ObservedAddr = object + ma: MultiAddress + count: int + + ## Manages observed MultiAddresses by remote peers. It keeps track of the most observed IP and IP/Port.
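+ ## Observations are kept in two bounded heaps (bare IPs, and full IP/port addresses); once a heap is full the least-observed entry is evicted, and an address is only reported after at least minCount sightings.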
+ ObservedAddrManager* = ref object of RootObj + observedIPs: HeapQueue[ObservedAddr] + observedIPsAndPorts: HeapQueue[ObservedAddr] + maxSize: int + minCount: int + +proc `<`(a, b: ObservedAddr): bool = a.count < b.count + +proc add(self:ObservedAddrManager, heap: var HeapQueue[ObservedAddr], observedAddr: MultiAddress) = + if heap.len >= self.maxSize: + discard heap.pop() + + for i in 0 ..< heap.len: + if heap[i].ma == observedAddr: + let count = heap[i].count + heap.del(i) + heap.push(ObservedAddr(ma: observedAddr, count: count + 1)) + return + + heap.push(ObservedAddr(ma: observedAddr, count: 1)) + +proc add*(self:ObservedAddrManager, observedAddr: MultiAddress) = + ## Adds a new observed MultiAddress. If the MultiAddress already exists, the count is increased. + self.add(self.observedIPs, observedAddr[0].get()) + self.add(self.observedIPsAndPorts, observedAddr) + +proc getIP(self: ObservedAddrManager, heap: HeapQueue[ObservedAddr], ipVersion: MaPattern): Opt[MultiAddress] = + var i = 1 + while heap.len - i >= 0: + let observedAddr = heap[heap.len - i] + if ipVersion.match(observedAddr.ma[0].get()) and observedAddr.count >= self.minCount: + return Opt.some(observedAddr.ma) + else: + i = i + 1 + return Opt.none(MultiAddress) + +proc getMostObservedIP6*(self: ObservedAddrManager): Opt[MultiAddress] = + ## Returns the most observed IP6 address or none if the number of observations are less than minCount. + return self.getIP(self.observedIPs, IP6) + +proc getMostObservedIP4*(self: ObservedAddrManager): Opt[MultiAddress] = + ## Returns the most observed IP4 address or none if the number of observations are less than minCount. + return self.getIP(self.observedIPs, IP4) + +proc getMostObservedIP6AndPort*(self: ObservedAddrManager): Opt[MultiAddress] = + ## Returns the most observed IP6/Port address or none if the number of observations are less than minCount. + return self.getIP(self.observedIPsAndPorts, IP6) + +proc getMostObservedIP4AndPort*(self: ObservedAddrManager): Opt[MultiAddress] = + ## Returns the most observed IP4/Port address or none if the number of observations are less than minCount. + return self.getIP(self.observedIPsAndPorts, IP4) + +proc getMostObservedIPsAndPorts*(self: ObservedAddrManager): seq[MultiAddress] = + ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations + ## are less than minCount. + var res: seq[MultiAddress] + if self.getMostObservedIP4().isSome(): + res.add(self.getMostObservedIP4().get()) + if self.getMostObservedIP6().isSome(): + res.add(self.getMostObservedIP6().get()) + return res + +proc `$`*(self: ObservedAddrManager): string = + ## Returns a string representation of the ObservedAddrManager. + return "IPs: " & $self.observedIPs & "; IPs and Ports: " & $self.observedIPsAndPorts + +proc new*( + T: typedesc[ObservedAddrManager], + maxSize = 10, + minCount = 3): T = + ## Creates a new ObservedAddrManager. 
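+ ## maxSize bounds how many distinct observations are retained; minCount is the minimum number of sightings before an address is considered reliable.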
+ return T( + observedIPs: initHeapQueue[ObservedAddr](), + observedIPsAndPorts: initHeapQueue[ObservedAddr](), + maxSize: maxSize, + minCount: minCount) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 03a8745155..382b628e30 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -38,83 +38,8 @@ import ./stream/connection, ./multistream, ./muxers/muxer, - utility - -type - ObservedMA = object - ma: MultiAddress - count: int - - ObservedMAManager* = ref object of RootObj - observedIPs: HeapQueue[ObservedMA] - observedIPsAndPorts: HeapQueue[ObservedMA] - maxSize: int - minCount: int - -proc `<`(a, b: ObservedMA): bool = a.count < b.count -proc `==`(a, b: ObservedMA): bool = a.ma == b.ma - -proc add*(self:ObservedMAManager, heap: var HeapQueue[ObservedMA], observedMA: MultiAddress) = - if heap.len >= self.maxSize: - discard heap.pop() - - let idx = heap.find(ObservedMA(ma: observedMA, count: 0)) - if idx >= 0: - let observedMA = heap[idx] - heap.del(idx) - heap.push(ObservedMA(ma: observedMA.ma, count: observedMA.count + 1)) - else: - heap.push(ObservedMA(ma: observedMA, count: 1)) - -proc add*(self:ObservedMAManager, observedMA: MultiAddress) = - self.add(self.observedIPs, observedMA[0].get()) - self.add(self.observedIPsAndPorts, observedMA) - -proc getIP(self: ObservedMAManager, heap: HeapQueue[ObservedMA], ipVersion: MaPattern): Opt[MultiAddress] = - var i = 1 - while heap.len - i >= 0: - let observedMA = heap[heap.len - i] - if ipVersion.match(observedMA.ma[0].get()) and observedMA.count >= self.minCount: - return Opt.some(observedMA.ma) - else: - i = i + 1 - return Opt.none(MultiAddress) - -proc getIP6*(self: ObservedMAManager): Opt[MultiAddress] = - ## Returns the most observed IP6 address - return self.getIP(self.observedIPs, IP6) - -proc getIP4*(self: ObservedMAManager): Opt[MultiAddress] = - ## Returns the most observed IP4 address - return self.getIP(self.observedIPs, IP4) - -proc getIP6AndPort*(self: ObservedMAManager): Opt[MultiAddress] = - ## Returns the most observed IP6 address - return self.getIP(self.observedIPsAndPorts, IP6) - -proc getIP4AndPort*(self: ObservedMAManager): Opt[MultiAddress] = - ## Returns the most observed IP4 address - return self.getIP(self.observedIPsAndPorts, IP4) - -proc getIPsAndPorts*(self: ObservedMAManager): seq[MultiAddress] = - ## Returns the most observed IP4 and IP6 address - var res: seq[MultiAddress] - if self.getIP4().isSome(): - res.add(self.getIP4().get()) - if self.getIP6().isSome(): - res.add(self.getIP6().get()) - return res - -proc `$`*(self: ObservedMAManager): string = - return "IPs: " & $self.observedIPs & "; IPs and Ports: " & $self.observedIPsAndPorts - -proc new*( - T: typedesc[ObservedMAManager]): T = - return T( - observedIPs: initHeapQueue[ObservedMA](), - observedIPsAndPorts: initHeapQueue[ObservedMA](), - maxSize: 10, - minCount: 3) + utility, + observedaddrmanager type ################# @@ -154,13 +79,13 @@ type identify: Identify capacity*: int toClean*: seq[PeerId] - observedMAManager*: ObservedMAManager + observedAddrManager*: ObservedAddrManager proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} = T( identify: identify, capacity: capacity, - observedMAManager: ObservedMAManager.new(), + observedAddrManager: ObservedAddrManager.new(), ) ######################### @@ -288,7 +213,7 @@ proc identify*( let info = await peerStore.identify.identify(stream, stream.peerId) if info.observedAddr.isSome: - peerStore.observedMAManager.add(info.observedAddr.get()) + 
peerStore.observedAddrManager.add(info.observedAddr.get()) when defined(libp2p_agents_metrics): var knownAgent = "unknown" @@ -301,3 +226,11 @@ proc identify*( peerStore.updatePeerInfo(info) finally: await stream.closeWithEOF() + +proc getObservedIP6*(self: PeerStore): Opt[MultiAddress] = + ## Returns the most observed IP6 address or none if the number of observations are less than minCount. + return self.observedAddrManager.getMostObservedIP4() + +proc getObservedIP4*(self: PeerStore): Opt[MultiAddress] = + ## Returns the most observed IP4 address or none if the number of observations are less than minCount. + return self.observedAddrManager.getMostObservedIP6() diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index 857658306a..d2e421131b 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -155,7 +155,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy await service.run(switch) proc handleManualPortForwarding( - observedMAManager: ObservedMAManager, + peerStore: PeerStore, listenAddr: MultiAddress, isIP4: bool): Opt[MultiAddress] = try: @@ -167,9 +167,9 @@ proc handleManualPortForwarding( let observedIP = if isIP4: - observedMAManager.getIP4() + peerStore.getObservedIP4() else: - observedMAManager.getIP6() + peerStore.getObservedIP6() let newMA = if observedIP.isNone() or maFirst.get() == observedIP.get(): @@ -184,7 +184,7 @@ proc addressMapper( self: AutonatService, - observedMAManager: ObservedMAManager, + peerStore: PeerStore, listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = var addrs = newSeq[MultiAddress]() @@ -204,7 +204,7 @@ proc addressMapper( continue if not hostIP.isGlobal(): if self.networkReachability == NetworkReachability.Reachable: - let newMA = handleManualPortForwarding(observedMAManager, listenAddr, isIP4) + let newMA = handleManualPortForwarding(peerStore, listenAddr, isIP4) if newMA.isSome(): addrs.add(newMA.get()) continue @@ -215,7 +215,7 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = - return await self.addressMapper(switch.peerStore.observedMAManager, listenAddrs) + return await self.addressMapper(switch.peerStore, listenAddrs) info "Setting up AutonatService" let hasBeenSetup = await procCall Service(self).setup(switch) diff --git a/tests/asyncunit.nim b/tests/asyncunit.nim index fa10c9eb24..1589bf6217 100644 --- a/tests/asyncunit.nim +++ b/tests/asyncunit.nim @@ -1,6 +1,6 @@ -import unittest2 +import unittest2, chronos -export unittest2 +export unittest2, chronos template asyncTeardown*(body: untyped): untyped = teardown: diff --git a/tests/testobservedaddrmanager.nim b/tests/testobservedaddrmanager.nim new file mode 100644 index 0000000000..0d6f7e0d38 --- /dev/null +++ b/tests/testobservedaddrmanager.nim @@ -0,0 +1,48 @@ +import unittest2, + ../libp2p/multiaddress, + ../libp2p/observedaddrmanager, + ./helpers + +suite "ObservedAddrManager": + teardown: + checkTrackers() + + asyncTest "Calculate the most observed IP correctly": + + let observedAddrManager = ObservedAddrManager.new(minCount = 3) + + # Calculate the most observed IP4 correctly + let mostObservedIP4AndPort = MultiAddress.init("/ip4/1.2.3.0/tcp/1").get() + + observedAddrManager.add(mostObservedIP4AndPort) + observedAddrManager.add(mostObservedIP4AndPort) + + check observedAddrManager.getMostObservedIP4().isNone() + + observedAddrManager.add(MultiAddress.init("/ip4/1.2.3.0/tcp/2").get()) + observedAddrManager.add(MultiAddress.init("/ip4/1.2.3.1/tcp/1").get()) + + check observedAddrManager.getMostObservedIP4().get() == MultiAddress.init("/ip4/1.2.3.0").get() + check observedAddrManager.getMostObservedIP4AndPort().isNone() + + observedAddrManager.add(mostObservedIP4AndPort) + + check observedAddrManager.getMostObservedIP4AndPort().get() == mostObservedIP4AndPort + + # Calculate the most observed IP6 correctly + let mostObservedIP6AndPort = MultiAddress.init("/ip6/::1/tcp/1").get() + + observedAddrManager.add(mostObservedIP6AndPort) + observedAddrManager.add(mostObservedIP6AndPort) + + check observedAddrManager.getMostObservedIP6().isNone() + + observedAddrManager.add(MultiAddress.init("/ip6/::1/tcp/2").get()) + observedAddrManager.add(MultiAddress.init("/ip6/::2/tcp/1").get()) + + check observedAddrManager.getMostObservedIP6().get() == MultiAddress.init("/ip6/::1").get() + check observedAddrManager.getMostObservedIP6AndPort().isNone() + + observedAddrManager.add(mostObservedIP6AndPort) + + check observedAddrManager.getMostObservedIP6AndPort().get() == mostObservedIP6AndPort From 7627325a86f2ec91a9d06b2761ae61fbca5a986e Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 3 Mar 2023 17:43:32 +0100 Subject: [PATCH 023/112] replace the heap by a seq --- libp2p/observedaddrmanager.nim | 49 +++++++++++++--------------------- 1 file changed, 19 insertions(+), 30 deletions(-) diff --git a/libp2p/observedaddrmanager.nim b/libp2p/observedaddrmanager.nim index 2a1e3ed480..c421ce1ab2 100644 --- a/libp2p/observedaddrmanager.nim +++ b/libp2p/observedaddrmanager.nim @@ -13,7 +13,7 @@ else: {.push raises: [].} import - std/[sets, options, heapqueue], + std/[sets, options, tables], sequtils, chronos, ./crypto/crypto, ./protocols/identify, ./protocols/protocol, ./peerid, ./peerinfo, ./routing_record, ./multiaddress, ./stream/connection, ./multistream, ./muxers/muxer, utility type - ObservedAddr = object - ma: MultiAddress - count: int - ## Manages observed MultiAddresses by remote peers. It keeps track of the most observed IP and IP/Port. ObservedAddrManager* = ref object of RootObj - observedIPs: HeapQueue[ObservedAddr] - observedIPsAndPorts: HeapQueue[ObservedAddr] + observedIPs: seq[MultiAddress] + observedIPsAndPorts: seq[MultiAddress] maxSize: int minCount: int -proc `<`(a, b: ObservedAddr): bool = a.count < b.count - -proc add(self:ObservedAddrManager, heap: var HeapQueue[ObservedAddr], observedAddr: MultiAddress) = - if heap.len >= self.maxSize: - discard heap.pop() - - for i in 0 ..< heap.len: - if heap[i].ma == observedAddr: - let count = heap[i].count - heap.del(i) - heap.push(ObservedAddr(ma: observedAddr, count: count + 1)) - return +proc add(self:ObservedAddrManager, observations: var seq[MultiAddress], observedAddr: MultiAddress) = + if observations.len >= self.maxSize: + observations.del(0) - heap.push(ObservedAddr(ma: observedAddr, count: 1)) + observations.add(observedAddr) proc add*(self:ObservedAddrManager, observedAddr: MultiAddress) = ## Adds a new observed MultiAddress. If the MultiAddress already exists, the count is increased. self.add(self.observedIPs, observedAddr[0].get()) self.add(self.observedIPsAndPorts, observedAddr) -proc getIP(self: ObservedAddrManager, heap: HeapQueue[ObservedAddr], ipVersion: MaPattern): Opt[MultiAddress] = - var i = 1 - while heap.len - i >= 0: - let observedAddr = heap[heap.len - i] - if ipVersion.match(observedAddr.ma[0].get()) and observedAddr.count >= self.minCount: - return Opt.some(observedAddr.ma) - else: - i = i + 1 +proc getIP(self: ObservedAddrManager, observations: seq[MultiAddress], ipVersion: MaPattern): Opt[MultiAddress] = + var countTable = toCountTable(observations) + countTable.sort() + var orderedPairs = toSeq(countTable.pairs) + for maAndCount in orderedPairs: + let ma = maAndCount[0] + let ip = ma[0].get() + let count = maAndCount[1] + if ipVersion.match(ip) and count >= self.minCount: + return Opt.some(ma) return Opt.none(MultiAddress) proc getMostObservedIP6*(self: ObservedAddrManager): Opt[MultiAddress] = @@ -104,7 +93,7 @@ proc new*( minCount = 3): T = ## Creates a new ObservedAddrManager. return T( - observedIPs: initHeapQueue[ObservedAddr](), - observedIPsAndPorts: initHeapQueue[ObservedAddr](), + observedIPs: newSeq[MultiAddress](), + observedIPsAndPorts: newSeq[MultiAddress](), maxSize: maxSize, minCount: minCount) From 92bfd83c6804f0b0f072bd27e58496a6b0622022 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 6 Mar 2023 12:23:51 +0100 Subject: [PATCH 024/112] Move manager to identify --- libp2p/observedaddrmanager.nim | 13 +---- libp2p/peerstore.nim | 16 ++----- .../connectivity/autonat/service.nim | 4 +- libp2p/protocols/identify.nim | 48 ++++++++++++------- tests/testnative.nim | 3 +- 5 files changed, 41 insertions(+), 43 deletions(-) diff --git a/libp2p/observedaddrmanager.nim b/libp2p/observedaddrmanager.nim index c421ce1ab2..c470414003 100644 --- a/libp2p/observedaddrmanager.nim +++ b/libp2p/observedaddrmanager.nim @@ -13,18 +13,9 @@ else: {.push raises: [].} import - std/[sets, options, tables], sequtils, + std/[sequtils, tables], chronos, - ./crypto/crypto, - ./protocols/identify, - ./protocols/protocol, - ./peerid, ./peerinfo, - ./routing_record, - ./multiaddress, - ./stream/connection, - ./multistream, - ./muxers/muxer, - utility + ./multiaddress type ## Manages observed MultiAddresses by remote peers. It keeps track of the most observed IP and IP/Port.
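An aside on the selection logic above: [PATCH 023/112] moves the counting from insertion time (the heap) to query time (toCountTable over a flat seq). A minimal, self-contained Nim sketch of that approach follows; the proc name mostFrequent, the string-typed observations, and the minCount parameter are illustrative stand-ins, not code from this series:

import std/tables

# Observations live in a flat seq; tallying happens only when querying.
proc mostFrequent(observations: seq[string], minCount: int): string =
  var counts = toCountTable(observations)  # tally identical observations
  counts.sort()                            # orders by descending count
  for (value, count) in counts.pairs:      # first entry is the most observed
    if count >= minCount:
      return value
  return ""                                # not enough corroboration yet

when isMainModule:
  let obs = @["1.2.3.0", "1.2.3.0", "1.2.3.1", "1.2.3.0"]
  doAssert mostFrequent(obs, 3) == "1.2.3.0"
  doAssert mostFrequent(obs, 4) == ""

The real getIP additionally filters candidates by IP version (an IP4/IP6 MaPattern) before applying the minCount threshold.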
diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 382b628e30..14e9063989 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -38,8 +38,7 @@ import ./stream/connection, ./multistream, ./muxers/muxer, - utility, - observedaddrmanager + utility type ################# @@ -79,13 +78,11 @@ type identify: Identify capacity*: int toClean*: seq[PeerId] - observedAddrManager*: ObservedAddrManager proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} = T( identify: identify, capacity: capacity, - observedAddrManager: ObservedAddrManager.new(), ) ######################### @@ -212,9 +209,6 @@ proc identify*( if (await MultistreamSelect.select(stream, peerStore.identify.codec())): let info = await peerStore.identify.identify(stream, stream.peerId) - if info.observedAddr.isSome: - peerStore.observedAddrManager.add(info.observedAddr.get()) - when defined(libp2p_agents_metrics): var knownAgent = "unknown" if info.agentVersion.isSome and info.agentVersion.get().len > 0: @@ -227,10 +221,10 @@ proc identify*( finally: await stream.closeWithEOF() -proc getObservedIP6*(self: PeerStore): Opt[MultiAddress] = +proc getMostObservedIP6*(self: PeerStore): Opt[MultiAddress] = ## Returns the most observed IP6 address or none if the number of observations are less than minCount. - return self.observedAddrManager.getMostObservedIP4() + return self.identify.getMostObservedIP6() -proc getObservedIP4*(self: PeerStore): Opt[MultiAddress] = +proc getMostObservedIP4*(self: PeerStore): Opt[MultiAddress] = ## Returns the most observed IP4 address or none if the number of observations are less than minCount. - return self.observedAddrManager.getMostObservedIP6() + return self.identify.getMostObservedIP4() diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index d2e421131b..dbeb57dcae 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -167,9 +167,9 @@ proc handleManualPortForwarding( let observedIP = if isIP4: - peerStore.getObservedIP4() + peerStore.getMostObservedIP4() else: - peerStore.getObservedIP6() + peerStore.getMostObservedIP6() let newMA = if observedIP.isNone() or maFirst.get() == observedIP.get(): diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim index 61a8ddad13..d1d60dc4d9 100644 --- a/libp2p/protocols/identify.nim +++ b/libp2p/protocols/identify.nim @@ -26,7 +26,8 @@ import ../protobuf/minprotobuf, ../multiaddress, ../protocols/protocol, ../utility, - ../errors + ../errors, + ../observedaddrmanager logScope: topics = "libp2p identify" @@ -56,6 +57,7 @@ type Identify* = ref object of LPProtocol peerInfo*: PeerInfo sendSignedPeerRecord*: bool + observedAddrManager*: ObservedAddrManager IdentifyPushHandler* = proc ( peer: PeerId, @@ -160,7 +162,8 @@ proc new*( ): T = let identify = T( peerInfo: peerInfo, - sendSignedPeerRecord: sendSignedPeerRecord + sendSignedPeerRecord: sendSignedPeerRecord, + observedAddrManager: ObservedAddrManager.new(), ) identify.init() identify @@ -182,7 +185,7 @@ method init*(p: Identify) = p.handler = handle p.codec = IdentifyCodec -proc identify*(p: Identify, +proc identify*(self: Identify, conn: Connection, remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} = trace "initiating identify", conn @@ -194,23 +197,24 @@ proc identify*(p: Identify, let infoOpt = decodeMsg(message) if infoOpt.isNone(): raise newException(IdentityInvalidMsgError, "Incorrect message 
received!") - result = infoOpt.get() - - if result.pubkey.isSome: - let peer = PeerId.init(result.pubkey.get()) - if peer.isErr: - raise newException(IdentityInvalidMsgError, $peer.error) - else: - result.peerId = peer.get() - if peer.get() != remotePeerId: - trace "Peer ids don't match", - remote = peer, - local = remotePeerId - - raise newException(IdentityNoMatchError, "Peer ids don't match") - else: + + var info = infoOpt.get() + if info.pubkey.isNone(): raise newException(IdentityInvalidMsgError, "No pubkey in identify") + let peer = PeerId.init(info.pubkey.get()) + if peer.isErr: + raise newException(IdentityInvalidMsgError, $peer.error) + + if peer.get() != remotePeerId: + trace "Peer ids don't match", remote = peer, local = remotePeerId + raise newException(IdentityNoMatchError, "Peer ids don't match") + info.peerId = peer.get() + + if info.observedAddr.isSome: + self.observedAddrManager.add(info.observedAddr.get()) + return info + proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} = ## Create a IdentifyPush protocol. `handler` will be called every time ## a peer sends us new `PeerInfo` @@ -254,3 +258,11 @@ proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, publi ## Send new `peerInfo`s to a connection var pb = encodeMsg(peerInfo, conn.observedAddr, true) await conn.writeLp(pb.buffer) + +proc getMostObservedIP6*(self: Identify): Opt[MultiAddress] = + ## Returns the most observed IP6 address or none if the number of observations are less than minCount. + return self.observedAddrManager.getMostObservedIP6() + +proc getMostObservedIP4*(self: Identify): Opt[MultiAddress] = + ## Returns the most observed IP4 address or none if the number of observations are less than minCount. + return self.observedAddrManager.getMostObservedIP4() \ No newline at end of file diff --git a/tests/testnative.nim b/tests/testnative.nim index b41508160f..59a5b62635 100644 --- a/tests/testnative.nim +++ b/tests/testnative.nim @@ -28,6 +28,7 @@ import testtcptransport, testmultistream, testbufferstream, testidentify, + testobservedaddrmanager, testconnmngr, testswitch, testnoise, @@ -42,4 +43,4 @@ import testtcptransport, testyamux, testautonat, testautonatservice, - testautorelay + testautorelay \ No newline at end of file From 4fc7b6ddd9375b3ecedb7787a19816e41a7111b2 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 6 Mar 2023 12:58:54 +0100 Subject: [PATCH 025/112] Simplify api --- libp2p/observedaddrmanager.nim | 41 ++++++++----------- libp2p/peerstore.nim | 10 ++--- .../connectivity/autonat/service.nim | 22 +++++----- libp2p/protocols/identify.nim | 12 +++--- tests/testobservedaddrmanager.nim | 17 ++++---- 5 files changed, 46 insertions(+), 56 deletions(-) diff --git a/libp2p/observedaddrmanager.nim b/libp2p/observedaddrmanager.nim index c470414003..bf5f2f5109 100644 --- a/libp2p/observedaddrmanager.nim +++ b/libp2p/observedaddrmanager.nim @@ -25,14 +25,17 @@ type maxSize: int minCount: int + IPVersion* = enum + IPv4, IPv6 + proc add(self:ObservedAddrManager, observations: var seq[MultiAddress], observedAddr: MultiAddress) = if observations.len >= self.maxSize: observations.del(0) - observations.add(observedAddr) proc add*(self:ObservedAddrManager, observedAddr: MultiAddress) = - ## Adds a new observed MultiAddress. If the MultiAddress already exists, the count is increased. + ## Adds a new observed MultiAddress. If the number of observations exceeds maxSize, the oldest one is removed. + ## Both IP and IP/Port are tracked. 
self.add(self.observedIPs, observedAddr[0].get()) self.add(self.observedIPsAndPorts, observedAddr) @@ -40,38 +43,30 @@ proc getIP(self: ObservedAddrManager, observations: seq[MultiAddress], ipVersion var countTable = toCountTable(observations) countTable.sort() var orderedPairs = toSeq(countTable.pairs) - for maAndCount in orderedPairs: - let ma = maAndCount[0] + for (ma, count) in orderedPairs: let ip = ma[0].get() - let count = maAndCount[1] if ipVersion.match(ip) and count >= self.minCount: return Opt.some(ma) return Opt.none(MultiAddress) -proc getMostObservedIP6*(self: ObservedAddrManager): Opt[MultiAddress] = - ## Returns the most observed IP6 address or none if the number of observations are less than minCount. - return self.getIP(self.observedIPs, IP6) - -proc getMostObservedIP4*(self: ObservedAddrManager): Opt[MultiAddress] = - ## Returns the most observed IP4 address or none if the number of observations are less than minCount. - return self.getIP(self.observedIPs, IP4) - -proc getMostObservedIP6AndPort*(self: ObservedAddrManager): Opt[MultiAddress] = - ## Returns the most observed IP6/Port address or none if the number of observations are less than minCount. - return self.getIP(self.observedIPsAndPorts, IP6) +proc getMostObservedIP*(self: ObservedAddrManager, ipVersion: IPVersion): Opt[MultiAddress] = + ## Returns the most observed IP address or none if the number of observations are less than minCount. + return self.getIP(self.observedIPs, if ipVersion == IPv4: IP4 else: IP6) -proc getMostObservedIP4AndPort*(self: ObservedAddrManager): Opt[MultiAddress] = - ## Returns the most observed IP4/Port address or none if the number of observations are less than minCount. - return self.getIP(self.observedIPsAndPorts, IP4) +proc getMostObservedIPAndPort*(self: ObservedAddrManager, ipVersion: IPVersion): Opt[MultiAddress] = + ## Returns the most observed IP/Port address or none if the number of observations are less than minCount. + return self.getIP(self.observedIPsAndPorts, if ipVersion == IPv4: IP4 else: IP6) proc getMostObservedIPsAndPorts*(self: ObservedAddrManager): seq[MultiAddress] = ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations ## are less than minCount. var res: seq[MultiAddress] - if self.getMostObservedIP4().isSome(): - res.add(self.getMostObservedIP4().get()) - if self.getMostObservedIP6().isSome(): - res.add(self.getMostObservedIP6().get()) + let ip4 = self.getMostObservedIPAndPort(IPv4) + if ip4.isSome(): + res.add(ip4.get()) + let ip6 = self.getMostObservedIPAndPort(IPv6) + if ip6.isSome(): + res.add(ip6.get()) return res proc `$`*(self: ObservedAddrManager): string = diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 14e9063989..6dcc68ef4e 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -221,10 +221,6 @@ proc identify*( finally: await stream.closeWithEOF() -proc getMostObservedIP6*(self: PeerStore): Opt[MultiAddress] = - ## Returns the most observed IP6 address or none if the number of observations are less than minCount. - return self.identify.getMostObservedIP6() - -proc getMostObservedIP4*(self: PeerStore): Opt[MultiAddress] = - ## Returns the most observed IP4 address or none if the number of observations are less than minCount. - return self.identify.getMostObservedIP4() +proc getMostObservedIP*(self: PeerStore, ipVersion: IPVersion): Opt[MultiAddress] = + ## Returns the most observed IP address or none if the number of observations are less than minCount. 
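+ ## (Delegates to the Identify protocol's ObservedAddrManager.)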
+ return self.identify.getMostObservedIP(ipVersion) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index dbeb57dcae..86217b22b8 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -159,23 +159,23 @@ proc handleManualPortForwarding( listenAddr: MultiAddress, isIP4: bool): Opt[MultiAddress] = try: - let maFirst = listenAddr[0] - let maEnd = listenAddr[1..^1] + let maIP = listenAddr[0] + let maWithoutIP = listenAddr[1..^1] - if maEnd.isErr(): + if maWithoutIP.isErr(): return Opt.none(MultiAddress) let observedIP = if isIP4: - peerStore.getMostObservedIP4() + peerStore.getMostObservedIP(IPv4) else: - peerStore.getMostObservedIP6() + peerStore.getMostObservedIP(IPv6) let newMA = - if observedIP.isNone() or maFirst.get() == observedIP.get(): + if observedIP.isNone() or maIP.get() == observedIP.get(): listenAddr else: - observedIP.get() & maEnd.get() + observedIP.get() & maWithoutIP.get() return Opt.some(newMA) except CatchableError as error: @@ -190,14 +190,14 @@ proc addressMapper( var addrs = newSeq[MultiAddress]() for listenAddr in listenAddrs: try: - let maFirst = listenAddr[0] - if maFirst.isErr(): + let maIP = listenAddr[0] + if maIP.isErr(): continue var isIP4 = true let hostIP = - if IP4.match(maFirst.get()): + if IP4.match(maIP.get()): getBestRoute(initTAddress("8.8.8.8:0")).source - elif IP6.match(maFirst.get()): + elif IP6.match(maIP.get()): isIP4 = false getBestRoute(initTAddress("2600:::0")).source else: diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim index d1d60dc4d9..be467ec50a 100644 --- a/libp2p/protocols/identify.nim +++ b/libp2p/protocols/identify.nim @@ -29,6 +29,8 @@ import ../protobuf/minprotobuf, ../errors, ../observedaddrmanager +export observedaddrmanager + logScope: topics = "libp2p identify" @@ -259,10 +261,6 @@ proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, publi var pb = encodeMsg(peerInfo, conn.observedAddr, true) await conn.writeLp(pb.buffer) -proc getMostObservedIP6*(self: Identify): Opt[MultiAddress] = - ## Returns the most observed IP6 address or none if the number of observations are less than minCount. - return self.observedAddrManager.getMostObservedIP6() - -proc getMostObservedIP4*(self: Identify): Opt[MultiAddress] = - ## Returns the most observed IP4 address or none if the number of observations are less than minCount. - return self.observedAddrManager.getMostObservedIP4() \ No newline at end of file +proc getMostObservedIP*(self: Identify, ipVersion: IPVersion): Opt[MultiAddress] = + ## Returns the most observed IP address or none if the number of observations are less than minCount. 
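+ ## Identify feeds each peer-reported observedAddr into this manager during the identify exchange.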
+ return self.observedAddrManager.getMostObservedIP(ipVersion) diff --git a/tests/testobservedaddrmanager.nim b/tests/testobservedaddrmanager.nim index 0d6f7e0d38..cc47d3694c 100644 --- a/tests/testobservedaddrmanager.nim +++ b/tests/testobservedaddrmanager.nim @@ -17,17 +17,18 @@ suite "ObservedAddrManager": observedAddrManager.add(mostObservedIP4AndPort) observedAddrManager.add(mostObservedIP4AndPort) - check observedAddrManager.getMostObservedIP4().isNone() + check observedAddrManager.getMostObservedIP(IPv4).isNone() + check observedAddrManager.getMostObservedIP(IPv6).isNone() observedAddrManager.add(MultiAddress.init("/ip4/1.2.3.0/tcp/2").get()) observedAddrManager.add(MultiAddress.init("/ip4/1.2.3.1/tcp/1").get()) - check observedAddrManager.getMostObservedIP4().get() == MultiAddress.init("/ip4/1.2.3.0").get() - check observedAddrManager.getMostObservedIP4AndPort().isNone() + check observedAddrManager.getMostObservedIP(IPv4).get() == MultiAddress.init("/ip4/1.2.3.0").get() + check observedAddrManager.getMostObservedIPAndPort(IPv4).isNone() observedAddrManager.add(mostObservedIP4AndPort) - check observedAddrManager.getMostObservedIP4AndPort().get() == mostObservedIP4AndPort + check observedAddrManager.getMostObservedIPAndPort(IPv4).get() == mostObservedIP4AndPort # Calculate the most observed IP6 correctly let mostObservedIP6AndPort = MultiAddress.init("/ip6/::1/tcp/1").get() observedAddrManager.add(mostObservedIP6AndPort) observedAddrManager.add(mostObservedIP6AndPort) - check observedAddrManager.getMostObservedIP6().isNone() + check observedAddrManager.getMostObservedIP(IPv6).isNone() observedAddrManager.add(MultiAddress.init("/ip6/::1/tcp/2").get()) observedAddrManager.add(MultiAddress.init("/ip6/::2/tcp/1").get()) - check observedAddrManager.getMostObservedIP6().get() == MultiAddress.init("/ip6/::1").get() - check observedAddrManager.getMostObservedIP6AndPort().isNone() + check observedAddrManager.getMostObservedIP(IPv6).get() == MultiAddress.init("/ip6/::1").get() + check observedAddrManager.getMostObservedIPAndPort(IPv6).isNone() observedAddrManager.add(mostObservedIP6AndPort) - check observedAddrManager.getMostObservedIP6AndPort().get() == mostObservedIP6AndPort + check observedAddrManager.getMostObservedIPAndPort(IPv6).get() == mostObservedIP6AndPort From 22d6c0573ad3e570d7b24dd9b7b7743e38796083 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 8 Mar 2023 13:19:53 +0100 Subject: [PATCH 026/112] Improvements after code review --- .pinned | 2 +- libp2p/observedaddrmanager.nim | 21 +++++-------- libp2p/peerstore.nim | 2 +- .../connectivity/autonat/service.nim | 31 ++++++------------- libp2p/protocols/identify.nim | 2 +- tests/testobservedaddrmanager.nim | 20 ++++++------ 6 files changed, 31 insertions(+), 47 deletions(-) diff --git a/.pinned b/.pinned index 501285242a..110bc93fb6 100644 --- a/.pinned +++ b/.pinned @@ -1,6 +1,6 @@ bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a -chronos;https://github.com/status-im/nim-chronos@#8ed5307544540d04a8caa666c0703de9e6b06d58 +chronos;https://github.com/status-im/nim-chronos@#f7835a192b45c37e97614d865141f21eea8c156e dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823 faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f diff --git a/libp2p/observedaddrmanager.nim b/libp2p/observedaddrmanager.nim index bf5f2f5109..41f0d17c24 100644 --- a/libp2p/observedaddrmanager.nim +++ b/libp2p/observedaddrmanager.nim @@ -15,12 +15,11 @@ else: import std/[sequtils, tables], chronos, - ./multiaddress + multiaddress type ## Manages observed MultiAddresses by remote peers. It keeps track of the most observed IP and IP/Port. ObservedAddrManager* = ref object of RootObj - observedIPs: seq[MultiAddress] observedIPsAndPorts: seq[MultiAddress] maxSize: int minCount: int @@ -28,16 +27,12 @@ type IPVersion* = enum IPv4, IPv6 -proc add(self:ObservedAddrManager, observations: var seq[MultiAddress], observedAddr: MultiAddress) = - if observations.len >= self.maxSize: - observations.del(0) - observations.add(observedAddr) - -proc add*(self:ObservedAddrManager, observedAddr: MultiAddress) = +proc addObservation*(self:ObservedAddrManager, observedAddr: MultiAddress) = ## Adds a new observed MultiAddress. If the number of observations exceeds maxSize, the oldest one is removed. ## Both IP and IP/Port are tracked. - self.add(self.observedIPs, observedAddr[0].get()) - self.add(self.observedIPsAndPorts, observedAddr) + if self.observedIPsAndPorts.len >= self.maxSize: + self.observedIPsAndPorts.del(0) + self.observedIPsAndPorts.add(observedAddr) proc getIP(self: ObservedAddrManager, observations: seq[MultiAddress], ipVersion: MaPattern): Opt[MultiAddress] = var countTable = toCountTable(observations) @@ -51,7 +46,8 @@ proc getIP(self: ObservedAddrManager, observations: seq[MultiAddress], ipVersion proc getMostObservedIP*(self: ObservedAddrManager, ipVersion: IPVersion): Opt[MultiAddress] = ## Returns the most observed IP address or none if the number of observations are less than minCount. - return self.getIP(self.observedIPs, if ipVersion == IPv4: IP4 else: IP6) + let observedIPs = self.observedIPsAndPorts.mapIt(it[0].get()) + return self.getIP(observedIPs, if ipVersion == IPv4: IP4 else: IP6) proc getMostObservedIPAndPort*(self: ObservedAddrManager, ipVersion: IPVersion): Opt[MultiAddress] = ## Returns the most observed IP/Port address or none if the number of observations are less than minCount. @@ -71,7 +67,7 @@ proc getMostObservedIPsAndPorts*(self: ObservedAddrManager): seq[MultiAddress] = proc `$`*(self: ObservedAddrManager): string = ## Returns a string representation of the ObservedAddrManager. - return "IPs: " & $self.observedIPs & "; IPs and Ports: " & $self.observedIPsAndPorts + return "IPs and Ports: " & $self.observedIPsAndPorts proc new*( T: typedesc[ObservedAddrManager], @@ -79,7 +75,6 @@ proc new*( minCount = 3): T = ## Creates a new ObservedAddrManager.
return T( - observedIPs: newSeq[MultiAddress](), observedIPsAndPorts: newSeq[MultiAddress](), maxSize: maxSize, minCount: minCount) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 6dcc68ef4e..4536607250 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -27,7 +27,7 @@ else: {.push raises: [].} import - std/[tables, sets, options, macros, heapqueue], + std/[tables, sets, options, macros], chronos, ./crypto/crypto, ./protocols/identify, diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index 86217b22b8..3bf96b68ef 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -15,6 +15,7 @@ else: import std/[options, deques, sequtils] import chronos, metrics import ../../../switch +import ../../../wire import client import ../../../utils/heartbeat import ../../../crypto/crypto @@ -156,8 +157,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy proc handleManualPortForwarding( peerStore: PeerStore, - listenAddr: MultiAddress, - isIP4: bool): Opt[MultiAddress] = + listenAddr: MultiAddress): Opt[MultiAddress] = try: let maIP = listenAddr[0] let maWithoutIP = listenAddr[1..^1] @@ -166,7 +166,7 @@ proc handleManualPortForwarding( return Opt.none(MultiAddress) let observedIP = - if isIP4: + if IP4.match(maIP.get()): peerStore.getMostObservedIP(IPv4) else: peerStore.getMostObservedIP(IPv6) @@ -189,28 +189,17 @@ proc addressMapper( var addrs = newSeq[MultiAddress]() for listenAddr in listenAddrs: + var processedMA = listenAddr try: - let maIP = listenAddr[0] - if maIP.isErr(): - continue - var isIP4 = true - let hostIP = - if IP4.match(maIP.get()): - getBestRoute(initTAddress("8.8.8.8:0")).source - elif IP6.match(maIP.get()): - isIP4 = false - getBestRoute(initTAddress("2600:::0")).source - else: - continue + let hostIP = initTAddress(listenAddr).get() if not hostIP.isGlobal(): if self.networkReachability == NetworkReachability.Reachable: - let newMA = handleManualPortForwarding(peerStore, listenAddr, isIP4) - if newMA.isSome(): - addrs.add(newMA.get()) - continue - addrs.add(listenAddr) # do nothing + let maOpt = handleManualPortForwarding(peerStore, listenAddr) + if maOpt.isSome(): + processedMA = maOpt.get() except CatchableError as exc: - continue + debug "Error while handling address mapper", msg = exc.msg + addrs.add(processedMA) return addrs method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim index be467ec50a..1141ace271 100644 --- a/libp2p/protocols/identify.nim +++ b/libp2p/protocols/identify.nim @@ -214,7 +214,7 @@ proc identify*(self: Identify, info.peerId = peer.get() if info.observedAddr.isSome: - self.observedAddrManager.add(info.observedAddr.get()) + self.observedAddrManager.addObservation(info.observedAddr.get()) return info proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} = diff --git a/tests/testobservedaddrmanager.nim b/tests/testobservedaddrmanager.nim index cc47d3694c..a4b06368ec 100644 --- a/tests/testobservedaddrmanager.nim +++ b/tests/testobservedaddrmanager.nim @@ -14,36 +14,36 @@ suite "ObservedAddrManager": # Calculate the most oberserved IP4 correctly let mostObservedIP4AndPort = MultiAddress.init("/ip4/1.2.3.0/tcp/1").get() - observedAddrManager.add(mostObservedIP4AndPort) - observedAddrManager.add(mostObservedIP4AndPort) + 
observedAddrManager.addObservation(mostObservedIP4AndPort) + observedAddrManager.addObservation(mostObservedIP4AndPort) check observedAddrManager.getMostObservedIP(IPv4).isNone() check observedAddrManager.getMostObservedIP(IPv6).isNone() - observedAddrManager.add(MultiAddress.init("/ip4/1.2.3.0/tcp/2").get()) - observedAddrManager.add(MultiAddress.init("/ip4/1.2.3.1/tcp/1").get()) + observedAddrManager.addObservation(MultiAddress.init("/ip4/1.2.3.0/tcp/2").get()) + observedAddrManager.addObservation(MultiAddress.init("/ip4/1.2.3.1/tcp/1").get()) check observedAddrManager.getMostObservedIP(IPv4).get() == MultiAddress.init("/ip4/1.2.3.0").get() check observedAddrManager.getMostObservedIPAndPort(IPv4).isNone() - observedAddrManager.add(mostObservedIP4AndPort) + observedAddrManager.addObservation(mostObservedIP4AndPort) check observedAddrManager.getMostObservedIPAndPort(IPv4).get() == mostObservedIP4AndPort # Calculate the most oberserved IP6 correctly let mostObservedIP6AndPort = MultiAddress.init("/ip6/::1/tcp/1").get() - observedAddrManager.add(mostObservedIP6AndPort) - observedAddrManager.add(mostObservedIP6AndPort) + observedAddrManager.addObservation(mostObservedIP6AndPort) + observedAddrManager.addObservation(mostObservedIP6AndPort) check observedAddrManager.getMostObservedIP(IPv6).isNone() - observedAddrManager.add(MultiAddress.init("/ip6/::1/tcp/2").get()) - observedAddrManager.add(MultiAddress.init("/ip6/::2/tcp/1").get()) + observedAddrManager.addObservation(MultiAddress.init("/ip6/::1/tcp/2").get()) + observedAddrManager.addObservation(MultiAddress.init("/ip6/::2/tcp/1").get()) check observedAddrManager.getMostObservedIP(IPv6).get() == MultiAddress.init("/ip6/::1").get() check observedAddrManager.getMostObservedIPAndPort(IPv6).isNone() - observedAddrManager.add(mostObservedIP6AndPort) + observedAddrManager.addObservation(mostObservedIP6AndPort) check observedAddrManager.getMostObservedIPAndPort(IPv6).get() == mostObservedIP6AndPort From 7cb27cc5406336f4ee5e32ede788303d62d92af2 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 8 Mar 2023 13:35:04 +0100 Subject: [PATCH 027/112] More fixes --- libp2p/peerstore.nim | 2 +- tests/testnative.nim | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 4536607250..0addc1499d 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -82,7 +82,7 @@ type proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} = T( identify: identify, - capacity: capacity, + capacity: capacity ) ######################### diff --git a/tests/testnative.nim b/tests/testnative.nim index 59a5b62635..25dd7d0f02 100644 --- a/tests/testnative.nim +++ b/tests/testnative.nim @@ -43,4 +43,4 @@ import testtcptransport, testyamux, testautonat, testautonatservice, - testautorelay \ No newline at end of file + testautorelay From c46250335179c4ca768ceb80486a97097d9112a8 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 14:30:36 +0100 Subject: [PATCH 028/112] move procs here for reuse --- libp2p/peerstore.nim | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 0addc1499d..24ced1c773 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -28,7 +28,7 @@ else: import std/[tables, sets, options, macros], - chronos, + chronos, chronicles, ./crypto/crypto, ./protocols/identify, ./protocols/protocol, @@ -224,3 +224,36 @@ proc identify*( proc getMostObservedIP*(self: 
PeerStore, ipVersion: IPVersion): Opt[MultiAddress] = ## Returns the most observed IP address or none if the number of observations are less than minCount. return self.identify.getMostObservedIP(ipVersion) + +proc replaceMAIpByMostObserved*( + self: PeerStore, + ma: MultiAddress): Opt[MultiAddress] = + try: + let maIP = ma[0] + let maWithoutIP = ma[1..^1] + + if maWithoutIP.isErr(): + return Opt.none(MultiAddress) + + let observedIP = + if IP4.match(maIP.get()): + self.getMostObservedIP(IPv4) + else: + self.getMostObservedIP(IPv6) + + let newMA = + if observedIP.isNone() or maIP.get() == observedIP.get(): + ma + else: + observedIP.get() & maWithoutIP.get() + + return Opt.some(newMA) + except CatchableError as error: + debug "Error while handling manual port forwarding", msg = error.msg + return Opt.none(MultiAddress) + +proc guessNatAddrs*(self: PeerStore, listenAddrs: seq[MultiAddress]): seq[MultiAddress] = + for l in listenAddrs: + let guess = self.replaceMAIpByMostObserved(l) + if guess.isSome(): + result.add(guess.get()) \ No newline at end of file From e6908d0d5678ddaa7da9500366a2aedb3a40cea3 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 15:48:42 +0100 Subject: [PATCH 029/112] improve naming --- libp2p/peerstore.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 24ced1c773..5e45bd98b8 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -252,7 +252,7 @@ proc replaceMAIpByMostObserved*( debug "Error while handling manual port forwarding", msg = error.msg return Opt.none(MultiAddress) -proc guessNatAddrs*(self: PeerStore, listenAddrs: seq[MultiAddress]): seq[MultiAddress] = +proc guessDialableAddrs*(self: PeerStore, listenAddrs: seq[MultiAddress]): seq[MultiAddress] = for l in listenAddrs: let guess = self.replaceMAIpByMostObserved(l) if guess.isSome(): From a378b2d3cabdd0e346a804a1bfebf8b2977b1e0e Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 16:07:30 +0100 Subject: [PATCH 030/112] remove proc --- .../connectivity/autonat/service.nim | 29 +------------------ 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index 3bf96b68ef..d1e23c9933 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -155,33 +155,6 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy heartbeat "Scheduling AutonatService run", interval: await service.run(switch) -proc handleManualPortForwarding( - peerStore: PeerStore, - listenAddr: MultiAddress): Opt[MultiAddress] = - try: - let maIP = listenAddr[0] - let maWithoutIP = listenAddr[1..^1] - - if maWithoutIP.isErr(): - return Opt.none(MultiAddress) - - let observedIP = - if IP4.match(maIP.get()): - peerStore.getMostObservedIP(IPv4) - else: - peerStore.getMostObservedIP(IPv6) - - let newMA = - if observedIP.isNone() or maIP.get() == observedIP.get(): - listenAddr - else: - observedIP.get() & maWithoutIP.get() - - return Opt.some(newMA) - except CatchableError as error: - debug "Error while handling manual port forwarding", msg = error.msg - return Opt.none(MultiAddress) - proc addressMapper( self: AutonatService, peerStore: PeerStore, @@ -194,7 +167,7 @@ proc addressMapper( let hostIP = initTAddress(listenAddr).get() if not hostIP.isGlobal(): if self.networkReachability == NetworkReachability.Reachable: - let maOpt = handleManualPortForwarding(peerStore, 
listenAddr) + let maOpt = peerStore.replaceMAIpByMostObserved(listenAddr) # handle manual port forwarding if maOpt.isSome(): processedMA = maOpt.get() except CatchableError as exc: From f4b34f2c93800eeb0d3731e54fab88c8ef61c510 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 13 Mar 2023 18:36:23 +0100 Subject: [PATCH 031/112] Add an AddressMapper to the AutoRelayService --- libp2p/services/autorelayservice.nim | 17 +++++++++++++++-- tests/testautorelay.nim | 5 ++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/libp2p/services/autorelayservice.nim b/libp2p/services/autorelayservice.nim index 14f58c76ac..defd42f17c 100644 --- a/libp2p/services/autorelayservice.nim +++ b/libp2p/services/autorelayservice.nim @@ -32,9 +32,15 @@ type backingOff: seq[PeerId] peerAvailable: AsyncEvent onReservation: OnReservationHandler + addressMapper: AddressMapper rng: ref HmacDrbgContext -proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, selfPid: PeerId) {.async.} = +proc addressMapper( + self: AutoRelayService, + listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = + return concat(toSeq(self.relayAddresses.values)) + +proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} = while self.running: let rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5)) @@ -46,11 +52,15 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, selfPid: PeerId) break if relayPid notin self.relayAddresses or self.relayAddresses[relayPid] != relayedAddr: self.relayAddresses[relayPid] = relayedAddr + await switch.peerInfo.update() if not self.onReservation.isNil(): self.onReservation(concat(toSeq(self.relayAddresses.values))) await sleepAsync chronos.seconds(ttl - 30) method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} = + self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = + return await addressMapper(self, listenAddrs) + let hasBeenSetUp = await procCall Service(self).setup(switch) if hasBeenSetUp: proc handlePeerJoined(peerId: PeerId, event: PeerEvent) {.async.} = @@ -63,6 +73,7 @@ method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcs future[].cancel() switch.addPeerEventHandler(handlePeerJoined, Joined) switch.addPeerEventHandler(handlePeerLeft, Left) + switch.peerInfo.addressMappers.add(self.addressMapper) await self.run(switch) return hasBeenSetUp @@ -96,7 +107,7 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} = for relayPid in connectedPeers: if self.relayPeers.len() >= self.numRelays: break - self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch.peerInfo.peerId) + self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch) if self.relayPeers.len() > 0: await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait() @@ -116,6 +127,8 @@ method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsa if hasBeenStopped: self.running = false self.runner.cancel() + switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper) + await switch.peerInfo.update() return hasBeenStopped proc getAddresses*(self: AutoRelayService): seq[MultiAddress] = diff --git a/tests/testautorelay.nim b/tests/testautorelay.nim index 9fc58d96ca..3117387eed 100644 --- a/tests/testautorelay.nim +++ b/tests/testautorelay.nim @@ -72,10 +72,13 @@ suite "Autorelay": await fut.wait(1.seconds) let addresses = autorelay.getAddresses() check: - addresses[0] == 
buildRelayMA(switchRelay, switchClient) + addresses == @[buildRelayMA(switchRelay, switchClient)] addresses.len() == 1 + addresses == switchClient.peerInfo.addrs await allFutures(switchClient.stop(), switchRelay.stop()) + check addresses != switchClient.peerInfo.addrs + asyncTest "Three relays connections": var state = 0 let From e1c4c9e845ebc30539d80c02604db11edf11861b Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 14 Mar 2023 11:12:45 +0100 Subject: [PATCH 032/112] Remove AddressMapper when AutonatService stops --- libp2p/protocols/connectivity/autonat/service.nim | 9 ++++++--- tests/testautonatservice.nim | 4 ++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index d1e23c9933..022893b48d 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -28,6 +28,7 @@ declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability type AutonatService* = ref object of Service newConnectedPeerHandler: PeerEventHandler + addressMapper: AddressMapper scheduleHandle: Future[void] networkReachability: NetworkReachability confidence: Option[float] @@ -176,8 +177,8 @@ proc addressMapper( return addrs method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = - proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = - return await self.addressMapper(switch.peerStore, listenAddrs) + self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} = + return await addressMapper(self, switch.peerStore, listenAddrs) info "Setting up AutonatService" let hasBeenSetup = await procCall Service(self).setup(switch) @@ -188,7 +189,7 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} = switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) if self.scheduleInterval.isSome(): self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get()) - switch.peerInfo.addressMappers.add(addressMapper) + switch.peerInfo.addressMappers.add(self.addressMapper) return hasBeenSetup method run*(self: AutonatService, switch: Switch) {.async, public.} = @@ -205,6 +206,8 @@ method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public self.scheduleHandle = nil if not isNil(self.newConnectedPeerHandler): switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) + switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper) + await switch.peerInfo.update() return hasBeenStopped proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) = diff --git a/tests/testautonatservice.nim b/tests/testautonatservice.nim index 4a0d35d63d..c5c93b07b4 100644 --- a/tests/testautonatservice.nim +++ b/tests/testautonatservice.nim @@ -104,9 +104,13 @@ suite "Autonat Service": check autonatService.networkReachability() == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3 + check switch1.peerInfo.addrs == switch1.peerStore.guessDialableAddrs(switch1.peerInfo.listenAddrs) + await allFuturesThrowing( switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) + check switch1.peerInfo.addrs == switch1.peerInfo.listenAddrs + asyncTest "Peer must be not reachable and then reachable": let autonatClientStub = 
AutonatClientStub.new(expectedDials = 6)

From a38cd88c0c0a072981d93ae68f77dbca0057156d Mon Sep 17 00:00:00 2001
From: Diego
Date: Tue, 7 Mar 2023 17:33:04 +0100
Subject: [PATCH 033/112] Bind to local addr when behind a NAT

---
 .pinned                                        |  2 +-
 libp2p/peerstore.nim                           |  5 +++++
 libp2p/protocols/connectivity/autonat/core.nim |  3 +++
 .../protocols/connectivity/autonat/service.nim |  4 +---
 libp2p/protocols/identify.nim                  |  6 ++++++
 libp2p/transports/tcptransport.nim             | 18 +++++++++++++-----
 libp2p/transports/transport.nim                |  6 +++++-
 libp2p/wire.nim                                |  8 ++++++--
 8 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/.pinned b/.pinned
index 110bc93fb6..bdb288614b 100644
--- a/.pinned
+++ b/.pinned
@@ -1,6 +1,6 @@
 bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
 chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
-chronos;https://github.com/status-im/nim-chronos@#f7835a192b45c37e97614d865141f21eea8c156e
+chronos;https://github.com/status-im/nim-chronos@#0240dd8b8a7fc54676b8d494a9126b5ffbbb4adf
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
 faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
 httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim
index 5e45bd98b8..07055b0338 100644
--- a/libp2p/peerstore.nim
+++ b/libp2p/peerstore.nim
@@ -225,6 +225,11 @@ proc getMostObservedIP*(self: PeerStore, ipVersion: IPVersion): Opt[MultiAddress
   ## Returns the most observed IP address or none if the number of observations is less than minCount.
   return self.identify.getMostObservedIP(ipVersion)
 
+proc getMostObservedIPsAndPorts*(self: PeerStore): seq[MultiAddress] =
+  ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations
+  ## is less than minCount.
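+  ## The candidates are derived from the addresses remote peers observed for us, as collected by the Identify protocol.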
+  return self.identify.getMostObservedIPsAndPorts()
+
diff --git a/libp2p/protocols/connectivity/autonat/core.nim b/libp2p/protocols/connectivity/autonat/core.nim
index ce76f7a28b..db49d4c324 100644
--- a/libp2p/protocols/connectivity/autonat/core.nim
+++ b/libp2p/protocols/connectivity/autonat/core.nim
@@ -58,6 +58,9 @@ type
     dial*: Option[AutonatDial]
     response*: Option[AutonatDialResponse]
 
+  NetworkReachability* {.pure.} = enum
+    NotReachable, Reachable, Unknown
+
 proc encode(p: AutonatPeerInfo): ProtoBuffer =
   result = initProtoBuffer()
   if p.id.isSome():
diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim
index 022893b48d..42bd9a3b3f 100644
--- a/libp2p/protocols/connectivity/autonat/service.nim
+++ b/libp2p/protocols/connectivity/autonat/service.nim
@@ -43,9 +43,6 @@ type
     minConfidence: float
     dialTimeout: Duration
 
-  NetworkReachability* {.pure.} = enum
-    NotReachable, Reachable, Unknown
-
   StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
 
 proc new*(
@@ -136,6 +133,7 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo
   if not isNil(self.statusAndConfidenceHandler):
     await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
   await switch.peerInfo.update()
+  trace "Most observed addresses", addrs = switch.peerStore.getMostObservedIPsAndPorts()
   return ans
 
 proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim
index 1141ace271..41ea59d6a1 100644
--- a/libp2p/protocols/identify.nim
+++ b/libp2p/protocols/identify.nim
@@ -264,3 +264,9 @@ proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, publi
 proc getMostObservedIP*(self: Identify, ipVersion: IPVersion): Opt[MultiAddress] =
   ## Returns the most observed IP address or none if the number of observations is less than minCount.
   return self.observedAddrManager.getMostObservedIP(ipVersion)
+
+proc getMostObservedIPsAndPorts*(self: Identify): seq[MultiAddress] =
+  ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations
+  ## is less than minCount.
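+  ## Observations are gathered from the observed address each remote peer reports for us during an Identify exchange.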
+  trace "Observed addresses so far", observedAddrManager = $self.observedAddrManager
+  return self.observedAddrManager.getMostObservedIPsAndPorts()
diff --git a/libp2p/transports/tcptransport.nim b/libp2p/transports/tcptransport.nim
index 46aa0a259f..371a34e92c 100644
--- a/libp2p/transports/tcptransport.nim
+++ b/libp2p/transports/tcptransport.nim
@@ -42,7 +42,7 @@ type
     servers*: seq[StreamServer]
     clients: array[Direction, seq[StreamTransport]]
     flags: set[ServerFlags]
-    clientFlags: set[TransportFlags]
+    clientFlags: set[ClientFlags]
    acceptFuts: seq[Future[StreamTransport]]
 
   TcpTransportTracker* = ref object of TrackerBase
@@ -136,13 +136,14 @@ proc new*(
     clientFlags:
       if ServerFlags.TcpNoDelay in flags:
         compilesOr:
-          {TransportFlags.TcpNoDelay}
+          {ClientFlags.TcpNoDelay}
         do:
           doAssert(false)
-          default(set[TransportFlags])
+          default(set[ClientFlags])
       else:
-        default(set[TransportFlags]),
-      upgrader: upgrade)
+        default(set[ClientFlags]),
+      upgrader: upgrade,
+      networkReachability: NetworkReachability.Unknown)
 
   return transport
 
@@ -165,6 +166,8 @@ method start*(
       trace "Invalid address detected, skipping!", address = ma
       continue
 
+    if self.networkReachability == NetworkReachability.NotReachable:
+      self.flags.incl(ServerFlags.ReusePort)
     let server = createStreamServer(
       ma = ma,
       flags = self.flags,
@@ -263,8 +266,13 @@ method dial*(
   ##
 
   trace "Dialing remote peer", address = $address
+  let transp =
+    if self.networkReachability == NetworkReachability.NotReachable and self.addrs.len > 0:
+      self.clientFlags.incl(ClientFlags.ReusePort)
+      await connect(address, flags = self.clientFlags, localAddress = Opt.some(self.addrs[0]))
+    else:
+      await connect(address, flags = self.clientFlags)
 
-  let transp = await connect(address, flags = self.clientFlags)
   try:
     let observedAddr = await getObservedAddr(transp)
     return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
diff --git a/libp2p/transports/transport.nim b/libp2p/transports/transport.nim
index a5a651d7e6..9a06a66f56 100644
--- a/libp2p/transports/transport.nim
+++ b/libp2p/transports/transport.nim
@@ -19,7 +19,10 @@ import ../stream/connection,
        ../multiaddress,
        ../multicodec,
        ../muxers/muxer,
-       ../upgrademngrs/upgrade
+       ../upgrademngrs/upgrade,
+       ../protocols/connectivity/autonat/core
+
+export core.NetworkReachability
 
 logScope:
   topics = "libp2p transport"
@@ -33,6 +36,7 @@ type
     addrs*: seq[MultiAddress]
     running*: bool
     upgrader*: Upgrade
+    networkReachability*: NetworkReachability
 
 proc newTransportClosedError*(parent: ref Exception = nil): ref LPError =
   newException(TransportClosedError,
diff --git a/libp2p/wire.nim b/libp2p/wire.nim
index 6c3671f2d2..17cd0dd789 100644
--- a/libp2p/wire.nim
+++ b/libp2p/wire.nim
@@ -77,7 +77,8 @@ proc connect*(
   ma: MultiAddress,
   bufferSize = DefaultStreamBufferSize,
   child: StreamTransport = nil,
-  flags = default(set[TransportFlags])): Future[StreamTransport]
+  flags = default(set[ClientFlags]),
+  localAddress: Opt[MultiAddress] = Opt.none(MultiAddress)): Future[StreamTransport]
   {.raises: [Defect, LPError, MaInvalidAddress].} =
   ## Open new connection to remote peer with address ``ma`` and create
   ## new transport object ``StreamTransport`` for established connection.
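The new `localAddress` parameter is what lets a NATed peer dial out from the very port it is already listening on, so the NAT mapping created by the outgoing connection matches the address it advertises. A minimal sketch of the intended call pattern, assuming the signature above; `dialFromListeningPort` and both addresses are illustrative, and, as in the TCP transport change above, both sockets need a reuse-port flag for the bind to succeed:

    import chronos, stew/results
    import libp2p/[multiaddress, wire]

    proc dialFromListeningPort() {.async.} =
      let
        # The port we already listen on; reusing it keeps the NAT mapping stable.
        local = MultiAddress.init("/ip4/0.0.0.0/tcp/4001").tryGet()
        remote = MultiAddress.init("/ip4/203.0.113.7/tcp/4001").tryGet()
      # Bind the outgoing socket to the listening port before connecting.
      let transp = await connect(remote, localAddress = Opt.some(local))
      await transp.closeWait()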
@@ -90,7 +91,10 @@ proc connect*( let transportAddress = initTAddress(ma).tryGet() compilesOr: - return connect(transportAddress, bufferSize, child, flags) + if localAddress.isSome(): + return connect(transportAddress, flags, bufferSize, child, initTAddress(localAddress.get()).tryGet()) + else: + return connect(transportAddress, flags, bufferSize, child) do: # support for older chronos versions return connect(transportAddress, bufferSize, child) From 2289e82642305256d32d1cc8f9b812abf5325e98 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 8 Mar 2023 16:30:25 +0100 Subject: [PATCH 034/112] Use ReusePort when starting --- libp2p/transports/tcptransport.nim | 16 ++++++++++++++-- tests/testtcptransport.nim | 19 +++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/libp2p/transports/tcptransport.nim b/libp2p/transports/tcptransport.nim index 371a34e92c..f5dcd6d2b7 100644 --- a/libp2p/transports/tcptransport.nim +++ b/libp2p/transports/tcptransport.nim @@ -49,6 +49,8 @@ type opened*: uint64 closed*: uint64 + TcpTransportError* = object of transport.TransportError + proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [Defect].} proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} = @@ -157,6 +159,17 @@ method start*( warn "TCP transport already running" return + proc getPort(ma: MultiAddress): seq[byte] = + return ma[1].get().protoArgument().get() + + proc isNotZeroPort(port: seq[byte]): bool = + return port != @[0.byte, 0] + + let supported = addrs.filterIt(self.handles(it)) + let nonZeroPorts = supported.mapIt(getPort(it)).filterIt(isNotZeroPort(it)) + if deduplicate(nonZeroPorts).len < nonZeroPorts.len: + raise newException(TcpTransportError, "Duplicate ports detected") + await procCall Transport(self).start(addrs) trace "Starting TCP transport" inc getTcpTransportTracker().opened @@ -166,8 +179,7 @@ method start*( trace "Invalid address detected, skipping!", address = ma continue - if self.networkReachability == NetworkReachability.NotReachable: - self.flags.incl(ServerFlags.ReusePort) + self.flags.incl(ServerFlags.ReusePort) let server = createStreamServer( ma = ma, flags = self.flags, diff --git a/tests/testtcptransport.nim b/tests/testtcptransport.nim index d0666e1d30..b78ab0a354 100644 --- a/tests/testtcptransport.nim +++ b/tests/testtcptransport.nim @@ -125,6 +125,25 @@ suite "TCP transport": server.close() await server.join() + asyncTest "Starting with duplicate ports must fail": + # Starting with duplicate addresses must fail + let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/8080").tryGet(), + MultiAddress.init("/ip4/0.0.0.0/tcp/8080").tryGet()] + + let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade()) + + expect TcpTransportError: + await transport.start(ma) + + asyncTest "Starting with duplicate but zero ports addresses must NOT fail": + let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet(), + MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] + + let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade()) + + await transport.start(ma) + await transport.stop() + proc transProvider(): Transport = TcpTransport.new(upgrade = Upgrade()) commonTransportTest( From 113d4af2ca8f139465fef836181b51378ac4e40e Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 27 Mar 2023 15:37:17 +0200 Subject: [PATCH 035/112] update chronos --- .pinned | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pinned b/.pinned index bdb288614b..276c26fc48 100644 --- a/.pinned +++ b/.pinned @@ -1,6 +1,6 @@ 
bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
 chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
-chronos;https://github.com/status-im/nim-chronos@#0240dd8b8a7fc54676b8d494a9126b5ffbbb4adf
+chronos;https://github.com/status-im/nim-chronos@#d488db3324aed2f096a63a926e3f709d6975e4df
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
 faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
 httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f

From d72e110e1730c47c3f72a858f6c5a13a7ae5e07e Mon Sep 17 00:00:00 2001
From: Diego
Date: Mon, 27 Mar 2023 15:37:40 +0200
Subject: [PATCH 036/112] use SocketFlags

---
 libp2p/transports/tcptransport.nim | 10 +++++-----
 libp2p/wire.nim                    |  9 ++++-----
 tests/testtcptransport.nim         | 30 ++++++++++++++++++++++++++++++
 3 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/libp2p/transports/tcptransport.nim b/libp2p/transports/tcptransport.nim
index f5dcd6d2b7..b20ae3b8c3 100644
--- a/libp2p/transports/tcptransport.nim
+++ b/libp2p/transports/tcptransport.nim
@@ -42,7 +42,7 @@ type
     servers*: seq[StreamServer]
     clients: array[Direction, seq[StreamTransport]]
     flags: set[ServerFlags]
-    clientFlags: set[ClientFlags]
+    clientFlags: set[SocketFlags]
     acceptFuts: seq[Future[StreamTransport]]
 
   TcpTransportTracker* = ref object of TrackerBase
@@ -138,12 +138,12 @@ proc new*(
     clientFlags:
       if ServerFlags.TcpNoDelay in flags:
         compilesOr:
-          {ClientFlags.TcpNoDelay}
+          {SocketFlags.TcpNoDelay}
         do:
          doAssert(false)
-          default(set[ClientFlags])
+          default(set[SocketFlags])
       else:
-        default(set[ClientFlags]),
+        default(set[SocketFlags]),
       upgrader: upgrade,
       networkReachability: NetworkReachability.Unknown)
@@ -280,7 +280,7 @@ method dial*(
   trace "Dialing remote peer", address = $address
   let transp =
     if self.networkReachability == NetworkReachability.NotReachable and self.addrs.len > 0:
-      self.clientFlags.incl(ClientFlags.ReusePort)
+      self.clientFlags.incl(SocketFlags.ReusePort)
       await connect(address, flags = self.clientFlags, localAddress = Opt.some(self.addrs[0]))
     else:
       await connect(address, flags = self.clientFlags)
diff --git a/libp2p/wire.nim b/libp2p/wire.nim
index 17cd0dd789..0888149455 100644
--- a/libp2p/wire.nim
+++ b/libp2p/wire.nim
@@ -77,7 +77,7 @@ proc connect*(
   ma: MultiAddress,
   bufferSize = DefaultStreamBufferSize,
   child: StreamTransport = nil,
-  flags = default(set[ClientFlags]),
+  flags = default(set[SocketFlags]),
   localAddress: Opt[MultiAddress] = Opt.none(MultiAddress)): Future[StreamTransport]
   {.raises: [Defect, LPError, MaInvalidAddress].} =
   ## Open new connection to remote peer with address ``ma`` and create
@@ -91,10 +91,9 @@ proc connect*(
   let transportAddress = initTAddress(ma).tryGet()
 
   compilesOr:
-    if localAddress.isSome():
-      return connect(transportAddress, flags, bufferSize, child, initTAddress(localAddress.get()).tryGet())
-    else:
-      return connect(transportAddress, flags, bufferSize, child)
+    return connect(transportAddress, bufferSize, child,
+      if localAddress.isSome(): initTAddress(localAddress.get()).tryGet() else: TransportAddress(),
+      flags)
   do: # support for older chronos versions
     return connect(transportAddress, bufferSize, child)
diff --git a/tests/testtcptransport.nim b/tests/testtcptransport.nim
index b78ab0a354..41236462ae 100644
--- a/tests/testtcptransport.nim
+++ b/tests/testtcptransport.nim
@@ -7,6 +7,7 @@ import
../libp2p/[stream/connection, transports/tcptransport, upgrademngrs/upgrade, multiaddress, + multicodec, errors, wire] @@ -144,6 +145,35 @@ suite "TCP transport": await transport.start(ma) await transport.stop() + asyncTest "Bind to listening port when not reachable": + let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] + let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade()) + await transport.start(ma) + + let ma2 = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] + let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade()) + await transport2.start(ma2) + + let ma3 = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] + let transport3: TcpTransport = TcpTransport.new(upgrade = Upgrade()) + await transport3.start(ma3) + + let listeningPort = transport.addrs[0][multiCodec("tcp")].get() + + let conn = await transport.dial(transport2.addrs[0]) + let acceptedConn = await transport2.accept() + let acceptedPort = acceptedConn.observedAddr.get()[multiCodec("tcp")].get() + check listeningPort != acceptedPort + + transport.networkReachability = NetworkReachability.NotReachable + + let conn2 = await transport.dial(transport3.addrs[0]) + let acceptedConn2 = await transport3.accept() + let acceptedPort2 = acceptedConn2.observedAddr.get()[multiCodec("tcp")].get() + check listeningPort == acceptedPort2 + + await allFutures(transport.stop(), transport2.stop(), transport3.stop()) + proc transProvider(): Transport = TcpTransport.new(upgrade = Upgrade()) commonTransportTest( From 8988e6894f079f1dafba5945d0f52b941f58d6c7 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 27 Mar 2023 16:48:49 +0200 Subject: [PATCH 037/112] remove duplicate ports check --- libp2p/transports/tcptransport.nim | 11 ----------- tests/testtcptransport.nim | 10 ---------- 2 files changed, 21 deletions(-) diff --git a/libp2p/transports/tcptransport.nim b/libp2p/transports/tcptransport.nim index b20ae3b8c3..4af207ee18 100644 --- a/libp2p/transports/tcptransport.nim +++ b/libp2p/transports/tcptransport.nim @@ -159,17 +159,6 @@ method start*( warn "TCP transport already running" return - proc getPort(ma: MultiAddress): seq[byte] = - return ma[1].get().protoArgument().get() - - proc isNotZeroPort(port: seq[byte]): bool = - return port != @[0.byte, 0] - - let supported = addrs.filterIt(self.handles(it)) - let nonZeroPorts = supported.mapIt(getPort(it)).filterIt(isNotZeroPort(it)) - if deduplicate(nonZeroPorts).len < nonZeroPorts.len: - raise newException(TcpTransportError, "Duplicate ports detected") - await procCall Transport(self).start(addrs) trace "Starting TCP transport" inc getTcpTransportTracker().opened diff --git a/tests/testtcptransport.nim b/tests/testtcptransport.nim index 41236462ae..d765bfa5ca 100644 --- a/tests/testtcptransport.nim +++ b/tests/testtcptransport.nim @@ -126,16 +126,6 @@ suite "TCP transport": server.close() await server.join() - asyncTest "Starting with duplicate ports must fail": - # Starting with duplicate addresses must fail - let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/8080").tryGet(), - MultiAddress.init("/ip4/0.0.0.0/tcp/8080").tryGet()] - - let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade()) - - expect TcpTransportError: - await transport.start(ma) - asyncTest "Starting with duplicate but zero ports addresses must NOT fail": let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] From 34f5f0d32124ecd10562aacc107c41b92222f304 Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Mon, 27 Mar 2023 
16:59:43 +0200
Subject: [PATCH 038/112] Make Unknown the NetworkReachability default value

Co-authored-by: Tanguy
---
 libp2p/protocols/connectivity/autonat/core.nim | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libp2p/protocols/connectivity/autonat/core.nim b/libp2p/protocols/connectivity/autonat/core.nim
index db49d4c324..703ff91206 100644
--- a/libp2p/protocols/connectivity/autonat/core.nim
+++ b/libp2p/protocols/connectivity/autonat/core.nim
@@ -59,7 +59,7 @@ type
     response*: Option[AutonatDialResponse]
 
   NetworkReachability* {.pure.} = enum
-    NotReachable, Reachable, Unknown
+    Unknown, NotReachable, Reachable
 
 proc encode(p: AutonatPeerInfo): ProtoBuffer =
   result = initProtoBuffer()
   if p.id.isSome():

From 341f983501c66d1a2f6310b1ab72d68c95c0693e Mon Sep 17 00:00:00 2001
From: Diego
Date: Mon, 19 Dec 2022 18:57:04 +0100
Subject: [PATCH 039/112] Messages

---
 .../protocols/connectivity/dcutr/messages.nim | 44 +++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 libp2p/protocols/connectivity/dcutr/messages.nim

diff --git a/libp2p/protocols/connectivity/dcutr/messages.nim b/libp2p/protocols/connectivity/dcutr/messages.nim
new file mode 100644
index 0000000000..4f63a7bf00
--- /dev/null
+++ b/libp2p/protocols/connectivity/dcutr/messages.nim
@@ -0,0 +1,44 @@
+# Nim-LibP2P
+# Copyright (c) 2022 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+import std/options
+import ../../../multiaddress
+import stew/objects
+
+export multiaddress
+
+type
+  MsgType* = enum
+    Connect = 100
+    Sync = 300
+
+  DcutrMsg* = object
+    msgType*: MsgType
+    addrs*: seq[MultiAddress]
+
+proc encode*(msg: DcutrMsg): ProtoBuffer =
+  result = initProtoBuffer()
+  result.write(1, msg.msgType.uint)
+  for address in msg.addrs:
+    result.write(2, address)
+  result.finish()
+
+proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): Option[DcutrMsg] =
+  var
+    msgTypeOrd: uint32
+    dcutrMsg: DcutrMsg
+  var pb = initProtoBuffer(buf)
+  var r1 = pb.getField(1, msgTypeOrd)
+  let r2 = pb.getRepeatedField(2, dcutrMsg.addrs)
+  if r1.isErr or r2.isErr or not checkedEnumAssign(dcutrMsg.msgType, msgTypeOrd):
+    return none(DcutrMsg)
+  return some(dcutrMsg)
+
+
+

From ece68021dcd176a0846ab1fa38e7a47c0c40b0f9 Mon Sep 17 00:00:00 2001
From: Diego
Date: Mon, 19 Dec 2022 18:57:14 +0100
Subject: [PATCH 040/112] Basic version

---
 libp2p/protocols/connectivity/dcutr/dcutr.nim | 59 +++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 libp2p/protocols/connectivity/dcutr/dcutr.nim

diff --git a/libp2p/protocols/connectivity/dcutr/dcutr.nim b/libp2p/protocols/connectivity/dcutr/dcutr.nim
new file mode 100644
index 0000000000..3fca39c90f
--- /dev/null
+++ b/libp2p/protocols/connectivity/dcutr/dcutr.nim
@@ -0,0 +1,59 @@
+# Nim-LibP2P
+# Copyright (c) 2022 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
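+
+# Direct Connection Upgrade through Relay (DCUtR): two NATed peers that
+# already share a relayed connection exchange their observed addresses and
+# then dial each other simultaneously, hole punching a direct connection.
+# Spec: https://github.com/libp2p/specs/blob/master/relay/DCUtR.md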
+
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}
+
+import std/[options, sets, sequtils]
+import ../../protocol,
+       ../../../switch,
+       ../../../stream/connection
+import messages
+import stew/results
+import chronos, chronicles, stew/objects
+
+logScope:
+  topics = "libp2p dcutr"
+
+const
+  DcutrCodec* = "/libp2p/dcutr/1.0.0"
+
+type
+  DcutrError* = object of LPError
+
+  Dcutr* = ref object of LPProtocol
+    switch*: Switch
+
+proc new*(T: typedesc[Dcutr]): T =
+  proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
+    discard
+
+  let d = T()
+  d.handler = handleStream
+  d.codec = DcutrCodec
+  d
+
+proc sendConnectMsg(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
+  let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode()
+  await conn.writeLp(pb.buffer)
+
+proc connect*(a: Dcutr, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
+    Future[MultiAddress] {.async.} =
+  let conn =
+    try:
+      if addrs.len == 0:
+        await a.switch.dial(pid, @[DcutrCodec])
+      else:
+        await a.switch.dial(pid, addrs, DcutrCodec)
+    except CatchableError as err:
+      raise newException(DcutrError, "Unexpected error when dialling", err)
+  defer: await conn.close()
+  await conn.sendConnectMsg(a.switch.peerInfo.peerId, a.switch.peerInfo.addrs)
\ No newline at end of file

From e415d9c5e1ed1584e0a06f551d883becf9bb2d8a Mon Sep 17 00:00:00 2001
From: Diego
Date: Mon, 19 Dec 2022 18:57:23 +0100
Subject: [PATCH 041/112] Basic messages test

---
 tests/testdcutr.nim | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 tests/testdcutr.nim

diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim
new file mode 100644
index 0000000000..338a5f1340
--- /dev/null
+++ b/tests/testdcutr.nim
@@ -0,0 +1,27 @@
+# Nim-LibP2P
+# Copyright (c) 2022 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
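+
+# Round-trip tests for the DCUtR message codec.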
+ +import std/options +import chronos, metrics +import unittest2 +import ../libp2p/protocols/connectivity/dcutr/[dcutr, messages] +import ./helpers + +suite "Dcutr": + teardown: + checkTrackers() + + asyncTest "Encode / Decode": + let addrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] + let connectMsg = DcutrMsg(msgType: MsgType.Connect, addrs: addrs) + + let pb = connectMsg.encode() + let connectMsgDecoded = DcutrMsg.decode(pb.buffer) + + check connectMsg == connectMsgDecoded.get() \ No newline at end of file From cda770590c5ec0522390a496a4a785021957afba Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 18 Jan 2023 14:13:30 +0100 Subject: [PATCH 042/112] dcutr --- libp2p/protocols/connectivity/dcutr/dcutr.nim | 70 +++++++++++++++---- .../protocols/connectivity/dcutr/messages.nim | 6 +- tests/testdcutr.nim | 18 ++++- 3 files changed, 77 insertions(+), 17 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/dcutr.nim b/libp2p/protocols/connectivity/dcutr/dcutr.nim index 3fca39c90f..afb2d468e2 100644 --- a/libp2p/protocols/connectivity/dcutr/dcutr.nim +++ b/libp2p/protocols/connectivity/dcutr/dcutr.nim @@ -20,6 +20,8 @@ import messages import stew/results import chronos, chronicles, stew/objects +export chronicles + logScope: topics = "libp2p dcutr" @@ -31,29 +33,71 @@ type Dcutr* = ref object of LPProtocol switch*: Switch + rttStart: Option[Moment] + rttEnd: Option[Moment] -method new*(T: typedesc[Dcutr]) = - proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} = - discard +proc sendConnectMsg(self: Dcutr, conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} = + let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() + await conn.writeLp(pb.buffer) - let d = T() +proc sendSyncMsg(self: Dcutr, conn: Connection) {.async.} = + let pb = DcutrMsg(msgType: MsgType.Sync, addrs: @[]).encode() + await conn.writeLp(pb.buffer) - d.handler = handleStream - d.codec = DcutrCodec +proc startSync*(self: Dcutr, conn: Connection): Future[Connection] {.async.} = + self.rttStart = some(Moment.now()) + trace "Sending a Connect msg", conn + await self.sendConnectMsg(conn, self.switch.peerInfo.peerId, self.switch.peerInfo.addrs) + let connectAnswer = DcutrMsg.decode(await conn.readLp(1024)) + trace "Received a Connect msg back", conn + self.rttEnd = some(Moment.now()) + trace "Sending a Sync msg", conn + await self.sendSyncMsg(conn) + let halfRtt = (self.rttEnd.get() - self.rttStart.get()) + await sleepAsync(halfRtt) + let directConn = + try: + await self.switch.dial(conn.peerId, connectAnswer.addrs, DcutrCodec) + except CatchableError as err: + raise newException(DcutrError, "Unexpected error when dialling", err) + return directConn -proc sendConnectMsg(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} = - let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() - await conn.writeLp(pb.buffer) +proc new*(T: typedesc[Dcutr], switch: Switch): T = + let self = T(switch: switch, rttStart: none(Moment), rttEnd: none(Moment)) + proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = + try: + let msg = DcutrMsg.decode(await stream.readLp(1024)) + case msg.msgType: + of MsgType.Connect: + #trace "Received a Connect msg", msg + trace "Sending a Connect msg back", msg + await self.sendConnectMsg(stream, self.switch.peerInfo.peerId, self.switch.peerInfo.addrs) + of MsgType.Sync: + let directConn = + try: + await self.switch.dial(stream.peerId, msg.addrs, DcutrCodec) + except 
CatchableError as err:
+            raise newException(DcutrError, "Unexpected error when dialling", err)
+          await directConn.writeLp("hi")
+    except CatchableError as exc:
+      error "Unexpected error in dcutr handler", msg = exc.msg
+    finally:
+      trace "exiting dcutr handler", stream
+      await stream.close()
+
+  self.handler = handleStream
+  self.codec = DcutrCodec
+  self

 proc connect*(self: Dcutr, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
     Future[MultiAddress] {.async.} =
   let conn =
     try:
       if addrs.len == 0:
-        await a.switch.dial(pid, @[DcutrCodec])
+        await self.switch.dial(pid, @[DcutrCodec])
       else:
-        await a.switch.dial(pid, addrs, DcutrCodec)
+        await self.switch.dial(pid, addrs, DcutrCodec)
     except CatchableError as err:
       raise newException(DcutrError, "Unexpected error when dialling", err)
   defer: await conn.close()
   await self.sendConnectMsg(conn, self.switch.peerInfo.peerId, self.switch.peerInfo.addrs)
\ No newline at end of file
diff --git a/libp2p/protocols/connectivity/dcutr/messages.nim b/libp2p/protocols/connectivity/dcutr/messages.nim
index 4f63a7bf00..a81355a194 100644
--- a/libp2p/protocols/connectivity/dcutr/messages.nim
+++ b/libp2p/protocols/connectivity/dcutr/messages.nim
@@ -29,7 +29,7 @@ proc encode*(msg: DcutrMsg): ProtoBuffer =
   result.write(2, address)
   result.finish()
 
-proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): Option[DcutrMsg] =
+proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg =
   var
     msgTypeOrd: uint32
     dcutrMsg: DcutrMsg
@@ -37,8 +37,8 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg =
   var r1 = pb.getField(1, msgTypeOrd)
   let r2 = pb.getRepeatedField(2, dcutrMsg.addrs)
   if r1.isErr or r2.isErr or not checkedEnumAssign(dcutrMsg.msgType, msgTypeOrd):
-    return none(DcutrMsg)
-  return some(dcutrMsg)
+    raise newException(DcutrError, "Received malformed message")
+  return dcutrMsg
 
 
 
diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim
index 338a5f1340..f66eef2078 100644
--- a/tests/testdcutr.nim
+++ b/tests/testdcutr.nim
@@ -11,6 +11,7 @@ import std/options
 import chronos, metrics
 import unittest2
 import ../libp2p/protocols/connectivity/dcutr/[dcutr, messages]
+import ../libp2p/builders
 import ./helpers
 
 suite "Dcutr":
@@ -24,4 +25,19 @@ suite "Dcutr":
     let pb = connectMsg.encode()
     let connectMsgDecoded = DcutrMsg.decode(pb.buffer)
 
-    check connectMsg == connectMsgDecoded.get()
\ No newline at end of file
+    check connectMsg == connectMsgDecoded
+
+  asyncTest "Direct connection":
+    let clientSwitch = newStandardSwitch()
+
+    let serverSwitch = newStandardSwitch()
+    let dcutrProto = Dcutr.new(serverSwitch)
+    serverSwitch.mount(dcutrProto)
+
+    asyncSpawn serverSwitch.start()
+
+    let conn = await clientSwitch.dial(serverSwitch.peerInfo.peerId, serverSwitch.peerInfo.addrs, @[DcutrCodec])
+    let directConn = await Dcutr.new(clientSwitch).startSync(conn)
+    echo await directConn.readLp(1024)
+
+    await allFutures(serverSwitch.stop())
\ No newline at end of file

From dd69bb016d6e006f9c1b81e8846c36f21b4758b4 Mon Sep 17 00:00:00 2001
From: Diego
Date: Wed, 15 Feb 2023 00:18:37 +0100
Subject: [PATCH 043/112] small refactoring

---
 libp2p/protocols/connectivity/dcutr/dcutr.nim | 29 ++++++++++---------
 tests/testdcutr.nim                           |  4 +--
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/libp2p/protocols/connectivity/dcutr/dcutr.nim
b/libp2p/protocols/connectivity/dcutr/dcutr.nim index afb2d468e2..46f5670790 100644 --- a/libp2p/protocols/connectivity/dcutr/dcutr.nim +++ b/libp2p/protocols/connectivity/dcutr/dcutr.nim @@ -32,11 +32,10 @@ type DcutrError* = object of LPError Dcutr* = ref object of LPProtocol - switch*: Switch rttStart: Option[Moment] rttEnd: Option[Moment] -proc sendConnectMsg(self: Dcutr, conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} = +proc sendConnectMsg(self: Dcutr, conn: Connection, addrs: seq[MultiAddress]) {.async.} = let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() await conn.writeLp(pb.buffer) @@ -44,35 +43,39 @@ proc sendSyncMsg(self: Dcutr, conn: Connection) {.async.} = let pb = DcutrMsg(msgType: MsgType.Sync, addrs: @[]).encode() await conn.writeLp(pb.buffer) -proc startSync*(self: Dcutr, conn: Connection): Future[Connection] {.async.} = +proc startSync*(self: Dcutr, switch: Switch, conn: Connection): Future[Connection] {.async.} = + logScope: + peerId = self.switch.peerInfo.peerId + self.rttStart = some(Moment.now()) - trace "Sending a Connect msg", conn - await self.sendConnectMsg(conn, self.switch.peerInfo.peerId, self.switch.peerInfo.addrs) + trace "Sync initiator has sent a Connect message", conn + await self.sendConnectMsg(conn, self.switch.peerInfo.addrs) let connectAnswer = DcutrMsg.decode(await conn.readLp(1024)) - trace "Received a Connect msg back", conn + trace "Sync initiator has received a Connect message back", conn self.rttEnd = some(Moment.now()) - trace "Sending a Sync msg", conn + trace "Sending a Sync message", conn await self.sendSyncMsg(conn) - let halfRtt = (self.rttEnd.get() - self.rttStart.get()) + let halfRtt = (self.rttEnd.get() - self.rttStart.get()) / 2 await sleepAsync(halfRtt) let directConn = try: - await self.switch.dial(conn.peerId, connectAnswer.addrs, DcutrCodec) + await switch.dial(conn.peerId, connectAnswer.addrs, DcutrCodec) except CatchableError as err: raise newException(DcutrError, "Unexpected error when dialling", err) return directConn -proc new*(T: typedesc[Dcutr], switch: Switch): T = +proc new*(T: typedesc[Dcutr]): T = let self = T(switch: switch, rttStart: none(Moment), rttEnd: none(Moment)) proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = try: let msg = DcutrMsg.decode(await stream.readLp(1024)) + trace "Sync receiver received a Connect message.", msg case msg.msgType: of MsgType.Connect: - #trace "Received a Connect msg", msg - trace "Sending a Connect msg back", msg - await self.sendConnectMsg(stream, self.switch.peerInfo.peerId, self.switch.peerInfo.addrs) + await self.sendConnectMsg(stream, self.switch.peerInfo.addrs) + trace "Sync receiver has sent a Connect message back" of MsgType.Sync: + trace "Sync receiver has received a Sync message" let directConn = try: await self.switch.dial(stream.peerId, msg.addrs, DcutrCodec) diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index f66eef2078..d893f43e65 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -31,13 +31,13 @@ suite "Dcutr": let clientSwitch = newStandardSwitch() let serverSwitch = newStandardSwitch() - let dcutrProto = Dcutr.new(serverSwitch) + let dcutrProto = Dcutr.new() serverSwitch.mount(dcutrProto) asyncSpawn serverSwitch.start() let conn = await clientSwitch.dial(serverSwitch.peerInfo.peerId, serverSwitch.peerInfo.addrs, @[DcutrCodec]) - let directConn = await Dcutr.new(clientSwitch).startSync(conn) + let directConn = await Dcutr.new().startSync(clientSwitch, conn) echo await directConn.readLp(1024) 
await allFutures(serverSwitch.stop()) \ No newline at end of file From 55b9ae41ff5733f3fabd0b0c90d42bb277bc8854 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 15 Feb 2023 19:55:33 +0100 Subject: [PATCH 044/112] Split dcutr files --- .../protocols/connectivity/dcutr/client.nim | 70 ++++++++++++ .../dcutr/{messages.nim => core.nim} | 25 ++++- libp2p/protocols/connectivity/dcutr/dcutr.nim | 106 ------------------ .../protocols/connectivity/dcutr/server.nim | 59 ++++++++++ tests/testdcutr.nim | 30 ++--- 5 files changed, 166 insertions(+), 124 deletions(-) create mode 100644 libp2p/protocols/connectivity/dcutr/client.nim rename libp2p/protocols/connectivity/dcutr/{messages.nim => core.nim} (61%) delete mode 100644 libp2p/protocols/connectivity/dcutr/dcutr.nim create mode 100644 libp2p/protocols/connectivity/dcutr/server.nim diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim new file mode 100644 index 0000000000..0e3e9d31c4 --- /dev/null +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -0,0 +1,70 @@ +# Nim-LibP2P +# Copyright (c) 2023 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import core +import ../../protocol, + ../../../stream/connection, + ../../../switch + +import stew/results +import chronos, chronicles + +type + DcutrClient* = ref object of LPProtocol + rttStart: Opt[Moment] + rttEnd: Opt[Moment] + +logScope: + topics = "libp2p dcutrclient" + +proc new*(T: typedesc[DcutrClient]): T = + return T(rttStart: Opt.none(Moment), rttEnd: Opt.none(Moment)) + +proc sendSyncMsg(conn: Connection) {.async.} = + let pb = DcutrMsg(msgType: MsgType.Sync, addrs: @[]).encode() + await conn.writeLp(pb.buffer) + +proc startSync*(self: DcutrClient, switch: Switch, conn: Connection): Future[Connection] {.async.} = + logScope: + peerId = switch.peerInfo.peerId + + self.rttStart = Opt.some(Moment.now()) + trace "Sync initiator has sent a Connect message", conn + await sendConnectMsg(conn, switch.peerInfo.addrs) + let connectAnswer = DcutrMsg.decode(await conn.readLp(1024)) + trace "Sync initiator has received a Connect message back", conn + self.rttEnd = Opt.some(Moment.now()) + trace "Sending a Sync message", conn + await sendSyncMsg(conn) + let halfRtt = (self.rttEnd.get() - self.rttStart.get()) div 2 + await sleepAsync(halfRtt) + let directConn = + try: + await switch.dial(conn.peerId, connectAnswer.addrs, DcutrCodec) + except CatchableError as err: + raise newException(DcutrError, "Unexpected error when dialling", err) + return directConn + +proc connect*(switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()): + Future[MultiAddress] {.async.} = + let conn = + try: + if addrs.len == 0: + await switch.dial(pid, @[DcutrCodec]) + else: + await switch.dial(pid, addrs, DcutrCodec) + except CatchableError as err: + raise newException(DcutrError, "Unexpected error when dialling", err) + defer: await conn.close() + await sendConnectMsg(conn, switch.peerInfo.addrs) diff --git a/libp2p/protocols/connectivity/dcutr/messages.nim b/libp2p/protocols/connectivity/dcutr/core.nim similarity index 61% rename from libp2p/protocols/connectivity/dcutr/messages.nim rename to 
libp2p/protocols/connectivity/dcutr/core.nim
index a81355a194..9d5fc26c0f 100644
--- a/libp2p/protocols/connectivity/dcutr/messages.nim
+++ b/libp2p/protocols/connectivity/dcutr/core.nim
@@ -1,5 +1,5 @@
 # Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,12 +7,25 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.
 
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}
+
 import std/options
-import ../../../multiaddress
+
+import ../../../multiaddress,
+       ../../../errors,
+       ../../../stream/connection
+
+import chronos
 import stew/objects
 
 export multiaddress
 
+const
+  DcutrCodec* = "/libp2p/dcutr/1.0.0"
+
 type
   MsgType* = enum
     Connect = 100
@@ -22,6 +35,8 @@ type
     msgType*: MsgType
     addrs*: seq[MultiAddress]
 
+  DcutrError* = object of LPError
+
 proc encode*(msg: DcutrMsg): ProtoBuffer =
   result = initProtoBuffer()
   result.write(1, msg.msgType.uint)
@@ -29,7 +44,7 @@ proc encode*(msg: DcutrMsg): ProtoBuffer =
   result.write(2, address)
   result.finish()
 
-proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg =
+proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrError].} =
   var
     msgTypeOrd: uint32
     dcutrMsg: DcutrMsg
@@ -40,5 +55,9 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg =
   if r1.isErr or r2.isErr or not checkedEnumAssign(dcutrMsg.msgType, msgTypeOrd):
     raise newException(DcutrError, "Received malformed message")
   return dcutrMsg
 
+proc sendConnectMsg*(conn: Connection, addrs: seq[MultiAddress]) {.async.} =
+  let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode()
+  await conn.writeLp(pb.buffer)
 
 
diff --git a/libp2p/protocols/connectivity/dcutr/dcutr.nim b/libp2p/protocols/connectivity/dcutr/dcutr.nim
deleted file mode 100644
index 46f5670790..0000000000
--- a/libp2p/protocols/connectivity/dcutr/dcutr.nim
+++ /dev/null
@@ -1,106 +0,0 @@
-# Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
-# Licensed under either of
-# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
-# * MIT license ([LICENSE-MIT](LICENSE-MIT))
-# at your option.
-# This file may not be copied, modified, or distributed except according to
-# those terms.
- -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} - -import std/[options, sets, sequtils] -import ../../protocol, - ../../../switch, - ../../../stream/connection -import messages -import stew/results -import chronos, chronicles, stew/objects - -export chronicles - -logScope: - topics = "libp2p dcutr" - -const - DcutrCodec* = "/libp2p/dcutr/1.0.0" - -type - DcutrError* = object of LPError - - Dcutr* = ref object of LPProtocol - rttStart: Option[Moment] - rttEnd: Option[Moment] - -proc sendConnectMsg(self: Dcutr, conn: Connection, addrs: seq[MultiAddress]) {.async.} = - let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() - await conn.writeLp(pb.buffer) - -proc sendSyncMsg(self: Dcutr, conn: Connection) {.async.} = - let pb = DcutrMsg(msgType: MsgType.Sync, addrs: @[]).encode() - await conn.writeLp(pb.buffer) - -proc startSync*(self: Dcutr, switch: Switch, conn: Connection): Future[Connection] {.async.} = - logScope: - peerId = self.switch.peerInfo.peerId - - self.rttStart = some(Moment.now()) - trace "Sync initiator has sent a Connect message", conn - await self.sendConnectMsg(conn, self.switch.peerInfo.addrs) - let connectAnswer = DcutrMsg.decode(await conn.readLp(1024)) - trace "Sync initiator has received a Connect message back", conn - self.rttEnd = some(Moment.now()) - trace "Sending a Sync message", conn - await self.sendSyncMsg(conn) - let halfRtt = (self.rttEnd.get() - self.rttStart.get()) / 2 - await sleepAsync(halfRtt) - let directConn = - try: - await switch.dial(conn.peerId, connectAnswer.addrs, DcutrCodec) - except CatchableError as err: - raise newException(DcutrError, "Unexpected error when dialling", err) - return directConn - -proc new*(T: typedesc[Dcutr]): T = - let self = T(switch: switch, rttStart: none(Moment), rttEnd: none(Moment)) - proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = - try: - let msg = DcutrMsg.decode(await stream.readLp(1024)) - trace "Sync receiver received a Connect message.", msg - case msg.msgType: - of MsgType.Connect: - await self.sendConnectMsg(stream, self.switch.peerInfo.addrs) - trace "Sync receiver has sent a Connect message back" - of MsgType.Sync: - trace "Sync receiver has received a Sync message" - let directConn = - try: - await self.switch.dial(stream.peerId, msg.addrs, DcutrCodec) - except CatchableError as err: - raise newException(DcutrError, "Unexpected error when dialling", err) - await directConn.writeLp("hi") - except CatchableError as exc: - error "Unexpected error in dcutr handler", msg = exc.msg - finally: - trace "exiting dcutr handler", stream - await stream.close() - - self.handler = handleStream - self.codec = DcutrCodec - self - -proc connect*(self: Dcutr, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()): - Future[MultiAddress] {.async.} = - let conn = - try: - if addrs.len == 0: - await self.switch.dial(pid, @[DcutrCodec]) - else: - await self.switch.dial(pid, addrs, DcutrCodec) - except CatchableError as err: - raise newException(DcutrError, "Unexpected error when dialling", err) - defer: await conn.close() - await self.sendConnectMsg(conn, self.switch.peerInfo.peerId, self.switch.peerInfo.addrs) \ No newline at end of file diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim new file mode 100644 index 0000000000..e988945463 --- /dev/null +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -0,0 +1,59 @@ +# Nim-LibP2P +# Copyright (c) 2022 Status Research & 
Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/[options, sets, sequtils] + +import core +import ../../protocol, + ../../../stream/connection, + ../../../switch + +import stew/[results, objects] +import chronos, chronicles + +export chronicles + +type Dcutr* = ref object of LPProtocol + switch: Switch + +logScope: + topics = "libp2p dcutr" + +proc new*(T: typedesc[Dcutr], switch: Switch): T = + let self = T(switch: switch) + proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = + try: + let msg = DcutrMsg.decode(await stream.readLp(1024)) + trace "Sync receiver received a Connect message.", msg + case msg.msgType: + of MsgType.Connect: + await sendConnectMsg(stream, self.switch.peerInfo.addrs) + trace "Sync receiver has sent a Connect message back" + of MsgType.Sync: + trace "Sync receiver has received a Sync message" + let directConn = + try: + await self.switch.dial(stream.peerId, msg.addrs, DcutrCodec) + except CatchableError as err: + raise newException(DcutrError, "Unexpected error when dialling", err) + await directConn.writeLp("hi") + except CatchableError as exc: + error "Unexpected error in dcutr handler", msg = exc.msg + finally: + trace "exiting dcutr handler", stream + await stream.close() + + self.handler = handleStream + self.codec = DcutrCodec + self \ No newline at end of file diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index d893f43e65..8d5f995709 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -10,7 +10,7 @@ import std/options import chronos, metrics import unittest2 -import ../libp2p/protocols/connectivity/dcutr/[dcutr, messages] +import ../libp2p/protocols/connectivity/dcutr/[core, client, server] import ../libp2p/builders import ./helpers @@ -27,17 +27,17 @@ suite "Dcutr": check connectMsg == connectMsgDecoded - asyncTest "Direct connection": - let clientSwitch = newStandardSwitch() - - let serverSwitch = newStandardSwitch() - let dcutrProto = Dcutr.new() - serverSwitch.mount(dcutrProto) - - asyncSpawn serverSwitch.start() - - let conn = await clientSwitch.dial(serverSwitch.peerInfo.peerId, serverSwitch.peerInfo.addrs, @[DcutrCodec]) - let directConn = await Dcutr.new().startSync(clientSwitch, conn) - echo await directConn.readLp(1024) - - await allFutures(serverSwitch.stop()) \ No newline at end of file + # asyncTest "Direct connection": + # let clientSwitch = newStandardSwitch() + # + # let serverSwitch = newStandardSwitch() + # let dcutrProto = Dcutr.new(serverSwitch) + # serverSwitch.mount(dcutrProto) + # + # asyncSpawn serverSwitch.start() + # + # let conn = await clientSwitch.dial(serverSwitch.peerInfo.peerId, serverSwitch.peerInfo.addrs, @[DcutrCodec]) + # let directConn = await DcutrClient.new().startSync(clientSwitch, conn) + # echo await directConn.readLp(1024) + # + # await allFutures(serverSwitch.stop()) \ No newline at end of file From 5e8744d0ed218a9f56f04d42ae8432ba1a95e149 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 12:41:40 +0100 Subject: [PATCH 045/112] Fix several issues --- .../protocols/connectivity/dcutr/client.nim | 58 ++++++------ .../protocols/connectivity/dcutr/server.nim | 30 +++---- tests/testdcutr.nim | 88 +++++++++++++++---- 3 files changed, 109 
insertions(+), 67 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 0e3e9d31c4..33524c27f4 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -21,7 +21,7 @@ import stew/results import chronos, chronicles type - DcutrClient* = ref object of LPProtocol + DcutrClient* = ref object of RootObj rttStart: Opt[Moment] rttEnd: Opt[Moment] @@ -31,40 +31,32 @@ logScope: proc new*(T: typedesc[DcutrClient]): T = return T(rttStart: Opt.none(Moment), rttEnd: Opt.none(Moment)) -proc sendSyncMsg(conn: Connection) {.async.} = - let pb = DcutrMsg(msgType: MsgType.Sync, addrs: @[]).encode() - await conn.writeLp(pb.buffer) +proc sendSyncMsg(stream: Connection, addrs: seq[MultiAddress]) {.async.} = + let pb = DcutrMsg(msgType: MsgType.Sync, addrs: addrs).encode() + await stream.writeLp(pb.buffer) -proc startSync*(self: DcutrClient, switch: Switch, conn: Connection): Future[Connection] {.async.} = +proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]) {.async.} = logScope: peerId = switch.peerInfo.peerId - self.rttStart = Opt.some(Moment.now()) - trace "Sync initiator has sent a Connect message", conn - await sendConnectMsg(conn, switch.peerInfo.addrs) - let connectAnswer = DcutrMsg.decode(await conn.readLp(1024)) - trace "Sync initiator has received a Connect message back", conn - self.rttEnd = Opt.some(Moment.now()) - trace "Sending a Sync message", conn - await sendSyncMsg(conn) - let halfRtt = (self.rttEnd.get() - self.rttStart.get()) div 2 - await sleepAsync(halfRtt) - let directConn = - try: - await switch.dial(conn.peerId, connectAnswer.addrs, DcutrCodec) - except CatchableError as err: - raise newException(DcutrError, "Unexpected error when dialling", err) - return directConn + var stream: Connection + try: + stream = await switch.dial(remotePeerId, DcutrCodec) + await sendConnectMsg(stream, addrs) + debug "Dcutr initiator has sent a Connect message." + self.rttStart = Opt.some(Moment.now()) + let connectAnswer = DcutrMsg.decode(await stream.readLp(1024)) + self.rttEnd = Opt.some(Moment.now()) + debug "Dcutr initiator has received a Connect message back.", connectAnswer + let halfRtt = (self.rttEnd.get() - self.rttStart.get()) div 2 + await sendSyncMsg(stream, addrs) + debug "Dcutr initiator has sent a Sync message." 
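+    # Per the DCUtR flow, the receiver dials as soon as it gets the Sync
+    # message, so waiting half the measured RTT here lines both peers up to
+    # dial at roughly the same moment, giving the simultaneous open a chance.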
+ await sleepAsync(halfRtt) + await switch.connect(remotePeerId, connectAnswer.addrs, true, false) + except CatchableError as err: + error "Unexpected error when trying direct conn", err = err.msg + raise newException(DcutrError, "Unexpected error when trying a direct conn", err) + finally: + if stream != nil: + await stream.close() -proc connect*(switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()): - Future[MultiAddress] {.async.} = - let conn = - try: - if addrs.len == 0: - await switch.dial(pid, @[DcutrCodec]) - else: - await switch.dial(pid, addrs, DcutrCodec) - except CatchableError as err: - raise newException(DcutrError, "Unexpected error when dialling", err) - defer: await conn.close() - await sendConnectMsg(conn, switch.peerInfo.addrs) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index e988945463..88a583f4f3 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -34,25 +34,17 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = let self = T(switch: switch) proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = try: - let msg = DcutrMsg.decode(await stream.readLp(1024)) - trace "Sync receiver received a Connect message.", msg - case msg.msgType: - of MsgType.Connect: - await sendConnectMsg(stream, self.switch.peerInfo.addrs) - trace "Sync receiver has sent a Connect message back" - of MsgType.Sync: - trace "Sync receiver has received a Sync message" - let directConn = - try: - await self.switch.dial(stream.peerId, msg.addrs, DcutrCodec) - except CatchableError as err: - raise newException(DcutrError, "Unexpected error when dialling", err) - await directConn.writeLp("hi") - except CatchableError as exc: - error "Unexpected error in dcutr handler", msg = exc.msg - finally: - trace "exiting dcutr handler", stream - await stream.close() + let connectMsg = DcutrMsg.decode(await stream.readLp(1024)) + debug "Dcutr receiver received a Connect message.", connectMsg + let dialingAddrs = connectMsg.addrs + await sendConnectMsg(stream, self.switch.peerInfo.addrs) + debug "Dcutr receiver has sent a Connect message back." 
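+      # Block here until the initiator's Sync message arrives; the initiator
+      # times its own dial so that the dial-back below coincides with it.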
+ let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) + debug "Dcutr receiver has received a Sync message.", syncMsg + await switch.connect(stream.peerId, dialingAddrs, true, false) + except CatchableError as err: + error "Unexpected error in dcutr handler", msg = err.msg + raise newException(DcutrError, "Unexpected error when trying a direct conn", err) self.handler = handleStream self.codec = DcutrCodec diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index 8d5f995709..e5221af5f0 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -10,15 +10,39 @@ import std/options import chronos, metrics import unittest2 +import ../libp2p/protocols/connectivity/relay/[relay, client] +import ../libp2p/services/autorelayservice import ../libp2p/protocols/connectivity/dcutr/[core, client, server] +from ../libp2p/protocols/connectivity/autonat/core import NetworkReachability import ../libp2p/builders import ./helpers +proc createSwitch(r: Relay = nil, autoRelay: Service = nil): Switch = + var builder = SwitchBuilder.new() + .withRng(newRng()) + .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ]) + .withTcpTransport() + .withMplex() + .withNoise() + + if autoRelay != nil: + builder = builder.withServices(@[autoRelay]) + + if r != nil: + builder = builder.withCircuitRelay(r) + + return builder.build() + +proc buildRelayMA(switchRelay: Switch, switchClient: Switch): MultiAddress = + MultiAddress.init($switchRelay.peerInfo.addrs[0] & "/p2p/" & + $switchRelay.peerInfo.peerId & "/p2p-circuit/p2p/" & + $switchClient.peerInfo.peerId).get() + suite "Dcutr": teardown: checkTrackers() - asyncTest "Encode / Decode": + asyncTest "Connect msg Encode / Decode": let addrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] let connectMsg = DcutrMsg(msgType: MsgType.Connect, addrs: addrs) @@ -26,18 +50,52 @@ suite "Dcutr": let connectMsgDecoded = DcutrMsg.decode(pb.buffer) check connectMsg == connectMsgDecoded + echo connectMsgDecoded + + asyncTest "Sync msg Encode / Decode": + let addrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] + let syncMsg = DcutrMsg(msgType: MsgType.Sync, addrs: addrs) + + let pb = syncMsg.encode() + let syncMsgDecoded = DcutrMsg.decode(pb.buffer) + + check syncMsg == syncMsgDecoded + + asyncTest "Direct connection": + + let fut = newFuture[seq[MultiAddress]]() + + let switch2 = createSwitch(RelayClient.new()) + proc checkMA(address: seq[MultiAddress]) = + if not fut.completed(): + echo $address + fut.complete(address) + + let relayClient = RelayClient.new() + let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng()) + let behindNATSwitch = createSwitch(relayClient, autoRelayService) + + let switchRelay = createSwitch(Relay.new()) + let publicSwitch = createSwitch(RelayClient.new()) + + let dcutrProto = Dcutr.new(publicSwitch) + publicSwitch.mount(dcutrProto) + + await allFutures(switchRelay.start(), behindNATSwitch.start(), publicSwitch.start()) + + await behindNATSwitch.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs) + await publicSwitch.connect(behindNATSwitch.peerInfo.peerId, (await fut)) + + for t in behindNATSwitch.transports: + t.networkReachability = NetworkReachability.NotReachable + + for t in publicSwitch.transports: + t.networkReachability = NetworkReachability.NotReachable + + await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + + echo 
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) + checkExpiring: + behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1 - # asyncTest "Direct connection": - # let clientSwitch = newStandardSwitch() - # - # let serverSwitch = newStandardSwitch() - # let dcutrProto = Dcutr.new(serverSwitch) - # serverSwitch.mount(dcutrProto) - # - # asyncSpawn serverSwitch.start() - # - # let conn = await clientSwitch.dial(serverSwitch.peerInfo.peerId, serverSwitch.peerInfo.addrs, @[DcutrCodec]) - # let directConn = await DcutrClient.new().startSync(clientSwitch, conn) - # echo await directConn.readLp(1024) - # - # await allFutures(serverSwitch.stop()) \ No newline at end of file + await allFutures(switchRelay.stop(), behindNATSwitch.stop(), publicSwitch.stop()) \ No newline at end of file From 080ae376158158454a16cae3d7a1b15637cfd620 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 15:47:40 +0100 Subject: [PATCH 046/112] fix server side --- libp2p/protocols/connectivity/dcutr/server.nim | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 88a583f4f3..996f0369a1 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -25,27 +25,30 @@ import chronos, chronicles export chronicles type Dcutr* = ref object of LPProtocol - switch: Switch logScope: topics = "libp2p dcutr" proc new*(T: typedesc[Dcutr], switch: Switch): T = - let self = T(switch: switch) + proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = try: let connectMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver received a Connect message.", connectMsg - let dialingAddrs = connectMsg.addrs - await sendConnectMsg(stream, self.switch.peerInfo.addrs) + var ourAddrs = switch.peerStore.getMostObservedIPsAndPorts() # likely empty when the peer is reachable + if ourAddrs.len == 0: + # this list should be the same as the peer'' public addrs when it is reachable + ourAddrs = guessDialableAddrs(switch.peerStore, switch.peerInfo.addrs) + await sendConnectMsg(stream, ourAddrs) debug "Dcutr receiver has sent a Connect message back." 
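In isolation, the address selection added here is a two-step fallback. A sketch under the helper names used at this point in the series (they are consolidated into the real peerstore API a few patches later):

    proc selectOurAddrs(switch: Switch): seq[MultiAddress] =
      # prefer the ports and IPs that identify exchanges actually observed about us
      result = switch.peerStore.getMostObservedIPsAndPorts()
      if result.len == 0:
        # otherwise guess a publicly dialable form of our own addresses
        result = guessDialableAddrs(switch.peerStore, switch.peerInfo.addrs)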
let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver has received a Sync message.", syncMsg - await switch.connect(stream.peerId, dialingAddrs, true, false) + await switch.connect(stream.peerId, connectMsg.addrs, true, false) except CatchableError as err: error "Unexpected error in dcutr handler", msg = err.msg raise newException(DcutrError, "Unexpected error when trying a direct conn", err) + let self = T() self.handler = handleStream self.codec = DcutrCodec self \ No newline at end of file From f568a1e7be3bce0685d0a143497815cd3868ded2 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 15:58:30 +0100 Subject: [PATCH 047/112] fix typo --- libp2p/protocols/connectivity/dcutr/server.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 996f0369a1..f4ddb84ab9 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -37,7 +37,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = debug "Dcutr receiver received a Connect message.", connectMsg var ourAddrs = switch.peerStore.getMostObservedIPsAndPorts() # likely empty when the peer is reachable if ourAddrs.len == 0: - # this list should be the same as the peer'' public addrs when it is reachable + # this list should be the same as the peer's public addrs when it is reachable ourAddrs = guessDialableAddrs(switch.peerStore, switch.peerInfo.addrs) await sendConnectMsg(stream, ourAddrs) debug "Dcutr receiver has sent a Connect message back." From 9c881cf54161f4dd1dd2851c8b35ed1d4a6bf33b Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 16 Mar 2023 17:40:27 +0100 Subject: [PATCH 048/112] upgrade during simultaneous conn --- libp2p/dial.nim | 1 + libp2p/dialer.nim | 26 ++++++++++++++----- .../protocols/connectivity/dcutr/client.nim | 2 ++ .../protocols/connectivity/dcutr/server.nim | 1 + libp2p/switch.nim | 2 -- libp2p/upgrademngrs/upgrade.nim | 3 ++- 6 files changed, 25 insertions(+), 10 deletions(-) diff --git a/libp2p/dial.nim b/libp2p/dial.nim index b4c205af7a..c5c7d800c0 100644 --- a/libp2p/dial.nim +++ b/libp2p/dial.nim @@ -22,6 +22,7 @@ export results type Dial* = ref object of RootObj + isSimultaneousConnServer*: bool method connect*( self: Dial, diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index a5e7e0c270..25ee10a129 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -36,7 +36,6 @@ logScope: declareCounter(libp2p_total_dial_attempts, "total attempted dials") declareCounter(libp2p_successful_dials, "dialed successful peers") declareCounter(libp2p_failed_dials, "failed dials") -declareCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades") type DialFailedError* = object of LPError @@ -71,22 +70,35 @@ proc dialAndUpgrade( libp2p_failed_dials.inc() return nil # Try the next address - # also keep track of the connection's bottom unsafe transport direction - # required by gossipsub scoring - dialed.transportDir = Direction.Out - libp2p_successful_dials.inc() let mux = try: - await transport.upgradeOutgoing(dialed, peerId) + if self.isSimultaneousConnServer: + dialed.transportDir = Direction.In + let upgradedConn = await transport.upgradeIncoming(dialed) + doAssert not isNil(upgradedConn), "connection died after upgradeIncoming" + upgradedConn + else: + # also keep track of the connection's bottom unsafe transport direction + # required by gossipsub scoring + dialed.transportDir = Direction.Out + # This 
is related to the simultaneous connection through DCUtr. According to the spec, the server side of + # the DCUtr protocol should be the client of the simultaneous connection. + let upgradedConn = await transport.upgradeOutgoing(dialed, peerId) + doAssert not isNil(upgradedConn), "connection died after upgradeOutgoing" + upgradedConn + except CatchableError as exc: # If we failed to establish the connection through one transport, # we won't succeeded through another - no use in trying again await dialed.close() debug "Upgrade failed", msg = exc.msg, peerId if exc isnot CancelledError: - libp2p_failed_upgrades_outgoing.inc() + if self.isSimultaneousConnServer: + libp2p_failed_upgrades_outgoing.inc() + else: + libp2p_failed_upgrades_incoming.inc() # Try other address return nil diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 33524c27f4..d3cea2ef48 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -52,7 +52,9 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) + switch.dialer.isSimultaneousConnServer = true await switch.connect(remotePeerId, connectAnswer.addrs, true, false) + debug "Dcutr initiator has directly connected to the remote peer." except CatchableError as err: error "Unexpected error when trying direct conn", err = err.msg raise newException(DcutrError, "Unexpected error when trying a direct conn", err) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index f4ddb84ab9..0d94536121 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -44,6 +44,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver has received a Sync message.", syncMsg await switch.connect(stream.peerId, connectMsg.addrs, true, false) + debug "Dcutr receiver has directly connected to the remote peer." except CatchableError as err: error "Unexpected error in dcutr handler", msg = err.msg raise newException(DcutrError, "Unexpected error when trying a direct conn", err) diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 672eca123d..7f5a33fec3 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -58,8 +58,6 @@ logScope: # and only if the channel has been secured (i.e. 
if a secure manager has been # previously provided) -declareCounter(libp2p_failed_upgrades_incoming, "incoming connections failed upgrades") - const ConcurrentUpgrades* = 4 diff --git a/libp2p/upgrademngrs/upgrade.nim b/libp2p/upgrademngrs/upgrade.nim index 3cc3fc75df..4ee17abe02 100644 --- a/libp2p/upgrademngrs/upgrade.nim +++ b/libp2p/upgrademngrs/upgrade.nim @@ -28,7 +28,8 @@ import ../stream/connection, export connmanager, connection, identify, secure, multistream -declarePublicCounter(libp2p_failed_upgrade, "peers failed upgrade") +declarePublicCounter(libp2p_failed_upgrades_incoming, "incoming connections failed upgrades") +declarePublicCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades") logScope: topics = "libp2p upgrade" From e98ad5b209ffdbd23dac814adda77eedbc51b920 Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 16 Mar 2023 19:28:35 +0100 Subject: [PATCH 049/112] fix for nim 1.2 --- libp2p/protocols/connectivity/dcutr/core.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index 9d5fc26c0f..0c1194ceee 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -44,7 +44,7 @@ proc encode*(msg: DcutrMsg): ProtoBuffer = result.write(2, addr) result.finish() -proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrError].} = +proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, DcutrError].} = var msgTypeOrd: uint32 dcutrMsg: DcutrMsg From 83c2daa2b8f4a7e78505c2be27ea093dd1fdd240 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 27 Mar 2023 20:45:49 +0200 Subject: [PATCH 050/112] fix compilation --- libp2p/peerstore.nim | 3 +++ libp2p/protocols/connectivity/dcutr/server.nim | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 617a40ec62..26a8349c5c 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -221,5 +221,8 @@ proc identify*( finally: await stream.closeWithEOF() +proc getMostObservedProtosAndPorts*(self: PeerStore): seq[MultiAddress] = + return self.identify.observedAddrManager.getMostObservedProtosAndPorts() + proc guessDialableAddr*(self: PeerStore, ma: MultiAddress): MultiAddress = return self.identify.observedAddrManager.guessDialableAddr(ma) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 0d94536121..3f6ec0c9c6 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -35,10 +35,10 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = try: let connectMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver received a Connect message.", connectMsg - var ourAddrs = switch.peerStore.getMostObservedIPsAndPorts() # likely empty when the peer is reachable + var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts() # likely empty when the peer is reachable if ourAddrs.len == 0: # this list should be the same as the peer's public addrs when it is reachable - ourAddrs = guessDialableAddrs(switch.peerStore, switch.peerInfo.addrs) + ourAddrs = switch.peerInfo.addrs.mapIt(switch.peerStore.guessDialableAddr(it)) await sendConnectMsg(stream, ourAddrs) debug "Dcutr receiver has sent a Connect message back." 
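PATCH 049 above only widens decode's raises list for Nim 1.2; behaviourally the wire format stays a plain two-field protobuf. A round-trip sketch using the encode/decode pair from dcutr/core, with an illustrative multiaddress:

    let msg = DcutrMsg(
      msgType: MsgType.Connect,
      addrs: @[MultiAddress.init("/ip4/198.51.100.7/tcp/4001").tryGet()])
    doAssert DcutrMsg.decode(msg.encode().buffer) == msg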
let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) From 84ad89f571b7b7807de73aa5fdb5f37c1fa58240 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 27 Mar 2023 20:46:01 +0200 Subject: [PATCH 051/112] fix text and make it run --- tests/testdcutr.nim | 10 +++++----- tests/testnative.nim | 3 ++- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index e5221af5f0..79789c7f4d 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -92,10 +92,10 @@ suite "Dcutr": for t in publicSwitch.transports: t.networkReachability = NetworkReachability.NotReachable - await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) - - echo behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) - checkExpiring: - behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1 + try: + await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + .wait(300.millis) + except CatchableError as exc: + discard await allFutures(switchRelay.stop(), behindNATSwitch.stop(), publicSwitch.stop()) \ No newline at end of file diff --git a/tests/testnative.nim b/tests/testnative.nim index 25dd7d0f02..317e976b09 100644 --- a/tests/testnative.nim +++ b/tests/testnative.nim @@ -43,4 +43,5 @@ import testtcptransport, testyamux, testautonat, testautonatservice, - testautorelay + testautorelay, + testdcutr From 2745a4997ea579c6e3ebf97d46bcc53aa7c1498b Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 27 Mar 2023 23:43:38 +0200 Subject: [PATCH 052/112] fix for nim 1.2 --- tests/testdcutr.nim | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index 79789c7f4d..7d8b4f5579 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -10,9 +10,11 @@ import std/options import chronos, metrics import unittest2 -import ../libp2p/protocols/connectivity/relay/[relay, client] +import ../libp2p/protocols/connectivity/relay/relay +import ../libp2p/protocols/connectivity/relay/client as rclient import ../libp2p/services/autorelayservice -import ../libp2p/protocols/connectivity/dcutr/[core, client, server] +import ../libp2p/protocols/connectivity/dcutr/core as dcore +import ../libp2p/protocols/connectivity/dcutr/[client, server] from ../libp2p/protocols/connectivity/autonat/core import NetworkReachability import ../libp2p/builders import ./helpers From 051b1cf83f7f4916370ab0feebf196ae39bb4595 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 28 Mar 2023 14:12:26 +0200 Subject: [PATCH 053/112] small fixes --- libp2p/protocols/connectivity/dcutr/core.nim | 3 --- libp2p/protocols/connectivity/dcutr/server.nim | 2 +- tests/testdcutr.nim | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index 0c1194ceee..83d9148de6 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -58,6 +58,3 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, proc sendConnectMsg*(conn: Connection, addrs: seq[MultiAddress]) {.async.} = let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() await conn.writeLp(pb.buffer) - - - diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 3f6ec0c9c6..d6c2735474 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ 
b/libp2p/protocols/connectivity/dcutr/server.nim @@ -52,4 +52,4 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = let self = T() self.handler = handleStream self.codec = DcutrCodec - self \ No newline at end of file + self diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index 7d8b4f5579..134a8c2383 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -100,4 +100,4 @@ suite "Dcutr": except CatchableError as exc: discard - await allFutures(switchRelay.stop(), behindNATSwitch.stop(), publicSwitch.stop()) \ No newline at end of file + await allFutures(switchRelay.stop(), behindNATSwitch.stop(), publicSwitch.stop()) From b7d97b2d5affff73e78dadef76393b2f82056065 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 28 Mar 2023 17:08:05 +0200 Subject: [PATCH 054/112] use a proc param instead --- libp2p/dial.nim | 4 ++-- libp2p/dialer.nim | 22 +++++++++++-------- .../protocols/connectivity/dcutr/client.nim | 3 +-- libp2p/switch.nim | 5 +++-- 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/libp2p/dial.nim b/libp2p/dial.nim index c5c7d800c0..001679946d 100644 --- a/libp2p/dial.nim +++ b/libp2p/dial.nim @@ -22,14 +22,14 @@ export results type Dial* = ref object of RootObj - isSimultaneousConnServer*: bool method connect*( self: Dial, peerId: PeerId, addrs: seq[MultiAddress], forceDial = false, - reuseConnection = true) {.async, base.} = + reuseConnection = true, + isSimultaneousConnServer = false) {.async, base.} = ## connect remote peer without negotiating ## a protocol ## diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index 25ee10a129..d17ede1fa3 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -52,7 +52,8 @@ proc dialAndUpgrade( self: Dialer, peerId: Opt[PeerId], hostname: string, - address: MultiAddress): + address: MultiAddress, + isSimultaneousConnServer = false): Future[Muxer] {.async.} = for transport in self.transports: # for each transport @@ -74,7 +75,7 @@ proc dialAndUpgrade( let mux = try: - if self.isSimultaneousConnServer: + if isSimultaneousConnServer: dialed.transportDir = Direction.In let upgradedConn = await transport.upgradeIncoming(dialed) doAssert not isNil(upgradedConn), "connection died after upgradeIncoming" @@ -95,7 +96,7 @@ proc dialAndUpgrade( await dialed.close() debug "Upgrade failed", msg = exc.msg, peerId if exc isnot CancelledError: - if self.isSimultaneousConnServer: + if isSimultaneousConnServer: libp2p_failed_upgrades_outgoing.inc() else: libp2p_failed_upgrades_incoming.inc() @@ -139,7 +140,8 @@ proc expandDnsAddr( proc dialAndUpgrade( self: Dialer, peerId: Opt[PeerId], - addrs: seq[MultiAddress]): + addrs: seq[MultiAddress], + isSimultaneousConnServer = false): Future[Muxer] {.async.} = debug "Dialing peer", peerId @@ -157,7 +159,7 @@ proc dialAndUpgrade( else: await self.nameResolver.resolveMAddress(expandedAddress) for resolvedAddress in resolvedAddresses: - result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress) + result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, isSimultaneousConnServer) if not isNil(result): return result @@ -174,7 +176,8 @@ proc internalConnect( peerId: Opt[PeerId], addrs: seq[MultiAddress], forceDial: bool, - reuseConnection = true): + reuseConnection = true, + isSimultaneousConnServer = false): Future[Muxer] {.async.} = if Opt.some(self.localPeerId) == peerId: raise newException(CatchableError, "can't dial self!") @@ -192,7 +195,7 @@ proc internalConnect( let slot = self.connManager.getOutgoingSlot(forceDial) let muxed = try: - await 
self.dialAndUpgrade(peerId, addrs) + await self.dialAndUpgrade(peerId, addrs, isSimultaneousConnServer) except CatchableError as exc: slot.release() raise exc @@ -218,7 +221,8 @@ method connect*( peerId: PeerId, addrs: seq[MultiAddress], forceDial = false, - reuseConnection = true) {.async.} = + reuseConnection = true, + isSimultaneousConnServer = false) {.async.} = ## connect remote peer without negotiating ## a protocol ## @@ -226,7 +230,7 @@ method connect*( if self.connManager.connCount(peerId) > 0 and reuseConnection: return - discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection) + discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, isSimultaneousConnServer) method connect*( self: Dialer, diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index d3cea2ef48..8442420eaf 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -52,8 +52,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) - switch.dialer.isSimultaneousConnServer = true - await switch.connect(remotePeerId, connectAnswer.addrs, true, false) + await switch.connect(remotePeerId, connectAnswer.addrs, true, false, true) debug "Dcutr initiator has directly connected to the remote peer." except CatchableError as err: error "Unexpected error when trying direct conn", err = err.msg diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 7f5a33fec3..7fbdddd8ed 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -147,10 +147,11 @@ method connect*( peerId: PeerId, addrs: seq[MultiAddress], forceDial = false, - reuseConnection = true): Future[void] {.public.} = + reuseConnection = true, + isSimultaneousConnServer = false): Future[void] {.public.} = ## Connects to a peer without opening a stream to it - s.dialer.connect(peerId, addrs, forceDial, reuseConnection) + s.dialer.connect(peerId, addrs, forceDial, reuseConnection, isSimultaneousConnServer) method connect*( s: Switch, From b16f7a5a9d3bca184a4393df0a4cff1ea764668b Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 28 Mar 2023 17:08:19 +0200 Subject: [PATCH 055/112] simplify test --- tests/testdcutr.nim | 56 +++++++++++---------------------------------- 1 file changed, 13 insertions(+), 43 deletions(-) diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index 134a8c2383..5b492a303b 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -10,36 +10,12 @@ import std/options import chronos, metrics import unittest2 -import ../libp2p/protocols/connectivity/relay/relay -import ../libp2p/protocols/connectivity/relay/client as rclient -import ../libp2p/services/autorelayservice import ../libp2p/protocols/connectivity/dcutr/core as dcore import ../libp2p/protocols/connectivity/dcutr/[client, server] from ../libp2p/protocols/connectivity/autonat/core import NetworkReachability import ../libp2p/builders import ./helpers -proc createSwitch(r: Relay = nil, autoRelay: Service = nil): Switch = - var builder = SwitchBuilder.new() - .withRng(newRng()) - .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ]) - .withTcpTransport() - .withMplex() - .withNoise() - - if autoRelay != nil: - builder = builder.withServices(@[autoRelay]) - - if r != nil: - builder = builder.withCircuitRelay(r) - - return builder.build() - -proc buildRelayMA(switchRelay: 
Switch, switchClient: Switch): MultiAddress =
-  MultiAddress.init($switchRelay.peerInfo.addrs[0] & "/p2p/" &
-                    $switchRelay.peerInfo.peerId & "/p2p-circuit/p2p/" &
-                    $switchClient.peerInfo.peerId).get()
-
 suite "Dcutr":
   teardown:
     checkTrackers()
@@ -63,30 +39,17 @@ suite "Dcutr":

     check syncMsg == syncMsgDecoded

-  asyncTest "Direct connection":
+  asyncTest "DCUtR establishes a new connection":

-    let fut = newFuture[seq[MultiAddress]]()
-
-    let switch2 = createSwitch(RelayClient.new())
-    proc checkMA(address: seq[MultiAddress]) =
-      if not fut.completed():
-        echo $address
-        fut.complete(address)
-
-    let relayClient = RelayClient.new()
-    let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng())
-    let behindNATSwitch = createSwitch(relayClient, autoRelayService)
-
-    let switchRelay = createSwitch(Relay.new())
-    let publicSwitch = createSwitch(RelayClient.new())
+    let behindNATSwitch = newStandardSwitch()
+    let publicSwitch = newStandardSwitch()

     let dcutrProto = Dcutr.new(publicSwitch)
     publicSwitch.mount(dcutrProto)

-    await allFutures(switchRelay.start(), behindNATSwitch.start(), publicSwitch.start())
+    await allFutures(behindNATSwitch.start(), publicSwitch.start())

-    await behindNATSwitch.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs)
-    await publicSwitch.connect(behindNATSwitch.peerInfo.peerId, (await fut))
+    await publicSwitch.connect(behindNATSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)

     for t in behindNATSwitch.transports:
       t.networkReachability = NetworkReachability.NotReachable
@@ -95,9 +58,16 @@ suite "Dcutr":
       t.networkReachability = NetworkReachability.NotReachable

     try:
+      # we can't hole punch when both peers are in the same machine. This means that the simultaneous dials will result
+      # in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
+      # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) .wait(300.millis) except CatchableError as exc: discard - await allFutures(switchRelay.stop(), behindNATSwitch.stop(), publicSwitch.stop()) + checkExpiring: + # we still expect a new connection to be open by the other peer acting as the dcutr server + behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2 + + await allFutures(behindNATSwitch.stop(), publicSwitch.stop()) From b467d9d1ab12f1a46e24a17f43eee3f741575e0b Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 28 Mar 2023 18:06:23 +0200 Subject: [PATCH 056/112] simplify upgrade --- libp2p/dial.nim | 2 +- libp2p/dialer.nim | 37 +++++++------------ .../protocols/connectivity/dcutr/client.nim | 2 +- libp2p/stream/lpstream.nim | 5 +++ libp2p/switch.nim | 6 +-- libp2p/transports/transport.nim | 14 ++----- libp2p/upgrademngrs/muxedupgrade.nim | 15 +------- libp2p/upgrademngrs/upgrade.nim | 8 +--- 8 files changed, 30 insertions(+), 59 deletions(-) diff --git a/libp2p/dial.nim b/libp2p/dial.nim index 001679946d..dfd8b667d4 100644 --- a/libp2p/dial.nim +++ b/libp2p/dial.nim @@ -29,7 +29,7 @@ method connect*( addrs: seq[MultiAddress], forceDial = false, reuseConnection = true, - isSimultaneousConnServer = false) {.async, base.} = + upgradeDir = Out) {.async, base.} = ## connect remote peer without negotiating ## a protocol ## diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index d17ede1fa3..f4ddf82c7a 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -53,7 +53,7 @@ proc dialAndUpgrade( peerId: Opt[PeerId], hostname: string, address: MultiAddress, - isSimultaneousConnServer = false): + upgradeDir = Out): Future[Muxer] {.async.} = for transport in self.transports: # for each transport @@ -75,28 +75,17 @@ proc dialAndUpgrade( let mux = try: - if isSimultaneousConnServer: - dialed.transportDir = Direction.In - let upgradedConn = await transport.upgradeIncoming(dialed) - doAssert not isNil(upgradedConn), "connection died after upgradeIncoming" - upgradedConn - else: - # also keep track of the connection's bottom unsafe transport direction - # required by gossipsub scoring - dialed.transportDir = Direction.Out - # This is related to the simultaneous connection through DCUtr. According to the spec, the server side of - # the DCUtr protocol should be the client of the simultaneous connection. 
- let upgradedConn = await transport.upgradeOutgoing(dialed, peerId) - doAssert not isNil(upgradedConn), "connection died after upgradeOutgoing" - upgradedConn - + dialed.transportDir = upgradeDir + let upgradedConn = await transport.upgrade(dialed, upgradeDir, peerId) + doAssert not isNil(upgradedConn), "connection died after upgrade " & $upgradeDir + upgradedConn except CatchableError as exc: # If we failed to establish the connection through one transport, # we won't succeeded through another - no use in trying again await dialed.close() debug "Upgrade failed", msg = exc.msg, peerId if exc isnot CancelledError: - if isSimultaneousConnServer: + if upgradeDir == Out: libp2p_failed_upgrades_outgoing.inc() else: libp2p_failed_upgrades_incoming.inc() @@ -104,7 +93,7 @@ proc dialAndUpgrade( # Try other address return nil - doAssert not isNil(mux), "connection died after upgradeOutgoing" + doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir debug "Dial successful", peerId = mux.connection.peerId return mux return nil @@ -141,7 +130,7 @@ proc dialAndUpgrade( self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], - isSimultaneousConnServer = false): + upgradeDir = Out): Future[Muxer] {.async.} = debug "Dialing peer", peerId @@ -159,7 +148,7 @@ proc dialAndUpgrade( else: await self.nameResolver.resolveMAddress(expandedAddress) for resolvedAddress in resolvedAddresses: - result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, isSimultaneousConnServer) + result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir) if not isNil(result): return result @@ -177,7 +166,7 @@ proc internalConnect( addrs: seq[MultiAddress], forceDial: bool, reuseConnection = true, - isSimultaneousConnServer = false): + upgradeDir = Out): Future[Muxer] {.async.} = if Opt.some(self.localPeerId) == peerId: raise newException(CatchableError, "can't dial self!") @@ -195,7 +184,7 @@ proc internalConnect( let slot = self.connManager.getOutgoingSlot(forceDial) let muxed = try: - await self.dialAndUpgrade(peerId, addrs, isSimultaneousConnServer) + await self.dialAndUpgrade(peerId, addrs, upgradeDir) except CatchableError as exc: slot.release() raise exc @@ -222,7 +211,7 @@ method connect*( addrs: seq[MultiAddress], forceDial = false, reuseConnection = true, - isSimultaneousConnServer = false) {.async.} = + upgradeDir = Out) {.async.} = ## connect remote peer without negotiating ## a protocol ## @@ -230,7 +219,7 @@ method connect*( if self.connManager.connCount(peerId) > 0 and reuseConnection: return - discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, isSimultaneousConnServer) + discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir) method connect*( self: Dialer, diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 8442420eaf..9d26140779 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -52,7 +52,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) - await switch.connect(remotePeerId, connectAnswer.addrs, true, false, true) + await switch.connect(remotePeerId, connectAnswer.addrs, true, false, In) debug "Dcutr initiator has directly connected to the remote peer." 
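The parameter swap above carries the whole DCUtR trick: the initiator still opens the TCP connection, but upgrades it as the inbound side of the security and muxer handshakes, which is the role reversal the removed comment described. The same call in named-parameter form (a readability sketch; the diff passes these positionally, and a later patch in this series adopts exactly this spelling):

    await switch.connect(remotePeerId, connectAnswer.addrs,
                         forceDial = true,          # ignore connection-count limits for the punch
                         reuseConnection = false,   # the existing relayed conn must not satisfy this call
                         upgradeDir = Direction.In) # negotiate secure/mux as the responder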
except CatchableError as err: error "Unexpected error when trying direct conn", err = err.msg diff --git a/libp2p/stream/lpstream.nim b/libp2p/stream/lpstream.nim index 6dfe501c4f..9c49bf8068 100644 --- a/libp2p/stream/lpstream.nim +++ b/libp2p/stream/lpstream.nim @@ -80,6 +80,11 @@ type opened*: uint64 closed*: uint64 +proc `$`*(dir: Direction): string = + case dir + of In: "In" + of Out: "Out" + proc setupStreamTracker*(name: string): StreamTracker = let tracker = new StreamTracker diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 7fbdddd8ed..a04cacfc0f 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -148,10 +148,10 @@ method connect*( addrs: seq[MultiAddress], forceDial = false, reuseConnection = true, - isSimultaneousConnServer = false): Future[void] {.public.} = + upgradeDir = Out): Future[void] {.public.} = ## Connects to a peer without opening a stream to it - s.dialer.connect(peerId, addrs, forceDial, reuseConnection, isSimultaneousConnServer) + s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir) method connect*( s: Switch, @@ -220,7 +220,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil) s.peerInfo.protocols.add(proto.codec) proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} = - let muxed = await trans.upgradeIncoming(conn) + let muxed = await trans.upgrade(conn, In, Opt.none(PeerId)) switch.connManager.storeMuxer(muxed) await switch.peerStore.identify(muxed) trace "Connection upgrade succeeded" diff --git a/libp2p/transports/transport.nim b/libp2p/transports/transport.nim index 9a06a66f56..7e00fc2fca 100644 --- a/libp2p/transports/transport.nim +++ b/libp2p/transports/transport.nim @@ -83,24 +83,16 @@ proc dial*( peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.gcsafe.} = self.dial("", address) -method upgradeIncoming*( - self: Transport, - conn: Connection): Future[Muxer] {.base, gcsafe.} = - ## base upgrade method that the transport uses to perform - ## transport specific upgrades - ## - - self.upgrader.upgradeIncoming(conn) - -method upgradeOutgoing*( +method upgrade*( self: Transport, conn: Connection, + direction: Direction, peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} = ## base upgrade method that the transport uses to perform ## transport specific upgrades ## - self.upgrader.upgradeOutgoing(conn, peerId) + self.upgrader.upgrade(conn, direction, peerId) method handles*( self: Transport, diff --git a/libp2p/upgrademngrs/muxedupgrade.nim b/libp2p/upgrademngrs/muxedupgrade.nim index 7c833e0b94..d63aeb1be3 100644 --- a/libp2p/upgrademngrs/muxedupgrade.nim +++ b/libp2p/upgrademngrs/muxedupgrade.nim @@ -62,14 +62,14 @@ proc mux*( muxer.handler = muxer.handle() return muxer -proc upgrade( +method upgrade*( self: MuxedUpgrade, conn: Connection, direction: Direction, peerId: Opt[PeerId]): Future[Muxer] {.async.} = trace "Upgrading connection", conn, direction - let sconn = await self.secure(conn, direction, peerId) # secure the connection + let sconn = await self.secure(conn, direction, if direction == In: Opt.none(PeerId) else: peerId) # secure the connection if isNil(sconn): raise newException(UpgradeFailedError, "unable to secure connection, stopping upgrade") @@ -90,17 +90,6 @@ proc upgrade( trace "Upgraded connection", conn, sconn, direction return muxer -method upgradeOutgoing*( - self: MuxedUpgrade, - conn: Connection, - peerId: Opt[PeerId]): Future[Muxer] {.async, gcsafe.} = - return await self.upgrade(conn, Out, peerId) - -method upgradeIncoming*( - self: 
MuxedUpgrade, - conn: Connection): Future[Muxer] {.async, gcsafe.} = - return await self.upgrade(conn, In, Opt.none(PeerId)) - proc new*( T: type MuxedUpgrade, muxers: seq[MuxerProvider], diff --git a/libp2p/upgrademngrs/upgrade.nim b/libp2p/upgrademngrs/upgrade.nim index 4ee17abe02..3a2cc66871 100644 --- a/libp2p/upgrademngrs/upgrade.nim +++ b/libp2p/upgrademngrs/upgrade.nim @@ -42,14 +42,10 @@ type connManager*: ConnManager secureManagers*: seq[Secure] -method upgradeIncoming*( - self: Upgrade, - conn: Connection): Future[Muxer] {.base.} = - doAssert(false, "Not implemented!") - -method upgradeOutgoing*( +method upgrade*( self: Upgrade, conn: Connection, + direction: Direction, peerId: Opt[PeerId]): Future[Muxer] {.base.} = doAssert(false, "Not implemented!") From 59be5057c6a395530a38303fb98351e85e5adb30 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 28 Mar 2023 18:34:27 +0200 Subject: [PATCH 057/112] fix for nim 1.2 --- libp2p/dialer.nim | 10 +++++----- libp2p/protocols/connectivity/dcutr/client.nim | 2 +- libp2p/switch.nim | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index f4ddf82c7a..9f8ffe5ef1 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -53,7 +53,7 @@ proc dialAndUpgrade( peerId: Opt[PeerId], hostname: string, address: MultiAddress, - upgradeDir = Out): + upgradeDir = Direction.Out): Future[Muxer] {.async.} = for transport in self.transports: # for each transport @@ -85,7 +85,7 @@ proc dialAndUpgrade( await dialed.close() debug "Upgrade failed", msg = exc.msg, peerId if exc isnot CancelledError: - if upgradeDir == Out: + if upgradeDir == Direction.Out: libp2p_failed_upgrades_outgoing.inc() else: libp2p_failed_upgrades_incoming.inc() @@ -130,7 +130,7 @@ proc dialAndUpgrade( self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], - upgradeDir = Out): + upgradeDir = Direction.Out): Future[Muxer] {.async.} = debug "Dialing peer", peerId @@ -166,7 +166,7 @@ proc internalConnect( addrs: seq[MultiAddress], forceDial: bool, reuseConnection = true, - upgradeDir = Out): + upgradeDir = Direction.Out): Future[Muxer] {.async.} = if Opt.some(self.localPeerId) == peerId: raise newException(CatchableError, "can't dial self!") @@ -211,7 +211,7 @@ method connect*( addrs: seq[MultiAddress], forceDial = false, reuseConnection = true, - upgradeDir = Out) {.async.} = + upgradeDir = Direction.Out) {.async.} = ## connect remote peer without negotiating ## a protocol ## diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 9d26140779..3bd30672d3 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -52,7 +52,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) - await switch.connect(remotePeerId, connectAnswer.addrs, true, false, In) + await switch.connect(remotePeerId, connectAnswer.addrs, true, false, Direction.In) debug "Dcutr initiator has directly connected to the remote peer." 
except CatchableError as err: error "Unexpected error when trying direct conn", err = err.msg diff --git a/libp2p/switch.nim b/libp2p/switch.nim index a04cacfc0f..bc2975d332 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -148,7 +148,7 @@ method connect*( addrs: seq[MultiAddress], forceDial = false, reuseConnection = true, - upgradeDir = Out): Future[void] {.public.} = + upgradeDir = Direction.Out): Future[void] {.public.} = ## Connects to a peer without opening a stream to it s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir) @@ -220,7 +220,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil) s.peerInfo.protocols.add(proto.codec) proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} = - let muxed = await trans.upgrade(conn, In, Opt.none(PeerId)) + let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId)) switch.connManager.storeMuxer(muxed) await switch.peerStore.identify(muxed) trace "Connection upgrade succeeded" From 8745c6e40aa8fdc2a07314836f9e044938afb364 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 28 Mar 2023 20:22:50 +0200 Subject: [PATCH 058/112] fix for nim 1.2 --- tests/testdcutr.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index 5b492a303b..af13f6f346 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -28,7 +28,6 @@ suite "Dcutr": let connectMsgDecoded = DcutrMsg.decode(pb.buffer) check connectMsg == connectMsgDecoded - echo connectMsgDecoded asyncTest "Sync msg Encode / Decode": let addrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] @@ -66,6 +65,7 @@ suite "Dcutr": except CatchableError as exc: discard + await sleepAsync(200.millis) # wait for the dcutr server to finish checkExpiring: # we still expect a new connection to be open by the other peer acting as the dcutr server behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2 From 7d40521c0e40f82fe268a6dd72ae495d2f3006c3 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 29 Mar 2023 14:00:33 +0200 Subject: [PATCH 059/112] remove unnecessary fields from client --- libp2p/protocols/connectivity/dcutr/client.nim | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 3bd30672d3..f2bb331cf5 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -22,14 +22,12 @@ import chronos, chronicles type DcutrClient* = ref object of RootObj - rttStart: Opt[Moment] - rttEnd: Opt[Moment] logScope: topics = "libp2p dcutrclient" proc new*(T: typedesc[DcutrClient]): T = - return T(rttStart: Opt.none(Moment), rttEnd: Opt.none(Moment)) + return T() proc sendSyncMsg(stream: Connection, addrs: seq[MultiAddress]) {.async.} = let pb = DcutrMsg(msgType: MsgType.Sync, addrs: addrs).encode() @@ -44,11 +42,11 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: stream = await switch.dial(remotePeerId, DcutrCodec) await sendConnectMsg(stream, addrs) debug "Dcutr initiator has sent a Connect message." 
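The timing logic being reworked here is small enough to state on its own: the Connect round trip measures the relay path's RTT, and waiting half of it after Sync lines both peers' direct dials up in time. A chronos-only sketch (the helper name is illustrative, not part of the series):

    import chronos

    proc halfRoundTrip(roundTrip: Future[void]): Future[Duration] {.async.} =
      let rttStart = Moment.now()
      await roundTrip                    # e.g. reading the Connect answer back
      let rttEnd = Moment.now()
      return (rttEnd - rttStart) div 2'i64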
- self.rttStart = Opt.some(Moment.now()) + let rttStart = Opt.some(Moment.now()) let connectAnswer = DcutrMsg.decode(await stream.readLp(1024)) - self.rttEnd = Opt.some(Moment.now()) + let rttEnd = Opt.some(Moment.now()) debug "Dcutr initiator has received a Connect message back.", connectAnswer - let halfRtt = (self.rttEnd.get() - self.rttStart.get()) div 2 + let halfRtt = (rttEnd.get() - rttStart.get()) div 2 await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) From 187fe6efd29c46410048e899842dc0a43d0a3064 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 29 Mar 2023 14:21:39 +0200 Subject: [PATCH 060/112] naming boolean flags --- libp2p/protocols/connectivity/dcutr/client.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index f2bb331cf5..f46a6c5a3f 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -50,7 +50,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) - await switch.connect(remotePeerId, connectAnswer.addrs, true, false, Direction.In) + await switch.connect(remotePeerId, connectAnswer.addrs, forceDial = true, reuseConnection = false, upgradeDir = Direction.In) debug "Dcutr initiator has directly connected to the remote peer." except CatchableError as err: error "Unexpected error when trying direct conn", err = err.msg From daeeec950198ad4e41aa8accf2712761cd476098 Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Wed, 29 Mar 2023 14:42:27 +0200 Subject: [PATCH 061/112] Update libp2p/dial.nim Co-authored-by: Tanguy --- libp2p/dial.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/dial.nim b/libp2p/dial.nim index dfd8b667d4..089ebdb69f 100644 --- a/libp2p/dial.nim +++ b/libp2p/dial.nim @@ -29,7 +29,7 @@ method connect*( addrs: seq[MultiAddress], forceDial = false, reuseConnection = true, - upgradeDir = Out) {.async, base.} = + upgradeDir = Direction.Out) {.async, base.} = ## connect remote peer without negotiating ## a protocol ## From 6ee8c4e2f225ed84ec87edc454427c53751dc92a Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 29 Mar 2023 16:01:42 +0200 Subject: [PATCH 062/112] remove unnecessary proc --- libp2p/stream/lpstream.nim | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libp2p/stream/lpstream.nim b/libp2p/stream/lpstream.nim index 9c49bf8068..90e5df320f 100644 --- a/libp2p/stream/lpstream.nim +++ b/libp2p/stream/lpstream.nim @@ -80,10 +80,10 @@ type opened*: uint64 closed*: uint64 -proc `$`*(dir: Direction): string = - case dir - of In: "In" - of Out: "Out" +# proc `$`*(dir: Direction): string = +# case dir +# of In: "In" +# of Out: "Out" proc setupStreamTracker*(name: string): StreamTracker = let tracker = new StreamTracker From e57630d27baabeda66d4a09f0b7785c38ee8bb39 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 29 Mar 2023 16:06:28 +0200 Subject: [PATCH 063/112] always use peerId in secure --- libp2p/upgrademngrs/muxedupgrade.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/upgrademngrs/muxedupgrade.nim b/libp2p/upgrademngrs/muxedupgrade.nim index d63aeb1be3..6205923ac5 100644 --- a/libp2p/upgrademngrs/muxedupgrade.nim +++ b/libp2p/upgrademngrs/muxedupgrade.nim @@ -69,7 +69,7 @@ method upgrade*( peerId: Opt[PeerId]): 
Future[Muxer] {.async.} = trace "Upgrading connection", conn, direction - let sconn = await self.secure(conn, direction, if direction == In: Opt.none(PeerId) else: peerId) # secure the connection + let sconn = await self.secure(conn, direction, peerId) # secure the connection if isNil(sconn): raise newException(UpgradeFailedError, "unable to secure connection, stopping upgrade") From a98c490a8ea2864409c63c853503e37ac0f95c2e Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 29 Mar 2023 16:20:12 +0200 Subject: [PATCH 064/112] guessDialableAddr must use listenAddrs --- libp2p/protocols/connectivity/dcutr/server.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index d6c2735474..6085b69ff5 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -38,7 +38,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts() # likely empty when the peer is reachable if ourAddrs.len == 0: # this list should be the same as the peer's public addrs when it is reachable - ourAddrs = switch.peerInfo.addrs.mapIt(switch.peerStore.guessDialableAddr(it)) + ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it)) await sendConnectMsg(stream, ourAddrs) debug "Dcutr receiver has sent a Connect message back." let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) From e1e6839c97ec1d87c3795115497f8a44e587393f Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 29 Mar 2023 17:11:53 +0200 Subject: [PATCH 065/112] improve imports and test comment --- libp2p/protocols/connectivity/dcutr/client.nim | 6 +++--- libp2p/protocols/connectivity/dcutr/core.nim | 6 +++--- libp2p/protocols/connectivity/dcutr/server.nim | 6 +++--- tests/testdcutr.nim | 7 ++++--- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index f46a6c5a3f..327335d9f0 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -12,14 +12,14 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} +import stew/results +import chronos, chronicles + import core import ../../protocol, ../../../stream/connection, ../../../switch -import stew/results -import chronos, chronicles - type DcutrClient* = ref object of RootObj diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index 83d9148de6..07613ac223 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -14,13 +14,13 @@ else: import std/options +import chronos +import stew/objects + import ../../../multiaddress, ../../../errors, ../../../stream/connection -import chronos -import stew/objects - export multiaddress const diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 6085b69ff5..351cd00089 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -14,14 +14,14 @@ else: import std/[options, sets, sequtils] +import stew/[results, objects] +import chronos, chronicles + import core import ../../protocol, ../../../stream/connection, ../../../switch -import stew/[results, objects] -import chronos, chronicles - export chronicles type Dcutr* = ref object of LPProtocol 
diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim
index af13f6f346..5f8c6513ca 100644
--- a/tests/testdcutr.nim
+++ b/tests/testdcutr.nim
@@ -7,9 +7,9 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.

-import std/options
-import chronos, metrics
+import chronos
 import unittest2
+
 import ../libp2p/protocols/connectivity/dcutr/core as dcore
 import ../libp2p/protocols/connectivity/dcutr/[client, server]
 from ../libp2p/protocols/connectivity/autonat/core import NetworkReachability
 import ../libp2p/builders
 import ./helpers
@@ -58,8 +58,9 @@ suite "Dcutr":

     try:
       # we can't hole punch when both peers are in the same machine. This means that the simultaneous dials will result
-      # in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
+      # in two connection attempts, instead of one. This dial is likely going to fail because the dcutr client is acting as the
       # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
+      # The test should still pass if this doesn't fail though.
       await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
         .wait(300.millis)
     except CatchableError as exc:
       discard

From 7beef85e3ef24bf30ed4dc12e891f0ac369d9895 Mon Sep 17 00:00:00 2001
From: Diego
Date: Tue, 4 Apr 2023 00:15:29 +0200
Subject: [PATCH 066/112] remove sleep

---
 tests/testdcutr.nim | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim
index 5f8c6513ca..8b5abd76d0 100644
--- a/tests/testdcutr.nim
+++ b/tests/testdcutr.nim
@@ -66,7 +66,6 @@ suite "Dcutr":
     except CatchableError as exc:
       discard

-    await sleepAsync(200.millis) # wait for the dcutr server to finish
     checkExpiring:
       # we still expect a new connection to be open by the other peer acting as the dcutr server
       behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2

From 06a8f8355e68b124faba3e83647849a9d688db75 Mon Sep 17 00:00:00 2001
From: Diego
Date: Tue, 4 Apr 2023 12:27:04 +0200
Subject: [PATCH 067/112] testing fix for i386

---
 libp2p/protocols/connectivity/dcutr/client.nim | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim
index 327335d9f0..3944fbca04 100644
--- a/libp2p/protocols/connectivity/dcutr/client.nim
+++ b/libp2p/protocols/connectivity/dcutr/client.nim
@@ -42,11 +42,12 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
     stream = await switch.dial(remotePeerId, DcutrCodec)
     await sendConnectMsg(stream, addrs)
     debug "Dcutr initiator has sent a Connect message."
-    let rttStart = Opt.some(Moment.now())
+    let rttStart = Moment.now()
     let connectAnswer = DcutrMsg.decode(await stream.readLp(1024))
-    let rttEnd = Opt.some(Moment.now())
+    let rttEnd = Moment.now()
     debug "Dcutr initiator has received a Connect message back.", connectAnswer
-    let halfRtt = (rttEnd.get() - rttStart.get()) div 2
+    let halfRtt = (rttEnd - rttStart) div 2'i64
+    echo halfRtt.type
     await sendSyncMsg(stream, addrs)
     debug "Dcutr initiator has sent a Sync message."
     await sleepAsync(halfRtt)

From 9b1eb3ea60ed827512dabf0e5bcc29ace1ec8366 Mon Sep 17 00:00:00 2001
From: Diego
Date: Tue, 4 Apr 2023 15:51:11 +0200
Subject: [PATCH 068/112] update chronos version

---
 .pinned | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pinned b/.pinned
index 276c26fc48..7df527b2cf 100644
--- a/.pinned
+++ b/.pinned
@@ -1,6 +1,6 @@
 bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
 chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
-chronos;https://github.com/status-im/nim-chronos@#d488db3324aed2f096a63a926e3f709d6975e4df
+chronos;https://github.com/status-im/nim-chronos@#ab5a8c2e0f6941fe3debd61dff0293790079d1b0
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
 faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
 httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f

From 1b5e4fc436462c0d6d65cac6d250ad286fcabe54 Mon Sep 17 00:00:00 2001
From: Diego
Date: Tue, 4 Apr 2023 17:51:37 +0200
Subject: [PATCH 069/112] fix test

---
 tests/testdcutr.nim | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim
index 8b5abd76d0..ced7a9eb2d 100644
--- a/tests/testdcutr.nim
+++ b/tests/testdcutr.nim
@@ -53,21 +53,15 @@ suite "Dcutr":
     for t in behindNATSwitch.transports:
       t.networkReachability = NetworkReachability.NotReachable

-    for t in publicSwitch.transports:
-      t.networkReachability = NetworkReachability.NotReachable
-
-    try:
+    expect CatchableError:
       # we can't hole punch when both peers are in the same machine. This means that the simultaneous dials will result
-      # in two connection attempts, instead of one. This dial is likely going to fail because the dcutr client is acting as the
+      # in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
       # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
-      # The test should still pass if this doesn't fail though.
       await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
         .wait(300.millis)
-    except CatchableError as exc:
-      discard

     checkExpiring:
-      # we still expect a new connection to be open by the other peer acting as the dcutr server
+      # we still expect a new connection to be open by the receiver peer acting as the dcutr server
       behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2

     await allFutures(behindNATSwitch.stop(), publicSwitch.stop())

From b467d9d1ab12f1a46e24a17f43eee3f741575e0b Mon Sep 17 00:00:00 2001
From: Diego
Date: Wed, 5 Apr 2023 11:52:03 +0200
Subject: [PATCH 070/112] support only tcp addrs

---
 libp2p/protocols/connectivity/dcutr/client.nim | 13 ++++++++++++-
 libp2p/protocols/connectivity/dcutr/core.nim   |  7 ++++++-
 libp2p/protocols/connectivity/dcutr/server.nim | 16 ++++++++++++++--
 tests/testdcutr.nim                            |  2 +-
 4 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim
index 3944fbca04..d556ca4e29 100644
--- a/libp2p/protocols/connectivity/dcutr/client.nim
+++ b/libp2p/protocols/connectivity/dcutr/client.nim
@@ -39,11 +39,22 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:

   var stream: Connection
   try:
+    var ourDialableAddrs = getTCPAddrs(addrs)
+    if ourDialableAddrs.len == 0:
+      debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr."
+      return
+
     stream = await switch.dial(remotePeerId, DcutrCodec)
     await sendConnectMsg(stream, addrs)
     debug "Dcutr initiator has sent a Connect message."
     let rttStart = Moment.now()
     let connectAnswer = DcutrMsg.decode(await stream.readLp(1024))
+
+    var peerDialableAddrs = getTCPAddrs(connectAnswer.addrs)
+    if peerDialableAddrs.len == 0:
+      debug "Dcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr."
+      return
+
     let rttEnd = Moment.now()
     debug "Dcutr initiator has received a Connect message back.", connectAnswer
     let halfRtt = (rttEnd - rttStart) div 2'i64
     echo halfRtt.type
@@ -56,8 +67,8 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
     await sendSyncMsg(stream, addrs)
     debug "Dcutr initiator has sent a Sync message."
     await sleepAsync(halfRtt)
-    await switch.connect(remotePeerId, connectAnswer.addrs, forceDial = true, reuseConnection = false, upgradeDir = Direction.In)
+    await switch.connect(remotePeerId, peerDialableAddrs, forceDial = true, reuseConnection = false, upgradeDir = Direction.In)
     debug "Dcutr initiator has directly connected to the remote peer."
except CatchableError as err: error "Unexpected error when trying direct conn", err = err.msg diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index 07613ac223..9ed10ff1c2 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import std/options +import std/sequtils import chronos import stew/objects @@ -58,3 +58,8 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, proc sendConnectMsg*(conn: Connection, addrs: seq[MultiAddress]) {.async.} = let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() await conn.writeLp(pb.buffer) + +proc getTCPAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] = + var tcpAddrs = addrs + tcpAddrs.keepItIf(TCP.matchPartial(it)) + return tcpAddrs diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 351cd00089..0391d330fe 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -1,5 +1,5 @@ # Nim-LibP2P -# Copyright (c) 2022 Status Research & Development GmbH +# Copyright (c) 2023 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) # * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -35,15 +35,27 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = try: let connectMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver received a Connect message.", connectMsg + var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts() # likely empty when the peer is reachable if ourAddrs.len == 0: # this list should be the same as the peer's public addrs when it is reachable ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it)) + var ourDialableAddrs = getTCPAddrs(ourAddrs) + if ourDialableAddrs.len == 0: + debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr." + return + await sendConnectMsg(stream, ourAddrs) debug "Dcutr receiver has sent a Connect message back." let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver has received a Sync message.", syncMsg - await switch.connect(stream.peerId, connectMsg.addrs, true, false) + + var peerDialableAddrs = getTCPAddrs(connectMsg.addrs) + if peerDialableAddrs.len == 0: + debug "DDcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr." + return + + await switch.connect(stream.peerId, peerDialableAddrs, forceDial = true, reuseConnection = false) debug "Dcutr receiver has directly connected to the remote peer." 
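A quick sanity check for the filter above (keepItIf mutates in place, which is why getTCPAddrs copies first); the multiaddress literals are illustrative:

    let mixed = @[
      MultiAddress.init("/ip4/198.51.100.7/tcp/4001").tryGet(),
      MultiAddress.init("/ip4/198.51.100.7/udp/4001").tryGet()]
    doAssert getTCPAddrs(mixed) ==
      @[MultiAddress.init("/ip4/198.51.100.7/tcp/4001").tryGet()]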
except CatchableError as err: error "Unexpected error in dcutr handler", msg = err.msg diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index ced7a9eb2d..e21697540a 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -1,5 +1,5 @@ # Nim-LibP2P -# Copyright (c) 2022 Status Research & Development GmbH +# Copyright (c) 2023 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) # * MIT license ([LICENSE-MIT](LICENSE-MIT)) From 2fcea83faaaf02329ea206359c91ce07edacea68 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 5 Apr 2023 14:44:38 +0200 Subject: [PATCH 071/112] server connects in parallel --- .../protocols/connectivity/dcutr/server.nim | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 0391d330fe..921ddac262 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -20,7 +20,8 @@ import chronos, chronicles import core import ../../protocol, ../../../stream/connection, - ../../../switch + ../../../switch, + ../../../utils/future export chronicles @@ -29,9 +30,10 @@ type Dcutr* = ref object of LPProtocol logScope: topics = "libp2p dcutr" -proc new*(T: typedesc[Dcutr], switch: Switch): T = +proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T = proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} = + var peerDialableAddrs: seq[MultiAddress] try: let connectMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver received a Connect message.", connectMsg @@ -50,15 +52,28 @@ proc new*(T: typedesc[Dcutr], switch: Switch): T = let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver has received a Sync message.", syncMsg - var peerDialableAddrs = getTCPAddrs(connectMsg.addrs) + peerDialableAddrs = getTCPAddrs(connectMsg.addrs) if peerDialableAddrs.len == 0: - debug "DDcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr." + debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr." return - await switch.connect(stream.peerId, peerDialableAddrs, forceDial = true, reuseConnection = false) - debug "Dcutr receiver has directly connected to the remote peer." + if peerDialableAddrs.len > maxDialableAddrs: + peerDialableAddrs = peerDialableAddrs[0.. 
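A hedged sketch, not part of the original patch, of the parallel-dial shape this patch and the next one converge on (the `futs`/cancel structure is visible in later hunks below): one connect future per candidate address, the first completion wins, the rest are cancelled. `switch`, `stream` and `connectTimeout` are the handler's variables; `anyCompleted` is assumed to come from `../../../utils/future`, imported above, and to yield whichever future of the seq completes first.

var futs = peerDialableAddrs.mapIt(
  switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
try:
  # one wait() bounds the whole race, so the worst case is a single timeout
  # rather than one timeout per address
  discard await anyCompleted(futs).wait(connectTimeout)
  debug "Dcutr receiver has directly connected to the remote peer."
finally:
  for fut in futs:
    fut.cancel()   # losing dials must not linger after the winner lands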
Date: Wed, 5 Apr 2023 15:13:01 +0200 Subject: [PATCH 072/112] client connects in parallel --- .../protocols/connectivity/dcutr/client.nim | 37 +++++++++++++++---- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index d556ca4e29..6b95bf0d8e 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -12,22 +12,27 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} +import std/sequtils + import stew/results import chronos, chronicles import core import ../../protocol, ../../../stream/connection, - ../../../switch + ../../../switch, + ../../../utils/future type DcutrClient* = ref object of RootObj + connectTimeout: Duration + maxDialableAddrs: int logScope: topics = "libp2p dcutrclient" -proc new*(T: typedesc[DcutrClient]): T = - return T() +proc new*(T: typedesc[DcutrClient], connectTimeout = 15.seconds, maxDialableAddrs = 8): T = + return T(connectTimeout: connectTimeout, maxDialableAddrs: maxDialableAddrs) proc sendSyncMsg(stream: Connection, addrs: seq[MultiAddress]) {.async.} = let pb = DcutrMsg(msgType: MsgType.Sync, addrs: addrs).encode() @@ -37,7 +42,9 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: logScope: peerId = switch.peerInfo.peerId - var stream: Connection + var + peerDialableAddrs: seq[MultiAddress] + stream: Connection try: var ourDialableAddrs = getTCPAddrs(addrs) if ourDialableAddrs.len == 0: @@ -50,7 +57,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: let rttStart = Moment.now() let connectAnswer = DcutrMsg.decode(await stream.readLp(1024)) - var peerDialableAddrs = getTCPAddrs(connectAnswer.addrs) + peerDialableAddrs = getTCPAddrs(connectAnswer.addrs) if peerDialableAddrs.len == 0: debug "DDcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr." return @@ -62,10 +69,24 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: await sendSyncMsg(stream, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) - await switch.connect(remotePeerId, peerDialableAddrs, forceDial = true, reuseConnection = false, upgradeDir = Direction.In) - debug "Dcutr initiator has directly connected to the remote peer." + + if peerDialableAddrs.len > self.maxDialableAddrs: + peerDialableAddrs = peerDialableAddrs[0.. 
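A usage note for the client side of the same pattern: both knobs are fixed at construction time. A hypothetical call site, assuming a started `switch` that already holds a relayed connection to `remotePeerId`:

let client = DcutrClient.new(connectTimeout = 5.seconds, maxDialableAddrs = 4)
await client.startSync(switch, remotePeerId, switch.peerInfo.addrs)

Capping `maxDialableAddrs` keeps a peer that advertises many addresses from turning the hole punch into a dial storm.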
Date: Wed, 5 Apr 2023 15:19:30 +0200 Subject: [PATCH 073/112] remove unnecessary proc --- libp2p/stream/lpstream.nim | 5 ----- 1 file changed, 5 deletions(-) diff --git a/libp2p/stream/lpstream.nim b/libp2p/stream/lpstream.nim index 90e5df320f..6dfe501c4f 100644 --- a/libp2p/stream/lpstream.nim +++ b/libp2p/stream/lpstream.nim @@ -80,11 +80,6 @@ type opened*: uint64 closed*: uint64 -# proc `$`*(dir: Direction): string = -# case dir -# of In: "In" -# of Out: "Out" - proc setupStreamTracker*(name: string): StreamTracker = let tracker = new StreamTracker From 2d8d56ca6ac1a87d3525b5bbf2f8d9278e4f6b81 Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 6 Apr 2023 11:02:21 +0200 Subject: [PATCH 074/112] remove unnecessary line --- libp2p/dialer.nim | 1 - 1 file changed, 1 deletion(-) diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index 9f8ffe5ef1..369359fab5 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -77,7 +77,6 @@ proc dialAndUpgrade( try: dialed.transportDir = upgradeDir let upgradedConn = await transport.upgrade(dialed, upgradeDir, peerId) - doAssert not isNil(upgradedConn), "connection died after upgrade " & $upgradeDir upgradedConn except CatchableError as exc: # If we failed to establish the connection through one transport, From 18addd2aeb17ab5f115a8fec8e448aef91f5374b Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 6 Apr 2023 12:27:17 +0200 Subject: [PATCH 075/112] simplifying send proc --- libp2p/protocols/connectivity/dcutr/client.nim | 8 ++------ libp2p/protocols/connectivity/dcutr/core.nim | 4 ++-- libp2p/protocols/connectivity/dcutr/server.nim | 2 +- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 6b95bf0d8e..4c064b859b 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -34,10 +34,6 @@ logScope: proc new*(T: typedesc[DcutrClient], connectTimeout = 15.seconds, maxDialableAddrs = 8): T = return T(connectTimeout: connectTimeout, maxDialableAddrs: maxDialableAddrs) -proc sendSyncMsg(stream: Connection, addrs: seq[MultiAddress]) {.async.} = - let pb = DcutrMsg(msgType: MsgType.Sync, addrs: addrs).encode() - await stream.writeLp(pb.buffer) - proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]) {.async.} = logScope: peerId = switch.peerInfo.peerId @@ -52,7 +48,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: return stream = await switch.dial(remotePeerId, DcutrCodec) - await sendConnectMsg(stream, addrs) + await stream.send(MsgType.Connect, addrs) debug "Dcutr initiator has sent a Connect message." let rttStart = Moment.now() let connectAnswer = DcutrMsg.decode(await stream.readLp(1024)) @@ -66,7 +62,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: debug "Dcutr initiator has received a Connect message back.", connectAnswer let halfRtt = (rttEnd - rttStart) div 2'i64 echo halfRtt.type - await sendSyncMsg(stream, addrs) + await stream.send(MsgType.Sync, addrs) debug "Dcutr initiator has sent a Sync message." 
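  # A sketch, not part of the original patch: sendConnectMsg and sendSyncMsg
  # collapse into the single `send` proc added to core.nim below; only the
  # message type differs. Round-tripping through the protobuf helpers:
  #
  #   let msg = DcutrMsg(msgType: MsgType.Sync, addrs: @[ma])
  #   let decoded = DcutrMsg.decode(msg.encode().buffer)
  #   doAssert decoded.msgType == MsgType.Sync and decoded.addrs == @[ma]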
await sleepAsync(halfRtt) diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index 9ed10ff1c2..ba7882d443 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -55,8 +55,8 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, raise newException(DcutrError, "Received malformed message") return dcutrMsg -proc sendConnectMsg*(conn: Connection, addrs: seq[MultiAddress]) {.async.} = - let pb = DcutrMsg(msgType: MsgType.Connect, addrs: addrs).encode() +proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} = + let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode() await conn.writeLp(pb.buffer) proc getTCPAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] = diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 921ddac262..ea8ae8d717 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -47,7 +47,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr." return - await sendConnectMsg(stream, ourAddrs) + await stream.send(MsgType.Connect, ourAddrs) debug "Dcutr receiver has sent a Connect message back." let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver has received a Sync message.", syncMsg From d5c4d94bbf9944f851f064a73a2bf49ed5ed20f5 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 10 Apr 2023 01:10:35 +0200 Subject: [PATCH 076/112] testing error cases with stub switch --- tests/testdcutr.nim | 121 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index e21697540a..cc0bd2bf6c 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -14,8 +14,39 @@ import ../libp2p/protocols/connectivity/dcutr/core as dcore import ../libp2p/protocols/connectivity/dcutr/[client, server] from ../libp2p/protocols/connectivity/autonat/core import NetworkReachability import ../libp2p/builders +import ../libp2p/utils/future import ./helpers +type + SwitchStub* = ref object of Switch + switch: Switch + connectStub*: proc(): Future[void] {.async.} + +proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: proc (): Future[void] {.async.} = nil): T = + return SwitchStub( + switch: switch, + peerInfo: switch.peerInfo, + ms: switch.ms, + transports: switch.transports, + connManager: switch.connManager, + peerStore: switch.peerStore, + dialer: switch.dialer, + nameResolver: switch.nameResolver, + services: switch.services, + connectStub: connectStub) + +method connect*( + self: SwitchStub, + peerId: PeerId, + addrs: seq[MultiAddress], + forceDial = false, + reuseConnection = true, + upgradeDir = Direction.Out) {.async.} = + if (self.connectStub != nil): + await self.connectStub() + else: + await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir) + suite "Dcutr": teardown: checkTrackers() @@ -65,3 +96,93 @@ suite "Dcutr": behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2 await allFutures(behindNATSwitch.stop(), publicSwitch.stop()) + + template ductrClientTest(behindNATSwitch: Switch, publicSwitch: Switch, body: untyped) = + let dcutrProto = Dcutr.new(publicSwitch) + publicSwitch.mount(dcutrProto) + + await allFutures(behindNATSwitch.start(), publicSwitch.start()) + 
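    # A sketch, not part of the original patch: SwitchStub above is a plain
    # delegating wrapper, so a test can force `connect` to fail or hang by
    # injecting a stub proc, e.g.:
    #
    #   SwitchStub.new(newStandardSwitch(),
    #     proc(): Future[void] {.async.} = raise newException(CatchableError, "err"))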
+ await publicSwitch.connect(behindNATSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + + for t in behindNATSwitch.transports: + t.networkReachability = NetworkReachability.NotReachable + + body + + checkExpiring: + # we still expect a new connection to be open by the receiver peer acting as the dcutr server + behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2 + + await allFutures(behindNATSwitch.stop(), publicSwitch.stop()) + + asyncTest "Client connect timeout": + + proc connectTimeoutProc(): Future[void] {.async.} = + await sleepAsync(100.millis) + + let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc) + let publicSwitch = newStandardSwitch() + ductrClientTest(behindNATSwitch, publicSwitch): + try: + let client = DcutrClient.new(connectTimeout = 5.millis) + await client.startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + except DcutrError as err: + check err.parent of AsyncTimeoutError + + asyncTest "All client connect attempts fail": + + proc connectErrorProc(): Future[void] {.async.} = + raise newException(CatchableError, "error") + + let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc) + let publicSwitch = newStandardSwitch() + ductrClientTest(behindNATSwitch, publicSwitch): + try: + let client = DcutrClient.new(connectTimeout = 5.millis) + await client.startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + except DcutrError as err: + check err.parent of AllFuturesFailedError + + proc ductrServerTest(connectStub: proc (): Future[void] {.async.}) {.async.} = + let behindNATSwitch = newStandardSwitch() + let publicSwitch = SwitchStub.new(newStandardSwitch()) + + let dcutrProto = Dcutr.new(publicSwitch, connectTimeout = 5.millis) + publicSwitch.mount(dcutrProto) + + await allFutures(behindNATSwitch.start(), publicSwitch.start()) + + await publicSwitch.connect(behindNATSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + + publicSwitch.connectStub = connectStub + + for t in behindNATSwitch.transports: + t.networkReachability = NetworkReachability.NotReachable + + expect CatchableError: + # we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result + # in two connections attemps, instead of one. This dial is going to fail because the dcutr client is acting as the + # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case. 
+ await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs) + .wait(300.millis) + + checkExpiring: + # we still expect a new connection to be open by the receiver peer acting as the dcutr server + behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1 + + await allFutures(behindNATSwitch.stop(), publicSwitch.stop()) + + asyncTest "DCUtR server timeout when establishing a new connection": + + proc connectProc(): Future[void] {.async.} = + await sleepAsync(100.millis) + + await ductrServerTest(connectProc) + + asyncTest "DCUtR server error when establishing a new connection": + + proc connectProc(): Future[void] {.async.} = + raise newException(CatchableError, "error") + + await ductrServerTest(connectProc) From ff0dc4957a63dc3bfa26a3ae0503358e5b317591 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 11 Apr 2023 17:00:42 +0200 Subject: [PATCH 077/112] reraising error --- libp2p/protocols/connectivity/dcutr/client.nim | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 4c064b859b..9b656a8bba 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -75,14 +75,16 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: debug "Dcutr initiator has directly connected to the remote peer." else: debug "Dcutr initiator could not connect to the remote peer.", msg = fut.error.msg - except CancelledError as exc: - raise exc - except AllFuturesFailedError as exc: - debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = exc.msg - except AsyncTimeoutError as exc: - debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = exc.msg + except CancelledError as err: + raise err + except AllFuturesFailedError as err: + debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg + raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts failed", err) + except AsyncTimeoutError as err: + debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg + raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err) except CatchableError as err: - warn "Unexpected error when trying direct conn", err = err.msg + debug "Unexpected error when trying direct conn", err = err.msg raise newException(DcutrError, "Unexpected error when trying a direct conn", err) finally: if stream != nil: From 25258f912aaaec94ed67d6b53f9b0f25c92f1e4f Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Tue, 11 Apr 2023 22:29:59 +0200 Subject: [PATCH 078/112] Apply suggestions from code review Co-authored-by: Tanguy --- libp2p/dialer.nim | 3 +-- libp2p/protocols/connectivity/dcutr/client.nim | 7 +++---- libp2p/protocols/connectivity/dcutr/core.nim | 4 +--- libp2p/protocols/connectivity/dcutr/server.nim | 2 +- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index 369359fab5..a96af4d0ea 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -76,8 +76,7 @@ proc dialAndUpgrade( let mux = try: dialed.transportDir = upgradeDir - let upgradedConn = await 
transport.upgrade(dialed, upgradeDir, peerId) - upgradedConn + await transport.upgrade(dialed, upgradeDir, peerId) except CatchableError as exc: # If we failed to establish the connection through one transport, # we won't succeeded through another - no use in trying again diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 9b656a8bba..991a0ddb3d 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -24,7 +24,7 @@ import ../../protocol, ../../../utils/future type - DcutrClient* = ref object of RootObj + DcutrClient* = ref object connectTimeout: Duration maxDialableAddrs: int @@ -44,7 +44,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: try: var ourDialableAddrs = getTCPAddrs(addrs) if ourDialableAddrs.len == 0: - debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr." + debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr.", addrs return stream = await switch.dial(remotePeerId, DcutrCodec) @@ -55,13 +55,12 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: peerDialableAddrs = getTCPAddrs(connectAnswer.addrs) if peerDialableAddrs.len == 0: - debug "DDcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr." + debug "DDcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.adds return let rttEnd = Moment.now() debug "Dcutr initiator has received a Connect message back.", connectAnswer let halfRtt = (rttEnd - rttStart) div 2'i64 - echo halfRtt.type await stream.send(MsgType.Sync, addrs) debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index ba7882d443..024224737f 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -60,6 +60,4 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async await conn.writeLp(pb.buffer) proc getTCPAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] = - var tcpAddrs = addrs - tcpAddrs.keepItIf(TCP.matchPartial(it)) - return tcpAddrs + addrs.filterIt(TCP.matchPartial(it)) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index ea8ae8d717..227b05396d 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -44,7 +44,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it)) var ourDialableAddrs = getTCPAddrs(ourAddrs) if ourDialableAddrs.len == 0: - debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr." + debug "Dcutr receiver has no supported dialable addresses. 
Aborting Dcutr.", ourAddrs return await stream.send(MsgType.Connect, ourAddrs) From 2c9b495182b3d95a3aad64db502f5c23397abebc Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Wed, 12 Apr 2023 11:36:38 +0200 Subject: [PATCH 079/112] Update libp2p/protocols/connectivity/dcutr/client.nim Co-authored-by: Tanguy --- libp2p/protocols/connectivity/dcutr/client.nim | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 991a0ddb3d..2ef2720a80 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -68,12 +68,11 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: if peerDialableAddrs.len > self.maxDialableAddrs: peerDialableAddrs = peerDialableAddrs[0.. Date: Wed, 12 Apr 2023 11:44:03 +0200 Subject: [PATCH 080/112] changes for code review --- libp2p/protocols/connectivity/dcutr/client.nim | 6 +++--- libp2p/protocols/connectivity/dcutr/core.nim | 2 +- libp2p/protocols/connectivity/dcutr/server.nim | 15 +++++++-------- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 2ef2720a80..ad0b29a4f6 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -42,7 +42,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: peerDialableAddrs: seq[MultiAddress] stream: Connection try: - var ourDialableAddrs = getTCPAddrs(addrs) + var ourDialableAddrs = getHolePunchableAddrs(addrs) if ourDialableAddrs.len == 0: debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr.", addrs return @@ -53,9 +53,9 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: let rttStart = Moment.now() let connectAnswer = DcutrMsg.decode(await stream.readLp(1024)) - peerDialableAddrs = getTCPAddrs(connectAnswer.addrs) + peerDialableAddrs = getHolePunchableAddrs(connectAnswer.addrs) if peerDialableAddrs.len == 0: - debug "DDcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.adds + debug "DDcutr receiver has no supported dialable addresses to connect to. 
Aborting Dcutr.", addrs=connectAnswer.addrs return let rttEnd = Moment.now() diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim index 024224737f..168fa99af6 100644 --- a/libp2p/protocols/connectivity/dcutr/core.nim +++ b/libp2p/protocols/connectivity/dcutr/core.nim @@ -59,5 +59,5 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode() await conn.writeLp(pb.buffer) -proc getTCPAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] = +proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] = addrs.filterIt(TCP.matchPartial(it)) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 227b05396d..2a8045767c 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -42,7 +42,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi if ourAddrs.len == 0: # this list should be the same as the peer's public addrs when it is reachable ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it)) - var ourDialableAddrs = getTCPAddrs(ourAddrs) + var ourDialableAddrs = getHolePunchableAddrs(ourAddrs) if ourDialableAddrs.len == 0: debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr.", ourAddrs return @@ -52,20 +52,19 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi let syncMsg = DcutrMsg.decode(await stream.readLp(1024)) debug "Dcutr receiver has received a Sync message.", syncMsg - peerDialableAddrs = getTCPAddrs(connectMsg.addrs) + peerDialableAddrs = getHolePunchableAddrs(connectMsg.addrs) if peerDialableAddrs.len == 0: - debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr." + debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectMsg.addrs return if peerDialableAddrs.len > maxDialableAddrs: peerDialableAddrs = peerDialableAddrs[0.. Date: Wed, 12 Apr 2023 13:59:34 +0200 Subject: [PATCH 081/112] reraising error on the server --- libp2p/protocols/connectivity/dcutr/server.nim | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 2a8045767c..0dada156fa 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -65,15 +65,17 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi debug "Dcutr receiver has directly connected to the remote peer." 
finally: for fut in futs: fut.cancel() - except CancelledError as exc: - raise exc - except AllFuturesFailedError as exc: - debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = exc.msg - except AsyncTimeoutError as exc: - debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = exc.msg + except CancelledError as err: + raise err + except AllFuturesFailedError as err: + debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg + raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err) + except AsyncTimeoutError as err: + debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg + raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err) except CatchableError as err: warn "Unexpected error in dcutr handler", msg = err.msg - raise newException(DcutrError, "Unexpected error when trying a direct conn", err) + raise newException(DcutrError, "Unexpected error in dcutr handler", err) let self = T() self.handler = handleStream From 2d22ba12ee29203d03914eb9c6e45556f8611f44 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 12 Apr 2023 14:04:57 +0200 Subject: [PATCH 082/112] typo --- libp2p/protocols/connectivity/dcutr/client.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index ad0b29a4f6..560741e6c6 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -55,7 +55,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: peerDialableAddrs = getHolePunchableAddrs(connectAnswer.addrs) if peerDialableAddrs.len == 0: - debug "DDcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.addrs + debug "Dcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.addrs return let rttEnd = Moment.now() From 3349949702b8132b331651251208bdf8a51d9b8f Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 21 Nov 2022 17:08:23 +0100 Subject: [PATCH 083/112] Basic version which asks peers recently connected about our nat status --- libp2p/services/hpservice.nim | 79 +++++++++++++++++++++++++++++++++++ tests/testhpservice.nim | 53 +++++++++++++++++++++++ 2 files changed, 132 insertions(+) create mode 100644 libp2p/services/hpservice.nim create mode 100644 tests/testhpservice.nim diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim new file mode 100644 index 0000000000..5c230fef84 --- /dev/null +++ b/libp2p/services/hpservice.nim @@ -0,0 +1,79 @@ +# Nim-LibP2P +# Copyright (c) 2022 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
+ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import ../switch +import chronos +import std/tables +import ../protocols/connectivity/autonat + +type + HPService* = ref object of Service + newPeerHandler: PeerEventHandler + networkReachability: NetworkReachability + t: CountTable[NetworkReachability] + maxConfidence: int + + NetworkReachability {.pure.} = enum + Private, Public, Unknown + +proc new*(T: typedesc[HPService], maxConfidence: int = 3): T = + return T( + newPeerHandler: nil, + networkReachability: NetworkReachability.Unknown, + maxConfidence: maxConfidence, + t: initCountTable[NetworkReachability]()) + +proc handleAnswer(self: HPService, ans: NetworkReachability) = + if ans == NetworkReachability.Unknown: + return + if ans == self.networkReachability: + if self.t[ans] == self.maxConfidence: + return + self.t.inc(ans) + else: + if self.t[self.networkReachability] > 0: + self.t.inc(self.networkReachability, -1) + if self.t[ans] < self.maxConfidence: + self.t.inc(ans) + if self.t[ans] == self.maxConfidence or self.t[self.networkReachability] == 0: + self.networkReachability = ans + +proc askPeer(self: HPService, s: Switch, peerId: PeerId): Future[void] {.async.} = + echo "Asking peer " & $(peerId) + let ans = + try: + let ma = await Autonat.new(s).dialMe(peerId) + NetworkReachability.Public + except AutonatError: + NetworkReachability.Private + self.handleAnswer(ans) + echo self.t + echo self.networkReachability + +proc h(self: HPService, switch: Switch) = + for p in switch.peerStore[AddressBook].book.keys: + discard askPeer(self, switch, p) + +method setup*(self: HPService, switch: Switch) {.async.} = + self.newPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] = + return askPeer(self, switch, peerId) + + switch.connManager.addPeerEventHandler(self.newPeerHandler, PeerEventKind.Joined) + +method run*(self: HPService, switch: Switch) {.async, gcsafe, public.} = + h(self, switch) + +method stop*(self: HPService, switch: Switch) {.async, gcsafe, public.} = + if not isNil(self.newPeerHandler): + switch.connManager.removePeerEventHandler(self.newPeerHandler, PeerEventKind.Joined) \ No newline at end of file diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim new file mode 100644 index 0000000000..21c4bacaad --- /dev/null +++ b/tests/testhpservice.nim @@ -0,0 +1,53 @@ +# Nim-LibP2P +# Copyright (c) 2022 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
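# A worked trace, not part of the original patch, of handleAnswer above with
# maxConfidence = 3, starting from Unknown and an empty count table:
#   Public  -> t = {Public: 1}; flips to Public (the old status' count is 0)
#   Public  -> t = {Public: 2}
#   Private -> t = {Public: 1, Private: 1}; stays Public, one dissenting
#              answer only decrements the incumbent
#   Private -> t = {Public: 0, Private: 2}; the incumbent drained to 0, so
#              the status flips to Private
# The status therefore changes only when an answer reaches maxConfidence or
# the incumbent's count is exhausted.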
+ +## The switch is the core of libp2p, which brings together the +## transports, the connection manager, the upgrader and other +## parts to allow programs to use libp2p + +import chronos +import unittest2 +import ./helpers +import ../libp2p/[builders, + switch, + services/hpservice] + +proc createAutonatSwitch(): Switch = + result = SwitchBuilder.new() + .withRng(newRng()) + .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ]) + .withTcpTransport() + .withMplex() + .withAutonat() + .withNoise() + .build() + +suite "Hope Punching": + teardown: + checkTrackers() + asyncTest "Hope Punching test": + let switch1 = createAutonatSwitch() + let switch2 = createAutonatSwitch() + let switch3 = createAutonatSwitch() + let switch4 = createAutonatSwitch() + + switch1.addService(HPService.new()) + + await switch1.start() + await switch2.start() + + await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs) + await switch3.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs) + await switch4.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs) + + await sleepAsync(500.milliseconds) + + await allFuturesThrowing( + switch1.stop(), + switch2.stop()) \ No newline at end of file From 6758964cc426e74139d4d9cfb7ea2de54972b069 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 22 Nov 2022 16:05:39 +0100 Subject: [PATCH 084/112] Add more test --- libp2p/services/hpservice.nim | 5 ++- tests/testhpservice.nim | 79 +++++++++++++++++++++++++++++------ tests/testnative.nim | 3 +- 3 files changed, 73 insertions(+), 14 deletions(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 5c230fef84..c8cdec2ea7 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -24,7 +24,7 @@ type t: CountTable[NetworkReachability] maxConfidence: int - NetworkReachability {.pure.} = enum + NetworkReachability* {.pure.} = enum Private, Public, Unknown proc new*(T: typedesc[HPService], maxConfidence: int = 3): T = @@ -34,6 +34,9 @@ proc new*(T: typedesc[HPService], maxConfidence: int = 3): T = maxConfidence: maxConfidence, t: initCountTable[NetworkReachability]()) +proc networkReachability*(self: HPService): NetworkReachability {.inline.} = + return self.networkReachability + proc handleAnswer(self: HPService, ans: NetworkReachability) = if ans == NetworkReachability.Unknown: return diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 21c4bacaad..b15b673c97 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -7,10 +7,6 @@ # This file may not be copied, modified, or distributed except according to # those terms. 
-## The switch is the core of libp2p, which brings together the -## transports, the connection manager, the upgrader and other -## parts to allow programs to use libp2p - import chronos import unittest2 import ./helpers @@ -31,23 +27,82 @@ proc createAutonatSwitch(): Switch = suite "Hope Punching": teardown: checkTrackers() - asyncTest "Hope Punching test": + asyncTest "Hope Punching Private Reachability test": let switch1 = createAutonatSwitch() let switch2 = createAutonatSwitch() let switch3 = createAutonatSwitch() let switch4 = createAutonatSwitch() - switch1.addService(HPService.new()) + let hpservice = HPService.new() + check hpservice.networkReachability() == NetworkReachability.Unknown + switch1.addService(hpservice) await switch1.start() await switch2.start() + await switch3.start() + await switch4.start() + + echo $switch1.peerInfo.listenAddrs + switch1.peerInfo.listenAddrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/1").tryGet()] + await switch1.peerInfo.update() + echo $switch1.peerInfo.listenAddrs - await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs) - await switch3.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs) - await switch4.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs) + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) + await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) + await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - await sleepAsync(500.milliseconds) + check hpservice.networkReachability() == NetworkReachability.Private await allFuturesThrowing( - switch1.stop(), - switch2.stop()) \ No newline at end of file + switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) + + asyncTest "Hope Punching Public Reachability test": + let switch1 = createAutonatSwitch() + let switch2 = createAutonatSwitch() + let switch3 = createAutonatSwitch() + let switch4 = createAutonatSwitch() + + let hpservice = HPService.new() + check hpservice.networkReachability() == NetworkReachability.Unknown + switch1.addService(hpservice) + + await switch1.start() + await switch2.start() + await switch3.start() + await switch4.start() + + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) + await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) + await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) + + check hpservice.networkReachability() == NetworkReachability.Public + + await allFuturesThrowing( + switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) + + # asyncTest "IPFS Hope Punching test": + # let switch1 = createAutonatSwitch() + + # switch1.addService(HPService.new()) + + # await switch1.start() + + # asyncSpawn switch1.connect( + # PeerId.init("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN").get(), + # @[MultiAddress.init("/ip4/139.178.91.71/tcp/4001").get()] + # ) + + # asyncSpawn switch1.connect( + # PeerId.init("QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt").get(), + # @[MultiAddress.init("/ip4/145.40.118.135/tcp/4001").get()] + # ) + + # asyncSpawn switch1.connect( + # PeerId.init("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ").get(), + # @[MultiAddress.init("/ip4/104.131.131.82/tcp/4001").get()] + # ) + + # await sleepAsync(20.seconds) + + # await allFuturesThrowing( + # switch1.stop()) \ No newline at end of file diff --git a/tests/testnative.nim b/tests/testnative.nim index 317e976b09..48e971c1dc 100644 --- a/tests/testnative.nim +++ b/tests/testnative.nim @@ -44,4 +44,5 @@ import testtcptransport, testautonat, 
testautonatservice, testautorelay, - testdcutr + testdcutr, + testhpservice From d8eb0dd52e45b1905b59396d3aff60362c72c5ae Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 29 Nov 2022 17:17:21 +0100 Subject: [PATCH 085/112] Refactor hp service --- libp2p/services/hpservice.nim | 95 ++++++++++----------- tests/testhpservice.nim | 156 ++++++++++++++++++++++------------ 2 files changed, 147 insertions(+), 104 deletions(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index c8cdec2ea7..853dc85ce3 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -15,68 +15,63 @@ else: import ../switch import chronos import std/tables -import ../protocols/connectivity/autonat +import ../protocols/rendezvous +import ../services/autonatservice +import ../discovery/[rendezvousinterface, discoverymngr] +import ../protocols/connectivity/relay/[relay, client] type HPService* = ref object of Service - newPeerHandler: PeerEventHandler - networkReachability: NetworkReachability - t: CountTable[NetworkReachability] - maxConfidence: int + rdv: RendezVous + dm: DiscoveryManager + relayClient: RelayClient + autonatService: AutonatService + onNewStatusHandler: NewStatusHandler + callb: Callb - NetworkReachability* {.pure.} = enum - Private, Public, Unknown + Callb* = proc (ma: MultiAddress): Future[void] {.gcsafe, raises: [Defect].} -proc new*(T: typedesc[HPService], maxConfidence: int = 3): T = +proc new*(T: typedesc[HPService], rdv: RendezVous, relayClient: RelayClient, autonatService: AutonatService): T = + let dm = DiscoveryManager() + dm.add(RendezVousInterface.new(rdv)) return T( - newPeerHandler: nil, - networkReachability: NetworkReachability.Unknown, - maxConfidence: maxConfidence, - t: initCountTable[NetworkReachability]()) + rdv: rdv, + dm: dm, + relayClient: relayClient, + autonatService: autonatService) -proc networkReachability*(self: HPService): NetworkReachability {.inline.} = - return self.networkReachability +proc relay(self: HPService) {.async.} = + let queryRelay = self.dm.request(RdvNamespace("relay")) + let res = await queryRelay.getPeer() + let rsvp = await self.relayClient.reserve(res[PeerId], res.getAll(MultiAddress)) + let relayedAddr = MultiAddress.init($rsvp.addrs[0] & + "/p2p-circuit/p2p/" & + $rsvp.voucher.get().reservingPeerId).tryGet() -proc handleAnswer(self: HPService, ans: NetworkReachability) = - if ans == NetworkReachability.Unknown: - return - if ans == self.networkReachability: - if self.t[ans] == self.maxConfidence: - return - self.t.inc(ans) - else: - if self.t[self.networkReachability] > 0: - self.t.inc(self.networkReachability, -1) - if self.t[ans] < self.maxConfidence: - self.t.inc(ans) - if self.t[ans] == self.maxConfidence or self.t[self.networkReachability] == 0: - self.networkReachability = ans + await self.callb(relayedAddr) -proc askPeer(self: HPService, s: Switch, peerId: PeerId): Future[void] {.async.} = - echo "Asking peer " & $(peerId) - let ans = - try: - let ma = await Autonat.new(s).dialMe(peerId) - NetworkReachability.Public - except AutonatError: - NetworkReachability.Private - self.handleAnswer(ans) - echo self.t - echo self.networkReachability + # switch1.peerInfo.listenAddrs = @[relayedAddr] + # await switch1.peerInfo.update() -proc h(self: HPService, switch: Switch) = - for p in switch.peerStore[AddressBook].book.keys: - discard askPeer(self, switch, p) +# proc networkReachability*(self: HPService): NetworkReachability {.inline.} = +# return self.networkReachability method setup*(self: HPService, switch: 
Switch) {.async.} = - self.newPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] = - return askPeer(self, switch, peerId) + await self.autonatService.setup(switch) - switch.connManager.addPeerEventHandler(self.newPeerHandler, PeerEventKind.Joined) + self.onNewStatusHandler = proc (networkReachability: NetworkReachability) {.gcsafe, async.} = + if networkReachability == NetworkReachability.Private: + await self.relay() -method run*(self: HPService, switch: Switch) {.async, gcsafe, public.} = - h(self, switch) + self.autonatService.onNewStatuswithMaxConfidence(self.onNewStatusHandler) -method stop*(self: HPService, switch: Switch) {.async, gcsafe, public.} = - if not isNil(self.newPeerHandler): - switch.connManager.removePeerEventHandler(self.newPeerHandler, PeerEventKind.Joined) \ No newline at end of file +method run*(self: HPService, switch: Switch) {.async, public.} = + await self.autonatService.run(switch) + +method stop*(self: HPService, switch: Switch) {.async, public.} = + await self.autonatService.stop(switch) + if not isNil(self.onNewStatusHandler): + discard #switch.connManager.removePeerEventHandler(self.newPeerHandler, PeerEventKind.Joined) + +proc onNewRelayAddr*(self: HPService, f: Callb) = + self.callb = f diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index b15b673c97..42306a0076 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -8,101 +8,149 @@ # those terms. import chronos + import unittest2 import ./helpers import ../libp2p/[builders, switch, - services/hpservice] - -proc createAutonatSwitch(): Switch = - result = SwitchBuilder.new() + services/hpservice, + services/autonatservice, + protocols/rendezvous] +import ../libp2p/protocols/connectivity/relay/[relay, client] +import ../libp2p/protocols/connectivity/autonat +import ../libp2p/discovery/[rendezvousinterface, discoverymngr] + +proc createSwitch(rdv: RendezVous = nil, relay: Relay = nil): Switch = + var builder = SwitchBuilder.new() .withRng(newRng()) .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ]) .withTcpTransport() .withMplex() .withAutonat() .withNoise() - .build() + + if (rdv != nil): + builder = builder.withRendezVous(rdv) + + if (relay != nil): + builder = builder.withCircuitRelay(relay) + + return builder.build() + +type + AutonatStub = ref object of Autonat + returnSuccess*: bool + +method dialMe*( + self: AutonatStub, + pid: PeerId, + addrs: seq[MultiAddress] = newSeq[MultiAddress]()): + Future[MultiAddress] {.async.} = + if self.returnSuccess: + return MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + else: + raise newException(AutonatError, "") suite "Hope Punching": teardown: checkTrackers() - asyncTest "Hope Punching Private Reachability test": - let switch1 = createAutonatSwitch() - let switch2 = createAutonatSwitch() - let switch3 = createAutonatSwitch() - let switch4 = createAutonatSwitch() - - let hpservice = HPService.new() - check hpservice.networkReachability() == NetworkReachability.Unknown + + asyncTest "Hope Punching Public Reachability test": + let rdv = RendezVous.new() + let relayClient = RelayClient.new() + let switch1 = createSwitch(rdv, relayClient) + + let switch2 = createSwitch() + let switch3 = createSwitch() + let switch4 = createSwitch() + + let autonatService = AutonatService.new(Autonat.new(switch1)) + let hpservice = HPService.new(rdv, relayClient, autonatService) + switch1.addService(hpservice) + proc f(ma: MultiAddress) {.gcsafe, async.} = + echo "onNewRelayAddr shouldn't be called" + fail() + + 
hpservice.onNewRelayAddr(f) + await switch1.start() await switch2.start() await switch3.start() await switch4.start() - echo $switch1.peerInfo.listenAddrs - switch1.peerInfo.listenAddrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/1").tryGet()] - await switch1.peerInfo.update() - echo $switch1.peerInfo.listenAddrs - await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - check hpservice.networkReachability() == NetworkReachability.Private + await sleepAsync(1.seconds) await allFuturesThrowing( switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) - asyncTest "Hope Punching Public Reachability test": - let switch1 = createAutonatSwitch() - let switch2 = createAutonatSwitch() - let switch3 = createAutonatSwitch() - let switch4 = createAutonatSwitch() + asyncTest "Hope Punching Full Reachability test": - let hpservice = HPService.new() - check hpservice.networkReachability() == NetworkReachability.Unknown - switch1.addService(hpservice) + let rdv1 = RendezVous.new() + let rdv2 = RendezVous.new() + + let relayClient = RelayClient.new() + let switch1 = createSwitch(rdv1, relayClient) + let switch2 = createSwitch(rdv2) + let switch3 = createSwitch() + let switch4 = createSwitch() + + let bootRdv = RendezVous.new() + let bootNode = createSwitch(rdv = bootRdv) + await bootNode.start() + let relay = Relay.new() + let relayRdv = RendezVous.new() + let relaySwitch = createSwitch(rdv = relayRdv, relay = relay) + await relaySwitch.start() + + await relaySwitch.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) + + let dm = DiscoveryManager() + dm.add(RendezVousInterface.new(relayRdv)) + dm.advertise(RdvNamespace("relay")) + + let autonatStub = AutonatStub.new() + autonatStub.returnSuccess = false + + let autonatService = AutonatService.new(autonatStub) + let hpservice = HPService.new(rdv1, relayClient, autonatService) + + switch1.addService(hpservice) await switch1.start() + + proc f(ma: MultiAddress) {.gcsafe, async.} = + autonatStub.returnSuccess = true + let expected = MultiAddress.init($relaySwitch.peerInfo.addrs[0] & "/p2p/" & + $relaySwitch.peerInfo.peerId & "/p2p-circuit/p2p/" & + $switch1.peerInfo.peerId).get() + check ma == expected + + hpservice.onNewRelayAddr(f) + await switch2.start() await switch3.start() await switch4.start() + await switch1.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - check hpservice.networkReachability() == NetworkReachability.Public - - await allFuturesThrowing( - switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) - - # asyncTest "IPFS Hope Punching test": - # let switch1 = createAutonatSwitch() + await sleepAsync(1.seconds) - # switch1.addService(HPService.new()) + await hpservice.run(switch1) - # await switch1.start() + await sleepAsync(1.seconds) - # asyncSpawn switch1.connect( - # PeerId.init("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN").get(), - # @[MultiAddress.init("/ip4/139.178.91.71/tcp/4001").get()] - # ) + echo switch1.peerInfo.addrs[0] + await sleepAsync(1.seconds) - # asyncSpawn switch1.connect( - # PeerId.init("QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt").get(), - # @[MultiAddress.init("/ip4/145.40.118.135/tcp/4001").get()] - # ) - - # 
asyncSpawn switch1.connect( - # PeerId.init("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ").get(), - # @[MultiAddress.init("/ip4/104.131.131.82/tcp/4001").get()] - # ) - - # await sleepAsync(20.seconds) - - # await allFuturesThrowing( - # switch1.stop()) \ No newline at end of file + await allFuturesThrowing( + bootNode.stop(), relaySwitch.stop(), switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) \ No newline at end of file From 80b9d353e7b7c74cb3644e6669b99bfc914eb229 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 30 Nov 2022 11:39:38 +0100 Subject: [PATCH 086/112] Remove sleepAsync --- tests/stubs/torstub.nim | 2 +- tests/testhpservice.nim | 35 ++++++++++++----------------------- 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/tests/stubs/torstub.nim b/tests/stubs/torstub.nim index ca5fe97769..906b1290dd 100644 --- a/tests/stubs/torstub.nim +++ b/tests/stubs/torstub.nim @@ -83,4 +83,4 @@ proc start*(self: TorServerStub, address: TransportAddress) {.async.} = proc stop*(self: TorServerStub) {.async.} = - await self.tcpTransport.stop() + await self.tcpTransport.stop() \ No newline at end of file diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 42306a0076..8cbc94cab9 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -17,9 +17,10 @@ import ../libp2p/[builders, services/autonatservice, protocols/rendezvous] import ../libp2p/protocols/connectivity/relay/[relay, client] -import ../libp2p/protocols/connectivity/autonat import ../libp2p/discovery/[rendezvousinterface, discoverymngr] +import stubs/autonatstub + proc createSwitch(rdv: RendezVous = nil, relay: Relay = nil): Switch = var builder = SwitchBuilder.new() .withRng(newRng()) @@ -37,20 +38,6 @@ proc createSwitch(rdv: RendezVous = nil, relay: Relay = nil): Switch = return builder.build() -type - AutonatStub = ref object of Autonat - returnSuccess*: bool - -method dialMe*( - self: AutonatStub, - pid: PeerId, - addrs: seq[MultiAddress] = newSeq[MultiAddress]()): - Future[MultiAddress] {.async.} = - if self.returnSuccess: - return MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() - else: - raise newException(AutonatError, "") - suite "Hope Punching": teardown: checkTrackers() @@ -64,7 +51,9 @@ suite "Hope Punching": let switch3 = createSwitch() let switch4 = createSwitch() - let autonatService = AutonatService.new(Autonat.new(switch1)) + let autonatStub = AutonatStub.new(expectedDials = 3) + + let autonatService = AutonatService.new(autonatStub) let hpservice = HPService.new(rdv, relayClient, autonatService) switch1.addService(hpservice) @@ -84,7 +73,7 @@ suite "Hope Punching": await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - await sleepAsync(1.seconds) + await autonatStub.finished await allFuturesThrowing( switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) @@ -115,7 +104,7 @@ suite "Hope Punching": dm.add(RendezVousInterface.new(relayRdv)) dm.advertise(RdvNamespace("relay")) - let autonatStub = AutonatStub.new() + let autonatStub = AutonatStub.new(expectedDials = 8) autonatStub.returnSuccess = false let autonatService = AutonatService.new(autonatStub) @@ -124,12 +113,15 @@ suite "Hope Punching": switch1.addService(hpservice) await switch1.start() + let awaiter = Awaiter.new() + proc f(ma: MultiAddress) {.gcsafe, async.} = autonatStub.returnSuccess = true let expected = MultiAddress.init($relaySwitch.peerInfo.addrs[0] & "/p2p/" & $relaySwitch.peerInfo.peerId & 
"/p2p-circuit/p2p/" & $switch1.peerInfo.peerId).get() check ma == expected + awaiter.finished.complete() hpservice.onNewRelayAddr(f) @@ -143,14 +135,11 @@ suite "Hope Punching": await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - await sleepAsync(1.seconds) + await awaiter.finished await hpservice.run(switch1) - await sleepAsync(1.seconds) - - echo switch1.peerInfo.addrs[0] - await sleepAsync(1.seconds) + await autonatStub.finished await allFuturesThrowing( bootNode.stop(), relaySwitch.stop(), switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) \ No newline at end of file From 690d92610c40df362dec21107caced9ad87bfe4b Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 1 Dec 2022 18:19:08 +0100 Subject: [PATCH 087/112] Fix hole punching test --- tests/testhpservice.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 8cbc94cab9..984cb6bd9c 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -53,7 +53,7 @@ suite "Hope Punching": let autonatStub = AutonatStub.new(expectedDials = 3) - let autonatService = AutonatService.new(autonatStub) + let autonatService = AutonatService.new(autonatStub, some(1.seconds)) let hpservice = HPService.new(rdv, relayClient, autonatService) switch1.addService(hpservice) @@ -107,7 +107,7 @@ suite "Hope Punching": let autonatStub = AutonatStub.new(expectedDials = 8) autonatStub.returnSuccess = false - let autonatService = AutonatService.new(autonatStub) + let autonatService = AutonatService.new(autonatStub, some(1.seconds)) let hpservice = HPService.new(rdv1, relayClient, autonatService) switch1.addService(hpservice) From fc8c1769a2a460d14b1b0e76b03eb8c621ef0539 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 2 Dec 2022 12:13:30 +0100 Subject: [PATCH 088/112] Rename NetworkReachability values --- libp2p/services/hpservice.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 853dc85ce3..d023a0e3ff 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -60,7 +60,7 @@ method setup*(self: HPService, switch: Switch) {.async.} = await self.autonatService.setup(switch) self.onNewStatusHandler = proc (networkReachability: NetworkReachability) {.gcsafe, async.} = - if networkReachability == NetworkReachability.Private: + if networkReachability == NetworkReachability.NotReachable: await self.relay() self.autonatService.onNewStatuswithMaxConfidence(self.onNewStatusHandler) From 0897a2558ab4fb327fa999d0f3d9fbf9f1aa862a Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 2 Dec 2022 15:04:53 +0100 Subject: [PATCH 089/112] Ask a configurable number of random peers --- libp2p/switch.nim | 3 +++ tests/testhpservice.nim | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/libp2p/switch.nim b/libp2p/switch.nim index bc2975d332..121ffe2d53 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -131,6 +131,9 @@ method addTransport*(s: Switch, t: Transport) = proc connectedPeers*(s: Switch, dir: Direction): seq[PeerId] = s.connManager.connectedPeers(dir) +proc connectedPeers*(s: Switch): seq[PeerId] = + s.connManager.connectedPeers + proc isConnected*(s: Switch, peerId: PeerId): bool {.public.} = ## returns true if the peer has one or more ## associated connections diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 984cb6bd9c..af38b24d5a 100644 --- 
a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -53,7 +53,7 @@ suite "Hope Punching": let autonatStub = AutonatStub.new(expectedDials = 3) - let autonatService = AutonatService.new(autonatStub, some(1.seconds)) + let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds)) let hpservice = HPService.new(rdv, relayClient, autonatService) switch1.addService(hpservice) @@ -107,7 +107,7 @@ suite "Hope Punching": let autonatStub = AutonatStub.new(expectedDials = 8) autonatStub.returnSuccess = false - let autonatService = AutonatService.new(autonatStub, some(1.seconds)) + let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds)) let hpservice = HPService.new(rdv1, relayClient, autonatService) switch1.addService(hpservice) From 1d5d23764014a42e9ddd57bc139a1b77352b37a5 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 2 Dec 2022 16:08:35 +0100 Subject: [PATCH 090/112] Ask only peers with at least one out connection --- libp2p/switch.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 121ffe2d53..1603b77742 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -131,8 +131,8 @@ method addTransport*(s: Switch, t: Transport) = proc connectedPeers*(s: Switch, dir: Direction): seq[PeerId] = s.connManager.connectedPeers(dir) -proc connectedPeers*(s: Switch): seq[PeerId] = - s.connManager.connectedPeers +proc connectedPeers*(s: Switch, dir: Direction): seq[PeerId] = + s.connManager.connectedPeers(dir) proc isConnected*(s: Switch, peerId: PeerId): bool {.public.} = ## returns true if the peer has one or more From ca75e997f0c0469e96a56ad31935d4a5248c51aa Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 16 Dec 2022 16:24:32 +0100 Subject: [PATCH 091/112] Fix rebase issues --- libp2p/switch.nim | 3 --- tests/stubs/torstub.nim | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/libp2p/switch.nim b/libp2p/switch.nim index 1603b77742..bc2975d332 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -131,9 +131,6 @@ method addTransport*(s: Switch, t: Transport) = proc connectedPeers*(s: Switch, dir: Direction): seq[PeerId] = s.connManager.connectedPeers(dir) -proc connectedPeers*(s: Switch, dir: Direction): seq[PeerId] = - s.connManager.connectedPeers(dir) - proc isConnected*(s: Switch, peerId: PeerId): bool {.public.} = ## returns true if the peer has one or more ## associated connections diff --git a/tests/stubs/torstub.nim b/tests/stubs/torstub.nim index 906b1290dd..ca5fe97769 100644 --- a/tests/stubs/torstub.nim +++ b/tests/stubs/torstub.nim @@ -83,4 +83,4 @@ proc start*(self: TorServerStub, address: TransportAddress) {.async.} = proc stop*(self: TorServerStub) {.async.} = - await self.tcpTransport.stop() \ No newline at end of file + await self.tcpTransport.stop() From bb6f0af3793908e41f8e50b529a3772cd2a82ba9 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 27 Jan 2023 13:26:57 +0100 Subject: [PATCH 092/112] Direct conn --- .../connectivity/autonat/service.nim | 2 + libp2p/protocols/connectivity/relay/relay.nim | 2 +- libp2p/services/hpservice.nim | 95 ++++--- tests/testhpservice.nim | 240 +++++++++++------- 4 files changed, 198 insertions(+), 141 deletions(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index d10f96c26f..0a62ff5811 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -20,6 +20,8 @@ import client import 
../../../utils/heartbeat import ../../../crypto/crypto +export options + logScope: topics = "libp2p autonatservice" diff --git a/libp2p/protocols/connectivity/relay/relay.nim b/libp2p/protocols/connectivity/relay/relay.nim index 5e749702be..d099ff87b5 100644 --- a/libp2p/protocols/connectivity/relay/relay.nim +++ b/libp2p/protocols/connectivity/relay/relay.nim @@ -101,7 +101,7 @@ proc createReserveResponse( status: some(Ok)) return ok(msg) -proc isRelayed(conn: Connection): bool = +proc isRelayed*(conn: Connection): bool = var wrappedConn = conn while not isNil(wrappedConn): if wrappedConn of RelayConnection: diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index d023a0e3ff..e281c8e822 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -12,66 +12,77 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import ../switch -import chronos import std/tables +import ../switch, ../wire import ../protocols/rendezvous -import ../services/autonatservice +import ../services/autorelayservice import ../discovery/[rendezvousinterface, discoverymngr] -import ../protocols/connectivity/relay/[relay, client] +import ../protocols/connectivity/relay/relay +import ../protocols/connectivity/autonat/service +import chronos + +logScope: + topics = "libp2p hpservice" type HPService* = ref object of Service - rdv: RendezVous - dm: DiscoveryManager - relayClient: RelayClient + newConnectedPeerHandler: PeerEventHandler + onNewStatusHandler: StatusAndConfidenceHandler + autoRelayService: AutoRelayService autonatService: AutonatService - onNewStatusHandler: NewStatusHandler - callb: Callb + isPublicIPAddr: isPublicIPAddrFunc - Callb* = proc (ma: MultiAddress): Future[void] {.gcsafe, raises: [Defect].} + isPublicIPAddrFunc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].} -proc new*(T: typedesc[HPService], rdv: RendezVous, relayClient: RelayClient, autonatService: AutonatService): T = - let dm = DiscoveryManager() - dm.add(RendezVousInterface.new(rdv)) +proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService, + isPublicIPAddr: isPublicIPAddrFunc = isPublicAddr): T = return T( - rdv: rdv, - dm: dm, - relayClient: relayClient, - autonatService: autonatService) - -proc relay(self: HPService) {.async.} = - let queryRelay = self.dm.request(RdvNamespace("relay")) - let res = await queryRelay.getPeer() - let rsvp = await self.relayClient.reserve(res[PeerId], res.getAll(MultiAddress)) - let relayedAddr = MultiAddress.init($rsvp.addrs[0] & - "/p2p-circuit/p2p/" & - $rsvp.voucher.get().reservingPeerId).tryGet() - - await self.callb(relayedAddr) + autonatService: autonatService, + autoRelayService: autoRelayService, + isPublicIPAddr: isPublicIPAddr) - # switch1.peerInfo.listenAddrs = @[relayedAddr] - # await switch1.peerInfo.update() +proc startDirectConn(self: HPService, switch: Switch, relayedConnection: Connection, peerId: PeerId, + publicAddr: MultiAddress) {.async.} = + debug "starting direct connection" + await switch.connect(peerId, @[publicAddr], true, false) -# proc networkReachability*(self: HPService): NetworkReachability {.inline.} = -# return self.networkReachability +method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = + var hasBeenSetup = await procCall Service(self).setup(switch) + hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch) + hasBeenSetup = hasBeenSetup and await self.autoRelayService.setup(switch) + if hasBeenSetup: + 
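+    # Watch newly joined peers: a peer that arrived over a relayed
+    # connection gets a direct-dial attempt to every public address in its
+    # AddressBook entry, after which the relayed connection is closed.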
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} = + let conn = switch.connManager.selectConn(peerId) + await sleepAsync(100.milliseconds) # wait for AddressBook to be populated + if isRelayed(conn): + for address in switch.peerStore[AddressBook][peerId]: + if self.isPublicIPAddr(initTAddress(address).get()): + try: + await self.startDirectConn(switch, conn, peerId, address) + except CatchableError as exc: + debug "failed to start direct connection", exc = exc.msg + continue + await conn.close() + debug "direct connection started" -method setup*(self: HPService, switch: Switch) {.async.} = - await self.autonatService.setup(switch) + switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) - self.onNewStatusHandler = proc (networkReachability: NetworkReachability) {.gcsafe, async.} = - if networkReachability == NetworkReachability.NotReachable: - await self.relay() + self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} = + if networkReachability == NetworkReachability.NotReachable: + discard await self.autoRelayService.setup(switch) + elif networkReachability == NetworkReachability.Reachable: + discard await self.autoRelayService.stop(switch) - self.autonatService.onNewStatuswithMaxConfidence(self.onNewStatusHandler) + self.autonatService.statusAndConfidenceHandler(self.onNewStatusHandler) + return hasBeenSetup method run*(self: HPService, switch: Switch) {.async, public.} = await self.autonatService.run(switch) method stop*(self: HPService, switch: Switch) {.async, public.} = - await self.autonatService.stop(switch) - if not isNil(self.onNewStatusHandler): - discard #switch.connManager.removePeerEventHandler(self.newPeerHandler, PeerEventKind.Joined) + discard await self.autonatService.stop(switch) + if not isNil(self.newConnectedPeerHandler): + switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) -proc onNewRelayAddr*(self: HPService, f: Callb) = - self.callb = f +# proc onNewRelayAddr*(self: HPService, f: Callb) = +# self.callb = f diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index af38b24d5a..2ddac934f2 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -14,14 +14,16 @@ import ./helpers import ../libp2p/[builders, switch, services/hpservice, - services/autonatservice, - protocols/rendezvous] + services/autorelayservice] import ../libp2p/protocols/connectivity/relay/[relay, client] -import ../libp2p/discovery/[rendezvousinterface, discoverymngr] +import ../libp2p/protocols/connectivity/autonat/[client, service] +import ../libp2p/wire +import stubs/autonatclientstub -import stubs/autonatstub +proc isPublicAddrIPAddrMock*(ta: TransportAddress): bool = + return true -proc createSwitch(rdv: RendezVous = nil, relay: Relay = nil): Switch = +proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch = var builder = SwitchBuilder.new() .withRng(newRng()) .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ]) @@ -30,116 +32,158 @@ proc createSwitch(rdv: RendezVous = nil, relay: Relay = nil): Switch = .withAutonat() .withNoise() - if (rdv != nil): - builder = builder.withRendezVous(rdv) + if hpService != nil: + builder = builder.withServices(@[hpService]) - if (relay != nil): - builder = builder.withCircuitRelay(relay) + if r != nil: + builder = builder.withCircuitRelay(r) return builder.build() -suite "Hope Punching": +proc buildRelayMA(switchRelay: 
Switch, switchClient: Switch): MultiAddress = + MultiAddress.init($switchRelay.peerInfo.addrs[0] & "/p2p/" & + $switchRelay.peerInfo.peerId & "/p2p-circuit/p2p/" & + $switchClient.peerInfo.peerId).get() + +suite "Hole Punching": teardown: checkTrackers() - asyncTest "Hope Punching Public Reachability test": - let rdv = RendezVous.new() - let relayClient = RelayClient.new() - let switch1 = createSwitch(rdv, relayClient) - - let switch2 = createSwitch() - let switch3 = createSwitch() - let switch4 = createSwitch() - - let autonatStub = AutonatStub.new(expectedDials = 3) - - let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds)) - let hpservice = HPService.new(rdv, relayClient, autonatService) - - switch1.addService(hpservice) - - proc f(ma: MultiAddress) {.gcsafe, async.} = - echo "onNewRelayAddr shouldn't be called" - fail() - - hpservice.onNewRelayAddr(f) - - await switch1.start() - await switch2.start() - await switch3.start() - await switch4.start() - - await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) - await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) - await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - - await autonatStub.finished + asyncTest "Direct connection must work when peer address is public": - await allFuturesThrowing( - switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) - - asyncTest "Hope Punching Full Reachability test": - - let rdv1 = RendezVous.new() - let rdv2 = RendezVous.new() + let autonatService = AutonatService.new(AutonatClient.new(), newRng()) let relayClient = RelayClient.new() - let switch1 = createSwitch(rdv1, relayClient) - let switch2 = createSwitch(rdv2) - let switch3 = createSwitch() - let switch4 = createSwitch() - - let bootRdv = RendezVous.new() - let bootNode = createSwitch(rdv = bootRdv) - await bootNode.start() - - let relay = Relay.new() - let relayRdv = RendezVous.new() - let relaySwitch = createSwitch(rdv = relayRdv, relay = relay) - await relaySwitch.start() - - await relaySwitch.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) - - let dm = DiscoveryManager() - dm.add(RendezVousInterface.new(relayRdv)) - dm.advertise(RdvNamespace("relay")) - - let autonatStub = AutonatStub.new(expectedDials = 8) - autonatStub.returnSuccess = false - - let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds)) - let hpservice = HPService.new(rdv1, relayClient, autonatService) - - switch1.addService(hpservice) - await switch1.start() - - let awaiter = Awaiter.new() + let fut = newFuture[seq[MultiAddress]]() - proc f(ma: MultiAddress) {.gcsafe, async.} = - autonatStub.returnSuccess = true - let expected = MultiAddress.init($relaySwitch.peerInfo.addrs[0] & "/p2p/" & - $relaySwitch.peerInfo.peerId & "/p2p-circuit/p2p/" & - $switch1.peerInfo.peerId).get() - check ma == expected - awaiter.finished.complete() + let switch2 = createSwitch(RelayClient.new()) + proc checkMA(address: seq[MultiAddress]) = + if not fut.completed(): + echo $address + fut.complete(address) - hpservice.onNewRelayAddr(f) + let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng()) - await switch2.start() - await switch3.start() - await switch4.start() + let hpservice = HPService.new(autonatService, autoRelayService, isPublicAddrIPAddrMock) - await switch1.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) + let switch1 = createSwitch(relayClient, hpservice) + let switchRelay = createSwitch(Relay.new()) - await 
switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) - await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) - await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) + await allFutures(switchRelay.start(), switch1.start(), switch2.start()) - await awaiter.finished + await switch1.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs) - await hpservice.run(switch1) + await switch2.connect(switch1.peerInfo.peerId, (await fut)) - await autonatStub.finished + checkExpiring: + switch1.connManager.connCount(switch2.peerInfo.peerId) == 1 and + not isRelayed(switch1.connManager.selectConn(switch2.peerInfo.peerId)) await allFuturesThrowing( - bootNode.stop(), relaySwitch.stop(), switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) \ No newline at end of file + switch1.stop(), switch2.stop(), switchRelay.stop()) + + # asyncTest "Hope Punching Public Reachability test": + # let switch1 = createSwitch() + # + # let switch2 = createSwitch() + # let switch3 = createSwitch() + # let switch4 = createSwitch() + # + # let autonatService = AutonatService.new(AutonatClient.new(), newRng()) + # + # let relayClient = RelayClient.new() + # let fut = newFuture[void]() + # proc checkMA(address: seq[MultiAddress]) = + # check: address[0] == buildRelayMA(switchRelay, switchClient) + # fut.complete() + # let autoRelayService = AutoRelayService.new(3, relayClient, checkMA, newRng()) + # + # let hpservice = HPService.new(autonatService, autoRelayService, newRng()) + # + # switch1.addService(hpservice) + # + # # proc f(ma: MultiAddress) {.gcsafe, async.} = + # # echo "onNewRelayAddr shouldn't be called" + # # fail() + # # + # # hpservice.onNewRelayAddr(f) + # + # await switch1.start() + # await switch2.start() + # await switch3.start() + # await switch4.start() + # + # await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) + # await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) + # await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) + # + # await autonatStub.finished + # + # await allFuturesThrowing( + # switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) + # + # asyncTest "Hope Punching Full Reachability test": + # + # let rdv1 = RendezVous.new() + # let rdv2 = RendezVous.new() + # + # let relayClient = RelayClient.new() + # let switch1 = createSwitch(rdv1, relayClient) + # let switch2 = createSwitch(rdv2) + # let switch3 = createSwitch() + # let switch4 = createSwitch() + # + # let bootRdv = RendezVous.new() + # let bootNode = createSwitch(rdv = bootRdv) + # await bootNode.start() + # + # let relay = Relay.new() + # let relayRdv = RendezVous.new() + # let relaySwitch = createSwitch(rdv = relayRdv, relay = relay) + # await relaySwitch.start() + # + # await relaySwitch.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) + # + # let dm = DiscoveryManager() + # dm.add(RendezVousInterface.new(relayRdv)) + # dm.advertise(RdvNamespace("relay")) + # + # let autonatStub = AutonatStub.new(expectedDials = 8) + # autonatStub.returnSuccess = false + # + # let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds)) + # let hpservice = HPService.new(rdv1, relayClient, autonatService) + # + # switch1.addService(hpservice) + # await switch1.start() + # + # let awaiter = Awaiter.new() + # + # proc f(ma: MultiAddress) {.gcsafe, async.} = + # autonatStub.returnSuccess = true + # let expected = MultiAddress.init($relaySwitch.peerInfo.addrs[0] & "/p2p/" & + # 
$relaySwitch.peerInfo.peerId & "/p2p-circuit/p2p/" & + # $switch1.peerInfo.peerId).get() + # check ma == expected + # awaiter.finished.complete() + # + # hpservice.onNewRelayAddr(f) + # + # await switch2.start() + # await switch3.start() + # await switch4.start() + # + # await switch1.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) + # + # await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) + # await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) + # await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) + # + # await awaiter.finished + # + # await hpservice.run(switch1) + # + # await autonatStub.finished + # + # await allFuturesThrowing( + # bootNode.stop(), relaySwitch.stop(), switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) \ No newline at end of file From 41125d3553082d7e25a8e5a647e906bd8e76aa43 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 14 Feb 2023 14:14:58 +0100 Subject: [PATCH 093/112] Waiting for PR in chronos --- libp2p/services/hpservice.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index e281c8e822..f01f75b264 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -35,7 +35,7 @@ type isPublicIPAddrFunc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].} proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService, - isPublicIPAddr: isPublicIPAddrFunc = isPublicAddr): T = + isPublicIPAddr: isPublicIPAddrFunc = proc(ta: TransportAddress): bool = return true): T = # FIXME: use chronos return T( autonatService: autonatService, autoRelayService: autoRelayService, From d2bae1d81fc68eb60680bd27bf51624540d882ca Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 14 Feb 2023 15:31:25 +0100 Subject: [PATCH 094/112] small refactoring --- libp2p/services/hpservice.nim | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index f01f75b264..efea130e62 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -41,10 +41,21 @@ proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayServi autoRelayService: autoRelayService, isPublicIPAddr: isPublicIPAddr) -proc startDirectConn(self: HPService, switch: Switch, relayedConnection: Connection, peerId: PeerId, - publicAddr: MultiAddress) {.async.} = - debug "starting direct connection" - await switch.connect(peerId, @[publicAddr], true, false) +proc startDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = + let conn = switch.connManager.selectConn(peerId) + await sleepAsync(100.milliseconds) # wait for AddressBook to be populated + if isRelayed(conn): + for address in switch.peerStore[AddressBook][peerId]: + if self.isPublicIPAddr(initTAddress(address).get()): + try: + await switch.connect(peerId, @[address], true, false) + await conn.close() + debug "direct connection started" + return true + except CatchableError as exc: + debug "failed to start direct connection", exc = exc.msg + continue + return false method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = var hasBeenSetup = await procCall Service(self).setup(switch) @@ -52,18 +63,8 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = hasBeenSetup = hasBeenSetup and await self.autoRelayService.setup(switch) if hasBeenSetup: 
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} = - let conn = switch.connManager.selectConn(peerId) - await sleepAsync(100.milliseconds) # wait for AddressBook to be populated - if isRelayed(conn): - for address in switch.peerStore[AddressBook][peerId]: - if self.isPublicIPAddr(initTAddress(address).get()): - try: - await self.startDirectConn(switch, conn, peerId, address) - except CatchableError as exc: - debug "failed to start direct connection", exc = exc.msg - continue - await conn.close() - debug "direct connection started" + if await self.startDirectConn(switch, peerId): + return switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) From 0a4c383eb8bbfeebb369152949fe72cd57760a66 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 8 Mar 2023 16:38:29 +0100 Subject: [PATCH 095/112] Fix some issues and set reachability in transports --- libp2p/protocols/connectivity/autonat/service.nim | 2 +- libp2p/services/hpservice.nim | 10 +++++++--- tests/testhpservice.nim | 9 +++++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index 0a62ff5811..87494632ee 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -20,7 +20,7 @@ import client import ../../../utils/heartbeat import ../../../crypto/crypto -export options +export options, core logScope: topics = "libp2p autonatservice" diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index efea130e62..4f1ce71cc5 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -42,7 +42,7 @@ proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayServi isPublicIPAddr: isPublicIPAddr) proc startDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = - let conn = switch.connManager.selectConn(peerId) + let conn = switch.connManager.selectMuxer(peerId).connection await sleepAsync(100.milliseconds) # wait for AddressBook to be populated if isRelayed(conn): for address in switch.peerStore[AddressBook][peerId]: @@ -60,7 +60,7 @@ proc startDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bo method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = var hasBeenSetup = await procCall Service(self).setup(switch) hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch) - hasBeenSetup = hasBeenSetup and await self.autoRelayService.setup(switch) + if hasBeenSetup: self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} = if await self.startDirectConn(switch, peerId): @@ -74,13 +74,17 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = elif networkReachability == NetworkReachability.Reachable: discard await self.autoRelayService.stop(switch) + # We do it here instead of in the AutonatService because this is useful only when hole punching. 
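+      # Each transport can then adapt how it dials based on the node's
+      # reachability, e.g. reusing its listen port for outgoing connections
+      # so a simultaneous open has a chance to punch through.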
+ for t in switch.transports: + t.networkReachability = networkReachability + self.autonatService.statusAndConfidenceHandler(self.onNewStatusHandler) return hasBeenSetup method run*(self: HPService, switch: Switch) {.async, public.} = await self.autonatService.run(switch) -method stop*(self: HPService, switch: Switch) {.async, public.} = +method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} = discard await self.autonatService.stop(switch) if not isNil(self.newConnectedPeerHandler): switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 2ddac934f2..256cf7fbe9 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -51,7 +51,9 @@ suite "Hole Punching": asyncTest "Direct connection must work when peer address is public": - let autonatService = AutonatService.new(AutonatClient.new(), newRng()) + let autonatClientStub = AutonatClientStub.new(expectedDials = 1) + autonatClientStub.answer = NotReachable + let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1) let relayClient = RelayClient.new() let fut = newFuture[seq[MultiAddress]]() @@ -77,7 +79,10 @@ suite "Hole Punching": checkExpiring: switch1.connManager.connCount(switch2.peerInfo.peerId) == 1 and - not isRelayed(switch1.connManager.selectConn(switch2.peerInfo.peerId)) + not isRelayed(switch1.connManager.selectMuxer(switch2.peerInfo.peerId).connection) + + for t in switch1.transports: + echo t.networkReachability await allFuturesThrowing( switch1.stop(), switch2.stop(), switchRelay.stop()) From ea22efc92042919e4ccd74742284364eee5e54b7 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 13:21:52 +0100 Subject: [PATCH 096/112] add dcutr to hp --- libp2p/services/hpservice.nim | 50 ++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 4f1ce71cc5..73dbbdffe1 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -12,13 +12,15 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import std/tables +import std/[tables, sequtils] import ../switch, ../wire import ../protocols/rendezvous import ../services/autorelayservice import ../discovery/[rendezvousinterface, discoverymngr] import ../protocols/connectivity/relay/relay import ../protocols/connectivity/autonat/service +from ../protocols/connectivity/dcutr/core import DcutrError +import ../protocols/connectivity/dcutr/client import chronos logScope: @@ -41,30 +43,46 @@ proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayServi autoRelayService: autoRelayService, isPublicIPAddr: isPublicIPAddr) -proc startDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = - let conn = switch.connManager.selectMuxer(peerId).connection +proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId, relayedConn: Connection): Future[bool] {.async.} = await sleepAsync(100.milliseconds) # wait for AddressBook to be populated - if isRelayed(conn): - for address in switch.peerStore[AddressBook][peerId]: - if self.isPublicIPAddr(initTAddress(address).get()): - try: - await switch.connect(peerId, @[address], true, false) - await conn.close() - debug "direct connection started" - return true - except CatchableError as exc: - debug "failed to start direct connection", exc = exc.msg - continue + for address in 
switch.peerStore[AddressBook][peerId]: + if self.isPublicIPAddr(initTAddress(address).get()): + try: + await switch.connect(peerId, @[address], true, false) + await relayedConn.close() + debug "direct connection created" + return true + except CatchableError as err: + debug "failed to create direct connection", err = err.msg + continue return false +proc guessNatAddrs(peerStore: PeerStore, addrs: seq[MultiAddress]): seq[MultiAddress] = + for a in addrs: + let guess = peerStore.replaceMAIpByMostObserved(a) + if guess.isSome(): + result.add(guess.get()) + method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = var hasBeenSetup = await procCall Service(self).setup(switch) hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch) if hasBeenSetup: self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} = - if await self.startDirectConn(switch, peerId): - return + try: + let conn = switch.connManager.selectMuxer(peerId).connection + if isRelayed(conn): + if await self.tryStartingDirectConn(switch, peerId, conn): + return + let dcutrClient = DcutrClient.new() + var natAddrs = switch.peerStore.getMostObservedIPsAndPorts() + if natAddrs.len == 0: + natAddrs = guessNatAddrs(switch.peerStore, switch.peerInfo.addrs) + await dcutrClient.startSync(switch, peerId, natAddrs) + await sleepAsync(2000.milliseconds) # grace period before closing relayed connection + await conn.close() + except DcutrError as err: + error "Hole punching failed during dcutr", err = err.msg switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) From c53bed61e803c6d1577b07fe2350b20a86843d67 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 13:36:34 +0100 Subject: [PATCH 097/112] relayed conn isn't necessary in tryStartingDirectConn --- libp2p/services/hpservice.nim | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 73dbbdffe1..96c9493aa1 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -43,13 +43,12 @@ proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayServi autoRelayService: autoRelayService, isPublicIPAddr: isPublicIPAddr) -proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId, relayedConn: Connection): Future[bool] {.async.} = +proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = await sleepAsync(100.milliseconds) # wait for AddressBook to be populated for address in switch.peerStore[AddressBook][peerId]: if self.isPublicIPAddr(initTAddress(address).get()): try: await switch.connect(peerId, @[address], true, false) - await relayedConn.close() debug "direct connection created" return true except CatchableError as err: @@ -72,7 +71,8 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = try: let conn = switch.connManager.selectMuxer(peerId).connection if isRelayed(conn): - if await self.tryStartingDirectConn(switch, peerId, conn): + if await self.tryStartingDirectConn(switch, peerId): + await conn.close() return let dcutrClient = DcutrClient.new() var natAddrs = switch.peerStore.getMostObservedIPsAndPorts() From 49380644f198a5f059dbc81a4ef906dffcd2d13d Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 15:53:21 +0100 Subject: [PATCH 098/112] improve naming --- libp2p/services/hpservice.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 96c9493aa1..f90a84ef84 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -77,7 +77,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = let dcutrClient = DcutrClient.new() var natAddrs = switch.peerStore.getMostObservedIPsAndPorts() if natAddrs.len == 0: - natAddrs = guessNatAddrs(switch.peerStore, switch.peerInfo.addrs) + natAddrs = guessDialableAddrs(switch.peerStore, switch.peerInfo.addrs) await dcutrClient.startSync(switch, peerId, natAddrs) await sleepAsync(2000.milliseconds) # grace period before closing relayed connection await conn.close() From 71017003787522ca6bfd4e0a4cedfba0d317834c Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 10 Mar 2023 17:41:10 +0100 Subject: [PATCH 099/112] fix export --- libp2p/protocols/connectivity/autonat/service.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index 87494632ee..465dd26d5f 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -20,7 +20,7 @@ import client import ../../../utils/heartbeat import ../../../crypto/crypto -export options, core +export options, core.NetworkReachability logScope: topics = "libp2p autonatservice" From b58cc2a31db6bb213df9b46b0cde2d095830dd3c Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 14 Mar 2023 17:30:22 +0100 Subject: [PATCH 100/112] temp fix for multiaddr problems --- libp2p/multiaddress.nim | 2 +- libp2p/protocols/identify.nim | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/libp2p/multiaddress.nim b/libp2p/multiaddress.nim index c046dba06a..c227b1476b 100644 --- a/libp2p/multiaddress.nim +++ b/libp2p/multiaddress.nim @@ -1123,5 +1123,5 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int, value.add(ma.get()) else: value.setLen(0) - return err(ProtoError.IncorrectBlob) + #return err(ProtoError.IncorrectBlob) ok(true) diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim index 85202d5967..2881061d63 100644 --- a/libp2p/protocols/identify.nim +++ b/libp2p/protocols/identify.nim @@ -261,3 +261,13 @@ proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, publi ## Send new `peerInfo`s to a connection var pb = encodeMsg(peerInfo, conn.observedAddr, true) await conn.writeLp(pb.buffer) + +proc getMostObservedIP*(self: Identify, ipVersion: IPVersion): Opt[MultiAddress] = + ## Returns the most observed IP address or none if the number of observations are less than minCount. + return self.observedAddrManager.getMostObservedIP(ipVersion) + +proc getMostObservedIPsAndPorts*(self: Identify): seq[MultiAddress] = + ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations + ## are less than minCount. 
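+  ## (The observedAddrManager aggregates the addresses remote peers report
+  ## having observed us on during identify; minCount filters out one-off
+  ## observations.)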
+ echo self.observedAddrManager + return self.observedAddrManager.getMostObservedIPsAndPorts() From ed9f26b0474f44937c73adcd938e36161b68fdd7 Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 16 Mar 2023 15:31:57 +0100 Subject: [PATCH 101/112] remove unnecessary proc --- .../connectivity/autonat/service.nim | 6 +-- tests/testautonatservice.nim | 46 +++++++++---------- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim index 465dd26d5f..50edae185e 100644 --- a/libp2p/protocols/connectivity/autonat/service.nim +++ b/libp2p/protocols/connectivity/autonat/service.nim @@ -17,6 +17,7 @@ import chronos, metrics import ../../../switch import ../../../wire import client +from core import NetworkReachability, AutonatUnreachableError import ../../../utils/heartbeat import ../../../crypto/crypto @@ -32,7 +33,7 @@ type newConnectedPeerHandler: PeerEventHandler addressMapper: AddressMapper scheduleHandle: Future[void] - networkReachability: NetworkReachability + networkReachability*: NetworkReachability confidence: Option[float] answers: Deque[NetworkReachability] autonatClient: AutonatClient @@ -73,9 +74,6 @@ proc new*( dialTimeout: dialTimeout, enableAddressMapper: enableAddressMapper) -proc networkReachability*(self: AutonatService): NetworkReachability {.inline.} = - return self.networkReachability - proc callHandler(self: AutonatService) {.async.} = if not isNil(self.statusAndConfidenceHandler): await self.statusAndConfidenceHandler(self.networkReachability, self.confidence) diff --git a/tests/testautonatservice.nim b/tests/testautonatservice.nim index 21636c74b0..7479a1c13e 100644 --- a/tests/testautonatservice.nim +++ b/tests/testautonatservice.nim @@ -51,7 +51,7 @@ suite "Autonat Service": let switch3 = createSwitch() let switch4 = createSwitch() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown await switch1.start() await switch2.start() @@ -64,7 +64,7 @@ suite "Autonat Service": await autonatClientStub.finished - check autonatService.networkReachability() == NetworkReachability.NotReachable + check autonatService.networkReachability == NetworkReachability.NotReachable check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 0.3 await allFuturesThrowing( @@ -86,7 +86,7 @@ suite "Autonat Service": if not awaiter.finished: awaiter.complete() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) @@ -101,7 +101,7 @@ suite "Autonat Service": await awaiter - check autonatService.networkReachability() == NetworkReachability.Reachable + check autonatService.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3 check switch1.peerInfo.addrs == switch1.peerInfo.listenAddrs.mapIt(switch1.peerStore.guessDialableAddr(it)) @@ -131,7 +131,7 @@ suite "Autonat Service": autonatClientStub.answer = Reachable awaiter.complete() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) @@ -146,12 +146,12 @@ suite "Autonat Service": await awaiter - check autonatService.networkReachability() == 
NetworkReachability.NotReachable + check autonatService.networkReachability == NetworkReachability.NotReachable check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 0.3 await autonatClientStub.finished - check autonatService.networkReachability() == NetworkReachability.Reachable + check autonatService.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3 await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) @@ -172,7 +172,7 @@ suite "Autonat Service": if not awaiter.finished: awaiter.complete() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) @@ -187,7 +187,7 @@ suite "Autonat Service": await awaiter - check autonatService.networkReachability() == NetworkReachability.Reachable + check autonatService.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1 await allFuturesThrowing( @@ -213,7 +213,7 @@ suite "Autonat Service": autonatClientStub.answer = Unknown awaiter.complete() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) @@ -228,12 +228,12 @@ suite "Autonat Service": await awaiter - check autonatService.networkReachability() == NetworkReachability.NotReachable + check autonatService.networkReachability == NetworkReachability.NotReachable check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 1/3 await autonatClientStub.finished - check autonatService.networkReachability() == NetworkReachability.NotReachable + check autonatService.networkReachability == NetworkReachability.NotReachable check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 1/3 await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) @@ -264,7 +264,7 @@ suite "Autonat Service": if not awaiter.finished: awaiter.complete() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) @@ -275,7 +275,7 @@ suite "Autonat Service": await awaiter - check autonatService.networkReachability() == NetworkReachability.Reachable + check autonatService.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1 await allFuturesThrowing( @@ -304,8 +304,8 @@ suite "Autonat Service": if not awaiter2.finished: awaiter2.complete() - check autonatService1.networkReachability() == NetworkReachability.Unknown - check autonatService2.networkReachability() == NetworkReachability.Unknown + check autonatService1.networkReachability == NetworkReachability.Unknown + check autonatService2.networkReachability == NetworkReachability.Unknown autonatService1.statusAndConfidenceHandler(statusAndConfidenceHandler1) autonatService2.statusAndConfidenceHandler(statusAndConfidenceHandler2) @@ -321,8 +321,8 @@ suite "Autonat Service": await awaiter1 await awaiter2 - check autonatService1.networkReachability() == NetworkReachability.Reachable - check autonatService2.networkReachability() == NetworkReachability.Reachable + check 
autonatService1.networkReachability == NetworkReachability.Reachable + check autonatService2.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1 await allFuturesThrowing( @@ -342,7 +342,7 @@ suite "Autonat Service": if not awaiter1.finished: awaiter1.complete() - check autonatService1.networkReachability() == NetworkReachability.Unknown + check autonatService1.networkReachability == NetworkReachability.Unknown autonatService1.statusAndConfidenceHandler(statusAndConfidenceHandler1) @@ -360,7 +360,7 @@ suite "Autonat Service": await awaiter1 - check autonatService1.networkReachability() == NetworkReachability.Reachable + check autonatService1.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1 # Make sure remote peer can't create a connection to us @@ -385,7 +385,7 @@ suite "Autonat Service": if not awaiter.finished: awaiter.complete() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) @@ -407,7 +407,7 @@ suite "Autonat Service": await autonatService.run(switch1) await awaiter - check autonatService.networkReachability() == NetworkReachability.Reachable + check autonatService.networkReachability == NetworkReachability.Reachable check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1 await allFuturesThrowing( @@ -422,7 +422,7 @@ suite "Autonat Service": proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} = fail() - check autonatService.networkReachability() == NetworkReachability.Unknown + check autonatService.networkReachability == NetworkReachability.Unknown autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler) From 49592d155d1a353252193655ed74af5179c79ffe Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 16 Mar 2023 17:30:05 +0100 Subject: [PATCH 102/112] remove comment --- libp2p/services/hpservice.nim | 3 --- 1 file changed, 3 deletions(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index f90a84ef84..7ad3446190 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -106,6 +106,3 @@ method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} = discard await self.autonatService.stop(switch) if not isNil(self.newConnectedPeerHandler): switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) - -# proc onNewRelayAddr*(self: HPService, f: Callb) = -# self.callb = f From d57bc41acf28bcc0ab5ccb826af2fa1b50e6e406 Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 16 Mar 2023 17:30:37 +0100 Subject: [PATCH 103/112] improve import --- libp2p/protocols/connectivity/autonat/client.nim | 2 -- tests/stubs/autonatclientstub.nim | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/libp2p/protocols/connectivity/autonat/client.nim b/libp2p/protocols/connectivity/autonat/client.nim index 8a74ef009b..0322a1edf6 100644 --- a/libp2p/protocols/connectivity/autonat/client.nim +++ b/libp2p/protocols/connectivity/autonat/client.nim @@ -20,8 +20,6 @@ import ../../../switch, ../../../peerid import core -export core - logScope: topics = "libp2p autonat" diff --git a/tests/stubs/autonatclientstub.nim b/tests/stubs/autonatclientstub.nim index f801660b78..9350244257 100644 --- 
a/tests/stubs/autonatclientstub.nim +++ b/tests/stubs/autonatclientstub.nim @@ -19,6 +19,7 @@ import ../../libp2p/[protocols/connectivity/autonat/client, peerid, multiaddress, switch] +from ../../libp2p/protocols/connectivity/autonat/core import NetworkReachability, AutonatUnreachableError, AutonatError type AutonatClientStub* = ref object of AutonatClient From f808cf75f8c0e4d87a1b425c1fe0f61e640796b7 Mon Sep 17 00:00:00 2001 From: Diego Date: Thu, 16 Mar 2023 20:34:58 +0100 Subject: [PATCH 104/112] fix for nim 1.2 --- tests/testhpservice.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 256cf7fbe9..6a7ece7d61 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -16,7 +16,7 @@ import ../libp2p/[builders, services/hpservice, services/autorelayservice] import ../libp2p/protocols/connectivity/relay/[relay, client] -import ../libp2p/protocols/connectivity/autonat/[client, service] +import ../libp2p/protocols/connectivity/autonat/[service] import ../libp2p/wire import stubs/autonatclientstub From f5cd0736de268beae7916e8c688c876a1bed334b Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 17 Mar 2023 12:22:30 +0100 Subject: [PATCH 105/112] improve naming --- tests/testhpservice.nim | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 6a7ece7d61..a84e6563d9 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -56,36 +56,36 @@ suite "Hole Punching": let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1) let relayClient = RelayClient.new() - let fut = newFuture[seq[MultiAddress]]() + let privatePeerRelayAddr = newFuture[seq[MultiAddress]]() - let switch2 = createSwitch(RelayClient.new()) + let publicPeerSwitch = createSwitch(RelayClient.new()) proc checkMA(address: seq[MultiAddress]) = - if not fut.completed(): + if not privatePeerRelayAddr.completed(): echo $address - fut.complete(address) + privatePeerRelayAddr.complete(address) let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng()) let hpservice = HPService.new(autonatService, autoRelayService, isPublicAddrIPAddrMock) - let switch1 = createSwitch(relayClient, hpservice) + let privatePeerSwitch = createSwitch(relayClient, hpservice) let switchRelay = createSwitch(Relay.new()) - await allFutures(switchRelay.start(), switch1.start(), switch2.start()) + await allFutures(switchRelay.start(), privatePeerSwitch.start(), publicPeerSwitch.start()) - await switch1.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs) + await privatePeerSwitch.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs) - await switch2.connect(switch1.peerInfo.peerId, (await fut)) + await publicPeerSwitch.connect(privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr)) checkExpiring: - switch1.connManager.connCount(switch2.peerInfo.peerId) == 1 and - not isRelayed(switch1.connManager.selectMuxer(switch2.peerInfo.peerId).connection) + privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1 and + not isRelayed(privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection) - for t in switch1.transports: + for t in privatePeerSwitch.transports: echo t.networkReachability await allFuturesThrowing( - switch1.stop(), switch2.stop(), switchRelay.stop()) + privatePeerSwitch.stop(), publicPeerSwitch.stop(), switchRelay.stop()) # asyncTest "Hope Punching Public 
Reachability test": # let switch1 = createSwitch() From a0cefe56ad3035e3f6f047f9eecc3788c8ef4e2b Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 21 Mar 2023 18:01:03 +0100 Subject: [PATCH 106/112] several fixes and more tests --- .../protocols/connectivity/dcutr/client.nim | 1 - libp2p/services/hpservice.nim | 21 ++- tests/testhpservice.nim | 169 +++++++----------- 3 files changed, 75 insertions(+), 116 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index 560741e6c6..b83f19dc80 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -62,7 +62,6 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: debug "Dcutr initiator has received a Connect message back.", connectAnswer let halfRtt = (rttEnd - rttStart) div 2'i64 await stream.send(MsgType.Sync, addrs) - debug "Dcutr initiator has sent a Sync message." await sleepAsync(halfRtt) if peerDialableAddrs.len > self.maxDialableAddrs: diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 7ad3446190..7ff77a1ff6 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -20,7 +20,7 @@ import ../discovery/[rendezvousinterface, discoverymngr] import ../protocols/connectivity/relay/relay import ../protocols/connectivity/autonat/service from ../protocols/connectivity/dcutr/core import DcutrError -import ../protocols/connectivity/dcutr/client +import ../protocols/connectivity/dcutr/[client, server] import chronos logScope: @@ -46,14 +46,14 @@ proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayServi proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = await sleepAsync(100.milliseconds) # wait for AddressBook to be populated for address in switch.peerStore[AddressBook][peerId]: - if self.isPublicIPAddr(initTAddress(address).get()): - try: + try: + if self.isPublicIPAddr(initTAddress(address).get()): await switch.connect(peerId, @[address], true, false) - debug "direct connection created" + debug "Direct connection created." 
return true - except CatchableError as err: - debug "failed to create direct connection", err = err.msg - continue + except Exception as err: + debug "Failed to create direct connection.", err = err.msg + continue return false proc guessNatAddrs(peerStore: PeerStore, addrs: seq[MultiAddress]): seq[MultiAddress] = @@ -67,10 +67,13 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch) if hasBeenSetup: + let dcutrProto = Dcutr.new(switch) + switch.mount(dcutrProto) + self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} = try: let conn = switch.connManager.selectMuxer(peerId).connection - if isRelayed(conn): + if isRelayed(conn) and conn.transportDir == Direction.In: if await self.tryStartingDirectConn(switch, peerId): await conn.close() return @@ -81,7 +84,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = await dcutrClient.startSync(switch, peerId, natAddrs) await sleepAsync(2000.milliseconds) # grace period before closing relayed connection await conn.close() - except DcutrError as err: + except CatchableError as err: error "Hole punching failed during dcutr", err = err.msg switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index a84e6563d9..85640ff1dc 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -61,7 +61,6 @@ suite "Hole Punching": let publicPeerSwitch = createSwitch(RelayClient.new()) proc checkMA(address: seq[MultiAddress]) = if not privatePeerRelayAddr.completed(): - echo $address privatePeerRelayAddr.complete(address) let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng()) @@ -87,108 +86,66 @@ suite "Hole Punching": await allFuturesThrowing( privatePeerSwitch.stop(), publicPeerSwitch.stop(), switchRelay.stop()) - # asyncTest "Hope Punching Public Reachability test": - # let switch1 = createSwitch() - # - # let switch2 = createSwitch() - # let switch3 = createSwitch() - # let switch4 = createSwitch() - # - # let autonatService = AutonatService.new(AutonatClient.new(), newRng()) - # - # let relayClient = RelayClient.new() - # let fut = newFuture[void]() - # proc checkMA(address: seq[MultiAddress]) = - # check: address[0] == buildRelayMA(switchRelay, switchClient) - # fut.complete() - # let autoRelayService = AutoRelayService.new(3, relayClient, checkMA, newRng()) - # - # let hpservice = HPService.new(autonatService, autoRelayService, newRng()) - # - # switch1.addService(hpservice) - # - # # proc f(ma: MultiAddress) {.gcsafe, async.} = - # # echo "onNewRelayAddr shouldn't be called" - # # fail() - # # - # # hpservice.onNewRelayAddr(f) - # - # await switch1.start() - # await switch2.start() - # await switch3.start() - # await switch4.start() - # - # await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) - # await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) - # await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - # - # await autonatStub.finished - # - # await allFuturesThrowing( - # switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) - # - # asyncTest "Hope Punching Full Reachability test": - # - # let rdv1 = RendezVous.new() - # let rdv2 = RendezVous.new() - # - # let relayClient = RelayClient.new() - # let switch1 = createSwitch(rdv1, relayClient) - # let switch2 = createSwitch(rdv2) - # let switch3 = 
createSwitch() - # let switch4 = createSwitch() - # - # let bootRdv = RendezVous.new() - # let bootNode = createSwitch(rdv = bootRdv) - # await bootNode.start() - # - # let relay = Relay.new() - # let relayRdv = RendezVous.new() - # let relaySwitch = createSwitch(rdv = relayRdv, relay = relay) - # await relaySwitch.start() - # - # await relaySwitch.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) - # - # let dm = DiscoveryManager() - # dm.add(RendezVousInterface.new(relayRdv)) - # dm.advertise(RdvNamespace("relay")) - # - # let autonatStub = AutonatStub.new(expectedDials = 8) - # autonatStub.returnSuccess = false - # - # let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds)) - # let hpservice = HPService.new(rdv1, relayClient, autonatService) - # - # switch1.addService(hpservice) - # await switch1.start() - # - # let awaiter = Awaiter.new() - # - # proc f(ma: MultiAddress) {.gcsafe, async.} = - # autonatStub.returnSuccess = true - # let expected = MultiAddress.init($relaySwitch.peerInfo.addrs[0] & "/p2p/" & - # $relaySwitch.peerInfo.peerId & "/p2p-circuit/p2p/" & - # $switch1.peerInfo.peerId).get() - # check ma == expected - # awaiter.finished.complete() - # - # hpservice.onNewRelayAddr(f) - # - # await switch2.start() - # await switch3.start() - # await switch4.start() - # - # await switch1.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs) - # - # await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) - # await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs) - # await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs) - # - # await awaiter.finished - # - # await hpservice.run(switch1) - # - # await autonatStub.finished - # - # await allFuturesThrowing( - # bootNode.stop(), relaySwitch.stop(), switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop()) \ No newline at end of file + asyncTest "Hole punching when peers addresses are private": + + # There's no check in this test because it can't test hole punching locally. It exists just to be sure the rest of + # the code works properly. 
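+    # Topology: switchRelay relays for privatePeerSwitch1, both private
+    # peers are forced to NotReachable through their AutonatClientStub, and
+    # the aux switches only exist so the autonat service has peers to query.
+    # Dialing privatePeerSwitch1's relayed address should then trigger the
+    # dcutr exchange on both sides.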
+ + let autonatClientStub1 = AutonatClientStub.new(expectedDials = 1) + autonatClientStub1.answer = NotReachable + let autonatService1 = AutonatService.new(autonatClientStub1, newRng(), maxQueueSize = 1) + + let autonatClientStub2 = AutonatClientStub.new(expectedDials = 1) + autonatClientStub2.answer = NotReachable + let autonatService2 = AutonatService.new(autonatClientStub2, newRng(), maxQueueSize = 1) + + let relayClient1 = RelayClient.new() + let relayClient2 = RelayClient.new() + let privatePeerRelayAddr1 = newFuture[seq[MultiAddress]]() + + proc checkMA(address: seq[MultiAddress]) = + if not privatePeerRelayAddr1.completed(): + privatePeerRelayAddr1.complete(address) + + let autoRelayService1 = AutoRelayService.new(1, relayClient1, checkMA, newRng()) + let autoRelayService2 = AutoRelayService.new(1, relayClient2, nil, newRng()) + + let hpservice1 = HPService.new(autonatService1, autoRelayService1) + let hpservice2 = HPService.new(autonatService2, autoRelayService2) + + let privatePeerSwitch1 = createSwitch(relayClient1, hpservice1) + let privatePeerSwitch2 = createSwitch(relayClient2, hpservice2) + let switchRelay = createSwitch(Relay.new()) + let switchAux = createSwitch() + let switchAux2 = createSwitch() + let switchAux3 = createSwitch() + let switchAux4 = createSwitch() + + var awaiter = newFuture[void]() + + await allFutures( + switchRelay.start(), privatePeerSwitch1.start(), privatePeerSwitch2.start(), + switchAux.start(), switchAux2.start(), switchAux3.start(), switchAux4.start() + ) + + await privatePeerSwitch1.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs) + await privatePeerSwitch2.connect(switchAux.peerInfo.peerId, switchAux.peerInfo.addrs) + + await sleepAsync(200.millis) + + await privatePeerSwitch1.connect(switchAux2.peerInfo.peerId, switchAux2.peerInfo.addrs) + await privatePeerSwitch1.connect(switchAux3.peerInfo.peerId, switchAux3.peerInfo.addrs) + await privatePeerSwitch1.connect(switchAux4.peerInfo.peerId, switchAux4.peerInfo.addrs) + + await privatePeerSwitch2.connect(switchAux2.peerInfo.peerId, switchAux2.peerInfo.addrs) + await privatePeerSwitch2.connect(switchAux3.peerInfo.peerId, switchAux3.peerInfo.addrs) + await privatePeerSwitch2.connect(switchAux4.peerInfo.peerId, switchAux4.peerInfo.addrs) + + privatePeerSwitch1.dialer.isSimultaneousConnServer = false + await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1)) + + await sleepAsync(200.millis) + + await allFuturesThrowing( + privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(), + switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop()) From f8b578a7667d8081251376b7f89b516b0ad9ba84 Mon Sep 17 00:00:00 2001 From: Diego Date: Tue, 21 Mar 2023 22:50:20 +0100 Subject: [PATCH 107/112] remove echo --- tests/testhpservice.nim | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 85640ff1dc..8008b0dbb4 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -80,9 +80,6 @@ suite "Hole Punching": privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1 and not isRelayed(privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection) - for t in privatePeerSwitch.transports: - echo t.networkReachability - await allFuturesThrowing( privatePeerSwitch.stop(), publicPeerSwitch.stop(), switchRelay.stop()) From 9f86addb636bf16ea700e9cac5fbc687b0864e5f Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 5 Apr 2023 17:14:25 
+0200 Subject: [PATCH 108/112] fixes after rebase --- libp2p/protocols/identify.nim | 10 ---------- libp2p/services/hpservice.nim | 10 ++-------- tests/testhpservice.nim | 1 - 3 files changed, 2 insertions(+), 19 deletions(-) diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim index 2881061d63..85202d5967 100644 --- a/libp2p/protocols/identify.nim +++ b/libp2p/protocols/identify.nim @@ -261,13 +261,3 @@ proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, publi ## Send new `peerInfo`s to a connection var pb = encodeMsg(peerInfo, conn.observedAddr, true) await conn.writeLp(pb.buffer) - -proc getMostObservedIP*(self: Identify, ipVersion: IPVersion): Opt[MultiAddress] = - ## Returns the most observed IP address or none if the number of observations are less than minCount. - return self.observedAddrManager.getMostObservedIP(ipVersion) - -proc getMostObservedIPsAndPorts*(self: Identify): seq[MultiAddress] = - ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations - ## are less than minCount. - echo self.observedAddrManager - return self.observedAddrManager.getMostObservedIPsAndPorts() diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 7ff77a1ff6..8d65a7246a 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -56,12 +56,6 @@ proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Fut continue return false -proc guessNatAddrs(peerStore: PeerStore, addrs: seq[MultiAddress]): seq[MultiAddress] = - for a in addrs: - let guess = peerStore.replaceMAIpByMostObserved(a) - if guess.isSome(): - result.add(guess.get()) - method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = var hasBeenSetup = await procCall Service(self).setup(switch) hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch) @@ -78,9 +72,9 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = await conn.close() return let dcutrClient = DcutrClient.new() - var natAddrs = switch.peerStore.getMostObservedIPsAndPorts() + var natAddrs = switch.peerStore.getMostObservedProtosAndPorts() if natAddrs.len == 0: - natAddrs = guessDialableAddrs(switch.peerStore, switch.peerInfo.addrs) + natAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it)) await dcutrClient.startSync(switch, peerId, natAddrs) await sleepAsync(2000.milliseconds) # grace period before closing relayed connection await conn.close() diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 8008b0dbb4..218450c4fb 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -138,7 +138,6 @@ suite "Hole Punching": await privatePeerSwitch2.connect(switchAux3.peerInfo.peerId, switchAux3.peerInfo.addrs) await privatePeerSwitch2.connect(switchAux4.peerInfo.peerId, switchAux4.peerInfo.addrs) - privatePeerSwitch1.dialer.isSimultaneousConnServer = false await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1)) await sleepAsync(200.millis) From 39ac49f62fe705f37f5048f7d5a16ae057aad095 Mon Sep 17 00:00:00 2001 From: Diego Date: Mon, 10 Apr 2023 23:33:37 +0200 Subject: [PATCH 109/112] increase test coverage --- libp2p/services/hpservice.nim | 15 +++++------ tests/stubs/switchstub.nim | 48 +++++++++++++++++++++++++++++++++++ tests/stubs/torstub.nim | 9 +++++++ tests/testdcutr.nim | 31 +--------------------- tests/testhpservice.nim | 36 ++++++++++++++++++++------ 5 files changed, 92 
insertions(+), 47 deletions(-) create mode 100644 tests/stubs/switchstub.nim diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 8d65a7246a..25f4c4d833 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -32,26 +32,23 @@ type onNewStatusHandler: StatusAndConfidenceHandler autoRelayService: AutoRelayService autonatService: AutonatService - isPublicIPAddr: isPublicIPAddrFunc + isPublicIPAddrProc: IsPublicIPAddrProc - isPublicIPAddrFunc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].} + IsPublicIPAddrProc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].} proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService, - isPublicIPAddr: isPublicIPAddrFunc = proc(ta: TransportAddress): bool = return true): T = # FIXME: use chronos - return T( - autonatService: autonatService, - autoRelayService: autoRelayService, - isPublicIPAddr: isPublicIPAddr) + isPublicIPAddrProc: IsPublicIPAddrProc = isGlobal): T = + return T(autonatService: autonatService, autoRelayService: autoRelayService, isPublicIPAddrProc: isPublicIPAddrProc) proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = await sleepAsync(100.milliseconds) # wait for AddressBook to be populated for address in switch.peerStore[AddressBook][peerId]: try: - if self.isPublicIPAddr(initTAddress(address).get()): + if self.isPublicIPAddrProc(initTAddress(address).get()): await switch.connect(peerId, @[address], true, false) debug "Direct connection created." return true - except Exception as err: + except CatchableError as err: debug "Failed to create direct connection.", err = err.msg continue return false diff --git a/tests/stubs/switchstub.nim b/tests/stubs/switchstub.nim new file mode 100644 index 0000000000..38c99739ff --- /dev/null +++ b/tests/stubs/switchstub.nim @@ -0,0 +1,48 @@ +# Nim-LibP2P +# Copyright (c) 2023 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
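+#
+# SwitchStub delegates all of a wrapped Switch's state and only overrides
+# `connect`, so tests can swap in a failing dial. A minimal usage sketch,
+# mirroring how the hole punching tests drive it (`createSwitch` is the
+# helper defined in those tests):
+#
+#   let stub = SwitchStub.new(createSwitch())
+#   stub.connectStub = proc() {.async.} =
+#     raise newException(CatchableError, "simulated dial error")
+#   # every stub.connect(...) now raises instead of dialing out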
+ +{.used.} + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import chronos +import ../../libp2p/[peerid, multiaddress, switch] + +type + SwitchStub* = ref object of Switch + switch*: Switch + connectStub*: proc(): Future[void] {.async.} + +method connect*( + self: SwitchStub, + peerId: PeerId, + addrs: seq[MultiAddress], + forceDial = false, + reuseConnection = true, + upgradeDir = Direction.Out) {.async.} = + if (self.connectStub != nil): + await self.connectStub() + else: + await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir) + +proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: proc (): Future[void] {.async.} = nil): T = + return SwitchStub( + switch: switch, + peerInfo: switch.peerInfo, + ms: switch.ms, + transports: switch.transports, + connManager: switch.connManager, + peerStore: switch.peerStore, + dialer: switch.dialer, + nameResolver: switch.nameResolver, + services: switch.services, + connectStub: connectStub) diff --git a/tests/stubs/torstub.nim b/tests/stubs/torstub.nim index ca5fe97769..2b96bbfb31 100644 --- a/tests/stubs/torstub.nim +++ b/tests/stubs/torstub.nim @@ -1,3 +1,12 @@ +# Nim-LibP2P +# Copyright (c) 2023 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + {.used.} when (NimMajor, NimMinor) < (1, 4): diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim index cc0bd2bf6c..a5a196402e 100644 --- a/tests/testdcutr.nim +++ b/tests/testdcutr.nim @@ -16,36 +16,7 @@ from ../libp2p/protocols/connectivity/autonat/core import NetworkReachability import ../libp2p/builders import ../libp2p/utils/future import ./helpers - -type - SwitchStub* = ref object of Switch - switch: Switch - connectStub*: proc(): Future[void] {.async.} - -proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: proc (): Future[void] {.async.} = nil): T = - return SwitchStub( - switch: switch, - peerInfo: switch.peerInfo, - ms: switch.ms, - transports: switch.transports, - connManager: switch.connManager, - peerStore: switch.peerStore, - dialer: switch.dialer, - nameResolver: switch.nameResolver, - services: switch.services, - connectStub: connectStub) - -method connect*( - self: SwitchStub, - peerId: PeerId, - addrs: seq[MultiAddress], - forceDial = false, - reuseConnection = true, - upgradeDir = Direction.Out) {.async.} = - if (self.connectStub != nil): - await self.connectStub() - else: - await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir) +import ./stubs/switchstub suite "Dcutr": teardown: diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim index 218450c4fb..572727899e 100644 --- a/tests/testhpservice.nim +++ b/tests/testhpservice.nim @@ -7,10 +7,18 @@ # This file may not be copied, modified, or distributed except according to # those terms. 
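+# `{.used.}` marks this module as used, so aggregated test runners don't emit
+# unused-import warnings for it; the version switch below keeps exception
+# tracking consistent (Nim < 1.4 still had to list Defect in `raises`, newer
+# compilers use the bare `raises: []`).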
+{.used.} + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + import chronos import unittest2 import ./helpers +import ./stubs/switchstub import ../libp2p/[builders, switch, services/hpservice, @@ -20,10 +28,10 @@ import ../libp2p/protocols/connectivity/autonat/[service] import ../libp2p/wire import stubs/autonatclientstub -proc isPublicAddrIPAddrMock*(ta: TransportAddress): bool = +proc isPublicAddrIPAddrMock(ta: TransportAddress): bool = return true -proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch = +proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch {.raises: [LPError, Defect].} = var builder = SwitchBuilder.new() .withRng(newRng()) .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ]) @@ -50,7 +58,6 @@ suite "Hole Punching": checkTrackers() asyncTest "Direct connection must work when peer address is public": - let autonatClientStub = AutonatClientStub.new(expectedDials = 1) autonatClientStub.answer = NotReachable let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1) @@ -83,8 +90,9 @@ suite "Hole Punching": await allFuturesThrowing( privatePeerSwitch.stop(), publicPeerSwitch.stop(), switchRelay.stop()) - asyncTest "Hole punching when peers addresses are private": - + proc holePunchingTest(connectStub: proc (): Future[void] {.async.}, + isPublicIPAddrProc: IsPublicIPAddrProc, + answer: Answer) {.async.} = # There's no check in this test cause it can't test hole punching locally. It exists just to be sure the rest of # the code works properly. @@ -93,7 +101,7 @@ suite "Hole Punching": let autonatService1 = AutonatService.new(autonatClientStub1, newRng(), maxQueueSize = 1) let autonatClientStub2 = AutonatClientStub.new(expectedDials = 1) - autonatClientStub2.answer = NotReachable + autonatClientStub2.answer = answer let autonatService2 = AutonatService.new(autonatClientStub2, newRng(), maxQueueSize = 1) let relayClient1 = RelayClient.new() @@ -107,10 +115,10 @@ suite "Hole Punching": let autoRelayService1 = AutoRelayService.new(1, relayClient1, checkMA, newRng()) let autoRelayService2 = AutoRelayService.new(1, relayClient2, nil, newRng()) - let hpservice1 = HPService.new(autonatService1, autoRelayService1) + let hpservice1 = HPService.new(autonatService1, autoRelayService1, isPublicIPAddrProc) let hpservice2 = HPService.new(autonatService2, autoRelayService2) - let privatePeerSwitch1 = createSwitch(relayClient1, hpservice1) + let privatePeerSwitch1 = SwitchStub.new(createSwitch(relayClient1, hpservice1)) let privatePeerSwitch2 = createSwitch(relayClient2, hpservice2) let switchRelay = createSwitch(Relay.new()) let switchAux = createSwitch() @@ -138,6 +146,7 @@ suite "Hole Punching": await privatePeerSwitch2.connect(switchAux3.peerInfo.peerId, switchAux3.peerInfo.addrs) await privatePeerSwitch2.connect(switchAux4.peerInfo.peerId, switchAux4.peerInfo.addrs) + privatePeerSwitch1.connectStub = connectStub await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1)) await sleepAsync(200.millis) @@ -145,3 +154,14 @@ suite "Hole Punching": await allFuturesThrowing( privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(), switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop()) + + asyncTest "Hole punching when peers addresses are private": + await holePunchingTest(nil, isGlobal, NotReachable) + + asyncTest "Hole punching when there is an error during unilateral direct connection": + + proc 
connectStub(): Future[void] {.async.} = + raise newException(CatchableError, "error") + + await holePunchingTest(connectStub, isPublicAddrIPAddrMock, Reachable) + From 5f4745ec9040ee94001bde851ff2f4e475503fa7 Mon Sep 17 00:00:00 2001 From: Diego Date: Wed, 12 Apr 2023 14:35:38 +0200 Subject: [PATCH 110/112] fixes for code review --- libp2p/protocols/connectivity/dcutr/client.nim | 2 ++ libp2p/protocols/connectivity/dcutr/server.nim | 3 ++- libp2p/services/hpservice.nim | 10 ++++++---- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim index b83f19dc80..52003187c3 100644 --- a/libp2p/protocols/connectivity/dcutr/client.nim +++ b/libp2p/protocols/connectivity/dcutr/client.nim @@ -23,6 +23,8 @@ import ../../protocol, ../../../switch, ../../../utils/future +export DcutrError + type DcutrClient* = ref object connectTimeout: Duration diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim index 0dada156fa..4903634ea0 100644 --- a/libp2p/protocols/connectivity/dcutr/server.nim +++ b/libp2p/protocols/connectivity/dcutr/server.nim @@ -23,7 +23,8 @@ import ../../protocol, ../../../switch, ../../../utils/future -export chronicles +export DcutrError +export chronicles #FIXME understand why this is needed type Dcutr* = ref object of LPProtocol diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 25f4c4d833..55d17fe0fa 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -13,15 +13,17 @@ else: {.push raises: [].} import std/[tables, sequtils] + +import chronos, chronicles + import ../switch, ../wire import ../protocols/rendezvous import ../services/autorelayservice import ../discovery/[rendezvousinterface, discoverymngr] import ../protocols/connectivity/relay/relay import ../protocols/connectivity/autonat/service -from ../protocols/connectivity/dcutr/core import DcutrError import ../protocols/connectivity/dcutr/[client, server] -import chronos + logScope: topics = "libp2p hpservice" @@ -41,7 +43,7 @@ proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayServi return T(autonatService: autonatService, autoRelayService: autoRelayService, isPublicIPAddrProc: isPublicIPAddrProc) proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} = - await sleepAsync(100.milliseconds) # wait for AddressBook to be populated + await sleepAsync(500.milliseconds) # wait for AddressBook to be populated for address in switch.peerStore[AddressBook][peerId]: try: if self.isPublicIPAddrProc(initTAddress(address).get()): await switch.connect(peerId, @[address], true, false) debug "Direct connection created." return true @@ -76,7 +78,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} = await sleepAsync(2000.milliseconds) # grace period before closing relayed connection await conn.close() except CatchableError as err: - error "Hole punching failed during dcutr", err = err.msg + debug "Hole punching failed during dcutr", err = err.msg switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined) From 4d2d21c3673a0afba3a98cb8c31cb939a389afc7 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 14 Apr 2023 17:59:49 +0200 Subject: [PATCH 111/112] improve log --- libp2p/wire.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/wire.nim b/libp2p/wire.nim index 0888149455..f375e7b771 100644 --- a/libp2p/wire.nim +++ b/libp2p/wire.nim @@ -71,7 +71,7 @@ proc initTAddress*(ma: MultiAddress):
MaResult[TransportAddress] = res.port = Port(fromBytesBE(uint16, pbuf)) ok(res) else: - err("MultiAddress must be wire address (tcp, udp or unix)") + err("MultiAddress must be wire address (tcp, udp or unix): " & $ma) proc connect*( ma: MultiAddress, From f05b2c31b8ba401a1be927861604f7a05166b0f1 Mon Sep 17 00:00:00 2001 From: Diego Date: Fri, 14 Apr 2023 18:29:22 +0200 Subject: [PATCH 112/112] handle relay addrs in the address book --- libp2p/services/hpservice.nim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim index 55d17fe0fa..a2ee4974a6 100644 --- a/libp2p/services/hpservice.nim +++ b/libp2p/services/hpservice.nim @@ -46,7 +46,8 @@ proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Fut await sleepAsync(500.milliseconds) # wait for AddressBook to be populated for address in switch.peerStore[AddressBook][peerId]: try: - if self.isPublicIPAddrProc(initTAddress(address).get()): + let ta = initTAddress(address) + if ta.isOk() and self.isPublicIPAddrProc(ta.get()): await switch.connect(peerId, @[address], true, false) debug "Direct connection created." return true
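+          # Relay addresses (e.g. ".../p2p-circuit") can now end up in the
+          # AddressBook, and initTAddress only accepts wire addresses (tcp,
+          # udp or unix), hence the isOk() guard above instead of a bare
+          # .get(). A minimal sketch of the same guard (illustrative address):
+          #
+          #   let ta = initTAddress(
+          #     MultiAddress.init("/ip4/1.2.3.4/tcp/1/p2p-circuit").tryGet())
+          #   assert ta.isErr()  # non-wire address: skipped, not crashed on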