diff --git a/.cargo/config.toml b/.cargo/config.toml index 95976aaa800..df790f03322 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] # Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped. -custom-clippy = "clippy -- -A clippy::type_complexity -A clippy::pedantic -D warnings" +custom-clippy = "clippy --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings" diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..ef1f9254a82 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,8 @@ +# This file contains revisions that are to be ignored by git when running `git blame`. +# +# This does NOT work automatically, you first need to tell Git about this file. +# To do so, run `git config --global blame.ignoreRevsFile .git-blame-ignore-revs`. +# You may want to run this without `--global` if you have a different naming convention for this file in other repositories. +# +# Format with rustfmt +f701b24ec0f99be49444a6e7de950c66b01b2f3f diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index cb81ca42c79..d13ee922496 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -5,7 +5,7 @@ about: Create a bug report for rust-libp2p. - + ## Summary diff --git a/.github/workflows/cargo-audit.yml b/.github/workflows/cargo-audit.yml index 0a39eb0dc96..2b5abe19292 100644 --- a/.github/workflows/cargo-audit.yml +++ b/.github/workflows/cargo-audit.yml @@ -2,14 +2,7 @@ name: cargo audit on: schedule: - cron: '0 0 * * *' - push: - paths: - - '**/Cargo.toml' - - '**/Cargo.lock' - pull_request: - paths: - - '**/Cargo.toml' - - '**/Cargo.lock' + jobs: audit: runs-on: ubuntu-latest diff --git a/.github/workflows/cargo-deny-pr.yml b/.github/workflows/cargo-deny-pr.yml new file mode 100644 index 00000000000..16b16d16a65 --- /dev/null +++ b/.github/workflows/cargo-deny-pr.yml @@ -0,0 +1,22 @@ +name: cargo deny +on: + push: + paths: + - '**/Cargo.toml' + pull_request: + paths: + - '**/Cargo.toml' +jobs: + cargo-deny: + runs-on: ubuntu-latest + strategy: + matrix: + checks: + - advisories + - bans licenses sources + + steps: + - uses: actions/checkout@v3 + - uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check ${{ matrix.checks }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5cda587983c..c1509406f8a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,9 +24,12 @@ jobs: with: access_token: ${{ github.token }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # v1.4.0 + - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 with: key: ${{ matrix.args }} @@ -56,6 +59,9 @@ jobs: with: access_token: ${{ github.token }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - uses: actions/checkout@v3 - name: Install Rust ${{ matrix.toolchain }} @@ -72,7 +78,7 @@ jobs: - name: Install CMake run: sudo apt-get install -y cmake - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # v1.4.0 + - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 with: key: ${{ matrix.toolchain }} @@ -91,6 +97,9 @@ jobs: with: access_token: ${{ github.token }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 
+ - uses: actions/checkout@v3 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1.0.7 @@ -99,7 +108,7 @@ jobs: toolchain: stable override: true - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # v1.4.0 + - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 - name: Check rustdoc links run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items --all-features @@ -113,6 +122,9 @@ jobs: with: access_token: ${{ github.token }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1.0.7 @@ -122,7 +134,7 @@ jobs: override: true components: clippy - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # v1.4.0 + - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 - name: Run cargo clippy uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 @@ -139,6 +151,9 @@ jobs: with: access_token: ${{ github.token }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1.0.7 @@ -147,7 +162,7 @@ jobs: toolchain: stable override: true - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # v1.4.0 + - uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0 - name: Run ipfs-kad example run: RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run --example ipfs-kad diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml new file mode 100644 index 00000000000..9413cce7e95 --- /dev/null +++ b/.github/workflows/interop-test.yml @@ -0,0 +1,27 @@ +on: + pull_request: + push: + branches: + - master +name: Interoperability Testing + +jobs: + # NOTE: during a pull request run, github creates a merge commit referenced in `github.sha` + # that merge commit is not a regular commit. You won't find it with a regular `git checkout SHA` and + # tools like `go get repo@SHA` won't find it. + # + # As a workaround, we generate a path to the actual pull request's commit, it looks like: + # `github.com/external-org/go-libp2p@latest-commit-on-their-branch` + run-ping-interop-cross-version: + uses: "libp2p/test-plans/.github/workflows/run-composition.yml@master" + with: + composition_file: "ping/_compositions/rust-cross-versions.toml" + custom_git_target: github.com/${{ github.event.pull_request.head.repo.full_name || github.event.repository.full_name }} + custom_git_reference: ${{ github.event.pull_request.head.sha || github.sha }} + run-ping-interop-cross-implementation: + uses: "libp2p/test-plans/.github/workflows/run-composition.yml@master" + with: + composition_file: "ping/_compositions/go-rust-interop-latest.toml" + custom_git_target: github.com/${{ github.event.pull_request.head.repo.full_name || github.event.repository.full_name }} + custom_git_reference: ${{ github.event.pull_request.head.sha || github.sha }} + custom_interop_target: rust \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index b4496d987a0..2aa74dae1df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,10 +43,48 @@ # `libp2p` facade crate -# 0.47.0 [unreleased] +# 0.49.0 - [unreleased] + +- Update to [`libp2p-tcp` `v0.37.0`](transports/tcp/CHANGELOG.md#0370). + +- Update to [`libp2p-swarm-derive` `v0.30.1`](swarm-derive/CHANGELOG.md#0301). 
+ +- Update to [`libp2p-metrics` `v0.10.0`](misc/metrics/CHANGELOG.md#0100). + +- Update to [`libp2p-kad` `v0.41.0`](protocols/kad/CHANGELOG.md#0410). + +# 0.48.0 + +- Update to [`libp2p-core` `v0.36.0`](core/CHANGELOG.md#0360). + +- Update to [`libp2p-swarm-derive` `v0.30.0`](swarm-derive/CHANGELOG.md#0300). + +- Update to [`libp2p-dcutr` `v0.6.0`](protocols/dcutr/CHANGELOG.md#060). + +- Update to [`libp2p-rendezvous` `v0.9.0`](protocols/rendezvous/CHANGELOG.md#090). + +- Update to [`libp2p-ping` `v0.39.0`](protocols/ping/CHANGELOG.md#0390). + +- Update to [`libp2p-identify` `v0.39.0`](protocols/identify/CHANGELOG.md#0390). + +- Update to [`libp2p-floodsub` `v0.39.0`](protocols/floodsub/CHANGELOG.md#0390). + +- Update to [`libp2p-relay` `v0.12.0`](protocols/relay/CHANGELOG.md#0120). + +- Update to [`libp2p-metrics` `v0.9.0`](misc/metrics/CHANGELOG.md#090). + +- Update to [`libp2p-kad` `v0.40.0`](protocols/kad/CHANGELOG.md#0400). + +- Update to [`libp2p-autonat` `v0.7.0`](protocols/autonat/CHANGELOG.md#070). + +- Update to [`libp2p-request-response` `v0.21.0`](protocols/request-response/CHANGELOG.md#0210). + +# 0.47.0 - Update to [`libp2p-dcutr` `v0.5.0`](protocols/dcutr/CHANGELOG.md#050). +- Update to [`libp2p-derive` `v0.29.0`](swarm-derive/CHANGELOG.md#0290). + - Update to [`libp2p-rendezvous` `v0.8.0`](protocols/rendezvous/CHANGELOG.md#080). - Update to [`libp2p-ping` `v0.38.0`](protocols/ping/CHANGELOG.md#0380). diff --git a/Cargo.toml b/Cargo.toml index 29606815b30..c4184ea2e7b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition = "2021" rust-version = "1.60.0" description = "Peer-to-peer networking library" -version = "0.47.0" +version = "0.49.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,10 +16,10 @@ default = [ "deflate", "dns-async-std", "floodsub", - "gossipsub", "identify", "kad", - "mdns", + "gossipsub", + "mdns-async-io", "mplex", "noise", "ping", @@ -29,6 +29,7 @@ default = [ "relay", "request-response", "rendezvous", + "rsa", "secp256k1", "tcp-async-io", "uds", @@ -47,7 +48,8 @@ identify = ["dep:libp2p-identify", "libp2p-metrics?/identify"] kad = ["dep:libp2p-kad", "libp2p-metrics?/kad"] gossipsub = ["dep:libp2p-gossipsub", "libp2p-metrics?/gossipsub"] metrics = ["dep:libp2p-metrics"] -mdns = ["dep:libp2p-mdns"] +mdns-async-io = ["dep:libp2p-mdns", "libp2p-mdns?/async-io"] +mdns-tokio = ["dep:libp2p-mdns", "libp2p-mdns?/tokio"] mplex = ["dep:libp2p-mplex"] noise = ["dep:libp2p-noise"] ping = ["dep:libp2p-ping", "libp2p-metrics?/ping"] @@ -66,6 +68,7 @@ wasm-ext-websocket = ["wasm-ext", "libp2p-wasm-ext?/websocket"] websocket = ["dep:libp2p-websocket"] yamux = ["dep:libp2p-yamux"] secp256k1 = ["libp2p-core/secp256k1"] +rsa = ["libp2p-core/rsa"] serde = ["libp2p-core/serde", "libp2p-kad?/serde", "libp2p-gossipsub?/serde"] [package.metadata.docs.rs] @@ -79,26 +82,26 @@ getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature instant = "0.1.11" # Explicit dependency to be used in `wasm-bindgen` feature lazy_static = "1.2" -libp2p-autonat = { version = "0.6.0", path = "protocols/autonat", optional = true } -libp2p-core = { version = "0.34.0", path = "core", default-features = false } -libp2p-dcutr = { version = "0.5.0", path = "protocols/dcutr", optional = true } -libp2p-floodsub = { version = "0.38.0", path = "protocols/floodsub", optional = true } -libp2p-identify = { version = "0.38.0", path = "protocols/identify", optional = true } -libp2p-kad = { version = 
"0.39.0", path = "protocols/kad", optional = true } -libp2p-metrics = { version = "0.8.0", path = "misc/metrics", optional = true } -libp2p-mplex = { version = "0.34.0", path = "muxers/mplex", optional = true } -libp2p-noise = { version = "0.37.0", path = "transports/noise", optional = true } -libp2p-ping = { version = "0.38.0", path = "protocols/ping", optional = true } -libp2p-plaintext = { version = "0.34.0", path = "transports/plaintext", optional = true } +libp2p-autonat = { version = "0.7.0", path = "protocols/autonat", optional = true } +libp2p-core = { version = "0.36.0", path = "core", default-features = false } +libp2p-dcutr = { version = "0.6.0", path = "protocols/dcutr", optional = true } +libp2p-floodsub = { version = "0.39.0", path = "protocols/floodsub", optional = true } +libp2p-identify = { version = "0.39.0", path = "protocols/identify", optional = true } +libp2p-kad = { version = "0.41.0", path = "protocols/kad", optional = true } +libp2p-metrics = { version = "0.10.0", path = "misc/metrics", optional = true } +libp2p-mplex = { version = "0.36.0", path = "muxers/mplex", optional = true } +libp2p-noise = { version = "0.39.1", path = "transports/noise", optional = true } +libp2p-ping = { version = "0.39.0", path = "protocols/ping", optional = true } +libp2p-plaintext = { version = "0.36.0", path = "transports/plaintext", optional = true } libp2p-pnet = { version = "0.22.0", path = "transports/pnet", optional = true } -libp2p-relay = { version = "0.11.0", path = "protocols/relay", optional = true } -libp2p-rendezvous = { version = "0.8.0", path = "protocols/rendezvous", optional = true } -libp2p-request-response = { version = "0.20.0", path = "protocols/request-response", optional = true } -libp2p-swarm = { version = "0.38.0", path = "swarm" } -libp2p-swarm-derive = { version = "0.28.0", path = "swarm-derive" } -libp2p-uds = { version = "0.33.0", path = "transports/uds", optional = true } -libp2p-wasm-ext = { version = "0.34.0", path = "transports/wasm-ext", default-features = false, optional = true } -libp2p-yamux = { version = "0.38.0", path = "muxers/yamux", optional = true } +libp2p-relay = { version = "0.12.0", path = "protocols/relay", optional = true } +libp2p-rendezvous = { version = "0.9.0", path = "protocols/rendezvous", optional = true } +libp2p-request-response = { version = "0.21.0", path = "protocols/request-response", optional = true } +libp2p-swarm = { version = "0.39.0", path = "swarm" } +libp2p-swarm-derive = { version = "0.30.1", path = "swarm-derive" } +libp2p-uds = { version = "0.35.0", path = "transports/uds", optional = true } +libp2p-wasm-ext = { version = "0.36.0", path = "transports/wasm-ext", default-features = false, optional = true } +libp2p-yamux = { version = "0.40.0", path = "muxers/yamux", optional = true } multiaddr = { version = "0.14.0" } parking_lot = "0.12.0" pin-project = "1.0.0" @@ -106,15 +109,15 @@ rand = "0.7.3" # Explicit dependency to be used in `wasm-bindgen` feature smallvec = "1.6.1" [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] -libp2p-deflate = { version = "0.34.0", path = "transports/deflate", optional = true } -libp2p-dns = { version = "0.34.0", path = "transports/dns", optional = true, default-features = false } -libp2p-mdns = { version = "0.39.0", path = "protocols/mdns", optional = true } +libp2p-deflate = { version = "0.36.0", path = "transports/deflate", optional = true } +libp2p-dns = { version = "0.36.0", path = "transports/dns", optional = true, 
default-features = false } +libp2p-mdns = { version = "0.40.0", path = "protocols/mdns", optional = true, default-features = false } libp2p-quic = { version = "0.7.0", path = "transports/quic", optional = true } -libp2p-tcp = { version = "0.34.0", path = "transports/tcp", default-features = false, optional = true } -libp2p-websocket = { version = "0.36.0", path = "transports/websocket", optional = true } +libp2p-tcp = { version = "0.37.0", path = "transports/tcp", default-features = false, optional = true } +libp2p-websocket = { version = "0.38.0", path = "transports/websocket", optional = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] -libp2p-gossipsub = { version = "0.40.0", path = "protocols/gossipsub", optional = true } +libp2p-gossipsub = { version = "0.41.0", path = "protocols/gossipsub", optional = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } @@ -164,7 +167,7 @@ required-features = ["floodsub"] [[example]] name = "chat-tokio" -required-features = ["tcp-tokio", "mdns"] +required-features = ["tcp-tokio", "mdns-tokio"] [[example]] name = "file-sharing" diff --git a/README.md b/README.md index fb45ac38c10..b2e5a7d9bac 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ [![dependency status](https://deps.rs/repo/github/libp2p/rust-libp2p/status.svg?style=flat-square)](https://deps.rs/repo/github/libp2p/rust-libp2p) +[![Crates.io](https://img.shields.io/crates/v/libp2p.svg)](https://crates.io/crates/libp2p) +[![docs.rs](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/libp2p) This repository is the central place for Rust development of the [libp2p](https://libp2p.io) spec. @@ -12,7 +14,7 @@ This repository is the central place for Rust development of the [libp2p](https: - The **[examples](examples)** folder contains small binaries showcasing the many protocols in this repository. -- For **security related issues** please reach out to security@ipfs.io. Please +- For **security related issues** please reach out to security@libp2p.io. Please do not file a public issue on GitHub. - To **report bugs, suggest improvements or request new features** please open a diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..4db2a630818 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +## Supported Versions + +By default we provide security patches for the latest released version only. On request we patch older versions. + +## Reporting a Vulnerability + +Please reach out to security@libp2p.io. Please do not file a public issue on GitHub. diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000000..f66cc0ac2da --- /dev/null +++ b/clippy.toml @@ -0,0 +1,3 @@ +disallowed-methods = [ + { path = "futures::channel::mpsc::unbounded", reason = "does not enforce backpressure" }, +] diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 442499f118a..4a5e07f14f8 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,6 +1,40 @@ +# 0.36.0 + +- Make RSA keypair support optional. To enable RSA support, `rsa` feature should be enabled. + See [PR 2860]. + +- Add `ReadyUpgrade`. See [PR 2855]. + +[PR 2855]: https://github.com/libp2p/rust-libp2p/pull/2855 +[PR 2860]: https://github.com/libp2p/rust-libp2p/pull/2860/ + +# 0.35.1 + +- Update to `p256` `v0.11.0`. See [PR 2636]. + +[PR 2636]: https://github.com/libp2p/rust-libp2p/pull/2636/ + +# 0.35.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. 
+ Thus you will need protoc installed locally. See [PR 2788]. +- Drop `Unpin` requirement from `SubstreamBox`. See [PR 2762] and [PR 2776]. +- Drop `Sync` requirement on `StreamMuxer` for constructing `StreamMuxerBox`. See [PR 2775]. +- Use `Pin<&mut Self>` as the receiver type for all `StreamMuxer` poll functions. See [PR 2765]. +- Change `StreamMuxer` interface to be entirely poll-based. All functions on `StreamMuxer` now + require a `Context` and return `Poll`. This gives callers fine-grained control over what they + would like to make progress on. See [PR 2724] and [PR 2797]. + +[PR 2724]: https://github.com/libp2p/rust-libp2p/pull/2724 +[PR 2762]: https://github.com/libp2p/rust-libp2p/pull/2762 +[PR 2775]: https://github.com/libp2p/rust-libp2p/pull/2775 +[PR 2776]: https://github.com/libp2p/rust-libp2p/pull/2776 +[PR 2765]: https://github.com/libp2p/rust-libp2p/pull/2765 +[PR 2797]: https://github.com/libp2p/rust-libp2p/pull/2797 +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.34.0 -- Introduce `StreamMuxerEvent::map_inbound_stream`. See [PR 2691]. - Remove `{read,write,flush,shutdown,destroy}_substream` functions from `StreamMuxer` trait in favor of forcing `StreamMuxer::Substream` to implement `AsyncRead + AsyncWrite`. See [PR 2707]. - Replace `Into` bound on `StreamMuxer::Error` with `std::error::Error`. See [PR 2710]. diff --git a/core/Cargo.toml b/core/Cargo.toml index deb6479e433..0970b3e74d6 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = "1.56.1" description = "Core traits and structs of libp2p" -version = "0.34.0" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -25,10 +25,10 @@ log = "0.4" multiaddr = { version = "0.14.0" } multihash = { version = "0.16", default-features = false, features = ["std", "multihash-impl", "identity", "sha2"] } multistream-select = { version = "0.11", path = "../misc/multistream-select" } -p256 = { version = "0.10.0", default-features = false, features = ["ecdsa"], optional = true } +p256 = { version = "0.11.1", default-features = false, features = ["ecdsa"], optional = true } parking_lot = "0.12.0" pin-project = "1.0.0" -prost = "0.10" +prost = "0.11" rand = "0.8" rw-stream-sink = { version = "0.3.0", path = "../misc/rw-stream-sink" } sha2 = "0.10.0" @@ -37,15 +37,15 @@ thiserror = "1.0" unsigned-varint = "0.7" void = "1" zeroize = "1" -_serde = { package = "serde", version = "1", optional = true, features = ["derive"] } +serde = { version = "1", optional = true, features = ["derive"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false } +ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true} [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } base64 = "0.13.0" -criterion = "0.3" +criterion = "0.4" libp2p-mplex = { path = "../muxers/mplex" } libp2p-noise = { path = "../transports/noise" } libp2p-tcp = { path = "../transports/tcp" } @@ -56,13 +56,14 @@ rmp-serde = "1.0" serde_json = "1.0" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [features] default = [ "secp256k1", "ecdsa" ] secp256k1 = [ "libsecp256k1" ] ecdsa = [ "p256" ] -serde = ["multihash/serde-codec", "_serde"] +rsa = [ "dep:ring" ] +serde = ["multihash/serde-codec", "dep:serde"] [[bench]] name = "peer_id" diff --git a/core/src/either.rs 
b/core/src/either.rs index bce6e05aadf..a34552bf28f 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -18,8 +18,9 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use crate::muxing::StreamMuxerEvent; use crate::{ - muxing::{StreamMuxer, StreamMuxerEvent}, + muxing::StreamMuxer, transport::{ListenerId, Transport, TransportError, TransportEvent}, Multiaddr, ProtocolName, }; @@ -202,78 +203,58 @@ where B: StreamMuxer, { type Substream = EitherOutput; - type OutboundSubstream = EitherOutbound; type Error = EitherError; - fn poll_event( - &self, + fn poll_inbound( + self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - match self { - EitherOutput::First(inner) => inner - .poll_event(cx) - .map_err(EitherError::A) - .map_ok(|event| event.map_inbound_stream(EitherOutput::First)), - EitherOutput::Second(inner) => inner - .poll_event(cx) - .map_err(EitherError::B) - .map_ok(|event| event.map_inbound_stream(EitherOutput::Second)), - } - } - - fn open_outbound(&self) -> Self::OutboundSubstream { - match self { - EitherOutput::First(inner) => EitherOutbound::A(inner.open_outbound()), - EitherOutput::Second(inner) => EitherOutbound::B(inner.open_outbound()), + ) -> Poll> { + match self.project() { + EitherOutputProj::First(inner) => inner + .poll_inbound(cx) + .map_ok(EitherOutput::First) + .map_err(EitherError::A), + EitherOutputProj::Second(inner) => inner + .poll_inbound(cx) + .map_ok(EitherOutput::Second) + .map_err(EitherError::B), } } fn poll_outbound( - &self, + self: Pin<&mut Self>, cx: &mut Context<'_>, - substream: &mut Self::OutboundSubstream, ) -> Poll> { - match (self, substream) { - (EitherOutput::First(ref inner), EitherOutbound::A(ref mut substream)) => inner - .poll_outbound(cx, substream) - .map(|p| p.map(EitherOutput::First)) + match self.project() { + EitherOutputProj::First(inner) => inner + .poll_outbound(cx) + .map_ok(EitherOutput::First) .map_err(EitherError::A), - (EitherOutput::Second(ref inner), EitherOutbound::B(ref mut substream)) => inner - .poll_outbound(cx, substream) - .map(|p| p.map(EitherOutput::Second)) + EitherOutputProj::Second(inner) => inner + .poll_outbound(cx) + .map_ok(EitherOutput::Second) .map_err(EitherError::B), - _ => panic!("Wrong API usage"), } } - fn destroy_outbound(&self, substream: Self::OutboundSubstream) { - match self { - EitherOutput::First(inner) => match substream { - EitherOutbound::A(substream) => inner.destroy_outbound(substream), - _ => panic!("Wrong API usage"), - }, - EitherOutput::Second(inner) => match substream { - EitherOutbound::B(substream) => inner.destroy_outbound(substream), - _ => panic!("Wrong API usage"), - }, + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.project() { + EitherOutputProj::First(inner) => inner.poll_close(cx).map_err(EitherError::A), + EitherOutputProj::Second(inner) => inner.poll_close(cx).map_err(EitherError::B), } } - fn poll_close(&self, cx: &mut Context<'_>) -> Poll> { - match self { - EitherOutput::First(inner) => inner.poll_close(cx).map_err(EitherError::A), - EitherOutput::Second(inner) => inner.poll_close(cx).map_err(EitherError::B), + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.project() { + EitherOutputProj::First(inner) => inner.poll(cx).map_err(EitherError::A), + EitherOutputProj::Second(inner) => inner.poll(cx).map_err(EitherError::B), } } } -#[derive(Debug, Copy, Clone)] -#[must_use = "futures do nothing unless polled"] -pub enum 
EitherOutbound { - A(A::OutboundSubstream), - B(B::OutboundSubstream), -} - /// Implements `Future` and dispatches all method calls to either `First` or `Second`. #[pin_project(project = EitherFutureProj)] #[derive(Debug, Copy, Clone)] diff --git a/core/src/identity.rs b/core/src/identity.rs index ee431e7ee9c..73be1c78b57 100644 --- a/core/src/identity.rs +++ b/core/src/identity.rs @@ -35,7 +35,7 @@ #[cfg(feature = "ecdsa")] pub mod ecdsa; pub mod ed25519; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub mod rsa; #[cfg(feature = "secp256k1")] pub mod secp256k1; @@ -64,11 +64,12 @@ use std::convert::{TryFrom, TryInto}; /// ``` /// #[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] pub enum Keypair { /// An Ed25519 keypair. Ed25519(ed25519::Keypair), - #[cfg(not(target_arch = "wasm32"))] /// An RSA keypair. + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Rsa(rsa::Keypair), /// A Secp256k1 keypair. #[cfg(feature = "secp256k1")] @@ -100,7 +101,7 @@ impl Keypair { /// format (i.e. unencrypted) as defined in [RFC5208]. /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { rsa::Keypair::from_pkcs8(pkcs8_der).map(Keypair::Rsa) } @@ -121,7 +122,7 @@ impl Keypair { use Keypair::*; match self { Ed25519(ref pair) => Ok(pair.sign(msg)), - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Rsa(ref pair) => pair.sign(msg), #[cfg(feature = "secp256k1")] Secp256k1(ref pair) => pair.secret().sign(msg), @@ -135,7 +136,7 @@ impl Keypair { use Keypair::*; match self { Ed25519(pair) => PublicKey::Ed25519(pair.public()), - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Rsa(pair) => PublicKey::Rsa(pair.public()), #[cfg(feature = "secp256k1")] Secp256k1(pair) => PublicKey::Secp256k1(pair.public().clone()), @@ -153,7 +154,7 @@ impl Keypair { r#type: keys_proto::KeyType::Ed25519.into(), data: data.encode().into(), }, - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Self::Rsa(_) => { return Err(DecodingError::new( "Encoding RSA key into Protobuf is unsupported", @@ -217,7 +218,7 @@ impl zeroize::Zeroize for keys_proto::PrivateKey { pub enum PublicKey { /// A public Ed25519 key. Ed25519(ed25519::PublicKey), - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] /// A public RSA key. 
Rsa(rsa::PublicKey), #[cfg(feature = "secp256k1")] @@ -238,7 +239,7 @@ impl PublicKey { use PublicKey::*; match self { Ed25519(pk) => pk.verify(msg, sig), - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Rsa(pk) => pk.verify(msg, sig), #[cfg(feature = "secp256k1")] Secp256k1(pk) => pk.verify(msg, sig), @@ -285,7 +286,7 @@ impl From<&PublicKey> for keys_proto::PublicKey { r#type: keys_proto::KeyType::Ed25519 as i32, data: key.encode().to_vec(), }, - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] PublicKey::Rsa(key) => keys_proto::PublicKey { r#type: keys_proto::KeyType::Rsa as i32, data: key.encode_x509(), @@ -315,11 +316,11 @@ impl TryFrom for PublicKey { keys_proto::KeyType::Ed25519 => { ed25519::PublicKey::decode(&pubkey.data).map(PublicKey::Ed25519) } - #[cfg(not(target_arch = "wasm32"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] keys_proto::KeyType::Rsa => { rsa::PublicKey::decode_x509(&pubkey.data).map(PublicKey::Rsa) } - #[cfg(target_arch = "wasm32")] + #[cfg(any(not(feature = "rsa"), target_arch = "wasm32"))] keys_proto::KeyType::Rsa => { log::debug!("support for RSA was disabled at compile-time"); Err(DecodingError::new("Unsupported")) diff --git a/core/src/identity/ecdsa.rs b/core/src/identity/ecdsa.rs index b883243b13b..81dfec4b4e0 100644 --- a/core/src/identity/ecdsa.rs +++ b/core/src/identity/ecdsa.rs @@ -157,7 +157,7 @@ impl PublicKey { let buf = Self::del_asn1_header(k).ok_or_else(|| { DecodingError::new("failed to parse asn.1 encoded ecdsa p256 public key") })?; - Self::from_bytes(&buf) + Self::from_bytes(buf) } // ecPublicKey (ANSI X9.62 public key type) OID: 1.2.840.10045.2.1 @@ -198,8 +198,8 @@ impl PublicKey { if asn1_head[0] != 0x30 || asn1_head[2] != 0x30 || asn1_head[3] as usize != oids_len - || &oids_buf[..Self::EC_PUBLIC_KEY_OID.len()] != &Self::EC_PUBLIC_KEY_OID - || &oids_buf[Self::EC_PUBLIC_KEY_OID.len()..] != &Self::SECP_256_R1_OID + || oids_buf[..Self::EC_PUBLIC_KEY_OID.len()] != Self::EC_PUBLIC_KEY_OID + || oids_buf[Self::EC_PUBLIC_KEY_OID.len()..] != Self::SECP_256_R1_OID || bitstr_head[0] != 0x03 || bitstr_head[2] != 0x00 { diff --git a/core/src/identity/error.rs b/core/src/identity/error.rs index 76f41278d5d..32c7edc55a4 100644 --- a/core/src/identity/error.rs +++ b/core/src/identity/error.rs @@ -67,6 +67,7 @@ pub struct SigningError { /// An error during encoding of key material. impl SigningError { + #[cfg(any(feature = "secp256k1", feature = "rsa"))] pub(crate) fn new(msg: S) -> Self { Self { msg: msg.to_string(), @@ -74,6 +75,7 @@ impl SigningError { } } + #[cfg(feature = "rsa")] pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { Self { source: Some(Box::new(source)), diff --git a/core/src/lib.rs b/core/src/lib.rs index 315e20bc8cd..ac55537eb0b 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -35,9 +35,7 @@ //! define how to upgrade each individual substream to use a protocol. //! See the `upgrade` module. 
-#[cfg(feature = "serde")] -extern crate _serde as serde; - +#[allow(clippy::derive_partial_eq_without_eq)] mod keys_proto { include!(concat!(env!("OUT_DIR"), "/keys_proto.rs")); } @@ -46,6 +44,7 @@ mod envelope_proto { include!(concat!(env!("OUT_DIR"), "/envelope_proto.rs")); } +#[allow(clippy::derive_partial_eq_without_eq)] mod peer_record_proto { include!(concat!(env!("OUT_DIR"), "/peer_record_proto.rs")); } diff --git a/core/src/muxing.rs b/core/src/muxing.rs index 050d8d1bd35..9763436e94a 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -52,6 +52,8 @@ use futures::{task::Context, task::Poll, AsyncRead, AsyncWrite}; use multiaddr::Multiaddr; +use std::future::Future; +use std::pin::Pin; pub use self::boxed::StreamMuxerBox; pub use self::boxed::SubstreamBox; @@ -63,107 +65,153 @@ mod singleton; /// Provides multiplexing for a connection by allowing users to open substreams. /// /// A substream created by a [`StreamMuxer`] is a type that implements [`AsyncRead`] and [`AsyncWrite`]. -/// -/// Inbound substreams are reported via [`StreamMuxer::poll_event`]. -/// Outbound substreams can be opened via [`StreamMuxer::open_outbound`] and subsequent polling via -/// [`StreamMuxer::poll_outbound`]. +/// The [`StreamMuxer`] itself is modelled closely after [`AsyncWrite`]. It features `poll`-style +/// functions that allow the implementation to make progress on various tasks. pub trait StreamMuxer { /// Type of the object that represents the raw substream where data can be read and written. type Substream: AsyncRead + AsyncWrite; - /// Future that will be resolved when the outgoing substream is open. - type OutboundSubstream; - /// Error type of the muxer type Error: std::error::Error; - /// Polls for a connection-wide event. - /// - /// This function behaves the same as a `Stream`. + /// Poll for new inbound substreams. /// - /// If `Pending` is returned, then the current task will be notified once the muxer - /// is ready to be polled, similar to the API of `Stream::poll()`. - /// Only the latest task that was used to call this method may be notified. - /// - /// It is permissible and common to use this method to perform background - /// work, such as processing incoming packets and polling timers. - /// - /// An error can be generated if the connection has been closed. - fn poll_event( - &self, + /// This function should be called whenever callers are ready to accept more inbound streams. In + /// other words, callers may exercise back-pressure on incoming streams by not calling this + /// function if a certain limit is hit. + fn poll_inbound( + self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>>; - - /// Opens a new outgoing substream, and produces the equivalent to a future that will be - /// resolved when it becomes available. - /// - /// The API of `OutboundSubstream` is totally opaque, and the object can only be interfaced - /// through the methods on the `StreamMuxer` trait. - fn open_outbound(&self) -> Self::OutboundSubstream; + ) -> Poll>; - /// Polls the outbound substream. - /// - /// If `Pending` is returned, then the current task will be notified once the substream - /// is ready to be polled, similar to the API of `Future::poll()`. - /// However, for each individual outbound substream, only the latest task that was used to - /// call this method may be notified. - /// - /// May panic or produce an undefined result if an earlier polling of the same substream - /// returned `Ready` or `Err`. + /// Poll for a new, outbound substream. 
fn poll_outbound( - &self, + self: Pin<&mut Self>, cx: &mut Context<'_>, - s: &mut Self::OutboundSubstream, ) -> Poll>; - /// Destroys an outbound substream future. Use this after the outbound substream has finished, - /// or if you want to interrupt it. - fn destroy_outbound(&self, s: Self::OutboundSubstream); - - /// Closes this `StreamMuxer`. + /// Poll to close this [`StreamMuxer`]. /// - /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless. All - /// subsequent reads must return either `EOF` or an error. All subsequent writes, shutdowns, - /// or polls must generate an error or be ignored. + /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless and may be safely + /// dropped. /// /// > **Note**: You are encouraged to call this method and wait for it to return `Ready`, so /// > that the remote is properly informed of the shutdown. However, apart from /// > properly informing the remote, there is no difference between this and /// > immediately dropping the muxer. - fn poll_close(&self, cx: &mut Context<'_>) -> Poll>; -} - -/// Event about a connection, reported by an implementation of [`StreamMuxer`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum StreamMuxerEvent { - /// Remote has opened a new substream. Contains the substream in question. - InboundSubstream(T), + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - /// Address to the remote has changed. The previous one is now obsolete. + /// Poll to allow the underlying connection to make progress. /// - /// > **Note**: This can for example happen when using the QUIC protocol, where the two nodes - /// > can change their IP address while retaining the same QUIC connection. + /// In contrast to all other `poll`-functions on [`StreamMuxer`], this function MUST be called + /// unconditionally. Because it will be called regardless, this function can be used by + /// implementations to return events about the underlying connection that the caller MUST deal + /// with. + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>; +} + +/// An event produced by a [`StreamMuxer`]. +pub enum StreamMuxerEvent { + /// The address of the remote has changed. AddressChange(Multiaddr), } -impl StreamMuxerEvent { - /// If `self` is a [`StreamMuxerEvent::InboundSubstream`], returns the content. Otherwise - /// returns `None`. - pub fn into_inbound_substream(self) -> Option { - if let StreamMuxerEvent::InboundSubstream(s) = self { - Some(s) - } else { - None - } +/// Extension trait for [`StreamMuxer`]. +pub trait StreamMuxerExt: StreamMuxer + Sized { + /// Convenience function for calling [`StreamMuxer::poll_inbound`] for [`StreamMuxer`]s that are `Unpin`. + fn poll_inbound_unpin( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> + where + Self: Unpin, + { + Pin::new(self).poll_inbound(cx) + } + + /// Convenience function for calling [`StreamMuxer::poll_outbound`] for [`StreamMuxer`]s that are `Unpin`. + fn poll_outbound_unpin( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> + where + Self: Unpin, + { + Pin::new(self).poll_outbound(cx) + } + + /// Convenience function for calling [`StreamMuxer::poll`] for [`StreamMuxer`]s that are `Unpin`. + fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> + where + Self: Unpin, + { + Pin::new(self).poll(cx) + } + + /// Convenience function for calling [`StreamMuxer::poll_close`] for [`StreamMuxer`]s that are `Unpin`. 
+ fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll> + where + Self: Unpin, + { + Pin::new(self).poll_close(cx) + } + + /// Returns a future that resolves to the next inbound `Substream` opened by the remote. + fn next_inbound(&mut self) -> NextInbound<'_, Self> { + NextInbound(self) + } + + /// Returns a future that opens a new outbound `Substream` with the remote. + fn next_outbound(&mut self) -> NextOutbound<'_, Self> { + NextOutbound(self) + } + + /// Returns a future for closing this [`StreamMuxer`]. + fn close(self) -> Close { + Close(self) + } +} + +impl StreamMuxerExt for S where S: StreamMuxer {} + +pub struct NextInbound<'a, S>(&'a mut S); + +pub struct NextOutbound<'a, S>(&'a mut S); + +pub struct Close(S); + +impl<'a, S> Future for NextInbound<'a, S> +where + S: StreamMuxer + Unpin, +{ + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.0.poll_inbound_unpin(cx) } +} + +impl<'a, S> Future for NextOutbound<'a, S> +where + S: StreamMuxer + Unpin, +{ + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.0.poll_outbound_unpin(cx) + } +} + +impl Future for Close +where + S: StreamMuxer + Unpin, +{ + type Output = Result<(), S::Error>; - /// Map the stream within [`StreamMuxerEvent::InboundSubstream`] to a new type. - pub fn map_inbound_stream(self, map: impl FnOnce(T) -> O) -> StreamMuxerEvent { - match self { - StreamMuxerEvent::InboundSubstream(stream) => { - StreamMuxerEvent::InboundSubstream(map(stream)) - } - StreamMuxerEvent::AddressChange(addr) => StreamMuxerEvent::AddressChange(addr), - } + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.0.poll_close_unpin(cx) } } diff --git a/core/src/muxing/boxed.rs b/core/src/muxing/boxed.rs index ad39ef0532d..99f7a87c6a5 100644 --- a/core/src/muxing/boxed.rs +++ b/core/src/muxing/boxed.rs @@ -1,94 +1,74 @@ -use crate::muxing::StreamMuxerEvent; -use crate::StreamMuxer; -use fnv::FnvHashMap; -use futures::{ready, AsyncRead, AsyncWrite}; -use parking_lot::Mutex; +use crate::muxing::{StreamMuxer, StreamMuxerEvent}; +use futures::{AsyncRead, AsyncWrite}; +use pin_project::pin_project; use std::error::Error; use std::fmt; use std::io; use std::io::{IoSlice, IoSliceMut}; use std::pin::Pin; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; /// Abstract `StreamMuxer`. pub struct StreamMuxerBox { - inner: Box< - dyn StreamMuxer - + Send - + Sync, - >, + inner: Pin + Send>>, } /// Abstract type for asynchronous reading and writing. /// /// A [`SubstreamBox`] erases the concrete type it is given and only retains its `AsyncRead` /// and `AsyncWrite` capabilities. -pub struct SubstreamBox(Box); +pub struct SubstreamBox(Pin>); +#[pin_project] struct Wrap where T: StreamMuxer, { + #[pin] inner: T, - outbound: Mutex>, - next_outbound: AtomicUsize, } impl StreamMuxer for Wrap where T: StreamMuxer, - T::Substream: Send + Unpin + 'static, + T::Substream: Send + 'static, T::Error: Send + Sync + 'static, { type Substream = SubstreamBox; - type OutboundSubstream = usize; // TODO: use a newtype type Error = io::Error; - #[inline] - fn poll_event( - &self, + fn poll_inbound( + self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let event = ready!(self.inner.poll_event(cx).map_err(into_io_error)?) 
- .map_inbound_stream(SubstreamBox::new); - - Poll::Ready(Ok(event)) - } - - #[inline] - fn open_outbound(&self) -> Self::OutboundSubstream { - let outbound = self.inner.open_outbound(); - let id = self.next_outbound.fetch_add(1, Ordering::Relaxed); - self.outbound.lock().insert(id, outbound); - id + ) -> Poll> { + self.project() + .inner + .poll_inbound(cx) + .map_ok(SubstreamBox::new) + .map_err(into_io_error) } - #[inline] fn poll_outbound( - &self, + self: Pin<&mut Self>, cx: &mut Context<'_>, - substream: &mut Self::OutboundSubstream, ) -> Poll> { - let mut list = self.outbound.lock(); - let stream = ready!(self + self.project() .inner - .poll_outbound(cx, list.get_mut(substream).unwrap()) - .map_err(into_io_error)?); - - Poll::Ready(Ok(SubstreamBox::new(stream))) + .poll_outbound(cx) + .map_ok(SubstreamBox::new) + .map_err(into_io_error) } #[inline] - fn destroy_outbound(&self, substream: Self::OutboundSubstream) { - let mut list = self.outbound.lock(); - self.inner - .destroy_outbound(list.remove(&substream).unwrap()) + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().inner.poll_close(cx).map_err(into_io_error) } - #[inline] - fn poll_close(&self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_close(cx).map_err(into_io_error) + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.project().inner.poll(cx).map_err(into_io_error) } } @@ -103,65 +83,59 @@ impl StreamMuxerBox { /// Turns a stream muxer into a `StreamMuxerBox`. pub fn new(muxer: T) -> StreamMuxerBox where - T: StreamMuxer + Send + Sync + 'static, - T::OutboundSubstream: Send, - T::Substream: Send + Unpin + 'static, + T: StreamMuxer + Send + 'static, + T::Substream: Send + 'static, T::Error: Send + Sync + 'static, { - let wrap = Wrap { - inner: muxer, - outbound: Mutex::new(Default::default()), - next_outbound: AtomicUsize::new(0), - }; + let wrap = Wrap { inner: muxer }; StreamMuxerBox { - inner: Box::new(wrap), + inner: Box::pin(wrap), } } + + fn project( + self: Pin<&mut Self>, + ) -> Pin<&mut (dyn StreamMuxer + Send)> { + self.get_mut().inner.as_mut() + } } impl StreamMuxer for StreamMuxerBox { type Substream = SubstreamBox; - type OutboundSubstream = usize; // TODO: use a newtype type Error = io::Error; - #[inline] - fn poll_event( - &self, + fn poll_inbound( + self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - self.inner.poll_event(cx) - } - - #[inline] - fn open_outbound(&self) -> Self::OutboundSubstream { - self.inner.open_outbound() + ) -> Poll> { + self.project().poll_inbound(cx) } - #[inline] fn poll_outbound( - &self, + self: Pin<&mut Self>, cx: &mut Context<'_>, - s: &mut Self::OutboundSubstream, ) -> Poll> { - self.inner.poll_outbound(cx, s) + self.project().poll_outbound(cx) } #[inline] - fn destroy_outbound(&self, substream: Self::OutboundSubstream) { - self.inner.destroy_outbound(substream) + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().poll_close(cx) } - #[inline] - fn poll_close(&self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_close(cx) + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.project().poll(cx) } } impl SubstreamBox { /// Construct a new [`SubstreamBox`] from something that implements [`AsyncRead`] and [`AsyncWrite`]. 
- pub fn new(stream: S) -> Self { - Self(Box::new(stream)) + pub fn new(stream: S) -> Self { + Self(Box::pin(stream)) } } @@ -172,7 +146,7 @@ impl fmt::Debug for SubstreamBox { } /// Workaround because Rust does not allow `Box`. -trait AsyncReadWrite: AsyncRead + AsyncWrite + Unpin { +trait AsyncReadWrite: AsyncRead + AsyncWrite { /// Helper function to capture the erased inner type. /// /// Used to make the [`Debug`] implementation of [`SubstreamBox`] more useful. @@ -181,7 +155,7 @@ trait AsyncReadWrite: AsyncRead + AsyncWrite + Unpin { impl AsyncReadWrite for S where - S: AsyncRead + AsyncWrite + Unpin, + S: AsyncRead + AsyncWrite, { fn type_name(&self) -> &'static str { std::any::type_name::() @@ -190,44 +164,44 @@ where impl AsyncRead for SubstreamBox { fn poll_read( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_read(cx, buf) + self.0.as_mut().poll_read(cx, buf) } fn poll_read_vectored( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &mut [IoSliceMut<'_>], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_read_vectored(cx, bufs) + self.0.as_mut().poll_read_vectored(cx, bufs) } } impl AsyncWrite for SubstreamBox { fn poll_write( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_write(cx, buf) + self.0.as_mut().poll_write(cx, buf) } fn poll_write_vectored( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_write_vectored(cx, bufs) + self.0.as_mut().poll_write_vectored(cx, bufs) } - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_flush(cx) + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.0.as_mut().poll_flush(cx) } - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().0).poll_close(cx) + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.0.as_mut().poll_close(cx) } } diff --git a/core/src/muxing/singleton.rs b/core/src/muxing/singleton.rs index c461ed00fc3..3ba2c1cb366 100644 --- a/core/src/muxing/singleton.rs +++ b/core/src/muxing/singleton.rs @@ -18,13 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{ - connection::Endpoint, - muxing::{StreamMuxer, StreamMuxerEvent}, -}; +use crate::connection::Endpoint; +use crate::muxing::{StreamMuxer, StreamMuxerEvent}; use futures::prelude::*; use std::cell::Cell; +use std::pin::Pin; use std::{io, task::Context, task::Poll}; /// Implementation of `StreamMuxer` that allows only one substream on top of a connection, @@ -52,57 +51,51 @@ impl SingletonMuxer { } } -/// Outbound substream attempt of the `SingletonMuxer`. 
-pub struct OutboundSubstream {} - impl StreamMuxer for SingletonMuxer where TSocket: AsyncRead + AsyncWrite + Unpin, { type Substream = TSocket; - type OutboundSubstream = OutboundSubstream; type Error = io::Error; - fn poll_event( - &self, + fn poll_inbound( + self: Pin<&mut Self>, _: &mut Context<'_>, - ) -> Poll, io::Error>> { - match self.endpoint { - Endpoint::Dialer => return Poll::Pending, - Endpoint::Listener => {} - } + ) -> Poll> { + let this = self.get_mut(); - if let Some(stream) = self.inner.replace(None) { - Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(stream))) - } else { - Poll::Pending + match this.endpoint { + Endpoint::Dialer => Poll::Pending, + Endpoint::Listener => match this.inner.replace(None) { + None => Poll::Pending, + Some(stream) => Poll::Ready(Ok(stream)), + }, } } - fn open_outbound(&self) -> Self::OutboundSubstream { - OutboundSubstream {} - } - fn poll_outbound( - &self, + self: Pin<&mut Self>, _: &mut Context<'_>, - _: &mut Self::OutboundSubstream, - ) -> Poll> { - match self.endpoint { - Endpoint::Listener => return Poll::Pending, - Endpoint::Dialer => {} - } + ) -> Poll> { + let this = self.get_mut(); - if let Some(stream) = self.inner.replace(None) { - Poll::Ready(Ok(stream)) - } else { - Poll::Pending + match this.endpoint { + Endpoint::Listener => Poll::Pending, + Endpoint::Dialer => match this.inner.replace(None) { + None => Poll::Pending, + Some(stream) => Poll::Ready(Ok(stream)), + }, } } - fn destroy_outbound(&self, _: Self::OutboundSubstream) {} - - fn poll_close(&self, _cx: &mut Context<'_>) -> Poll> { + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } + + fn poll( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } } diff --git a/core/src/peer_id.rs b/core/src/peer_id.rs index d5e4e1496bd..cbe0a13395c 100644 --- a/core/src/peer_id.rs +++ b/core/src/peer_id.rs @@ -183,7 +183,7 @@ impl From for Vec { impl Serialize for PeerId { fn serialize(&self, serializer: S) -> Result where - S: _serde::Serializer, + S: serde::Serializer, { if serializer.is_human_readable() { serializer.serialize_str(&self.to_base58()) diff --git a/core/src/peer_record.rs b/core/src/peer_record.rs index f86df68f957..d0d8e21a4b5 100644 --- a/core/src/peer_record.rs +++ b/core/src/peer_record.rs @@ -13,7 +13,7 @@ const DOMAIN_SEP: &str = "libp2p-routing-state"; /// /// Peer records are designed to be distributable and carry a signature by being wrapped in a signed envelope. /// For more information see RFC0003 of the libp2p specifications: -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct PeerRecord { peer_id: PeerId, seq: u64, diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 94e94316473..33bfdf2d4f4 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -8,7 +8,7 @@ use unsigned_varint::encode::usize_buffer; /// A signed envelope contains an arbitrary byte string payload, a signature of the payload, and the public key that can be used to verify the signature. 
/// /// For more details see libp2p RFC0002: -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct SignedEnvelope { key: PublicKey, payload_type: Vec, diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index c872ec955e4..8fc0454794f 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -299,9 +299,8 @@ impl Multiplexed { T::Dial: Send + 'static, T::ListenerUpgrade: Send + 'static, T::Error: Send + Sync, - M: StreamMuxer + Send + Sync + 'static, - M::Substream: Send + Unpin + 'static, - M::OutboundSubstream: Send + 'static, + M: StreamMuxer + Send + 'static, + M::Substream: Send + 'static, M::Error: Send + Sync + 'static, { boxed(self.map(|(i, m), _| (i, StreamMuxerBox::new(m)))) diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 2cb3c060b90..de9ef765e16 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -64,6 +64,8 @@ mod error; mod from_fn; mod map; mod optional; +mod pending; +mod ready; mod select; mod transfer; @@ -77,6 +79,8 @@ pub use self::{ from_fn::{from_fn, FromFnUpgrade}, map::{MapInboundUpgrade, MapInboundUpgradeErr, MapOutboundUpgrade, MapOutboundUpgradeErr}, optional::OptionalUpgrade, + pending::PendingUpgrade, + ready::ReadyUpgrade, select::SelectUpgrade, transfer::{read_length_prefixed, read_varint, write_length_prefixed, write_varint}, }; diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs new file mode 100644 index 00000000000..15d3c31df48 --- /dev/null +++ b/core/src/upgrade/pending.rs @@ -0,0 +1,76 @@ +// Copyright 2022 Protocol Labs. +// Copyright 2017-2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; +use futures::future; +use std::iter; +use void::Void; + +/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always +/// returns a pending upgrade. +#[derive(Debug, Copy, Clone)] +pub struct PendingUpgrade

<P> {
+    protocol_name: P,
+}
+
+impl<P> PendingUpgrade<P> {
+    pub fn new(protocol_name: P) -> Self {
+        Self { protocol_name }
+    }
+}
+
+impl<P> UpgradeInfo for PendingUpgrade<P>
+where
+    P: ProtocolName + Clone,
+{
+    type Info = P;
+    type InfoIter = iter::Once<P>;
+
+    fn protocol_info(&self) -> Self::InfoIter {
+        iter::once(self.protocol_name.clone())
+    }
+}
+
+impl<C, P> InboundUpgrade<C> for PendingUpgrade<P>
+where
+    P: ProtocolName + Clone,
+{
+    type Output = Void;
+    type Error = Void;
+    type Future = future::Pending<Result<Self::Output, Self::Error>>;
+
+    fn upgrade_inbound(self, _: C, _: Self::Info) -> Self::Future {
+        future::pending()
+    }
+}
+
+impl<C, P> OutboundUpgrade<C> for PendingUpgrade<P>
+where
+    P: ProtocolName + Clone,
+{
+    type Output = Void;
+    type Error = Void;
+    type Future = future::Pending<Result<Self::Output, Self::Error>>;
+
+    fn upgrade_outbound(self, _: C, _: Self::Info) -> Self::Future {
+        future::pending()
+    }
+}
diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs
new file mode 100644
index 00000000000..16a9b2867f4
--- /dev/null
+++ b/core/src/upgrade/ready.rs
@@ -0,0 +1,75 @@
+// Copyright 2022 Protocol Labs.
+// Copyright 2017-2018 Parity Technologies (UK) Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use crate::upgrade::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
+use futures::future;
+use std::iter;
+use void::Void;
+
+/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream.
+#[derive(Debug, Copy, Clone)]
+pub struct ReadyUpgrade<P> {
+    protocol_name: P,
+}
+
+impl<P> ReadyUpgrade<P> {
+    pub fn new(protocol_name: P) -> Self {
+        Self { protocol_name }
+    }
+}
+
+impl<P> UpgradeInfo for ReadyUpgrade<P>
+where
+    P: ProtocolName + Clone,
+{
+    type Info = P;
+    type InfoIter = iter::Once<P>;
+
+    fn protocol_info(&self) -> Self::InfoIter {
+        iter::once(self.protocol_name.clone())
+    }
+}
+
+impl<C, P> InboundUpgrade<C> for ReadyUpgrade<P>
+where
+    P: ProtocolName + Clone,
+{
+    type Output = C;
+    type Error = Void;
+    type Future = future::Ready<Result<Self::Output, Self::Error>>;
+
+    fn upgrade_inbound(self, stream: C, _: Self::Info) -> Self::Future {
+        future::ready(Ok(stream))
+    }
+}
+
+impl<C, P> OutboundUpgrade<C> for ReadyUpgrade<P>
+where + P: ProtocolName + Clone, +{ + type Output = C; + type Error = Void; + type Future = future::Ready>; + + fn upgrade_outbound(self, stream: C, _: Self::Info) -> Self::Future { + future::ready(Ok(stream)) + } +} diff --git a/core/tests/serde.rs b/core/tests/serde.rs index 3bb98d4a5b6..35796902dd1 100644 --- a/core/tests/serde.rs +++ b/core/tests/serde.rs @@ -4,8 +4,6 @@ use std::str::FromStr; use libp2p_core::PeerId; -extern crate _serde as serde; - #[test] pub fn serialize_peer_id_json() { let peer_id = PeerId::from_str("12D3KooWRNw2pJC9748Fmq4WNV27HoSTcX3r37132FLkQMrbKAiC").unwrap(); diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index ecba64dfb2f..dac84534369 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -mod util; - use futures::prelude::*; use libp2p_core::identity; use libp2p_core::transport::{MemoryTransport, Transport}; @@ -81,40 +79,24 @@ where fn upgrade_pipeline() { let listener_keys = identity::Keypair::generate_ed25519(); let listener_id = listener_keys.public().to_peer_id(); - let listener_noise_keys = noise::Keypair::::new() - .into_authentic(&listener_keys) - .unwrap(); let mut listener_transport = MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(listener_noise_keys).into_authenticated()) + .authenticate(noise::NoiseAuthenticated::xx(&listener_keys).unwrap()) .apply(HelloUpgrade {}) .apply(HelloUpgrade {}) .apply(HelloUpgrade {}) .multiplex(MplexConfig::default()) - .and_then(|(peer, mplex), _| { - // Gracefully close the connection to allow protocol - // negotiation to complete. - util::CloseMuxer::new(mplex).map_ok(move |mplex| (peer, mplex)) - }) .boxed(); let dialer_keys = identity::Keypair::generate_ed25519(); let dialer_id = dialer_keys.public().to_peer_id(); - let dialer_noise_keys = noise::Keypair::::new() - .into_authentic(&dialer_keys) - .unwrap(); let mut dialer_transport = MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(dialer_noise_keys).into_authenticated()) + .authenticate(noise::NoiseAuthenticated::xx(&dialer_keys).unwrap()) .apply(HelloUpgrade {}) .apply(HelloUpgrade {}) .apply(HelloUpgrade {}) .multiplex(MplexConfig::default()) - .and_then(|(peer, mplex), _| { - // Gracefully close the connection to allow protocol - // negotiation to complete. 
- util::CloseMuxer::new(mplex).map_ok(move |mplex| (peer, mplex)) - }) .boxed(); let listen_addr1 = Multiaddr::from(Protocol::Memory(random::())); diff --git a/core/tests/util.rs b/core/tests/util.rs deleted file mode 100644 index 7ca52188a52..00000000000 --- a/core/tests/util.rs +++ /dev/null @@ -1,47 +0,0 @@ -#![allow(dead_code)] - -use futures::prelude::*; -use libp2p_core::muxing::StreamMuxer; -use std::{pin::Pin, task::Context, task::Poll}; - -pub struct CloseMuxer { - state: CloseMuxerState, -} - -impl CloseMuxer { - pub fn new(m: M) -> CloseMuxer { - CloseMuxer { - state: CloseMuxerState::Close(m), - } - } -} - -pub enum CloseMuxerState { - Close(M), - Done, -} - -impl Future for CloseMuxer -where - M: StreamMuxer, - M::Error: From, -{ - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - loop { - match std::mem::replace(&mut self.state, CloseMuxerState::Done) { - CloseMuxerState::Close(muxer) => { - if !muxer.poll_close(cx)?.is_ready() { - self.state = CloseMuxerState::Close(muxer); - return Poll::Pending; - } - return Poll::Ready(Ok(muxer)); - } - CloseMuxerState::Done => panic!(), - } - } - } -} - -impl Unpin for CloseMuxer {} diff --git a/deny.toml b/deny.toml new file mode 100644 index 00000000000..6634887128e --- /dev/null +++ b/deny.toml @@ -0,0 +1,144 @@ +# This section is considered when running `cargo deny check advisories` +# More documentation for the advisories section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html +[advisories] +# The path where the advisory database is cloned/fetched into +db-path = "~/cargo/advisory-db" +# The url of the advisory database to use +db-urls = [ "https://github.com/rustsec/advisory-db" ] +# The lint level for security vulnerabilities +vulnerability = "deny" +# The lint level for unmaintained crates +unmaintained = "warn" +# The lint level for crates that have been yanked from their source registry +yanked = "warn" +# The lint level for crates with security notices. Note that as of +# 2019-12-17 there are no security notice advisories in +# https://github.com/rustsec/advisory-db +notice = "warn" +# A list of advisory IDs to ignore. Note that ignored advisories will still +# output a note when they are encountered. +ignore = [ + #"RUSTSEC-0000-0000", +] +# Threshold for security vulnerabilities, any vulnerability with a CVSS score +# lower than the range specified will be ignored. Note that ignored advisories +# will still output a note when they are encountered. +# * None - CVSS Score 0.0 +# * Low - CVSS Score 0.1 - 3.9 +# * Medium - CVSS Score 4.0 - 6.9 +# * High - CVSS Score 7.0 - 8.9 +# * Critical - CVSS Score 9.0 - 10.0 +#severity-threshold = + +# This section is considered when running `cargo deny check licenses` +# More documentation for the licenses section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html +[licenses] +# The lint level for crates which do not have a detectable license +unlicensed = "deny" +# List of explictly allowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. +allow = [ + "Apache-2.0", + "BSD-2-Clause", + "MIT", + "Unlicense", +] +# List of explictly disallowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. 
+deny = [] +# Lint level for licenses considered copyleft +copyleft = "allow" +# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses +# * both - The license will be approved if it is both OSI-approved *AND* FSF +# * either - The license will be approved if it is either OSI-approved *OR* FSF +# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF +# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved +# * neither - This predicate is ignored and the default lint level is used +allow-osi-fsf-free = "both" +# Lint level used when no other predicates are matched +# 1. License isn't in the allow or deny lists +# 2. License isn't copyleft +# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" +default = "deny" +# The confidence threshold for detecting a license from license text. +# The higher the value, the more closely the license text must be to the +# canonical license text of a valid SPDX license file. +# [possible values: any between 0.0 and 1.0]. +confidence-threshold = 0.8 +# Allow 1 or more licenses on a per-crate basis, so that particular licenses +# aren't accepted for every possible crate as with the normal allow list +exceptions = [ + # OpenSSL is Apache License v2.0 (ASL v2) + # https://www.openssl.org/blog/blog/2017/03/22/license/ + # ring crate is ISC & MIT + { allow = ["ISC", "MIT", "OpenSSL"], name = "ring" }, + # libp2p is not re-distributing unicode tables data by itself + { allow = ["MIT", "Apache-2.0", "Unicode-DFS-2016"], name = "unicode-ident" }, +] + +# Some crates don't have (easily) machine readable licensing information, +# adding a clarification entry for it allows you to manually specify the +# licensing information +[[licenses.clarify]] +name = "ring" +expression = "ISC AND MIT AND OpenSSL" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] + +[licenses.private] +# If true, ignores workspace crates that aren't published, or are only +# published to private registries +ignore = false +# One or more private registries that you might publish crates to, if a crate +# is only published to private registries, and ignore is true, the crate will +# not have its license(s) checked +#registries = [ +#] + +# This section is considered when running `cargo deny check bans`. +# More documentation about the 'bans' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html +[bans] +# Lint level for when multiple versions of the same crate are detected +multiple-versions = "warn" +# The graph highlighting used when creating dotgraphs for crates +# with multiple versions +# * lowest-version - The path to the lowest versioned duplicate is highlighted +# * simplest-path - The path to the version with the fewest edges is highlighted +# * all - Both lowest-version and simplest-path are used +highlight = "all" +# List of crates that are allowed. Use with care! +#allow = [ +#] +# List of crates to deny +#deny = [ +#] +# Certain crates/versions that will be skipped when doing duplicate detection. +#skip = [ +#] +# Similarly to `skip` allows you to skip certain crates during duplicate +# detection. Unlike skip, it also includes the entire tree of transitive +# dependencies starting at the specified crate, up to a certain depth, which is +# by default infinite +#skip-tree = [ +#] + +# This section is considered when running `cargo deny check sources`. 
+# More documentation about the 'sources' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html +[sources] +# Lint level for what to happen when a crate from a crate registry that is not +# in the allow list is encountered +unknown-registry = "deny" +# Lint level for what to happen when a crate from a git repository that is not +# in the allow list is encountered +unknown-git = "deny" +# List of URLs for allowed crate registries. Defaults to the crates.io index +# if not specified. If it is specified but empty, no registries are allowed. +allow-registry = ["https://github.com/rust-lang/crates.io-index"] +# List of URLs for allowed Git repositories +#allow-git = [ +#] diff --git a/docs/architecture.svg b/docs/architecture.svg new file mode 100644 index 00000000000..354aaf3dda8 --- /dev/null +++ b/docs/architecture.svg @@ -0,0 +1,39 @@ +Swarmpoll()RootBehaviourpoll()ConnectionPoolpoll()Transportpoll()PingBehaviourpoll()IdentifyBehaviourpoll()KademliaBehaviourpoll() \ No newline at end of file diff --git a/docs/coding-guidelines.md b/docs/coding-guidelines.md new file mode 100644 index 00000000000..70a1ef53009 --- /dev/null +++ b/docs/coding-guidelines.md @@ -0,0 +1,301 @@ +# Coding Guidelines + + +**Table of Contents** + +- [Coding Guidelines](#coding-guidelines) + - [Hierarchical State Machines](#hierarchical-state-machines) + - [Conventions for `poll` implementations](#conventions-for-poll-implementations) + - [Prioritize local work over new work from a remote](#prioritize-local-work-over-new-work-from-a-remote) + - [Bound everything](#bound-everything) + - [Channels](#channels) + - [Local queues](#local-queues) + - [Tasks](#tasks) + - [Further reading](#further-reading) + - [No premature optimizations](#no-premature-optimizations) + - [Keep things sequential unless proven to be slow](#keep-things-sequential-unless-proven-to-be-slow) + - [Use `async/await` for sequential execution only](#use-asyncawait-for-sequential-execution-only) + - [Don't communicate by sharing memory; share memory by communicating.](#dont-communicate-by-sharing-memory-share-memory-by-communicating) + - [Further Reading](#further-reading) + - [Use iteration not recursion](#use-iteration-not-recursion) + - [Further Reading](#further-reading-1) + + + + +Below is a set of coding guidelines followed across the rust-libp2p code base. + +## Hierarchical State Machines + +If you sqint, rust-libp2p is just a big hierarchy of [state +machines](https://en.wikipedia.org/wiki/Finite-state_machine) where parents pass +events down to their children and children pass events up to their parents. + +![Architecture](architecture.svg) + +
+<details>
+  <summary>Reproduce diagram</summary>
+
+  ```
+  @startuml
+  Swarm <|-- RootBehaviour
+  Swarm <|-- ConnectionPool
+  Swarm <|-- Transport
+  RootBehaviour <|-- PingBehaviour
+  RootBehaviour <|-- IdentifyBehaviour
+  RootBehaviour <|-- KademliaBehaviour
+
+  Swarm : poll()
+  RootBehaviour : poll()
+  ConnectionPool : poll()
+  Transport : poll()
+  PingBehaviour : poll()
+  IdentifyBehaviour : poll()
+  KademliaBehaviour : poll()
+  @enduml
+  ```
+</details>
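[Editor's note: for a concrete feel of "children pass events up to their parents", the sketch below uses the `NetworkBehaviour` derive with an `out_event` enum, the same pattern the updated examples in this diff use. The names `Parent` and `ParentEvent` are illustrative, and the `ping`/`identify` module paths assume the `libp2p` facade crate at the version targeted by this diff.]

``` rust
use libp2p::{identify, ping, NetworkBehaviour};

// Parent state machine owning two children. The derive generates the
// delegating `NetworkBehaviour` impl which polls each child in turn.
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "ParentEvent")]
struct Parent {
    ping: ping::Behaviour,
    identify: identify::Identify,
}

// Child events bubble up to the parent (and eventually to the `Swarm`)
// as variants of this enum.
enum ParentEvent {
    Ping(ping::Event),
    Identify(identify::IdentifyEvent),
}

impl From<ping::Event> for ParentEvent {
    fn from(event: ping::Event) -> Self {
        ParentEvent::Ping(event)
    }
}

impl From<identify::IdentifyEvent> for ParentEvent {
    fn from(event: identify::IdentifyEvent) -> Self {
        ParentEvent::Identify(event)
    }
}
```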
+ +Using hierarchical state machines is a deliberate choice throughout the +rust-libp2p code base. It makes reasoning about control and data flow simple. It +works well with Rust's `Future` model. It allows fine-grain control e.g. on the +order child state machines are polled. + +The above comes with downsides. It feels more verbose. The mix of control flow (`loop`, `return`, +`break`, `continue`) in `poll` functions together with the asynchronous and thus decoupled +communication via events can be very hard to understand. Both are a form of complexity that we are +trading for correctness and performance which aligns with Rust's and rust-libp2p's goals. + +The architecture pattern of hierarchical state machines should be used wherever possible. + +### Conventions for `poll` implementations + +The `poll` method of a single state machine can be complex especially when that +state machine itself `poll`s many child state machines. The patterns shown below +have proven useful and should be followed across the code base. + +``` rust +fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll{ + loop { + match self.child_1.poll(cx) { + // The child made progress. + Poll::Ready(_) => { + // Either return an event to the parent: + return Poll::Ready(todo!()); + // or `continue`, thus polling `child_1` again. `child_1` can potentially make more progress. Try to exhaust + // it before moving on to the next child. + continue + // but NEVER move to the next child if the current child made progress. Given + // that the current child might be able to make more progress, it did not yet + // register the waker in order for the root task to be woken up later on. Moving + // on to the next child might result in the larger `Future` task to stall as it + // assumes that there is no more progress to be made. + } + + // The child did not make progress. It has registered the waker for a + // later wake up. Proceed with the other children. + Poll::Pending(_) => {} + } + + match self.child_2.poll(cx) { + Poll::Ready(child_2_event) => { + // Events can be dispatched from one child to the other. + self.child_1.handle_event(child_2_event); + + // Either `continue` thus polling `child_1` again, or `return Poll::Ready` with a result to the parent. + todo!() + } + Poll::Pending(_) => {} + } + + match self.child_3.poll(cx) { + Poll::Ready(__) => { + // Either `continue` thus polling `child_1` again, or `return Poll::Ready` with a result to the parent. + todo!() + } + Poll::Pending(_) => {} + } + + // None of the child state machines can make any more progress. Each registered + // the waker in order for the root `Future` task to be woken up again. + return Poll::Pending + } +} +``` + +### Prioritize local work over new work from a remote + +When handling multiple work streams, prioritize local work items over +accepting new work items from a remote. Take the following state machine as an +example, reading and writing from a socket, returning result to its parent: + +``` rust +struct SomeStateMachine { + socket: Socket, + events_to_return_to_parent: VecDeque, + messages_to_send_on_socket: VecDeque, +} + +impl Stream for SomeStateMachine { + type Item = Event; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + // First priority is returning local finished work. + if let Some(event) = events_to_return_to_parent.pop_front() { + return Poll::Ready(Some(event)); + } + + // Second priority is finishing local work, i.e. sending on the socket. 
+ if let Poll::Ready(()) = socket.poll_ready(cx) { + todo!("Send messages") + continue // Go back to the top. One might be able to send more. + } + + // Last priority is accepting new work, i.e. reading from the socket. + if let Poll::Ready(work_item) = socket.poll_next(cx) { + todo!("Start work on new item") + continue // Go back to the top. There might be more progress to be made. + } + + // At this point in time, there is no more progress to be made. Return + // `Pending` and be woken up later. + return Poll::Pending; + } + } +} +``` + +This priotization provides: +- Low memory footprint as local queues (here `events_to_return_to_parent`) stay small. +- Low latency as accepted local work is not stuck in queues. +- DOS defense as a remote does not control the size of the local queue, nor starves local work with its remote work. + +## Bound everything + +The concept of unboundedness is an illusion. Use bounded mechanisms to prevent +unbounded memory growth and high latencies. + +### Channels + +When using channels (e.g. `futures::channel::mpsc` or `std::sync::mpsc`) +always use the bounded variant, never use the unbounded variant. When using a +bounded channel, a slow consumer eventually slows down a fast producer once +the channel bound is reached, ideally granting the slow consumer more system +resources e.g. CPU time, keeping queues small and thus latencies low. When +using an unbounded channel a fast producer continues being a fast producer, +growing the channel buffer indefinitely, increasing latency until the illusion +of unboundedness breaks and the system runs out of memory. + +One may use an unbounded channel if one enforces backpressure through an +out-of-band mechanism, e.g. the consumer granting the producer send-tokens +through a side-channel. + +### Local queues + +As for channels shared across potentially concurrent actors (e.g. future tasks +or OS threads), the same applies for queues owned by a single actor only. E.g. +reading events from a socket into a `Vec` without some mechanism +bounding the size of that `Vec` again can lead to unbounded memory +growth and high latencies. + +Note that rust-libp2p fails at this guideline, i.e. still has many unbounded +local queues. + +### Tasks + +Bound the number of +[tasks](https://docs.rs/futures/latest/futures/task/index.html) being spawned. +As an example, say we spawn one task per incoming request received from a +socket. If the number of pending requests is not bounded by some limit, a +misbehaving or malicious remote peer can send requests at a higher rate than the +local node can respond at. This results in unbounded growth in the number of +requests, and thus unbounded growth in the number of tasks and used memory. + +Simply put, rust-libp2p spawns one task per connection but limits the overall +number of connections, thus adhering to this guideline. + +### Further reading + +- https://en.wikipedia.org/wiki/Bufferbloat +- https://apenwarr.ca/log/20170814 +- https://twitter.com/peterbourgon/status/1212800031406739456 + +## No premature optimizations + +Optimizations that add complexity need to be accompanied with a proof of their +effectiveness. + +This as well applies to increasing buffer or channel sizes, as the downside of +such pseudo optimizations is increased memory footprint and latency. + +## Keep things sequential unless proven to be slow + +Concurrency adds complexity. Concurrency adds overhead due to synchronization. +Thus unless proven to be a bottleneck, don't make things concurrent. 
As an example +the hierarchical `NetworkBehaviour` state machine runs sequentially. It is easy +to debug as it runs sequentially. Thus far there has been no proof that +shows a speed up when running it concurrently. + +## Use `async/await` for sequential execution only + +Using `async/await` for sequential execution makes things significantly simpler. +Though unfortunately using `async/await` does not allow accesing methods on the +object being `await`ed unless paired with some synchronization mechanism like an +`Arc>`. + +Example: Read and once done write from/to a socket. Use `async/await`. + +``` rust +socket.read_exact(&mut read_buf).await; +socket.write(&write_buf).await; +``` + +Example: Read and concurrently write from/to a socket. Use `poll`. + +``` rust +loop { + match socket.poll_read(cx, &mut read_buf) { + Poll::Ready(_) => { + todo!(); + continue; + } + Poll::Pending => {} + } + match socket.poll_write(cx, &write_buf) { + Poll::Ready(_) => { + todo!(); + continue; + } + Poll::Pending => {} + } + + return Poll::Pending; +} +``` + +When providing `async` methods, make it explicit whether it is safe to cancel +the resulting `Future`, i.e. whether it is safe to drop the `Future` returned +by the `async` method. + +## Don't communicate by sharing memory; share memory by communicating. + +The majority of rust-libp2p's code base follows the above Golang philosophy, +e.g. using channels instead of mutexes. This pattern enforces single ownership +over data, which works well with Rust's ownership model and makes reasoning +about data flow easier. + +### Further Reading + +- https://go.dev/blog/codelab-share + +## Use iteration not recursion + +Rust does not support tail call optimization, thus using recursion may grow the +stack potentially unboundedly. Instead use iteration e.g. via `loop` or `for`. + +### Further Reading + +- https://en.wikipedia.org/wiki/Tail_call +- https://stackoverflow.com/questions/65948553/why-is-recursion-not-suggested-in-rust +- https://stackoverflow.com/questions/59257543/when-is-tail-recursion-guaranteed-in-rust diff --git a/examples/README.md b/examples/README.md index 225425e0ff7..6c16d77cf66 100644 --- a/examples/README.md +++ b/examples/README.md @@ -7,7 +7,7 @@ A set of examples showcasing how to use rust-libp2p. - [Ping](ping.rs) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See - [tutorial](../src/tutorial.rs) for a step-by-step guide building the example. + [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example. ## Individual libp2p protocols diff --git a/examples/chat-tokio.rs b/examples/chat-tokio.rs index 66c25205246..5ee00f9eedc 100644 --- a/examples/chat-tokio.rs +++ b/examples/chat-tokio.rs @@ -25,7 +25,7 @@ //! The example is run per node as follows: //! //! ```sh -//! cargo run --example chat-tokio --features="tcp-tokio mdns" +//! cargo run --example chat-tokio --features="tcp-tokio mdns-tokio" //! ``` //! //! Alternatively, to run with the minimal set of features and crates: @@ -33,7 +33,7 @@ //! ```sh //!cargo run --example chat-tokio \\ //! --no-default-features \\ -//! --features="floodsub mplex noise tcp-tokio mdns" +//! --features="floodsub mplex noise tcp-tokio mdns-tokio" //! ``` use futures::StreamExt; @@ -41,10 +41,14 @@ use libp2p::{ core::upgrade, floodsub::{self, Floodsub, FloodsubEvent}, identity, - mdns::{Mdns, MdnsEvent}, + mdns::{ + MdnsEvent, + // `TokioMdns` is available through the `mdns-tokio` feature. 
+ TokioMdns, + }, mplex, noise, - swarm::{dial_opts::DialOpts, NetworkBehaviourEventProcess, SwarmBuilder, SwarmEvent}, + swarm::{SwarmBuilder, SwarmEvent}, // `TokioTcpTransport` is available through the `tcp-tokio` feature. tcp::TokioTcpTransport, Multiaddr, @@ -66,71 +70,52 @@ async fn main() -> Result<(), Box> { let peer_id = PeerId::from(id_keys.public()); println!("Local peer id: {:?}", peer_id); - // Create a keypair for authenticated encryption of the transport. - let noise_keys = noise::Keypair::::new() - .into_authentic(&id_keys) - .expect("Signing libp2p-noise static DH keypair failed."); - // Create a tokio-based TCP transport use noise for authenticated // encryption and Mplex for multiplexing of substreams on a TCP stream. let transport = TokioTcpTransport::new(GenTcpConfig::default().nodelay(true)) .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate( + noise::NoiseAuthenticated::xx(&id_keys) + .expect("Signing libp2p-noise static DH keypair failed."), + ) .multiplex(mplex::MplexConfig::new()) .boxed(); // Create a Floodsub topic let floodsub_topic = floodsub::Topic::new("chat"); - // We create a custom network behaviour that combines floodsub and mDNS. - // The derive generates a delegating `NetworkBehaviour` impl which in turn - // requires the implementations of `NetworkBehaviourEventProcess` for - // the events of each behaviour. + // We create a custom behaviour that combines floodsub and mDNS. + // The derive generates a delegating `NetworkBehaviour` impl. #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] + #[behaviour(out_event = "MyBehaviourEvent")] struct MyBehaviour { floodsub: Floodsub, - mdns: Mdns, + mdns: TokioMdns, } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `floodsub` produces an event. - fn inject_event(&mut self, message: FloodsubEvent) { - if let FloodsubEvent::Message(message) = message { - println!( - "Received: '{:?}' from {:?}", - String::from_utf8_lossy(&message.data), - message.source - ); - } + #[allow(clippy::large_enum_variant)] + enum MyBehaviourEvent { + Floodsub(FloodsubEvent), + Mdns(MdnsEvent), + } + + impl From for MyBehaviourEvent { + fn from(event: FloodsubEvent) -> Self { + MyBehaviourEvent::Floodsub(event) } } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `mdns` produces an event. - fn inject_event(&mut self, event: MdnsEvent) { - match event { - MdnsEvent::Discovered(list) => { - for (peer, _) in list { - self.floodsub.add_node_to_partial_view(peer); - } - } - MdnsEvent::Expired(list) => { - for (peer, _) in list { - if !self.mdns.has_node(&peer) { - self.floodsub.remove_node_from_partial_view(&peer); - } - } - } - } + impl From for MyBehaviourEvent { + fn from(event: MdnsEvent) -> Self { + MyBehaviourEvent::Mdns(event) } } // Create a Swarm to manage peers and events. let mut swarm = { - let mdns = Mdns::new(Default::default()).await?; + let mdns = TokioMdns::new(Default::default()).await?; let mut behaviour = MyBehaviour { - floodsub: Floodsub::new(peer_id.clone()), + floodsub: Floodsub::new(peer_id), mdns, }; @@ -166,8 +151,34 @@ async fn main() -> Result<(), Box> { swarm.behaviour_mut().floodsub.publish(floodsub_topic.clone(), line.as_bytes()); } event = swarm.select_next_some() => { - if let SwarmEvent::NewListenAddr { address, .. } = event { - println!("Listening on {:?}", address); + match event { + SwarmEvent::NewListenAddr { address, .. 
} => { + println!("Listening on {:?}", address); + } + SwarmEvent::Behaviour(MyBehaviourEvent::Floodsub(FloodsubEvent::Message(message))) => { + println!( + "Received: '{:?}' from {:?}", + String::from_utf8_lossy(&message.data), + message.source + ); + } + SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(event)) => { + match event { + MdnsEvent::Discovered(list) => { + for (peer, _) in list { + swarm.behaviour_mut().floodsub.add_node_to_partial_view(peer); + } + } + MdnsEvent::Expired(list) => { + for (peer, _) in list { + if !swarm.behaviour().mdns.has_node(&peer) { + swarm.behaviour_mut().floodsub.remove_node_from_partial_view(&peer); + } + } + } + } + } + _ => {} } } } diff --git a/examples/chat.rs b/examples/chat.rs index d03c0a6f3e5..ee5527bfca7 100644 --- a/examples/chat.rs +++ b/examples/chat.rs @@ -79,21 +79,15 @@ async fn main() -> Result<(), Box> { let floodsub_topic = floodsub::Topic::new("chat"); // We create a custom network behaviour that combines floodsub and mDNS. - // In the future, we want to improve libp2p to make this easier to do. - // Use the derive to generate delegating NetworkBehaviour impl and require the - // NetworkBehaviourEventProcess implementations below. + // Use the derive to generate delegating NetworkBehaviour impl. #[derive(NetworkBehaviour)] #[behaviour(out_event = "OutEvent")] struct MyBehaviour { floodsub: Floodsub, mdns: Mdns, - - // Struct fields which do not implement NetworkBehaviour need to be ignored - #[behaviour(ignore)] - #[allow(dead_code)] - ignored_member: bool, } + #[allow(clippy::large_enum_variant)] #[derive(Debug)] enum OutEvent { Floodsub(FloodsubEvent), @@ -118,7 +112,6 @@ async fn main() -> Result<(), Box> { let mut behaviour = MyBehaviour { floodsub: Floodsub::new(local_peer_id), mdns, - ignored_member: false, }; behaviour.floodsub.subscribe(floodsub_topic.clone()); diff --git a/examples/distributed-key-value-store.rs b/examples/distributed-key-value-store.rs index 6bf28bf0ff9..7fef717cf78 100644 --- a/examples/distributed-key-value-store.rs +++ b/examples/distributed-key-value-store.rs @@ -50,7 +50,7 @@ use libp2p::kad::{ use libp2p::{ development_transport, identity, mdns::{Mdns, MdnsConfig, MdnsEvent}, - swarm::{NetworkBehaviourEventProcess, SwarmEvent}, + swarm::SwarmEvent, NetworkBehaviour, PeerId, Swarm, }; use std::error::Error; @@ -68,28 +68,60 @@ async fn main() -> Result<(), Box> { // We create a custom network behaviour that combines Kademlia and mDNS. #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] + #[behaviour(out_event = "MyBehaviourEvent")] struct MyBehaviour { kademlia: Kademlia, mdns: Mdns, } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `mdns` produces an event. - fn inject_event(&mut self, event: MdnsEvent) { - if let MdnsEvent::Discovered(list) = event { - for (peer_id, multiaddr) in list { - self.kademlia.add_address(&peer_id, multiaddr); - } - } + enum MyBehaviourEvent { + Kademlia(KademliaEvent), + Mdns(MdnsEvent), + } + + impl From for MyBehaviourEvent { + fn from(event: KademliaEvent) -> Self { + MyBehaviourEvent::Kademlia(event) + } + } + + impl From for MyBehaviourEvent { + fn from(event: MdnsEvent) -> Self { + MyBehaviourEvent::Mdns(event) } } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `kademlia` produces an event. - fn inject_event(&mut self, message: KademliaEvent) { - match message { - KademliaEvent::OutboundQueryCompleted { result, .. } => match result { + // Create a swarm to manage peers and events. 
+ let mut swarm = { + // Create a Kademlia behaviour. + let store = MemoryStore::new(local_peer_id); + let kademlia = Kademlia::new(local_peer_id, store); + let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?; + let behaviour = MyBehaviour { kademlia, mdns }; + Swarm::new(transport, behaviour, local_peer_id) + }; + + // Read full lines from stdin + let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); + + // Listen on all interfaces and whatever port the OS assigns. + swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; + + // Kick it off. + loop { + select! { + line = stdin.select_next_some() => handle_input_line(&mut swarm.behaviour_mut().kademlia, line.expect("Stdin not to close")), + event = swarm.select_next_some() => match event { + SwarmEvent::NewListenAddr { address, .. } => { + println!("Listening in {:?}", address); + }, + SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(MdnsEvent::Discovered(list))) => { + for (peer_id, multiaddr) in list { + swarm.behaviour_mut().kademlia.add_address(&peer_id, multiaddr); + } + } + SwarmEvent::Behaviour(MyBehaviourEvent::Kademlia(KademliaEvent::OutboundQueryCompleted { result, ..})) => { + match result { QueryResult::GetProviders(Ok(ok)) => { for peer in ok.providers { println!( @@ -137,38 +169,10 @@ async fn main() -> Result<(), Box> { eprintln!("Failed to put provider record: {:?}", err); } _ => {} - }, - _ => {} + } } + _ => {} } - } - - // Create a swarm to manage peers and events. - let mut swarm = { - // Create a Kademlia behaviour. - let store = MemoryStore::new(local_peer_id); - let kademlia = Kademlia::new(local_peer_id, store); - let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?; - let behaviour = MyBehaviour { kademlia, mdns }; - Swarm::new(transport, behaviour, local_peer_id) - }; - - // Read full lines from stdin - let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); - - // Listen on all interfaces and whatever port the OS assigns. - swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; - - // Kick it off. - loop { - select! { - line = stdin.select_next_some() => handle_input_line(&mut swarm.behaviour_mut().kademlia, line.expect("Stdin not to close")), - event = swarm.select_next_some() => match event { - SwarmEvent::NewListenAddr { address, .. } => { - println!("Listening in {:?}", address); - }, - _ => {} - } } } } diff --git a/examples/file-sharing.rs b/examples/file-sharing.rs index 492eee11eeb..21fa45d54fc 100644 --- a/examples/file-sharing.rs +++ b/examples/file-sharing.rs @@ -84,6 +84,7 @@ use futures::prelude::*; use libp2p::core::{Multiaddr, PeerId}; use libp2p::multiaddr::Protocol; use std::error::Error; +use std::io::Write; use std::path::PathBuf; #[async_std::main] @@ -134,8 +135,9 @@ async fn main() -> Result<(), Box> { // Reply with the content of the file on incoming requests. Some(network::Event::InboundRequest { request, channel }) => { if request == name { - let file_content = std::fs::read_to_string(&path)?; - network_client.respond_file(file_content, channel).await; + network_client + .respond_file(std::fs::read(&path)?, channel) + .await; } } e => todo!("{:?}", e), @@ -158,12 +160,12 @@ async fn main() -> Result<(), Box> { }); // Await the requests, ignore the remaining once a single one succeeds. - let file = futures::future::select_ok(requests) + let file_content = futures::future::select_ok(requests) .await .map_err(|_| "None of the providers returned file.")? 
.0; - println!("Content of file {}: {}", name, file); + std::io::stdout().write_all(&file_content)?; } } @@ -219,7 +221,7 @@ mod network { }; use libp2p::swarm::{ConnectionHandlerUpgrErr, SwarmBuilder, SwarmEvent}; use libp2p::{NetworkBehaviour, Swarm}; - use std::collections::{HashMap, HashSet}; + use std::collections::{hash_map, HashMap, HashSet}; use std::iter; /// Creates the network components, namely: @@ -337,7 +339,7 @@ mod network { &mut self, peer: PeerId, file_name: String, - ) -> Result> { + ) -> Result, Box> { let (sender, receiver) = oneshot::channel(); self.sender .send(Command::RequestFile { @@ -351,7 +353,11 @@ mod network { } /// Respond with the provided file content to the given request. - pub async fn respond_file(&mut self, file: String, channel: ResponseChannel) { + pub async fn respond_file( + &mut self, + file: Vec, + channel: ResponseChannel, + ) { self.sender .send(Command::RespondFile { file, channel }) .await @@ -367,7 +373,7 @@ mod network { pending_start_providing: HashMap>, pending_get_providers: HashMap>>, pending_request_file: - HashMap>>>, + HashMap, Box>>>, } impl EventLoop { @@ -476,7 +482,7 @@ mod network { )) => {} SwarmEvent::NewListenAddr { address, .. } => { let local_peer_id = *self.swarm.local_peer_id(); - println!( + eprintln!( "Local node is listening on {:?}", address.with(Protocol::P2p(local_peer_id.into())) ); @@ -500,7 +506,7 @@ mod network { } } SwarmEvent::IncomingConnectionError { .. } => {} - SwarmEvent::Dialing(peer_id) => println!("Dialing {}", peer_id), + SwarmEvent::Dialing(peer_id) => eprintln!("Dialing {}", peer_id), e => panic!("{:?}", e), } } @@ -518,9 +524,7 @@ mod network { peer_addr, sender, } => { - if self.pending_dial.contains_key(&peer_id) { - todo!("Already dialing peer."); - } else { + if let hash_map::Entry::Vacant(e) = self.pending_dial.entry(peer_id) { self.swarm .behaviour_mut() .kademlia @@ -530,12 +534,14 @@ mod network { .dial(peer_addr.with(Protocol::P2p(peer_id.into()))) { Ok(()) => { - self.pending_dial.insert(peer_id, sender); + e.insert(sender); } Err(e) => { let _ = sender.send(Err(Box::new(e))); } } + } else { + todo!("Already dialing peer."); } } Command::StartProviding { file_name, sender } => { @@ -625,10 +631,10 @@ mod network { RequestFile { file_name: String, peer: PeerId, - sender: oneshot::Sender>>, + sender: oneshot::Sender, Box>>, }, RespondFile { - file: String, + file: Vec, channel: ResponseChannel, }, } @@ -650,7 +656,7 @@ mod network { #[derive(Debug, Clone, PartialEq, Eq)] struct FileRequest(String); #[derive(Debug, Clone, PartialEq, Eq)] - pub struct FileResponse(String); + pub struct FileResponse(Vec); impl ProtocolName for FileExchangeProtocol { fn protocol_name(&self) -> &[u8] { @@ -689,13 +695,13 @@ mod network { where T: AsyncRead + Unpin + Send, { - let vec = read_length_prefixed(io, 1_000_000).await?; + let vec = read_length_prefixed(io, 500_000_000).await?; // update transfer maximum if vec.is_empty() { return Err(io::ErrorKind::UnexpectedEof.into()); } - Ok(FileResponse(String::from_utf8(vec).unwrap())) + Ok(FileResponse(vec)) } async fn write_request( diff --git a/examples/gossipsub-chat.rs b/examples/gossipsub-chat.rs index 976fcafb470..d6ea44dcef6 100644 --- a/examples/gossipsub-chat.rs +++ b/examples/gossipsub-chat.rs @@ -101,7 +101,6 @@ async fn main() -> Result<(), Box> { // add an explicit peer if one was provided if let Some(explicit) = std::env::args().nth(2) { - let explicit = explicit.clone(); match explicit.parse() { Ok(id) => gossipsub.add_explicit_peer(&id), Err(err) 
=> println!("Failed to parse explicit peer id: {:?}", err), diff --git a/examples/ipfs-kad.rs b/examples/ipfs-kad.rs index b3e6b211b46..a36ed97737b 100644 --- a/examples/ipfs-kad.rs +++ b/examples/ipfs-kad.rs @@ -34,7 +34,7 @@ use libp2p::{ }; use std::{env, error::Error, str::FromStr, time::Duration}; -const BOOTNODES: [&'static str; 4] = [ +const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", "QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", "QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", diff --git a/examples/ipfs-private.rs b/examples/ipfs-private.rs index 113bdf988f2..c0596816919 100644 --- a/examples/ipfs-private.rs +++ b/examples/ipfs-private.rs @@ -41,9 +41,10 @@ use libp2p::{ identify::{Identify, IdentifyConfig, IdentifyEvent}, identity, multiaddr::Protocol, - noise, ping, + noise, + ping::{self, PingEvent}, pnet::{PnetConfig, PreSharedKey}, - swarm::{NetworkBehaviourEventProcess, SwarmEvent}, + swarm::SwarmEvent, tcp::TcpTransport, yamux::YamuxConfig, Multiaddr, NetworkBehaviour, PeerId, Swarm, Transport, @@ -56,10 +57,7 @@ pub fn build_transport( key_pair: identity::Keypair, psk: Option, ) -> transport::Boxed<(PeerId, StreamMuxerBox)> { - let noise_keys = noise::Keypair::::new() - .into_authentic(&key_pair) - .unwrap(); - let noise_config = noise::NoiseConfig::xx(noise_keys).into_authenticated(); + let noise_config = noise::NoiseAuthenticated::xx(&key_pair).unwrap(); let yamux_config = YamuxConfig::default(); let base_transport = TcpTransport::new(GenTcpConfig::default().nodelay(true)); @@ -91,7 +89,7 @@ fn get_ipfs_path() -> Box { } /// Read the pre shared key file from the given ipfs directory -fn get_psk(path: Box) -> std::io::Result> { +fn get_psk(path: &Path) -> std::io::Result> { let swarm_key_file = path.join("swarm.key"); match fs::read_to_string(swarm_key_file) { Ok(text) => Ok(Some(text)), @@ -135,9 +133,9 @@ fn parse_legacy_multiaddr(text: &str) -> Result> { async fn main() -> Result<(), Box> { env_logger::init(); - let ipfs_path: Box = get_ipfs_path(); + let ipfs_path = get_ipfs_path(); println!("using IPFS_PATH {:?}", ipfs_path); - let psk: Option = get_psk(ipfs_path)? + let psk: Option = get_psk(&ipfs_path)? .map(|text| PreSharedKey::from_str(&text)) .transpose()?; @@ -145,7 +143,7 @@ async fn main() -> Result<(), Box> { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(local_key.public()); println!("using random peer id: {:?}", local_peer_id); - for psk in psk { + if let Some(psk) = psk { println!("using swarm key with fingerprint: {}", psk.fingerprint()); } @@ -157,78 +155,34 @@ async fn main() -> Result<(), Box> { // We create a custom network behaviour that combines gossipsub, ping and identify. #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] + #[behaviour(out_event = "MyBehaviourEvent")] struct MyBehaviour { gossipsub: Gossipsub, identify: Identify, ping: ping::Behaviour, } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `identify` produces an event. - fn inject_event(&mut self, event: IdentifyEvent) { - println!("identify: {:?}", event); + enum MyBehaviourEvent { + Gossipsub(GossipsubEvent), + Identify(IdentifyEvent), + Ping(PingEvent), + } + + impl From for MyBehaviourEvent { + fn from(event: GossipsubEvent) -> Self { + MyBehaviourEvent::Gossipsub(event) } } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `gossipsub` produces an event. 
- fn inject_event(&mut self, event: GossipsubEvent) { - match event { - GossipsubEvent::Message { - propagation_source: peer_id, - message_id: id, - message, - } => println!( - "Got message: {} with id: {} from peer: {:?}", - String::from_utf8_lossy(&message.data), - id, - peer_id - ), - _ => {} - } + impl From for MyBehaviourEvent { + fn from(event: IdentifyEvent) -> Self { + MyBehaviourEvent::Identify(event) } } - impl NetworkBehaviourEventProcess for MyBehaviour { - // Called when `ping` produces an event. - fn inject_event(&mut self, event: ping::Event) { - match event { - ping::Event { - peer, - result: Result::Ok(ping::Success::Ping { rtt }), - } => { - println!( - "ping: rtt to {} is {} ms", - peer.to_base58(), - rtt.as_millis() - ); - } - ping::Event { - peer, - result: Result::Ok(ping::Success::Pong), - } => { - println!("ping: pong from {}", peer.to_base58()); - } - ping::Event { - peer, - result: Result::Err(ping::Failure::Timeout), - } => { - println!("ping: timeout to {}", peer.to_base58()); - } - ping::Event { - peer, - result: Result::Err(ping::Failure::Unsupported), - } => { - println!("ping: {} does not support ping protocol", peer.to_base58()); - } - ping::Event { - peer, - result: Result::Err(ping::Failure::Other { error }), - } => { - println!("ping: ping::Failure with {}: {}", peer.to_base58(), error); - } - } + impl From for MyBehaviourEvent { + fn from(event: PingEvent) -> Self { + MyBehaviourEvent::Ping(event) } } @@ -282,8 +236,64 @@ async fn main() -> Result<(), Box> { } }, event = swarm.select_next_some() => { - if let SwarmEvent::NewListenAddr { address, .. } = event { - println!("Listening on {:?}", address); + match event { + SwarmEvent::NewListenAddr { address, .. } => { + println!("Listening on {:?}", address); + } + SwarmEvent::Behaviour(MyBehaviourEvent::Identify(event)) => { + println!("identify: {:?}", event); + } + SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(GossipsubEvent::Message { + propagation_source: peer_id, + message_id: id, + message, + })) => { + println!( + "Got message: {} with id: {} from peer: {:?}", + String::from_utf8_lossy(&message.data), + id, + peer_id + ) + } + SwarmEvent::Behaviour(MyBehaviourEvent::Ping(event)) => { + match event { + ping::Event { + peer, + result: Result::Ok(ping::Success::Ping { rtt }), + } => { + println!( + "ping: rtt to {} is {} ms", + peer.to_base58(), + rtt.as_millis() + ); + } + ping::Event { + peer, + result: Result::Ok(ping::Success::Pong), + } => { + println!("ping: pong from {}", peer.to_base58()); + } + ping::Event { + peer, + result: Result::Err(ping::Failure::Timeout), + } => { + println!("ping: timeout to {}", peer.to_base58()); + } + ping::Event { + peer, + result: Result::Err(ping::Failure::Unsupported), + } => { + println!("ping: {} does not support ping protocol", peer.to_base58()); + } + ping::Event { + peer, + result: Result::Err(ping::Failure::Other { error }), + } => { + println!("ping: ping::Failure with {}: {}", peer.to_base58(), error); + } + } + } + _ => {} } } } diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 614aa2e6bf2..36e01709df9 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -13,5 +13,5 @@ clap = {version = "3.1.6", features = ["derive"]} zeroize = "1" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" -libp2p-core = { path = "../../core", default-features = false, version = "0.34.0"} +libp2p-core = { path = "../../core", default-features = false, version = "0.36.0"} base64 = "0.13.0" diff --git 
a/misc/metrics/CHANGELOG.md b/misc/metrics/CHANGELOG.md index 979617bdb29..1b784ffa1f9 100644 --- a/misc/metrics/CHANGELOG.md +++ b/misc/metrics/CHANGELOG.md @@ -1,4 +1,24 @@ -# 0.8.0 [unreleased] +# 0.10.0 [unreleased] + +- Update to `libp2p-kad` `v0.41.0`. + +# 0.9.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-dcutr` `v0.6.0`. + +- Update to `libp2p-ping` `v0.39.0`. + +- Update to `libp2p-identify` `v0.39.0`. + +- Update to `libp2p-relay` `v0.12.0`. + +- Update to `libp2p-kad` `v0.40.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.8.0 - Update to `libp2p-swarm` `v0.38.0`. @@ -12,6 +32,16 @@ - Update to `libp2p-kad` `v0.39.0`. +- Track number of connected nodes supporting a specific protocol via the identify protocol. See [PR 2734]. + +- Update to `libp2p-core` `v0.35.0`. + +- Update to `prometheus-client` `v0.18.0`. See [PR 2822]. + +[PR 2822]: https://github.com/libp2p/rust-libp2p/pull/2761/ + +[PR 2734]: https://github.com/libp2p/rust-libp2p/pull/2734/ + # 0.7.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/misc/metrics/Cargo.toml b/misc/metrics/Cargo.toml index f38e192947f..1c29fceec24 100644 --- a/misc/metrics/Cargo.toml +++ b/misc/metrics/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-metrics" edition = "2021" rust-version = "1.56.1" description = "Metrics for libp2p" -version = "0.8.0" +version = "0.10.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -19,21 +19,21 @@ relay = ["libp2p-relay"] dcutr = ["libp2p-dcutr"] [dependencies] -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-dcutr = { version = "0.5.0", path = "../../protocols/dcutr", optional = true } -libp2p-identify = { version = "0.38.0", path = "../../protocols/identify", optional = true } -libp2p-kad = { version = "0.39.0", path = "../../protocols/kad", optional = true } -libp2p-ping = { version = "0.38.0", path = "../../protocols/ping", optional = true } -libp2p-relay = { version = "0.11.0", path = "../../protocols/relay", optional = true } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } -prometheus-client = "0.16.0" +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-dcutr = { version = "0.6.0", path = "../../protocols/dcutr", optional = true } +libp2p-identify = { version = "0.39.0", path = "../../protocols/identify", optional = true } +libp2p-kad = { version = "0.41.0", path = "../../protocols/kad", optional = true } +libp2p-ping = { version = "0.39.0", path = "../../protocols/ping", optional = true } +libp2p-relay = { version = "0.12.0", path = "../../protocols/relay", optional = true } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +prometheus-client = "0.18.0" [target.'cfg(not(target_os = "unknown"))'.dependencies] -libp2p-gossipsub = { version = "0.40.0", path = "../../protocols/gossipsub", optional = true } +libp2p-gossipsub = { version = "0.41.0", path = "../../protocols/gossipsub", optional = true } [dev-dependencies] log = "0.4.0" -env_logger = "0.8.1" +env_logger = "0.9.0" futures = "0.3.1" libp2p = { path = "../../", default-features = false, features = ["metrics", "ping", "tcp-async-io", "dns-async-std", "websocket", "noise", "mplex", "yamux"] } hyper = { version="0.14", features = ["server", "tcp", "http1"] } diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index 27dcbc08dc8..b90e784f9b7 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -77,10 +77,9 @@ impl 
From<&libp2p_dcutr::behaviour::Event> for EventType { } } -impl super::Recorder for super::Metrics { +impl super::Recorder for Metrics { fn record(&self, event: &libp2p_dcutr::behaviour::Event) { - self.dcutr - .events + self.events .get_or_create(&EventLabels { event: event.into(), }) diff --git a/misc/metrics/src/gossipsub.rs b/misc/metrics/src/gossipsub.rs index 0bb6af5f452..a82c1a72a24 100644 --- a/misc/metrics/src/gossipsub.rs +++ b/misc/metrics/src/gossipsub.rs @@ -40,10 +40,10 @@ impl Metrics { } } -impl super::Recorder for super::Metrics { +impl super::Recorder for Metrics { fn record(&self, event: &libp2p_gossipsub::GossipsubEvent) { if let libp2p_gossipsub::GossipsubEvent::Message { .. } = event { - self.gossipsub.messages.inc(); + self.messages.inc(); } } } diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 7431eda5d25..730528167a8 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -18,12 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use libp2p_core::PeerId; +use prometheus_client::encoding::text::{EncodeMetric, Encoder}; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::metrics::MetricType; use prometheus_client::registry::Registry; +use std::collections::HashMap; use std::iter; +use std::sync::{Arc, Mutex}; pub struct Metrics { + protocols: Protocols, error: Counter, pushed: Counter, received: Counter, @@ -36,6 +42,15 @@ impl Metrics { pub fn new(registry: &mut Registry) -> Self { let sub_registry = registry.sub_registry_with_prefix("identify"); + let protocols = Protocols::default(); + sub_registry.register( + "protocols", + "Number of connected nodes supporting a specific protocol, with \ + \"unrecognized\" for each peer supporting one or more unrecognized \ + protocols", + Box::new(protocols.clone()), + ); + let error = Counter::default(); sub_registry.register( "errors", @@ -86,6 +101,7 @@ impl Metrics { ); Self { + protocols, error, pushed, received, @@ -96,27 +112,136 @@ impl Metrics { } } -impl super::Recorder for super::Metrics { +impl super::Recorder for Metrics { fn record(&self, event: &libp2p_identify::IdentifyEvent) { match event { libp2p_identify::IdentifyEvent::Error { .. } => { - self.identify.error.inc(); + self.error.inc(); } libp2p_identify::IdentifyEvent::Pushed { .. } => { - self.identify.pushed.inc(); + self.pushed.inc(); } - libp2p_identify::IdentifyEvent::Received { info, .. } => { - self.identify.received.inc(); - self.identify - .received_info_protocols + libp2p_identify::IdentifyEvent::Received { peer_id, info, .. } => { + { + let mut protocols: Vec = info + .protocols + .iter() + .filter(|p| { + let allowed_protocols: &[&[u8]] = &[ + #[cfg(feature = "dcutr")] + libp2p_dcutr::PROTOCOL_NAME, + // #[cfg(feature = "gossipsub")] + // #[cfg(not(target_os = "unknown"))] + // TODO: Add Gossipsub protocol name + libp2p_identify::PROTOCOL_NAME, + libp2p_identify::PUSH_PROTOCOL_NAME, + #[cfg(feature = "kad")] + libp2p_kad::protocol::DEFAULT_PROTO_NAME, + #[cfg(feature = "ping")] + libp2p_ping::PROTOCOL_NAME, + #[cfg(feature = "relay")] + libp2p_relay::v2::STOP_PROTOCOL_NAME, + #[cfg(feature = "relay")] + libp2p_relay::v2::HOP_PROTOCOL_NAME, + ]; + + allowed_protocols.contains(&p.as_bytes()) + }) + .cloned() + .collect(); + + // Signal via an additional label value that one or more + // protocols of the remote peer have not been recognized. 
+ if protocols.len() < info.protocols.len() { + protocols.push("unrecognized".to_string()); + } + + protocols.sort_unstable(); + protocols.dedup(); + + self.protocols.add(*peer_id, protocols); + } + + self.received.inc(); + self.received_info_protocols .observe(info.protocols.len() as f64); - self.identify - .received_info_listen_addrs + self.received_info_listen_addrs .observe(info.listen_addrs.len() as f64); } libp2p_identify::IdentifyEvent::Sent { .. } => { - self.identify.sent.inc(); + self.sent.inc(); } } } } + +impl super::Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { + if let libp2p_swarm::SwarmEvent::ConnectionClosed { + peer_id, + num_established, + .. + } = event + { + if *num_established == 0 { + self.protocols.remove(*peer_id) + } + } + } +} + +#[derive(Default, Clone)] +struct Protocols { + peers: Arc>>>, +} + +impl Protocols { + fn add(&self, peer: PeerId, protocols: Vec) { + self.peers + .lock() + .expect("Lock not to be poisoned") + .insert(peer, protocols); + } + + fn remove(&self, peer: PeerId) { + self.peers + .lock() + .expect("Lock not to be poisoned") + .remove(&peer); + } +} + +impl EncodeMetric for Protocols { + fn encode(&self, mut encoder: Encoder) -> Result<(), std::io::Error> { + let count_by_protocol = self + .peers + .lock() + .expect("Lock not to be poisoned") + .iter() + .fold( + HashMap::::default(), + |mut acc, (_, protocols)| { + for protocol in protocols { + let count = acc.entry(protocol.to_string()).or_default(); + *count += 1; + } + acc + }, + ); + + for (protocol, count) in count_by_protocol { + encoder + .with_label_set(&("protocol", protocol)) + .no_suffix()? + .no_bucket()? + .encode_value(count)? + .no_exemplar()?; + } + + Ok(()) + } + + fn metric_type(&self) -> MetricType { + MetricType::Gauge + } +} diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs index 8ab71befe91..5e5a1056060 100644 --- a/misc/metrics/src/kad.rs +++ b/misc/metrics/src/kad.rs @@ -159,25 +159,21 @@ impl Metrics { } } -impl super::Recorder for super::Metrics { +impl super::Recorder for Metrics { fn record(&self, event: &libp2p_kad::KademliaEvent) { match event { libp2p_kad::KademliaEvent::OutboundQueryCompleted { result, stats, .. 
} => { - self.kad - .query_result_num_requests + self.query_result_num_requests .get_or_create(&result.into()) .observe(stats.num_requests().into()); - self.kad - .query_result_num_success + self.query_result_num_success .get_or_create(&result.into()) .observe(stats.num_successes().into()); - self.kad - .query_result_num_failure + self.query_result_num_failure .get_or_create(&result.into()) .observe(stats.num_failures().into()); if let Some(duration) = stats.duration() { - self.kad - .query_result_duration + self.query_result_duration .get_or_create(&result.into()) .observe(duration.as_secs_f64()); } @@ -185,36 +181,30 @@ impl super::Recorder for super::Metrics { match result { libp2p_kad::QueryResult::GetRecord(result) => match result { Ok(ok) => self - .kad .query_result_get_record_ok .observe(ok.records.len() as f64), Err(error) => { - self.kad - .query_result_get_record_error + self.query_result_get_record_error .get_or_create(&error.into()) .inc(); } }, libp2p_kad::QueryResult::GetClosestPeers(result) => match result { Ok(ok) => self - .kad .query_result_get_closest_peers_ok .observe(ok.peers.len() as f64), Err(error) => { - self.kad - .query_result_get_closest_peers_error + self.query_result_get_closest_peers_error .get_or_create(&error.into()) .inc(); } }, libp2p_kad::QueryResult::GetProviders(result) => match result { Ok(ok) => self - .kad .query_result_get_providers_ok .observe(ok.providers.len() as f64), Err(error) => { - self.kad - .query_result_get_providers_error + self.query_result_get_providers_error .get_or_create(&error.into()) .inc(); } @@ -230,16 +220,14 @@ impl super::Recorder for super::Metrics { } => { let bucket = low.ilog2().unwrap_or(0); if *is_new_peer { - self.kad - .routing_updated + self.routing_updated .get_or_create(&RoutingUpdated { action: RoutingAction::Added, bucket, }) .inc(); } else { - self.kad - .routing_updated + self.routing_updated .get_or_create(&RoutingUpdated { action: RoutingAction::Updated, bucket, @@ -248,8 +236,7 @@ impl super::Recorder for super::Metrics { } if old_peer.is_some() { - self.kad - .routing_updated + self.routing_updated .get_or_create(&RoutingUpdated { action: RoutingAction::Evicted, bucket, @@ -259,10 +246,7 @@ impl super::Recorder for super::Metrics { } libp2p_kad::KademliaEvent::InboundRequest { request } => { - self.kad - .inbound_requests - .get_or_create(&request.into()) - .inc(); + self.inbound_requests.get_or_create(&request.into()).inc(); } _ => {} } diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index 634d13590df..d9fa3c40ffe 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -95,3 +95,55 @@ pub trait Recorder { /// Record the given event. 
fn record(&self, event: &Event); } + +#[cfg(feature = "dcutr")] +impl Recorder for Metrics { + fn record(&self, event: &libp2p_dcutr::behaviour::Event) { + self.dcutr.record(event) + } +} + +#[cfg(feature = "gossipsub")] +#[cfg(not(target_os = "unknown"))] +impl Recorder for Metrics { + fn record(&self, event: &libp2p_gossipsub::GossipsubEvent) { + self.gossipsub.record(event) + } +} + +#[cfg(feature = "identify")] +impl Recorder for Metrics { + fn record(&self, event: &libp2p_identify::IdentifyEvent) { + self.identify.record(event) + } +} + +#[cfg(feature = "kad")] +impl Recorder for Metrics { + fn record(&self, event: &libp2p_kad::KademliaEvent) { + self.kad.record(event) + } +} + +#[cfg(feature = "ping")] +impl Recorder for Metrics { + fn record(&self, event: &libp2p_ping::PingEvent) { + self.ping.record(event) + } +} + +#[cfg(feature = "relay")] +impl Recorder for Metrics { + fn record(&self, event: &libp2p_relay::v2::relay::Event) { + self.relay.record(event) + } +} + +impl Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { + self.swarm.record(event); + + #[cfg(feature = "identify")] + self.identify.record(event) + } +} diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs index 76d50b54d17..b7c3ef60f9b 100644 --- a/misc/metrics/src/ping.rs +++ b/misc/metrics/src/ping.rs @@ -92,17 +92,17 @@ impl Metrics { } } -impl super::Recorder for super::Metrics { +impl super::Recorder for Metrics { fn record(&self, event: &libp2p_ping::PingEvent) { match &event.result { Ok(libp2p_ping::PingSuccess::Pong) => { - self.ping.pong_received.inc(); + self.pong_received.inc(); } Ok(libp2p_ping::PingSuccess::Ping { rtt }) => { - self.ping.rtt.observe(rtt.as_secs_f64()); + self.rtt.observe(rtt.as_secs_f64()); } Err(failure) => { - self.ping.failure.get_or_create(&failure.into()).inc(); + self.failure.get_or_create(&failure.into()).inc(); } } } diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 479dcaab724..9267a975b08 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -102,10 +102,9 @@ impl From<&libp2p_relay::v2::relay::Event> for EventType { } } -impl super::Recorder for super::Metrics { +impl super::Recorder for Metrics { fn record(&self, event: &libp2p_relay::v2::relay::Event) { - self.relay - .events + self.events .get_or_create(&EventLabels { event: event.into(), }) diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index d0fb0c664f2..e9c5a0493ce 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -138,34 +138,29 @@ impl Metrics { } } -impl super::Recorder> - for super::Metrics -{ +impl super::Recorder> for Metrics { fn record(&self, event: &libp2p_swarm::SwarmEvent) { match event { libp2p_swarm::SwarmEvent::Behaviour(_) => {} libp2p_swarm::SwarmEvent::ConnectionEstablished { endpoint, .. } => { - self.swarm - .connections_established + self.connections_established .get_or_create(&ConnectionEstablishedLabels { role: endpoint.into(), }) .inc(); } libp2p_swarm::SwarmEvent::ConnectionClosed { endpoint, .. } => { - self.swarm - .connections_closed + self.connections_closed .get_or_create(&ConnectionClosedLabels { role: endpoint.into(), }) .inc(); } libp2p_swarm::SwarmEvent::IncomingConnection { .. } => { - self.swarm.connections_incoming.inc(); + self.connections_incoming.inc(); } libp2p_swarm::SwarmEvent::IncomingConnectionError { error, .. 
} => { - self.swarm - .connections_incoming_error + self.connections_incoming_error .get_or_create(&IncomingConnectionErrorLabels { error: error.into(), }) @@ -178,8 +173,7 @@ impl super::Recorder super::Recorder { - self.swarm.connected_to_banned_peer.inc(); + self.connected_to_banned_peer.inc(); } libp2p_swarm::SwarmEvent::NewListenAddr { .. } => { - self.swarm.new_listen_addr.inc(); + self.new_listen_addr.inc(); } libp2p_swarm::SwarmEvent::ExpiredListenAddr { .. } => { - self.swarm.expired_listen_addr.inc(); + self.expired_listen_addr.inc(); } libp2p_swarm::SwarmEvent::ListenerClosed { .. } => { - self.swarm.listener_closed.inc(); + self.listener_closed.inc(); } libp2p_swarm::SwarmEvent::ListenerError { .. } => { - self.swarm.listener_error.inc(); + self.listener_error.inc(); } libp2p_swarm::SwarmEvent::Dialing(_) => { - self.swarm.dial_attempt.inc(); + self.dial_attempt.inc(); } } } diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index 1cfdcc4b588..d1374ef7495 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -179,7 +179,7 @@ impl Message { // If it starts with a `/`, ends with a line feed without any // other line feeds in-between, it must be a protocol name. - if msg.get(0) == Some(&b'/') + if msg.first() == Some(&b'/') && msg.last() == Some(&b'\n') && !msg[..msg.len() - 1].contains(&b'\n') { diff --git a/misc/prost-codec/CHANGELOG.md b/misc/prost-codec/CHANGELOG.md new file mode 100644 index 00000000000..d9380ea34ca --- /dev/null +++ b/misc/prost-codec/CHANGELOG.md @@ -0,0 +1,5 @@ +# 0.2.0 + +- Update to prost(-build) `v0.11`. See [PR 2788]. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788/ \ No newline at end of file diff --git a/misc/prost-codec/Cargo.toml b/misc/prost-codec/Cargo.toml index 02969651acc..f4d1e90f844 100644 --- a/misc/prost-codec/Cargo.toml +++ b/misc/prost-codec/Cargo.toml @@ -3,7 +3,7 @@ name = "prost-codec" edition = "2021" rust-version = "1.56.1" description = "Asynchronous de-/encoding of Protobuf structs using asynchronous-codec, unsigned-varint and prost." -version = "0.1.0" +version = "0.2.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,9 +13,9 @@ categories = ["asynchronous"] [dependencies] asynchronous-codec = { version = "0.6" } bytes = { version = "1" } -prost = "0.10" +prost = "0.11" thiserror = "1.0" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [dev-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index add3d1ace0d..925013d1e29 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.36.0 + +- Update to `libp2p-core` `v0.36.0` + +# 0.35.0 + +- Update to `libp2p-core` `v0.35.0` + # 0.34.0 - `Substream` now implements `AsyncRead` and `AsyncWrite`. See [PR 2706]. 
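The metrics refactor above moves each protocol's `Recorder` implementation onto its own sub-metrics struct, with the crate-level `Metrics` type only delegating per event type. A minimal usage sketch, assuming the crate-level `Metrics::new(&mut Registry)` constructor and the `prometheus-client` registry this crate builds on (the function name is illustrative and the `ping` feature is assumed to be enabled):

use libp2p_metrics::{Metrics, Recorder};
use prometheus_client::registry::Registry;

fn record_ping_event(event: &libp2p_ping::PingEvent) {
    let mut registry = Registry::default();
    // Registers the enabled sub-metrics (ping, swarm, ...) with the registry.
    let metrics = Metrics::new(&mut registry);
    // The delegating impl on `Metrics` forwards to the inner `ping::Metrics`.
    metrics.record(event);
}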
diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index 3b5a82cd959..d4ea5342262 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-mplex" edition = "2021" rust-version = "1.56.1" description = "Mplex multiplexing protocol for libp2p" -version = "0.34.0" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,7 +14,7 @@ categories = ["network-programming", "asynchronous"] bytes = "1" futures = "0.3.1" asynchronous-codec = "0.6" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" @@ -24,7 +24,7 @@ unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [dev-dependencies] async-std = "1.7.0" -criterion = "0.3" +criterion = "0.4" env_logger = "0.9" futures = "0.3" libp2p-tcp = { path = "../../transports/tcp" } diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index f5bf771d1ab..f74bcd1046f 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -26,9 +26,9 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughpu use futures::future::poll_fn; use futures::prelude::*; use futures::{channel::oneshot, future::join}; +use libp2p_core::muxing::StreamMuxerExt; use libp2p_core::{ - identity, multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, PeerId, StreamMuxer, - Transport, + identity, multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, PeerId, Transport, }; use libp2p_mplex as mplex; use libp2p_plaintext::PlainText2Config; @@ -113,12 +113,8 @@ fn run( addr_sender.take().unwrap().send(listen_addr).unwrap(); } transport::TransportEvent::Incoming { upgrade, .. } => { - let (_peer, conn) = upgrade.await.unwrap(); - let mut s = poll_fn(|cx| conn.poll_event(cx)) - .await - .expect("unexpected error") - .into_inbound_substream() - .expect("Unexpected muxer event"); + let (_peer, mut conn) = upgrade.await.unwrap(); + let mut s = conn.next_inbound().await.expect("unexpected error"); let mut buf = vec![0u8; payload_len]; let mut off = 0; @@ -142,11 +138,8 @@ fn run( // Spawn and block on the sender, i.e. until all data is sent. 
let sender = async move { let addr = addr_receiver.await.unwrap(); - let (_peer, conn) = sender_trans.dial(addr).unwrap().await.unwrap(); - let mut handle = conn.open_outbound(); - let mut stream = poll_fn(|cx| conn.poll_outbound(cx, &mut handle)) - .await - .unwrap(); + let (_peer, mut conn) = sender_trans.dial(addr).unwrap().await.unwrap(); + let mut stream = conn.next_outbound().await.unwrap(); let mut off = 0; loop { let n = poll_fn(|cx| Pin::new(&mut stream).poll_write(cx, &payload[off..])) diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index 80b1db16481..501c4dd6735 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -27,11 +27,8 @@ pub use config::{MaxBufferBehaviour, MplexConfig}; use bytes::Bytes; use codec::LocalStreamId; use futures::{future, prelude::*, ready}; -use libp2p_core::{ - muxing::StreamMuxerEvent, - upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, - StreamMuxer, -}; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use parking_lot::Mutex; use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll}; @@ -75,9 +72,6 @@ where } /// Multiplexer. Implements the `StreamMuxer` trait. -/// -/// This implementation isn't capable of detecting when the underlying socket changes its address, -/// and no [`StreamMuxerEvent::AddressChange`] event is ever emitted. pub struct Multiplex { io: Arc>>, } @@ -87,43 +81,40 @@ where C: AsyncRead + AsyncWrite + Unpin, { type Substream = Substream; - type OutboundSubstream = OutboundSubstream; type Error = io::Error; - fn poll_event( - &self, + fn poll_inbound( + self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - let stream_id = ready!(self.io.lock().poll_next_stream(cx))?; - let stream = Substream::new(stream_id, self.io.clone()); - Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(stream))) - } - - fn open_outbound(&self) -> Self::OutboundSubstream { - OutboundSubstream {} + ) -> Poll> { + self.io + .lock() + .poll_next_stream(cx) + .map_ok(|stream_id| Substream::new(stream_id, self.io.clone())) } fn poll_outbound( - &self, + self: Pin<&mut Self>, cx: &mut Context<'_>, - _: &mut Self::OutboundSubstream, - ) -> Poll> { - let stream_id = ready!(self.io.lock().poll_open_stream(cx))?; - Poll::Ready(Ok(Substream::new(stream_id, self.io.clone()))) + ) -> Poll> { + self.io + .lock() + .poll_open_stream(cx) + .map_ok(|stream_id| Substream::new(stream_id, self.io.clone())) } - fn destroy_outbound(&self, _substream: Self::OutboundSubstream) { - // Nothing to do, since `open_outbound` creates no new local state. + fn poll( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending } - fn poll_close(&self, cx: &mut Context<'_>) -> Poll> { + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.io.lock().poll_close(cx) } } -/// Active attempt to open an outbound substream. -pub struct OutboundSubstream {} - impl AsyncRead for Substream where C: AsyncRead + AsyncWrite + Unpin, diff --git a/muxers/mplex/tests/async_write.rs b/muxers/mplex/tests/async_write.rs index 9dbda1a198d..bfbabf0f776 100644 --- a/muxers/mplex/tests/async_write.rs +++ b/muxers/mplex/tests/async_write.rs @@ -18,11 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::future::poll_fn; use futures::{channel::oneshot, prelude::*}; -use libp2p_core::{upgrade, StreamMuxer, Transport}; +use libp2p_core::muxing::StreamMuxerExt; +use libp2p_core::{upgrade, Transport}; use libp2p_tcp::TcpTransport; -use std::sync::Arc; #[test] fn async_write() { @@ -50,7 +49,7 @@ fn async_write() { tx.send(addr).unwrap(); - let client = transport + let mut client = transport .next() .await .expect("some event") @@ -60,10 +59,7 @@ fn async_write() { .await .unwrap(); - let mut outbound_token = client.open_outbound(); - let mut outbound = poll_fn(|cx| client.poll_outbound(cx, &mut outbound_token)) - .await - .unwrap(); + let mut outbound = client.next_outbound().await.unwrap(); let mut buf = Vec::new(); outbound.read_to_end(&mut buf).await.unwrap(); @@ -75,16 +71,9 @@ fn async_write() { let mut transport = TcpTransport::default() .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)); - let client = Arc::new(transport.dial(rx.await.unwrap()).unwrap().await.unwrap()); - let mut inbound = loop { - if let Some(s) = poll_fn(|cx| client.poll_event(cx)) - .await - .unwrap() - .into_inbound_substream() - { - break s; - } - }; + let mut client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); + + let mut inbound = client.next_inbound().await.unwrap(); inbound.write_all(b"hello world").await.unwrap(); // The test consists in making sure that this flushes the substream. diff --git a/muxers/mplex/tests/two_peers.rs b/muxers/mplex/tests/two_peers.rs index 4283452fe07..d30fcc1063d 100644 --- a/muxers/mplex/tests/two_peers.rs +++ b/muxers/mplex/tests/two_peers.rs @@ -18,11 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::future::poll_fn; use futures::{channel::oneshot, prelude::*}; -use libp2p_core::{upgrade, StreamMuxer, Transport}; +use libp2p_core::muxing::StreamMuxerExt; +use libp2p_core::{upgrade, Transport}; use libp2p_tcp::TcpTransport; -use std::sync::Arc; #[test] fn client_to_server_outbound() { @@ -50,7 +49,7 @@ fn client_to_server_outbound() { tx.send(addr).unwrap(); - let client = transport + let mut client = transport .next() .await .expect("some event") @@ -60,10 +59,7 @@ fn client_to_server_outbound() { .await .unwrap(); - let mut outbound_token = client.open_outbound(); - let mut outbound = poll_fn(|cx| client.poll_outbound(cx, &mut outbound_token)) - .await - .unwrap(); + let mut outbound = client.next_outbound().await.unwrap(); let mut buf = Vec::new(); outbound.read_to_end(&mut buf).await.unwrap(); @@ -76,16 +72,8 @@ fn client_to_server_outbound() { .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)) .boxed(); - let client = Arc::new(transport.dial(rx.await.unwrap()).unwrap().await.unwrap()); - let mut inbound = loop { - if let Some(s) = poll_fn(|cx| client.poll_event(cx)) - .await - .unwrap() - .into_inbound_substream() - { - break s; - } - }; + let mut client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); + let mut inbound = client.next_inbound().await.unwrap(); inbound.write_all(b"hello world").await.unwrap(); inbound.close().await.unwrap(); @@ -119,27 +107,17 @@ fn client_to_server_inbound() { tx.send(addr).unwrap(); - let client = Arc::new( - transport - .next() - .await - .expect("some event") - .into_incoming() - .unwrap() - .0 - .await - .unwrap(), - ); - - let mut inbound = loop { - if let Some(s) = poll_fn(|cx| client.poll_event(cx)) - .await - .unwrap() - .into_inbound_substream() - { - break s; - } - }; + let mut 
client = transport + .next() + .await + .expect("some event") + .into_incoming() + .unwrap() + .0 + .await + .unwrap(); + + let mut inbound = client.next_inbound().await.unwrap(); let mut buf = Vec::new(); inbound.read_to_end(&mut buf).await.unwrap(); @@ -152,12 +130,9 @@ fn client_to_server_inbound() { .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)) .boxed(); - let client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); + let mut client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); - let mut outbound_token = client.open_outbound(); - let mut outbound = poll_fn(|cx| client.poll_outbound(cx, &mut outbound_token)) - .await - .unwrap(); + let mut outbound = client.next_outbound().await.unwrap(); outbound.write_all(b"hello world").await.unwrap(); outbound.close().await.unwrap(); @@ -189,7 +164,7 @@ fn protocol_not_match() { tx.send(addr).unwrap(); - let client = transport + let mut client = transport .next() .await .expect("some event") @@ -199,10 +174,7 @@ fn protocol_not_match() { .await .unwrap(); - let mut outbound_token = client.open_outbound(); - let mut outbound = poll_fn(|cx| client.poll_outbound(cx, &mut outbound_token)) - .await - .unwrap(); + let mut outbound = client.next_outbound().await.unwrap(); let mut buf = Vec::new(); outbound.read_to_end(&mut buf).await.unwrap(); diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index 95d01fbbfcd..54eb19865a2 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,3 +1,15 @@ +# 0.40.0 + +- Update to `libp2p-core` `v0.36.0` + +- Remove `OpenSubstreamToken` as it is dead code. See [PR 2873]. + +[PR 2873]: https://github.com/libp2p/rust-libp2p/pull/2873/ + +# 0.39.0 + +- Update to `libp2p-core` `v0.35.0` + # 0.38.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index a7c55f08949..1ee7b4ae667 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-yamux" edition = "2021" rust-version = "1.56.1" description = "Yamux multiplexing protocol for libp2p" -version = "0.38.0" +version = "0.40.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } parking_lot = "0.12" thiserror = "1.0" yamux = "0.10.0" diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index 8eb6fb3e895..1c4c9e7c7c9 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -24,38 +24,31 @@ use futures::{ future, prelude::*, - ready, stream::{BoxStream, LocalBoxStream}, }; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use parking_lot::Mutex; use std::{ fmt, io, iter, mem, pin::Pin, task::{Context, Poll}, }; use thiserror::Error; +use yamux::ConnectionError; /// A Yamux connection. -pub struct Yamux(Mutex>); - -impl fmt::Debug for Yamux { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Yamux") - } -} - -struct Inner { +pub struct Yamux { /// The [`futures::stream::Stream`] of incoming substreams. incoming: S, /// Handle to control the connection. control: yamux::Control, } -/// A token to poll for an outbound substream. 
-#[derive(Debug)] -pub struct OpenSubstreamToken(()); +impl fmt::Debug for Yamux { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("Yamux") + } +} impl Yamux> where @@ -65,14 +58,14 @@ where fn new(io: C, cfg: yamux::Config, mode: yamux::Mode) -> Self { let conn = yamux::Connection::new(io, cfg, mode); let ctrl = conn.control(); - let inner = Inner { + + Yamux { incoming: Incoming { stream: yamux::into_stream(conn).err_into().boxed(), _marker: std::marker::PhantomData, }, control: ctrl, - }; - Yamux(Mutex::new(inner)) + } } } @@ -84,70 +77,64 @@ where fn local(io: C, cfg: yamux::Config, mode: yamux::Mode) -> Self { let conn = yamux::Connection::new(io, cfg, mode); let ctrl = conn.control(); - let inner = Inner { + + Yamux { incoming: LocalIncoming { stream: yamux::into_stream(conn).err_into().boxed_local(), _marker: std::marker::PhantomData, }, control: ctrl, - }; - Yamux(Mutex::new(inner)) + } } } pub type YamuxResult = Result; -/// > **Note**: This implementation never emits [`StreamMuxerEvent::AddressChange`] events. impl StreamMuxer for Yamux where S: Stream> + Unpin, { type Substream = yamux::Stream; - type OutboundSubstream = OpenSubstreamToken; type Error = YamuxError; - fn poll_event( - &self, - c: &mut Context<'_>, - ) -> Poll>> { - let mut inner = self.0.lock(); - match ready!(inner.incoming.poll_next_unpin(c)) { - Some(Ok(s)) => Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(s))), - Some(Err(e)) => Poll::Ready(Err(e)), - None => Poll::Ready(Err(yamux::ConnectionError::Closed.into())), - } - } - - fn open_outbound(&self) -> Self::OutboundSubstream { - OpenSubstreamToken(()) + fn poll_inbound( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.incoming.poll_next_unpin(cx).map(|maybe_stream| { + let stream = maybe_stream + .transpose()? + .ok_or(YamuxError(ConnectionError::Closed))?; + + Ok(stream) + }) } fn poll_outbound( - &self, - c: &mut Context<'_>, - _: &mut OpenSubstreamToken, - ) -> Poll> { - let mut inner = self.0.lock(); - Pin::new(&mut inner.control) - .poll_open_stream(c) + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut self.control) + .poll_open_stream(cx) .map_err(YamuxError) } - fn destroy_outbound(&self, _: Self::OutboundSubstream) { - self.0.lock().control.abort_open_stream() + fn poll( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Pending } - fn poll_close(&self, c: &mut Context<'_>) -> Poll> { - let mut inner = self.0.lock(); - - if let Poll::Ready(()) = Pin::new(&mut inner.control) + fn poll_close(mut self: Pin<&mut Self>, c: &mut Context<'_>) -> Poll> { + if let Poll::Ready(()) = Pin::new(&mut self.control) .poll_close(c) .map_err(YamuxError)? { return Poll::Ready(Ok(())); } - while let Poll::Ready(maybe_inbound_stream) = inner.incoming.poll_next_unpin(c)? { + while let Poll::Ready(maybe_inbound_stream) = self.incoming.poll_next_unpin(c)? { match maybe_inbound_stream { Some(inbound_stream) => mem::drop(inbound_stream), None => return Poll::Ready(Ok(())), diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index eafaecff2a5..1b585e7ddf9 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,9 +1,24 @@ -# 0.6.0 [unreleased] +# 0.7.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-request-response` `v0.21.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.6.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. 
+ Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. - Update to `libp2p-request-response` `v0.20.0`. +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.5.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 9e0d272785d..b5fc9760d6d 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-autonat" edition = "2021" rust-version = "1.56.1" description = "NAT and firewall detection for libp2p" -version = "0.6.0" +version = "0.7.0" authors = ["David Craven ", "Elena Frank "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,19 +11,19 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] async-trait = "0.1" futures = "0.3" futures-timer = "3.0" instant = "0.1" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } -libp2p-request-response = { version = "0.20.0", path = "../request-response" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-request-response = { version = "0.21.0", path = "../request-response" } log = "0.4" rand = "0.8" -prost = "0.10" +prost = "0.11" [dev-dependencies] async-std = { version = "1.10", features = ["attributes"] } diff --git a/protocols/autonat/src/behaviour.rs b/protocols/autonat/src/behaviour.rs index f98bf5af532..b39a7b141b4 100644 --- a/protocols/autonat/src/behaviour.rs +++ b/protocols/autonat/src/behaviour.rs @@ -130,7 +130,7 @@ impl ProbeId { } /// Event produced by [`Behaviour`]. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum Event { /// Event on an inbound probe. InboundProbe(InboundProbeEvent), diff --git a/protocols/autonat/src/behaviour/as_client.rs b/protocols/autonat/src/behaviour/as_client.rs index 8d0097deab8..5a5e18b6531 100644 --- a/protocols/autonat/src/behaviour/as_client.rs +++ b/protocols/autonat/src/behaviour/as_client.rs @@ -40,7 +40,7 @@ use std::{ }; /// Outbound probe failed or was aborted. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum OutboundProbeError { /// Probe was aborted because no server is known, or all servers /// are throttled through [`Config::throttle_server_period`]. @@ -54,7 +54,7 @@ pub enum OutboundProbeError { Response(ResponseError), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum OutboundProbeEvent { /// A dial-back request was sent to a remote peer. Request { diff --git a/protocols/autonat/src/behaviour/as_server.rs b/protocols/autonat/src/behaviour/as_server.rs index 9b045c02b4c..681076b92cb 100644 --- a/protocols/autonat/src/behaviour/as_server.rs +++ b/protocols/autonat/src/behaviour/as_server.rs @@ -38,7 +38,7 @@ use std::{ }; /// Inbound probe failed. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum InboundProbeError { /// Receiving the dial-back request or sending a response failed. 
InboundRequest(InboundFailure), @@ -46,7 +46,7 @@ pub enum InboundProbeError { Response(ResponseError), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum InboundProbeEvent { /// A dial-back request was received from a remote peer. Request { diff --git a/protocols/autonat/src/lib.rs b/protocols/autonat/src/lib.rs index 3a1bf63cc8e..b21c73dd0d0 100644 --- a/protocols/autonat/src/lib.rs +++ b/protocols/autonat/src/lib.rs @@ -31,6 +31,7 @@ pub use self::{ }; pub use libp2p_request_response::{InboundFailure, OutboundFailure}; +#[allow(clippy::derive_partial_eq_without_eq)] mod structs_proto { include!(concat!(env!("OUT_DIR"), "/structs.rs")); } diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index 7d144a20772..f6742cf5581 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,7 +1,29 @@ -# 0.5.0 [unreleased] +# 0.6.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.5.1 + +- Make default features of `libp2p-core` optional. See [PR 2836]. + +[PR 2836]: https://github.com/libp2p/rust-libp2p/pull/2836/ + +# 0.5.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Expose `PROTOCOL_NAME`. See [PR 2734]. + +- Update to `libp2p-core` `v0.35.0`. + +[PR 2734]: https://github.com/libp2p/rust-libp2p/pull/2734/ +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.4.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 1786412cadf..dd059e41422 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dcutr" edition = "2021" rust-version = "1.56.1" description = "Direct connection upgrade through relay" -version = "0.5.0" +version = "0.6.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -17,19 +17,19 @@ either = "1.6.0" futures = "0.3.1" futures-timer = "3.0" instant = "0.1.11" -libp2p-core = { version = "0.34.0", path = "../../core" } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4" -prost-codec = { version = "0.1", path = "../../misc/prost-codec" } -prost = "0.10" +prost-codec = { version = "0.2", path = "../../misc/prost-codec" } +prost = "0.11" thiserror = "1.0" void = "1" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dev-dependencies] -env_logger = "0.8.3" +env_logger = "0.9.0" libp2p = { path = "../..", default-features = false, features = ["dcutr", "relay", "plaintext", "identify", "tcp-async-io", "ping", "noise", "dns-async-std"] } libp2p-identify = { path = "../identify" } libp2p-plaintext = { path = "../../transports/plaintext" } diff --git a/protocols/dcutr/examples/client.rs b/protocols/dcutr/examples/client.rs index dd73b7d3ac3..54448ff635d 100644 --- a/protocols/dcutr/examples/client.rs +++ b/protocols/dcutr/examples/client.rs @@ -89,10 +89,6 @@ fn main() -> Result<(), Box> { let (relay_transport, client) = Client::new_transport_and_behaviour(local_peer_id); - let noise_keys = noise::Keypair::::new() - .into_authentic(&local_key) - .expect("Signing libp2p-noise static DH keypair failed."); - let transport = OrTransport::new( relay_transport, 
block_on(DnsConfig::system(TcpTransport::new( @@ -101,7 +97,10 @@ fn main() -> Result<(), Box> { .unwrap(), ) .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate( + noise::NoiseAuthenticated::xx(&local_key) + .expect("Signing libp2p-noise static DH keypair failed."), + ) .multiplex(libp2p_yamux::YamuxConfig::default()) .boxed(); diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 893148890b7..5d93d90b339 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -65,6 +65,7 @@ pub enum UpgradeError { Handler(ConnectionHandlerUpgrErr), } +#[derive(Default)] pub struct Behaviour { /// Queue of actions to return when polled. queued_actions: VecDeque, @@ -145,40 +146,35 @@ impl NetworkBehaviour for Behaviour { handler: Self::ConnectionHandler, _error: &DialError, ) { - match handler { - handler::Prototype::DirectConnection { - relayed_connection_id, - role: handler::Role::Initiator { attempt }, - } => { - let peer_id = - peer_id.expect("Peer of `Prototype::DirectConnection` is always known."); - if attempt < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { - self.queued_actions.push_back(ActionBuilder::Connect { + if let handler::Prototype::DirectConnection { + relayed_connection_id, + role: handler::Role::Initiator { attempt }, + } = handler + { + let peer_id = peer_id.expect("Peer of `Prototype::DirectConnection` is always known."); + if attempt < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { + self.queued_actions.push_back(ActionBuilder::Connect { + peer_id, + handler: NotifyHandler::One(relayed_connection_id), + attempt: attempt + 1, + }); + } else { + self.queued_actions.extend([ + NetworkBehaviourAction::NotifyHandler { peer_id, handler: NotifyHandler::One(relayed_connection_id), - attempt: attempt + 1, - }); - } else { - self.queued_actions.extend([ - NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::One(relayed_connection_id), - event: Either::Left( - handler::relayed::Command::UpgradeFinishedDontKeepAlive, - ), - } - .into(), - NetworkBehaviourAction::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: peer_id, - error: UpgradeError::Dial, - }, - ) - .into(), - ]); - } + event: Either::Left( + handler::relayed::Command::UpgradeFinishedDontKeepAlive, + ), + } + .into(), + NetworkBehaviourAction::GenerateEvent(Event::DirectConnectionUpgradeFailed { + remote_peer_id: peer_id, + error: UpgradeError::Dial, + }) + .into(), + ]); } - _ => {} } } @@ -324,7 +320,6 @@ impl NetworkBehaviour for Behaviour { /// A [`NetworkBehaviourAction`], either complete, or still requiring data from [`PollParameters`] /// before being returned in [`Behaviour::poll`]. -#[allow(clippy::large_enum_variant)] enum ActionBuilder { Done(NetworkBehaviourAction), Connect { @@ -333,7 +328,7 @@ enum ActionBuilder { peer_id: PeerId, }, AcceptInboundConnect { - inbound_connect: protocol::inbound::PendingConnect, + inbound_connect: Box, handler: NotifyHandler, peer_id: PeerId, }, diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 9f9e2e01c13..e172b8f6993 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -44,7 +44,7 @@ pub enum Command { }, AcceptInboundConnect { obs_addrs: Vec, - inbound_connect: protocol::inbound::PendingConnect, + inbound_connect: Box, }, /// Upgrading the relayed connection to a direct connection either failed for good or succeeded. 
/// There is no need to keep the relayed connection alive for the sake of upgrading to a direct @@ -76,7 +76,7 @@ impl fmt::Debug for Command { pub enum Event { InboundConnectRequest { - inbound_connect: protocol::inbound::PendingConnect, + inbound_connect: Box, remote_addr: Multiaddr, }, InboundNegotiationFailed { @@ -201,7 +201,7 @@ impl ConnectionHandler for Handler { }; self.queued_events.push_back(ConnectionHandlerEvent::Custom( Event::InboundConnectRequest { - inbound_connect, + inbound_connect: Box::new(inbound_connect), remote_addr, }, )); @@ -245,9 +245,10 @@ impl ConnectionHandler for Handler { inbound_connect, obs_addrs, } => { - if let Some(_) = self + if self .inbound_connect .replace(inbound_connect.accept(obs_addrs).boxed()) + .is_some() { log::warn!( "New inbound connect stream while still upgrading previous one. \ @@ -337,8 +338,7 @@ impl ConnectionHandler for Handler { _ => { // Anything else is considered a fatal error or misbehaviour of // the remote peer and results in closing the connection. - self.pending_error = - Some(error.map_upgrade_err(|e| e.map_err(|e| EitherError::B(e)))); + self.pending_error = Some(error.map_upgrade_err(|e| e.map_err(EitherError::B))); } } } diff --git a/protocols/dcutr/src/lib.rs b/protocols/dcutr/src/lib.rs index 20ca846d99b..4a843bac2b7 100644 --- a/protocols/dcutr/src/lib.rs +++ b/protocols/dcutr/src/lib.rs @@ -27,8 +27,10 @@ mod protocol; pub use protocol::{ inbound::UpgradeError as InboundUpgradeError, outbound::UpgradeError as OutboundUpgradeError, + PROTOCOL_NAME, }; +#[allow(clippy::derive_partial_eq_without_eq)] mod message_proto { include!(concat!(env!("OUT_DIR"), "/holepunch.pb.rs")); } diff --git a/protocols/dcutr/src/protocol.rs b/protocols/dcutr/src/protocol.rs index d2b8b39a6d0..67f9af69f70 100644 --- a/protocols/dcutr/src/protocol.rs +++ b/protocols/dcutr/src/protocol.rs @@ -21,6 +21,6 @@ pub mod inbound; pub mod outbound; -const PROTOCOL_NAME: &[u8; 13] = b"/libp2p/dcutr"; +pub const PROTOCOL_NAME: &[u8; 13] = b"/libp2p/dcutr"; const MAX_MESSAGE_SIZE_BYTES: usize = 4096; diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index 491c87b99b5..5a91e14642a 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,7 +1,20 @@ -# 0.38.0 [unreleased] +# 0.39.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.38.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.37.0 - Update to `libp2p-core` `v0.34.0`. 
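The dcutr client example above (like several other diffs in this set) replaces the manual `noise::Keypair::<X25519Spec>::new().into_authentic(..)` setup with the shorter `NoiseAuthenticated::xx` constructor. A condensed sketch of the resulting upgrade chain, using an in-memory transport purely for illustration:

use libp2p_core::{identity, transport::MemoryTransport, upgrade, Transport};

fn build_transport() {
    let local_key = identity::Keypair::generate_ed25519();
    let _transport = MemoryTransport::default()
        .upgrade(upgrade::Version::V1)
        // A single call derives the static DH key pair and configures the XX handshake.
        .authenticate(libp2p_noise::NoiseAuthenticated::xx(&local_key).unwrap())
        .multiplex(libp2p_mplex::MplexConfig::new())
        .boxed();
}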
diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 2556384f00a..58977415005 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-floodsub" edition = "2021" rust-version = "1.56.1" description = "Floodsub protocol for libp2p" -version = "0.38.0" +version = "0.39.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,12 +14,12 @@ categories = ["network-programming", "asynchronous"] cuckoofilter = "0.5.0" fnv = "1.0" futures = "0.3.1" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4" -prost = "0.10" +prost = "0.11" rand = "0.7" smallvec = "1.6.1" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 059ff505080..4256e39b7dc 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -29,8 +29,8 @@ use fnv::FnvHashSet; use libp2p_core::{connection::ConnectionId, PeerId}; use libp2p_core::{ConnectedPoint, Multiaddr}; use libp2p_swarm::{ - dial_opts::{self, DialOpts}, - NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, OneShotHandler, PollParameters, + dial_opts::DialOpts, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, OneShotHandler, + PollParameters, }; use log::warn; use smallvec::SmallVec; @@ -109,9 +109,7 @@ impl Floodsub { if self.target_peers.insert(peer_id) { let handler = self.new_handler(); self.events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(peer_id) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(peer_id).build(), handler, }); } @@ -343,9 +341,7 @@ impl NetworkBehaviour for Floodsub { if self.target_peers.contains(id) { let handler = self.new_handler(); self.events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(*id) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(*id).build(), handler, }); } diff --git a/protocols/floodsub/src/lib.rs b/protocols/floodsub/src/lib.rs index 16f0df610a9..109d0db8795 100644 --- a/protocols/floodsub/src/lib.rs +++ b/protocols/floodsub/src/lib.rs @@ -28,6 +28,7 @@ pub mod protocol; mod layer; mod topic; +#[allow(clippy::derive_partial_eq_without_eq)] mod rpc_proto { include!(concat!(env!("OUT_DIR"), "/floodsub.pb.rs")); } diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 0d0a5fa333f..1ba36ad6607 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,7 +1,27 @@ -# 0.40.0 [unreleased] +# 0.41.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +- Allow publishing with any `impl Into` as a topic. See [PR 2862]. + +[PR 2862]: https://github.com/libp2p/rust-libp2p/pull/2862 + +# 0.40.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Update to `libp2p-core` `v0.35.0`. + +- Update to `prometheus-client` `v0.18.0`. See [PR 2822]. 
+ +[PR 2822]: https://github.com/libp2p/rust-libp2p/pull/2761/ +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.39.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 5e787076647..2adccfd6607 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-gossipsub" edition = "2021" rust-version = "1.56.1" description = "Gossipsub protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,8 +11,8 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } bytes = "1.0" byteorder = "1.3.4" fnv = "1.0.7" @@ -24,14 +24,14 @@ log = "0.4.11" sha2 = "0.10.0" base64 = "0.13.0" smallvec = "1.6.1" -prost = "0.10" +prost = "0.11" hex_fmt = "0.3.0" regex = "1.5.5" serde = { version = "1", optional = true, features = ["derive"] } wasm-timer = "0.2.5" instant = "0.1.11" # Metrics dependencies -prometheus-client = "0.16.0" +prometheus-client = "0.18.0" [dev-dependencies] async-std = "1.6.3" @@ -45,4 +45,4 @@ hex = "0.4.2" derive_builder = "0.11.1" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 3ec0b117d58..21dd77562df 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -41,8 +41,8 @@ use libp2p_core::{ multiaddr::Protocol::Ip6, ConnectedPoint, Multiaddr, PeerId, }; use libp2p_swarm::{ - dial_opts::{self, DialOpts}, - IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, + dial_opts::DialOpts, IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, + NotifyHandler, PollParameters, }; use wasm_timer::Instant; @@ -587,19 +587,20 @@ where } /// Publishes a message with multiple topics to the network. - pub fn publish( + pub fn publish( &mut self, - topic: Topic, + topic: impl Into, data: impl Into>, ) -> Result { let data = data.into(); + let topic = topic.into(); // Transform the data before building a raw_message. 
let transformed_data = self .data_transform - .outbound_transform(&topic.hash(), data.clone())?; + .outbound_transform(&topic, data.clone())?; - let raw_message = self.build_raw_message(topic.into(), transformed_data)?; + let raw_message = self.build_raw_message(topic, transformed_data)?; // calculate the message id from the un-transformed data let msg_id = self.config.message_id(&GossipsubMessage { @@ -648,7 +649,7 @@ where set.iter() .filter(|p| { self.explicit_peers.contains(*p) - || !self.score_below_threshold(*p, |ts| ts.publish_threshold).0 + || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 }) .cloned(), ); @@ -945,14 +946,11 @@ where ); // remove explicit peers, peers with negative scores, and backoffed peers - peers = peers - .into_iter() - .filter(|p| { - !self.explicit_peers.contains(p) - && !self.score_below_threshold(p, |_| 0.0).0 - && !self.backoffs.is_backoff_with_slack(topic_hash, p) - }) - .collect(); + peers.retain(|p| { + !self.explicit_peers.contains(p) + && !self.score_below_threshold(p, |_| 0.0).0 + && !self.backoffs.is_backoff_with_slack(topic_hash, p) + }); // Add up to mesh_n of them them to the mesh // NOTE: These aren't randomly added, currently FIFO @@ -1143,9 +1141,7 @@ where debug!("Connecting to explicit peer {:?}", peer_id); let handler = self.new_handler(); self.events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(*peer_id) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(*peer_id).build(), handler, }); } @@ -1626,7 +1622,7 @@ where // //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a // signed peer record? - px = px.into_iter().filter(|p| p.peer_id.is_some()).collect(); + px.retain(|p| p.peer_id.is_some()); if px.len() > n { // only use at most prune_peers many random peers let mut rng = thread_rng(); @@ -1644,9 +1640,7 @@ where // dial peer let handler = self.new_handler(); self.events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(peer_id) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(peer_id).build(), handler, }); } @@ -3207,7 +3201,7 @@ where debug!("Peer disconnected: {}", peer_id); { let topics = match self.peer_topics.get(peer_id) { - Some(topics) => (topics), + Some(topics) => topics, None => { debug_assert!( self.blacklisted_peers.contains(peer_id), @@ -3339,7 +3333,7 @@ where )); } else if let Some(conn) = self.connected_peers.get_mut(&propagation_source) { // Only change the value if the old value is Floodsub (the default set in - // inject_connected). All other PeerKind changes are ignored. + // inject_connection_established). All other PeerKind changes are ignored. debug!( "New peer type found: {} for peer: {}", kind, propagation_source diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 93939757c63..a0a8d5e46a1 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -50,7 +50,7 @@ pub enum ValidationMode { } /// Selector for custom Protocol Id -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum GossipsubVersion { V1_0, V1_1, diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index d86263aace4..4022a23185d 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -97,10 +97,9 @@ //! //! // Set up an encrypted TCP Transport over the Mplex //! // This is test transport (memory). -//! 
let noise_keys = libp2p_noise::Keypair::::new().into_authentic(&local_key).unwrap(); //! let transport = MemoryTransport::default() //! .upgrade(libp2p_core::upgrade::Version::V1) -//! .authenticate(libp2p_noise::NoiseConfig::xx(noise_keys).into_authenticated()) +//! .authenticate(libp2p_noise::NoiseAuthenticated::xx(&local_key).unwrap()) //! .multiplex(libp2p_mplex::MplexConfig::new()) //! .boxed(); //! diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index ce337c96455..0eb5f4ee56b 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -613,6 +613,7 @@ mod tests { struct TestKeypair(Keypair); impl Arbitrary for TestKeypair { + #[cfg(feature = "rsa")] fn arbitrary(g: &mut G) -> Self { let keypair = if g.gen() { // Small enough to be inlined. @@ -624,6 +625,12 @@ mod tests { }; TestKeypair(keypair) } + + #[cfg(not(feature = "rsa"))] + fn arbitrary(_g: &mut G) -> Self { + // Small enough to be inlined. + TestKeypair(Keypair::generate_ed25519()) + } } impl std::fmt::Debug for TestKeypair { diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs index b9fa8106c6a..3903318fbfb 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -17,6 +17,7 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +#![allow(clippy::derive_partial_eq_without_eq)] include!(concat!(env!("OUT_DIR"), "/gossipsub.pb.rs")); diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 6ffde514e37..29c72d1f044 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -86,7 +86,7 @@ declare_message_id_type!(MessageId, "MessageId"); // filter duplicates quickly without performing the overhead of decompression. declare_message_id_type!(FastMessageId, "FastMessageId"); -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct PeerConnections { /// The kind of protocol the peer supports. pub kind: PeerKind, diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 60c77cd032a..1455966b091 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,7 +1,23 @@ -# 0.38.0 [unreleased] +# 0.39.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.38.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Expose `PROTOCOL_NAME` and `PUSH_PROTOCOL_NAME`. See [PR 2734]. + +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 +[PR 2734]: https://github.com/libp2p/rust-libp2p/pull/2734/ + # 0.37.0 - Update to `libp2p-core` `v0.34.0`. 
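Per the gossipsub changelog earlier in this change set, `Gossipsub::publish` now takes any `impl Into<TopicHash>` instead of a typed `Topic<H>`. A brief sketch of what that allows, assuming the usual `Gossipsub` behaviour type and the `IdentTopic` helper (error handling elided):

use libp2p_gossipsub::{Gossipsub, IdentTopic};

fn publish_both_ways(gossipsub: &mut Gossipsub, data: Vec<u8>) {
    let topic = IdentTopic::new("example-topic");
    // A typed topic still converts into a `TopicHash` ...
    let _ = gossipsub.publish(topic.clone(), data.clone());
    // ... and a pre-computed hash can now be passed directly.
    let _ = gossipsub.publish(topic.hash(), data);
}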
diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 2fc604d1c25..f147c5bb2c8 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identify" edition = "2021" rust-version = "1.56.1" description = "Nodes identifcation protocol for libp2p" -version = "0.38.0" +version = "0.39.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,12 +14,12 @@ categories = ["network-programming", "asynchronous"] asynchronous-codec = "0.6" futures = "0.3.1" futures-timer = "3.0.2" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4.1" lru = "0.7.2" -prost-codec = { version = "0.1", path = "../../misc/prost-codec" } -prost = "0.10" +prost-codec = { version = "0.2", path = "../../misc/prost-codec" } +prost = "0.11" smallvec = "1.6.1" thiserror = "1.0" void = "1.0" @@ -37,4 +37,4 @@ libp2p = { path = "../..", default-features = false, features = [ ]} [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/identify.rs index 9ed56b3265e..d30e98e1400 100644 --- a/protocols/identify/src/identify.rs +++ b/protocols/identify/src/identify.rs @@ -26,9 +26,9 @@ use libp2p_core::{ Multiaddr, PeerId, PublicKey, }; use libp2p_swarm::{ - dial_opts::{self, DialOpts}, - AddressScore, ConnectionHandler, ConnectionHandlerUpgrErr, DialError, IntoConnectionHandler, - NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, + dial_opts::DialOpts, AddressScore, ConnectionHandler, ConnectionHandlerUpgrErr, DialError, + IntoConnectionHandler, NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, + NotifyHandler, PollParameters, }; use lru::LruCache; use std::{ @@ -196,9 +196,7 @@ impl Identify { if self.pending_push.insert(p) && !self.connected.contains_key(&p) { let handler = self.new_handler(); self.events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(p) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(p).build(), handler, }); } diff --git a/protocols/identify/src/lib.rs b/protocols/identify/src/lib.rs index f5de8f7a6ac..2f73a5a0cad 100644 --- a/protocols/identify/src/lib.rs +++ b/protocols/identify/src/lib.rs @@ -45,12 +45,13 @@ //! [`IdentifyInfo`]: self::IdentifyInfo pub use self::identify::{Identify, IdentifyConfig, IdentifyEvent}; -pub use self::protocol::{IdentifyInfo, UpgradeError}; +pub use self::protocol::{IdentifyInfo, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; mod handler; mod identify; mod protocol; +#[allow(clippy::derive_partial_eq_without_eq)] mod structs_proto { include!(concat!(env!("OUT_DIR"), "/structs.rs")); } diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 735fbcb342b..163ac0aa396 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -34,6 +34,10 @@ use void::Void; const MAX_MESSAGE_SIZE_BYTES: usize = 4096; +pub const PROTOCOL_NAME: &[u8; 14] = b"/ipfs/id/1.0.0"; + +pub const PUSH_PROTOCOL_NAME: &[u8; 19] = b"/ipfs/id/push/1.0.0"; + /// Substream upgrade protocol for `/ipfs/id/1.0.0`. 
#[derive(Debug, Clone)] pub struct IdentifyProtocol; @@ -104,7 +108,7 @@ impl UpgradeInfo for IdentifyProtocol { type InfoIter = iter::Once; fn protocol_info(&self) -> Self::InfoIter { - iter::once(b"/ipfs/id/1.0.0") + iter::once(PROTOCOL_NAME) } } @@ -136,7 +140,7 @@ impl UpgradeInfo for IdentifyPushProtocol { type InfoIter = iter::Once; fn protocol_info(&self) -> Self::InfoIter { - iter::once(b"/ipfs/id/push/1.0.0") + iter::once(PUSH_PROTOCOL_NAME) } } diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 66730eecde5..2f41a093019 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,7 +1,33 @@ -# 0.39.0 [unreleased] +# 0.41.0 [unreleased] + +- Remove deprecated `set_protocol_name()` from `KademliaConfig` & `KademliaProtocolConfig`. + Use `set_protocol_names()` instead. See [PR 2866]. + +[PR 2866]: https://github.com/libp2p/rust-libp2p/pull/2866 + +# 0.40.0 + +- Add support for multiple protocol names. Update `Kademlia`, `KademliaConfig`, + and `KademliaProtocolConfig` accordingly. See [Issue 2837]. See [PR 2846]. + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +[Issue 2837]: https://github.com/libp2p/rust-libp2p/issues/2837 +[PR 2846]: https://github.com/libp2p/rust-libp2p/pull/2846 + +# 0.39.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.38.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 6686664f9b2..c65e34ecdbc 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-kad" edition = "2021" rust-version = "1.56.1" description = "Kademlia protocol for libp2p" -version = "0.39.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -18,9 +18,9 @@ fnv = "1.0" asynchronous-codec = "0.6" futures = "0.3.1" log = "0.4" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } -prost = "0.10" +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +prost = "0.11" rand = "0.7.2" sha2 = "0.10.0" smallvec = "1.6.1" @@ -29,7 +29,7 @@ unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0" futures-timer = "3.0.2" instant = "0.1.11" -_serde = { package = "serde", version = "1.0", optional = true, features = ["derive"] } +serde = { version = "1.0", optional = true, features = ["derive"] } thiserror = "1" [dev-dependencies] @@ -40,7 +40,7 @@ libp2p-yamux = { path = "../../muxers/yamux" } quickcheck = "0.9.0" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [features] -serde = ["_serde", "bytes/serde"] +serde = ["dep:serde", "bytes/serde"] diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 59d63b36e9d..b267f87d386 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -214,13 +214,17 @@ impl Default for KademliaConfig { } impl KademliaConfig { - /// Sets a custom protocol name. + /// Sets custom protocol names. /// /// Kademlia nodes only communicate with other nodes using the same protocol - /// name. 
Using a custom name therefore allows to segregate the DHT from + /// name. Using custom name(s) therefore allows to segregate the DHT from /// others, if that is desired. - pub fn set_protocol_name(&mut self, name: impl Into>) -> &mut Self { - self.protocol_config.set_protocol_name(name); + /// + /// More than one protocol name can be supplied. In this case the node will + /// be able to talk to other nodes supporting any of the provided names. + /// Multiple names must be used with caution to avoid network partitioning. + pub fn set_protocol_names(&mut self, names: Vec>) -> &mut Self { + self.protocol_config.set_protocol_names(names); self } @@ -403,8 +407,8 @@ where } /// Get the protocol name of this kademlia instance. - pub fn protocol_name(&self) -> &[u8] { - self.protocol_config.protocol_name() + pub fn protocol_names(&self) -> &[Cow<'static, [u8]>] { + self.protocol_config.protocol_names() } /// Creates a new `Kademlia` network behaviour with the given configuration. @@ -565,9 +569,7 @@ where kbucket::InsertResult::Pending { disconnected } => { let handler = self.new_handler(); self.queued_events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(disconnected.into_preimage()) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(disconnected.into_preimage()).build(), handler, }); RoutingUpdate::Pending @@ -1162,7 +1164,6 @@ where let handler = self.new_handler(); self.queued_events.push_back(NetworkBehaviourAction::Dial { opts: DialOpts::peer_id(disconnected.into_preimage()) - .condition(dial_opts::PeerCondition::Disconnected) .build(), handler, }) @@ -2342,9 +2343,7 @@ where query.inner.pending_rpcs.push((peer_id, event)); let handler = self.new_handler(); self.queued_events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(peer_id) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(peer_id).build(), handler, }); } diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 1f67be5a19d..aab7fa0ef28 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -56,12 +56,9 @@ fn build_node() -> (Multiaddr, TestSwarm) { fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let noise_keys = noise::Keypair::::new() - .into_authentic(&local_key) - .unwrap(); let transport = MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate(noise::NoiseAuthenticated::xx(&local_key).unwrap()) .multiplex(yamux::YamuxConfig::default()) .boxed(); diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index bcadb57f44c..5be9ae17737 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -658,14 +658,7 @@ where let pos = self .inbound_substreams .iter() - .position(|state| match state { - InboundSubstreamState::WaitingUser(ref conn_id, _) - if conn_id == &request_id.connec_unique_id => - { - true - } - _ => false, - }); + .position(|state| matches!(state, InboundSubstreamState::WaitingUser(ref conn_id, _) if conn_id == &request_id.connec_unique_id)); if let Some(pos) = pos { let (conn_id, substream) = match self.inbound_substreams.remove(pos) { @@ -737,14 +730,7 @@ where let pos = self .inbound_substreams .iter() - .position(|state| match state { - 
InboundSubstreamState::WaitingUser(ref conn_id, _) - if conn_id == &request_id.connec_unique_id => - { - true - } - _ => false, - }); + .position(|state| matches!(state, InboundSubstreamState::WaitingUser(ref conn_id, _) if conn_id == &request_id.connec_unique_id)); if let Some(pos) = pos { let (conn_id, substream) = match self.inbound_substreams.remove(pos) { diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index e46e2b16b54..de6f8159e0b 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -39,9 +39,6 @@ // be useful later for record store #![allow(dead_code)] -#[cfg(feature = "serde")] -extern crate _serde as serde; - pub mod handler; pub mod kbucket; pub mod protocol; @@ -52,6 +49,7 @@ mod behaviour; mod jobs; mod query; +#[allow(clippy::derive_partial_eq_without_eq)] mod dht_proto { include!(concat!(env!("OUT_DIR"), "/dht.pb.rs")); } diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 656917b54f6..707edd8fe02 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -142,21 +142,21 @@ impl From for proto::message::Peer { // `OutboundUpgrade` to be just a single message #[derive(Debug, Clone)] pub struct KademliaProtocolConfig { - protocol_name: Cow<'static, [u8]>, + protocol_names: Vec>, /// Maximum allowed size of a packet. max_packet_size: usize, } impl KademliaProtocolConfig { /// Returns the configured protocol name. - pub fn protocol_name(&self) -> &[u8] { - &self.protocol_name + pub fn protocol_names(&self) -> &[Cow<'static, [u8]>] { + &self.protocol_names } - /// Modifies the protocol name used on the wire. Can be used to create incompatibilities + /// Modifies the protocol names used on the wire. Can be used to create incompatibilities /// between networks on purpose. - pub fn set_protocol_name(&mut self, name: impl Into>) { - self.protocol_name = name.into(); + pub fn set_protocol_names(&mut self, names: Vec>) { + self.protocol_names = names; } /// Modifies the maximum allowed size of a single Kademlia packet. @@ -168,7 +168,7 @@ impl KademliaProtocolConfig { impl Default for KademliaProtocolConfig { fn default() -> Self { KademliaProtocolConfig { - protocol_name: Cow::Borrowed(DEFAULT_PROTO_NAME), + protocol_names: iter::once(Cow::Borrowed(DEFAULT_PROTO_NAME)).collect(), max_packet_size: DEFAULT_MAX_PACKET_SIZE, } } @@ -176,10 +176,10 @@ impl Default for KademliaProtocolConfig { impl UpgradeInfo for KademliaProtocolConfig { type Info = Cow<'static, [u8]>; - type InfoIter = iter::Once; + type InfoIter = std::vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol_name.clone()) + self.protocol_names.clone().into_iter() } } diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs index e321992e5c7..2a40292c243 100644 --- a/protocols/kad/src/record.rs +++ b/protocols/kad/src/record.rs @@ -32,7 +32,6 @@ use std::hash::{Hash, Hasher}; /// The (opaque) key of a record. #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(crate = "_serde"))] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Key(Bytes); diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 320f8b13a2e..2be2db4079b 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,6 +1,23 @@ -# 0.39.0 [unreleased] +# 0.40.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Allow users to choose between async-io and tokio runtime + in the mdns protocol implementation. 
`async-io` is a default + feature, with an additional `tokio` feature (see [PR 2748]) + +- Fix high CPU usage with Tokio library (see [PR 2748]). + +- Update to `libp2p-core` `v0.36.0`. + +[PR 2748]: https://github.com/libp2p/rust-libp2p/pull/2748 + +# 0.39.0 - Update to `libp2p-swarm` `v0.38.0`. +- Update to `if-watch` `v1.1.1`. + +- Update to `libp2p-core` `v0.35.0`. # 0.38.0 diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index f97c185030a..2ec4ac44958 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = "1.56.1" -version = "0.39.0" +version = "0.40.0" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -11,22 +11,39 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = "1.3.1" data-encoding = "2.3.2" dns-parser = "0.8.0" futures = "0.3.13" -if-watch = "1.0.0" +if-watch = "1.1.1" lazy_static = "1.4.0" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4.14" rand = "0.8.3" smallvec = "1.6.1" socket2 = { version = "0.4.0", features = ["all"] } void = "1.0.2" +async-io = { version = "1.3.1", optional = true } +tokio = { version = "1.19", default-features = false, features = ["net", "time"], optional = true} + +[features] +default = ["async-io"] +tokio = ["dep:tokio"] +async-io = ["dep:async-io"] + [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } env_logger = "0.9.0" -libp2p = { path = "../..", default-features = false, features = ["mdns", "tcp-async-io", "dns-async-std", "websocket", "noise", "mplex", "yamux"] } -tokio = { version = "1.15", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } +libp2p = { path = "../..", default-features = false, features = ["mdns-async-io", "tcp-async-io", "dns-async-std", "tcp-tokio", "dns-tokio", "websocket", "noise", "mplex", "yamux"] } +tokio = { version = "1.19", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } + + +[[test]] +name = "use-async-std" +required-features = ["async-io"] + +[[test]] +name = "use-tokio" +required-features = ["tokio"] + diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 244b2b784dd..854bd885a22 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -19,11 +19,14 @@ // DEALINGS IN THE SOFTWARE. mod iface; +mod socket; +mod timer; use self::iface::InterfaceState; +use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::MdnsConfig; -use async_io::Timer; use futures::prelude::*; +use futures::Stream; use if_watch::{IfEvent, IfWatcher}; use libp2p_core::transport::ListenerId; use libp2p_core::{Multiaddr, PeerId}; @@ -35,10 +38,24 @@ use smallvec::SmallVec; use std::collections::hash_map::{Entry, HashMap}; use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, task::Poll, time::Instant}; +#[cfg(feature = "async-io")] +use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer}; + +/// The type of a [`GenMdns`] using the `async-io` implementation. 
+#[cfg(feature = "async-io")] +pub type Mdns = GenMdns; + +#[cfg(feature = "tokio")] +use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer}; + +/// The type of a [`GenMdns`] using the `tokio` implementation. +#[cfg(feature = "tokio")] +pub type TokioMdns = GenMdns; + /// A `NetworkBehaviour` for mDNS. Automatically discovers peers on the local network and adds /// them to the topology. #[derive(Debug)] -pub struct Mdns { +pub struct GenMdns { /// InterfaceState config. config: MdnsConfig, @@ -46,7 +63,7 @@ pub struct Mdns { if_watch: IfWatcher, /// Mdns interface states. - iface_states: HashMap, + iface_states: HashMap>, /// List of nodes that we have discovered, the address, and when their TTL expires. /// @@ -57,10 +74,13 @@ pub struct Mdns { /// Future that fires when the TTL of at least one node in `discovered_nodes` expires. /// /// `None` if `discovered_nodes` is empty. - closest_expiration: Option, + closest_expiration: Option, } -impl Mdns { +impl GenMdns +where + T: Builder, +{ /// Builds a new `Mdns` behaviour. pub async fn new(config: MdnsConfig) -> io::Result { let if_watch = if_watch::IfWatcher::new().await?; @@ -91,11 +111,15 @@ impl Mdns { *expires = now; } } - self.closest_expiration = Some(Timer::at(now)); + self.closest_expiration = Some(T::at(now)); } } -impl NetworkBehaviour for Mdns { +impl NetworkBehaviour for GenMdns +where + T: Builder + Stream, + S: AsyncSocket, +{ type ConnectionHandler = DummyConnectionHandler; type OutEvent = MdnsEvent; @@ -219,8 +243,9 @@ impl NetworkBehaviour for Mdns { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); } if let Some(closest_expiration) = closest_expiration { - let mut timer = Timer::at(closest_expiration); - let _ = Pin::new(&mut timer).poll(cx); + let mut timer = T::at(closest_expiration); + let _ = Pin::new(&mut timer).poll_next(cx); + self.closest_expiration = Some(timer); } Poll::Pending diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index e4971e36b1a..b2d0506b226 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -23,9 +23,8 @@ mod query; use self::dns::{build_query, build_query_response, build_service_discovery_response}; use self::query::MdnsPacket; +use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::MdnsConfig; -use async_io::{Async, Timer}; -use futures::prelude::*; use libp2p_core::{address_translation, multiaddr::Protocol, Multiaddr, PeerId}; use libp2p_swarm::PollParameters; use socket2::{Domain, Socket, Type}; @@ -34,20 +33,20 @@ use std::{ io, iter, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, pin::Pin, - task::Context, + task::{Context, Poll}, time::{Duration, Instant}, }; /// An mDNS instance for a networking interface. To discover all peers when having multiple /// interfaces an [`InterfaceState`] is required for each interface. #[derive(Debug)] -pub struct InterfaceState { +pub struct InterfaceState { /// Address this instance is bound to. addr: IpAddr, /// Receive socket. - recv_socket: Async, + recv_socket: U, /// Send socket. - send_socket: Async, + send_socket: U, /// Buffer used for receiving data from the main socket. /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000 /// bytes, if it can be ensured that all participating devices can handle such large packets. @@ -60,7 +59,7 @@ pub struct InterfaceState { /// Discovery interval. query_interval: Duration, /// Discovery timer. 
- timeout: Timer, + timeout: T, /// Multicast address. multicast_addr: IpAddr, /// Discovered addresses. @@ -69,7 +68,11 @@ pub struct InterfaceState { ttl: Duration, } -impl InterfaceState { +impl InterfaceState +where + U: AsyncSocket, + T: Builder + futures::Stream, +{ /// Builds a new [`InterfaceState`]. pub fn new(addr: IpAddr, config: MdnsConfig) -> io::Result { log::info!("creating instance on iface {}", addr); @@ -82,8 +85,8 @@ impl InterfaceState { socket.bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 5353).into())?; socket.set_multicast_loop_v4(true)?; socket.set_multicast_ttl_v4(255)?; - socket.join_multicast_v4(&*crate::IPV4_MDNS_MULTICAST_ADDRESS, &addr)?; - Async::new(UdpSocket::from(socket))? + socket.join_multicast_v4(&crate::IPV4_MDNS_MULTICAST_ADDRESS, &addr)?; + U::from_std(UdpSocket::from(socket))? } IpAddr::V6(_) => { let socket = Socket::new(Domain::IPV6, Type::DGRAM, Some(socket2::Protocol::UDP))?; @@ -93,8 +96,8 @@ impl InterfaceState { socket.bind(&SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 5353).into())?; socket.set_multicast_loop_v6(true)?; // TODO: find interface matching addr. - socket.join_multicast_v6(&*crate::IPV6_MDNS_MULTICAST_ADDRESS, 0)?; - Async::new(UdpSocket::from(socket))? + socket.join_multicast_v6(&crate::IPV6_MDNS_MULTICAST_ADDRESS, 0)?; + U::from_std(UdpSocket::from(socket))? } }; let bind_addr = match addr { @@ -107,7 +110,8 @@ impl InterfaceState { SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0) } }; - let send_socket = Async::new(UdpSocket::bind(bind_addr)?)?; + let send_socket = U::from_std(UdpSocket::bind(bind_addr)?)?; + // randomize timer to prevent all converging and firing at the same time. let query_interval = { use rand::Rng; @@ -127,19 +131,18 @@ impl InterfaceState { send_buffer: Default::default(), discovered: Default::default(), query_interval, - timeout: Timer::interval_at(Instant::now(), query_interval), + timeout: T::interval_at(Instant::now(), query_interval), multicast_addr, ttl: config.ttl, }) } pub fn reset_timer(&mut self) { - self.timeout.set_interval(self.query_interval); + self.timeout = T::interval(self.query_interval); } pub fn fire_timer(&mut self) { - self.timeout - .set_interval_at(Instant::now(), self.query_interval); + self.timeout = T::interval_at(Instant::now(), self.query_interval); } fn inject_mdns_packet(&mut self, packet: MdnsPacket, params: &impl PollParameters) { @@ -171,17 +174,17 @@ impl InterfaceState { let new_expiration = Instant::now() + peer.ttl(); - let mut addrs: Vec = Vec::new(); for addr in peer.addresses() { if let Some(new_addr) = address_translation(addr, &observed) { - addrs.push(new_addr.clone()) + self.discovered.push_back(( + *peer.id(), + new_addr.clone(), + new_expiration, + )); } - addrs.push(addr.clone()) - } - for addr in addrs { self.discovered - .push_back((*peer.id(), addr, new_expiration)); + .push_back((*peer.id(), addr.clone(), new_expiration)); } } } @@ -198,43 +201,49 @@ impl InterfaceState { params: &impl PollParameters, ) -> Option<(PeerId, Multiaddr, Instant)> { // Poll receive socket. 
- while self.recv_socket.poll_readable(cx).is_ready() { - match self - .recv_socket - .recv_from(&mut self.recv_buffer) - .now_or_never() - { - Some(Ok((len, from))) => { + while let Poll::Ready(data) = + Pin::new(&mut self.recv_socket).poll_read(cx, &mut self.recv_buffer) + { + match data { + Ok((len, from)) => { if let Some(packet) = MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from) { self.inject_mdns_packet(packet, params); } } - Some(Err(err)) => log::error!("Failed reading datagram: {}", err), - None => {} + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + // No more bytes available on the socket to read + break; + } + Err(err) => { + log::error!("failed reading datagram: {}", err); + } } } + // Send responses. - while self.send_socket.poll_writable(cx).is_ready() { - if let Some(packet) = self.send_buffer.pop_front() { - match self - .send_socket - .send_to(&packet, SocketAddr::new(self.multicast_addr, 5353)) - .now_or_never() - { - Some(Ok(_)) => log::trace!("sent packet on iface {}", self.addr), - Some(Err(err)) => { - log::error!("error sending packet on iface {}: {}", self.addr, err) - } - None => self.send_buffer.push_front(packet), + while let Some(packet) = self.send_buffer.pop_front() { + match Pin::new(&mut self.send_socket).poll_write( + cx, + &packet, + SocketAddr::new(self.multicast_addr, 5353), + ) { + Poll::Ready(Ok(_)) => log::trace!("sent packet on iface {}", self.addr), + Poll::Ready(Err(err)) => { + log::error!("error sending packet on iface {} {}", self.addr, err); + } + Poll::Pending => { + self.send_buffer.push_front(packet); + break; } - } else if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { - log::trace!("sending query on iface {}", self.addr); - self.send_buffer.push_back(build_query()); - } else { - break; } } + + if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { + log::trace!("sending query on iface {}", self.addr); + self.send_buffer.push_back(build_query()); + } + // Emit discovered event. self.discovered.pop_front() } diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 5bb190f4c2c..4590e1e266e 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -246,7 +246,7 @@ fn query_response_packet(id: u16, peer_id: &[u8], records: &[Vec], ttl: u32) fn duration_to_secs(duration: Duration) -> u32 { let secs = duration .as_secs() - .saturating_add(if duration.subsec_nanos() > 0 { 1 } else { 0 }); + .saturating_add(u64::from(duration.subsec_nanos() > 0)); cmp::min(secs, From::from(u32::max_value())) as u32 } diff --git a/protocols/mdns/src/behaviour/socket.rs b/protocols/mdns/src/behaviour/socket.rs new file mode 100644 index 00000000000..4406ed33fde --- /dev/null +++ b/protocols/mdns/src/behaviour/socket.rs @@ -0,0 +1,134 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{ + io::Error, + marker::Unpin, + net::{SocketAddr, UdpSocket}, + task::{Context, Poll}, +}; + +/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async mode +pub trait AsyncSocket: Unpin + Send + 'static { + /// Create the async socket from the [`std::net::UdpSocket`] + fn from_std(socket: UdpSocket) -> std::io::Result + where + Self: Sized; + + /// Attempts to receive a single packet on the socket from the remote address to which it is connected. + fn poll_read( + &mut self, + _cx: &mut Context, + _buf: &mut [u8], + ) -> Poll>; + + /// Attempts to send data on the socket to a given address. + fn poll_write( + &mut self, + _cx: &mut Context, + _packet: &[u8], + _to: SocketAddr, + ) -> Poll>; +} + +#[cfg(feature = "async-io")] +pub mod asio { + use super::*; + use async_io::Async; + use futures::FutureExt; + + /// AsyncIo UdpSocket + pub type AsyncUdpSocket = Async; + + impl AsyncSocket for AsyncUdpSocket { + fn from_std(socket: UdpSocket) -> std::io::Result { + Async::new(socket) + } + + fn poll_read( + &mut self, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll> { + // Poll receive socket. + futures::ready!(self.poll_readable(cx))?; + match self.recv_from(buf).now_or_never() { + Some(data) => Poll::Ready(data), + None => Poll::Pending, + } + } + + fn poll_write( + &mut self, + cx: &mut Context, + packet: &[u8], + to: SocketAddr, + ) -> Poll> { + futures::ready!(self.poll_writable(cx))?; + match self.send_to(packet, to).now_or_never() { + Some(Ok(_)) => Poll::Ready(Ok(())), + Some(Err(err)) => Poll::Ready(Err(err)), + None => Poll::Pending, + } + } + } +} + +#[cfg(feature = "tokio")] +pub mod tokio { + use super::*; + use ::tokio::{io::ReadBuf, net::UdpSocket as TkUdpSocket}; + + /// Tokio ASync Socket` + pub type TokioUdpSocket = TkUdpSocket; + + impl AsyncSocket for TokioUdpSocket { + fn from_std(socket: UdpSocket) -> std::io::Result { + socket.set_nonblocking(true)?; + TokioUdpSocket::from_std(socket) + } + + fn poll_read( + &mut self, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll> { + let mut rbuf = ReadBuf::new(buf); + match self.poll_recv_from(cx, &mut rbuf) { + Poll::Pending => Poll::Pending, + Poll::Ready(Err(err)) => Poll::Ready(Err(err)), + Poll::Ready(Ok(addr)) => Poll::Ready(Ok((rbuf.filled().len(), addr))), + } + } + + fn poll_write( + &mut self, + cx: &mut Context, + packet: &[u8], + to: SocketAddr, + ) -> Poll> { + match self.poll_send_to(cx, packet, to) { + Poll::Pending => Poll::Pending, + Poll::Ready(Err(err)) => Poll::Ready(Err(err)), + Poll::Ready(Ok(_len)) => Poll::Ready(Ok(())), + } + } + } +} diff --git a/protocols/mdns/src/behaviour/timer.rs b/protocols/mdns/src/behaviour/timer.rs new file mode 100644 index 00000000000..fbdeb065b70 --- /dev/null +++ b/protocols/mdns/src/behaviour/timer.rs @@ -0,0 +1,128 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{ + marker::Unpin, + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + +/// Simple wrapper for the differents type of timers +#[derive(Debug)] +pub struct Timer { + inner: T, +} + +/// Builder interface to homogenize the differents implementations +pub trait Builder: Send + Unpin + 'static { + /// Creates a timer that emits an event once at the given time instant. + fn at(instant: Instant) -> Self; + + /// Creates a timer that emits events periodically. + fn interval(duration: Duration) -> Self; + + /// Creates a timer that emits events periodically, starting at start. + fn interval_at(start: Instant, duration: Duration) -> Self; +} + +#[cfg(feature = "async-io")] +pub mod asio { + use super::*; + use async_io::Timer as AsioTimer; + use futures::Stream; + + /// Async Timer + pub type AsyncTimer = Timer; + + impl Builder for AsyncTimer { + fn at(instant: Instant) -> Self { + Self { + inner: AsioTimer::at(instant), + } + } + + fn interval(duration: Duration) -> Self { + Self { + inner: AsioTimer::interval(duration), + } + } + + fn interval_at(start: Instant, duration: Duration) -> Self { + Self { + inner: AsioTimer::interval_at(start, duration), + } + } + } + + impl Stream for AsyncTimer { + type Item = Instant; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_next(cx) + } + } +} + +#[cfg(feature = "tokio")] +pub mod tokio { + use super::*; + use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior}; + use futures::Stream; + + /// Tokio wrapper + pub type TokioTimer = Timer; + + impl Builder for TokioTimer { + fn at(instant: Instant) -> Self { + // Taken from: https://docs.rs/async-io/1.7.0/src/async_io/lib.rs.html#91 + let mut inner = time::interval_at( + TokioInstant::from_std(instant), + Duration::new(std::u64::MAX, 1_000_000_000 - 1), + ); + inner.set_missed_tick_behavior(MissedTickBehavior::Skip); + Self { inner } + } + + fn interval(duration: Duration) -> Self { + let mut inner = time::interval_at(TokioInstant::now() + duration, duration); + inner.set_missed_tick_behavior(MissedTickBehavior::Skip); + Self { inner } + } + + fn interval_at(start: Instant, duration: Duration) -> Self { + let mut inner = time::interval_at(TokioInstant::from_std(start), duration); + inner.set_missed_tick_behavior(MissedTickBehavior::Skip); + Self { inner } + } + } + + impl Stream for TokioTimer { + type Item = 
TokioInstant; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_tick(cx).map(Some) + } + + fn size_hint(&self) -> (usize, Option) { + (std::usize::MAX, None) + } + } +} diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index a99eab691a2..3b484c91daa 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -26,16 +26,22 @@ //! //! # Usage //! -//! This crate provides the `Mdns` struct which implements the `NetworkBehaviour` trait. This -//! struct will automatically discover other libp2p nodes on the local network. +//! This crate provides a `Mdns` and `TokioMdns`, depending on the enabled features, which +//! implements the `NetworkBehaviour` trait. This struct will automatically discover other +//! libp2p nodes on the local network. //! use lazy_static::lazy_static; use std::net::{Ipv4Addr, Ipv6Addr}; use std::time::Duration; mod behaviour; +pub use crate::behaviour::{GenMdns, MdnsEvent}; -pub use crate::behaviour::{Mdns, MdnsEvent}; +#[cfg(feature = "async-io")] +pub use crate::behaviour::Mdns; + +#[cfg(feature = "tokio")] +pub use crate::behaviour::TokioMdns; /// The DNS service name for all libp2p peers used to query for addresses. const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; diff --git a/protocols/mdns/tests/smoke.rs b/protocols/mdns/tests/use-async-std.rs similarity index 87% rename from protocols/mdns/tests/smoke.rs rename to protocols/mdns/tests/use-async-std.rs index d123e5abce7..683aed338ce 100644 --- a/protocols/mdns/tests/smoke.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -28,6 +28,35 @@ use libp2p::{ use std::error::Error; use std::time::Duration; +#[async_std::test] +async fn test_discovery_async_std_ipv4() -> Result<(), Box> { + run_discovery_test(MdnsConfig::default()).await +} + +#[async_std::test] +async fn test_discovery_async_std_ipv6() -> Result<(), Box> { + let config = MdnsConfig { + enable_ipv6: true, + ..Default::default() + }; + run_discovery_test(config).await +} + +#[async_std::test] +async fn test_expired_async_std() -> Result<(), Box> { + env_logger::try_init().ok(); + let config = MdnsConfig { + ttl: Duration::from_secs(1), + query_interval: Duration::from_secs(10), + ..Default::default() + }; + + async_std::future::timeout(Duration::from_secs(6), run_peer_expiration_test(config)) + .await + .map(|_| ()) + .map_err(|e| Box::new(e) as Box) +} + async fn create_swarm(config: MdnsConfig) -> Result, Box> { let id_keys = identity::Keypair::generate_ed25519(); let peer_id = PeerId::from(id_keys.public()); @@ -78,34 +107,6 @@ async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box> { } } -#[async_std::test] -async fn test_discovery_async_std_ipv4() -> Result<(), Box> { - run_discovery_test(MdnsConfig::default()).await -} - -#[tokio::test] -async fn test_discovery_tokio_ipv4() -> Result<(), Box> { - run_discovery_test(MdnsConfig::default()).await -} - -#[async_std::test] -async fn test_discovery_async_std_ipv6() -> Result<(), Box> { - let config = MdnsConfig { - enable_ipv6: true, - ..Default::default() - }; - run_discovery_test(config).await -} - -#[tokio::test] -async fn test_discovery_tokio_ipv6() -> Result<(), Box> { - let config = MdnsConfig { - enable_ipv6: true, - ..Default::default() - }; - run_discovery_test(config).await -} - async fn run_peer_expiration_test(config: MdnsConfig) -> Result<(), Box> { let mut a = create_swarm(config.clone()).await?; let mut b = create_swarm(config).await?; @@ -136,32 +137,3 @@ async fn run_peer_expiration_test(config: MdnsConfig) 
-> Result<(), Box Result<(), Box> { - env_logger::try_init().ok(); - let config = MdnsConfig { - ttl: Duration::from_secs(1), - query_interval: Duration::from_secs(10), - ..Default::default() - }; - - async_std::future::timeout(Duration::from_secs(6), run_peer_expiration_test(config)) - .await - .map(|_| ()) - .map_err(|e| Box::new(e) as Box) -} - -#[tokio::test] -async fn test_expired_tokio() -> Result<(), Box> { - env_logger::try_init().ok(); - let config = MdnsConfig { - ttl: Duration::from_secs(1), - query_interval: Duration::from_secs(10), - ..Default::default() - }; - - tokio::time::timeout(Duration::from_secs(6), run_peer_expiration_test(config)) - .await - .unwrap() -} diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs new file mode 100644 index 00000000000..9d6cacd76cb --- /dev/null +++ b/protocols/mdns/tests/use-tokio.rs @@ -0,0 +1,153 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use futures::StreamExt; +use libp2p::{ + identity, + mdns::{MdnsConfig, MdnsEvent, TokioMdns}, + swarm::{Swarm, SwarmEvent}, + PeerId, +}; +use std::error::Error; +use std::time::Duration; + +#[tokio::test] +async fn test_discovery_tokio_ipv4() -> Result<(), Box> { + run_discovery_test(MdnsConfig::default()).await +} + +#[tokio::test] +async fn test_discovery_tokio_ipv6() -> Result<(), Box> { + let config = MdnsConfig { + enable_ipv6: true, + ..Default::default() + }; + run_discovery_test(config).await +} + +#[tokio::test] +async fn test_expired_tokio() -> Result<(), Box> { + env_logger::try_init().ok(); + let config = MdnsConfig { + ttl: Duration::from_secs(1), + query_interval: Duration::from_secs(10), + ..Default::default() + }; + + run_peer_expiration_test(config).await +} + +async fn create_swarm(config: MdnsConfig) -> Result, Box> { + let id_keys = identity::Keypair::generate_ed25519(); + let peer_id = PeerId::from(id_keys.public()); + let transport = libp2p::tokio_development_transport(id_keys)?; + let behaviour = TokioMdns::new(config).await?; + let mut swarm = Swarm::new(transport, behaviour, peer_id); + swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; + Ok(swarm) +} + +async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box> { + env_logger::try_init().ok(); + let mut a = create_swarm(config.clone()).await?; + let mut b = create_swarm(config).await?; + let mut discovered_a = false; + let mut discovered_b = false; + loop { + futures::select! 
{ + ev = a.select_next_some() => match ev { + SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + if discovered_a { + return Ok(()); + } else { + discovered_b = true; + } + } + } + } + _ => {} + }, + ev = b.select_next_some() => match ev { + SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + if discovered_b { + return Ok(()); + } else { + discovered_a = true; + } + } + } + } + _ => {} + } + } + } +} + +async fn run_peer_expiration_test(config: MdnsConfig) -> Result<(), Box> { + let mut a = create_swarm(config.clone()).await?; + let mut b = create_swarm(config).await?; + let expired_at = tokio::time::sleep(Duration::from_secs(15)); + tokio::pin!(expired_at); + + loop { + tokio::select! { + _ev = &mut expired_at => { + panic!(); + }, + ev = a.select_next_some() => match ev { + SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + return Ok(()); + } + } + } + SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + expired_at.as_mut().reset(tokio::time::Instant::now() + tokio::time::Duration::from_secs(2)); + } + } + } + _ => {} + }, + ev = b.select_next_some() => match ev { + SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + return Ok(()); + } + } + } + SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + expired_at.as_mut().reset(tokio::time::Instant::now() + tokio::time::Duration::from_secs(2)); + } + } + } + _ => {} + } + } + } +} diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index a31b17d02f5..b1037ac1ef7 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,7 +1,19 @@ -# 0.38.0 [unreleased] +# 0.39.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.38.0 - Update to `libp2p-swarm` `v0.38.0`. +- Expose `PROTOCOL_NAME`. See [PR 2734]. + +- Update to `libp2p-core` `v0.35.0`. + +[PR 2734]: https://github.com/libp2p/rust-libp2p/pull/2734/ + # 0.37.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 741cbf05fbc..aa2b596d5f1 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-ping" edition = "2021" rust-version = "1.56.1" description = "Ping protocol for libp2p" -version = "0.38.0" +version = "0.39.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,8 +14,8 @@ categories = ["network-programming", "asynchronous"] futures = "0.3.1" futures-timer = "3.0.2" instant = "0.1.11" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4.1" rand = "0.7.2" void = "1.0" diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 850f4ebc05f..f0e71fb070e 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol; +use crate::{protocol, PROTOCOL_NAME}; use futures::future::BoxFuture; use futures::prelude::*; use futures_timer::Delay; +use libp2p_core::upgrade::ReadyUpgrade; use libp2p_core::{upgrade::NegotiationError, UpgradeError}; use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, @@ -225,13 +226,13 @@ impl ConnectionHandler for Handler { type InEvent = Void; type OutEvent = crate::Result; type Error = Failure; - type InboundProtocol = protocol::Ping; - type OutboundProtocol = protocol::Ping; + type InboundProtocol = ReadyUpgrade<&'static [u8]>; + type OutboundProtocol = ReadyUpgrade<&'static [u8]>; type OutboundOpenInfo = (); type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(protocol::Ping, ()) + fn listen_protocol(&self) -> SubstreamProtocol, ()> { + SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) } fn inject_fully_negotiated_inbound(&mut self, stream: NegotiatedSubstream, (): ()) { @@ -274,7 +275,8 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll, (), crate::Result, Self::Error>> + { match self.state { State::Inactive { reported: true } => { return Poll::Pending; // nothing to do on this connection @@ -366,7 +368,7 @@ impl ConnectionHandler for Handler { } None => { self.outbound = Some(PingState::OpenStream); - let protocol = SubstreamProtocol::new(protocol::Ping, ()) + let protocol = SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) .with_timeout(self.config.timeout); return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol, diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 81133b86d74..2a01025ee6d 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -57,8 +57,8 @@ use std::{ note = "Use re-exports that omit `Ping` prefix, i.e. `libp2p::ping::Config` etc" )] pub use self::{ - Config as PingConfig, Event as PingEvent, Failure as PingFailure, Result as PingResult, - Success as PingSuccess, + protocol::PROTOCOL_NAME, Config as PingConfig, Event as PingEvent, Failure as PingFailure, + Result as PingResult, Success as PingSuccess, }; #[deprecated(since = "0.30.0", note = "Use libp2p::ping::Behaviour instead.")] pub use Behaviour as Ping; diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 499c5ad4a0f..3c44adcd0b4 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -20,11 +20,10 @@ use futures::prelude::*; use instant::Instant; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_swarm::NegotiatedSubstream; use rand::{distributions, prelude::*}; -use std::{io, iter, time::Duration}; -use void::Void; +use std::{io, time::Duration}; + +pub const PROTOCOL_NAME: &[u8] = b"/ipfs/ping/1.0.0"; /// The `Ping` protocol upgrade. 
/// @@ -50,35 +49,6 @@ pub struct Ping; const PING_SIZE: usize = 32; -impl UpgradeInfo for Ping { - type Info = &'static [u8]; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(b"/ipfs/ping/1.0.0") - } -} - -impl InboundUpgrade for Ping { - type Output = NegotiatedSubstream; - type Error = Void; - type Future = future::Ready>; - - fn upgrade_inbound(self, stream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - future::ok(stream) - } -} - -impl OutboundUpgrade for Ping { - type Output = NegotiatedSubstream; - type Error = Void; - type Future = future::Ready>; - - fn upgrade_outbound(self, stream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - future::ok(stream) - } -} - /// Sends a ping and waits for the pong. pub async fn send_ping(mut stream: S) -> io::Result<(S, Duration)> where diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index ac45949ced7..2f75c09fb3d 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -243,14 +243,11 @@ fn unsupported_doesnt_fail() { fn mk_transport(muxer: MuxerChoice) -> (PeerId, transport::Boxed<(PeerId, StreamMuxerBox)>) { let id_keys = identity::Keypair::generate_ed25519(); let peer_id = id_keys.public().to_peer_id(); - let noise_keys = noise::Keypair::::new() - .into_authentic(&id_keys) - .unwrap(); ( peer_id, TcpTransport::new(GenTcpConfig::default().nodelay(true)) .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate(noise::NoiseAuthenticated::xx(&id_keys).unwrap()) .multiplex(match muxer { MuxerChoice::Yamux => upgrade::EitherUpgrade::A(yamux::YamuxConfig::default()), MuxerChoice::Mplex => upgrade::EitherUpgrade::B(mplex::MplexConfig::default()), diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index cd615778196..1262a250880 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,7 +1,23 @@ -# 0.11.0 [unreleased] +# 0.12.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.11.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Expose `HOP_PROTOCOL_NAME` and `STOP_PROTOCOL_NAME`. See [PR 2734]. + +- Update to `libp2p-core` `v0.35.0`. + +[PR 2734]: https://github.com/libp2p/rust-libp2p/pull/2734/ +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.10.0 - Update to `libp2p-core` `v0.34.0`. 
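Note on the ping changes above: the bespoke `Ping` upgrade type is removed in favour of `ReadyUpgrade`, and `PROTOCOL_NAME` is now publicly re-exported. As a rough sketch that is not part of this diff, a custom `ConnectionHandler` wanting to negotiate the same wire protocol could declare it as follows, assuming the `libp2p_ping::PROTOCOL_NAME` re-export introduced above:

    use libp2p_core::upgrade::ReadyUpgrade;
    use libp2p_ping::PROTOCOL_NAME; // b"/ipfs/ping/1.0.0", newly public
    use libp2p_swarm::SubstreamProtocol;

    // `ReadyUpgrade` only negotiates the protocol name and hands the
    // substream back unchanged, which is all the ping handler needs.
    fn listen_protocol() -> SubstreamProtocol<ReadyUpgrade<&'static [u8]>, ()> {
        SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ())
    }

This mirrors the `listen_protocol` implementation in the handler diff above; the free function is purely illustrative.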
diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index 97d29ebdfd0..41964edcf41 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-relay" edition = "2021" rust-version = "1.56.1" description = "Communications relaying for libp2p" -version = "0.11.0" +version = "0.12.0" authors = ["Parity Technologies ", "Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -17,12 +17,12 @@ either = "1.6.0" futures = "0.3.1" futures-timer = "3" instant = "0.1.11" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4" pin-project = "1" -prost-codec = { version = "0.1", path = "../../misc/prost-codec" } -prost = "0.10" +prost-codec = { version = "0.2", path = "../../misc/prost-codec" } +prost = "0.11" rand = "0.8.4" smallvec = "1.6.1" static_assertions = "1" @@ -30,7 +30,7 @@ thiserror = "1.0" void = "1" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dev-dependencies] env_logger = "0.9.0" diff --git a/protocols/relay/examples/relay_v2.rs b/protocols/relay/examples/relay_v2.rs index 25d0bb7fc94..b89c88b2829 100644 --- a/protocols/relay/examples/relay_v2.rs +++ b/protocols/relay/examples/relay_v2.rs @@ -48,13 +48,12 @@ fn main() -> Result<(), Box> { let tcp_transport = TcpTransport::default(); - let noise_keys = noise::Keypair::::new() - .into_authentic(&local_key) - .expect("Signing libp2p-noise static DH keypair failed."); - let transport = tcp_transport .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate( + noise::NoiseAuthenticated::xx(&local_key) + .expect("Signing libp2p-noise static DH keypair failed."), + ) .multiplex(libp2p_yamux::YamuxConfig::default()) .boxed(); diff --git a/protocols/relay/src/v2.rs b/protocols/relay/src/v2.rs index 7219ab3d69c..dcfcdf609fb 100644 --- a/protocols/relay/src/v2.rs +++ b/protocols/relay/src/v2.rs @@ -21,6 +21,7 @@ //! Implementation of the [libp2p circuit relay v2 //! specification](https://github.com/libp2p/specs/issues/314). +#[allow(clippy::derive_partial_eq_without_eq)] mod message_proto { include!(concat!(env!("OUT_DIR"), "/message_v2.pb.rs")); } @@ -34,7 +35,8 @@ pub use protocol::{ inbound_hop::FatalUpgradeError as InboundHopFatalUpgradeError, inbound_stop::FatalUpgradeError as InboundStopFatalUpgradeError, outbound_hop::FatalUpgradeError as OutboundHopFatalUpgradeError, - outbound_stop::FatalUpgradeError as OutboundStopFatalUpgradeError, + outbound_stop::FatalUpgradeError as OutboundStopFatalUpgradeError, HOP_PROTOCOL_NAME, + STOP_PROTOCOL_NAME, }; /// The ID of an outgoing / incoming, relay / destination request. 
diff --git a/protocols/relay/src/v2/protocol.rs b/protocols/relay/src/v2/protocol.rs index ab2dc487b6f..6e9ccc14277 100644 --- a/protocols/relay/src/v2/protocol.rs +++ b/protocols/relay/src/v2/protocol.rs @@ -26,12 +26,12 @@ pub mod inbound_stop; pub mod outbound_hop; pub mod outbound_stop; -const HOP_PROTOCOL_NAME: &[u8; 31] = b"/libp2p/circuit/relay/0.2.0/hop"; -const STOP_PROTOCOL_NAME: &[u8; 32] = b"/libp2p/circuit/relay/0.2.0/stop"; +pub const HOP_PROTOCOL_NAME: &[u8; 31] = b"/libp2p/circuit/relay/0.2.0/hop"; +pub const STOP_PROTOCOL_NAME: &[u8; 32] = b"/libp2p/circuit/relay/0.2.0/stop"; const MAX_MESSAGE_SIZE: usize = 4096; -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Limit { duration: Option, data_in_bytes: Option, diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index 11120e0be1b..edf55a5d420 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,7 +1,20 @@ -# 0.8.0 [unreleased] +# 0.9.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.8.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. - Update to `libp2p-swarm` `v0.38.0`. +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.7.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index b8d81a87e20..3eee5c833e6 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-rendezvous" edition = "2021" rust-version = "1.56.1" description = "Rendezvous protocol for libp2p" -version = "0.8.0" +version = "0.9.0" authors = ["The COMIT guys "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,9 +12,9 @@ categories = ["network-programming", "asynchronous"] [dependencies] asynchronous-codec = "0.6" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } -prost = "0.10" +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } +prost = "0.11" void = "1" log = "0.4" futures = { version = "0.3", default-features = false, features = ["std"] } @@ -28,10 +28,10 @@ instant = "0.1.11" [dev-dependencies] async-trait = "0.1" -env_logger = "0.8" +env_logger = "0.9.0" libp2p = { path = "../..", default-features = false, features = ["ping", "identify", "tcp-async-io", "dns-async-std", "websocket", "noise", "mplex", "yamux", "rendezvous"] } rand = "0.8" tokio = { version = "1.15", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs index 3798cea70a8..375ebd6c228 100644 --- a/protocols/rendezvous/src/codec.rs +++ b/protocols/rendezvous/src/codec.rs @@ -182,7 +182,7 @@ impl NewRegistration { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Registration { pub namespace: Namespace, pub record: PeerRecord, @@ -594,6 +594,7 @@ impl From for ConversionError { #[error("The response code ({0:?}) cannot be mapped to our ErrorCode enum")] pub struct UnmappableStatusCode(wire::message::ResponseStatus); 
+#[allow(clippy::derive_partial_eq_without_eq)] mod wire { include!(concat!(env!("OUT_DIR"), "/rendezvous.pb.rs")); } diff --git a/protocols/rendezvous/tests/harness.rs b/protocols/rendezvous/tests/harness.rs index 555a5476bab..30dace245ff 100644 --- a/protocols/rendezvous/tests/harness.rs +++ b/protocols/rendezvous/tests/harness.rs @@ -27,7 +27,7 @@ use libp2p::core::transport::MemoryTransport; use libp2p::core::upgrade::SelectUpgrade; use libp2p::core::{identity, Multiaddr, PeerId, Transport}; use libp2p::mplex::MplexConfig; -use libp2p::noise::{Keypair, NoiseConfig, X25519Spec}; +use libp2p::noise::NoiseAuthenticated; use libp2p::swarm::{AddressScore, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::yamux::YamuxConfig; use std::fmt::Debug; @@ -43,14 +43,9 @@ where let identity = identity::Keypair::generate_ed25519(); let peer_id = PeerId::from(identity.public()); - let dh_keys = Keypair::::new() - .into_authentic(&identity) - .expect("failed to create dh_keys"); - let noise = NoiseConfig::xx(dh_keys).into_authenticated(); - let transport = MemoryTransport::default() .upgrade(Version::V1) - .authenticate(noise) + .authenticate(NoiseAuthenticated::xx(&identity).unwrap()) .multiplex(SelectUpgrade::new( YamuxConfig::default(), MplexConfig::new(), diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 8acc422ee40..2e7dd0f84d4 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,7 +1,15 @@ -# 0.20.0 [unreleased] +# 0.21.0 + +- Update to `libp2p-swarm` `v0.39.0`. + +- Update to `libp2p-core` `v0.36.0`. + +# 0.20.0 - Update to `libp2p-swarm` `v0.38.0`. +- Update to `libp2p-core` `v0.35.0`. + # 0.19.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 010647fe6b3..76e802cfca4 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = "1.56.1" description = "Generic Request/Response Protocols" -version = "0.20.0" +version = "0.21.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,8 +15,8 @@ async-trait = "0.1" bytes = "1" futures = "0.3.1" instant = "0.1.11" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } -libp2p-swarm = { version = "0.38.0", path = "../../swarm" } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } +libp2p-swarm = { version = "0.39.0", path = "../../swarm" } log = "0.4.11" rand = "0.7" smallvec = "1.6.1" diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index f8a4e2eb3a9..c4e18d894fb 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -66,9 +66,8 @@ use futures::channel::oneshot; use handler::{RequestProtocol, RequestResponseHandler, RequestResponseHandlerEvent}; use libp2p_core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}; use libp2p_swarm::{ - dial_opts::{self, DialOpts}, - DialError, IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, - PollParameters, + dial_opts::DialOpts, DialError, IntoConnectionHandler, NetworkBehaviour, + NetworkBehaviourAction, NotifyHandler, PollParameters, }; use smallvec::SmallVec; use std::{ @@ -148,7 +147,7 @@ pub enum RequestResponseEvent /// Possible failures occurring in the 
context of sending /// an outbound request and receiving the response. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum OutboundFailure { /// The request could not be sent because a dialing attempt failed. DialFailure, @@ -185,7 +184,7 @@ impl std::error::Error for OutboundFailure {} /// Possible failures occurring in the context of receiving an /// inbound request and sending a response. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum InboundFailure { /// The inbound request timed out, either while reading the /// incoming request or before a response is sent, e.g. if @@ -385,9 +384,7 @@ where if let Some(request) = self.try_send_request(peer, request) { let handler = self.new_handler(); self.pending_events.push_back(NetworkBehaviourAction::Dial { - opts: DialOpts::peer_id(*peer) - .condition(dial_opts::PeerCondition::Disconnected) - .build(), + opts: DialOpts::peer_id(*peer).build(), handler, }); self.pending_outbound_requests diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 8cbc06e7444..bfb8641c106 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -29,7 +29,7 @@ use libp2p_core::{ upgrade::{self, read_length_prefixed, write_length_prefixed}, Multiaddr, PeerId, }; -use libp2p_noise::{Keypair, NoiseConfig, X25519Spec}; +use libp2p_noise::NoiseAuthenticated; use libp2p_request_response::*; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_tcp::{GenTcpConfig, TcpTransport}; @@ -295,14 +295,12 @@ fn emits_inbound_connection_closed_if_channel_is_dropped() { fn mk_transport() -> (PeerId, transport::Boxed<(PeerId, StreamMuxerBox)>) { let id_keys = identity::Keypair::generate_ed25519(); let peer_id = id_keys.public().to_peer_id(); - let noise_keys = Keypair::::new() - .into_authentic(&id_keys) - .unwrap(); + ( peer_id, TcpTransport::new(GenTcpConfig::default().nodelay(true)) .upgrade(upgrade::Version::V1) - .authenticate(NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate(NoiseAuthenticated::xx(&id_keys).unwrap()) .multiplex(libp2p_yamux::YamuxConfig::default()) .boxed(), ) diff --git a/src/lib.rs b/src/lib.rs index 8437fd65a82..29b32b8abf6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -79,8 +79,11 @@ pub use libp2p_identify as identify; #[cfg_attr(docsrs, doc(cfg(feature = "kad")))] #[doc(inline)] pub use libp2p_kad as kad; -#[cfg(feature = "mdns")] -#[cfg_attr(docsrs, doc(cfg(feature = "mdns")))] +#[cfg(any(feature = "mdns-async-io", feature = "mdns-tokio"))] +#[cfg_attr( + docsrs, + doc(cfg(any(feature = "mdns-tokio", feature = "mdns-async-io"))) +)] #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_mdns as mdns; @@ -219,13 +222,9 @@ pub async fn development_transport( dns_tcp.or_transport(ws_dns_tcp) }; - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .expect("Signing libp2p-noise static DH keypair failed."); - Ok(transport .upgrade(core::upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate(noise::NoiseAuthenticated::xx(&keypair).unwrap()) .multiplex(core::upgrade::SelectUpgrade::new( yamux::YamuxConfig::default(), mplex::MplexConfig::default(), @@ -279,13 +278,9 @@ pub fn tokio_development_transport( dns_tcp.or_transport(ws_dns_tcp) }; - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .expect("Signing libp2p-noise static DH keypair 
failed."); - Ok(transport .upgrade(core::upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .authenticate(noise::NoiseAuthenticated::xx(&keypair).unwrap()) .multiplex(core::upgrade::SelectUpgrade::new( yamux::YamuxConfig::default(), mplex::MplexConfig::default(), diff --git a/swarm-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md index 4d467c5e76a..464bec7fe95 100644 --- a/swarm-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -1,3 +1,30 @@ +# 0.30.1 [unreleased] + +- Fix an issue where the derive would generate bad code if the type parameters between the behaviour and a custom + out event differed. See [PR 2907]. + +[PR 2907]: https://github.com/libp2p/rust-libp2p/pull/2907 + +# 0.30.0 + +- Remove support for removed `NetworkBehaviourEventProcess`. See [PR 2840]. + +- Remove support for custom `poll` method on `NetworkBehaviour` via `#[behaviour(poll_method = + "poll")]`. See [PR 2841]. + +[PR 2840]: https://github.com/libp2p/rust-libp2p/pull/2840 +[PR 2841]: https://github.com/libp2p/rust-libp2p/pull/2841 + +- Remove support for non-`NetworkBehaviour` fields on main `struct` via `#[behaviour(ignore)]`. See + [PR 2842]. + +[PR 2842]: https://github.com/libp2p/rust-libp2p/pull/2842 + +# 0.29.0 + +- Generate `NetworkBehaviour::OutEvent` if not provided through `#[behaviour(out_event = + "MyOutEvent")]` and event processing is disabled (default). + # 0.28.0 - Import `ListenerId` from `libp2p::core::transport`. See [PR 2652]. diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 97a205674d9..1d2c54a9da1 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm-derive" edition = "2021" rust-version = "1.56.1" description = "Procedural macros of libp2p-core" -version = "0.28.0" +version = "0.30.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,10 +14,12 @@ categories = ["network-programming", "asynchronous"] proc-macro = true [dependencies] -syn = { version = "1.0.8", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } +heck = "0.4" quote = "1.0" +syn = { version = "1.0.8", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } [dev-dependencies] libp2p = { path = "../", default-features = false, features = ["ping", "identify", "kad"] } either = "1.6.0" futures = "0.3.1" +void = "1" diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index 1216add96c0..5f901ed6ff8 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -20,9 +20,10 @@ #![recursion_limit = "256"] +use heck::ToUpperCamelCase; use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Ident}; +use syn::{parse_macro_input, Data, DataStruct, DeriveInput}; /// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See /// the trait documentation for better description. @@ -47,7 +48,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let (_, ty_generics, where_clause) = ast.generics.split_for_impl(); let multiaddr = quote! {::libp2p::core::Multiaddr}; let trait_to_impl = quote! {::libp2p::swarm::NetworkBehaviour}; - let net_behv_event_proc = quote! {::libp2p::swarm::NetworkBehaviourEventProcess}; let either_ident = quote! {::libp2p::core::either::EitherOutput}; let network_behaviour_action = quote! 
{::libp2p::swarm::NetworkBehaviourAction}; let into_connection_handler = quote! {::libp2p::swarm::IntoConnectionHandler}; @@ -70,78 +70,96 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { quote! {<#(#lf,)* #(#tp,)* #(#cst,)*>} }; - // Whether or not we require the `NetworkBehaviourEventProcess` trait to be implemented. - let event_process = { - let mut event_process = false; - - for meta_items in ast.attrs.iter().filter_map(get_meta_items) { - for meta_item in meta_items { - match meta_item { - syn::NestedMeta::Meta(syn::Meta::NameValue(ref m)) - if m.path.is_ident("event_process") => - { - if let syn::Lit::Bool(ref b) = m.lit { - event_process = b.value + let (out_event_name, out_event_definition, out_event_from_clauses) = { + // If we find a `#[behaviour(out_event = "Foo")]` attribute on the + // struct, we set `Foo` as the out event. If not, the `OutEvent` is + // generated. + let user_provided_out_event_name: Option = ast + .attrs + .iter() + .filter_map(get_meta_items) + .flatten() + .filter_map(|meta_item| { + if let syn::NestedMeta::Meta(syn::Meta::NameValue(ref m)) = meta_item { + if m.path.is_ident("out_event") { + if let syn::Lit::Str(ref s) = m.lit { + return Some(syn::parse_str(&s.value()).unwrap()); } } - _ => (), } + None + }) + .next(); + + match user_provided_out_event_name { + // User provided `OutEvent`. + Some(name) => { + let definition = None; + let from_clauses = data_struct + .fields + .iter() + .map(|field| { + let ty = &field.ty; + quote! {#name: From< <#ty as #trait_to_impl>::OutEvent >} + }) + .collect::>(); + (name, definition, from_clauses) } - } - - event_process - }; - - // The fields of the struct we are interested in (no ignored fields). - let data_struct_fields = data_struct - .fields - .iter() - .filter(|f| !is_ignored(f)) - .collect::>(); - - // The final out event. - // If we find a `#[behaviour(out_event = "Foo")]` attribute on the struct, we set `Foo` as - // the out event. Otherwise we use `()`. - let out_event = { - let mut out = quote! {()}; - for meta_items in ast.attrs.iter().filter_map(get_meta_items) { - for meta_item in meta_items { - match meta_item { - syn::NestedMeta::Meta(syn::Meta::NameValue(ref m)) - if m.path.is_ident("out_event") => - { - if let syn::Lit::Str(ref s) = m.lit { - let ident: syn::Type = syn::parse_str(&s.value()).unwrap(); - out = quote! {#ident}; + // User did not provide `OutEvent`. Generate it. + None => { + let name: syn::Type = syn::parse_str(&(ast.ident.to_string() + "Event")).unwrap(); + let definition = { + let fields = data_struct + .fields + .iter() + .map(|field| { + let variant: syn::Variant = syn::parse_str( + &field + .ident + .clone() + .expect( + "Fields of NetworkBehaviour implementation to be named.", + ) + .to_string() + .to_upper_camel_case(), + ) + .unwrap(); + let ty = &field.ty; + quote! {#variant(<#ty as ::libp2p::swarm::NetworkBehaviour>::OutEvent)} + }) + .collect::>(); + let visibility = &ast.vis; + + Some(quote! { + #[derive(::std::fmt::Debug)] + #visibility enum #name #impl_generics + #where_clause + { + #(#fields),* } - } - _ => (), - } + }) + }; + let from_clauses = vec![]; + (name, definition, from_clauses) } } - out }; // Build the `where ...` clause of the trait implementation. let where_clause = { - let additional = data_struct_fields + let additional = data_struct + .fields .iter() - .flat_map(|field| { + .map(|field| { let ty = &field.ty; - vec![ - quote! {#ty: #trait_to_impl}, - if event_process { - quote! 
{Self: #net_behv_event_proc<<#ty as #trait_to_impl>::OutEvent>} - } else { - quote! {#out_event: From< <#ty as #trait_to_impl>::OutEvent >} - }, - ] + quote! {#ty: #trait_to_impl} }) + .chain(out_event_from_clauses) .collect::>(); if let Some(where_clause) = where_clause { if where_clause.predicates.trailing_punct() { - Some(quote! {#where_clause #(#additional),*}) + Some(quote! {#where_clause #(#additional),* }) } else { Some(quote! {#where_clause, #(#additional),*}) } @@ -152,7 +170,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `addresses_of_peer()`. let addresses_of_peer_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -163,7 +182,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_connection_established()`. let inject_connection_established_stmts = { - data_struct_fields.iter().enumerate().map(move |(field_n, field)| { + data_struct.fields.iter().enumerate().map(move |(field_n, field)| { match field.ident { Some(ref i) => quote!{ self.#i.inject_connection_established(peer_id, connection_id, endpoint, errors, other_established); }, None => quote!{ self.#field_n.inject_connection_established(peer_id, connection_id, endpoint, errors, other_established); }, @@ -173,7 +192,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_address_change()`. let inject_address_change_stmts = { - data_struct_fields.iter().enumerate().map(move |(field_n, field)| { + data_struct.fields.iter().enumerate().map(move |(field_n, field)| { match field.ident { Some(ref i) => quote!{ self.#i.inject_address_change(peer_id, connection_id, old, new); }, None => quote!{ self.#field_n.inject_address_change(peer_id, connection_id, old, new); }, @@ -183,7 +202,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_connection_closed()`. let inject_connection_closed_stmts = { - data_struct_fields + data_struct.fields .iter() .enumerate() // The outmost handler belongs to the last behaviour. @@ -212,7 +231,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_dial_failure()`. let inject_dial_failure_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() // The outmost handler belongs to the last behaviour. @@ -246,7 +266,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_listen_failure()`. let inject_listen_failure_stmts = { - data_struct_fields + data_struct.fields .iter() .enumerate() .rev() @@ -274,7 +294,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_new_listener()`. let inject_new_listener_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -285,7 +306,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_new_listen_addr()`. 
let inject_new_listen_addr_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -296,7 +318,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_expired_listen_addr()`. let inject_expired_listen_addr_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -307,7 +330,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_new_external_addr()`. let inject_new_external_addr_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -318,7 +342,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_expired_external_addr()`. let inject_expired_external_addr_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -329,7 +354,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_listener_error()`. let inject_listener_error_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -340,7 +366,8 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // Build the list of statements to put in the body of `inject_listener_closed()`. let inject_listener_closed_stmts = { - data_struct_fields + data_struct + .fields .iter() .enumerate() .map(move |(field_n, field)| match field.ident { @@ -353,14 +380,14 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // // The event type is a construction of nested `#either_ident`s of the events of the children. // We call `inject_event` on the corresponding child. - let inject_node_event_stmts = data_struct_fields.iter().enumerate().enumerate().map(|(enum_n, (field_n, field))| { + let inject_node_event_stmts = data_struct.fields.iter().enumerate().enumerate().map(|(enum_n, (field_n, field))| { let mut elem = if enum_n != 0 { quote!{ #either_ident::Second(ev) } } else { quote!{ ev } }; - for _ in 0 .. data_struct_fields.len() - 1 - enum_n { + for _ in 0 .. data_struct.fields.len() - 1 - enum_n { elem = quote!{ #either_ident::First(#elem) }; } @@ -373,7 +400,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // The [`ConnectionHandler`] associated type. let connection_handler_ty = { let mut ph_ty = None; - for field in data_struct_fields.iter() { + for field in data_struct.fields.iter() { let ty = &field.ty; let field_info = quote! { <#ty as #trait_to_impl>::ConnectionHandler }; match ph_ty { @@ -390,7 +417,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let new_handler = { let mut out_handler = None; - for (field_n, field) in data_struct_fields.iter().enumerate() { + for (field_n, field) in data_struct.fields.iter().enumerate() { let field_name = match field.ident { Some(ref i) => quote! { self.#i }, None => quote! { self.#field_n }, @@ -411,44 +438,21 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { out_handler.unwrap_or(quote! {()}) // TODO: See test `empty`. }; - // The method to use to poll. 
- // If we find a `#[behaviour(poll_method = "poll")]` attribute on the struct, we call - // `self.poll()` at the end of the polling. - let poll_method = { - let mut poll_method = quote! {std::task::Poll::Pending}; - for meta_items in ast.attrs.iter().filter_map(get_meta_items) { - for meta_item in meta_items { - match meta_item { - syn::NestedMeta::Meta(syn::Meta::NameValue(ref m)) - if m.path.is_ident("poll_method") => - { - if let syn::Lit::Str(ref s) = m.lit { - let ident: Ident = syn::parse_str(&s.value()).unwrap(); - poll_method = quote! {#name::#ident(self, cx, poll_params)}; - } - } - _ => (), - } - } - } - poll_method - }; - // List of statements to put in `poll()`. // // We poll each child one by one and wrap around the output. - let poll_stmts = data_struct_fields.iter().enumerate().enumerate().map(|(enum_n, (field_n, field))| { - let field_name = match field.ident { - Some(ref i) => quote!{ self.#i }, - None => quote!{ self.#field_n }, - }; + let poll_stmts = data_struct.fields.iter().enumerate().map(|(field_n, field)| { + let field = field + .ident + .clone() + .expect("Fields of NetworkBehaviour implementation to be named."); - let mut wrapped_event = if enum_n != 0 { + let mut wrapped_event = if field_n != 0 { quote!{ #either_ident::Second(event) } } else { quote!{ event } }; - for _ in 0 .. data_struct_fields.len() - 1 - enum_n { + for _ in 0 .. data_struct.fields.len() - 1 - field_n { wrapped_event = quote!{ #either_ident::First(#wrapped_event) }; } @@ -459,8 +463,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let provided_handler_and_new_handlers = { let mut out_handler = None; - for (f_n, f) in data_struct_fields.iter().enumerate() { - + for (f_n, f) in data_struct.fields.iter().enumerate() { let f_name = match f.ident { Some(ref i) => quote! { self.#i }, None => quote! { self.#f_n }, @@ -485,23 +488,32 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { out_handler.unwrap_or(quote! {()}) // TODO: See test `empty`. }; - let generate_event_match_arm = if event_process { - quote! { - std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(event)) => { - #net_behv_event_proc::inject_event(self, event) - } - } - } else { + let generate_event_match_arm = { + // If the `NetworkBehaviour`'s `OutEvent` is generated by the derive macro, wrap the sub + // `NetworkBehaviour` `OutEvent` in the variant of the generated `OutEvent`. If the + // `NetworkBehaviour`'s `OutEvent` is provided by the user, use the corresponding `From` + // implementation. + let into_out_event = if out_event_definition.is_some() { + let event_variant: syn::Variant = syn::parse_str( + &field + .to_string() + .to_upper_camel_case() + ).unwrap(); + quote! { #out_event_name::#event_variant(event) } + } else { + quote! { event.into() } + }; + quote! 
{ std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(event)) => { - return std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(event.into())) + return std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(#into_out_event)) } } }; Some(quote!{ loop { - match #trait_to_impl::poll(&mut #field_name, cx, poll_params) { + match #trait_to_impl::poll(&mut self.#field, cx, poll_params) { #generate_event_match_arm std::task::Poll::Ready(#network_behaviour_action::Dial { opts, handler: provided_handler }) => { return std::task::Poll::Ready(#network_behaviour_action::Dial { opts, handler: #provided_handler_and_new_handlers }); @@ -525,13 +537,21 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { }) }); + let out_event_reference = if out_event_definition.is_some() { + quote! { #out_event_name #ty_generics } + } else { + quote! { #out_event_name } + }; + // Now the magic happens. let final_quote = quote! { + #out_event_definition + impl #impl_generics #trait_to_impl for #name #ty_generics #where_clause { type ConnectionHandler = #connection_handler_ty; - type OutEvent = #out_event; + type OutEvent = #out_event_reference; fn new_handler(&mut self) -> Self::ConnectionHandler { use #into_connection_handler; @@ -606,8 +626,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { fn poll(&mut self, cx: &mut std::task::Context, poll_params: &mut impl #poll_parameters) -> std::task::Poll<#network_behaviour_action> { use libp2p::futures::prelude::*; #(#poll_stmts)* - let f: std::task::Poll<#network_behaviour_action> = #poll_method; - f + std::task::Poll::Pending } } }; @@ -629,19 +648,3 @@ fn get_meta_items(attr: &syn::Attribute) -> Option> { None } } - -/// Returns true if a field is marked as ignored by the user. -fn is_ignored(field: &syn::Field) -> bool { - for meta_items in field.attrs.iter().filter_map(get_meta_items) { - for meta_item in meta_items { - match meta_item { - syn::NestedMeta::Meta(syn::Meta::Path(ref m)) if m.is_ident("ignore") => { - return true; - } - _ => (), - } - } - } - - false -} diff --git a/swarm-derive/tests/test.rs b/swarm-derive/tests/test.rs index 4961806d357..e0f77eefd30 100644 --- a/swarm-derive/tests/test.rs +++ b/swarm-derive/tests/test.rs @@ -21,6 +21,7 @@ use futures::prelude::*; use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p_swarm_derive::*; +use std::fmt::Debug; /// Small utility to check that a type implements `NetworkBehaviour`. #[allow(dead_code)] @@ -38,18 +39,17 @@ fn empty() { fn one_field() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Foo { ping: libp2p::ping::Ping, } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - #[allow(dead_code)] + #[allow(unreachable_code)] fn foo() { - require_net_behaviour::(); + let _out_event: ::OutEvent = unimplemented!(); + match _out_event { + FooEvent::Ping(libp2p::ping::Event { .. 
}) => {} + } } } @@ -57,23 +57,21 @@ fn one_field() { fn two_fields() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Foo { ping: libp2p::ping::Ping, identify: libp2p::identify::Identify, } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::identify::IdentifyEvent) {} - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - #[allow(dead_code)] + #[allow(unreachable_code)] fn foo() { - require_net_behaviour::(); + let _out_event: ::OutEvent = unimplemented!(); + match _out_event { + FooEvent::Ping(libp2p::ping::Event { .. }) => {} + FooEvent::Identify(event) => { + let _: libp2p::identify::IdentifyEvent = event; + } + } } } @@ -81,89 +79,52 @@ fn two_fields() { fn three_fields() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Foo { ping: libp2p::ping::Ping, identify: libp2p::identify::Identify, kad: libp2p::kad::Kademlia, - #[behaviour(ignore)] - foo: String, - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::identify::IdentifyEvent) {} - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::kad::KademliaEvent) {} - } - - #[allow(dead_code)] - fn foo() { - require_net_behaviour::(); - } -} - -#[test] -fn three_fields_non_last_ignored() { - #[allow(dead_code)] - #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] - struct Foo { - ping: libp2p::ping::Ping, - #[behaviour(ignore)] - identify: String, - kad: libp2p::kad::Kademlia, - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::kad::KademliaEvent) {} } #[allow(dead_code)] + #[allow(unreachable_code)] fn foo() { - require_net_behaviour::(); + let _out_event: ::OutEvent = unimplemented!(); + match _out_event { + FooEvent::Ping(libp2p::ping::Event { .. 
}) => {} + FooEvent::Identify(event) => { + let _: libp2p::identify::IdentifyEvent = event; + } + FooEvent::Kad(event) => { + let _: libp2p::kad::KademliaEvent = event; + } + } } } #[test] -fn custom_polling() { +fn custom_event() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(poll_method = "foo", event_process = true)] + #[behaviour(out_event = "MyEvent")] struct Foo { ping: libp2p::ping::Ping, identify: libp2p::identify::Identify, } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} + enum MyEvent { + Ping(libp2p::ping::PingEvent), + Identify(libp2p::identify::IdentifyEvent), } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::identify::IdentifyEvent) {} + impl From for MyEvent { + fn from(event: libp2p::ping::PingEvent) -> Self { + MyEvent::Ping(event) + } } - impl Foo { - fn foo( - &mut self, - _: &mut std::task::Context, - _: &mut impl libp2p::swarm::PollParameters, - ) -> std::task::Poll< - libp2p::swarm::NetworkBehaviourAction< - ::OutEvent, - ::ConnectionHandler, - >, - > { - std::task::Poll::Pending + impl From for MyEvent { + fn from(event: libp2p::identify::IdentifyEvent) -> Self { + MyEvent::Identify(event) } } @@ -174,21 +135,30 @@ fn custom_polling() { } #[test] -fn custom_event_no_polling() { +fn custom_event_mismatching_field_names() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(out_event = "Vec", event_process = true)] + #[behaviour(out_event = "MyEvent")] struct Foo { - ping: libp2p::ping::Ping, - identify: libp2p::identify::Identify, + a: libp2p::ping::Ping, + b: libp2p::identify::Identify, + } + + enum MyEvent { + Ping(libp2p::ping::PingEvent), + Identify(libp2p::identify::IdentifyEvent), } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} + impl From for MyEvent { + fn from(event: libp2p::ping::PingEvent) -> Self { + MyEvent::Ping(event) + } } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::identify::IdentifyEvent) {} + impl From for MyEvent { + fn from(event: libp2p::identify::IdentifyEvent) -> Self { + MyEvent::Identify(event) + } } #[allow(dead_code)] @@ -198,41 +168,15 @@ fn custom_event_no_polling() { } #[test] -fn custom_event_and_polling() { +fn bound() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(poll_method = "foo", out_event = "String", event_process = true)] - struct Foo { + struct Foo + where + ::OutEvent: Debug, + { ping: libp2p::ping::Ping, - identify: libp2p::identify::Identify, - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::identify::IdentifyEvent) {} - } - - impl Foo { - fn foo( - &mut self, - _: &mut std::task::Context, - _: &mut impl libp2p::swarm::PollParameters, - ) -> std::task::Poll< - libp2p::swarm::NetworkBehaviourAction< - ::OutEvent, - ::ConnectionHandler, - >, - > { - std::task::Poll::Pending - } - } - - #[allow(dead_code)] - fn foo() { - require_net_behaviour::(); + bar: T, } } @@ -240,8 +184,11 @@ fn custom_event_and_polling() { fn where_clause() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] - struct Foo { + struct Foo + where + T: Copy + NetworkBehaviour, + ::OutEvent: Debug, + { ping: libp2p::ping::Ping, bar: T, } @@ 
-249,38 +196,30 @@ fn where_clause() { #[test] fn nested_derives_with_import() { - use libp2p::swarm::NetworkBehaviourEventProcess; - #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Foo { ping: libp2p::ping::Ping, } #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Bar { foo: Foo, } - impl NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - - impl NetworkBehaviourEventProcess<()> for Bar { - fn inject_event(&mut self, _: ()) {} - } - #[allow(dead_code)] - fn bar() { - require_net_behaviour::(); + #[allow(unreachable_code)] + fn foo() { + let _out_event: ::OutEvent = unimplemented!(); + match _out_event { + BarEvent::Foo(FooEvent::Ping(libp2p::ping::Event { .. })) => {} + } } } #[test] -fn event_process_false() { +fn custom_event_emit_event_through_poll() { enum BehaviourOutEvent { Ping(libp2p::ping::PingEvent), Identify(libp2p::identify::IdentifyEvent), @@ -331,20 +270,11 @@ fn with_toggle() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Foo { identify: libp2p::identify::Identify, ping: Toggle, } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::identify::IdentifyEvent) {} - } - - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::ping::PingEvent) {} - } - #[allow(dead_code)] fn foo() { require_net_behaviour::(); @@ -357,28 +287,11 @@ fn with_either() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] struct Foo { kad: libp2p::kad::Kademlia, ping_or_identify: Either, } - impl libp2p::swarm::NetworkBehaviourEventProcess for Foo { - fn inject_event(&mut self, _: libp2p::kad::KademliaEvent) {} - } - - impl - libp2p::swarm::NetworkBehaviourEventProcess< - Either, - > for Foo - { - fn inject_event( - &mut self, - _: Either, - ) { - } - } - #[allow(dead_code)] fn foo() { require_net_behaviour::(); @@ -386,7 +299,7 @@ fn with_either() { } #[test] -fn no_event_with_either() { +fn custom_event_with_either() { use either::Either; enum BehaviourOutEvent { @@ -394,14 +307,6 @@ fn no_event_with_either() { PingOrIdentify(Either), } - #[allow(dead_code)] - #[derive(NetworkBehaviour)] - #[behaviour(out_event = "BehaviourOutEvent", event_process = false)] - struct Foo { - kad: libp2p::kad::Kademlia, - ping_or_identify: Either, - } - impl From for BehaviourOutEvent { fn from(event: libp2p::kad::KademliaEvent) -> Self { BehaviourOutEvent::Kad(event) @@ -414,6 +319,14 @@ fn no_event_with_either() { } } + #[allow(dead_code)] + #[derive(NetworkBehaviour)] + #[behaviour(out_event = "BehaviourOutEvent")] + struct Foo { + kad: libp2p::kad::Kademlia, + ping_or_identify: Either, + } + #[allow(dead_code)] fn foo() { require_net_behaviour::(); @@ -421,28 +334,81 @@ fn no_event_with_either() { } #[test] -fn mixed_field_order() { - struct Foo {} +fn generated_out_event_derive_debug() { + #[allow(dead_code)] + #[derive(NetworkBehaviour)] + struct Foo { + ping: libp2p::ping::Ping, + } + + fn require_debug() + where + T: NetworkBehaviour, + ::OutEvent: Debug, + { + } + + require_debug::(); +} + +#[test] +fn custom_out_event_no_type_parameters() { + use libp2p::core::connection::ConnectionId; + use libp2p::swarm::handler::DummyConnectionHandler; + use libp2p::swarm::{ + ConnectionHandler, IntoConnectionHandler, NetworkBehaviourAction, PollParameters, + }; + use libp2p::PeerId; + use std::task::Context; + use 
std::task::Poll; + + pub struct TemplatedBehaviour { + _data: T, + } + + impl NetworkBehaviour for TemplatedBehaviour { + type ConnectionHandler = DummyConnectionHandler; + type OutEvent = void::Void; + + fn new_handler(&mut self) -> Self::ConnectionHandler { + DummyConnectionHandler::default() + } + + fn inject_event( + &mut self, + _peer: PeerId, + _connection: ConnectionId, + message: <::Handler as ConnectionHandler>::OutEvent, + ) { + void::unreachable(message); + } + + fn poll( + &mut self, + _ctx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll> { + Poll::Pending + } + } #[derive(NetworkBehaviour)] - #[behaviour(event_process = true)] - pub struct Behaviour { - #[behaviour(ignore)] - _foo: Foo, - _ping: libp2p::ping::Ping, - #[behaviour(ignore)] - _foo2: Foo, - _identify: libp2p::identify::Identify, - #[behaviour(ignore)] - _foo3: Foo, + #[behaviour(out_event = "OutEvent")] + struct Behaviour { + custom: TemplatedBehaviour, } - impl libp2p::swarm::NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, _evt: T) {} + #[derive(Debug)] + enum OutEvent { + None, } - #[allow(dead_code)] - fn behaviour() { - require_net_behaviour::(); + impl From for OutEvent { + fn from(_e: void::Void) -> Self { + Self::None + } } + + require_net_behaviour::>(); + require_net_behaviour::>(); } diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 174924889b0..abf54657cda 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,8 +1,98 @@ -# 0.38.0 [unreleased] +# 0.39.0 + +- Remove deprecated `NetworkBehaviourEventProcess`. See [libp2p-swarm v0.38.0 changelog entry] for + migration path. + +- Update to `libp2p-core` `v0.36.0`. + +[libp2p-swarm v0.38.0 changelog entry]: https://github.com/libp2p/rust-libp2p/blob/master/swarm/CHANGELOG.md#0380 + +# 0.38.0 + +- Deprecate `NetworkBehaviourEventProcess`. When deriving `NetworkBehaviour` on a custom `struct` users + should either bring their own `OutEvent` via `#[behaviour(out_event = "MyBehaviourEvent")]` or, + when not specified, have the derive macro generate one for the user. + + See [`NetworkBehaviour` + documentation](https://docs.rs/libp2p/latest/libp2p/swarm/trait.NetworkBehaviour.html) and [PR + 2784] for details. 
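As an illustrative sketch of the generated path (reusing the `Gossipsub`/`Mdns` fields from the migration example below): the derive macro names the enum after the struct with an `Event` suffix, adds one UpperCamelCase variant per field wrapping that field's `OutEvent`, and derives `Debug`. This is a rough approximation, not the literal macro expansion.

  ``` rust
  #[derive(NetworkBehaviour)]
  struct MyBehaviour {
      gossipsub: Gossipsub,
      mdns: Mdns,
  }

  // Roughly what the derive macro generates when no `out_event` is specified:
  #[derive(Debug)]
  enum MyBehaviourEvent {
      Gossipsub(<Gossipsub as NetworkBehaviour>::OutEvent), // i.e. GossipsubEvent
      Mdns(<Mdns as NetworkBehaviour>::OutEvent),           // i.e. MdnsEvent
  }
  ```

  No `From` implementations are required in this case; those are only needed when bringing your own `out_event`, as in the migration below.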
+ + Previously + + ``` rust + #[derive(NetworkBehaviour)] + #[behaviour(event_process = true)] + struct MyBehaviour { + gossipsub: Gossipsub, + mdns: Mdns, + } + + impl NetworkBehaviourEventProcess for MyBehaviour { + fn inject_event(&mut self, message: GossipsubEvent) { + todo!("Handle event") + } + } + + impl NetworkBehaviourEventProcess for MyBehaviour { + fn inject_event(&mut self, message: MdnsEvent) { + todo!("Handle event") + } + } + ``` + + Now + + ``` rust + #[derive(NetworkBehaviour)] + #[behaviour(out_event = "MyBehaviourEvent")] + struct MyBehaviour { + gossipsub: Gossipsub, + mdns: Mdns, + } + + enum MyBehaviourEvent { + Gossipsub(GossipsubEvent), + Mdns(MdnsEvent), + } + + impl From for MyBehaviourEvent { + fn from(event: GossipsubEvent) -> Self { + MyBehaviourEvent::Gossipsub(event) + } + } + + impl From for MyBehaviourEvent { + fn from(event: MdnsEvent) -> Self { + MyBehaviourEvent::Mdns(event) + } + } + + match swarm.next().await.unwrap() { + SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(event)) => { + todo!("Handle event") + } + SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(event)) => { + todo!("Handle event") + } + } + ``` + +- When deriving `NetworkBehaviour` on a custom `struct` where the user does not specify their own + `OutEvent` via `#[behaviour(out_event = "MyBehaviourEvent")]` and where the user does not enable + `#[behaviour(event_process = true)]`, then the derive macro generates an `OutEvent` definition for + the user. + + See [`NetworkBehaviour` + documentation](https://docs.rs/libp2p/latest/libp2p/swarm/trait.NetworkBehaviour.html) and [PR + 2792] for details. - Update dial address concurrency factor to `8`, thus dialing up to 8 addresses concurrently for a single connection attempt. See `Swarm::dial_concurrency_factor` and [PR 2741]. +- Update to `libp2p-core` `v0.35.0`. + [PR 2741]: https://github.com/libp2p/rust-libp2p/pull/2741/ +[PR 2784]: https://github.com/libp2p/rust-libp2p/pull/2784 +[PR 2792]: https://github.com/libp2p/rust-libp2p/pull/2792 # 0.37.0 diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 2953c984368..a22b114f82e 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm" edition = "2021" rust-version = "1.56.1" description = "The libp2p swarm" -version = "0.38.0" +version = "0.39.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,7 +16,7 @@ fnv = "1.0" futures = "0.3.1" futures-timer = "3.0.2" instant = "0.1.11" -libp2p-core = { version = "0.34.0", path = "../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../core", default-features = false } log = "0.4" pin-project = "1.0.0" rand = "0.7" diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 3dd6ddf9588..a5bf0e06f06 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -79,20 +79,15 @@ pub(crate) type THandlerOutEvent = /// it will delegate to each `struct` member and return a concatenated array of all addresses /// returned by the struct members. /// -/// When creating a custom [`NetworkBehaviour`], you must choose one of two methods to respond to -/// incoming events: -/// * One option is setting a custom `out_event` with `#[behaviour(out_event = "AnotherType")]`. -/// In this case, events generated by the custom [`NetworkBehaviour`] struct members will be -/// converted to your custom `out_event` for you to handle after polling the swarm. 
-/// * Alternatively, users that need access to the root [`NetworkBehaviour`] implementation while -/// processing emitted events, can specify `#[behaviour(event_process = true)]` (default is false). -/// Events generated by the behaviour's struct members are delegated to [`NetworkBehaviourEventProcess`] -/// trait implementations. Those must be provided by the user on the type that [`NetworkBehaviour`] -/// is derived on. +/// Events ([`NetworkBehaviour::OutEvent`]) returned by each `struct` member are wrapped in a new +/// `enum` event, with an `enum` variant for each `struct` member. Users can define this event +/// `enum` themselves and provide the name to the derive macro via `#[behaviour(out_event = +/// "MyCustomOutEvent")]`. If the user does not specify an `out_event`, the derive macro generates +/// the event definition itself, naming it `Event`. /// -/// When setting a custom `out_event`, the aforementioned conversion of each of the event types -/// generated by the struct members to the custom `out_event` is handled by [`From`] -/// implementations the user needs to provide. +/// The aforementioned conversion of each of the event types generated by the struct members to the +/// custom `out_event` is handled by [`From`] implementations which the user needs to define in +/// addition to the event `enum` itself. /// /// ``` rust /// # use libp2p::identify::{Identify, IdentifyEvent}; @@ -122,53 +117,6 @@ pub(crate) type THandlerOutEvent = /// } /// } /// ``` -/// -/// When using `event_process = true` the [`NetworkBehaviourEventProcess`] trait implementations -/// are granted exclusive access to the [`NetworkBehaviour`], therefore -/// [blocking code](https://ryhl.io/blog/async-what-is-blocking/) in these implementations will -/// block the entire [`Swarm`](crate::Swarm) from processing new events, since the swarm cannot progress -/// without also having exclusive access to the [`NetworkBehaviour`]. A better alternative is to execute -/// blocking or asynchronous logic on a separate task, perhaps with the help of a bounded channel to -/// maintain backpressure. The sender for the channel could be included in the NetworkBehaviours constructor. -/// -/// Optionally one can provide a custom `poll` function through the `#[behaviour(poll_method = -/// "poll")]` attribute. This function must have the same signature as the [`NetworkBehaviour#poll`] -/// function and will be called last within the generated [`NetworkBehaviour`] implementation. -/// -/// Struct members that don't implement [`NetworkBehaviour`] must be annotated with -/// `#[behaviour(ignore)]`. -/// -/// ``` rust -/// # use libp2p::identify::{Identify, IdentifyEvent}; -/// # use libp2p::ping::{Ping, PingEvent}; -/// # use libp2p::NetworkBehaviour; -/// #[derive(NetworkBehaviour)] -/// #[behaviour(out_event = "Event")] -/// struct MyBehaviour { -/// identify: Identify, -/// ping: Ping, -/// -/// #[behaviour(ignore)] -/// some_string: String, -/// } -/// # -/// # enum Event { -/// # Identify(IdentifyEvent), -/// # Ping(PingEvent), -/// # } -/// # -/// # impl From for Event { -/// # fn from(event: IdentifyEvent) -> Self { -/// # Self::Identify(event) -/// # } -/// # } -/// # -/// # impl From for Event { -/// # fn from(event: PingEvent) -> Self { -/// # Self::Ping(event) -/// # } -/// # } -/// ``` pub trait NetworkBehaviour: 'static { /// Handler for all the protocols the network behaviour supports. 
type ConnectionHandler: IntoConnectionHandler; @@ -218,8 +166,7 @@ pub trait NetworkBehaviour: 'static { /// Informs the behaviour about a closed connection to a peer. /// /// A call to this method is always paired with an earlier call to - /// `inject_connection_established` with the same peer ID, connection ID and - /// endpoint. + /// [`NetworkBehaviour::inject_connection_established`] with the same peer ID, connection ID and endpoint. fn inject_connection_closed( &mut self, _: &PeerId, @@ -243,8 +190,8 @@ pub trait NetworkBehaviour: 'static { /// Informs the behaviour about an event generated by the handler dedicated to the peer identified by `peer_id`. /// for the behaviour. /// - /// The `peer_id` is guaranteed to be in a connected state. In other words, `inject_connected` - /// has previously been called with this `PeerId`. + /// The `peer_id` is guaranteed to be in a connected state. In other words, + /// [`NetworkBehaviour::inject_connection_established`] has previously been called with this `PeerId`. fn inject_event( &mut self, peer_id: PeerId, @@ -335,17 +282,6 @@ pub trait PollParameters { fn local_peer_id(&self) -> &PeerId; } -/// When deriving [`NetworkBehaviour`] this trait must by default be implemented for all the -/// possible event types generated by the inner behaviours. -/// -/// You can opt out of this behaviour through `#[behaviour(event_process = false)]`. See the -/// documentation of [`NetworkBehaviour`] for details. -pub trait NetworkBehaviourEventProcess { - /// Called when one of the fields of the type you're deriving `NetworkBehaviour` on generates - /// an event. - fn inject_event(&mut self, event: TEvent); -} - /// An action that a [`NetworkBehaviour`] can trigger in the [`Swarm`] /// in whose context it is executing. /// diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs index 54e60e77b3a..6bb1d95a519 100644 --- a/swarm/src/behaviour/either.rs +++ b/swarm/src/behaviour/either.rs @@ -19,10 +19,7 @@ // DEALINGS IN THE SOFTWARE. 
use crate::handler::{either::IntoEitherHandler, ConnectionHandler, IntoConnectionHandler}; -use crate::{ - DialError, NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, - PollParameters, -}; +use crate::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use either::Either; use libp2p_core::{ connection::ConnectionId, transport::ListenerId, ConnectedPoint, Multiaddr, PeerId, @@ -236,17 +233,3 @@ where Poll::Ready(event) } } - -impl NetworkBehaviourEventProcess - for Either -where - TBehaviourLeft: NetworkBehaviourEventProcess, - TBehaviourRight: NetworkBehaviourEventProcess, -{ - fn inject_event(&mut self, event: TEvent) { - match self { - Either::Left(a) => a.inject_event(event), - Either::Right(b) => b.inject_event(event), - } - } -} diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 50ea6487770..07a15d56da9 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -23,10 +23,7 @@ use crate::handler::{ KeepAlive, SubstreamProtocol, }; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper}; -use crate::{ - DialError, NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, - PollParameters, -}; +use crate::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use either::Either; use libp2p_core::{ connection::ConnectionId, @@ -233,17 +230,6 @@ where } } -impl NetworkBehaviourEventProcess for Toggle -where - TBehaviour: NetworkBehaviourEventProcess, -{ - fn inject_event(&mut self, event: TEvent) { - if let Some(inner) = self.inner.as_mut() { - inner.inject_event(event); - } - } -} - /// Implementation of `IntoConnectionHandler` that can be in the disabled state. pub struct ToggleIntoConnectionHandler { inner: Option, diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 733016dceb0..24e54aba525 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -20,7 +20,6 @@ mod error; mod handler_wrapper; -mod substream; pub(crate) mod pool; @@ -30,18 +29,18 @@ pub use error::{ }; pub use pool::{ConnectionCounters, ConnectionLimits}; pub use pool::{EstablishedConnection, PendingConnection}; -pub use substream::{Close, SubstreamEndpoint}; use crate::handler::ConnectionHandler; use crate::IntoConnectionHandler; use handler_wrapper::HandlerWrapper; use libp2p_core::connection::ConnectedPoint; use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::StreamMuxerBox; +use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt}; use libp2p_core::upgrade; use libp2p_core::PeerId; -use std::{error::Error, fmt, pin::Pin, task::Context, task::Poll}; -use substream::{Muxing, SubstreamEvent}; +use std::collections::VecDeque; +use std::future::Future; +use std::{error::Error, fmt, io, pin::Pin, task::Context, task::Poll}; /// Information about a successfully established connection. #[derive(Debug, Clone, PartialEq, Eq)] @@ -52,6 +51,13 @@ pub struct Connected { pub peer_id: PeerId, } +/// Endpoint for a received substream. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum SubstreamEndpoint { + Dialer(TDialInfo), + Listener, +} + /// Event generated by a [`Connection`]. #[derive(Debug, Clone)] pub enum Event { @@ -67,19 +73,22 @@ where THandler: ConnectionHandler, { /// Node that handles the muxing. - muxing: substream::Muxing>, + muxing: StreamMuxerBox, /// Handler that processes substreams. handler: HandlerWrapper, + /// List of "open_info" that is waiting for new outbound substreams. 
+ open_info: VecDeque>, } impl fmt::Debug for Connection where THandler: ConnectionHandler + fmt::Debug, + THandler::OutboundOpenInfo: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Connection") - .field("muxing", &self.muxing) .field("handler", &self.handler) + .field("open_info", &self.open_info) .finish() } } @@ -108,8 +117,9 @@ where max_negotiating_inbound_streams, ); Connection { - muxing: Muxing::new(muxer), + muxing: muxer, handler: wrapped_handler, + open_info: VecDeque::with_capacity(8), } } @@ -120,11 +130,8 @@ where /// Begins an orderly shutdown of the connection, returning the connection /// handler and a `Future` that resolves when connection shutdown is complete. - pub fn close(self) -> (THandler, Close) { - ( - self.handler.into_connection_handler(), - self.muxing.close().0, - ) + pub fn close(self) -> (THandler, impl Future>) { + (self.handler.into_connection_handler(), self.muxing.close()) } /// Polls the handler and the substream, forwarding events from the former to the latter and @@ -138,38 +145,47 @@ where match self.handler.poll(cx)? { Poll::Pending => {} Poll::Ready(handler_wrapper::Event::OutboundSubstreamRequest(user_data)) => { - self.muxing.open_substream(user_data); - continue; + self.open_info.push_back(user_data); + continue; // Poll handler until exhausted. } Poll::Ready(handler_wrapper::Event::Custom(event)) => { return Poll::Ready(Ok(Event::Handler(event))); } } - // Perform I/O on the connection through the muxer, informing the handler - // of new substreams. - match self.muxing.poll(cx)? { + match self.muxing.poll_unpin(cx)? { Poll::Pending => {} - Poll::Ready(SubstreamEvent::InboundSubstream { substream }) => { - self.handler - .inject_substream(substream, SubstreamEndpoint::Listener); - continue; - } - Poll::Ready(SubstreamEvent::OutboundSubstream { - user_data, - substream, - }) => { - let endpoint = SubstreamEndpoint::Dialer(user_data); - self.handler.inject_substream(substream, endpoint); - continue; - } - Poll::Ready(SubstreamEvent::AddressChange(address)) => { + Poll::Ready(StreamMuxerEvent::AddressChange(address)) => { self.handler.inject_address_change(&address); return Poll::Ready(Ok(Event::AddressChange(address))); } } - return Poll::Pending; + if !self.open_info.is_empty() { + match self.muxing.poll_outbound_unpin(cx)? { + Poll::Pending => {} + Poll::Ready(substream) => { + let user_data = self + .open_info + .pop_front() + .expect("`open_info` is not empty"); + let endpoint = SubstreamEndpoint::Dialer(user_data); + self.handler.inject_substream(substream, endpoint); + continue; // Go back to the top, handler can potentially make progress again. + } + } + } + + match self.muxing.poll_inbound_unpin(cx)? { + Poll::Pending => {} + Poll::Ready(substream) => { + self.handler + .inject_substream(substream, SubstreamEndpoint::Listener); + continue; // Go back to the top, handler can potentially make progress again. + } + } + + return Poll::Pending; // Nothing can make progress, return `Pending`. } } } diff --git a/swarm/src/connection/handler_wrapper.rs b/swarm/src/connection/handler_wrapper.rs index 77eb1d79bbf..03d09b3fbc1 100644 --- a/swarm/src/connection/handler_wrapper.rs +++ b/swarm/src/connection/handler_wrapper.rs @@ -440,3 +440,82 @@ pub enum Event { /// Other event. 
Custom(TCustom), } + +#[cfg(test)] +mod tests { + use super::*; + use crate::handler::PendingConnectionHandler; + use quickcheck::*; + use std::sync::Arc; + + #[test] + fn max_negotiating_inbound_streams() { + fn prop(max_negotiating_inbound_streams: u8) { + let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); + let mut wrapper = HandlerWrapper::new( + PeerId::random(), + ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }, + PendingConnectionHandler::new("test".to_string()), + None, + max_negotiating_inbound_streams, + ); + let alive_substreams_counter = Arc::new(()); + + for _ in 0..max_negotiating_inbound_streams { + let substream = + SubstreamBox::new(PendingSubstream(alive_substreams_counter.clone())); + wrapper.inject_substream(substream, SubstreamEndpoint::Listener); + } + + assert_eq!( + Arc::strong_count(&alive_substreams_counter), + max_negotiating_inbound_streams + 1, + "Expect none of the substreams up to the limit to be dropped." + ); + + let substream = SubstreamBox::new(PendingSubstream(alive_substreams_counter.clone())); + wrapper.inject_substream(substream, SubstreamEndpoint::Listener); + + assert_eq!( + Arc::strong_count(&alive_substreams_counter), + max_negotiating_inbound_streams + 1, + "Expect substream exceeding the limit to be dropped." + ); + } + + QuickCheck::new().quickcheck(prop as fn(_)); + } + + struct PendingSubstream(Arc<()>); + + impl AsyncRead for PendingSubstream { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &mut [u8], + ) -> Poll> { + Poll::Pending + } + } + + impl AsyncWrite for PendingSubstream { + fn poll_write( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &[u8], + ) -> Poll> { + Poll::Pending + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Pending + } + } +} diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 4bbdf9c4162..62e931e9510 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -38,7 +38,7 @@ use futures::{ stream::FuturesUnordered, }; use libp2p_core::connection::{ConnectionId, Endpoint, PendingPoint}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; +use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; use std::{ collections::{hash_map, HashMap}, convert::TryFrom as _, @@ -604,7 +604,7 @@ where match event { task::PendingConnectionEvent::ConnectionEstablished { id, - output: (obtained_peer_id, muxer), + output: (obtained_peer_id, mut muxer), outgoing, } => { let PendingConnectionInfo { @@ -692,7 +692,7 @@ where if let Err(error) = error { self.spawn( poll_fn(move |cx| { - if let Err(e) = ready!(muxer.poll_close(cx)) { + if let Err(e) = ready!(muxer.poll_close_unpin(cx)) { log::debug!( "Failed to close connection {:?} to peer {}: {:?}", id, diff --git a/swarm/src/connection/substream.rs b/swarm/src/connection/substream.rs deleted file mode 100644 index 47d5d315b20..00000000000 --- a/swarm/src/connection/substream.rs +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use futures::prelude::*; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use smallvec::SmallVec; -use std::sync::Arc; -use std::{fmt, pin::Pin, task::Context, task::Poll}; - -/// Endpoint for a received substream. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum SubstreamEndpoint { - Dialer(TDialInfo), - Listener, -} - -/// Implementation of `Stream` that handles substream multiplexing. -/// -/// The stream will receive substreams and can be used to open new outgoing substreams. Destroying -/// the `Muxing` will **not** close the existing substreams. -/// -/// The stream will close once both the inbound and outbound channels are closed, and no more -/// outbound substream attempt is pending. -pub struct Muxing -where - TMuxer: StreamMuxer, -{ - /// The muxer used to manage substreams. - inner: Arc, - /// List of substreams we are currently opening. - outbound_substreams: SmallVec<[(TUserData, TMuxer::OutboundSubstream); 8]>, -} - -/// Future that signals the remote that we have closed the connection. -pub struct Close { - /// Muxer to close. - muxer: Arc, -} - -/// Event that can happen on the `Muxing`. -pub enum SubstreamEvent -where - TMuxer: StreamMuxer, -{ - /// A new inbound substream arrived. - InboundSubstream { - /// The newly-opened substream. Will return EOF of an error if the `Muxing` is - /// destroyed or `close_graceful` is called. - substream: TMuxer::Substream, - }, - - /// An outbound substream has successfully been opened. - OutboundSubstream { - /// User data that has been passed to the `open_substream` method. - user_data: TUserData, - /// The newly-opened substream. Will return EOF of an error if the `Muxing` is - /// destroyed or `close_graceful` is called. - substream: TMuxer::Substream, - }, - - /// Address to the remote has changed. The previous one is now obsolete. - /// - /// > **Note**: This can for example happen when using the QUIC protocol, where the two nodes - /// > can change their IP address while retaining the same QUIC connection. - AddressChange(Multiaddr), -} - -/// Identifier for a substream being opened. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct OutboundSubstreamId(usize); - -impl Muxing -where - TMuxer: StreamMuxer, -{ - /// Creates a new node events stream. 
- pub fn new(muxer: TMuxer) -> Self { - Muxing { - inner: Arc::new(muxer), - outbound_substreams: SmallVec::new(), - } - } - - /// Starts the process of opening a new outbound substream. - /// - /// After calling this method, polling the stream should eventually produce either an - /// `OutboundSubstream` event or an `OutboundClosed` event containing the user data that has - /// been passed to this method. - pub fn open_substream(&mut self, user_data: TUserData) { - let raw = self.inner.open_outbound(); - self.outbound_substreams.push((user_data, raw)); - } - - /// Destroys the node stream and returns all the pending outbound substreams, plus an object - /// that signals the remote that we shut down the connection. - #[must_use] - pub fn close(mut self) -> (Close, Vec) { - let substreams = self.cancel_outgoing(); - let close = Close { - muxer: self.inner.clone(), - }; - (close, substreams) - } - - /// Destroys all outbound streams and returns the corresponding user data. - pub fn cancel_outgoing(&mut self) -> Vec { - let mut out = Vec::with_capacity(self.outbound_substreams.len()); - for (user_data, outbound) in self.outbound_substreams.drain(..) { - out.push(user_data); - self.inner.destroy_outbound(outbound); - } - out - } - - /// Provides an API similar to `Future`. - pub fn poll( - &mut self, - cx: &mut Context<'_>, - ) -> Poll, TMuxer::Error>> { - // Polling inbound substream. - match self.inner.poll_event(cx) { - Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(substream))) => { - return Poll::Ready(Ok(SubstreamEvent::InboundSubstream { substream })); - } - Poll::Ready(Ok(StreamMuxerEvent::AddressChange(addr))) => { - return Poll::Ready(Ok(SubstreamEvent::AddressChange(addr))) - } - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => {} - } - - // Polling outbound substreams. - // We remove each element from `outbound_substreams` one by one and add them back. - for n in (0..self.outbound_substreams.len()).rev() { - let (user_data, mut outbound) = self.outbound_substreams.swap_remove(n); - match self.inner.poll_outbound(cx, &mut outbound) { - Poll::Ready(Ok(substream)) => { - self.inner.destroy_outbound(outbound); - return Poll::Ready(Ok(SubstreamEvent::OutboundSubstream { - user_data, - substream, - })); - } - Poll::Pending => { - self.outbound_substreams.push((user_data, outbound)); - } - Poll::Ready(Err(err)) => { - self.inner.destroy_outbound(outbound); - return Poll::Ready(Err(err)); - } - } - } - - // Nothing happened. Register our task to be notified and return. - Poll::Pending - } -} - -impl fmt::Debug for Muxing -where - TMuxer: StreamMuxer, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("Muxing") - .field("outbound_substreams", &self.outbound_substreams.len()) - .finish() - } -} - -impl Drop for Muxing -where - TMuxer: StreamMuxer, -{ - fn drop(&mut self) { - // The substreams that were produced will continue to work, as the muxer is held in an Arc. - // However we will no longer process any further inbound or outbound substream, and we - // therefore close everything. - for (_, outbound) in self.outbound_substreams.drain(..) 
{ - self.inner.destroy_outbound(outbound); - } - } -} - -impl Future for Close -where - TMuxer: StreamMuxer, -{ - type Output = Result<(), TMuxer::Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.muxer.poll_close(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(Ok(())) => Poll::Ready(Ok(())), - Poll::Ready(Err(err)) => Poll::Ready(Err(err)), - } - } -} - -impl fmt::Debug for Close -where - TMuxer: StreamMuxer, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_struct("Close").finish() - } -} - -impl fmt::Debug for SubstreamEvent -where - TMuxer: StreamMuxer, - TMuxer::Substream: fmt::Debug, - TUserData: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SubstreamEvent::InboundSubstream { substream } => f - .debug_struct("SubstreamEvent::OutboundClosed") - .field("substream", substream) - .finish(), - SubstreamEvent::OutboundSubstream { - user_data, - substream, - } => f - .debug_struct("SubstreamEvent::OutboundSubstream") - .field("user_data", user_data) - .field("substream", substream) - .finish(), - SubstreamEvent::AddressChange(address) => f - .debug_struct("SubstreamEvent::AddressChange") - .field("address", address) - .finish(), - } - } -} diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 2a301b70889..c6125f277b1 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -44,6 +44,7 @@ mod map_in; mod map_out; pub mod multi; mod one_shot; +mod pending; mod select; pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; @@ -56,6 +57,7 @@ pub use dummy::DummyConnectionHandler; pub use map_in::MapInEvent; pub use map_out::MapOutEvent; pub use one_shot::{OneShotHandler, OneShotHandlerConfig}; +pub use pending::PendingConnectionHandler; pub use select::{ConnectionHandlerSelect, IntoConnectionHandlerSelect}; /// A handler for a set of protocols used on a connection with a remote. @@ -116,6 +118,12 @@ pub trait ConnectionHandler: Send + 'static { fn listen_protocol(&self) -> SubstreamProtocol; /// Injects the output of a successful upgrade on a new inbound substream. + /// + /// Note that it is up to the [`ConnectionHandler`] implementation to manage the lifetime of the + /// negotiated inbound substreams. E.g. the implementation has to enforce a limit on the number + /// of simultaneously open negotiated inbound substreams. In other words it is up to the + /// [`ConnectionHandler`] implementation to stop a malicious remote node to open and keep alive + /// an excessive amount of inbound substreams. fn inject_fully_negotiated_inbound( &mut self, protocol: ::Output, diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs new file mode 100644 index 00000000000..04c1696515c --- /dev/null +++ b/swarm/src/handler/pending.rs @@ -0,0 +1,120 @@ +// Copyright 2022 Protocol Labs. +// Copyright 2018 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::handler::{ + ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, + SubstreamProtocol, +}; +use crate::NegotiatedSubstream; +use libp2p_core::{ + upgrade::{InboundUpgrade, OutboundUpgrade, PendingUpgrade}, + Multiaddr, +}; +use std::task::{Context, Poll}; +use void::Void; + +/// Implementation of [`ConnectionHandler`] that returns a pending upgrade. +#[derive(Clone, Debug)] +pub struct PendingConnectionHandler { + protocol_name: String, +} + +impl PendingConnectionHandler { + pub fn new(protocol_name: String) -> Self { + PendingConnectionHandler { protocol_name } + } +} + +impl ConnectionHandler for PendingConnectionHandler { + type InEvent = Void; + type OutEvent = Void; + type Error = Void; + type InboundProtocol = PendingUpgrade; + type OutboundProtocol = PendingUpgrade; + type OutboundOpenInfo = Void; + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(PendingUpgrade::new(self.protocol_name.clone()), ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + protocol: >::Output, + _: Self::InboundOpenInfo, + ) { + void::unreachable(protocol) + } + + fn inject_fully_negotiated_outbound( + &mut self, + protocol: >::Output, + _info: Self::OutboundOpenInfo, + ) { + void::unreachable(protocol); + #[allow(unreachable_code)] + { + void::unreachable(_info); + } + } + + fn inject_event(&mut self, v: Self::InEvent) { + void::unreachable(v) + } + + fn inject_address_change(&mut self, _: &Multiaddr) {} + + fn inject_dial_upgrade_error( + &mut self, + _: Self::OutboundOpenInfo, + _: ConnectionHandlerUpgrErr< + >::Error, + >, + ) { + } + + fn inject_listen_upgrade_error( + &mut self, + _: Self::InboundOpenInfo, + _: ConnectionHandlerUpgrErr< + >::Error, + >, + ) { + } + + fn connection_keep_alive(&self) -> KeepAlive { + KeepAlive::No + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + Poll::Pending + } +} diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index f32c2df56bf..7faaa43b44d 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -64,8 +64,7 @@ pub mod dial_opts; pub mod handler; pub use behaviour::{ - CloseConnection, NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, - NotifyHandler, PollParameters, + CloseConnection, NetworkBehaviour, NetworkBehaviourAction, 
NotifyHandler, PollParameters, }; pub use connection::{ ConnectionCounters, ConnectionError, ConnectionLimit, ConnectionLimits, PendingConnectionError, @@ -1625,7 +1624,6 @@ mod tests { use libp2p_core::transport::TransportEvent; use libp2p_core::Endpoint; use quickcheck::{quickcheck, Arbitrary, Gen, QuickCheck}; - use rand::prelude::SliceRandom; use rand::Rng; // Test execution state. @@ -1701,13 +1699,12 @@ mod tests { /// after which one peer bans the other. /// /// The test expects both behaviours to be notified via pairs of - /// inject_connected / inject_disconnected as well as - /// inject_connection_established / inject_connection_closed calls - /// while unbanned. + /// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] + /// calls while unbanned. /// /// While the ban is in effect, further dials occur. For these connections no - /// `inject_connected`, `inject_connection_established`, `inject_disconnected`, - /// `inject_connection_closed` calls should be registered. + /// [`NetworkBehaviour::inject_connection_established`], [`NetworkBehaviour::inject_connection_closed`] + /// calls should be registered. #[test] fn test_connect_disconnect_ban() { // Since the test does not try to open any substreams, we can @@ -1827,8 +1824,7 @@ mod tests { /// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`]. /// /// The test expects both behaviours to be notified via pairs of - /// inject_connected / inject_disconnected as well as - /// inject_connection_established / inject_connection_closed calls. + /// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] calls. #[test] fn test_swarm_disconnect() { // Since the test does not try to open any substreams, we can @@ -1896,8 +1892,7 @@ mod tests { /// using [`NetworkBehaviourAction::CloseConnection`] returned by a [`NetworkBehaviour`]. /// /// The test expects both behaviours to be notified via pairs of - /// inject_connected / inject_disconnected as well as - /// inject_connection_established / inject_connection_closed calls. + /// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] calls. #[test] fn test_behaviour_disconnect_all() { // Since the test does not try to open any substreams, we can @@ -1967,8 +1962,7 @@ mod tests { /// using [`NetworkBehaviourAction::CloseConnection`] returned by a [`NetworkBehaviour`]. /// /// The test expects both behaviours to be notified via pairs of - /// inject_connected / inject_disconnected as well as - /// inject_connection_established / inject_connection_closed calls. + /// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] calls. #[test] fn test_behaviour_disconnect_one() { // Since the test does not try to open any substreams, we can @@ -2430,60 +2424,51 @@ mod tests { assert!(!swarm.is_connected(&peer_id)); } - #[test] - fn multiple_addresses_err() { + #[async_std::test] + async fn multiple_addresses_err() { // Tries dialing multiple addresses, and makes sure there's one dialing error per address. 
let target = PeerId::random(); let mut swarm = new_test_swarm::<_, ()>(DummyConnectionHandler::default()).build(); - let mut addresses = Vec::new(); - for _ in 0..3 { - addresses.push(multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())]); - } - for _ in 0..5 { - addresses.push(multiaddr![Udp(rand::random::())]); - } - addresses.shuffle(&mut rand::thread_rng()); + let addresses = HashSet::from([ + multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], + multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], + multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], + multiaddr![Udp(rand::random::())], + multiaddr![Udp(rand::random::())], + multiaddr![Udp(rand::random::())], + multiaddr![Udp(rand::random::())], + multiaddr![Udp(rand::random::())], + ]); swarm .dial( DialOpts::peer_id(target) - .addresses(addresses.clone()) + .addresses(addresses.iter().cloned().collect()) .build(), ) .unwrap(); - futures::executor::block_on(future::poll_fn(|cx| -> Poll> { - loop { - match swarm.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::OutgoingConnectionError { - peer_id, - // multiaddr, - error: DialError::Transport(errors), - })) => { - assert_eq!(peer_id.unwrap(), target); + match swarm.next().await.unwrap() { + SwarmEvent::OutgoingConnectionError { + peer_id, + // multiaddr, + error: DialError::Transport(errors), + } => { + assert_eq!(target, peer_id.unwrap()); - let failed_addresses = - errors.into_iter().map(|(addr, _)| addr).collect::>(); - assert_eq!( - failed_addresses, - addresses - .clone() - .into_iter() - .map(|addr| addr.with(Protocol::P2p(target.into()))) - .collect::>() - ); + let failed_addresses = errors.into_iter().map(|(addr, _)| addr).collect::>(); + let expected_addresses = addresses + .into_iter() + .map(|addr| addr.with(Protocol::P2p(target.into()))) + .collect::>(); - return Poll::Ready(Ok(())); - } - Poll::Ready(_) => unreachable!(), - Poll::Pending => break Poll::Pending, - } + assert_eq!(expected_addresses, failed_addresses); } - })) - .unwrap(); + e => panic!("Unexpected event: {e:?}"), + } } #[test] diff --git a/transports/deflate/CHANGELOG.md b/transports/deflate/CHANGELOG.md index ebc2d811375..ead0f9cb68f 100644 --- a/transports/deflate/CHANGELOG.md +++ b/transports/deflate/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.36.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.35.0 + +- Update to `libp2p-core` `v0.35.0`. + # 0.34.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/transports/deflate/Cargo.toml b/transports/deflate/Cargo.toml index 82536d8acc3..904500c1cd0 100644 --- a/transports/deflate/Cargo.toml +++ b/transports/deflate/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-deflate" edition = "2021" rust-version = "1.56.1" description = "Deflate encryption protocol for libp2p" -version = "0.34.0" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } flate2 = "1.0" [dev-dependencies] diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index a32d5a95eb5..a6c46fa4191 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.36.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.35.0 + +- Update to `libp2p-core` `v0.35.0`. + # 0.34.0 - Update to `libp2p-core` `v0.34.0`. 
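The refactored `multiple_addresses_err` test above boils down to: collect a set of addresses, hand them to `DialOpts`, and expect a single `OutgoingConnectionError` carrying one transport error per address. Below is a minimal sketch (not part of the patch itself) of that dial construction, using only the `dial_opts` API already shown in the diff; the concrete addresses and the helper name are illustrative assumptions.

use libp2p_core::{Multiaddr, PeerId};
use libp2p_swarm::dial_opts::DialOpts;

/// Illustrative helper: build a dial request for `target` over several explicit addresses.
fn dial_known_addresses(target: PeerId) -> DialOpts {
    let addresses: Vec<Multiaddr> = vec![
        "/ip4/127.0.0.1/tcp/4001".parse().expect("valid multiaddr"),
        "/ip4/127.0.0.1/udp/4001".parse().expect("valid multiaddr"),
    ];

    // If every address fails, the swarm reports them together as
    // `SwarmEvent::OutgoingConnectionError { error: DialError::Transport(errors), .. }`,
    // with each failed address suffixed by `/p2p/<target>`, as asserted in the test above.
    DialOpts::peer_id(target).addresses(addresses).build()
}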
diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 1aaa7a15302..51c9b688c73 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = "1.56.1" description = "DNS transport implementation for libp2p" -version = "0.34.0" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4.1" futures = "0.3.1" async-std-resolver = { version = "0.21", optional = true } diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index ca830796b59..1416aab4e30 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,3 +1,23 @@ +# 0.39.1 [unreleased] + +- Introduce `NoiseAuthenticated::xx` constructor, assuming a X25519 DH key exchange. An XX key exchange and X25519 keys + are the most common way of using noise in libp2p and thus deserve a convenience constructor. See [PR 2887]. + +[PR 2887]: https://github.com/libp2p/rust-libp2p/pull/2887 + +# 0.39.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.38.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. + +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.37.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index f0b8ef8996e..8fef520cb9a 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-noise" edition = "2021" rust-version = "1.56.1" description = "Cryptographic handshake protocol using the noise framework." -version = "0.37.0" +version = "0.39.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,9 +13,9 @@ bytes = "1" curve25519-dalek = "3.0.0" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4" -prost = "0.10" +prost = "0.11" rand = "0.8.3" sha2 = "0.10.0" static_assertions = "1" @@ -33,7 +33,8 @@ async-io = "1.2.0" env_logger = "0.9.0" libp2p-tcp = { path = "../../transports/tcp" } quickcheck = "0.9.0" -sodiumoxide = "0.2.5" +libsodium-sys-stable = { version = "1.19.22", features = ["fetch-latest"] } +ed25519-compact = "1.0.11" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index fa97798fb23..35099ea84dc 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -20,6 +20,7 @@ //! Noise protocol handshake I/O. +#[allow(clippy::derive_partial_eq_without_eq)] mod payload_proto { include!(concat!(env!("OUT_DIR"), "/payload.proto.rs")); } diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index ee609fd028d..1712176d7ef 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -41,12 +41,11 @@ //! ``` //! 
use libp2p_core::{identity, Transport, upgrade}; //! use libp2p_tcp::TcpTransport; -//! use libp2p_noise::{Keypair, X25519Spec, NoiseConfig}; +//! use libp2p_noise::{Keypair, X25519Spec, NoiseAuthenticated}; //! //! # fn main() { //! let id_keys = identity::Keypair::generate_ed25519(); -//! let dh_keys = Keypair::::new().into_authentic(&id_keys).unwrap(); -//! let noise = NoiseConfig::xx(dh_keys).into_authenticated(); +//! let noise = NoiseAuthenticated::xx(&id_keys).unwrap(); //! let builder = TcpTransport::default().upgrade(upgrade::Version::V1).authenticate(noise); //! // let transport = builder.multiplex(...); //! # } @@ -357,6 +356,19 @@ pub struct NoiseAuthenticated { config: NoiseConfig, } +impl NoiseAuthenticated { + /// Create a new [`NoiseAuthenticated`] for the `XX` handshake pattern using X25519 DH keys. + /// + /// For now, this is the only combination that is guaranteed to be compatible with other libp2p implementations. + pub fn xx(id_keys: &identity::Keypair) -> Result { + let dh_keys = Keypair::::new(); + let noise_keys = dh_keys.into_authentic(id_keys)?; + let config = NoiseConfig::xx(noise_keys); + + Ok(config.into_authenticated()) + } +} + impl UpgradeInfo for NoiseAuthenticated where NoiseConfig: UpgradeInfo, diff --git a/transports/noise/src/protocol/x25519.rs b/transports/noise/src/protocol/x25519.rs index bc22dcc70b9..0ffa9991ae6 100644 --- a/transports/noise/src/protocol/x25519.rs +++ b/transports/noise/src/protocol/x25519.rs @@ -120,6 +120,7 @@ impl Protocol for X25519 { Ok(PublicKey(X25519(pk))) } + #[allow(irrefutable_let_patterns)] fn linked(id_pk: &identity::PublicKey, dh_pk: &PublicKey) -> bool { if let identity::PublicKey::Ed25519(ref p) = id_pk { PublicKey::from_ed25519(p).as_ref() == dh_pk.as_ref() @@ -162,6 +163,7 @@ impl Keypair { /// > See also: /// > /// > * [Noise: Static Key Reuse](http://www.noiseprotocol.org/noise.html#security-considerations) + #[allow(unreachable_patterns)] pub fn from_identity(id_keys: &identity::Keypair) -> Option> { match id_keys { identity::Keypair::Ed25519(p) => { @@ -278,10 +280,13 @@ impl snow::types::Dh for Keypair { #[cfg(test)] mod tests { use super::*; + // Use the ed25519_compact for testing + use ed25519_compact; use libp2p_core::identity::ed25519; + // Use the libsodium-sys-stable crypto_sign imports for testing + use libsodium_sys::crypto_sign_ed25519_pk_to_curve25519; + use libsodium_sys::crypto_sign_ed25519_sk_to_curve25519; use quickcheck::*; - use sodiumoxide::crypto::sign; - use std::os::raw::c_int; use x25519_dalek::StaticSecret; // ed25519 to x25519 keypair conversion must yield the same results as @@ -292,9 +297,11 @@ mod tests { let ed25519 = ed25519::Keypair::generate(); let x25519 = Keypair::from(SecretKey::from_ed25519(&ed25519.secret())); - let sodium_sec = ed25519_sk_to_curve25519(&sign::SecretKey(ed25519.encode())); - let sodium_pub = - ed25519_pk_to_curve25519(&sign::PublicKey(ed25519.public().encode().clone())); + let sodium_sec = + ed25519_sk_to_curve25519(&ed25519_compact::SecretKey::new(ed25519.encode())); + let sodium_pub = ed25519_pk_to_curve25519(&ed25519_compact::PublicKey::new( + ed25519.public().encode().clone(), + )); let our_pub = x25519.public.0; // libsodium does the [clamping] of the scalar upon key construction, @@ -327,18 +334,10 @@ mod tests { quickcheck(prop as fn() -> _); } - // Bindings to libsodium's ed25519 to curve25519 key conversions, to check that - // they agree with the conversions performed in this module. 
- - extern "C" { - pub fn crypto_sign_ed25519_pk_to_curve25519(c: *mut u8, e: *const u8) -> c_int; - pub fn crypto_sign_ed25519_sk_to_curve25519(c: *mut u8, e: *const u8) -> c_int; - } - - pub fn ed25519_pk_to_curve25519(k: &sign::PublicKey) -> Option<[u8; 32]> { + pub fn ed25519_pk_to_curve25519(k: &ed25519_compact::PublicKey) -> Option<[u8; 32]> { let mut out = [0u8; 32]; unsafe { - if crypto_sign_ed25519_pk_to_curve25519(out.as_mut_ptr(), (&k.0).as_ptr()) == 0 { + if crypto_sign_ed25519_pk_to_curve25519(out.as_mut_ptr(), k.as_ptr()) == 0 { Some(out) } else { None @@ -346,10 +345,10 @@ mod tests { } } - pub fn ed25519_sk_to_curve25519(k: &sign::SecretKey) -> Option<[u8; 32]> { + pub fn ed25519_sk_to_curve25519(k: &ed25519_compact::SecretKey) -> Option<[u8; 32]> { let mut out = [0u8; 32]; unsafe { - if crypto_sign_ed25519_sk_to_curve25519(out.as_mut_ptr(), (&k.0).as_ptr()) == 0 { + if crypto_sign_ed25519_sk_to_curve25519(out.as_mut_ptr(), k.as_ptr()) == 0 { Some(out) } else { None diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index 0148d03b4d6..14d09621dd9 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -27,7 +27,8 @@ use libp2p_core::identity; use libp2p_core::transport::{self, Transport}; use libp2p_core::upgrade::{self, apply_inbound, apply_outbound, Negotiated}; use libp2p_noise::{ - Keypair, NoiseConfig, NoiseError, NoiseOutput, RemoteIdentity, X25519Spec, X25519, + Keypair, NoiseAuthenticated, NoiseConfig, NoiseError, NoiseOutput, RemoteIdentity, X25519Spec, + X25519, }; use libp2p_tcp::TcpTransport; use log::info; @@ -39,8 +40,7 @@ fn core_upgrade_compat() { // Tests API compaibility with the libp2p-core upgrade API, // i.e. if it compiles, the "test" is considered a success. let id_keys = identity::Keypair::generate_ed25519(); - let dh_keys = Keypair::::new().into_authentic(&id_keys).unwrap(); - let noise = NoiseConfig::xx(dh_keys).into_authenticated(); + let noise = NoiseAuthenticated::xx(&id_keys).unwrap(); let _ = TcpTransport::default() .upgrade(upgrade::Version::V1) .authenticate(noise); diff --git a/transports/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md index 560075bc0a2..bb070580986 100644 --- a/transports/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,3 +1,16 @@ +# 0.36.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.35.0 + +- Update prost requirement from 0.10 to 0.11 which no longer installs the protoc Protobuf compiler. + Thus you will need protoc installed locally. See [PR 2788]. + +- Update to `libp2p-core` `v0.35.0`. + +[PR 2788]: https://github.com/libp2p/rust-libp2p/pull/2788 + # 0.34.0 - Update to `libp2p-core` `v0.34.0`. 
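The noise and plaintext changelog entries above both note that prost 0.11 no longer downloads `protoc`, so the Protobuf compiler must be installed locally. For orientation, a rough sketch (not part of the patch) of what a prost-build 0.11 build script looks like; the proto path below is an assumption, not the crate's actual layout.

// build.rs (sketch): prost-build 0.11 expects `protoc` on PATH or via the PROTOC env var.
fn main() -> std::io::Result<()> {
    // The generated file is named after the proto package (e.g. `payload.proto.rs` here)
    // and is pulled into the crate via `include!(concat!(env!("OUT_DIR"), ...))`.
    prost_build::compile_protos(&["src/payload.proto"], &["src/"])?;
    Ok(())
}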
diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index e5534f93c1e..f250c2a4287 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-plaintext" edition = "2021" rust-version = "1.56.1" description = "Plaintext encryption dummy protocol for libp2p" -version = "0.34.0" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,9 +14,9 @@ categories = ["network-programming", "asynchronous"] bytes = "1" futures = "0.3.1" asynchronous-codec = "0.6" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4.8" -prost = "0.10" +prost = "0.11" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0.2" @@ -26,4 +26,4 @@ quickcheck = "0.9.0" rand = "0.7" [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index 1e9cfecf66f..9855a3297b3 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -35,6 +35,7 @@ use void::Void; mod error; mod handshake; +#[allow(clippy::derive_partial_eq_without_eq)] mod structs_proto { include!(concat!(env!("OUT_DIR"), "/structs.rs")); } diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 9425209e68b..a5bd2fad149 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -11,8 +11,8 @@ license = "MIT" async-global-executor = "2.0.2" async-io = "1.6.0" futures = "0.3.15" -if-watch = "1.0.0" -libp2p-core = { version = "0.34.0", path = "../../core" } +if-watch = "2.0.0" +libp2p-core = { version = "0.36.0", path = "../../core" } parking_lot = "0.12.0" quinn-proto = { version = "0.8.2", default-features = false, features = ["tls-rustls"] } rand = "0.8.5" @@ -29,7 +29,7 @@ yasna = "0.5.0" anyhow = "1.0.41" async-std = { version = "1.10.0", features = ["attributes"] } async-trait = "0.1.50" -libp2p = { version = "0.47.0", default-features = false, features = ["request-response"], path = "../.." } +libp2p = { version = "0.49.0", default-features = false, features = ["request-response"], path = "../.." } rand = "0.8.4" tracing-subscriber = {version = "0.3.8", features = ["env-filter"] } quickcheck = "1" diff --git a/transports/quic/src/connection.rs b/transports/quic/src/connection.rs index 7027735c5e7..8b3f371ffe0 100644 --- a/transports/quic/src/connection.rs +++ b/transports/quic/src/connection.rs @@ -26,7 +26,7 @@ //! All interactions with a QUIC connection should be done through this struct. // TODO: docs -use crate::endpoint::Endpoint; +use crate::endpoint::{Endpoint, ToEndpoint}; use async_io::Timer; use futures::{channel::mpsc, prelude::*}; @@ -34,8 +34,6 @@ use libp2p_core::PeerId; use std::{ fmt, net::SocketAddr, - pin::Pin, - sync::Arc, task::{Context, Poll}, time::Instant, }; @@ -46,9 +44,9 @@ use std::{ /// Tied to a specific [`Endpoint`]. pub struct Connection { /// Endpoint this connection belongs to. - endpoint: Arc, - /// Future whose job is to send a message to the endpoint. Only one at a time. - pending_to_endpoint: Option + Send + Sync>>>, + endpoint: Endpoint, + /// Pending message to be sent to the background task that is driving the endpoint. + pending_to_endpoint: Option, /// Events that the endpoint will send in destination to our local [`quinn_proto::Connection`]. 
/// Passed at initialization. from_endpoint: mpsc::Receiver, @@ -60,17 +58,6 @@ pub struct Connection { connection_id: quinn_proto::ConnectionHandle, /// `Future` that triggers at the `Instant` that `self.connection.poll_timeout()` indicates. next_timeout: Option, - - /// In other to avoid race conditions where a "connected" event happens if we were not - /// handshaking, we cache whether the connection is handshaking and only set this to true - /// after a "connected" event has been received. - /// - /// In other words, this flag indicates whether a "connected" hasn't been received yet. - is_handshaking: bool, - /// Contains a `Some` if the connection is closed, with the reason of the closure. - /// Contains `None` if it is still open. - /// Contains `Some` if and only if a `ConnectionLost` event has been emitted. - closed: Option, } /// Error on the connection as a whole. @@ -79,6 +66,9 @@ pub enum Error { /// Endpoint has force-killed this connection because it was too busy. #[error("Endpoint has force-killed our connection")] ClosedChannel, + /// The background task driving the endpoint has crashed. + #[error("Background task crashed")] + TaskCrashed, /// Error in the inner state machine. #[error("{0}")] Quinn(#[from] quinn_proto::ConnectionError), @@ -99,16 +89,13 @@ impl Connection { /// /// This function assumes that the [`quinn_proto::Connection`] is completely fresh and none of /// its methods has ever been called. Failure to comply might lead to logic errors and panics. - // TODO: maybe abstract `to_endpoint` more and make it generic? dunno pub fn from_quinn_connection( - endpoint: Arc, + endpoint: Endpoint, connection: quinn_proto::Connection, connection_id: quinn_proto::ConnectionHandle, from_endpoint: mpsc::Receiver, ) -> Self { - assert!(!connection.is_closed()); - let is_handshaking = connection.is_handshaking(); - + debug_assert!(!connection.is_closed()); Connection { endpoint, pending_to_endpoint: None, @@ -116,44 +103,36 @@ impl Connection { next_timeout: None, from_endpoint, connection_id, - is_handshaking, - closed: None, } } - /// The local address which was used when the peer established the connection. + /// The local address which was used when the remote established the connection to us. /// - /// Works for server connections only. - pub fn local_addr(&self) -> SocketAddr { - debug_assert_eq!(self.connection.side(), quinn_proto::Side::Server); + /// `None` for client connections. + pub fn local_addr(&self) -> Option { + if self.connection.side().is_client() { + return None; + } let endpoint_addr = self.endpoint.socket_addr(); - self.connection - .local_ip() - .map(|ip| SocketAddr::new(ip, endpoint_addr.port())) - .unwrap_or_else(|| { - // In a normal case scenario this should not happen, because - // we get want to get a local addr for a server connection only. - tracing::error!("trying to get quinn::local_ip for a client"); - *endpoint_addr - }) + + // Local address may differ from the socket address if the socket is + // bound to a wildcard address. + let addr = match self.connection.local_ip() { + Some(ip) => SocketAddr::new(ip, endpoint_addr.port()), + // TODO: `quinn_proto::Connection::local_ip` is only supported for linux, + // so for other platforms we currently still return the endpoint address. + None => *endpoint_addr, + }; + Some(addr) } /// Returns the address of the node we're connected to. - // TODO: can change /!\ pub fn remote_addr(&self) -> SocketAddr { self.connection.remote_address() } - /// Returns `true` if this connection is still pending. 
Returns `false` if we are connected to - /// the remote or if the connection is closed. - pub fn is_handshaking(&self) -> bool { - self.is_handshaking - } - /// Returns the address of the node we're connected to. - /// Panics if the connection is still handshaking. pub fn remote_peer_id(&self) -> PeerId { - debug_assert!(!self.is_handshaking()); let session = self.connection.crypto_session(); let identity = session .peer_identity() @@ -172,22 +151,17 @@ impl Connection { /// Start closing the connection. A [`ConnectionEvent::ConnectionLost`] event will be /// produced in the future. pub fn close(&mut self) { - // TODO: what if the user calls this multiple times? // We send a dummy `0` error code with no message, as the API of StreamMuxer doesn't // support this. self.connection .close(Instant::now(), From::from(0u32), Default::default()); - self.endpoint.report_quinn_event_non_block( - self.connection_id, - quinn_proto::EndpointEvent::drained(), - ); } /// Pops a new substream opened by the remote. /// /// If `None` is returned, then a [`ConnectionEvent::StreamAvailable`] event will later be /// produced when a substream is available. - pub fn pop_incoming_substream(&mut self) -> Option { + pub fn accept_substream(&mut self) -> Option { self.connection.streams().accept(quinn_proto::Dir::Bi) } @@ -198,7 +172,7 @@ impl Connection { /// /// If `None` is returned, then a [`ConnectionEvent::StreamOpened`] event will later be /// produced when a substream is available. - pub fn pop_outgoing_substream(&mut self) -> Option { + pub fn open_substream(&mut self) -> Option { self.connection.streams().open(quinn_proto::Dir::Bi) } @@ -210,175 +184,106 @@ impl Connection { /// On success, a [`quinn_proto::StreamEvent::Finished`] event will later be produced when the /// substream has been effectively closed. A [`ConnectionEvent::StreamStopped`] event can also /// be emitted. - pub fn shutdown_substream( + pub fn finish_substream( &mut self, id: quinn_proto::StreamId, ) -> Result<(), quinn_proto::FinishError> { - // closes the write end of the substream without waiting for the remote to receive the - // event. use flush substream to wait for the remote to receive the event. self.connection.send_stream(id).finish() } - /// Polls the connection for an event that happend on it. + /// Polls the connection for an event that happened on it. pub fn poll_event(&mut self, cx: &mut Context<'_>) -> Poll { - // Nothing more can be done if the connection is closed. - // Return `Pending` without registering the waker, essentially freezing the task forever. - if self.closed.is_some() { - return Poll::Pending; - } - - // Process events that the endpoint has sent to us. + let mut closed = None; loop { - match Pin::new(&mut self.from_endpoint).poll_next(cx) { - Poll::Ready(Some(event)) => self.connection.handle_event(event), + match self.from_endpoint.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => { + self.connection.handle_event(event); + continue; + } Poll::Ready(None) => { - debug_assert!(self.closed.is_none()); - let err = Error::ClosedChannel; - self.closed = Some(err.clone()); - return Poll::Ready(ConnectionEvent::ConnectionLost(err)); + if closed.is_none() { + return Poll::Ready(ConnectionEvent::ConnectionLost(Error::ClosedChannel)); + } } - Poll::Pending => break, + Poll::Pending => {} } - } - 'send_pending: loop { // Sending the pending event to the endpoint. If the endpoint is too busy, we just // stop the processing here. 
- // There is a bit of a question in play here: should we continue to accept events - // through `from_endpoint` if `to_endpoint` is busy? // We need to be careful to avoid a potential deadlock if both `from_endpoint` and // `to_endpoint` are full. As such, we continue to transfer data from `from_endpoint` // to the `quinn_proto::Connection` (see above). // However we don't deliver substream-related events to the user as long as // `to_endpoint` is full. This should propagate the back-pressure of `to_endpoint` // being full to the user. - if let Some(pending_to_endpoint) = &mut self.pending_to_endpoint { - match Future::poll(Pin::new(pending_to_endpoint), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(()) => self.pending_to_endpoint = None, + if let Some(to_endpoint) = self.pending_to_endpoint.take() { + match self.endpoint.try_send(to_endpoint, cx) { + Ok(Ok(())) => {} + Ok(Err(to_endpoint)) => { + self.pending_to_endpoint = Some(to_endpoint); + return Poll::Pending; + } + Err(_) => { + return Poll::Ready(ConnectionEvent::ConnectionLost(Error::TaskCrashed)); + } } } - let now = Instant::now(); - // Poll the connection for packets to send on the UDP socket and try to send them on // `to_endpoint`. // FIXME max_datagrams - if let Some(transmit) = self.connection.poll_transmit(now, 1) { - let endpoint = self.endpoint.clone(); - debug_assert!(self.pending_to_endpoint.is_none()); - self.pending_to_endpoint = Some(Box::pin(async move { - // TODO: ECN bits not handled - endpoint - .send_udp_packet(transmit.destination, transmit.contents) - .await; - })); - continue 'send_pending; + if let Some(transmit) = self.connection.poll_transmit(Instant::now(), 1) { + // TODO: ECN bits not handled + self.pending_to_endpoint = Some(ToEndpoint::SendUdpPacket { + destination: transmit.destination, + data: transmit.contents, + }); + continue; } // Timeout system. - // We break out of the following loop until if `poll_timeout()` returns `None` or if - // polling `self.next_timeout` returns `Poll::Pending`. - loop { - if let Some(next_timeout) = &mut self.next_timeout { - match Future::poll(Pin::new(next_timeout), cx) { - Poll::Ready(when) => { - self.connection.handle_timeout(when); - self.next_timeout = None; - } - Poll::Pending => break, + if let Some(when) = self.connection.poll_timeout() { + let mut timer = Timer::at(when); + match timer.poll_unpin(cx) { + Poll::Ready(when) => { + self.connection.handle_timeout(when); + continue; } + Poll::Pending => self.next_timeout = Some(timer), } - if let Some(when) = self.connection.poll_timeout() { - self.next_timeout = Some(Timer::at(when)); - continue; - } - break; } // The connection also needs to be able to send control messages to the endpoint. This is // handled here, and we try to send them on `to_endpoint` as well. - if let Some(endpoint_event) = self.connection.poll_endpoint_events() { - let endpoint = self.endpoint.clone(); + if let Some(event) = self.connection.poll_endpoint_events() { let connection_id = self.connection_id; - debug_assert!(self.pending_to_endpoint.is_none()); - self.pending_to_endpoint = Some(Box::pin(async move { - endpoint - .report_quinn_event(connection_id, endpoint_event) - .await; - })); - continue 'send_pending; + self.pending_to_endpoint = Some(ToEndpoint::ProcessConnectionEvent { + connection_id, + event, + }); + continue; + } + + if let Some(closed) = closed { + return Poll::Ready(ConnectionEvent::ConnectionLost(closed)); } // The final step consists in handling the events related to the various substreams. 
- while let Some(event) = self.connection.poll() { - match event { - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Opened { - dir: quinn_proto::Dir::Uni, - }) - | quinn_proto::Event::Stream(quinn_proto::StreamEvent::Available { - dir: quinn_proto::Dir::Uni, - }) - | quinn_proto::Event::DatagramReceived => { - // We don't use datagrams or unidirectional streams. If these events - // happen, it is by some code not compatible with libp2p-quic. - self.connection - .close(Instant::now(), From::from(0u32), Default::default()); - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Readable { id }) => { - return Poll::Ready(ConnectionEvent::StreamReadable(id)); - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Writable { id }) => { - return Poll::Ready(ConnectionEvent::StreamWritable(id)); - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Stopped { - id, .. - }) => { - // The `Stop` QUIC event is more or less similar to a `Reset`, except that - // it applies only on the writing side of the pipe. - return Poll::Ready(ConnectionEvent::StreamStopped(id)); - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Available { - dir: quinn_proto::Dir::Bi, - }) => { - return Poll::Ready(ConnectionEvent::StreamAvailable); + match self.connection.poll() { + Some(ev) => match ConnectionEvent::try_from(ev) { + Ok(ConnectionEvent::ConnectionLost(reason)) => { + // Continue in the loop once more so that we can send a + // `EndpointEvent::drained` to the endpoint before returning. + closed = Some(reason); + continue; } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Opened { - dir: quinn_proto::Dir::Bi, - }) => { - return Poll::Ready(ConnectionEvent::StreamOpened); - } - quinn_proto::Event::ConnectionLost { reason } => { - debug_assert!(self.closed.is_none()); - self.is_handshaking = false; - let err = Error::Quinn(reason); - self.closed = Some(err.clone()); - // self.close(); - // self.connection - // .close(Instant::now(), From::from(0u32), Default::default()); - return Poll::Ready(ConnectionEvent::ConnectionLost(err)); - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Finished { id }) => { - return Poll::Ready(ConnectionEvent::StreamFinished(id)); - } - quinn_proto::Event::Connected => { - debug_assert!(self.is_handshaking); - debug_assert!(!self.connection.is_handshaking()); - self.is_handshaking = false; - return Poll::Ready(ConnectionEvent::Connected); - } - quinn_proto::Event::HandshakeDataReady => { - if !self.is_handshaking { - tracing::error!("Got HandshakeDataReady while not handshaking"); - } - } - } + Ok(event) => return Poll::Ready(event), + Err(_) => unreachable!("We don't use datagrams or unidirectional streams."), + }, + None => {} } - - break; + return Poll::Pending; } - - Poll::Pending } } @@ -388,15 +293,6 @@ impl fmt::Debug for Connection { } } -impl Drop for Connection { - fn drop(&mut self) { - let is_drained = self.connection.is_drained(); - if !is_drained { - self.close(); - } - } -} - /// Event generated by the [`Connection`]. #[derive(Debug)] pub enum ConnectionEvent { @@ -406,10 +302,10 @@ pub enum ConnectionEvent { /// Connection has been closed and can no longer be used. ConnectionLost(Error), - /// Generated after [`Connection::pop_incoming_substream`] has been called and has returned + /// Generated after [`Connection::accept_substream`] has been called and has returned /// `None`. After this event has been generated, this method is guaranteed to return `Some`. 
StreamAvailable, - /// Generated after [`Connection::pop_outgoing_substream`] has been called and has returned + /// Generated after [`Connection::open_substream`] has been called and has returned /// `None`. After this event has been generated, this method is guaranteed to return `Some`. StreamOpened, @@ -418,9 +314,50 @@ pub enum ConnectionEvent { /// Generated after `write_substream` has returned a `Blocked` error. StreamWritable(quinn_proto::StreamId), - /// Generated after [`Connection::shutdown_substream`] has been called. + /// Generated after [`Connection::finish_substream`] has been called. StreamFinished(quinn_proto::StreamId), /// A substream has been stopped. This concept is similar to the concept of a substream being /// "reset", as in a TCP socket being reset for example. StreamStopped(quinn_proto::StreamId), + + HandshakeDataReady, +} + +impl TryFrom for ConnectionEvent { + type Error = quinn_proto::Event; + + fn try_from(event: quinn_proto::Event) -> Result { + match event { + quinn_proto::Event::Stream(quinn_proto::StreamEvent::Readable { id }) => { + Ok(ConnectionEvent::StreamReadable(id)) + } + quinn_proto::Event::Stream(quinn_proto::StreamEvent::Writable { id }) => { + Ok(ConnectionEvent::StreamWritable(id)) + } + quinn_proto::Event::Stream(quinn_proto::StreamEvent::Stopped { id, .. }) => { + Ok(ConnectionEvent::StreamStopped(id)) + } + quinn_proto::Event::Stream(quinn_proto::StreamEvent::Available { + dir: quinn_proto::Dir::Bi, + }) => Ok(ConnectionEvent::StreamAvailable), + quinn_proto::Event::Stream(quinn_proto::StreamEvent::Opened { + dir: quinn_proto::Dir::Bi, + }) => Ok(ConnectionEvent::StreamOpened), + quinn_proto::Event::ConnectionLost { reason } => { + Ok(ConnectionEvent::ConnectionLost(Error::Quinn(reason))) + } + quinn_proto::Event::Stream(quinn_proto::StreamEvent::Finished { id }) => { + Ok(ConnectionEvent::StreamFinished(id)) + } + quinn_proto::Event::Connected => Ok(ConnectionEvent::Connected), + quinn_proto::Event::HandshakeDataReady => Ok(ConnectionEvent::HandshakeDataReady), + ev @ quinn_proto::Event::Stream(quinn_proto::StreamEvent::Opened { + dir: quinn_proto::Dir::Uni, + }) + | ev @ quinn_proto::Event::Stream(quinn_proto::StreamEvent::Available { + dir: quinn_proto::Dir::Uni, + }) + | ev @ quinn_proto::Event::DatagramReceived => Err(ev), + } + } } diff --git a/transports/quic/src/endpoint.rs b/transports/quic/src/endpoint.rs index b3e9568a092..3c34dd3b391 100644 --- a/transports/quic/src/endpoint.rs +++ b/transports/quic/src/endpoint.rs @@ -31,21 +31,23 @@ use crate::{connection::Connection, tls, transport}; use futures::{ - channel::{mpsc, oneshot}, - lock::Mutex, + channel::{ + mpsc::{self, SendError}, + oneshot, + }, prelude::*, }; use quinn_proto::{ClientConfig as QuinnClientConfig, ServerConfig as QuinnServerConfig}; use std::{ - collections::{HashMap, VecDeque}, + collections::HashMap, fmt, net::{Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, - sync::{Arc, Weak}, - task::{Poll, Waker}, + sync::Arc, + task::{Context, Poll}, time::{Duration, Instant}, }; -/// Represents the configuration for the [`Endpoint`]. +/// Represents the configuration for the QUIC endpoint. #[derive(Debug, Clone)] pub struct Config { /// The client configuration to pass to `quinn_proto`. @@ -70,6 +72,10 @@ impl Config { let mut server_config = QuinnServerConfig::with_crypto(Arc::new(server_tls_config)); server_config.transport = Arc::clone(&transport); + // Disables connection migration. 
+ // Long-term this should be enabled, however we then need to handle address change + // on connections in the `QuicMuxer`. + server_config.migration(false); let mut client_config = QuinnClientConfig::new(Arc::new(client_tls_config)); client_config.transport = transport; @@ -82,17 +88,12 @@ impl Config { } /// Object containing all the QUIC resources shared between all connections. -// TODO: expand docs -// TODO: Debug trait -// TODO: remove useless fields +#[derive(Clone)] pub struct Endpoint { /// Channel to the background of the endpoint. - to_endpoint: Mutex>, - - /// Copy of [`Endpoint::to_endpoint`], except not behind a `Mutex`. Used if we want to be - /// guaranteed a slot in the messages buffer. - to_endpoint2: mpsc::Sender, - + to_endpoint: mpsc::Sender, + /// Address that the socket is bound to. + /// Note: this may be a wildcard ip address. socket_addr: SocketAddr, } @@ -101,14 +102,14 @@ impl Endpoint { pub fn new_bidirectional( config: Config, socket_addr: SocketAddr, - ) -> Result<(Arc, mpsc::Receiver), transport::Error> { + ) -> Result<(Endpoint, mpsc::Receiver), transport::Error> { let (new_connections_tx, new_connections_rx) = mpsc::channel(1); let endpoint = Self::new(config, socket_addr, Some(new_connections_tx))?; Ok((endpoint, new_connections_rx)) } /// Builds a new [`Endpoint`] that only supports outbound connections. - pub fn new_dialer(config: Config, is_ipv6: bool) -> Result, transport::Error> { + pub fn new_dialer(config: Config, is_ipv6: bool) -> Result { let socket_addr = if is_ipv6 { SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0) } else { @@ -121,17 +122,15 @@ impl Endpoint { config: Config, socket_addr: SocketAddr, new_connections: Option>, - ) -> Result, transport::Error> { + ) -> Result { // NOT blocking, as per man:bind(2), as we pass an IP address. let socket = std::net::UdpSocket::bind(&socket_addr)?; let (to_endpoint_tx, to_endpoint_rx) = mpsc::channel(32); - let to_endpoint2 = to_endpoint_tx.clone(); - let endpoint = Arc::new(Endpoint { - to_endpoint: Mutex::new(to_endpoint_tx), - to_endpoint2, + let endpoint = Endpoint { + to_endpoint: to_endpoint_tx, socket_addr: socket.local_addr()?, - }); + }; let server_config = new_connections.map(|c| (c, config.server_config.clone())); @@ -140,7 +139,7 @@ impl Endpoint { config.endpoint_config, config.client_config, server_config, - Arc::downgrade(&endpoint), + endpoint.clone(), async_io::Async::::new(socket)?, to_endpoint_rx.fuse(), )) @@ -153,89 +152,30 @@ impl Endpoint { &self.socket_addr } - /// Asks the endpoint to start dialing the given address. - /// - /// Note that this method only *starts* the dialing. `Ok` is returned as soon as possible, even - /// when the remote might end up being unreachable. - pub async fn dial(&self, addr: SocketAddr) -> Result { - // The two `expect`s below can panic if the background task has stopped. The background - // task can stop only if the `Endpoint` is destroyed or if the task itself panics. In other - // words, we panic here iff a panic has already happened somewhere else, which is a - // reasonable thing to do. - let (tx, rx) = oneshot::channel(); - self.to_endpoint - .lock() - .await - .send(ToEndpoint::Dial { addr, result: tx }) - .await - .expect("background task has crashed"); - rx.await.expect("background task has crashed") - } - - /// Asks the endpoint to send a UDP packet. - /// - /// Note that this method only queues the packet and returns as soon as the packet is in queue. 
- /// There is no guarantee that the packet will actually be sent, but considering that this is - /// a UDP packet, you cannot rely on the packet being delivered anyway. - pub async fn send_udp_packet(&self, destination: SocketAddr, data: impl Into>) { - let _ = self - .to_endpoint - .lock() - .await - .send(ToEndpoint::SendUdpPacket { - destination, - data: data.into(), - }) - .await; - } - - /// Report to the endpoint an event on a [`quinn_proto::Connection`]. + /// Try to send a message to the background task without blocking. /// - /// This is typically called by a [`Connection`]. + /// This first polls the channel for capacity. + /// If the channel is full, the message is returned in `Ok(Err(_))` + /// and the context's waker is registered for wake-up. /// - /// If `event.is_drained()` is true, the event indicates that the connection no longer exists. - /// This must therefore be the last event sent using this [`quinn_proto::ConnectionHandle`]. - pub async fn report_quinn_event( - &self, - connection_id: quinn_proto::ConnectionHandle, - event: quinn_proto::EndpointEvent, - ) { - self.to_endpoint - .lock() - .await - .send(ToEndpoint::ProcessConnectionEvent { - connection_id, - event, - }) - .await - .expect("background task has crashed"); - } - - /// Similar to [`Endpoint::report_quinn_event`], except that the message sending is guaranteed - /// to be instantaneous and to succeed. - /// - /// This method bypasses back-pressure mechanisms and is meant to be called only from - /// destructors, where waiting is not advisable. - pub fn report_quinn_event_non_block( - &self, - connection_id: quinn_proto::ConnectionHandle, - event: quinn_proto::EndpointEvent, - ) { - // We implement this by cloning the `mpsc::Sender`. Since each sender is guaranteed a slot - // in the buffer, cloning the sender reserves the slot and sending thus always succeeds. - let result = self - .to_endpoint2 - .clone() - .try_send(ToEndpoint::ProcessConnectionEvent { - connection_id, - event, - }); - assert!(result.is_ok()); + /// If the background task crashed `Err` is returned. + pub fn try_send( + &mut self, + to_endpoint: ToEndpoint, + cx: &mut Context<'_>, + ) -> Result, SendError> { + match self.to_endpoint.poll_ready_unpin(cx) { + Poll::Ready(Ok(())) => {} + Poll::Ready(Err(err)) => return Err(err), + Poll::Pending => return Ok(Err(to_endpoint)), + }; + self.to_endpoint.start_send(to_endpoint).map(Ok) } } + /// Message sent to the endpoint background task. #[derive(Debug)] -enum ToEndpoint { +pub enum ToEndpoint { /// Instruct the endpoint to start connecting to the given address. Dial { /// UDP address to connect to. @@ -259,7 +199,7 @@ enum ToEndpoint { }, } -/// Task that runs in the background for as long as the endpont is alive. Responsible for +/// Task that runs in the background for as long as the endpoint is alive. Responsible for /// processing messages and the UDP socket. /// /// The `receiver` parameter must be the receiving side of the `Endpoint::to_endpoint` sender. @@ -281,13 +221,11 @@ enum ToEndpoint { /// in play: /// /// - One channel, represented by `Endpoint::to_endpoint` and `receiver`, that communicates -/// messages from [`Endpoint`] to the background task and from the [`Connection`] to the -/// background task. +/// messages from [`Endpoint`] to the background task. /// - One channel per each existing connection that communicates messages from the background /// task to that [`Connection`]. /// - One channel for the background task to send newly-opened connections to. 
The receiving -/// side is normally processed by a "listener" as defined by the [`libp2p_core::Transport`] -/// trait. +/// side is processed by the [`QuicTransport`][crate::QuicTransport]. /// /// In order to avoid an unbounded buffering of events, we prioritize sending data on the UDP /// socket over everything else. If the network interface is too busy to process our packets, @@ -334,7 +272,7 @@ enum ToEndpoint { /// The background task shuts down if `endpoint_weak`, `receiver` or `new_connections` become /// disconnected/invalid. This corresponds to the lifetime of the associated [`Endpoint`]. /// -/// Keep in mind that we pass an `Arc` whenever we create a new connection, which +/// Keep in mind that we pass an `Endpoint` whenever we create a new connection, which /// guarantees that the [`Endpoint`], and therefore the background task, is properly kept alive /// for as long as any QUIC connection is open. /// @@ -342,7 +280,7 @@ async fn background_task( endpoint_config: Arc, client_config: quinn_proto::ClientConfig, server_config: Option<(mpsc::Sender, Arc)>, - endpoint_weak: Weak, + endpoint: Endpoint, udp_socket: async_io::Async, mut receiver: stream::Fuse>, ) { @@ -352,7 +290,7 @@ async fn background_task( }; // The actual QUIC state machine. - let mut endpoint = quinn_proto::Endpoint::new(endpoint_config.clone(), server_config); + let mut proto_endpoint = quinn_proto::Endpoint::new(endpoint_config.clone(), server_config); // List of all active connections, with a sender to notify them of events. let mut alive_connections = HashMap::>::new(); @@ -360,19 +298,10 @@ async fn background_task( // Buffer where we write packets received from the UDP socket. let mut socket_recv_buffer = vec![0; 65536]; - // The quinn_proto endpoint can give us new connections for as long as its accept buffer - // isn't full. This buffer is used to push these new connections while we are waiting to - // send them on the `new_connections` channel. We only call `endpoint.accept()` when we remove - // an element from this list, which guarantees that it doesn't grow unbounded. - // TODO: with_capacity? - let mut queued_new_connections = VecDeque::new(); - // Next packet waiting to be transmitted on the UDP socket, if any. - // Note that this variable isn't strictly necessary, but it reduces code duplication in the - // code below. let mut next_packet_out: Option<(SocketAddr, Vec)> = None; - let mut new_connection_waker: Option = None; + let mut is_orphaned = false; // Main loop of the task. loop { @@ -399,26 +328,23 @@ async fn background_task( // The endpoint might request packets to be sent out. This is handled in priority to avoid // buffering up packets. - if let Some(packet) = endpoint.poll_transmit() { - debug_assert!(next_packet_out.is_none()); + if let Some(packet) = proto_endpoint.poll_transmit() { next_packet_out = Some((packet.destination, packet.contents)); continue; } futures::select! { - message = receiver.next().fuse() => { + message = receiver.next() => { // Received a message from a different part of the code requesting us to // do something. match message { - // Shut down if the endpoint has shut down. - None => return, - + None => unreachable!("Sender side is never dropped or closed."), Some(ToEndpoint::Dial { addr, result }) => { // This `"l"` seems necessary because an empty string is an invalid domain // name. While we don't use domain names, the underlying rustls library // is based upon the assumption that we do. 
let (connection_id, connection) = - match endpoint.connect(client_config.clone(), addr, "l") { + match proto_endpoint.connect(client_config.clone(), addr, "l") { Ok(c) => c, Err(err) => { let _ = result.send(Err(err)); @@ -426,14 +352,9 @@ async fn background_task( } }; - let endpoint_arc = match endpoint_weak.upgrade() { - Some(ep) => ep, - None => return, // Shut down the task if the endpoint is dead. - }; - debug_assert_eq!(connection.side(), quinn_proto::Side::Client); let (tx, rx) = mpsc::channel(16); - let connection = Connection::from_quinn_connection(endpoint_arc, connection, connection_id, rx); + let connection = Connection::from_quinn_connection(endpoint.clone(), connection, connection_id, rx); alive_connections.insert(connection_id, tx); let _ = result.send(Ok(connection)); } @@ -449,9 +370,16 @@ async fn background_task( let is_drained_event = event.is_drained(); if is_drained_event { alive_connections.remove(&connection_id); + if is_orphaned && alive_connections.is_empty() { + tracing::info!( + "Listener closed and no active connections remain. Shutting down the background task." + ); + return; + } + } - let event_back = endpoint.handle_event(connection_id, event); + let event_back = proto_endpoint.handle_event(connection_id, event); if let Some(event_back) = event_back { debug_assert!(!is_drained_event); @@ -471,46 +399,6 @@ async fn background_task( } } } - - // The future we create here wakes up if two conditions are fulfilled: - // - // - The `new_connections` channel is ready to accept a new element. - // - `queued_new_connections` is not empty. - // - // When this happens, we pop an element from `queued_new_connections`, put it on the - // channel, and call `endpoint.accept()`, thereby allowing the QUIC state machine to - // feed a new incoming connection to us. - readiness = { - let active = !queued_new_connections.is_empty(); - let new_connections = &mut new_connections; - let new_connection_waker = &mut new_connection_waker; - future::poll_fn(move |cx| { - match new_connections.as_mut() { - Some(ref mut c) if active => { - c.poll_ready(cx) - } - _ => { - let _ = new_connection_waker.insert(cx.waker().clone()); - Poll::Pending - } - } - }) - .fuse() - } => { - if readiness.is_err() { - // new_connections channel has been dropped, meaning that the endpoint has - // been destroyed. - return; - } - - let elem = queued_new_connections.pop_front() - .expect("if queue is empty, the future above is always Pending; qed"); - let new_connections = new_connections.as_mut().expect("in case of None, the future above is always Pending; qed"); - new_connections.start_send(elem) - .expect("future is waken up only if poll_ready returned Ready; qed"); - //endpoint.accept(); - } - result = udp_socket.recv_from(&mut socket_recv_buffer).fuse() => { let (packet_len, packet_src) = match result { Ok(v) => v, @@ -527,7 +415,7 @@ async fn background_task( let packet = From::from(&socket_recv_buffer[..packet_len]); let local_ip = udp_socket.get_ref().local_addr().ok().map(|a| a.ip()); // TODO: ECN bits aren't handled - let event = endpoint.handle(Instant::now(), packet_src, local_ip, None, packet); + let event = proto_endpoint.handle(Instant::now(), packet_src, local_ip, None, packet); match event { None => {}, @@ -543,21 +431,34 @@ async fn background_task( // A new connection has been received. `connec_id` is a newly-allocated // identifier. 
debug_assert_eq!(connec.side(), quinn_proto::Side::Server); - let (tx, rx) = mpsc::channel(16); - alive_connections.insert(connec_id, tx); - let endpoint_arc = match endpoint_weak.upgrade() { - Some(ep) => ep, - None => return, // Shut down the task if the endpoint is dead. + let connection_tx = match new_connections.as_mut() { + Some(tx) => tx, + None => { + tracing::warn!( + "Endpoint reported a new connection even though server capabilities are disabled." + ); + continue + } }; - let connection = Connection::from_quinn_connection(endpoint_arc, connec, connec_id, rx); - - // As explained in the documentation, we put this new connection in an - // intermediary buffer. At the next loop iteration we will try to move it - // to the `new_connections` channel. We call `endpoint.accept()` only once - // the element has successfully been sent on `new_connections`. - queued_new_connections.push_back(connection); - if let Some(waker) = new_connection_waker.take() { - waker.wake(); + + let (tx, rx) = mpsc::channel(16); + let connection = Connection::from_quinn_connection(endpoint.clone(), connec, connec_id, rx); + match connection_tx.try_send(connection) { + Ok(()) => { + alive_connections.insert(connec_id, tx); + } + Err(e) if e.is_disconnected() => { + // Listener was closed. + proto_endpoint.reject_new_connections(); + new_connections = None; + is_orphaned = true; + if alive_connections.is_empty() { + return; + } + } + _ => tracing::warn!( + "Dropping new incoming connection because the channel to the listener is full." + ) } }, } @@ -568,6 +469,8 @@ async fn background_task( impl fmt::Debug for Endpoint { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("Endpoint").finish() + f.debug_struct("Endpoint") + .field("socket_addr", &self.socket_addr) + .finish() } } diff --git a/transports/quic/src/in_addr.rs b/transports/quic/src/in_addr.rs deleted file mode 100644 index 67b6abbf3f3..00000000000 --- a/transports/quic/src/in_addr.rs +++ /dev/null @@ -1,100 +0,0 @@ -use if_watch::{IfEvent, IfWatcher}; - -use futures::{ - future::{BoxFuture, FutureExt}, - stream::Stream, -}; - -use std::{ - io::Result, - net::IpAddr, - ops::DerefMut, - pin::Pin, - task::{Context, Poll}, -}; - -/// Watches for interface changes. -#[derive(Debug)] -pub enum InAddr { - /// The socket accepts connections on a single interface. - One { ip: Option }, - /// The socket accepts connections on all interfaces. - Any { if_watch: Box }, -} - -impl InAddr { - /// If ip is specified then only one `IfEvent::Up` with IpNet(ip)/32 will be generated. - /// If ip is unspecified then `IfEvent::Up/Down` events will be generated for all interfaces. - pub fn new(ip: IpAddr) -> Self { - if ip.is_unspecified() { - let watcher = IfWatch::Pending(IfWatcher::new().boxed()); - InAddr::Any { - if_watch: Box::new(watcher), - } - } else { - InAddr::One { ip: Some(ip) } - } - } -} - -pub enum IfWatch { - Pending(BoxFuture<'static, std::io::Result>), - Ready(Box), -} - -impl std::fmt::Debug for IfWatch { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match *self { - IfWatch::Pending(_) => write!(f, "Pending"), - IfWatch::Ready(_) => write!(f, "Ready"), - } - } -} -impl Stream for InAddr { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let me = Pin::into_inner(self); - loop { - match me { - // If the listener is bound to a single interface, make sure the - // address is reported once. 
- InAddr::One { ip } => { - if let Some(ip) = ip.take() { - return Poll::Ready(Some(Ok(IfEvent::Up(ip.into())))); - } - } - InAddr::Any { if_watch } => { - match if_watch.deref_mut() { - // If we listen on all interfaces, wait for `if-watch` to be ready. - IfWatch::Pending(f) => match futures::ready!(f.poll_unpin(cx)) { - Ok(watcher) => { - *if_watch = Box::new(IfWatch::Ready(Box::new(watcher))); - continue; - } - Err(err) => { - *if_watch = Box::new(IfWatch::Pending(IfWatcher::new().boxed())); - return Poll::Ready(Some(Err(err))); - } - }, - // Consume all events for up/down interface changes. - IfWatch::Ready(watcher) => { - if let Poll::Ready(ev) = watcher.poll_unpin(cx) { - match ev { - Ok(event) => { - return Poll::Ready(Some(Ok(event))); - } - Err(err) => { - return Poll::Ready(Some(Err(err))); - } - } - } - } - } - } - } - break; - } - Poll::Pending - } -} diff --git a/transports/quic/src/lib.rs b/transports/quic/src/lib.rs index 3dca1d3cbe3..2d9a4491bab 100644 --- a/transports/quic/src/lib.rs +++ b/transports/quic/src/lib.rs @@ -55,7 +55,6 @@ mod connection; mod endpoint; mod error; -mod in_addr; mod muxer; mod tls; mod upgrade; diff --git a/transports/quic/src/muxer.rs b/transports/quic/src/muxer.rs index 9fe6e7e4cd9..f2b6d3366bb 100644 --- a/transports/quic/src/muxer.rs +++ b/transports/quic/src/muxer.rs @@ -24,92 +24,213 @@ use crate::error::Error; use futures::{AsyncRead, AsyncWrite}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use parking_lot::Mutex; +use quinn_proto::FinishError; use std::{ - collections::{HashMap, VecDeque}, - fmt, io, + collections::HashMap, + io::{self, Write}, pin::Pin, - sync::{Arc, Weak}, + sync::Arc, task::{Context, Poll, Waker}, }; /// State for a single opened QUIC connection. +#[derive(Debug)] pub struct QuicMuxer { // Note: This could theoretically be an asynchronous future, in order to yield the current // task if a task running in parallel is already holding the lock. However, using asynchronous // mutexes without async/await is extremely tedious and maybe not worth the effort. - inner: Arc>, + inner: Arc>, } /// Mutex-protected fields of [`QuicMuxer`]. -struct QuicMuxerInner { +#[derive(Debug)] +struct Inner { /// Inner connection object that yields events. connection: Connection, // /// State of all the substreams that the muxer reports as open. substreams: HashMap, - /// A FIFO of wakers to wake if a new outgoing substream is opened. - pending_substreams: VecDeque, - /// Waker to wake if the connection is closed. - poll_close_waker: Option, - /// Waker to wake if any event is happened. - poll_event_waker: Option, + /// Waker to wake if a new outbound substream is opened. + poll_outbound_waker: Option, + /// Waker to wake if a new inbound substream was happened. + poll_inbound_waker: Option, + /// Waker to wake if the connection should be polled again. + poll_connection_waker: Option, } /// State of a single substream. -#[derive(Default, Clone)] +#[derive(Debug, Default, Clone)] struct SubstreamState { /// Waker to wake if the substream becomes readable or stopped. read_waker: Option, /// Waker to wake if the substream becomes writable or stopped. write_waker: Option, - /// True if the substream has been finished. - finished: bool, - /// True if the substream has been stopped. - stopped: bool, /// Waker to wake if the substream becomes closed or stopped. finished_waker: Option, } impl QuicMuxer { /// Crate-internal function that builds a [`QuicMuxer`] from a raw connection. 
- /// - /// # Panic - /// - /// Panics if `connection.is_handshaking()` returns `true`. pub(crate) fn from_connection(connection: Connection) -> Self { - assert!(!connection.is_handshaking()); - QuicMuxer { - inner: Arc::new(Mutex::new(QuicMuxerInner { + inner: Arc::new(Mutex::new(Inner { connection, substreams: Default::default(), - pending_substreams: Default::default(), - poll_close_waker: None, - poll_event_waker: None, + poll_outbound_waker: None, + poll_inbound_waker: None, + poll_connection_waker: None, })), } } } -pub struct Substream { - id: quinn_proto::StreamId, - muxer: Weak>, -} +impl StreamMuxer for QuicMuxer { + type Substream = Substream; + type Error = Error; -impl Substream { - fn new(id: quinn_proto::StreamId, muxer: Arc>) -> Self { - Self { - id, - muxer: Arc::downgrade(&muxer), + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let mut inner = self.inner.lock(); + // Poll the inner [`quinn_proto::Connection`] for events and wake + // the wakers of related poll-based methods. + while let Poll::Ready(event) = inner.connection.poll_event(cx) { + match event { + ConnectionEvent::Connected | ConnectionEvent::HandshakeDataReady => { + debug_assert!( + false, + "Unexpected event {:?} on established QUIC connection", + event + ); + } + ConnectionEvent::ConnectionLost(err) => { + return Poll::Ready(Err(Error::ConnectionLost(err))) + } + ConnectionEvent::StreamOpened => { + if let Some(waker) = inner.poll_outbound_waker.take() { + waker.wake(); + } + } + ConnectionEvent::StreamReadable(substream) => { + if let Some(substream) = inner.substreams.get_mut(&substream) { + if let Some(waker) = substream.read_waker.take() { + waker.wake(); + } + } + } + ConnectionEvent::StreamWritable(substream) => { + if let Some(substream) = inner.substreams.get_mut(&substream) { + if let Some(waker) = substream.write_waker.take() { + waker.wake(); + } + } + } + ConnectionEvent::StreamFinished(substream) + | ConnectionEvent::StreamStopped(substream) => { + if let Some(substream) = inner.substreams.get_mut(&substream) { + if let Some(waker) = substream.read_waker.take() { + waker.wake(); + } + if let Some(waker) = substream.write_waker.take() { + waker.wake(); + } + if let Some(waker) = substream.finished_waker.take() { + waker.wake(); + } + } + } + ConnectionEvent::StreamAvailable => { + if let Some(waker) = inner.poll_inbound_waker.take() { + waker.wake(); + } + } + } } + inner.poll_connection_waker = Some(cx.waker().clone()); + + // TODO: If connection migration is enabled (currently disabled) address + // change on the connection needs to be handled. 
+ + Poll::Pending } -} -impl Drop for Substream { - fn drop(&mut self) { - if let Some(muxer) = self.muxer.upgrade() { - let mut muxer = muxer.lock(); - muxer.substreams.remove(&self.id); + fn poll_inbound( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let mut inner = self.inner.lock(); + let substream_id = match inner.connection.accept_substream() { + Some(id) => { + inner.poll_outbound_waker = None; + id + } + None => { + inner.poll_inbound_waker = Some(cx.waker().clone()); + return Poll::Pending; + } + }; + inner.substreams.insert(substream_id, Default::default()); + let substream = Substream::new(substream_id, self.inner.clone()); + Poll::Ready(Ok(substream)) + } + + fn poll_outbound( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let mut inner = self.inner.lock(); + let substream_id = match inner.connection.open_substream() { + Some(id) => { + inner.poll_outbound_waker = None; + id + } + None => { + inner.poll_outbound_waker = Some(cx.waker().clone()); + return Poll::Pending; + } + }; + inner.substreams.insert(substream_id, Default::default()); + let substream = Substream::new(substream_id, self.inner.clone()); + Poll::Ready(Ok(substream)) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut inner = self.inner.lock(); + if inner.connection.connection.is_drained() { + return Poll::Ready(Ok(())); + } + + if inner.connection.connection.streams().send_streams() != 0 { + for substream in inner.substreams.keys().cloned().collect::>() { + if let Err(e) = inner.connection.finish_substream(substream) { + tracing::warn!("substream finish error on muxer close: {}", e); + } + } + } + loop { + if inner.connection.connection.streams().send_streams() == 0 + && !inner.connection.connection.is_closed() + { + inner.connection.close() + } + match inner.connection.poll_event(cx) { + Poll::Ready(ConnectionEvent::ConnectionLost(_)) => return Poll::Ready(Ok(())), + Poll::Ready(_) => {} + Poll::Pending => break, + } } + Poll::Pending + } +} + +pub struct Substream { + id: quinn_proto::StreamId, + muxer: Arc>, +} + +impl Substream { + fn new(id: quinn_proto::StreamId, muxer: Arc>) -> Self { + Self { id, muxer } } } @@ -118,34 +239,20 @@ impl AsyncRead for Substream { self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: &mut [u8], - ) -> Poll> { + ) -> Poll> { use quinn_proto::{ReadError, ReadableError}; - use std::io::Write; - - let muxer = self - .muxer - .upgrade() - .expect("StreamMuxer::read_substream: muxer is dead"); - let mut muxer = muxer.lock(); - - let substream_state = muxer - .substreams - .get(&self.id) - .expect("invalid Substream::poll_read API usage"); - if substream_state.stopped { - return Poll::Ready(Ok(0)); - } + let mut muxer = self.muxer.lock(); let mut stream = muxer.connection.connection.recv_stream(self.id); let mut chunks = match stream.read(true) { Ok(chunks) => chunks, Err(ReadableError::UnknownStream) => { - return Poll::Ready(Ok(0)); // FIXME This is a hack, - // a rust client should close substream correctly - // return Poll::Ready(Err(Self::Error::ExpiredStream)) + return Poll::Ready(Ok(0)); } Err(ReadableError::IllegalOrderedRead) => { - panic!("Illegal ordered read can only happen if `stream.read(false)` is used."); + unreachable!( + "Illegal ordered read can only happen if `stream.read(false)` is used." 
+ ); } }; let mut bytes = 0; @@ -159,17 +266,9 @@ impl AsyncRead for Substream { buf.write_all(&chunk.bytes).expect("enough buffer space"); bytes += chunk.bytes.len(); } - Ok(None) => { - break; - } - Err(ReadError::Reset(error_code)) => { - tracing::error!( - "substream {} was reset with error code {}", - self.id, - error_code - ); - bytes = 0; - break; + Ok(None) => break, + Err(err @ ReadError::Reset(_)) => { + return Poll::Ready(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))) } Err(ReadError::Blocked) => { pending = true; @@ -178,12 +277,12 @@ impl AsyncRead for Substream { } } if chunks.finalize().should_transmit() { - if let Some(waker) = muxer.poll_event_waker.take() { + if let Some(waker) = muxer.poll_connection_waker.take() { waker.wake(); } } if pending && bytes == 0 { - let mut substream_state = muxer + let substream_state = muxer .substreams .get_mut(&self.id) .expect("known substream; qed"); @@ -201,33 +300,23 @@ impl AsyncWrite for Substream { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - use quinn_proto::WriteError; - - let muxer = self - .muxer - .upgrade() - .expect("Substream::poll_write: muxer is dead"); - let mut muxer = muxer.lock(); + let mut muxer = self.muxer.lock(); match muxer.connection.connection.send_stream(self.id).write(buf) { Ok(bytes) => Poll::Ready(Ok(bytes)), - Err(WriteError::Blocked) => { - let mut substream = muxer + Err(quinn_proto::WriteError::Blocked) => { + let substream = muxer .substreams .get_mut(&self.id) .expect("known substream; qed"); substream.write_waker = Some(cx.waker().clone()); Poll::Pending } - Err(err @ WriteError::Stopped(_)) => { + Err(err @ quinn_proto::WriteError::Stopped(_)) => { Poll::Ready(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))) } - Err(WriteError::UnknownStream) => { - tracing::error!( - "The application used a connection that is already being \ - closed. This is a bug in the application or in libp2p." - ); - Poll::Pending + Err(quinn_proto::WriteError::UnknownStream) => { + Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) } } } @@ -238,195 +327,41 @@ impl AsyncWrite for Substream { } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let muxer = self - .muxer - .upgrade() - .expect("Substream::poll_close: muxer is dead"); - let mut muxer = muxer.lock(); - let muxer = &mut *muxer; - - let mut substream_state = muxer - .substreams - .get_mut(&self.id) - .expect("invalid Substream::poll_close API usage"); - if substream_state.finished { - return Poll::Ready(Ok(())); - } - - match muxer.connection.shutdown_substream(self.id) { + let mut muxer = self.muxer.lock(); + match muxer.connection.finish_substream(self.id) { Ok(()) => { + let substream_state = muxer + .substreams + .get_mut(&self.id) + .expect("Substream is not finished."); substream_state.finished_waker = Some(cx.waker().clone()); Poll::Pending } Err(err @ quinn_proto::FinishError::Stopped(_)) => { Poll::Ready(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))) } - Err(quinn_proto::FinishError::UnknownStream) => { - // Illegal usage of the API. - debug_assert!(false); - Poll::Ready(Ok(())) - // Poll::Ready(Err(Error::ExpiredStream)) FIXME - } + Err(quinn_proto::FinishError::UnknownStream) => Poll::Ready(Ok(())), } } } -impl StreamMuxer for QuicMuxer { - type OutboundSubstream = (); - type Substream = Substream; - type Error = Error; - - /// Polls for a connection-wide event. - /// - /// This function behaves the same as a `Stream`. 
- /// - /// If `Pending` is returned, then the current task will be notified once the muxer - /// is ready to be polled, similar to the API of `Stream::poll()`. - /// Only the latest task that was used to call this method may be notified. - /// - /// It is permissible and common to use this method to perform background - /// work, such as processing incoming packets and polling timers. - /// - /// An error can be generated if the connection has been closed. - fn poll_event( - &self, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - // We use `poll_event` to perform the background processing of the entire connection. - let mut inner = self.inner.lock(); - - while let Poll::Ready(event) = inner.connection.poll_event(cx) { - match event { - ConnectionEvent::Connected => { - tracing::error!("Unexpected Connected event on established QUIC connection"); - } - ConnectionEvent::ConnectionLost(_) => { - if let Some(waker) = inner.poll_close_waker.take() { - waker.wake(); - } - inner.connection.close(); - } - - ConnectionEvent::StreamOpened => { - if let Some(waker) = inner.pending_substreams.pop_front() { - waker.wake(); - } - } - ConnectionEvent::StreamReadable(substream) => { - if let Some(substream) = inner.substreams.get_mut(&substream) { - if let Some(waker) = substream.read_waker.take() { - waker.wake(); - } - } - } - ConnectionEvent::StreamWritable(substream) => { - if let Some(substream) = inner.substreams.get_mut(&substream) { - if let Some(waker) = substream.write_waker.take() { - waker.wake(); - } - } - } - ConnectionEvent::StreamFinished(substream) => { - if let Some(substream) = inner.substreams.get_mut(&substream) { - substream.finished = true; - if let Some(waker) = substream.finished_waker.take() { - waker.wake(); - } - } - } - ConnectionEvent::StreamStopped(substream) => { - if let Some(substream) = inner.substreams.get_mut(&substream) { - substream.stopped = true; - } - } - ConnectionEvent::StreamAvailable => { - // Handled below. - } - } - } - - if let Some(substream_id) = inner.connection.pop_incoming_substream() { - inner.substreams.insert(substream_id, Default::default()); - let substream = Substream::new(substream_id, self.inner.clone()); - Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(substream))) - } else { - inner.poll_event_waker = Some(cx.waker().clone()); - Poll::Pending - } - } - - /// Opens a new outgoing substream, and produces the equivalent to a future that will be - /// resolved when it becomes available. - /// - /// We provide the same handler to poll it by multiple tasks, which is done as a FIFO - /// queue via `poll_outbound`. - fn open_outbound(&self) -> Self::OutboundSubstream {} - - /// Polls the outbound substream. - /// - /// If `Pending` is returned, then the current task will be notified once the substream - /// is ready to be polled, similar to the API of `Future::poll()`. - fn poll_outbound( - &self, - cx: &mut Context<'_>, - _: &mut Self::OutboundSubstream, - ) -> Poll> { - let mut inner = self.inner.lock(); - if let Some(substream_id) = inner.connection.pop_outgoing_substream() { - inner.substreams.insert(substream_id, Default::default()); - let substream = Substream::new(substream_id, self.inner.clone()); - Poll::Ready(Ok(substream)) - } else { - inner.pending_substreams.push_back(cx.waker().clone()); - Poll::Pending - } - } - - /// Destroys an outbound substream future. Use this after the outbound substream has finished, - /// or if you want to interrupt it. 
- fn destroy_outbound(&self, _: Self::OutboundSubstream) { - // Do nothing because we don't know which waker should be destroyed. - // TODO `Self::OutboundSubstream` -> autoincrement id. - } - - fn poll_close(&self, cx: &mut Context<'_>) -> Poll> { - let mut inner = self.inner.lock(); - - if inner.connection.connection.is_drained() { - return Poll::Ready(Ok(())); - } - - if inner.substreams.is_empty() { - let connection = &mut inner.connection; - if !connection.connection.is_closed() { - connection.close(); - if let Some(waker) = inner.poll_event_waker.take() { - waker.wake(); - } - } else { - } - while let Poll::Ready(event) = inner.connection.poll_event(cx) { - if let ConnectionEvent::ConnectionLost(_) = event { - return Poll::Ready(Ok(())); - } - } - } else { - for substream in inner.substreams.clone().keys() { - if let Err(e) = inner.connection.shutdown_substream(*substream) { - tracing::error!("substream finish error on muxer close: {}", e); - } +impl Drop for Substream { + fn drop(&mut self) { + let mut muxer = self.muxer.lock(); + muxer.substreams.remove(&self.id); + let _ = muxer + .connection + .connection + .recv_stream(self.id) + .stop(0u32.into()); + let mut send_stream = muxer.connection.connection.send_stream(self.id); + match send_stream.finish() { + Ok(()) => {} + // Already finished or reset, which is fine. + Err(FinishError::UnknownStream) => {} + Err(FinishError::Stopped(reason)) => { + let _ = send_stream.reset(reason); } } - - // Register `cx.waker()` as being woken up if the connection closes. - inner.poll_close_waker = Some(cx.waker().clone()); - - Poll::Pending - } -} - -impl fmt::Debug for QuicMuxer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("QuicMuxer").finish() } } diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 8c0f7402f54..e915a29d532 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -23,23 +23,28 @@ //! Combines all the objects in the other modules to implement the trait. use crate::connection::Connection; +use crate::endpoint::ToEndpoint; use crate::Config; -use crate::{endpoint::Endpoint, in_addr::InAddr, muxer::QuicMuxer, upgrade::Upgrade}; +use crate::{endpoint::Endpoint, muxer::QuicMuxer, upgrade::Upgrade}; +use futures::channel::{mpsc, oneshot}; +use futures::ready; use futures::stream::StreamExt; -use futures::{channel::mpsc, prelude::*, stream::SelectAll}; +use futures::{prelude::*, stream::SelectAll}; -use if_watch::IfEvent; +use if_watch::{IfEvent, IfWatcher}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{ListenerId, TransportError, TransportEvent}, PeerId, Transport, }; +use std::collections::VecDeque; +use std::net::IpAddr; +use std::task::Waker; use std::{ net::SocketAddr, pin::Pin, - sync::Arc, task::{Context, Poll}, }; @@ -55,10 +60,10 @@ pub use quinn_proto::{ pub struct QuicTransport { config: Config, listeners: SelectAll, - /// Endpoints to use for dialing Ipv4 addresses if no matching listener exists. - ipv4_dialer: Option>, - /// Endpoints to use for dialing Ipv6 addresses if no matching listener exists. - ipv6_dialer: Option>, + /// Dialer for Ipv4 addresses if no matching listener exists. + ipv4_dialer: Option, + /// Dialer for Ipv6 addresses if no matching listener exists. 
+ ipv6_dialer: Option, } impl QuicTransport { @@ -129,41 +134,51 @@ impl Transport for QuicTransport { let socket_addr = multiaddr_to_socketaddr(&addr) .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; if socket_addr.port() == 0 || socket_addr.ip().is_unspecified() { - tracing::error!("multiaddr not supported"); return Err(TransportError::MultiaddrNotSupported(addr)); } - let listeners = self + let mut listeners = self .listeners - .iter() + .iter_mut() .filter(|l| { let listen_addr = l.endpoint.socket_addr(); listen_addr.is_ipv4() == socket_addr.is_ipv4() && listen_addr.ip().is_loopback() == socket_addr.ip().is_loopback() }) .collect::>(); - let endpoint = if listeners.is_empty() { + + let (tx, rx) = oneshot::channel(); + let to_endpoint = ToEndpoint::Dial { + addr: socket_addr, + result: tx, + }; + if listeners.is_empty() { let dialer = match socket_addr { SocketAddr::V4(_) => &mut self.ipv4_dialer, SocketAddr::V6(_) => &mut self.ipv6_dialer, }; - match dialer { - Some(endpoint) => endpoint.clone(), - None => { - let endpoint = Endpoint::new_dialer(self.config.clone(), socket_addr.is_ipv6()) - .map_err(TransportError::Other)?; - let _ = dialer.insert(endpoint.clone()); - endpoint - } + if dialer.is_none() { + let _ = dialer.insert(Dialer::new(self.config.clone(), socket_addr.is_ipv6())?); } + dialer + .as_mut() + .unwrap() + .pending_dials + .push_back(to_endpoint); } else { // Pick a random listener to use for dialing. let n = rand::random::() % listeners.len(); - let listener = listeners.get(n).expect("Can not be out of bound."); - listener.endpoint.clone() + let listener = listeners.get_mut(n).expect("Can not be out of bound."); + listener.pending_dials.push_back(to_endpoint); + if let Some(waker) = listener.waker.take() { + waker.wake() + } }; Ok(async move { - let connection = endpoint.dial(socket_addr).await.map_err(Error::Reach)?; + let connection = rx + .await + .map_err(|_| Error::TaskCrashed)? + .map_err(Error::Reach)?; let final_connec = Upgrade::from_connection(connection).await?; Ok(final_connec) } @@ -185,6 +200,20 @@ impl Transport for QuicTransport { mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { + if let Some(dialer) = self.ipv4_dialer.as_mut() { + if dialer.drive_dials(cx).is_err() { + // Background task of dialer crashed. + // Drop dialer and all pending dials so that the connection receiver is notified. + self.ipv4_dialer = None; + } + } + if let Some(dialer) = self.ipv6_dialer.as_mut() { + if dialer.drive_dials(cx).is_err() { + // Background task of dialer crashed. + // Drop dialer and all pending dials so that the connection receiver is notified. 
+ self.ipv4_dialer = None; + } + } match self.listeners.poll_next_unpin(cx) { Poll::Ready(Some(ev)) => Poll::Ready(ev), _ => Poll::Pending, @@ -192,26 +221,55 @@ impl Transport for QuicTransport { } } +#[derive(Debug)] +struct Dialer { + endpoint: Endpoint, + pending_dials: VecDeque, +} + +impl Dialer { + fn new(config: Config, is_ipv6: bool) -> Result> { + let endpoint = Endpoint::new_dialer(config, is_ipv6).map_err(TransportError::Other)?; + Ok(Dialer { + endpoint, + pending_dials: VecDeque::new(), + }) + } + + fn drive_dials(&mut self, cx: &mut Context<'_>) -> Result<(), mpsc::SendError> { + if let Some(to_endpoint) = self.pending_dials.pop_front() { + match self.endpoint.try_send(to_endpoint, cx) { + Ok(Ok(())) => {} + Ok(Err(to_endpoint)) => self.pending_dials.push_front(to_endpoint), + Err(err) => { + return Err(err); + } + } + } + Ok(()) + } +} + #[derive(Debug)] struct Listener { - endpoint: Arc, + endpoint: Endpoint, listener_id: ListenerId, /// Channel where new connections are being sent. new_connections_rx: mpsc::Receiver, - /// The IP addresses of network interfaces on which the listening socket - /// is accepting connections. - /// - /// If the listen socket listens on all interfaces, these may change over - /// time as interfaces become available or unavailable. - in_addr: InAddr, - - /// Set to `Some` if this [`Listener`] should close. - /// Optionally contains a [`TransportEvent::ListenerClosed`] that should be - /// reported before the listener's stream is terminated. - report_closed: Option::Item>>, + if_watcher: Option, + + /// Whether the listener was closed and the stream should terminate. + is_closed: bool, + + /// Pending event to reported. + pending_event: Option<::Item>, + + pending_dials: VecDeque, + + waker: Option, } impl Listener { @@ -220,82 +278,79 @@ impl Listener { socket_addr: SocketAddr, config: Config, ) -> Result { - let in_addr = InAddr::new(socket_addr.ip()); let (endpoint, new_connections_rx) = Endpoint::new_bidirectional(config, socket_addr)?; + + let if_watcher; + let pending_event; + if socket_addr.ip().is_unspecified() { + if_watcher = Some(IfWatcher::new()?); + pending_event = None; + } else { + if_watcher = None; + let ma = socketaddr_to_multiaddr(endpoint.socket_addr()); + pending_event = Some(TransportEvent::NewAddress { + listener_id, + listen_addr: ma, + }) + } + Ok(Listener { endpoint, listener_id, new_connections_rx, - in_addr, - report_closed: None, + if_watcher, + is_closed: false, + pending_event, + pending_dials: VecDeque::new(), + waker: None, }) } /// Report the listener as closed in a [`TransportEvent::ListenerClosed`] and /// terminate the stream. fn close(&mut self, reason: Result<(), Error>) { - match self.report_closed { - Some(_) => tracing::debug!("Listener was already closed."), - None => { - // Report the listener event as closed. - let _ = self - .report_closed - .insert(Some(TransportEvent::ListenerClosed { - listener_id: self.listener_id, - reason, - })); - } + if self.is_closed { + return; } + self.pending_event = Some(TransportEvent::ListenerClosed { + listener_id: self.listener_id, + reason, + }); + self.is_closed = true; } /// Poll for a next If Event. 
- fn poll_if_addr(&mut self, cx: &mut Context<'_>) -> Option<::Item> { + fn poll_if_addr(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { + let if_watcher = match self.if_watcher.as_mut() { + Some(iw) => iw, + None => return Poll::Pending, + }; loop { - match self.in_addr.poll_next_unpin(cx) { - Poll::Ready(mut item) => { - if let Some(item) = item.take() { - // Consume all events for up/down interface changes. - match item { - Ok(IfEvent::Up(inet)) => { - let ip = inet.addr(); - if self.endpoint.socket_addr().is_ipv4() == ip.is_ipv4() { - let socket_addr = - SocketAddr::new(ip, self.endpoint.socket_addr().port()); - let ma = socketaddr_to_multiaddr(&socket_addr); - tracing::debug!("New listen address: {}", ma); - return Some(TransportEvent::NewAddress { - listener_id: self.listener_id, - listen_addr: ma, - }); - } - } - Ok(IfEvent::Down(inet)) => { - let ip = inet.addr(); - if self.endpoint.socket_addr().is_ipv4() == ip.is_ipv4() { - let socket_addr = - SocketAddr::new(ip, self.endpoint.socket_addr().port()); - let ma = socketaddr_to_multiaddr(&socket_addr); - tracing::debug!("Expired listen address: {}", ma); - return Some(TransportEvent::AddressExpired { - listener_id: self.listener_id, - listen_addr: ma, - }); - } - } - Err(err) => { - tracing::debug! { - "Failure polling interfaces: {:?}.", - err - }; - return Some(TransportEvent::ListenerError { - listener_id: self.listener_id, - error: err.into(), - }); - } - } + match ready!(if_watcher.poll_if_event(cx)) { + Ok(IfEvent::Up(inet)) => { + if let Some(listen_addr) = ip_to_listenaddr(&self.endpoint, inet.addr()) { + tracing::debug!("New listen address: {}", listen_addr); + return Poll::Ready(TransportEvent::NewAddress { + listener_id: self.listener_id, + listen_addr, + }); + } + } + Ok(IfEvent::Down(inet)) => { + if let Some(listen_addr) = ip_to_listenaddr(&self.endpoint, inet.addr()) { + tracing::debug!("Expired listen address: {}", listen_addr); + return Poll::Ready(TransportEvent::AddressExpired { + listener_id: self.listener_id, + listen_addr, + }); } } - Poll::Pending => return None, + Err(err) => { + return Poll::Ready(TransportEvent::ListenerError { + listener_id: self.listener_id, + error: err.into(), + }) + } } } } @@ -304,33 +359,66 @@ impl Listener { impl Stream for Listener { type Item = TransportEvent<::ListenerUpgrade, Error>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if let Some(closed) = self.report_closed.as_mut() { - // Listener was closed. - // Report the transport event if there is one. On the next iteration, return - // `Poll::Ready(None)` to terminate the stream. 
- return Poll::Ready(closed.take()); - } - if let Some(event) = self.poll_if_addr(cx) { - return Poll::Ready(Some(event)); - } - let connection = match futures::ready!(self.new_connections_rx.poll_next_unpin(cx)) { - Some(c) => c, - None => { - self.close(Err(Error::TaskCrashed)); - return self.poll_next(cx); + loop { + if let Some(event) = self.pending_event.take() { + return Poll::Ready(Some(event)); } - }; + if self.is_closed { + return Poll::Ready(None); + } + match self.poll_if_addr(cx) { + Poll::Ready(event) => return Poll::Ready(Some(event)), + Poll::Pending => {} + } + if let Some(to_endpoint) = self.pending_dials.pop_front() { + match self.endpoint.try_send(to_endpoint, cx) { + Ok(Ok(())) => {} + Ok(Err(to_endpoint)) => self.pending_dials.push_front(to_endpoint), + Err(_) => { + self.close(Err(Error::TaskCrashed)); + continue; + } + } + } + match self.new_connections_rx.poll_next_unpin(cx) { + Poll::Ready(Some(connection)) => { + let local_addr = connection + .local_addr() + .expect("exists for server connections."); + let local_addr = socketaddr_to_multiaddr(&local_addr); + let send_back_addr = socketaddr_to_multiaddr(&connection.remote_addr()); + let event = TransportEvent::Incoming { + upgrade: Upgrade::from_connection(connection), + local_addr, + send_back_addr, + listener_id: self.listener_id, + }; + return Poll::Ready(Some(event)); + } + Poll::Ready(None) => { + self.close(Err(Error::TaskCrashed)); + continue; + } + Poll::Pending => {} + }; + self.waker = Some(cx.waker().clone()); + return Poll::Pending; + } + } +} - let local_addr = socketaddr_to_multiaddr(&connection.local_addr()); - let send_back_addr = socketaddr_to_multiaddr(&connection.remote_addr()); - let event = TransportEvent::Incoming { - upgrade: Upgrade::from_connection(connection), - local_addr, - send_back_addr, - listener_id: self.listener_id, - }; - Poll::Ready(Some(event)) +/// Turn an [`IpAddr`] into a listen-address for the endpoint. +/// +/// Returns `None` if the address is not the same socket family as the +/// address that the endpoint is bound to. +pub fn ip_to_listenaddr(endpoint: &Endpoint, ip: IpAddr) -> Option { + // True if either both addresses are Ipv4 or both Ipv6. + let is_same_ip_family = endpoint.socket_addr().is_ipv4() == ip.is_ipv4(); + if !is_same_ip_family { + return None; } + let socket_addr = SocketAddr::new(ip, endpoint.socket_addr().port()); + Some(socketaddr_to_multiaddr(&socket_addr)) } /// Tries to turn a QUIC multiaddress into a UDP [`SocketAddr`]. 
Returns None if the format diff --git a/transports/quic/src/upgrade.rs b/transports/quic/src/upgrade.rs index 4114c8c2df2..c1cc50110d5 100644 --- a/transports/quic/src/upgrade.rs +++ b/transports/quic/src/upgrade.rs @@ -26,7 +26,7 @@ use crate::{ transport, }; -use futures::prelude::*; +use futures::{prelude::*, ready}; use libp2p_core::PeerId; use std::{ fmt, @@ -57,29 +57,25 @@ impl Future for Upgrade { .as_mut() .expect("Future polled after it has completed"); - let event = Connection::poll_event(connection, cx); - match event { - Poll::Pending => Poll::Pending, - Poll::Ready(ConnectionEvent::Connected) => { - let peer_id = connection.remote_peer_id(); - let muxer = QuicMuxer::from_connection(self.connection.take().unwrap()); - Poll::Ready(Ok((peer_id, muxer))) - } - Poll::Ready(ConnectionEvent::ConnectionLost(err)) => { - Poll::Ready(Err(transport::Error::Established(err))) - } - // Other items are: - // - StreamAvailable - // - StreamOpened - // - StreamReadable - // - StreamWritable - // - StreamFinished - // - StreamStopped - Poll::Ready(_) => { - // They can happen only after we finished handshake and connected to the peer. - // But for `Upgrade` we get `Connected` event, wrap connection into a muxer - // and pass it to the result Stream of muxers. - unreachable!() + loop { + match ready!(connection.poll_event(cx)) { + ConnectionEvent::Connected => { + let peer_id = connection.remote_peer_id(); + let muxer = QuicMuxer::from_connection(self.connection.take().unwrap()); + return Poll::Ready(Ok((peer_id, muxer))); + } + ConnectionEvent::ConnectionLost(err) => { + return Poll::Ready(Err(transport::Error::Established(err))) + } + // Other items are: + // - HandshakeDataReady + // - StreamAvailable + // - StreamOpened + // - StreamReadable + // - StreamWritable + // - StreamFinished + // - StreamStopped + _ => {} } } } diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index bb4f1270950..d6507e73c45 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -1,6 +1,7 @@ use anyhow::Result; use async_trait::async_trait; -use futures::future::FutureExt; +use futures::channel::oneshot; +use futures::future::{join, FutureExt}; use futures::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use futures::select; use futures::stream::StreamExt; @@ -13,7 +14,7 @@ use libp2p::request_response::{ RequestResponseEvent, RequestResponseMessage, }; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; -use libp2p::swarm::{DialError, Swarm, SwarmEvent}; +use libp2p::swarm::{ConnectionError, DialError, Swarm, SwarmEvent}; use libp2p_quic::{Config as QuicConfig, QuicTransport}; use rand::RngCore; use std::num::NonZeroU8; @@ -30,14 +31,6 @@ async fn create_swarm(keylog: bool) -> Result>> let config = QuicConfig::new(&keypair).unwrap(); let transport = QuicTransport::new(config); - // TODO: - // transport - // .transport - // .max_idle_timeout(Some(quinn_proto::VarInt::from_u32(1_000u32).into())); - // if keylog { - // transport.enable_keylogger(); - // } - let transport = Transport::map(transport, |(peer, muxer), _| { (peer, StreamMuxerBox::new(muxer)) }) @@ -77,108 +70,133 @@ async fn smoke() -> Result<()> { let mut data = vec![0; 4096 * 10]; rng.fill_bytes(&mut data); + b.behaviour_mut().add_address(a.local_peer_id(), addr); b.behaviour_mut() - .add_address(&Swarm::local_peer_id(&a), addr); - b.behaviour_mut() - .send_request(&Swarm::local_peer_id(&a), Ping(data.clone())); + .send_request(a.local_peer_id(), Ping(data.clone())); - match b.next().await { - 
Some(SwarmEvent::Dialing(_)) => {} - e => panic!("{:?}", e), - } + let b_id = *b.local_peer_id(); - match a.next().await { - Some(SwarmEvent::IncomingConnection { .. }) => {} - e => panic!("{:?}", e), - }; + let (sync_tx, sync_rx) = oneshot::channel(); - match b.next().await { - Some(SwarmEvent::ConnectionEstablished { .. }) => {} - e => panic!("{:?}", e), - }; + let fut_a = async move { + match a.next().await { + Some(SwarmEvent::IncomingConnection { .. }) => {} + e => panic!("{:?}", e), + }; - match a.next().await { - Some(SwarmEvent::ConnectionEstablished { .. }) => {} - e => panic!("{:?}", e), + match a.next().await { + Some(SwarmEvent::ConnectionEstablished { .. }) => {} + e => panic!("{:?}", e), + }; + + match a.next().await { + Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { + message: + RequestResponseMessage::Request { + request: Ping(ping), + channel, + .. + }, + .. + })) => { + a.behaviour_mut() + .send_response(channel, Pong(ping)) + .unwrap(); + } + e => panic!("{:?}", e), + } + + match a.next().await { + Some(SwarmEvent::Behaviour(RequestResponseEvent::ResponseSent { .. })) => {} + e => panic!("{:?}", e), + } + + a.behaviour_mut() + .send_request(&b_id, Ping(b"another substream".to_vec())); + + assert!(a.next().now_or_never().is_none()); + + match a.next().await { + Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { + message: + RequestResponseMessage::Response { + response: Pong(data), + .. + }, + .. + })) => assert_eq!(data, b"another substream".to_vec()), + e => panic!("{:?}", e), + } + + sync_rx.await.unwrap(); + + a.disconnect_peer_id(b_id).unwrap(); + + match a.next().await { + Some(SwarmEvent::ConnectionClosed { cause: None, .. }) => {} + e => panic!("{:?}", e), + } }; - assert!(b.next().now_or_never().is_none()); - - match a.next().await { - Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { - message: - RequestResponseMessage::Request { - request: Ping(ping), - channel, - .. - }, - .. - })) => { - a.behaviour_mut() - .send_response(channel, Pong(ping)) - .unwrap(); + let fut_b = async { + match b.next().await { + Some(SwarmEvent::Dialing(_)) => {} + e => panic!("{:?}", e), } - e => panic!("{:?}", e), - } - match a.next().await { - Some(SwarmEvent::Behaviour(RequestResponseEvent::ResponseSent { .. })) => {} - e => panic!("{:?}", e), - } + match b.next().await { + Some(SwarmEvent::ConnectionEstablished { .. }) => {} + e => panic!("{:?}", e), + }; - match b.next().await { - Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { - message: - RequestResponseMessage::Response { - response: Pong(pong), - .. - }, - .. - })) => assert_eq!(data, pong), - e => panic!("{:?}", e), - } + assert!(b.next().now_or_never().is_none()); - a.behaviour_mut().send_request( - &Swarm::local_peer_id(&b), - Ping(b"another substream".to_vec()), - ); + match b.next().await { + Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { + message: + RequestResponseMessage::Response { + response: Pong(pong), + .. + }, + .. + })) => assert_eq!(data, pong), + e => panic!("{:?}", e), + } - assert!(a.next().now_or_never().is_none()); + match b.next().await { + Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { + message: + RequestResponseMessage::Request { + request: Ping(data), + channel, + .. + }, + .. 
+ })) => { + b.behaviour_mut() + .send_response(channel, Pong(data)) + .unwrap(); + } + e => panic!("{:?}", e), + } - match b.next().await { - Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { - message: - RequestResponseMessage::Request { - request: Ping(data), - channel, - .. - }, - .. - })) => { - b.behaviour_mut() - .send_response(channel, Pong(data)) - .unwrap(); + match b.next().await { + Some(SwarmEvent::Behaviour(RequestResponseEvent::ResponseSent { .. })) => {} + e => panic!("{:?}", e), } - e => panic!("{:?}", e), - } - match b.next().await { - Some(SwarmEvent::Behaviour(RequestResponseEvent::ResponseSent { .. })) => {} - e => panic!("{:?}", e), - } + sync_tx.send(()).unwrap(); - match a.next().await { - Some(SwarmEvent::Behaviour(RequestResponseEvent::Message { - message: - RequestResponseMessage::Response { - response: Pong(data), - .. - }, - .. - })) => assert_eq!(data, b"another substream".to_vec()), - e => panic!("{:?}", e), - } + match b.next().await { + Some(SwarmEvent::ConnectionClosed { + cause: Some(ConnectionError::IO(_)), + .. + }) => {} + e => panic!("{:?}", e), + } + }; + join(fut_a, fut_b).await; Ok(()) } @@ -385,9 +403,9 @@ fn concurrent_connections_and_streams() { for (listener_peer_id, listener_addr) in &listeners { dialer .behaviour_mut() - .add_address(&listener_peer_id, listener_addr.clone()); + .add_address(listener_peer_id, listener_addr.clone()); - dialer.dial(listener_peer_id.clone()).unwrap(); + dialer.dial(*listener_peer_id).unwrap(); } // Wait for responses to each request. @@ -523,8 +541,8 @@ async fn endpoint_reuse() -> Result<()> { } _ => {} }, - ev = swarm_b.select_next_some() => match ev{ - SwarmEvent::ConnectionEstablished { endpoint, ..} => { + ev = swarm_b.select_next_some() => { + if let SwarmEvent::ConnectionEstablished { endpoint, ..} = ev { match endpoint { ConnectedPoint::Dialer{..} => panic!("Unexpected outbound connection"), ConnectedPoint::Listener {send_back_addr, local_addr} => { @@ -535,7 +553,6 @@ async fn endpoint_reuse() -> Result<()> { } } } - _ => {} }, } } diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index 4a9d0245b2b..ff34ae49407 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -1,3 +1,20 @@ +# 0.37.0 - [unreleased] + +- Update to `if-watch` `v2.0.0`. Simplify `IfWatcher` integration. + Use `if_watch::IfWatcher` for all runtimes. See [PR 2813]. + +[PR 2813]: https://github.com/libp2p/rust-libp2p/pull/2813 + +# 0.36.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.35.0 + +- Update to `libp2p-core` `v0.35.0`. + +- Update to `if-watch` `v1.1.1`. + # 0.34.0 - Update to `libp2p-core` `v0.34.0`. 
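The `libp2p-tcp` CHANGELOG entry just above summarizes the substance of the TCP and QUIC listener rework in this patch: `if_watch::IfWatcher` v2.0.0 is owned and polled directly by the listener on every runtime, instead of being hidden behind a runtime-specific future in the `Provider` trait. A minimal sketch of that polling pattern, following the `poll_if_event`/`IfEvent` usage visible in the diff above; the helper name `poll_matching_if_event` and the `listen_ip` parameter are illustrative, not part of either crate:

```rust
use std::io;
use std::net::IpAddr;
use std::task::{Context, Poll};

use futures::ready;
use if_watch::{IfEvent, IfWatcher};

/// Poll an `IfWatcher` until it yields an event for an address of the same
/// family as `listen_ip`, mirroring how `TcpListenStream::poll_next` and the
/// QUIC `Listener` consume interface events. `poll_if_event` registers the
/// waker itself, so the caller only has to forward `Poll::Pending`.
fn poll_matching_if_event(
    watcher: &mut IfWatcher,
    listen_ip: IpAddr,
    cx: &mut Context<'_>,
) -> Poll<io::Result<IfEvent>> {
    loop {
        let event = match ready!(watcher.poll_if_event(cx)) {
            Ok(event) => event,
            Err(err) => return Poll::Ready(Err(err)),
        };
        let ip = match &event {
            IfEvent::Up(net) | IfEvent::Down(net) => net.addr(),
        };
        // Only report addresses of the family the socket is bound to,
        // matching the `is_ipv4() == ip.is_ipv4()` checks in both transports.
        if ip.is_ipv4() == listen_ip.is_ipv4() {
            return Poll::Ready(Ok(event));
        }
    }
}
```

The design point this illustrates: the watcher is now an ordinary value stored as `Option<IfWatcher>` on the listener (present only when binding to an unspecified address), which is what allows the `IfWatch::Pending`/`IfWatch::Ready` state machine and the per-runtime `Provider::IfWatcher` associated type to be deleted.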
diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index e72f3eaee56..948d9507f0a 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-tcp" edition = "2021" rust-version = "1.56.1" description = "TCP/IP transport protocol for libp2p" -version = "0.34.0" +version = "0.37.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,21 +14,19 @@ categories = ["network-programming", "asynchronous"] async-io-crate = { package = "async-io", version = "1.2.0", optional = true } futures = "0.3.8" futures-timer = "3.0" -if-watch = { version = "1.0.0", optional = true } -if-addrs = { version = "0.7.0", optional = true } -ipnet = "2.0.0" +if-watch = "2.0.0" libc = "0.2.80" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4.11" socket2 = { version = "0.4.0", features = ["all"] } tokio-crate = { package = "tokio", version = "1.19.0", default-features = false, features = ["net"], optional = true } [features] default = ["async-io"] -tokio = ["tokio-crate", "if-addrs"] -async-io = ["async-io-crate", "if-watch"] +tokio = ["tokio-crate"] +async-io = ["async-io-crate"] [dev-dependencies] async-std = { version = "1.6.5", features = ["attributes"] } -tokio-crate = { package = "tokio", version = "1.0.1", default-features = false, features = ["net", "rt"] } +tokio-crate = { package = "tokio", version = "1.0.1", default-features = false, features = ["net", "rt", "macros"] } env_logger = "0.9.0" diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 981c896bcb5..f7b897c0d47 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -28,6 +28,7 @@ mod provider; +use if_watch::{IfEvent, IfWatcher}; #[cfg(feature = "async-io")] pub use provider::async_io; @@ -43,9 +44,8 @@ pub use provider::tokio; pub type TokioTcpTransport = GenTcpTransport; use futures::{ - future::{self, BoxFuture, Ready}, + future::{self, Ready}, prelude::*, - ready, }; use futures_timer::Delay; use libp2p_core::{ @@ -64,7 +64,7 @@ use std::{ time::Duration, }; -use provider::{IfEvent, Provider}; +use provider::{Incoming, Provider}; /// The configuration for a TCP/IP transport capability for libp2p. 
#[derive(Clone, Debug)] @@ -243,6 +243,9 @@ impl GenTcpConfig { /// # use libp2p_core::transport::{ListenerId, TransportEvent}; /// # use libp2p_core::{Multiaddr, Transport}; /// # use std::pin::Pin; + /// # #[cfg(not(feature = "async-io"))] + /// # fn main() {} + /// # /// #[cfg(feature = "async-io")] /// #[async_std::main] /// async fn main() -> std::io::Result<()> { @@ -368,7 +371,25 @@ where socket.bind(&socket_addr.into())?; socket.listen(self.config.backlog as _)?; socket.set_nonblocking(true)?; - TcpListenStream::::new(id, socket.into(), self.port_reuse.clone()) + let listener: TcpListener = socket.into(); + let local_addr = listener.local_addr()?; + + if local_addr.ip().is_unspecified() { + return TcpListenStream::::new( + id, + listener, + Some(IfWatcher::new()?), + self.port_reuse.clone(), + ); + } + + self.port_reuse.register(local_addr.ip(), local_addr.port()); + let listen_addr = ip_to_multiaddr(local_addr.ip(), local_addr.port()); + self.pending_events.push_back(TransportEvent::NewAddress { + listener_id: id, + listen_addr, + }); + TcpListenStream::::new(id, listener, None, self.port_reuse.clone()) } } @@ -398,7 +419,6 @@ impl Transport for GenTcpTransport where T: Provider + Send + 'static, T::Listener: Unpin, - T::IfWatcher: Unpin, T::Stream: Unpin, { type Output = T::Stream; @@ -605,25 +625,6 @@ pub enum TcpListenerEvent { Error(io::Error), } -enum IfWatch { - Pending(BoxFuture<'static, io::Result>), - Ready(TIfWatcher), -} - -/// The listening addresses of a [`TcpListenStream`]. -enum InAddr { - /// The stream accepts connections on a single interface. - One { - addr: IpAddr, - out: Option, - }, - /// The stream accepts connections on all interfaces. - Any { - addrs: HashSet, - if_watch: IfWatch, - }, -} - /// A stream of incoming connections on one or more interfaces. pub struct TcpListenStream where @@ -637,12 +638,12 @@ where listen_addr: SocketAddr, /// The async listening socket for incoming connections. listener: T::Listener, - /// The IP addresses of network interfaces on which the listening socket - /// is accepting connections. + /// Watcher for network interface changes. + /// Reports [`IfEvent`]s for new / deleted ip-addresses when interfaces + /// become or stop being available. /// - /// If the listen socket listens on all interfaces, these may change over - /// time as interfaces become available or unavailable. - in_addr: InAddr, + /// `None` if the socket is only listening on a single interface. + if_watcher: Option, /// The port reuse configuration for outgoing connections. /// /// If enabled, all IP addresses on which this listening stream @@ -666,27 +667,10 @@ where fn new( listener_id: ListenerId, listener: TcpListener, + if_watcher: Option, port_reuse: PortReuse, ) -> io::Result { let listen_addr = listener.local_addr()?; - - let in_addr = if match &listen_addr { - SocketAddr::V4(a) => a.ip().is_unspecified(), - SocketAddr::V6(a) => a.ip().is_unspecified(), - } { - // The `addrs` are populated via `if_watch` when the - // `TcpListenStream` is polled. 
- InAddr::Any { - addrs: HashSet::new(), - if_watch: IfWatch::Pending(T::if_watcher()), - } - } else { - InAddr::One { - out: Some(ip_to_multiaddr(listen_addr.ip(), listen_addr.port())), - addr: listen_addr.ip(), - } - }; - let listener = T::new_listener(listener)?; Ok(TcpListenStream { @@ -694,7 +678,7 @@ where listener, listener_id, listen_addr, - in_addr, + if_watcher, pause: None, sleep_on_error: Duration::from_millis(100), }) @@ -707,15 +691,16 @@ where /// /// Has no effect if port reuse is disabled. fn disable_port_reuse(&mut self) { - match &self.in_addr { - InAddr::One { addr, .. } => { - self.port_reuse.unregister(*addr, self.listen_addr.port()); - } - InAddr::Any { addrs, .. } => { - for addr in addrs { - self.port_reuse.unregister(*addr, self.listen_addr.port()); + match &self.if_watcher { + Some(if_watcher) => { + for ip_net in if_watcher.iter() { + self.port_reuse + .unregister(ip_net.addr(), self.listen_addr.port()); } } + None => self + .port_reuse + .unregister(self.listen_addr.ip(), self.listen_addr.port()), } } } @@ -734,116 +719,78 @@ where T: Provider, T::Listener: Unpin, T::Stream: Unpin, - T::IfWatcher: Unpin, { type Item = Result, io::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let me = Pin::into_inner(self); - loop { - match &mut me.in_addr { - InAddr::Any { if_watch, addrs } => match if_watch { - // If we listen on all interfaces, wait for `if-watch` to be ready. - IfWatch::Pending(f) => match ready!(Pin::new(f).poll(cx)) { - Ok(w) => { - *if_watch = IfWatch::Ready(w); - continue; - } - Err(err) => { - log::debug! { - "Failed to begin observing interfaces: {:?}. Scheduling retry.", - err - }; - *if_watch = IfWatch::Pending(T::if_watcher()); - me.pause = Some(Delay::new(me.sleep_on_error)); - return Poll::Ready(Some(Ok(TcpListenerEvent::Error(err)))); - } - }, - // Consume all events for up/down interface changes. - IfWatch::Ready(watch) => { - while let Poll::Ready(ev) = T::poll_interfaces(watch, cx) { - match ev { - Ok(IfEvent::Up(inet)) => { - let ip = inet.addr(); - if me.listen_addr.is_ipv4() == ip.is_ipv4() && addrs.insert(ip) - { - let ma = ip_to_multiaddr(ip, me.listen_addr.port()); - log::debug!("New listen address: {}", ma); - me.port_reuse.register(ip, me.listen_addr.port()); - return Poll::Ready(Some(Ok( - TcpListenerEvent::NewAddress(ma), - ))); - } - } - Ok(IfEvent::Down(inet)) => { - let ip = inet.addr(); - if me.listen_addr.is_ipv4() == ip.is_ipv4() && addrs.remove(&ip) - { - let ma = ip_to_multiaddr(ip, me.listen_addr.port()); - log::debug!("Expired listen address: {}", ma); - me.port_reuse.unregister(ip, me.listen_addr.port()); - return Poll::Ready(Some(Ok( - TcpListenerEvent::AddressExpired(ma), - ))); - } - } - Err(err) => { - log::debug! { - "Failure polling interfaces: {:?}. Scheduling retry.", - err - }; - me.pause = Some(Delay::new(me.sleep_on_error)); - return Poll::Ready(Some(Ok(TcpListenerEvent::Error(err)))); - } - } - } - } - }, - // If the listener is bound to a single interface, make sure the - // address is registered for port reuse and reported once. 
- InAddr::One { addr, out } => { - if let Some(multiaddr) = out.take() { - me.port_reuse.register(*addr, me.listen_addr.port()); - return Poll::Ready(Some(Ok(TcpListenerEvent::NewAddress(multiaddr)))); - } + if let Some(mut pause) = me.pause.take() { + match pause.poll_unpin(cx) { + Poll::Ready(_) => {} + Poll::Pending => { + me.pause = Some(pause); + return Poll::Pending; } } + } - if let Some(mut pause) = me.pause.take() { - match Pin::new(&mut pause).poll(cx) { - Poll::Ready(_) => {} - Poll::Pending => { - me.pause = Some(pause); - return Poll::Pending; + if let Some(if_watcher) = me.if_watcher.as_mut() { + while let Poll::Ready(event) = if_watcher.poll_if_event(cx) { + match event { + Ok(IfEvent::Up(inet)) => { + let ip = inet.addr(); + if me.listen_addr.is_ipv4() == ip.is_ipv4() { + let ma = ip_to_multiaddr(ip, me.listen_addr.port()); + log::debug!("New listen address: {}", ma); + me.port_reuse.register(ip, me.listen_addr.port()); + return Poll::Ready(Some(Ok(TcpListenerEvent::NewAddress(ma)))); + } + } + Ok(IfEvent::Down(inet)) => { + let ip = inet.addr(); + if me.listen_addr.is_ipv4() == ip.is_ipv4() { + let ma = ip_to_multiaddr(ip, me.listen_addr.port()); + log::debug!("Expired listen address: {}", ma); + me.port_reuse.unregister(ip, me.listen_addr.port()); + return Poll::Ready(Some(Ok(TcpListenerEvent::AddressExpired(ma)))); + } + } + Err(err) => { + me.pause = Some(Delay::new(me.sleep_on_error)); + return Poll::Ready(Some(Ok(TcpListenerEvent::Error(err)))); } } } + } - // Take the pending connection from the backlog. - let incoming = match T::poll_accept(&mut me.listener, cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Ok(incoming)) => incoming, - Poll::Ready(Err(e)) => { - // These errors are non-fatal for the listener stream. - log::error!("error accepting incoming connection: {}", e); - me.pause = Some(Delay::new(me.sleep_on_error)); - return Poll::Ready(Some(Ok(TcpListenerEvent::Error(e)))); - } - }; + // Take the pending connection from the backlog. + match T::poll_accept(&mut me.listener, cx) { + Poll::Ready(Ok(Incoming { + local_addr, + remote_addr, + stream, + })) => { + let local_addr = ip_to_multiaddr(local_addr.ip(), local_addr.port()); + let remote_addr = ip_to_multiaddr(remote_addr.ip(), remote_addr.port()); - let local_addr = ip_to_multiaddr(incoming.local_addr.ip(), incoming.local_addr.port()); - let remote_addr = - ip_to_multiaddr(incoming.remote_addr.ip(), incoming.remote_addr.port()); + log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); - log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); + return Poll::Ready(Some(Ok(TcpListenerEvent::Upgrade { + upgrade: future::ok(stream), + local_addr, + remote_addr, + }))); + } + Poll::Ready(Err(e)) => { + // These errors are non-fatal for the listener stream. 
+ me.pause = Some(Delay::new(me.sleep_on_error)); + return Poll::Ready(Some(Ok(TcpListenerEvent::Error(e)))); + } + Poll::Pending => {} + }; - return Poll::Ready(Some(Ok(TcpListenerEvent::Upgrade { - upgrade: future::ok(incoming.stream), - local_addr, - remote_addr, - }))); - } + Poll::Pending } } @@ -991,7 +938,7 @@ mod tests { #[cfg(feature = "tokio")] { let (ready_tx, ready_rx) = mpsc::channel(1); - let listener = listener::(addr.clone(), ready_tx); + let listener = listener::(addr, ready_tx); let dialer = dialer::(ready_rx); let rt = tokio_crate::runtime::Builder::new_current_thread() .enable_io() @@ -1060,7 +1007,7 @@ mod tests { #[cfg(feature = "tokio")] { let (ready_tx, ready_rx) = mpsc::channel(1); - let listener = listener::(addr.clone(), ready_tx); + let listener = listener::(addr, ready_tx); let dialer = dialer::(ready_rx); let rt = tokio_crate::runtime::Builder::new_current_thread() .enable_io() @@ -1168,7 +1115,7 @@ mod tests { let (ready_tx, ready_rx) = mpsc::channel(1); let (port_reuse_tx, port_reuse_rx) = oneshot::channel(); let listener = listener::(addr.clone(), ready_tx, port_reuse_rx); - let dialer = dialer::(addr.clone(), ready_rx, port_reuse_tx); + let dialer = dialer::(addr, ready_rx, port_reuse_tx); let rt = tokio_crate::runtime::Builder::new_current_thread() .enable_io() .build() @@ -1209,10 +1156,7 @@ mod tests { match poll_fn(|cx| Pin::new(&mut tcp).poll(cx)).await { TransportEvent::NewAddress { listen_addr: addr2, .. - } => { - assert_eq!(addr1, addr2); - return; - } + } => assert_eq!(addr1, addr2), e => panic!("Unexpected transport event: {:?}", e), } } @@ -1229,7 +1173,7 @@ mod tests { #[cfg(feature = "tokio")] { - let listener = listen_twice::(addr.clone()); + let listener = listen_twice::(addr); let rt = tokio_crate::runtime::Builder::new_current_thread() .enable_io() .build() @@ -1267,7 +1211,7 @@ mod tests { .enable_io() .build() .unwrap(); - let new_addr = rt.block_on(listen::(addr.clone())); + let new_addr = rt.block_on(listen::(addr)); assert!(!new_addr.to_string().contains("tcp/0")); } } @@ -1290,7 +1234,7 @@ mod tests { #[cfg(feature = "tokio")] { let mut tcp = TokioTcpTransport::new(GenTcpConfig::new()); - assert!(tcp.listen_on(addr.clone()).is_err()); + assert!(tcp.listen_on(addr).is_err()); } } diff --git a/transports/tcp/src/provider.rs b/transports/tcp/src/provider.rs index 7ebeaa49ee8..a341026e7e6 100644 --- a/transports/tcp/src/provider.rs +++ b/transports/tcp/src/provider.rs @@ -28,18 +28,10 @@ pub mod tokio; use futures::future::BoxFuture; use futures::io::{AsyncRead, AsyncWrite}; -use ipnet::IpNet; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::task::{Context, Poll}; use std::{fmt, io}; -/// An event relating to a change of availability of an address -/// on a network interface. -pub enum IfEvent { - Up(IpNet), - Down(IpNet), -} - /// An incoming connection returned from [`Provider::poll_accept()`]. pub struct Incoming { pub stream: S, @@ -54,12 +46,6 @@ pub trait Provider: Clone + Send + 'static { type Stream: AsyncRead + AsyncWrite + Send + Unpin + fmt::Debug; /// The type of TCP listeners obtained from [`Provider::new_listener`]. type Listener: Send + Unpin; - /// The type of network interface observers obtained from [`Provider::if_watcher`]. - type IfWatcher: Send + Unpin; - - /// Creates an instance of [`Self::IfWatcher`] that can be polled for - /// network interface changes via [`Self::poll_interfaces`]. 
- fn if_watcher() -> BoxFuture<'static, io::Result>; /// Creates a new listener wrapping the given [`TcpListener`] that /// can be polled for incoming connections via [`Self::poll_accept()`]. @@ -77,8 +63,4 @@ pub trait Provider: Clone + Send + 'static { _: &mut Self::Listener, _: &mut Context<'_>, ) -> Poll>>; - - /// Polls a [`Self::IfWatcher`] for network interface changes, ensuring a task wakeup, - /// if necessary. - fn poll_interfaces(_: &mut Self::IfWatcher, _: &mut Context<'_>) -> Poll>; } diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs index acbb4fbdcca..fc613d8fe86 100644 --- a/transports/tcp/src/provider/async_io.rs +++ b/transports/tcp/src/provider/async_io.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{IfEvent, Incoming, Provider}; +use super::{Incoming, Provider}; use async_io_crate::Async; use futures::future::{BoxFuture, FutureExt}; @@ -32,11 +32,6 @@ pub enum Tcp {} impl Provider for Tcp { type Stream = Async; type Listener = Async; - type IfWatcher = if_watch::IfWatcher; - - fn if_watcher() -> BoxFuture<'static, io::Result> { - if_watch::IfWatcher::new().boxed() - } fn new_listener(l: net::TcpListener) -> io::Result { Async::new(l) @@ -87,11 +82,4 @@ impl Provider for Tcp { remote_addr, })) } - - fn poll_interfaces(w: &mut Self::IfWatcher, cx: &mut Context<'_>) -> Poll> { - w.poll_unpin(cx).map_ok(|e| match e { - if_watch::IfEvent::Up(a) => IfEvent::Up(a), - if_watch::IfEvent::Down(a) => IfEvent::Down(a), - }) - } } diff --git a/transports/tcp/src/provider/tokio.rs b/transports/tcp/src/provider/tokio.rs index fa9ebe3b3ff..994a12a33c7 100644 --- a/transports/tcp/src/provider/tokio.rs +++ b/transports/tcp/src/provider/tokio.rs @@ -18,45 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::{IfEvent, Incoming, Provider}; +use super::{Incoming, Provider}; use futures::{ - future::{self, BoxFuture, FutureExt}, + future::{BoxFuture, FutureExt}, prelude::*, }; -use futures_timer::Delay; -use if_addrs::{get_if_addrs, IfAddr}; -use ipnet::{IpNet, Ipv4Net, Ipv6Net}; -use std::collections::HashSet; use std::convert::TryFrom; use std::io; use std::net; use std::pin::Pin; use std::task::{Context, Poll}; -use std::time::Duration; #[derive(Copy, Clone)] pub enum Tcp {} -pub struct IfWatcher { - addrs: HashSet, - delay: Delay, - pending: Vec, -} - impl Provider for Tcp { type Stream = TcpStream; type Listener = tokio_crate::net::TcpListener; - type IfWatcher = IfWatcher; - - fn if_watcher() -> BoxFuture<'static, io::Result> { - future::ready(Ok(IfWatcher { - addrs: HashSet::new(), - delay: Delay::new(Duration::from_secs(0)), - pending: Vec::new(), - })) - .boxed() - } fn new_listener(l: net::TcpListener) -> io::Result { tokio_crate::net::TcpListener::try_from(l) @@ -104,60 +83,15 @@ impl Provider for Tcp { remote_addr, })) } - - fn poll_interfaces(w: &mut Self::IfWatcher, cx: &mut Context<'_>) -> Poll> { - loop { - if let Some(event) = w.pending.pop() { - return Poll::Ready(Ok(event)); - } - - match Pin::new(&mut w.delay).poll(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(()) => { - let ifs = get_if_addrs()?; - let addrs = ifs - .into_iter() - .map(|iface| match iface.addr { - IfAddr::V4(ip4) => { - let prefix_len = - (!u32::from_be_bytes(ip4.netmask.octets())).leading_zeros(); - let ipnet = Ipv4Net::new(ip4.ip, prefix_len as u8) - .expect("prefix_len can not exceed 32"); - IpNet::V4(ipnet) - } - IfAddr::V6(ip6) => { - let prefix_len = - (!u128::from_be_bytes(ip6.netmask.octets())).leading_zeros(); - let ipnet = Ipv6Net::new(ip6.ip, prefix_len as u8) - .expect("prefix_len can not exceed 128"); - IpNet::V6(ipnet) - } - }) - .collect::>(); - - for down in w.addrs.difference(&addrs) { - w.pending.push(IfEvent::Down(*down)); - } - - for up in addrs.difference(&w.addrs) { - w.pending.push(IfEvent::Up(*up)); - } - - w.addrs = addrs; - w.delay.reset(Duration::from_secs(10)); - } - } - } - } } /// A [`tokio_crate::net::TcpStream`] that implements [`AsyncRead`] and [`AsyncWrite`]. #[derive(Debug)] pub struct TcpStream(pub tokio_crate::net::TcpStream); -impl Into for TcpStream { - fn into(self: TcpStream) -> tokio_crate::net::TcpStream { - self.0 +impl From for tokio_crate::net::TcpStream { + fn from(t: TcpStream) -> tokio_crate::net::TcpStream { + t.0 } } diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index 65c5da0559a..78f759803ee 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.35.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.34.0 + +- Update to `libp2p-core` `v0.35.0`. + # 0.33.0 - Update dependencies. 
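Stepping back to the small cleanup in `transports/tcp/src/provider/tokio.rs` a few hunks above: the hand-written `Into<tokio_crate::net::TcpStream>` impl is replaced by a `From<TcpStream>` impl. Implementing `From` is the idiomatic direction (it is also what Clippy's `from_over_into` lint nudges toward), because the standard library's blanket impl then provides the matching `Into` for free. A self-contained sketch; the newtype here wraps a plain `String` instead of a socket so the example compiles on its own:

```rust
/// Stand-in for the `TcpStream` wrapper in `provider/tokio.rs`, with the
/// inner type simplified to keep the example self-contained.
#[derive(Debug)]
pub struct TcpStream(pub String);

// Implementing `From` on the target type ...
impl From<TcpStream> for String {
    fn from(t: TcpStream) -> String {
        t.0
    }
}

fn main() {
    let wrapped = TcpStream("stream".to_owned());
    // ... gives callers both directions: `String::from(wrapped)` works, and so
    // does `wrapped.into()`, via the blanket
    // `impl<T, U> Into<U> for T where U: From<T>` in the standard library.
    let inner: String = wrapped.into();
    assert_eq!(inner, "stream");
}
```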
diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index 30d01c4f490..3b7f1eb0a3f 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-uds" edition = "2021" rust-version = "1.56.1" description = "Unix domain sockets transport for libp2p" -version = "0.33.0" +version = "0.35.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies] async-std = { version = "1.6.2", optional = true } -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4.1" futures = "0.3.1" tokio = { version = "1.15", default-features = false, features = ["net"], optional = true } diff --git a/transports/wasm-ext/CHANGELOG.md b/transports/wasm-ext/CHANGELOG.md index 65ff72d10bf..0b696f40cce 100644 --- a/transports/wasm-ext/CHANGELOG.md +++ b/transports/wasm-ext/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.36.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.35.0 + +- Update to `libp2p-core` `v0.35.0`. + # 0.34.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/transports/wasm-ext/Cargo.toml b/transports/wasm-ext/Cargo.toml index a0f67226513..6c6a645c8c2 100644 --- a/transports/wasm-ext/Cargo.toml +++ b/transports/wasm-ext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-wasm-ext" edition = "2021" rust-version = "1.56.1" description = "Allows passing in an external transport in a WASM environment" -version = "0.34.0" +version = "0.36.0" authors = ["Pierre Krieger "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" js-sys = "0.3.50" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } parity-send-wrapper = "0.1.0" wasm-bindgen = "0.2.42" wasm-bindgen-futures = "0.4.4" diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index 3783fef6c44..00ee342d01f 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.38.0 + +- Update to `libp2p-core` `v0.36.0`. + +# 0.37.0 + +- Update to `libp2p-core` `v0.35.0`. + # 0.36.0 - Update to `libp2p-core` `v0.34.0`. diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 624fc0cbe4f..b470864a959 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket" edition = "2021" rust-version = "1.56.1" description = "WebSocket transport for libp2p" -version = "0.36.0" +version = "0.38.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,7 +14,7 @@ categories = ["network-programming", "asynchronous"] futures-rustls = "0.22" either = "1.5.3" futures = "0.3.1" -libp2p-core = { version = "0.34.0", path = "../../core", default-features = false } +libp2p-core = { version = "0.36.0", path = "../../core", default-features = false } log = "0.4.8" parking_lot = "0.12.0" quicksink = "0.1"
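One more pattern worth highlighting from earlier in this diff, in `transports/quic/tests/smoke.rs`: the smoke test now drives the two swarms as separate futures joined at the end, with a `oneshot` channel ensuring one side only disconnects after the other has observed all the events it expects. A stripped-down sketch of that coordination, with the swarm interaction elided; `side_a`/`side_b` are stand-ins for `fut_a`/`fut_b` in the test:

```rust
use futures::channel::oneshot;
use futures::executor::block_on;
use futures::future::join;

fn main() {
    let (sync_tx, sync_rx) = oneshot::channel::<()>();

    // Mirrors `fut_a`: finish the request/response exchange, then wait for
    // the peer's signal before disconnecting so no in-flight event is lost.
    let side_a = async move {
        sync_rx.await.expect("sender must not be dropped");
        // `a.disconnect_peer_id(b_id)` happens at this point in the test.
    };

    // Mirrors `fut_b`: once all expected events have been seen, signal the
    // peer that closing the connection is now safe.
    let side_b = async move {
        sync_tx.send(()).expect("receiver must not be dropped");
    };

    // Drive both halves concurrently, as the test does with
    // `join(fut_a, fut_b).await`.
    block_on(join(side_a, side_b));
}
```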