diff --git a/Cargo.lock b/Cargo.lock index 67becc0f5f76..587fa692b8ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -711,6 +711,7 @@ dependencies = [ name = "bp-header-chain" version = "0.1.0" dependencies = [ + "assert_matches", "bp-test-utils", "finality-grandpa", "frame-support", @@ -726,10 +727,13 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ + "bitvec", "bp-runtime", "frame-support", "frame-system", + "impl-trait-for-tuples", "parity-scale-codec", + "serde", "sp-std", ] @@ -754,11 +758,12 @@ dependencies = [ name = "bp-rococo" version = "0.1.0" dependencies = [ - "bp-header-chain", "bp-messages", "bp-polkadot-core", "bp-runtime", + "frame-support", "parity-scale-codec", + "smallvec 1.6.1", "sp-api", "sp-runtime", "sp-std", @@ -799,15 +804,14 @@ dependencies = [ name = "bp-wococo" version = "0.1.0" dependencies = [ - "bp-header-chain", "bp-messages", "bp-polkadot-core", + "bp-rococo", "bp-runtime", "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", - "sp-version", ] [[package]] diff --git a/bridges/.config/lingua.dic b/bridges/.config/lingua.dic index a31ecf2c7bde..da87e36948c7 100644 --- a/bridges/.config/lingua.dic +++ b/bridges/.config/lingua.dic @@ -12,6 +12,7 @@ args aren async Best/MS +benchmarking/MS BlockId BFT/M bitfield/MS diff --git a/bridges/.config/spellcheck.toml b/bridges/.config/spellcheck.toml index 015f9b97c75f..e061c29ac222 100644 --- a/bridges/.config/spellcheck.toml +++ b/bridges/.config/spellcheck.toml @@ -2,6 +2,8 @@ lang = "en_US" search_dirs = ["."] extra_dictionaries = ["lingua.dic"] +skip_os_lookups = true +use_builtin = true [hunspell.quirks] # `Type`'s diff --git a/bridges/.github/workflows/deny.yml b/bridges/.github/workflows/deny.yml deleted file mode 100644 index 9f9f7264ae95..000000000000 --- a/bridges/.github/workflows/deny.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Cargo deny - -on: - pull_request: - schedule: - - cron: '0 0 * * *' - push: - branches: - - master - tags: - - v* - paths-ignore: - - '**.md' - - diagrams/* - - docs/* -jobs: - cargo-deny: - runs-on: ubuntu-latest - strategy: - matrix: - checks: - - advisories - - bans licenses sources - # Prevent sudden announcement of a new advisory from failing CI: - continue-on-error: ${{ matrix.checks == 'advisories' }} - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Cargo deny - uses: EmbarkStudios/cargo-deny-action@v1 - with: - command: check ${{ matrix.checks }} diff --git a/bridges/.github/workflows/lint.yml b/bridges/.github/workflows/lint.yml deleted file mode 100644 index e01f2cf065de..000000000000 --- a/bridges/.github/workflows/lint.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: Check style - -on: - pull_request: - push: - branches: - - master - tags: - - v* - paths-ignore: - - '**.md' - - diagrams/* - - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' -jobs: -## Check stage - check-fmt: - name: Check RustFmt - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - steps: - - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - - name: Add rustfmt - run: rustup component add rustfmt - - - name: rust-fmt check - uses: actions-rs/cargo@master - with: - command: fmt - 
args: --all -- --check - check-spellcheck: - name: Check For Spelling and/or Grammar Mistakes - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - - name: Add cargo-spellcheck - run: cargo install cargo-spellcheck - - - name: Run spellcheck - run: cargo spellcheck check -m 1 -vv $(find modules/currency-exchange/src -name "*.rs") diff --git a/bridges/.github/workflows/publish-deps.yml b/bridges/.github/workflows/publish-deps.yml deleted file mode 100644 index 16d56a5d7806..000000000000 --- a/bridges/.github/workflows/publish-deps.yml +++ /dev/null @@ -1,76 +0,0 @@ -name: Publish Dependencies to Docker hub - -on: - push: - tags: - - v* - paths-ignore: - - '**.md' - - diagrams/* - - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' - -jobs: - ## Publish to Docker hub - publish: - name: Publishing - runs-on: ubuntu-latest - container: - image: docker:git - steps: - - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@v2 - with: - fetch-depth: 5 - submodules: recursive - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Prepare - id: prep - run: | - DOCKER_IMAGE=paritytech/bridge-dependencies - VERSION=latest - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF#refs/tags/} - elif [[ $GITHUB_REF == refs/heads/* ]]; then - VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') - fi - TAGS=${DOCKER_IMAGE}:${VERSION} - TAGS=$TAGS,${DOCKER_IMAGE}:sha-${GITHUB_SHA::8} - echo ::set-output name=TAGS::${TAGS} - echo ::set-output name=DATE::$(date +%d-%m-%Y) - - - name: Build and push - uses: docker/build-push-action@v2 - with: - file: deployments/BridgeDeps.Dockerfile - push: true - cache-from: type=registry,ref=paritytech/bridge-dependencies:latest - cache-to: type=inline - tags: ${{ steps.prep.outputs.TAGS }} - labels: | - org.opencontainers.image.title=bridge-dependencies - org.opencontainers.image.description=bridge-dependencies - component of Parity Bridges Common - org.opencontainers.image.source=${{ github.event.repository.html_url }} - org.opencontainers.image.url=https://github.com/paritytech/parity-bridges-common - org.opencontainers.image.documentation=https://github.com/paritytech/parity-bridges-common/README.md - org.opencontainers.image.created=${{ steps.prep.outputs.DATE }} - org.opencontainers.image.revision=${{ github.sha }} - org.opencontainers.image.authors=devops-team@parity.io - org.opencontainers.image.vendor=Parity Technologies - org.opencontainers.image.licenses=GPL-3.0 License diff --git a/bridges/.github/workflows/publish-docker.yml b/bridges/.github/workflows/publish-docker.yml deleted file mode 100644 index 5a4670b6ea17..000000000000 --- a/bridges/.github/workflows/publish-docker.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: Publish images to Docker hub - -on: - push: - tags: - - v* - paths-ignore: - - '**.md' - - diagrams/* - - docs/* - schedule: # Nightly build - - cron: '0 1 * * *' - -jobs: - ## Publish to Docker hub - publish: - name: Publishing - strategy: - matrix: - project: - - 
rialto-bridge-node - - millau-bridge-node - - ethereum-poa-relay - - substrate-relay - include: - - project: rialto-bridge-node - healthcheck: http://localhost:9933/health - - project: millau-bridge-node - healthcheck: http://localhost:9933/health - - project: ethereum-poa-relay - healthcheck: http://localhost:9616/metrics - - project: substrate-relay - healthcheck: http://localhost:9616/metrics - - runs-on: ubuntu-latest - steps: - - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@v2 - with: - fetch-depth: 5 - submodules: recursive - - - name: Prepare - id: prep - run: | - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF#refs/tags/} - elif [[ $GITHUB_REF == refs/heads/* ]]; then - VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') - fi - TAGS="${VERSION} sha-${GITHUB_SHA::8} latest" - echo ::set-output name=TAGS::${VERSION} - echo ::set-output name=TAGS::${TAGS} - echo ::set-output name=DATE::$(date +%d-%m-%Y) - - - name: Workaround rootless build - run: | - sudo apt-get install fuse-overlayfs - mkdir -vp ~/.config/containers - printf "[storage.options]\nmount_program=\"/usr/bin/fuse-overlayfs\"" > ~/.config/containers/storage.conf - - - name: Build image for ${{ matrix.project }} - uses: redhat-actions/buildah-build@v2.2 - with: - image: ${{ matrix.project }} - tags: ${{ steps.prep.outputs.TAGS }} - dockerfiles: ./Dockerfile - build-args: | - PROJECT=${{ matrix.project }} - HEALTH=${{ matrix.healthcheck }} - VCS_REF=sha-${GITHUB_SHA::8} - BUILD_DATE=${{ steps.prep.outputs.DATE }} - VERSION=${{ steps.prep.outputs.VERSION }} - - - name: Push ${{ matrix.project }} image to docker.io - id: push-to-dockerhub - uses: redhat-actions/push-to-registry@v2.1.1 - with: - registry: docker.io/paritytech - image: ${{ matrix.project }} - tags: ${{ steps.prep.outputs.TAGS }} - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Check the image - run: | - echo "New image has been pushed to ${{ steps.push-to-dockerhub.outputs.registry-path }}" diff --git a/bridges/.github/workflows/rust.yml b/bridges/.github/workflows/rust.yml deleted file mode 100644 index 14e72ddad02e..000000000000 --- a/bridges/.github/workflows/rust.yml +++ /dev/null @@ -1,175 +0,0 @@ -name: Compilation and Testing Suite - -on: - pull_request: - push: - branches: - - master - tags: - - v* - paths-ignore: - - '**.md' - - diagrams/* - - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' -jobs: - -## Check Stage - check-test: - name: Check and test - strategy: - matrix: - toolchain: - - stable - #- beta - - nightly-2021-04-10 - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
- steps: - - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - - name: Install Toolchain - run: rustup toolchain add $NIGHTLY - - - name: Add WASM Utilities - run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - - name: Checking rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - with: - command: check - toolchain: ${{ matrix.toolchain }} - args: --all --verbose - -## Test Stage - - name: Testing rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - if: matrix.toolchain == 'stable' - with: - command: test - toolchain: ${{ matrix.toolchain }} - args: --all --verbose - -## Check Node Benchmarks - - name: Check Rialto benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - with: - command: check - toolchain: ${{ matrix.toolchain }} - args: -p rialto-runtime --features runtime-benchmarks --verbose - - - name: Check Millau benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - with: - command: check - toolchain: ${{ matrix.toolchain }} - args: -p millau-runtime --features runtime-benchmarks --verbose - -## Build Stage - build: - name: Build - strategy: - matrix: - toolchain: - - stable - #- beta - - nightly-2021-04-10 - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. - steps: - - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - - name: Install Toolchain - run: rustup toolchain add $NIGHTLY - - - name: Add WASM Utilities - run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - - name: Building rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - if: github.ref == 'refs/heads/master' - with: - command: build - toolchain: ${{ matrix.toolchain }} - args: --all --verbose - - - name: Prepare artifacts - if: github.ref == 'refs/heads/master' - run: | - mkdir -p ./artifacts; - mv -v target/debug/rialto-bridge-node ./artifacts/; - mv -v target/debug/millau-bridge-node ./artifacts/; - mv -v target/debug/ethereum-poa-relay ./artifacts/; - mv -v target/debug/substrate-relay ./artifacts/; - shell: bash - - - name: Upload artifacts - if: github.ref == 'refs/heads/master' - uses: actions/upload-artifact@v1 - with: - name: ${{ matrix.toolchain }}.zip - path: artifacts/ - - ## Linting Stage - clippy: - name: Clippy - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
- steps: - - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - - name: Install Toolchain - run: rustup toolchain add $NIGHTLY - - - name: Add WASM Utilities - run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - - name: Add clippy - run: rustup component add clippy --toolchain $NIGHTLY - - - name: Rust Cache - uses: Swatinem/rust-cache@v1.2.0 - - - name: Clippy - uses: actions-rs/cargo@master - with: - command: clippy - toolchain: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. - args: --all-targets -- -D warnings diff --git a/bridges/.gitlab-ci.yml b/bridges/.gitlab-ci.yml new file mode 100644 index 000000000000..b49df92c73c8 --- /dev/null +++ b/bridges/.gitlab-ci.yml @@ -0,0 +1,276 @@ +stages: + - lint + - check + - test + - build + - publish + +workflow: + rules: + - if: $CI_COMMIT_TAG + - if: $CI_COMMIT_BRANCH + +variables: &default-vars + GIT_STRATEGY: fetch + GIT_DEPTH: 100 + CARGO_INCREMENTAL: 0 + ARCH: "x86_64" + CI_IMAGE: "paritytech/bridges-ci:production" + RUST_BACKTRACE: full + +default: + cache: {} + +.collect-artifacts: &collect-artifacts + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: on_success + expire_in: 7 days + paths: + - artifacts/ + +.kubernetes-build: &kubernetes-build + tags: + - kubernetes-parity-build + interruptible: true + +.docker-env: &docker-env + image: "${CI_IMAGE}" + before_script: + - rustup show + - cargo --version + - rustup +nightly show + - cargo +nightly --version + - sccache -s + retry: + max: 2 + when: + - runner_system_failure + - unknown_failure + - api_failure + interruptible: true + tags: + - linux-docker + +.test-refs: &test-refs + rules: + # FIXME: This is the cause why pipelines wouldn't start. The problem might be in our custom + # mirroring. This should be investigated further, but for now let's have the working + # pipeline. + # - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH + # changes: + # - '**.md' + # - diagrams/* + # - docs/* + # when: never + - if: $CI_PIPELINE_SOURCE == "pipeline" + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + +.build-refs: &build-refs + rules: + # won't run on the CI image update pipeline + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + # there are two types of nightly pipelines: + # 1. this one is triggered by the schedule with $PIPELINE == "nightly", it's for releasing. + # this job runs only on nightly pipeline with the mentioned variable, against `master` branch + - if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" + +.nightly-test: &nightly-test + rules: + # 2. another is triggered by scripts repo $CI_PIPELINE_SOURCE == "pipeline" it's for the CI image + # update, it also runs all the nightly checks. 
+ - if: $CI_PIPELINE_SOURCE == "pipeline" + +#### stage: lint + +clippy-nightly: + stage: lint + <<: *docker-env + <<: *test-refs + variables: + RUSTFLAGS: "-D warnings" + script: + - cargo +nightly clippy --all-targets + # FIXME: remove when all the warns are fixed + allow_failure: true + +fmt: + stage: lint + <<: *docker-env + <<: *test-refs + script: + - cargo fmt --all -- --check + +spellcheck: + stage: lint + <<: *docker-env + <<: *test-refs + script: + - cargo spellcheck check -m 1 -vv $(find modules/currency-exchange/src -name "*.rs") + +#### stage: check + +check: + stage: check + <<: *docker-env + <<: *test-refs + script: &check-script + - time cargo check --verbose --workspace + # Check Rialto benchmarks runtime + - time cargo check -p rialto-runtime --features runtime-benchmarks --verbose + # Check Millau benchmarks runtime + - time cargo check -p millau-runtime --features runtime-benchmarks --verbose + +check-nightly: + stage: check + <<: *docker-env + <<: *nightly-test + script: + - rustup default nightly + - *check-script + +#### stage: test + +test: + stage: test + <<: *docker-env + <<: *test-refs + script: &test-script + - time cargo test --verbose --workspace + +test-nightly: + stage: test + <<: *docker-env + <<: *nightly-test + script: + - rustup default nightly + - *test-script + +deny: + stage: test + <<: *docker-env + <<: *nightly-test + <<: *collect-artifacts + script: + - cargo deny check advisories --hide-inclusion-graph + - cargo deny check bans sources --hide-inclusion-graph + after_script: + - mkdir -p ./artifacts + - echo "___Complete logs can be found in the artifacts___" + - cargo deny check advisories 2> advisories.log + - cargo deny check bans sources 2> bans_sources.log + # this job is allowed to fail, only licenses check is important + allow_failure: true + +deny-licenses: + stage: test + <<: *docker-env + <<: *test-refs + <<: *collect-artifacts + script: + - cargo deny check licenses --hide-inclusion-graph + after_script: + - mkdir -p ./artifacts + - echo "___Complete logs can be found in the artifacts___" + - cargo deny check licenses 2> licenses.log + +#### stage: build + +build: + stage: build + <<: *docker-env + <<: *build-refs + <<: *collect-artifacts + # master + script: &build-script + - time cargo build --release --verbose --workspace + after_script: + # Prepare artifacts + - mkdir -p ./artifacts + - strip ./target/release/rialto-bridge-node + - mv -v ./target/release/rialto-bridge-node ./artifacts/ + - strip ./target/release/millau-bridge-node + - mv -v ./target/release/millau-bridge-node ./artifacts/ + - strip ./target/release/ethereum-poa-relay + - mv -v ./target/release/ethereum-poa-relay ./artifacts/ + - strip ./target/release/substrate-relay + - mv -v ./target/release/substrate-relay ./artifacts/ + - mv -v ./deployments/local-scripts/bridge-entrypoint.sh ./artifacts/ + - mv -v ./ci.Dockerfile ./artifacts/ + +build-nightly: + stage: build + <<: *docker-env + <<: *collect-artifacts + <<: *nightly-test + script: + - rustup default nightly + - *build-script + +#### stage: publish + +.build-push-image: &build-push-image + <<: *kubernetes-build + image: quay.io/buildah/stable + <<: *build-refs + variables: &image-variables + GIT_STRATEGY: none + DOCKERFILE: ci.Dockerfile + IMAGE_NAME: docker.io/paritytech/$CI_JOB_NAME + needs: + - job: build + artifacts: true + before_script: &check-versions + - if [[ "${CI_COMMIT_TAG}" ]]; then + VERSION=${CI_COMMIT_TAG}; + elif [[ "${CI_COMMIT_REF_NAME}" ]]; then + VERSION=$(echo ${CI_COMMIT_REF_NAME} | sed -r 
's#/+#-#g'); + fi + - echo "Effective tags = ${VERSION} sha-${CI_COMMIT_SHORT_SHA} latest" + script: + - test "${Docker_Hub_User_Parity}" -a "${Docker_Hub_Pass_Parity}" || + ( echo "no docker credentials provided"; exit 1 ) + - cd ./artifacts + - buildah bud + --format=docker + --build-arg VCS_REF="${CI_COMMIT_SHORT_SHA}" + --build-arg BUILD_DATE="$(date +%d-%m-%Y)" + --build-arg PROJECT="${CI_JOB_NAME}" + --build-arg VERSION="${VERSION}" + --tag "${IMAGE_NAME}:${VERSION}" + --tag "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}" + --tag "${IMAGE_NAME}:latest" + --file "${DOCKERFILE}" . + # The job will success only on the protected branch + - echo "$Docker_Hub_Pass_Parity" | + buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io + - buildah info + - buildah push --format=v2s2 "${IMAGE_NAME}:${VERSION}" + - buildah push --format=v2s2 "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}" + - buildah push --format=v2s2 "${IMAGE_NAME}:latest" + after_script: + - env REGISTRY_AUTH_FILE= buildah logout "$IMAGE_NAME" + +rialto-bridge-node: + stage: publish + <<: *build-push-image + +millau-bridge-node: + stage: publish + <<: *build-push-image + +ethereum-poa-relay: + stage: publish + <<: *build-push-image + +substrate-relay: + stage: publish + <<: *build-push-image + +# FIXME: publish binaries diff --git a/bridges/Cargo.lock b/bridges/Cargo.lock index abe77d2a3c3c..86b075028125 100644 --- a/bridges/Cargo.lock +++ b/bridges/Cargo.lock @@ -18,7 +18,16 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ - "gimli", + "gimli 0.23.0", +] + +[[package]] +name = "addr2line" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" +dependencies = [ + "gimli 0.24.0", ] [[package]] @@ -162,12 +171,34 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7" + [[package]] name = "asn1_der" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "async-channel" version = "1.6.1" @@ -269,6 +300,7 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" dependencies = [ + "async-attributes", "async-channel", "async-global-executor", "async-io", @@ -409,11 +441,11 @@ version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ - "addr2line", + "addr2line 0.14.1", "cfg-if 1.0.0", "libc", 
"miniz_oxide", - "object", + "object 0.23.0", "rustc-demangle", ] @@ -487,16 +519,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] - [[package]] name = "bitvec" version = "0.20.1" @@ -504,7 +526,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" dependencies = [ "funty", - "radium 0.6.2", + "radium", "tap", "wyz", ] @@ -632,7 +654,7 @@ name = "bp-currency-exchange" version = "0.1.0" dependencies = [ "frame-support", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-std", ] @@ -649,7 +671,7 @@ dependencies = [ "impl-serde", "libsecp256k1", "parity-bytes", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "plain_hasher", "primitive-types", "rlp", @@ -666,10 +688,11 @@ dependencies = [ name = "bp-header-chain" version = "0.1.0" dependencies = [ + "assert_matches", "bp-test-utils", - "finality-grandpa 0.14.0", + "finality-grandpa", "frame-support", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-finality-grandpa", @@ -694,7 +717,7 @@ version = "0.1.0" dependencies = [ "bp-runtime", "frame-support", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-std", ] @@ -702,10 +725,13 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ + "bitvec", "bp-runtime", "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "impl-trait-for-tuples", + "parity-scale-codec", + "serde", "sp-std", ] @@ -721,6 +747,7 @@ dependencies = [ "hash256-std-hasher", "impl-codec", "impl-serde", + "max-encoded-len", "parity-util-mem", "serde", "sp-api", @@ -751,7 +778,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-core", "sp-runtime", @@ -777,11 +804,12 @@ dependencies = [ name = "bp-rococo" version = "0.1.0" dependencies = [ - "bp-header-chain", "bp-messages", "bp-polkadot-core", "bp-runtime", - "parity-scale-codec 2.0.1", + "frame-support", + "parity-scale-codec", + "smallvec 1.6.1", "sp-api", "sp-runtime", "sp-std", @@ -795,7 +823,7 @@ dependencies = [ "frame-support", "hash-db", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -810,8 +838,8 @@ version = "0.1.0" dependencies = [ "bp-header-chain", "ed25519-dalek", - "finality-grandpa 0.14.0", - "parity-scale-codec 2.0.1", + "finality-grandpa", + "parity-scale-codec", "sp-application-crypto", "sp-finality-grandpa", "sp-runtime", @@ -826,7 +854,7 @@ dependencies = [ "bp-messages", "bp-polkadot-core", "bp-runtime", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", @@ -837,15 +865,14 @@ dependencies = [ name = "bp-wococo" version = "0.1.0" dependencies = [ - "bp-header-chain", "bp-messages", "bp-polkadot-core", + "bp-rococo", "bp-runtime", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", - "sp-version", ] [[package]] @@ -861,7 +888,8 @@ dependencies = [ "pallet-bridge-dispatch", "pallet-bridge-grandpa", "pallet-bridge-messages", - "parity-scale-codec 2.0.1", + "pallet-transaction-payment", + "parity-scale-codec", "sp-core", 
"sp-runtime", "sp-state-machine", @@ -899,12 +927,6 @@ version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" - [[package]] name = "byte-slice-cast" version = "1.0.0" @@ -952,6 +974,15 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +[[package]] +name = "camino" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4648c6d00a709aa069a236adcaae4f605a6241c72bf5bee79331a4b625921a9" +dependencies = [ + "serde", +] + [[package]] name = "cargo-platform" version = "0.1.1" @@ -963,10 +994,11 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.12.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7714a157da7991e23d90686b9524b9e12e0407a108647f52e9328f4b3d51ac7f" +checksum = "081e3f0755c1f380c2d010481b6fa2e02973586d5f2b24eebb7a2a1d98b143d8" dependencies = [ + "camino", "cargo-platform", "semver 0.11.0", "semver-parser 0.10.2", @@ -1107,9 +1139,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" +checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" [[package]] name = "constant_time_eq" @@ -1163,38 +1195,36 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "cranelift-bforest" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcee7a5107071484772b89fdf37f0f460b7db75f476e43ea7a684fd942470bcf" +checksum = "c8ca3560686e7c9c7ed7e0fe77469f2410ba5d7781b1acaa9adc8d8deea28e3e" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "654ab96f0f1cab71c0d323618a58360a492da2c341eb2c1f977fc195c664001b" +checksum = "baf9bf1ffffb6ce3d2e5ebc83549bd2436426c99b31cc550d521364cbe35d276" dependencies = [ - "byteorder", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", - "gimli", + "gimli 0.24.0", "log", "regalloc", "serde", "smallvec 1.6.1", "target-lexicon", - "thiserror", ] [[package]] name = "cranelift-codegen-meta" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65994cfc5be9d5fd10c5fc30bcdddfa50c04bb79c91329287bff846434ff8f14" +checksum = "4cc21936a5a6d07e23849ffe83e5c1f6f50305c074f4b2970ca50c13bf55b821" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -1202,27 +1232,27 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889d720b688b8b7df5e4903f9b788c3c59396050f5548e516e58ccb7312463ab" +checksum = "ca5b6ffaa87560bebe69a5446449da18090b126037920b0c1c6d5945f72faf6b" dependencies = [ "serde", ] [[package]] name = "cranelift-entity" -version = "0.71.0" +version = "0.74.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a2e6884a363e42a9ba980193ea8603a4272f8a92bd8bbaf9f57a94dbea0ff96" +checksum = "7d6b4a8bef04f82e4296782646f733c641d09497df2fabf791323fefaa44c64c" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f41e2f9b57d2c030e249d0958f1cdc2c3cd46accf8c0438b3d1944e9153444" +checksum = "c31b783b351f966fce33e3c03498cb116d16d97a8f9978164a60920bd0d3a99c" dependencies = [ "cranelift-codegen", "log", @@ -1232,9 +1262,9 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aab70ba7575665375d31cbdea2462916ce58be887834e1b83c860b43b51af637" +checksum = "a77c88d3dd48021ff1e37e978a00098524abd3513444ae252c08d37b310b3d2a" dependencies = [ "cranelift-codegen", "target-lexicon", @@ -1242,9 +1272,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.71.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fc3d2e70da6439adf97648dcdf81834363154f2907405345b6fbe7ca38918c" +checksum = "edb6d408e2da77cdbbd65466298d44c86ae71c1785d2ab0d8657753cdb4d9d89" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1590,6 +1620,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "downcast-rs" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" + [[package]] name = "dyn-clonable" version = "0.9.0" @@ -1695,9 +1731,9 @@ dependencies = [ [[package]] name = "environmental" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" +checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] name = "erased-serde" @@ -1792,10 +1828,10 @@ name = "ethereum-contract-builtin" version = "0.1.0" dependencies = [ "ethereum-types", - "finality-grandpa 0.14.0", + "finality-grandpa", "hex", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rialto-runtime", "sc-finality-grandpa", "sp-blockchain", @@ -1829,7 +1865,7 @@ dependencies = [ "messages-relay", "num-traits", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-ethereum-client", "relay-rialto-client", "relay-substrate-client", @@ -1951,31 +1987,16 @@ dependencies = [ [[package]] name = "finality-grandpa" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" -dependencies = [ - "either", - "futures 0.3.13", - "futures-timer 2.0.2", - "log", - "num-traits", - "parity-scale-codec 1.3.7", - "parking_lot 0.9.0", -] - -[[package]] -name = "finality-grandpa" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" +checksum = "74a1bfdcc776e63e49f741c7ce6116fa1b887e8ac2e3ccb14dd4aa113e54feb9" dependencies = [ "either", "futures 0.3.13", "futures-timer 3.0.2", "log", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", 
"parking_lot 0.11.1", ] @@ -2047,9 +2068,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", ] [[package]] @@ -2065,13 +2086,13 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "3.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", "linregress", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "paste 1.0.4", "sp-api", "sp-io", @@ -2084,13 +2105,13 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "Inflector", "chrono", "frame-benchmarking", "handlebars", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-cli", "sc-client-db", "sc-executor", @@ -2107,11 +2128,11 @@ dependencies = [ [[package]] name = "frame-executive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -2122,9 +2143,9 @@ dependencies = [ [[package]] name = "frame-metadata" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-std", @@ -2133,15 +2154,16 @@ dependencies = [ [[package]] name = "frame-support" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "bitflags", "frame-metadata", "frame-support-procedural", "impl-trait-for-tuples", "log", + "max-encoded-len", "once_cell", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "paste 1.0.4", "serde", "smallvec 1.6.1", @@ -2159,7 +2181,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -2171,7 +2193,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 1.0.0", @@ -2183,7 +2205,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "proc-macro2", "quote", @@ -2193,12 +2215,12 @@ dependencies = [ [[package]] name = "frame-system" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -2210,17 +2232,17 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", ] [[package]] name = "fs-swap" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5839fda247e24ca4919c87c71dd5ca658f1f39e4f06829f80e3f15c3bafcfc2c" +checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" dependencies = [ "lazy_static", "libc", @@ -2313,22 +2335,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "futures-diagnose" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" -dependencies = [ - "futures 0.1.31", - "futures 0.3.13", - "lazy_static", - "log", - "parking_lot 0.9.0", - "pin-project 0.4.27", - "serde", - "serde_json", -] - [[package]] name = "futures-executor" version = "0.3.13" @@ -2501,6 +2507,12 @@ name = "gimli" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" + +[[package]] +name = "gimli" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" dependencies = [ "fallible-iterator", "indexmap", @@ -2946,7 +2958,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", ] [[package]] @@ -3400,9 +3412,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.86" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" +checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" [[package]] name = "libloading" @@ -4020,6 +4032,28 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "max-encoded-len" +version = "3.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" +dependencies = [ + "impl-trait-for-tuples", + "max-encoded-len-derive", + "parity-scale-codec", + "primitive-types", +] + +[[package]] +name = "max-encoded-len-derive" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" +dependencies = [ + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "maybe-uninit" version = "2.0.0" @@ -4105,9 +4139,11 @@ dependencies = [ "async-std", "async-trait", "bp-messages", + "bp-runtime", "futures 0.3.13", "hex", "log", + "num-traits", "parking_lot 0.11.1", "relay-utils", ] @@ -4181,7 +4217,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-block-builder", @@ -4430,11 +4466,11 @@ dependencies = [ [[package]] name = "node-inspect" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-cli", "sc-client-api", "sc-service", @@ -4553,6 +4589,12 @@ name = "object" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" + +[[package]] +name = "object" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" dependencies = [ "crc32fast", "indexmap", @@ -4610,13 +4652,13 @@ dependencies = [ [[package]] name = "pallet-aura" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", "pallet-session", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-application-crypto", "sp-consensus-aura", "sp-runtime", @@ -4626,12 +4668,12 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-authorship", "sp-runtime", "sp-std", @@ -4640,13 +4682,14 @@ dependencies = [ [[package]] name = "pallet-balances" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 2.0.1", + "max-encoded-len", + "parity-scale-codec", "sp-runtime", "sp-std", ] @@ -4661,7 +4704,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -4678,7 +4721,7 @@ dependencies = [ 
"frame-support", "frame-system", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -4697,7 +4740,7 @@ dependencies = [ "hex-literal 0.3.1", "libsecp256k1", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-io", "sp-runtime", @@ -4711,13 +4754,13 @@ dependencies = [ "bp-header-chain", "bp-runtime", "bp-test-utils", - "finality-grandpa 0.14.0", + "finality-grandpa", "frame-benchmarking", "frame-support", "frame-system", "log", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-finality-grandpa", "sp-io", @@ -4730,6 +4773,8 @@ dependencies = [ name = "pallet-bridge-messages" version = "0.1.0" dependencies = [ + "bitvec", + "bp-message-dispatch", "bp-messages", "bp-rialto", "bp-runtime", @@ -4741,7 +4786,7 @@ dependencies = [ "log", "num-traits", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -4751,8 +4796,8 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +version = "3.1.0" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-benchmarking", "frame-support", @@ -4760,10 +4805,11 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-application-crypto", "sp-core", "sp-finality-grandpa", + "sp-io", "sp-runtime", "sp-session", "sp-staking", @@ -4773,11 +4819,11 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "safe-mix", "sp-runtime", "sp-std", @@ -4786,13 +4832,14 @@ dependencies = [ [[package]] name = "pallet-session" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", + "log", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -4809,7 +4856,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-runtime", @@ -4820,11 +4867,11 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-io", "sp-runtime", "sp-std", @@ -4833,14 +4880,14 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ 
"frame-benchmarking", "frame-support", "frame-system", "impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-inherents", "sp-runtime", "sp-std", @@ -4850,11 +4897,11 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "smallvec 1.6.1", "sp-core", @@ -4866,13 +4913,13 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "jsonrpc-core 15.1.0", "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-blockchain", "sp-core", @@ -4883,10 +4930,10 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-runtime", ] @@ -4899,9 +4946,9 @@ checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" [[package]] name = "parity-db" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495197c078e54b8735181aa35c00a327f7f3a3cc00a1ee8c95926dd010f0ec6b" +checksum = "2e337f62db341435f0da05b8f6b97e984ef4ea5800510cd07c2d624688c40b47" dependencies = [ "blake2-rfc", "crc32fast", @@ -4934,47 +4981,22 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" -dependencies = [ - "arrayvec 0.5.2", - "bitvec 0.17.4", - "byte-slice-cast 0.3.5", - "parity-scale-codec-derive 1.2.3", - "serde", -] - -[[package]] -name = "parity-scale-codec" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd3dab59b5cf4bc81069ade0fc470341a1ef3ad5fa73e5a8943bed2ec12b2e8" +checksum = "e0f518afaa5a47d0d6386229b0a6e01e86427291d643aa4cabb4992219f504f8" dependencies = [ - "arrayvec 0.5.2", - "bitvec 0.20.1", - "byte-slice-cast 1.0.0", - "parity-scale-codec-derive 2.0.1", + "arrayvec 0.7.0", + "bitvec", + "byte-slice-cast", + "parity-scale-codec-derive", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c41512944b1faff334a5f1b9447611bf4ef40638ccb6328173dacefb338e878c" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa04976a81fde04924b40cc4036c4d12841e8bb04325a5cf2ada75731a150a7d" +checksum = 
"f44c5f94427bd0b5076e8f7e15ca3f60a4d8ac0077e4793884e6fdfd8915344e" dependencies = [ "proc-macro-crate 0.1.5", "proc-macro2", @@ -5045,9 +5067,9 @@ dependencies = [ [[package]] name = "parity-wasm" -version = "0.41.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" [[package]] name = "parity-ws" @@ -5541,13 +5563,13 @@ dependencies = [ [[package]] name = "pwasm-utils" -version = "0.14.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" +checksum = "a0e517f47d9964362883182404b68d0b6949382c0baa40aa5ffca94f5f1e3481" dependencies = [ "byteorder", "log", - "parity-wasm 0.41.0", + "parity-wasm 0.42.2", ] [[package]] @@ -5582,12 +5604,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - [[package]] name = "radium" version = "0.6.2" @@ -5884,6 +5900,7 @@ dependencies = [ name = "relay-ethereum-client" version = "0.1.0" dependencies = [ + "async-std", "bp-eth-poa", "headers-relay", "hex-literal 0.3.1", @@ -5891,7 +5908,7 @@ dependencies = [ "jsonrpsee-ws-client", "libsecp256k1", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-utils", "web3", ] @@ -5905,7 +5922,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "sp-core", @@ -5922,7 +5939,7 @@ dependencies = [ "headers-relay", "millau-runtime", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "sp-core", @@ -5939,7 +5956,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "sp-core", @@ -5955,7 +5972,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "rialto-runtime", @@ -5968,12 +5985,21 @@ dependencies = [ name = "relay-rococo-client" version = "0.1.0" dependencies = [ + "bp-header-chain", + "bp-message-dispatch", + "bp-messages", + "bp-polkadot-core", "bp-rococo", + "bp-runtime", + "bp-wococo", + "bridge-runtime-common", "frame-support", "frame-system", "headers-relay", + "pallet-bridge-dispatch", + "pallet-bridge-messages", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "sp-core", @@ -6000,7 +6026,7 @@ dependencies = [ "log", "num-traits", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand 0.7.3", "relay-utils", "sc-rpc-api", @@ -6042,7 +6068,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "sp-core", @@ -6054,12 +6080,21 @@ dependencies = [ name = "relay-wococo-client" version = "0.1.0" dependencies = [ + "bp-header-chain", + "bp-message-dispatch", + "bp-messages", + "bp-polkadot-core", + "bp-rococo", + "bp-runtime", "bp-wococo", + "bridge-runtime-common", 
"frame-support", "frame-system", "headers-relay", + "pallet-bridge-dispatch", + "pallet-bridge-messages", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "relay-substrate-client", "relay-utils", "sp-core", @@ -6088,9 +6123,9 @@ dependencies = [ [[package]] name = "retain_mut" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53552c6c49e1e13f1a203ef0080ab3bbef0beb570a528993e83df057a9d9bba1" +checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b" [[package]] name = "rialto-bridge-node" @@ -6168,7 +6203,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-block-builder", @@ -6366,12 +6401,12 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -6389,9 +6424,9 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sp-api", "sp-block-builder", @@ -6405,10 +6440,10 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-chain-spec-derive", "sc-consensus-babe", "sc-consensus-epochs", @@ -6426,7 +6461,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -6437,7 +6472,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "chrono", "fdlimit", @@ -6446,7 +6481,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand 0.7.3", "regex", "rpassword", @@ -6475,7 +6510,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", "fnv", @@ -6484,7 +6519,7 @@ dependencies = [ "kvdb", "lazy_static", 
"log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-executor", "sp-api", @@ -6509,7 +6544,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "blake2-rfc", "hash-db", @@ -6519,7 +6554,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "sc-client-api", @@ -6539,8 +6574,9 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ + "async-trait", "parking_lot 0.11.1", "sc-client-api", "sp-blockchain", @@ -6551,14 +6587,14 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "derive_more", "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-block-builder", "sc-client-api", "sc-consensus-slots", @@ -6582,7 +6618,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "derive_more", @@ -6594,7 +6630,7 @@ dependencies = [ "num-bigint", "num-rational", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "pdqselect", "rand 0.7.3", @@ -6628,10 +6664,10 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "fork-tree", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -6641,14 +6677,14 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "futures 0.3.13", "futures-timer 3.0.2", "impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sc-telemetry", "sp-api", @@ -6669,7 +6705,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "sc-client-api", "sp-authorship", @@ -6680,14 +6716,14 @@ dependencies = [ [[package]] name = 
"sc-executor" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", "lazy_static", "libsecp256k1", "log", - "parity-scale-codec 2.0.1", - "parity-wasm 0.41.0", + "parity-scale-codec", + "parity-wasm 0.42.2", "parking_lot 0.11.1", "sc-executor-common", "sc-executor-wasmi", @@ -6696,7 +6732,6 @@ dependencies = [ "sp-core", "sp-externalities", "sp-io", - "sp-maybe-compressed-blob", "sp-panic-handler", "sp-runtime-interface", "sp-serializer", @@ -6710,14 +6745,14 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", - "parity-scale-codec 2.0.1", - "parity-wasm 0.41.0", + "parity-scale-codec", "pwasm-utils", "sp-allocator", "sp-core", + "sp-maybe-compressed-blob", "sp-serializer", "sp-wasm-interface", "thiserror", @@ -6727,10 +6762,10 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-executor-common", "sp-allocator", "sp-core", @@ -6742,12 +6777,13 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ + "cfg-if 1.0.0", + "libc", "log", - "parity-scale-codec 2.0.1", - "parity-wasm 0.41.0", - "pwasm-utils", + "parity-scale-codec", + "parity-wasm 0.42.2", "sc-executor-common", "scoped-tls", "sp-allocator", @@ -6760,18 +6796,18 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "derive_more", "dyn-clone", - "finality-grandpa 0.14.0", + "finality-grandpa", "fork-tree", "futures 0.3.13", "futures-timer 3.0.2", "linked-hash-map", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.5", "rand 0.7.3", @@ -6795,22 +6831,23 @@ dependencies = [ "sp-runtime", "sp-utils", "substrate-prometheus-endpoint", + "wasm-timer", ] [[package]] name = "sc-finality-grandpa-rpc" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", - "finality-grandpa 0.14.0", + "finality-grandpa", "futures 0.3.13", "jsonrpc-core 15.1.0", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -6824,10 +6861,11 @@ 
dependencies = [ [[package]] name = "sc-informant" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "ansi_term 0.12.1", "futures 0.3.13", + "futures-timer 3.0.2", "log", "parity-util-mem", "sc-client-api", @@ -6835,14 +6873,13 @@ dependencies = [ "sp-blockchain", "sp-runtime", "sp-transaction-pool", - "sp-utils", "wasm-timer", ] [[package]] name = "sc-keystore" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "derive_more", @@ -6862,11 +6899,11 @@ dependencies = [ [[package]] name = "sc-light" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "hash-db", "lazy_static", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-client-api", "sc-executor", @@ -6881,7 +6918,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-std", "async-trait", @@ -6905,7 +6942,7 @@ dependencies = [ "log", "lru", "nohash-hasher", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.5", "prost", @@ -6934,7 +6971,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "futures-timer 3.0.2", @@ -6951,7 +6988,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "bytes 0.5.6", "fnv", @@ -6962,7 +6999,7 @@ dependencies = [ "hyper-rustls", "log", "num_cpus", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "rand 0.7.3", "sc-client-api", @@ -6979,7 +7016,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "libp2p", @@ -6992,7 +7029,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7001,14 +7038,14 @@ dependencies = [ [[package]] name = 
"sc-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "hash-db", "jsonrpc-core 15.1.0", "jsonrpc-pubsub", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -7027,6 +7064,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-state-machine", + "sp-tracing", "sp-transaction-pool", "sp-utils", "sp-version", @@ -7035,7 +7073,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", "futures 0.3.13", @@ -7044,7 +7082,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "serde", "serde_json", @@ -7052,6 +7090,7 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", + "sp-tracing", "sp-transaction-pool", "sp-version", ] @@ -7059,7 +7098,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.1.31", "jsonrpc-core 15.1.0", @@ -7077,7 +7116,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "directories", @@ -7090,7 +7129,7 @@ dependencies = [ "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "pin-project 1.0.5", @@ -7125,8 +7164,10 @@ dependencies = [ "sp-runtime", "sp-session", "sp-state-machine", + "sp-storage", "sp-tracing", "sp-transaction-pool", + "sp-transaction-storage-proof", "sp-trie", "sp-utils", "sp-version", @@ -7141,10 +7182,10 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.11.1", @@ -7156,7 +7197,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "chrono", "futures 0.3.13", @@ -7176,7 +7217,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" 
dependencies = [ "ansi_term 0.12.1", "atty", @@ -7187,23 +7228,33 @@ dependencies = [ "parking_lot 0.11.1", "regex", "rustc-hash", + "sc-client-api", + "sc-rpc-server", + "sc-telemetry", "sc-tracing-proc-macro", "serde", "serde_json", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-core", + "sp-rpc", + "sp-runtime", + "sp-storage", "sp-tracing", "thiserror", "tracing", - "tracing-core", "tracing-log", "tracing-subscriber", "wasm-bindgen", + "wasm-timer", "web-sys", ] [[package]] name = "sc-tracing-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -7214,7 +7265,7 @@ dependencies = [ [[package]] name = "sc-transaction-graph" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", "futures 0.3.13", @@ -7236,13 +7287,12 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", - "futures-diagnose", "intervalier", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "sc-client-api", @@ -7282,7 +7332,6 @@ dependencies = [ "merlin", "rand 0.7.3", "rand_core 0.5.1", - "serde", "sha2 0.8.2", "subtle 2.4.0", "zeroize", @@ -7573,6 +7622,15 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +[[package]] +name = "slog" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" +dependencies = [ + "erased-serde", +] + [[package]] name = "sluice" version = "0.5.4" @@ -7657,7 +7715,7 @@ dependencies = [ [[package]] name = "sp-allocator" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "log", "sp-core", @@ -7669,11 +7727,11 @@ dependencies = [ [[package]] name = "sp-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "hash-db", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -7686,7 +7744,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "blake2-rfc", "proc-macro-crate 1.0.0", @@ -7698,9 +7756,10 @@ dependencies = [ [[package]] name = "sp-application-crypto" 
version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "max-encoded-len", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -7710,11 +7769,11 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-debug-derive", "sp-std", @@ -7724,10 +7783,10 @@ dependencies = [ [[package]] name = "sp-authorship" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-inherents", "sp-runtime", "sp-std", @@ -7736,9 +7795,9 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", @@ -7748,12 +7807,12 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "log", "lru", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sp-api", "sp-consensus", @@ -7766,7 +7825,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "serde", "serde_json", @@ -7775,14 +7834,14 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "serde", "sp-api", @@ -7802,10 +7861,10 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-application-crypto", "sp-consensus", @@ -7819,11 +7878,11 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = 
"0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "merlin", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-application-crypto", @@ -7841,9 +7900,9 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-arithmetic", "sp-runtime", ] @@ -7851,9 +7910,9 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "schnorrkel", "sp-core", "sp-runtime", @@ -7863,7 +7922,7 @@ dependencies = [ [[package]] name = "sp-core" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "base58", "blake2-rfc", @@ -7878,9 +7937,10 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", + "max-encoded-len", "merlin", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "primitive-types", @@ -7907,7 +7967,7 @@ dependencies = [ [[package]] name = "sp-database" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "kvdb", "parking_lot 0.11.1", @@ -7916,7 +7976,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "proc-macro2", "quote", @@ -7926,10 +7986,10 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "environmental", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-std", "sp-storage", ] @@ -7937,11 +7997,11 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "finality-grandpa 0.14.0", + "finality-grandpa", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-application-crypto", @@ -7954,11 +8014,11 @@ dependencies = [ [[package]] name = "sp-inherents" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-runtime", "sp-std", @@ -7968,17 +8028,18 @@ dependencies = [ [[package]] name = "sp-io" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "hash-db", "libsecp256k1", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sp-core", "sp-externalities", "sp-keystore", + "sp-maybe-compressed-blob", "sp-runtime-interface", "sp-state-machine", "sp-std", @@ -7992,7 +8053,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "lazy_static", "sp-core", @@ -8003,13 +8064,13 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "derive_more", "futures 0.3.13", "merlin", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "schnorrkel", "serde", @@ -8020,7 +8081,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "ruzstd", "zstd", @@ -8029,7 +8090,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "sp-api", "sp-core", @@ -8039,7 +8100,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "backtrace", ] @@ -8047,22 +8108,25 @@ dependencies = [ [[package]] name = "sp-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ + "rustc-hash", "serde", "sp-core", + "tracing-core", ] [[package]] name = "sp-runtime" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "either", "hash256-std-hasher", 
"impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", + "max-encoded-len", + "parity-scale-codec", "parity-util-mem", "paste 1.0.4", "rand 0.7.3", @@ -8077,10 +8141,10 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -8094,7 +8158,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "Inflector", "proc-macro-crate 1.0.0", @@ -8106,7 +8170,7 @@ dependencies = [ [[package]] name = "sp-serializer" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "serde", "serde_json", @@ -8115,9 +8179,9 @@ dependencies = [ [[package]] name = "sp-session" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-core", "sp-runtime", @@ -8128,9 +8192,9 @@ dependencies = [ [[package]] name = "sp-staking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-runtime", "sp-std", ] @@ -8138,12 +8202,12 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "rand 0.7.3", "smallvec 1.6.1", @@ -8153,6 +8217,7 @@ dependencies = [ "sp-std", "sp-trie", "thiserror", + "tracing", "trie-db", "trie-root", ] @@ -8160,15 +8225,15 @@ dependencies = [ [[package]] name = "sp-std" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" [[package]] name = "sp-storage" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "impl-serde", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", @@ -8178,7 +8243,7 @@ dependencies = [ 
[[package]] name = "sp-tasks" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "log", "sp-core", @@ -8191,12 +8256,12 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-trait", "futures-timer 3.0.2", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", @@ -8208,10 +8273,15 @@ dependencies = [ [[package]] name = "sp-tracing" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ + "erased-serde", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", + "parking_lot 0.10.2", + "serde", + "serde_json", + "slog", "sp-std", "tracing", "tracing-core", @@ -8221,12 +8291,12 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "derive_more", "futures 0.3.13", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-blockchain", @@ -8234,14 +8304,29 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sp-transaction-storage-proof" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" +dependencies = [ + "async-trait", + "log", + "parity-scale-codec", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-trie", +] + [[package]] name = "sp-trie" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "hash-db", "memory-db", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-std", "trie-db", @@ -8251,7 +8336,7 @@ dependencies = [ [[package]] name = "sp-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "futures 0.3.13", "futures-core", @@ -8263,22 +8348,35 @@ dependencies = [ [[package]] name = "sp-version" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "impl-serde", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-runtime", "sp-std", + "sp-version-proc-macro", +] + +[[package]] +name = "sp-version-proc-macro" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" +dependencies = [ + 
"parity-scale-codec", + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "sp-wasm-interface" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-std", "wasmi", ] @@ -8306,9 +8404,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "standback" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" dependencies = [ "version_check", ] @@ -8385,13 +8483,13 @@ dependencies = [ "bp-runtime", "bp-test-utils", "env_logger 0.8.3", - "finality-grandpa 0.12.3", + "finality-grandpa", "frame-support", "frame-system", "hash-db", "honggfuzz", "log", - "parity-scale-codec 1.3.7", + "parity-scale-codec", "sp-core", "sp-finality-grandpa", "sp-io", @@ -8487,8 +8585,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd540ba72520174c2c73ce96bf507eeba3cc8a481f58be92525b69110e1fa645" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "platforms", ] @@ -8496,7 +8593,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "frame-system-rpc-runtime-api", "futures 0.3.13", @@ -8504,7 +8601,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sc-rpc-api", "serde", @@ -8519,7 +8616,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#b094edafd1cd5d26e49ecbf92b0ce7553cfad717" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "async-std", "derive_more", @@ -8549,7 +8646,7 @@ dependencies = [ "bp-westend", "bp-wococo", "bridge-runtime-common", - "finality-grandpa 0.14.0", + "finality-grandpa", "finality-relay", "frame-support", "futures 0.3.13", @@ -8561,8 +8658,9 @@ dependencies = [ "millau-runtime", "num-format", "num-traits", + "pallet-bridge-grandpa", "pallet-bridge-messages", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "paste 1.0.4", "relay-kusama-client", "relay-millau-client", @@ -8585,14 +8683,14 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79091baab813855ddf65b191de9fe53e656b6b67c1e9bd23fdcbff8788164684" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" dependencies = [ "ansi_term 0.12.1", "atty", "build-helper", "cargo_metadata", + "sp-maybe-compressed-blob", "tempfile", "toml", "walkdir", @@ -8665,9 +8763,9 @@ 
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95" +checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834" [[package]] name = "tempfile" @@ -9174,9 +9272,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" +checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -9196,9 +9294,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.3" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec051edf7f0fc9499a2cb0947652cab2148b9d7f61cee7605e312e9f970dacaf" +checksum = "cd81fe0c8bc2b528a51c9d2c31dae4483367a26a723a3c9a4a8120311d7774e3" dependencies = [ "hash-db", "hashbrown", @@ -9607,38 +9705,39 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.6.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +checksum = "d2ee05bba3d1d994652079893941a2ef9324d2b58a63c31b40678fb7eddd7a5a" dependencies = [ + "downcast-rs", "libc", "memory_units", "num-rational", "num-traits", - "parity-wasm 0.41.0", + "parity-wasm 0.42.2", "wasmi-validation", ] [[package]] name = "wasmi-validation" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" dependencies = [ - "parity-wasm 0.41.0", + "parity-wasm 0.42.2", ] [[package]] name = "wasmparser" -version = "0.76.0" +version = "0.78.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755a9a4afe3f6cccbbe6d7e965eef44cf260b001f93e547eba84255c1d0187d8" +checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" [[package]] name = "wasmtime" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718cb52a9fdb7ab12471e9b9d051c9adfa6b5c504e0a1fea045e5eabc81eedd9" +checksum = "b310b9d20fcf59385761d1ade7a3ef06aecc380e3d3172035b919eaf7465d9f7" dependencies = [ "anyhow", "backtrace", @@ -9646,9 +9745,11 @@ dependencies = [ "cfg-if 1.0.0", "cpp_demangle", "indexmap", + "lazy_static", "libc", "log", "paste 1.0.4", + "psm", "region", "rustc-demangle", "serde", @@ -9667,9 +9768,9 @@ dependencies = [ [[package]] name = "wasmtime-cache" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f984df56c4adeba91540f9052db9f7a8b3b00cfaac1a023bee50a972f588b0c" +checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843" dependencies = [ "anyhow", "base64 0.13.0", @@ -9688,28 +9789,29 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a05abbf94e03c2c8ee02254b1949320c4d45093de5d9d6ed4d9351d536075c9" +checksum = "c525b39f062eada7db3c1298287b96dcb6e472b9f6b22501300b28d9fa7582f6" 
dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "cranelift-wasm", + "target-lexicon", "wasmparser", "wasmtime-environ", ] [[package]] name = "wasmtime-debug" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382eecd6281c6c1d1f3c904c3c143e671fc1a9573820cbfa777fba45ce2eda9c" +checksum = "c5d2a763e7a6fc734218e0e463196762a4f409c483063d81e0e85f96343b2e0a" dependencies = [ "anyhow", - "gimli", + "gimli 0.24.0", "more-asserts", - "object", + "object 0.24.0", "target-lexicon", "thiserror", "wasmparser", @@ -9718,16 +9820,15 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81011b2b833663d7e0ce34639459a0e301e000fc7331e0298b3a27c78d0cec60" +checksum = "f64d0c2d881c31b0d65c1f2695e022d71eb60b9fbdd336aacca28208b58eac90" dependencies = [ - "anyhow", "cfg-if 1.0.0", "cranelift-codegen", "cranelift-entity", "cranelift-wasm", - "gimli", + "gimli 0.24.0", "indexmap", "log", "more-asserts", @@ -9738,9 +9839,9 @@ dependencies = [ [[package]] name = "wasmtime-fiber" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d92da32e31af2e3d828f485f5f24651ed4d3b7f03a46ea6555eae6940d1402cd" +checksum = "a089d44cd7e2465d41a53b840a5b4fca1bf6d1ecfebc970eac9592b34ea5f0b3" dependencies = [ "cc", "libc", @@ -9749,11 +9850,11 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5f649623859a12d361fe4cc4793de44f7c3ff34c322c5714289787e89650bb" +checksum = "4d4539ea734422b7c868107e2187d7746d8affbcaa71916d72639f53757ad707" dependencies = [ - "addr2line", + "addr2line 0.15.2", "anyhow", "cfg-if 1.0.0", "cranelift-codegen", @@ -9761,10 +9862,10 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli", + "gimli 0.24.0", "log", "more-asserts", - "object", + "object 0.24.0", "rayon", "region", "serde", @@ -9782,13 +9883,13 @@ dependencies = [ [[package]] name = "wasmtime-obj" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2e99cd9858f57fd062e9351e07881cedfc8597928385e02a48d9333b9e15a1" +checksum = "8e1a8ff85246d091828e2225af521a6208ed28c997bb5c39eb697366dc2e2f2b" dependencies = [ "anyhow", "more-asserts", - "object", + "object 0.24.0", "target-lexicon", "wasmtime-debug", "wasmtime-environ", @@ -9796,16 +9897,16 @@ dependencies = [ [[package]] name = "wasmtime-profiling" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46c0a590e49278ba7f79ef217af9db4ecc671b50042c185093e22d73524abb2" +checksum = "e24364d522dcd67c897c8fffc42e5bdfc57207bbb6d7eeade0da9d4a7d70105b" dependencies = [ "anyhow", "cfg-if 1.0.0", - "gimli", + "gimli 0.24.0", "lazy_static", "libc", - "object", + "object 0.24.0", "scroll", "serde", "target-lexicon", @@ -9815,10 +9916,11 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1438a09185fc7ca067caf1a80d7e5b398eefd4fb7630d94841448ade60feb3d0" +checksum = "c51e57976e8a19a18a18e002c6eb12e5769554204238e47ff155fda1809ef0f7" dependencies = [ + "anyhow", "backtrace", "cc", "cfg-if 1.0.0", @@ -9826,12 +9928,14 @@ dependencies = [ 
"lazy_static", "libc", "log", + "mach", "memoffset 0.6.1", "more-asserts", - "psm", + "rand 0.8.3", "region", "thiserror", "wasmtime-environ", + "wasmtime-fiber", "winapi 0.3.9", ] diff --git a/bridges/Dockerfile b/bridges/Dockerfile index b3c4a7b4ba7e..2d03db8a76f2 100644 --- a/bridges/Dockerfile +++ b/bridges/Dockerfile @@ -8,14 +8,14 @@ # # See the `deployments/README.md` for all the available `PROJECT` values. -FROM paritytech/bridge-dependencies as builder +FROM paritytech/bridges-ci:latest as builder WORKDIR /parity-bridges-common COPY . . ARG PROJECT=ethereum-poa-relay -RUN cargo build --release --verbose -p ${PROJECT} -RUN strip ./target/release/${PROJECT} +RUN cargo build --release --verbose -p ${PROJECT} && \ + strip ./target/release/${PROJECT} # In this final stage we copy over the final binary and do some checks # to make sure that everything looks good. @@ -27,9 +27,9 @@ ENV DEBIAN_FRONTEND=noninteractive RUN set -eux; \ apt-get update && \ - apt-get install -y curl ca-certificates && \ - apt-get install -y --no-install-recommends libssl-dev && \ - update-ca-certificates && \ + apt-get install -y --no-install-recommends \ + curl ca-certificates libssl-dev && \ + update-ca-certificates && \ groupadd -g 1000 user && \ useradd -u 1000 -g user -s /bin/sh -m user && \ # apt clean up diff --git a/bridges/README.md b/bridges/README.md index 84d776c4f8ef..b407f203b742 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -24,7 +24,7 @@ Substrate chains or Ethereum Proof-of-Authority chains. To get up and running you need both stable and nightly Rust. Rust nightly is used to build the Web Assembly (WASM) runtime for the node. You can configure the WASM support as so: -``` +```bash rustup install nightly rustup target add wasm32-unknown-unknown --toolchain nightly ``` @@ -94,7 +94,7 @@ the `relays` which are used to pass messages between chains. │ └── ... ├── relays // Application for sending headers and messages between chains │ └── ... -└── scripts // Useful development and maintenence scripts +└── scripts // Useful development and maintenance scripts ``` ## Running the Bridge @@ -103,6 +103,7 @@ To run the Bridge you need to be able to connect the bridge relay node to the RP on each side of the bridge (source and target chain). There are 3 ways to run the bridge, described below: + - building & running from source, - building or using Docker images for each individual component, - running a Docker Compose setup (recommended). 
diff --git a/bridges/bin/millau/node/Cargo.toml b/bridges/bin/millau/node/Cargo.toml index 4c5f081e1506..8c6d32402ac2 100644 --- a/bridges/bin/millau/node/Cargo.toml +++ b/bridges/bin/millau/node/Cargo.toml @@ -51,7 +51,7 @@ sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "mast substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } [build-dependencies] -substrate-build-script-utils = "3.0.0" +substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] diff --git a/bridges/bin/millau/node/src/chain_spec.rs b/bridges/bin/millau/node/src/chain_spec.rs index a6f95119ca90..2c50897b965e 100644 --- a/bridges/bin/millau/node/src/chain_spec.rs +++ b/bridges/bin/millau/node/src/chain_spec.rs @@ -72,7 +72,7 @@ impl Alternative { "tokenDecimals": 9, "tokenSymbol": "MLAU", "bridgeIds": { - "Rialto": bp_runtime::RIALTO_BRIDGE_INSTANCE, + "Rialto": bp_runtime::RIALTO_CHAIN_ID, } }) .as_object() @@ -144,12 +144,21 @@ impl Alternative { derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Alice"), )), + derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Bob"), + )), derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Charlie"), )), + derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Dave"), + )), derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Eve"), )), + derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Ferdie"), + )), ], true, ) @@ -180,7 +189,7 @@ fn testnet_genesis( changes_trie_config: Default::default(), }, balances: BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 40)).collect(), + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), }, aura: AuraConfig { authorities: Vec::new(), diff --git a/bridges/bin/millau/node/src/cli.rs b/bridges/bin/millau/node/src/cli.rs index 12f99b6b95ba..46323ed25c9e 100644 --- a/bridges/bin/millau/node/src/cli.rs +++ b/bridges/bin/millau/node/src/cli.rs @@ -63,7 +63,7 @@ pub enum Subcommand { Revert(sc_cli::RevertCmd), /// Inspect blocks or extrinsics. - Inspect(node_inspect::cli::InspectKeyCmd), + Inspect(node_inspect::cli::InspectCmd), /// Benchmark runtime pallets. Benchmark(frame_benchmarking_cli::BenchmarkCmd), diff --git a/bridges/bin/millau/node/src/service.rs b/bridges/bin/millau/node/src/service.rs index a3cb4fa2457d..2373d0fbdc67 100644 --- a/bridges/bin/millau/node/src/service.rs +++ b/bridges/bin/millau/node/src/service.rs @@ -33,14 +33,13 @@ use sc_client_api::{ExecutorProvider, RemoteBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; -use sc_finality_grandpa::SharedVoterState; + use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_consensus::SlotData; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; // Our native executor instance. 
native_executor_instance!( @@ -65,12 +64,7 @@ pub fn new_partial( sp_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( - sc_consensus_aura::AuraBlockImport< - Block, - FullClient, - sc_finality_grandpa::GrandpaBlockImport, - AuraPair, - >, + sc_finality_grandpa::GrandpaBlockImport, sc_finality_grandpa::LinkHalf, Option, ), @@ -80,6 +74,7 @@ pub fn new_partial( if config.keystore_remote.is_some() { return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); } + let telemetry = config .telemetry_endpoints .clone() @@ -92,7 +87,7 @@ pub fn new_partial( .transpose()?; let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( - &config, + config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), )?; let client = Arc::new(client); @@ -108,7 +103,7 @@ pub fn new_partial( config.transaction_pool.clone(), config.role.is_authority().into(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), ); @@ -119,14 +114,11 @@ pub fn new_partial( telemetry.as_ref().map(|x| x.handle()), )?; - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: aura_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import)), + block_import: grandpa_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), create_inherent_data_providers: move |_, ()| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); @@ -153,7 +145,7 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, - other: (aura_block_import, grandpa_link, telemetry), + other: (grandpa_block_import, grandpa_link, telemetry), }) } @@ -194,16 +186,15 @@ pub fn new_full(mut config: Configuration) -> Result .extra_sets .push(sc_finality_grandpa::grandpa_peers_set_config()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - })?; + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); @@ -215,6 +206,7 @@ pub fn new_full(mut config: Configuration) -> Result let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); + let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); let rpc_extensions_builder = { use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; @@ -230,7 +222,7 @@ pub fn new_full(mut config: Configuration) -> Result let justification_stream = grandpa_link.justification_stream(); let shared_authority_set = 
grandpa_link.shared_authority_set().clone(); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + let shared_voter_state = shared_voter_state.clone(); let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); @@ -266,7 +258,6 @@ pub fn new_full(mut config: Configuration) -> Result on_demand: None, remote_blockchain: None, backend, - network_status_sinks, system_rpc_tx, config, telemetry: telemetry.as_mut(), @@ -286,7 +277,7 @@ pub fn new_full(mut config: Configuration) -> Result let slot_duration = sc_consensus_aura::slot_duration(&*client)?; let raw_slot_duration = slot_duration.slot_duration(); - let aura = sc_consensus_aura::start_aura::(StartAuraParams { + let aura = sc_consensus_aura::start_aura::(StartAuraParams { slot_duration, client, select_chain, @@ -307,7 +298,9 @@ pub fn new_full(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), can_author_with, sync_oracle: network.clone(), + justification_sync_link: network.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), })?; @@ -331,7 +324,7 @@ pub fn new_full(mut config: Configuration) -> Result name: Some(name), observer_enabled: false, keystore, - is_authority: role.is_authority(), + local_role: role, telemetry: telemetry.as_ref().map(|x| x.handle()), }; @@ -348,7 +341,7 @@ pub fn new_full(mut config: Configuration) -> Result network, voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, - shared_voter_state: SharedVoterState::empty(), + shared_voter_state, telemetry: telemetry.as_ref().map(|x| x.handle()), }; @@ -397,24 +390,22 @@ pub fn new_light(mut config: Configuration) -> Result let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), on_demand.clone(), )); - let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( client.clone(), &(client.clone() as Arc<_>), select_chain, telemetry.as_ref().map(|x| x.handle()), )?; - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: aura_block_import, + block_import: grandpa_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import)), client: client.clone(), create_inherent_data_providers: move |_, ()| async move { @@ -434,21 +425,40 @@ pub fn new_light(mut config: Configuration) -> Result telemetry: telemetry.as_ref().map(|x| x.handle()), })?; - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - })?; + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: 
transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); } + let enable_grandpa = !config.disable_grandpa; + if enable_grandpa { + let name = config.network.node_name.clone(); + + let config = sc_finality_grandpa::Config { + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore: None, + local_role: config.role.clone(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + task_manager.spawn_handle().spawn_blocking( + "grandpa-observer", + sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, + ); + } + sc_service::spawn_tasks(sc_service::SpawnTasksParams { remote_blockchain: Some(backend.remote_blockchain()), transaction_pool, @@ -460,12 +470,10 @@ pub fn new_light(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), backend, network, - network_status_sinks, system_rpc_tx, telemetry: telemetry.as_mut(), })?; network_starter.start_network(); - Ok(task_manager) } diff --git a/bridges/bin/millau/runtime/Cargo.toml b/bridges/bin/millau/runtime/Cargo.toml index 240b079d110f..367c1c3eef70 100644 --- a/bridges/bin/millau/runtime/Cargo.toml +++ b/bridges/bin/millau/runtime/Cargo.toml @@ -56,7 +56,7 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } [build-dependencies] -substrate-wasm-builder = "3.0.0" +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] default = ["std"] diff --git a/bridges/bin/millau/runtime/src/lib.rs b/bridges/bin/millau/runtime/src/lib.rs index 7fe69f0e472e..33c21027f8a2 100644 --- a/bridges/bin/millau/runtime/src/lib.rs +++ b/bridges/bin/millau/runtime/src/lib.rs @@ -252,6 +252,7 @@ parameter_types! { // For weight estimation, we assume that the most locks on an individual account will be 50. // This number may need to be adjusted in the future if this assumption no longer holds true. pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; } impl pallet_balances::Config for Runtime { @@ -265,6 +266,8 @@ impl pallet_balances::Config for Runtime { // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) type WeightInfo = (); type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; } parameter_types! { @@ -381,6 +384,7 @@ impl pallet_bridge_messages::Config for Runtime { GetDeliveryConfirmationTransactionFee, RootAccountForPayments, >; + type OnDeliveryConfirmed = (); type SourceHeaderChain = crate::rialto_messages::Rialto; type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch; @@ -600,17 +604,23 @@ impl_runtime_apis! 
{ ).ok() } - fn messages_dispatch_weight( + fn message_details( lane: bp_messages::LaneId, begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, - ) -> Vec<(bp_messages::MessageNonce, Weight, u32)> { + ) -> Vec> { (begin..=end).filter_map(|nonce| { - let encoded_payload = BridgeRialtoMessages::outbound_message_payload(lane, nonce)?; + let message_data = BridgeRialtoMessages::outbound_message_data(lane, nonce)?; let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode( - &mut &encoded_payload[..] + &mut &message_data.payload[..] ).ok()?; - Some((nonce, decoded_payload.weight, encoded_payload.len() as _)) + Some(bp_messages::MessageDetails { + nonce, + dispatch_weight: decoded_payload.weight, + size: message_data.payload.len() as _, + delivery_and_dispatch_fee: message_data.fee, + dispatch_fee_payment: decoded_payload.dispatch_fee_payment, + }) }) .collect() } @@ -644,7 +654,7 @@ impl_runtime_apis! { /// The byte vector returned by this function should be signed with a Rialto account private key. /// This way, the owner of `millau_account_id` on Millau proves that the Rialto account private key /// is also under his control. -pub fn rialto_account_ownership_digest( +pub fn millau_to_rialto_account_ownership_digest( rialto_call: &Call, millau_account_id: AccountId, rialto_spec_version: SpecVersion, @@ -658,7 +668,8 @@ where rialto_call, millau_account_id, rialto_spec_version, - bp_runtime::MILLAU_BRIDGE_INSTANCE, + bp_runtime::MILLAU_CHAIN_ID, + bp_runtime::RIALTO_CHAIN_ID, ) } @@ -676,6 +687,7 @@ mod tests { bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT, ); let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add( @@ -691,6 +703,7 @@ mod tests { let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, + bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _, ) .unwrap_or(u32::MAX); pallet_bridge_messages::ensure_able_to_receive_confirmation::( diff --git a/bridges/bin/millau/runtime/src/rialto_messages.rs b/bridges/bin/millau/runtime/src/rialto_messages.rs index a61d7cce2978..12af2c328521 100644 --- a/bridges/bin/millau/runtime/src/rialto_messages.rs +++ b/bridges/bin/millau/runtime/src/rialto_messages.rs @@ -23,7 +23,7 @@ use bp_messages::{ target_chain::{ProvedMessages, SourceHeaderChain}, InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, }; -use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE}; +use bp_runtime::{ChainId, MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; use codec::{Decode, Encode}; use frame_support::{ @@ -52,7 +52,7 @@ pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifie pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload; /// Encoded Millau Call as it comes from Rialto. -pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; +pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; /// Messages proof for Rialto -> Millau messages. 
type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof; @@ -64,6 +64,7 @@ type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesD pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch< WithRialtoMessageBridge, crate::Runtime, + pallet_balances::Pallet, pallet_bridge_dispatch::DefaultInstance, >; @@ -72,12 +73,13 @@ pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDi pub struct WithRialtoMessageBridge; impl MessageBridge for WithRialtoMessageBridge { - const INSTANCE: InstanceId = RIALTO_BRIDGE_INSTANCE; - const RELAYER_FEE_PERCENT: u32 = 10; + const THIS_CHAIN_ID: ChainId = MILLAU_CHAIN_ID; + const BRIDGED_CHAIN_ID: ChainId = RIALTO_CHAIN_ID; type ThisChain = Millau; type BridgedChain = Rialto; + type BridgedMessagesInstance = crate::WithRialtoMessagesInstance; fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance { bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance)) @@ -96,8 +98,6 @@ impl messages::ChainWithMessages for Millau { type Signature = bp_millau::Signature; type Weight = Weight; type Balance = bp_millau::Balance; - - type MessagesInstance = crate::WithRialtoMessagesInstance; } impl messages::ThisChainWithMessages for Millau { @@ -112,9 +112,12 @@ impl messages::ThisChainWithMessages for Millau { } fn estimate_delivery_confirmation_transaction() -> MessageTransaction { - let inbound_data_size = - InboundLaneData::::encoded_size_hint(bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) - .unwrap_or(u32::MAX); + let inbound_data_size = InboundLaneData::::encoded_size_hint( + bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + 1, + 1, + ) + .unwrap_or(u32::MAX); MessageTransaction { dispatch_weight: bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, @@ -147,8 +150,6 @@ impl messages::ChainWithMessages for Rialto { type Signature = bp_rialto::Signature; type Weight = Weight; type Balance = bp_rialto::Balance; - - type MessagesInstance = pallet_bridge_messages::DefaultInstance; } impl messages::BridgedChainWithMessages for Rialto { @@ -170,6 +171,7 @@ impl messages::BridgedChainWithMessages for Rialto { fn estimate_delivery_transaction( message_payload: &[u8], + include_pay_dispatch_fee_cost: bool, message_dispatch_weight: Weight, ) -> MessageTransaction { let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); @@ -180,6 +182,11 @@ impl messages::BridgedChainWithMessages for Rialto { dispatch_weight: extra_bytes_in_payload .saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) .saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) + .saturating_sub(if include_pay_dispatch_fee_cost { + 0 + } else { + bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT + }) .saturating_add(message_dispatch_weight), size: message_payload_len .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) diff --git a/bridges/bin/rialto/node/Cargo.toml b/bridges/bin/rialto/node/Cargo.toml index 38272cd8c786..1c9ec8b5bb0a 100644 --- a/bridges/bin/rialto/node/Cargo.toml +++ b/bridges/bin/rialto/node/Cargo.toml @@ -52,7 +52,7 @@ sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "mast substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } [build-dependencies] -substrate-build-script-utils = "3.0.0" +substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } 
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] diff --git a/bridges/bin/rialto/node/src/chain_spec.rs b/bridges/bin/rialto/node/src/chain_spec.rs index 2b8706c16d83..4174cda24487 100644 --- a/bridges/bin/rialto/node/src/chain_spec.rs +++ b/bridges/bin/rialto/node/src/chain_spec.rs @@ -73,7 +73,7 @@ impl Alternative { "tokenDecimals": 9, "tokenSymbol": "RLT", "bridgeIds": { - "Millau": bp_runtime::MILLAU_BRIDGE_INSTANCE, + "Millau": bp_runtime::MILLAU_CHAIN_ID, } }) .as_object() @@ -142,12 +142,21 @@ impl Alternative { rialto_runtime::Runtime, pallet_bridge_messages::DefaultInstance, >::relayer_fund_account_id(), + derive_account_from_millau_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Alice"), + )), derive_account_from_millau_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Bob"), )), + derive_account_from_millau_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Charlie"), + )), derive_account_from_millau_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Dave"), )), + derive_account_from_millau_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Eve"), + )), derive_account_from_millau_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Ferdie"), )), @@ -181,7 +190,7 @@ fn testnet_genesis( changes_trie_config: Default::default(), }, balances: BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 40)).collect(), + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), }, aura: AuraConfig { authorities: Vec::new(), diff --git a/bridges/bin/rialto/node/src/cli.rs b/bridges/bin/rialto/node/src/cli.rs index 12f99b6b95ba..46323ed25c9e 100644 --- a/bridges/bin/rialto/node/src/cli.rs +++ b/bridges/bin/rialto/node/src/cli.rs @@ -63,7 +63,7 @@ pub enum Subcommand { Revert(sc_cli::RevertCmd), /// Inspect blocks or extrinsics. - Inspect(node_inspect::cli::InspectKeyCmd), + Inspect(node_inspect::cli::InspectCmd), /// Benchmark runtime pallets. Benchmark(frame_benchmarking_cli::BenchmarkCmd), diff --git a/bridges/bin/rialto/node/src/service.rs b/bridges/bin/rialto/node/src/service.rs index a0d3d311a018..35f923c77cc9 100644 --- a/bridges/bin/rialto/node/src/service.rs +++ b/bridges/bin/rialto/node/src/service.rs @@ -28,19 +28,20 @@ // ===================================================================================== // ===================================================================================== +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + use rialto_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{ExecutorProvider, RemoteBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; -use sc_finality_grandpa::SharedVoterState; + use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_consensus::SlotData; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; // Our native executor instance. 
native_executor_instance!( @@ -65,12 +66,7 @@ pub fn new_partial( sp_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( - sc_consensus_aura::AuraBlockImport< - Block, - FullClient, - sc_finality_grandpa::GrandpaBlockImport, - AuraPair, - >, + sc_finality_grandpa::GrandpaBlockImport, sc_finality_grandpa::LinkHalf, Option, ), @@ -93,7 +89,7 @@ pub fn new_partial( .transpose()?; let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( - &config, + config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), )?; let client = Arc::new(client); @@ -109,7 +105,7 @@ pub fn new_partial( config.transaction_pool.clone(), config.role.is_authority().into(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), ); @@ -120,14 +116,11 @@ pub fn new_partial( telemetry.as_ref().map(|x| x.handle()), )?; - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: aura_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import)), + block_import: grandpa_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), create_inherent_data_providers: move |_, ()| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); @@ -154,7 +147,7 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, - other: (aura_block_import, grandpa_link, telemetry), + other: (grandpa_block_import, grandpa_link, telemetry), }) } @@ -195,16 +188,15 @@ pub fn new_full(mut config: Configuration) -> Result .extra_sets .push(sc_finality_grandpa::grandpa_peers_set_config()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - })?; + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); @@ -217,6 +209,8 @@ pub fn new_full(mut config: Configuration) -> Result let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); + let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + let rpc_extensions_builder = { use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; @@ -231,7 +225,7 @@ pub fn new_full(mut config: Configuration) -> Result let justification_stream = grandpa_link.justification_stream(); let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + let shared_voter_state = shared_voter_state.clone(); let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(backend, 
Some(shared_authority_set.clone())); @@ -268,7 +262,6 @@ pub fn new_full(mut config: Configuration) -> Result on_demand: None, remote_blockchain: None, backend, - network_status_sinks, system_rpc_tx, config, telemetry: telemetry.as_mut(), @@ -287,7 +280,8 @@ pub fn new_full(mut config: Configuration) -> Result let slot_duration = sc_consensus_aura::slot_duration(&*client)?; let raw_slot_duration = slot_duration.slot_duration(); - let aura = sc_consensus_aura::start_aura::(StartAuraParams { + + let aura = sc_consensus_aura::start_aura::(StartAuraParams { slot_duration, client, select_chain, @@ -308,7 +302,9 @@ pub fn new_full(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), can_author_with, sync_oracle: network.clone(), + justification_sync_link: network.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), })?; @@ -332,7 +328,7 @@ pub fn new_full(mut config: Configuration) -> Result name: Some(name), observer_enabled: false, keystore, - is_authority: role.is_authority(), + local_role: role, telemetry: telemetry.as_ref().map(|x| x.handle()), }; @@ -349,7 +345,7 @@ pub fn new_full(mut config: Configuration) -> Result network, voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, - shared_voter_state: SharedVoterState::empty(), + shared_voter_state, telemetry: telemetry.as_ref().map(|x| x.handle()), }; @@ -398,24 +394,22 @@ pub fn new_light(mut config: Configuration) -> Result let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), on_demand.clone(), )); - let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( client.clone(), &(client.clone() as Arc<_>), select_chain, telemetry.as_ref().map(|x| x.handle()), )?; - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: aura_block_import, + block_import: grandpa_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import)), client: client.clone(), create_inherent_data_providers: move |_, ()| async move { @@ -435,21 +429,40 @@ pub fn new_light(mut config: Configuration) -> Result telemetry: telemetry.as_ref().map(|x| x.handle()), })?; - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - })?; + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers(&config, 
task_manager.spawn_handle(), client.clone(), network.clone()); } + let enable_grandpa = !config.disable_grandpa; + if enable_grandpa { + let name = config.network.node_name.clone(); + + let config = sc_finality_grandpa::Config { + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore: None, + local_role: config.role.clone(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + task_manager.spawn_handle().spawn_blocking( + "grandpa-observer", + sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, + ); + } + sc_service::spawn_tasks(sc_service::SpawnTasksParams { remote_blockchain: Some(backend.remote_blockchain()), transaction_pool, @@ -461,12 +474,10 @@ pub fn new_light(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), backend, network, - network_status_sinks, system_rpc_tx, telemetry: telemetry.as_mut(), })?; network_starter.start_network(); - Ok(task_manager) } diff --git a/bridges/bin/rialto/runtime/Cargo.toml b/bridges/bin/rialto/runtime/Cargo.toml index 63a48a255bd4..f66b8920c8a5 100644 --- a/bridges/bin/rialto/runtime/Cargo.toml +++ b/bridges/bin/rialto/runtime/Cargo.toml @@ -68,7 +68,7 @@ sp-version = { git = "https://github.com/paritytech/substrate", branch = "master libsecp256k1 = { version = "0.3.4", features = ["hmac"] } [build-dependencies] -substrate-wasm-builder = "3.0.0" +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] default = ["std"] diff --git a/bridges/bin/rialto/runtime/src/lib.rs b/bridges/bin/rialto/runtime/src/lib.rs index a2f492c997bb..219feda64693 100644 --- a/bridges/bin/rialto/runtime/src/lib.rs +++ b/bridges/bin/rialto/runtime/src/lib.rs @@ -359,6 +359,7 @@ parameter_types! { // For weight estimation, we assume that the most locks on an individual account will be 50. // This number may need to be adjusted in the future if this assumption no longer holds true. pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; } impl pallet_balances::Config for Runtime { @@ -372,6 +373,8 @@ impl pallet_balances::Config for Runtime { // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) type WeightInfo = (); type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; } parameter_types! { @@ -488,6 +491,7 @@ impl pallet_bridge_messages::Config for Runtime { GetDeliveryConfirmationTransactionFee, RootAccountForPayments, >; + type OnDeliveryConfirmed = (); type SourceHeaderChain = crate::millau_messages::Millau; type MessageDispatch = crate::millau_messages::FromMillauMessageDispatch; @@ -751,17 +755,23 @@ impl_runtime_apis! { ).ok() } - fn messages_dispatch_weight( + fn message_details( lane: bp_messages::LaneId, begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, - ) -> Vec<(bp_messages::MessageNonce, Weight, u32)> { + ) -> Vec> { (begin..=end).filter_map(|nonce| { - let encoded_payload = BridgeMillauMessages::outbound_message_payload(lane, nonce)?; + let message_data = BridgeMillauMessages::outbound_message_data(lane, nonce)?; let decoded_payload = millau_messages::ToMillauMessagePayload::decode( - &mut &encoded_payload[..] + &mut &message_data.payload[..] 
).ok()?; - Some((nonce, decoded_payload.weight, encoded_payload.len() as _)) + Some(bp_messages::MessageDetails { + nonce, + dispatch_weight: decoded_payload.weight, + size: message_data.payload.len() as _, + delivery_and_dispatch_fee: message_data.fee, + dispatch_fee_payment: decoded_payload.dispatch_fee_payment, + }) }) .collect() } @@ -853,6 +863,7 @@ impl_runtime_apis! { } use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; + use bp_runtime::messages::DispatchFeePayment; use bridge_runtime_common::messages; use pallet_bridge_messages::benchmarking::{ Pallet as MessagesBench, @@ -896,6 +907,7 @@ impl_runtime_apis! { weight: params.size as _, origin: dispatch_origin, call: message_payload, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, }; (message, pallet_bridge_messages::benchmarking::MESSAGE_FEE.into()) } @@ -903,16 +915,16 @@ impl_runtime_apis! { fn prepare_message_proof( params: MessageProofParams, ) -> (millau_messages::FromMillauMessagesProof, Weight) { - use crate::millau_messages::{Millau, WithMillauMessageBridge}; + use crate::millau_messages::WithMillauMessageBridge; use bp_messages::MessageKey; use bridge_runtime_common::{ - messages::ChainWithMessages, + messages::MessageBridge, messages_benchmarking::{ed25519_sign, prepare_message_proof}, }; use codec::Encode; use frame_support::weights::GetDispatchInfo; use pallet_bridge_messages::storage_keys; - use sp_runtime::traits::Header; + use sp_runtime::traits::{Header, IdentifyAccount}; let remark = match params.size { MessagesProofSize::Minimal(ref size) => vec![0u8; *size as _], @@ -925,20 +937,26 @@ impl_runtime_apis! { let (rialto_raw_public, rialto_raw_signature) = ed25519_sign( &call, &millau_account_id, + VERSION.spec_version, + bp_runtime::MILLAU_CHAIN_ID, + bp_runtime::RIALTO_CHAIN_ID, ); let rialto_public = MultiSigner::Ed25519(sp_core::ed25519::Public::from_raw(rialto_raw_public)); let rialto_signature = MultiSignature::Ed25519(sp_core::ed25519::Signature::from_raw( rialto_raw_signature, )); + if params.dispatch_fee_payment == DispatchFeePayment::AtTargetChain { + Self::endow_account(&rialto_public.clone().into_account()); + } + let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::< - Runtime, - ::MessagesInstance, + ::BridgedMessagesInstance, >( &message_key.lane_id, message_key.nonce, ).0; let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::< - ::MessagesInstance, + ::BridgedMessagesInstance, >( &lane_id, ).0; @@ -951,6 +969,7 @@ impl_runtime_apis! { Default::default(), ); + let dispatch_fee_payment = params.dispatch_fee_payment.clone(); prepare_message_proof::( params, make_millau_message_key, @@ -969,6 +988,7 @@ impl_runtime_apis! { rialto_public, rialto_signature, ), + dispatch_fee_payment, call: call.encode(), }.encode(), ) @@ -977,18 +997,14 @@ impl_runtime_apis! 
{ fn prepare_message_delivery_proof( params: MessageDeliveryProofParams, ) -> millau_messages::ToMillauMessagesDeliveryProof { - use crate::millau_messages::{Millau, WithMillauMessageBridge}; - use bridge_runtime_common::{ - messages::ChainWithMessages, - messages_benchmarking::prepare_message_delivery_proof, - }; + use crate::millau_messages::WithMillauMessageBridge; + use bridge_runtime_common::{messages_benchmarking::prepare_message_delivery_proof}; use sp_runtime::traits::Header; prepare_message_delivery_proof::( params, |lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key::< - Runtime, - ::MessagesInstance, + ::BridgedMessagesInstance, >( &lane_id, ).0, @@ -1001,6 +1017,18 @@ impl_runtime_apis! { ), ) } + + fn is_message_dispatched(nonce: bp_messages::MessageNonce) -> bool { + frame_system::Pallet::::events() + .into_iter() + .map(|event_record| event_record.event) + .any(|event| matches!( + event, + Event::BridgeDispatch(pallet_bridge_dispatch::Event::::MessageDispatched( + _, ([0, 0, 0, 0], nonce_from_event), _, + )) if nonce_from_event == nonce + )) + } } add_benchmark!( @@ -1028,7 +1056,7 @@ impl_runtime_apis! { /// The byte vector returned by this function should be signed with a Millau account private key. /// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key /// is also under his control. -pub fn millau_account_ownership_digest( +pub fn rialto_to_millau_account_ownership_digest( millau_call: &Call, rialto_account_id: AccountId, millau_spec_version: SpecVersion, @@ -1042,7 +1070,8 @@ where millau_call, rialto_account_id, millau_spec_version, - bp_runtime::RIALTO_BRIDGE_INSTANCE, + bp_runtime::RIALTO_CHAIN_ID, + bp_runtime::MILLAU_CHAIN_ID, ) } @@ -1095,6 +1124,7 @@ mod tests { bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT, ); let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add( @@ -1110,6 +1140,7 @@ mod tests { let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, + bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _, ) .unwrap_or(u32::MAX); pallet_bridge_messages::ensure_able_to_receive_confirmation::( diff --git a/bridges/bin/rialto/runtime/src/millau_messages.rs b/bridges/bin/rialto/runtime/src/millau_messages.rs index 30a34b9276ce..bf97478a0aa2 100644 --- a/bridges/bin/rialto/runtime/src/millau_messages.rs +++ b/bridges/bin/rialto/runtime/src/millau_messages.rs @@ -23,7 +23,7 @@ use bp_messages::{ target_chain::{ProvedMessages, SourceHeaderChain}, InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, }; -use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE}; +use bp_runtime::{ChainId, MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; use codec::{Decode, Encode}; use frame_support::{ @@ -52,12 +52,13 @@ pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifie pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload; /// Encoded Rialto Call as it comes from Millau. 
-pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; +pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; /// Call-dispatch based message dispatch for Millau -> Rialto messages. pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDispatch< WithMillauMessageBridge, crate::Runtime, + pallet_balances::Pallet, pallet_bridge_dispatch::DefaultInstance, >; @@ -72,12 +73,13 @@ pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessa pub struct WithMillauMessageBridge; impl MessageBridge for WithMillauMessageBridge { - const INSTANCE: InstanceId = MILLAU_BRIDGE_INSTANCE; - const RELAYER_FEE_PERCENT: u32 = 10; + const THIS_CHAIN_ID: ChainId = RIALTO_CHAIN_ID; + const BRIDGED_CHAIN_ID: ChainId = MILLAU_CHAIN_ID; type ThisChain = Rialto; type BridgedChain = Millau; + type BridgedMessagesInstance = crate::WithMillauMessagesInstance; fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance { bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance)) @@ -96,8 +98,6 @@ impl messages::ChainWithMessages for Rialto { type Signature = bp_rialto::Signature; type Weight = Weight; type Balance = bp_rialto::Balance; - - type MessagesInstance = crate::WithMillauMessagesInstance; } impl messages::ThisChainWithMessages for Rialto { @@ -112,9 +112,12 @@ impl messages::ThisChainWithMessages for Rialto { } fn estimate_delivery_confirmation_transaction() -> MessageTransaction { - let inbound_data_size = - InboundLaneData::::encoded_size_hint(bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) - .unwrap_or(u32::MAX); + let inbound_data_size = InboundLaneData::::encoded_size_hint( + bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + 1, + 1, + ) + .unwrap_or(u32::MAX); MessageTransaction { dispatch_weight: bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, @@ -147,8 +150,6 @@ impl messages::ChainWithMessages for Millau { type Signature = bp_millau::Signature; type Weight = Weight; type Balance = bp_millau::Balance; - - type MessagesInstance = pallet_bridge_messages::DefaultInstance; } impl messages::BridgedChainWithMessages for Millau { @@ -170,6 +171,7 @@ impl messages::BridgedChainWithMessages for Millau { fn estimate_delivery_transaction( message_payload: &[u8], + include_pay_dispatch_fee_cost: bool, message_dispatch_weight: Weight, ) -> MessageTransaction { let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); @@ -180,6 +182,11 @@ impl messages::BridgedChainWithMessages for Millau { dispatch_weight: extra_bytes_in_payload .saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) .saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) + .saturating_sub(if include_pay_dispatch_fee_cost { + 0 + } else { + bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT + }) .saturating_add(message_dispatch_weight), size: message_payload_len .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) @@ -256,3 +263,87 @@ impl MessagesParameter for RialtoToMillauMessagesParameter { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{AccountId, Call, ExistentialDeposit, Runtime, SystemCall, SystemConfig, VERSION}; + use bp_message_dispatch::CallOrigin; + use bp_messages::{ + target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, + MessageKey, + }; + use bp_runtime::{derive_account_id, messages::DispatchFeePayment, SourceAccount}; + use 
bridge_runtime_common::messages::target::{FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload}; + use frame_support::{ + traits::Currency, + weights::{GetDispatchInfo, WeightToFeePolynomial}, + }; + use sp_runtime::traits::Convert; + + #[test] + fn transfer_happens_when_dispatch_fee_is_paid_at_target_chain() { + // this test actually belongs to the `bridge-runtime-common` crate, but there we have no + // mock runtime. Making another one there just for this test, given that both crates + // live in a single repo, would be overkill + let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::().unwrap().into(); + ext.execute_with(|| { + let bridge = MILLAU_CHAIN_ID; + let call: Call = SystemCall::remark(vec![]).into(); + let dispatch_weight = call.get_dispatch_info().weight; + let dispatch_fee = ::WeightToFee::calc(&dispatch_weight); + assert!(dispatch_fee > 0); + + // create relayer account with minimal balance + let relayer_account: AccountId = [1u8; 32].into(); + let initial_amount = ExistentialDeposit::get(); + let _ = as Currency>::deposit_creating( + &relayer_account, + initial_amount, + ); + + // create dispatch account with minimal balance + dispatch fee + let dispatch_account = derive_account_id::<::SourceChainAccountId>( + bridge, + SourceAccount::Root, + ); + let dispatch_account = + ::AccountIdConverter::convert(dispatch_account); + let _ = as Currency>::deposit_creating( + &dispatch_account, + initial_amount + dispatch_fee, + ); + + // dispatch message with intention to pay dispatch fee at the target chain + FromMillauMessageDispatch::dispatch( + &relayer_account, + DispatchMessage { + key: MessageKey { + lane_id: Default::default(), + nonce: 0, + }, + data: DispatchMessageData { + payload: Ok(FromBridgedChainMessagePayload:: { + spec_version: VERSION.spec_version, + weight: dispatch_weight, + origin: CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, + call: FromBridgedChainEncodedMessageCall::new(call.encode()), + }), + fee: 1, + }, + }, + ); + + // ensure that the fee has been transferred from the dispatch account to the relayer account + assert_eq!( + as Currency>::free_balance(&relayer_account), + initial_amount + dispatch_fee, + ); + assert_eq!( + as Currency>::free_balance(&dispatch_account), + initial_amount, + ); + }); + } +} diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 83803d06deb5..07fe8910c21f 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -24,6 +24,7 @@ pallet-bridge-messages = { path = "../../modules/messages", default-features = f # Substrate dependencies frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } @@ -42,6 +43,7 @@ std = [ "pallet-bridge-dispatch/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", + "pallet-transaction-payment/std", "sp-core/std", "sp-runtime/std", "sp-state-machine/std", diff --git a/bridges/bin/runtime-common/README.md
b/bridges/bin/runtime-common/README.md index a7322af97399..38a47bfdcc9d 100644 --- a/bridges/bin/runtime-common/README.md +++ b/bridges/bin/runtime-common/README.md @@ -102,7 +102,9 @@ This trait represents this chain from bridge point of view. Let's review every m have declared dispatch weight larger than 50% of the maximal bridged extrinsic weight. - `MessageBridge::estimate_delivery_transaction`: you will need to return estimated dispatch weight and - size of the delivery transaction that delivers a given message to the target chain. + size of the delivery transaction that delivers a given message to the target chain. The transaction + weight must include the weight of the pay-dispatch-fee operation only when the + `include_pay_dispatch_fee_cost` argument is `true`. - `MessageBridge::transaction_payment`: you'll need to return fee that the submitter must pay for given transaction on bridged chain. The best case is when you have the same conversion diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs index e879aa17bfb2..72249e4f4e33 100644 --- a/bridges/bin/runtime-common/src/messages.rs +++ b/bridges/bin/runtime-common/src/messages.rs @@ -26,9 +26,16 @@ use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages}, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, }; -use bp_runtime::{InstanceId, Size, StorageProofChecker}; +use bp_runtime::{ + messages::{DispatchFeePayment, MessageDispatchResult}, + ChainId, Size, StorageProofChecker, +}; use codec::{Decode, Encode}; -use frame_support::{traits::Instance, weights::Weight, RuntimeDebug}; +use frame_support::{ + traits::{Currency, ExistenceRequirement, Instance}, + weights::{Weight, WeightToFeePolynomial}, + RuntimeDebug, +}; use hash_db::Hasher; use sp_runtime::{ traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul}, @@ -39,16 +46,20 @@ use sp_trie::StorageProof; /// Bidirectional message bridge. pub trait MessageBridge { - /// Instance id of this bridge. - const INSTANCE: InstanceId; - /// Relayer interest (in percents). const RELAYER_FEE_PERCENT: u32; + /// Identifier of this chain. + const THIS_CHAIN_ID: ChainId; + /// Identifier of the Bridged chain. + const BRIDGED_CHAIN_ID: ChainId; + /// This chain in context of message bridge. type ThisChain: ThisChainWithMessages; /// Bridged chain in context of message bridge. type BridgedChain: BridgedChainWithMessages; + /// Instance of the `pallet-bridge-messages` pallet at the Bridged chain. + type BridgedMessagesInstance: Instance; /// Convert Bridged chain balance into This chain balance. fn bridged_balance_to_this_balance(bridged_balance: BalanceOf>) -> BalanceOf>; @@ -71,9 +82,6 @@ pub trait ChainWithMessages { type Weight: From + PartialOrd; /// Type of balances that is used on the chain. type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From + Copy; - - /// Instance of the `pallet-bridge-messages` pallet. - type MessagesInstance: Instance; } /// Message related transaction parameters estimation. @@ -124,6 +132,7 @@ pub trait BridgedChainWithMessages: ChainWithMessages { /// Estimate size and weight of single message delivery transaction at the Bridged chain.
fn estimate_delivery_transaction( message_payload: &[u8], + include_pay_dispatch_fee_cost: bool, message_dispatch_weight: WeightOf, ) -> MessageTransaction>; @@ -139,7 +148,6 @@ pub(crate) type SignerOf = ::Signer; pub(crate) type SignatureOf = ::Signature; pub(crate) type WeightOf = ::Weight; pub(crate) type BalanceOf = ::Balance; -pub(crate) type MessagesInstanceOf = ::MessagesInstance; pub(crate) type CallOf = ::Call; @@ -326,8 +334,19 @@ pub mod source { relayer_fee_percent: u32, ) -> Result>, &'static str> { // the fee (in Bridged tokens) of all transactions that are made on the Bridged chain - let delivery_transaction = - BridgedChain::::estimate_delivery_transaction(&payload.call, payload.weight.into()); + // + // if we're going to pay dispatch fee at the target chain, then we don't include weight + // of the message dispatch in the delivery transaction cost + let pay_dispatch_fee_at_target_chain = payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; + let delivery_transaction = BridgedChain::::estimate_delivery_transaction( + &payload.call, + pay_dispatch_fee_at_target_chain, + if pay_dispatch_fee_at_target_chain { + 0.into() + } else { + payload.weight.into() + }, + ); let delivery_transaction_fee = BridgedChain::::transaction_payment(delivery_transaction); // the fee (in This tokens) of all transactions that are made on This chain @@ -357,7 +376,6 @@ pub mod source { ) -> Result, &'static str> where ThisRuntime: pallet_bridge_grandpa::Config, - ThisRuntime: pallet_bridge_messages::Config>>, HashOf>: Into>::BridgedChain>>, { @@ -372,10 +390,8 @@ pub mod source { |storage| { // Messages delivery proof is just proof of single storage key read => any error // is fatal. - let storage_inbound_lane_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::< - ThisRuntime, - MessagesInstanceOf>, - >(&lane); + let storage_inbound_lane_data_key = + pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&lane); let raw_inbound_lane_data = storage .read_value(storage_inbound_lane_data_key.0.as_ref()) .map_err(|_| "Failed to read inbound lane state from storage proof")? @@ -406,7 +422,7 @@ pub mod target { AccountIdOf>, SignerOf>, SignatureOf>, - FromBridgedChainEncodedMessageCall, + FromBridgedChainEncodedMessageCall>>, >; /// Messages proof from bridged chain: @@ -444,33 +460,51 @@ pub mod target { /// Our Call is opaque (`Vec`) for Bridged chain. So it is encoded, prefixed with /// vector length. Custom decode implementation here is exactly to deal with this. #[derive(Decode, Encode, RuntimeDebug, PartialEq)] - pub struct FromBridgedChainEncodedMessageCall { - pub(crate) encoded_call: Vec, - pub(crate) _marker: PhantomData, + pub struct FromBridgedChainEncodedMessageCall { + encoded_call: Vec, + _marker: PhantomData, + } + + impl FromBridgedChainEncodedMessageCall { + /// Create encoded call. + pub fn new(encoded_call: Vec) -> Self { + FromBridgedChainEncodedMessageCall { + encoded_call, + _marker: PhantomData::default(), + } + } } - impl From> for Result>, ()> { - fn from(encoded_call: FromBridgedChainEncodedMessageCall) -> Self { - CallOf::>::decode(&mut &encoded_call.encoded_call[..]).map_err(drop) + impl From> for Result { + fn from(encoded_call: FromBridgedChainEncodedMessageCall) -> Self { + DecodedCall::decode(&mut &encoded_call.encoded_call[..]).map_err(drop) } } /// Dispatching Bridged -> This chain messages. 
#[derive(RuntimeDebug, Clone, Copy)] - pub struct FromBridgedChainMessageDispatch { - _marker: PhantomData<(B, ThisRuntime, ThisDispatchInstance)>, + pub struct FromBridgedChainMessageDispatch { + _marker: PhantomData<(B, ThisRuntime, ThisCurrency, ThisDispatchInstance)>, } - impl - MessageDispatch< as ChainWithMessages>::Balance> - for FromBridgedChainMessageDispatch + impl + MessageDispatch>, BalanceOf>> + for FromBridgedChainMessageDispatch where ThisDispatchInstance: frame_support::traits::Instance, - ThisRuntime: pallet_bridge_dispatch::Config, - >::Event: - From>, - pallet_bridge_dispatch::Pallet: - bp_message_dispatch::MessageDispatch<(LaneId, MessageNonce), Message = FromBridgedChainMessagePayload>, + ThisRuntime: pallet_bridge_dispatch::Config + + pallet_transaction_payment::Config, + ::OnChargeTransaction: + pallet_transaction_payment::OnChargeTransaction>>, + ThisCurrency: Currency>, Balance = BalanceOf>>, + >::Event: From< + pallet_bridge_dispatch::RawEvent<(LaneId, MessageNonce), AccountIdOf>, ThisDispatchInstance>, + >, + pallet_bridge_dispatch::Pallet: bp_message_dispatch::MessageDispatch< + AccountIdOf>, + (LaneId, MessageNonce), + Message = FromBridgedChainMessagePayload, + >, { type DispatchPayload = FromBridgedChainMessagePayload; @@ -480,13 +514,26 @@ pub mod target { message.data.payload.as_ref().map(|payload| payload.weight).unwrap_or(0) } - fn dispatch(message: DispatchMessage>>) { + fn dispatch( + relayer_account: &AccountIdOf>, + message: DispatchMessage>>, + ) -> MessageDispatchResult { let message_id = (message.key.lane_id, message.key.nonce); pallet_bridge_dispatch::Pallet::::dispatch( - B::INSTANCE, + B::BRIDGED_CHAIN_ID, + B::THIS_CHAIN_ID, message_id, message.data.payload.map_err(drop), - ); + |dispatch_origin, dispatch_weight| { + ThisCurrency::transfer( + dispatch_origin, + relayer_account, + ThisRuntime::WeightToFee::calc(&dispatch_weight), + ExistenceRequirement::AllowDeath, + ) + .map_err(drop) + }, + ) } } @@ -511,7 +558,7 @@ pub mod target { ) -> Result>>>, &'static str> where ThisRuntime: pallet_bridge_grandpa::Config, - ThisRuntime: pallet_bridge_messages::Config>>, + ThisRuntime: pallet_bridge_messages::Config, HashOf>: Into>::BridgedChain>>, { @@ -524,7 +571,7 @@ pub mod target { StorageProof::new(bridged_storage_proof), |storage_adapter| storage_adapter, ) - .map(|storage| StorageProofCheckerAdapter::<_, B, ThisRuntime> { + .map(|storage| StorageProofCheckerAdapter::<_, B> { storage, _dummy: Default::default(), }) @@ -564,31 +611,29 @@ pub mod target { fn read_raw_message(&self, message_key: &MessageKey) -> Option>; } - struct StorageProofCheckerAdapter { + struct StorageProofCheckerAdapter { storage: StorageProofChecker, - _dummy: sp_std::marker::PhantomData<(B, ThisRuntime)>, + _dummy: sp_std::marker::PhantomData, } - impl MessageProofParser for StorageProofCheckerAdapter + impl MessageProofParser for StorageProofCheckerAdapter where H: Hasher, B: MessageBridge, - ThisRuntime: pallet_bridge_messages::Config>>, { fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option> { - let storage_outbound_lane_data_key = pallet_bridge_messages::storage_keys::outbound_lane_data_key::< - MessagesInstanceOf>, - >(lane_id); + let storage_outbound_lane_data_key = + pallet_bridge_messages::storage_keys::outbound_lane_data_key::(lane_id); self.storage .read_value(storage_outbound_lane_data_key.0.as_ref()) .ok()? 
} fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - let storage_message_key = pallet_bridge_messages::storage_keys::message_key::< - ThisRuntime, - MessagesInstanceOf>, - >(&message_key.lane_id, message_key.nonce); + let storage_message_key = pallet_bridge_messages::storage_keys::message_key::( + &message_key.lane_id, + message_key.nonce, + ); self.storage.read_value(storage_message_key.0.as_ref()).ok()? } } @@ -692,11 +737,13 @@ mod tests { struct OnThisChainBridge; impl MessageBridge for OnThisChainBridge { - const INSTANCE: InstanceId = *b"this"; const RELAYER_FEE_PERCENT: u32 = 10; + const THIS_CHAIN_ID: ChainId = *b"this"; + const BRIDGED_CHAIN_ID: ChainId = *b"brdg"; type ThisChain = ThisChain; type BridgedChain = BridgedChain; + type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance; fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance { ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32) @@ -708,11 +755,13 @@ mod tests { struct OnBridgedChainBridge; impl MessageBridge for OnBridgedChainBridge { - const INSTANCE: InstanceId = *b"brdg"; const RELAYER_FEE_PERCENT: u32 = 20; + const THIS_CHAIN_ID: ChainId = *b"brdg"; + const BRIDGED_CHAIN_ID: ChainId = *b"this"; type ThisChain = BridgedChain; type BridgedChain = ThisChain; + type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance; fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance { unreachable!() @@ -815,8 +864,6 @@ mod tests { type Signature = ThisChainSignature; type Weight = frame_support::weights::Weight; type Balance = ThisChainBalance; - - type MessagesInstance = pallet_bridge_messages::DefaultInstance; } impl ThisChainWithMessages for ThisChain { @@ -853,6 +900,7 @@ mod tests { fn estimate_delivery_transaction( _message_payload: &[u8], + _include_pay_dispatch_fee_cost: bool, _message_dispatch_weight: WeightOf, ) -> MessageTransaction> { unreachable!() @@ -872,8 +920,6 @@ mod tests { type Signature = BridgedChainSignature; type Weight = frame_support::weights::Weight; type Balance = BridgedChainBalance; - - type MessagesInstance = pallet_bridge_messages::DefaultInstance; } impl ThisChainWithMessages for BridgedChain { @@ -908,6 +954,7 @@ mod tests { fn estimate_delivery_transaction( _message_payload: &[u8], + _include_pay_dispatch_fee_cost: bool, message_dispatch_weight: WeightOf, ) -> MessageTransaction> { MessageTransaction { @@ -932,6 +979,7 @@ mod tests { spec_version: 1, weight: 100, origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, call: ThisChainCall::Transfer.encode(), } .encode(); @@ -946,10 +994,10 @@ mod tests { spec_version: 1, weight: 100, origin: bp_message_dispatch::CallOrigin::SourceRoot, - call: target::FromBridgedChainEncodedMessageCall:: { - encoded_call: ThisChainCall::Transfer.encode(), - _marker: PhantomData::default(), - }, + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, + call: target::FromBridgedChainEncodedMessageCall::::new( + ThisChainCall::Transfer.encode(), + ), } ); assert_eq!(Ok(ThisChainCall::Transfer), message_on_this_chain.call.into()); @@ -963,6 +1011,7 @@ mod tests { spec_version: 1, weight: 100, origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![42], } } @@ -983,12 +1032,27 @@ mod tests { Ok(ThisChainBalance(EXPECTED_MINIMAL_FEE)), ); + // let's check if estimation is less than hardcoded, 
if dispatch is paid at target chain + let mut payload_with_pay_on_target = regular_outbound_message_payload(); + payload_with_pay_on_target.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; + let fee_at_source = source::estimate_message_dispatch_and_delivery_fee::( + &payload_with_pay_on_target, + OnThisChainBridge::RELAYER_FEE_PERCENT, + ) + .expect("estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message"); + assert!( + fee_at_source < EXPECTED_MINIMAL_FEE.into(), + "Computed fee {:?} without prepaid dispatch must be less than the fee with prepaid dispatch {}", + fee_at_source, + EXPECTED_MINIMAL_FEE, + ); + // and now check that the verifier checks the fee assert_eq!( source::FromThisChainMessageVerifier::::verify_message( &Sender::Root, &ThisChainBalance(1), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ), @@ -998,7 +1062,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::Root, &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ) @@ -1013,6 +1077,7 @@ mod tests { spec_version: 1, weight: 100, origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![42], }; @@ -1021,7 +1086,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::Signed(ThisChainAccountId(0)), &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ), @@ -1031,7 +1096,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::None, &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ), @@ -1041,7 +1106,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::Root, &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ) @@ -1056,6 +1121,7 @@ mod tests { spec_version: 1, weight: 100, origin: bp_message_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)), + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![42], }; @@ -1064,7 +1130,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::Signed(ThisChainAccountId(0)), &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ), @@ -1074,7 +1140,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::Signed(ThisChainAccountId(1)), &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &test_lane_outbound_data(), &payload, ) @@ -1102,7 +1168,7 @@ mod tests { source::FromThisChainMessageVerifier::::verify_message( &Sender::Root, &ThisChainBalance(1_000_000), - &TEST_LANE_ID, + TEST_LANE_ID, &OutboundLaneData { latest_received_nonce: 100, latest_generated_nonce: 100 + MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE + 1, @@ -1123,6 +1189,7 @@ mod tests { spec_version: 1, weight: 5, origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![1, 2, 3, 4, 5, 6], },) .is_err() @@ -1138,6 +1205,7 @@ mod tests { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1, origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![1, 2, 3, 4, 5, 6], },) .is_err() @@ -1153,6 +1221,7 @@ mod tests { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, origin: bp_message_dispatch::CallOrigin::SourceRoot, + 
dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![0; source::maximal_message_size::() as usize + 1], },) .is_err() @@ -1168,6 +1237,7 @@ mod tests { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![0; source::maximal_message_size::() as _], },), Ok(()), diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index 639e5f6c5049..3785f4a4607f 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -25,6 +25,7 @@ use crate::messages::{ }; use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload}; +use bp_runtime::ChainId; use codec::Encode; use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH}; use frame_support::weights::Weight; @@ -37,7 +38,13 @@ use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder /// Generate ed25519 signature to be used in `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`. /// /// Returns public key of the signer and the signature itself. -pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode) -> ([u8; 32], [u8; 64]) { +pub fn ed25519_sign( + target_call: &impl Encode, + source_account_id: &impl Encode, + target_spec_version: u32, + source_chain_id: ChainId, + target_chain_id: ChainId, +) -> ([u8; 32], [u8; 64]) { // key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html) let target_secret = SecretKey::from_bytes(&[ 157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050, @@ -51,9 +58,13 @@ pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode) target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes()); let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid"); - let mut signature_message = Vec::new(); - target_call.encode_to(&mut signature_message); - source_account_id.encode_to(&mut signature_message); + let signature_message = pallet_bridge_dispatch::account_ownership_digest( + target_call, + source_account_id, + target_spec_version, + source_chain_id, + target_chain_id, + ); let target_origin_signature = target_pair .try_sign(&signature_message) .expect("Ed25519 try_sign should not fail in benchmarks"); diff --git a/bridges/ci.Dockerfile b/bridges/ci.Dockerfile new file mode 100644 index 000000000000..0bd2bc4dae83 --- /dev/null +++ b/bridges/ci.Dockerfile @@ -0,0 +1,53 @@ +# This file is the "runtime" part of a builder-pattern Dockerfile; it's used in CI. +# The only difference is that the compilation happens externally, +# so COPY has a different source.
+FROM ubuntu:20.04 + +# show backtraces +ENV RUST_BACKTRACE 1 +ENV DEBIAN_FRONTEND=noninteractive + +RUN set -eux; \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + curl ca-certificates libssl-dev && \ + update-ca-certificates && \ + groupadd -g 1000 user && \ + useradd -u 1000 -g user -s /bin/sh -m user && \ + # apt clean up + apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# switch to non-root user +USER user + +WORKDIR /home/user + +ARG PROJECT=ethereum-poa-relay + +COPY --chown=user:user ./${PROJECT} ./ +COPY --chown=user:user ./bridge-entrypoint.sh ./ + +# check if executable works in this container +RUN ./${PROJECT} --version + +ENV PROJECT=$PROJECT +ENTRYPOINT ["/home/user/bridge-entrypoint.sh"] + +# metadata +ARG VCS_REF=master +ARG BUILD_DATE="" +ARG VERSION="" + +LABEL org.opencontainers.image.title="${PROJECT}" \ + org.opencontainers.image.description="${PROJECT} - component of Parity Bridges Common" \ + org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/ci.Dockerfile" \ + org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/ci.Dockerfile" \ + org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/README.md" \ + org.opencontainers.image.created="${BUILD_DATE}" \ + org.opencontainers.image.version="${VERSION}" \ + org.opencontainers.image.revision="${VCS_REF}" \ + org.opencontainers.image.authors="devops-team@parity.io" \ + org.opencontainers.image.vendor="Parity Technologies" \ + org.opencontainers.image.licenses="GPL-3.0 License" diff --git a/bridges/deny.toml b/bridges/deny.toml index 7f91bce7c9f8..e754b8e9bd36 100644 --- a/bridges/deny.toml +++ b/bridges/deny.toml @@ -53,12 +53,22 @@ ignore = [ "RUSTSEC-2020-0146", # yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice. "RUSTSEC-2018-0006", - # We need to wait until Substrate updates their `wasmtime` dependency to fix this. - # TODO: See issue #676: https://github.com/paritytech/parity-bridges-common/issues/676 - "RUSTSEC-2021-0013", - # We need to wait until Substrate updates their `hyper` dependency to fix this. - # TODO: See issue #710: https://github.com/paritytech/parity-bridges-common/issues/681 - "RUSTSEC-2021-0020", + # Comes from wasmtime via Substrate: 'cranelift-codegen' + "RUSTSEC-2021-0067", + # Comes from libp2p via Substrate: 'aes-soft', 'aesni', 'block-cipher', 'stream-cipher' + "RUSTSEC-2021-0060", + "RUSTSEC-2021-0059", + "RUSTSEC-2020-0057", + "RUSTSEC-2021-0064", + # Comes from jsonrpc via Substrate: 'failure', 'net2', 'lock_api' + "RUSTSEC-2020-0036", + "RUSTSEC-2020-0077", + "RUSTSEC-2019-0036", + "RUSTSEC-2020-0070", + # Comes from honggfuzz via storage-proof-fuzzer: 'memmap' + "RUSTSEC-2020-0077", + # Comes from time: 'stweb' (will be fixed in upcoming time 0.3) + "RUSTSEC-2020-0056" ] # Threshold for security vulnerabilities, any vulnerability with a CVSS score # lower than the range specified will be ignored. 
Note that ignored advisories @@ -134,6 +144,7 @@ license-files = [ # Each entry is a crate relative path, and the (opaque) hash of its contents { path = "LICENSE", hash = 0xbd0eed23 } ] + [[licenses.clarify]] name = "webpki" expression = "ISC" diff --git a/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh b/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh index 2f051d40d5c8..432cdd6b72c5 100755 --- a/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh +++ b/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 3 +sleep 20 curl -v http://poa-node-arthur:8545/api/health curl -v http://poa-node-bertha:8545/api/health curl -v http://poa-node-carlos:8545/api/health diff --git a/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh b/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh index 1e51d2d32d1a..1677cc1accde 100755 --- a/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh +++ b/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 10 +sleep 20 curl -v http://rialto-node-bob:9933/health curl -v http://poa-node-bertha:8545/api/health diff --git a/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh b/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh index 7be12000b915..131a31ffbea9 100755 --- a/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh +++ b/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 3 +sleep 20 curl -v http://poa-node-arthur:8545/api/health curl -v http://poa-node-bertha:8545/api/health curl -v http://poa-node-carlos:8545/api/health diff --git a/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh b/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh index 48e5a2817991..26be814b6941 100755 --- a/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh +++ b/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 3 +sleep 20 curl -v http://millau-node-bob:9933/health curl -v http://rialto-node-bob:9933/health diff --git a/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh index 378aeedd9f99..04bde07ad971 100755 --- a/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh +++ b/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 3 +sleep 20 curl -v http://millau-node-bob:9933/health curl -v http://rialto-node-bob:9933/health diff --git a/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh b/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh index d8d3290428f0..4b50ac086a88 100755 --- 
a/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh +++ b/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 3 +sleep 20 curl -v http://millau-node-alice:9933/health curl -v http://rialto-node-alice:9933/health diff --git a/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh b/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh index 740a9a973960..4a96ade6ec85 100755 --- a/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh +++ b/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -xeu -sleep 3 +sleep 20 curl -v http://millau-node-alice:9933/health curl -v https://westend-rpc.polkadot.io:443/health diff --git a/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh b/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh new file mode 100755 index 000000000000..2736243c5a48 --- /dev/null +++ b/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Run an instance of the Rococo -> Wococo header sync. +# +# Right now this relies on local Wococo and Rococo networks +# running (which include `pallet-bridge-grandpa` in their +# runtimes), but in the future it could use use public RPC nodes. + +set -xeu + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge RococoToWococo \ + --source-host 127.0.0.1 \ + --source-port 9955 \ + --target-host 127.0.0.1 \ + --target-port 9944 \ + --target-signer //Alice + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers RococoToWococo \ + --source-host 127.0.0.1 \ + --source-port 9955 \ + --target-host 127.0.0.1 \ + --target-port 9944 \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 \ diff --git a/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh b/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh new file mode 100755 index 000000000000..b3a7e383d9b9 --- /dev/null +++ b/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Run an instance of the Wococo -> Rococo header sync. +# +# Right now this relies on local Wococo and Rococo networks +# running (which include `pallet-bridge-grandpa` in their +# runtimes), but in the future it could use use public RPC nodes. + +set -xeu + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge WococoToRococo \ + --source-host 127.0.0.1 \ + --source-port 9944 \ + --target-host 127.0.0.1 \ + --target-port 9955 \ + --target-signer //Alice + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers WococoToRococo \ + --source-host 127.0.0.1 \ + --source-port 9944 \ + --target-host 127.0.0.1 \ + --target-port 9955 \ + --target-signer //Charlie \ + --prometheus-host=0.0.0.0 \ diff --git a/bridges/deployments/local-scripts/run-rococo-bob-node.sh b/bridges/deployments/local-scripts/run-rococo-bob-node.sh deleted file mode 100755 index 550d8cf7553e..000000000000 --- a/bridges/deployments/local-scripts/run-rococo-bob-node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Rococo Substrate bridge node. 
-# To override the default port just export ROCOCO_PORT=9966 -# -# Note: This script will not work out of the box with the bridges -# repo since it relies on a Polkadot binary. - -ROCOCO_BOB_PORT="${ROCOCO_BOB_PORT:-9966}" - -RUST_LOG=runtime=trace,runtime::bridge=trace \ -./target/debug/polkadot --chain=rococo-local --bob --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33055 --rpc-port 9935 --ws-port $ROCOCO_BOB_PORT \ diff --git a/bridges/deployments/local-scripts/run-rococo-node.sh b/bridges/deployments/local-scripts/run-rococo-node.sh index 073d39a3eaf6..4d43321eba0c 100755 --- a/bridges/deployments/local-scripts/run-rococo-node.sh +++ b/bridges/deployments/local-scripts/run-rococo-node.sh @@ -9,6 +9,6 @@ ROCOCO_PORT="${ROCOCO_PORT:-9955}" RUST_LOG=runtime=trace,runtime::bridge=trace \ -./target/debug/polkadot --chain=rococo-local --alice --tmp \ +./target/debug/polkadot --chain=rococo-dev --alice --tmp \ --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ --port 33044 --rpc-port 9934 --ws-port $ROCOCO_PORT \ diff --git a/bridges/deployments/local-scripts/run-wococo-node.sh b/bridges/deployments/local-scripts/run-wococo-node.sh new file mode 100755 index 000000000000..f314c0c7fa09 --- /dev/null +++ b/bridges/deployments/local-scripts/run-wococo-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Wococo Substrate bridge node. +# To override the default port just export WOCOCO_PORT=9955 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. + +WOCOCO_PORT="${WOCOCO_PORT:-9944}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=wococo-dev --alice --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33033 --rpc-port 9933 --ws-port $WOCOCO_PORT \ diff --git a/bridges/deployments/types-millau.json b/bridges/deployments/types-millau.json index 2414620733f4..a15527f59d79 100644 --- a/bridges/deployments/types-millau.json +++ b/bridges/deployments/types-millau.json @@ -67,7 +67,7 @@ "set_id": "SetId" }, "Id": "[u8; 4]", - "InstanceId": "Id", + "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", "MessageId": "(Id, u64)", @@ -77,9 +77,18 @@ }, "InboundRelayer": "AccountId", "InboundLaneData": { - "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "relayers": "Vec", "last_confirmed_nonce": "MessageNonce" }, + "UnrewardedRelayer": { + "relayer": "RelayerId", + "messages": "DeliveredMessages" + }, + "DeliveredMessages": { + "begin": "MessageNonce", + "end": "MessageNonce", + "dispatch_results": "BitVec" + }, "OutboundLaneData": { "latest_generated_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", @@ -96,6 +105,7 @@ "spec_version": "SpecVersion", "weight": "Weight", "origin": "CallOrigin", + "dispatch_fee_payment": "DispatchFeePayment", "call": "BridgedOpaqueCall" }, "CallOrigin": { @@ -105,6 +115,12 @@ "SourceAccount": "SourceAccountId" } }, + "DispatchFeePayment": { + "_enum": { + "AtSourceChain": "()", + "AtTargetChain": "()" + } + }, "MultiSigner": { "_enum": { "Ed25519": "H256", diff --git a/bridges/deployments/types-rialto.json b/bridges/deployments/types-rialto.json index bd746e003ea1..5375e43aea45 100644 --- a/bridges/deployments/types-rialto.json +++ b/bridges/deployments/types-rialto.json @@ -67,7 +67,7 @@ "set_id": "SetId" }, "Id": "[u8; 4]", - "InstanceId": "Id", + "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", "MessageId": "(Id, u64)", @@ -77,9 +77,18 @@ }, 
"InboundRelayer": "AccountId", "InboundLaneData": { - "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "relayers": "Vec", "last_confirmed_nonce": "MessageNonce" }, + "UnrewardedRelayer": { + "relayer": "RelayerId", + "messages": "DeliveredMessages" + }, + "DeliveredMessages": { + "begin": "MessageNonce", + "end": "MessageNonce", + "dispatch_results": "BitVec" + }, "OutboundLaneData": { "latest_generated_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", @@ -96,6 +105,7 @@ "spec_version": "SpecVersion", "weight": "Weight", "origin": "CallOrigin", + "dispatch_fee_payment": "DispatchFeePayment", "call": "BridgedOpaqueCall" }, "CallOrigin": { @@ -105,6 +115,12 @@ "SourceAccount": "SourceAccountId" } }, + "DispatchFeePayment": { + "_enum": { + "AtSourceChain": "()", + "AtTargetChain": "()" + } + }, "MultiSigner": { "_enum": { "Ed25519": "H256", diff --git a/bridges/deployments/types-rococo.json b/bridges/deployments/types-rococo.json new file mode 100644 index 000000000000..6490266809f5 --- /dev/null +++ b/bridges/deployments/types-rococo.json @@ -0,0 +1,147 @@ +{ + "--1": "Rococo Types", + "RococoBalance": "u128", + "RococoBlockHash": "H256", + "RococoBlockNumber": "u32", + "RococoHeader": "Header", + "--2": "Wococo Types", + "WococoBalance": "RococoBalance", + "WococoBlockHash": "RococoBlockHash", + "WococoBlockNumber": "RococoBlockNumber", + "WococoHeader": "RococoHeader", + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { + "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "ChainId": "Id", + "LaneId": "Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec", + "last_confirmed_nonce": "MessageNonce" + }, + "UnrewardedRelayer": { + "relayer": "RelayerId", + "messages": "DeliveredMessages" + }, + "DeliveredMessages": { + "begin": "MessageNonce", + "end": "MessageNonce", + "dispatch_results": "BitVec" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "dispatch_fee_payment": "DispatchFeePayment", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "DispatchFeePayment": { + "_enum": { + "AtSourceChain": "()", + "AtTargetChain": "()" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": 
"LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + "AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + "votes_ancestries": "Vec" + }, + "Fee": "RococoBalance", + "Balance": "RococoBalance", + "BlockHash": "RococoBlockHash", + "BlockNumber": "RococoBlockNumber", + "BridgedBlockHash": "WococoBlockHash", + "BridgedBlockNumber": "WococoBlockNumber", + "BridgedHeader": "WococoHeader", + "Parameter": { + "_enum": { + "RococoToWococoConversionRate": "u128" + } + } +} diff --git a/bridges/deployments/types-wococo.json b/bridges/deployments/types-wococo.json new file mode 100644 index 000000000000..1a4084e94cfc --- /dev/null +++ b/bridges/deployments/types-wococo.json @@ -0,0 +1,148 @@ +{ + "--1": "Rococo Types", + "RococoBalance": "u128", + "RococoBlockHash": "H256", + "RococoBlockNumber": "u32", + "RococoHeader": "Header", + "--2": "Wococo Types", + "WococoBalance": "RococoBalance", + "WococoBlockHash": "RococoBlockHash", + "WococoBlockNumber": "RococoBlockNumber", + "WococoHeader": "RococoHeader", + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { + "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "ChainId": "Id", + "LaneId": "Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec", + "last_confirmed_nonce": "MessageNonce" + }, + "UnrewardedRelayer": { + "relayer": "RelayerId", + "messages": "DeliveredMessages" + }, + "DeliveredMessages": { + "begin": "MessageNonce", + "end": "MessageNonce", + "dispatch_results": "BitVec" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "dispatch_fee_payment": "DispatchFeePayment", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "DispatchFeePayment": { + "_enum": { + "AtSourceChain": "()", + "AtTargetChain": "()" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", 
+ "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + "AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + "votes_ancestries": "Vec" + }, + "Fee": "WococoBalance", + "Balance": "WococoBalance", + "Hash": "WococoBlockHash", + "BlockHash": "WococoBlockHash", + "BlockNumber": "WococoBlockNumber", + "BridgedBlockHash": "RococoBlockHash", + "BridgedBlockNumber": "RococoBlockNumber", + "BridgedHeader": "RococoHeader", + "Parameter": { + "_enum": { + "WococoToRococoConversionRate": "u128" + } + } +} diff --git a/bridges/deployments/types/build.sh b/bridges/deployments/types/build.sh index 52605e7e4da6..1bcfd23ee063 100755 --- a/bridges/deployments/types/build.sh +++ b/bridges/deployments/types/build.sh @@ -2,14 +2,21 @@ # The script generates JSON type definition files in `./deployment` directory to be used for # JS clients. -# Both networks have a lot of common types, so to avoid duplication we merge `common.json` file with -# chain-specific definitions in `rialto|millau.json`. +# +# It works by creating definitions for each side of the different bridge pairs we support +# (Rialto<>Millau and Rococo<>Wococo at the moment). +# +# To avoid duplication each bridge pair has a JSON file with common definitions, as well as a +# general JSON file with common definitions regardless of the bridge pair. These files are then +# merged with chain-specific type definitions. -set -exu +set -eux # Make sure we are in the right dir. cd $(dirname $(realpath $0)) -# Create rialto and millau types. 
-jq -s '.[0] * .[1]' common.json rialto.json > ../types-rialto.json -jq -s '.[0] * .[1]' common.json millau.json > ../types-millau.json +# Create types for our supported bridge pairs (Rialto<>Millau, Rococo<>Wococo) +jq -s '.[0] * .[1] * .[2]' rialto-millau.json common.json rialto.json > ../types-rialto.json +jq -s '.[0] * .[1] * .[2]' rialto-millau.json common.json millau.json > ../types-millau.json +jq -s '.[0] * .[1] * .[2]' rococo-wococo.json common.json rococo.json > ../types-rococo.json +jq -s '.[0] * .[1] * .[2]' rococo-wococo.json common.json wococo.json > ../types-wococo.json diff --git a/bridges/deployments/types/common.json b/bridges/deployments/types/common.json index cf881288694c..d3395ea687fd 100644 --- a/bridges/deployments/types/common.json +++ b/bridges/deployments/types/common.json @@ -1,54 +1,4 @@ { - "--1": "Millau Types", - "MillauBalance": "u64", - "MillauBlockHash": "H512", - "MillauBlockNumber": "u64", - "MillauHeader": { - "parent_Hash": "MillauBlockHash", - "number": "Compact", - "state_root": "MillauBlockHash", - "extrinsics_root": "MillauBlockHash", - "digest": "MillauDigest" - }, - "MillauDigest": { - "logs": "Vec" - }, - "MillauDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "MillauBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, - "--2": "Rialto Types", - "RialtoBalance": "u128", - "RialtoBlockHash": "H256", - "RialtoBlockNumber": "u32", - "RialtoHeader": { - "parent_Hash": "RialtoBlockHash", - "number": "Compact", - "state_root": "RialtoBlockHash", - "extrinsics_root": "RialtoBlockHash", - "digest": "RialtoDigest" - }, - "RialtoDigest": { - "logs": "Vec" - }, - "RialtoDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "RialtoBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, "--3": "Common types", "Address": "AccountId", "LookupSource": "AccountId", @@ -67,7 +17,7 @@ "set_id": "SetId" }, "Id": "[u8; 4]", - "InstanceId": "Id", + "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", "MessageId": "(Id, u64)", @@ -77,9 +27,18 @@ }, "InboundRelayer": "AccountId", "InboundLaneData": { - "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "relayers": "Vec", "last_confirmed_nonce": "MessageNonce" }, + "UnrewardedRelayer": { + "relayer": "RelayerId", + "messages": "DeliveredMessages" + }, + "DeliveredMessages": { + "begin": "MessageNonce", + "end": "MessageNonce", + "dispatch_results": "BitVec" + }, "OutboundLaneData": { "latest_generated_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", @@ -96,6 +55,7 @@ "spec_version": "SpecVersion", "weight": "Weight", "origin": "CallOrigin", + "dispatch_fee_payment": "DispatchFeePayment", "call": "BridgedOpaqueCall" }, "CallOrigin": { @@ -105,6 +65,12 @@ "SourceAccount": "SourceAccountId" } }, + "DispatchFeePayment": { + "_enum": { + "AtSourceChain": "()", + "AtTargetChain": "()" + } + }, "MultiSigner": { "_enum": { "Ed25519": "H256", diff --git a/bridges/deployments/types/millau.json b/bridges/deployments/types/millau.json index bfc86491a527..f738701263d5 100644 --- a/bridges/deployments/types/millau.json +++ b/bridges/deployments/types/millau.json @@ -12,5 +12,4 @@ "MillauToRialtoConversionRate": "u128" } } - } diff --git a/bridges/deployments/types/rialto-millau.json b/bridges/deployments/types/rialto-millau.json new file mode 100644 index 000000000000..96efb84fc3bb --- 
/dev/null +++ b/bridges/deployments/types/rialto-millau.json @@ -0,0 +1,52 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + } +} diff --git a/bridges/deployments/types/rococo-wococo.json b/bridges/deployments/types/rococo-wococo.json new file mode 100644 index 000000000000..b1c4cfa21b92 --- /dev/null +++ b/bridges/deployments/types/rococo-wococo.json @@ -0,0 +1,12 @@ +{ + "--1": "Rococo Types", + "RococoBalance": "u128", + "RococoBlockHash": "H256", + "RococoBlockNumber": "u32", + "RococoHeader": "Header", + "--2": "Wococo Types", + "WococoBalance": "RococoBalance", + "WococoBlockHash": "RococoBlockHash", + "WococoBlockNumber": "RococoBlockNumber", + "WococoHeader": "RococoHeader" +} diff --git a/bridges/deployments/types/rococo.json b/bridges/deployments/types/rococo.json new file mode 100644 index 000000000000..4576378fd479 --- /dev/null +++ b/bridges/deployments/types/rococo.json @@ -0,0 +1,14 @@ +{ + "Fee": "RococoBalance", + "Balance": "RococoBalance", + "BlockHash": "RococoBlockHash", + "BlockNumber": "RococoBlockNumber", + "BridgedBlockHash": "WococoBlockHash", + "BridgedBlockNumber": "WococoBlockNumber", + "BridgedHeader": "WococoHeader", + "Parameter": { + "_enum": { + "RococoToWococoConversionRate": "u128" + } + } +} diff --git a/bridges/deployments/types/wococo.json b/bridges/deployments/types/wococo.json new file mode 100644 index 000000000000..cc01a6ccecfb --- /dev/null +++ b/bridges/deployments/types/wococo.json @@ -0,0 +1,15 @@ +{ + "Fee": "WococoBalance", + "Balance": "WococoBalance", + "Hash": "WococoBlockHash", + "BlockHash": "WococoBlockHash", + "BlockNumber": "WococoBlockNumber", + "BridgedBlockHash": "RococoBlockHash", + "BridgedBlockNumber": "RococoBlockNumber", + "BridgedHeader": "RococoHeader", + "Parameter": { + "_enum": { + "WococoToRococoConversionRate": "u128" + } + } +} diff --git a/bridges/fuzz/storage-proof/Cargo.toml b/bridges/fuzz/storage-proof/Cargo.toml index 05456114e6b2..c5848ebed00e 100644 --- a/bridges/fuzz/storage-proof/Cargo.toml +++ b/bridges/fuzz/storage-proof/Cargo.toml @@ -8,8 +8,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } -finality-grandpa = "0.12.3" +codec = { package = "parity-scale-codec", version = "2.0.0" } +finality-grandpa = "0.14.0" hash-db = "0.15.2" honggfuzz = "0.5.54" log 
= "0.4.0" diff --git a/bridges/modules/dispatch/README.md b/bridges/modules/dispatch/README.md index c4e703c402d5..068ff1167f7d 100644 --- a/bridges/modules/dispatch/README.md +++ b/bridges/modules/dispatch/README.md @@ -44,6 +44,8 @@ module events set: weight, the dispatch is rejected. Keep in mind, that even if post-dispatch weight will be less than specified, the submitter still have to declare (and pay for) the maximal possible weight (that is the pre-dispatch weight); +- `MessageDispatchPaymentFailed` event is emitted if the message submitter has selected to pay + dispatch fee at the target chain, but has failed to do that; - `MessageDispatched` event is emitted if the message has passed all checks and we have actually dispatched it. The dispatch may still fail, though - that's why we are including the dispatch result in the event payload. diff --git a/bridges/modules/dispatch/src/lib.rs b/bridges/modules/dispatch/src/lib.rs index 5ac2825c8c11..e9bf75686bbd 100644 --- a/bridges/modules/dispatch/src/lib.rs +++ b/bridges/modules/dispatch/src/lib.rs @@ -23,9 +23,15 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] +// Generated by `decl_event!` +#![allow(clippy::unused_unit)] use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion, Weight}; -use bp_runtime::{derive_account_id, InstanceId, SourceAccount}; +use bp_runtime::{ + derive_account_id, + messages::{DispatchFeePayment, MessageDispatchResult}, + ChainId, SourceAccount, +}; use codec::{Decode, Encode}; use frame_support::{ decl_event, decl_module, decl_storage, @@ -87,24 +93,27 @@ decl_storage! { decl_event!( pub enum Event where - >::MessageId + >::MessageId, + AccountId = ::AccountId, { /// Message has been rejected before reaching dispatch. - MessageRejected(InstanceId, MessageId), + MessageRejected(ChainId, MessageId), /// Message has been rejected by dispatcher because of spec version mismatch. /// Last two arguments are: expected and passed spec version. - MessageVersionSpecMismatch(InstanceId, MessageId, SpecVersion, SpecVersion), + MessageVersionSpecMismatch(ChainId, MessageId, SpecVersion, SpecVersion), /// Message has been rejected by dispatcher because of weight mismatch. /// Last two arguments are: expected and passed call weight. - MessageWeightMismatch(InstanceId, MessageId, Weight, Weight), + MessageWeightMismatch(ChainId, MessageId, Weight, Weight), /// Message signature mismatch. - MessageSignatureMismatch(InstanceId, MessageId), - /// Message has been dispatched with given result. - MessageDispatched(InstanceId, MessageId, DispatchResult), + MessageSignatureMismatch(ChainId, MessageId), /// We have failed to decode Call from the message. - MessageCallDecodeFailed(InstanceId, MessageId), + MessageCallDecodeFailed(ChainId, MessageId), /// The call from the message has been rejected by the call filter. - MessageCallRejected(InstanceId, MessageId), + MessageCallRejected(ChainId, MessageId), + /// The origin account has failed to pay fee for dispatching the message. + MessageDispatchPaymentFailed(ChainId, MessageId, AccountId, Weight), + /// Message has been dispatched with given result. + MessageDispatched(ChainId, MessageId, DispatchResult), /// Phantom member, never used. Needed to handle multiple pallet instances. _Dummy(PhantomData), } @@ -118,7 +127,7 @@ decl_module! 
{ } } -impl, I: Instance> MessageDispatch for Pallet { +impl, I: Instance> MessageDispatch for Pallet { type Message = MessagePayload; @@ -126,77 +135,108 @@ impl, I: Instance> MessageDispatch for Pallet { message.weight } - fn dispatch(bridge: InstanceId, id: T::MessageId, message: Result) { + fn dispatch Result<(), ()>>( + source_chain: ChainId, + target_chain: ChainId, + id: T::MessageId, + message: Result, + pay_dispatch_fee: P, + ) -> MessageDispatchResult { // emit special even if message has been rejected by external component let message = match message { Ok(message) => message, Err(_) => { - log::trace!(target: "runtime::bridge-dispatch", "Message {:?}/{:?}: rejected before actual dispatch", bridge, id); - Self::deposit_event(RawEvent::MessageRejected(bridge, id)); - return; + log::trace!( + target: "runtime::bridge-dispatch", + "Message {:?}/{:?}: rejected before actual dispatch", + source_chain, + id, + ); + Self::deposit_event(RawEvent::MessageRejected(source_chain, id)); + return MessageDispatchResult { + dispatch_result: false, + unspent_weight: 0, + dispatch_fee_paid_during_dispatch: false, + }; } }; // verify spec version // (we want it to be the same, because otherwise we may decode Call improperly) + let mut dispatch_result = MessageDispatchResult { + dispatch_result: false, + unspent_weight: message.weight, + dispatch_fee_paid_during_dispatch: false, + }; let expected_version = ::Version::get().spec_version; if message.spec_version != expected_version { log::trace!( "Message {:?}/{:?}: spec_version mismatch. Expected {:?}, got {:?}", - bridge, + source_chain, id, expected_version, message.spec_version, ); Self::deposit_event(RawEvent::MessageVersionSpecMismatch( - bridge, + source_chain, id, expected_version, message.spec_version, )); - return; + return dispatch_result; } // now that we have spec version checked, let's decode the call let call = match message.call.into() { Ok(call) => call, Err(_) => { - log::trace!(target: "runtime::bridge-dispatch", "Failed to decode Call from message {:?}/{:?}", bridge, id,); - Self::deposit_event(RawEvent::MessageCallDecodeFailed(bridge, id)); - return; + log::trace!( + target: "runtime::bridge-dispatch", + "Failed to decode Call from message {:?}/{:?}", + source_chain, + id, + ); + Self::deposit_event(RawEvent::MessageCallDecodeFailed(source_chain, id)); + return dispatch_result; } }; // prepare dispatch origin let origin_account = match message.origin { CallOrigin::SourceRoot => { - let hex_id = derive_account_id::(bridge, SourceAccount::Root); + let hex_id = derive_account_id::(source_chain, SourceAccount::Root); let target_id = T::AccountIdConverter::convert(hex_id); log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id); target_id } CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => { - let digest = account_ownership_digest(&call, source_account_id, message.spec_version, bridge); + let digest = account_ownership_digest( + &call, + source_account_id, + message.spec_version, + source_chain, + target_chain, + ); let target_account = target_public.into_account(); if !target_signature.verify(&digest[..], &target_account) { log::trace!( target: "runtime::bridge-dispatch", "Message {:?}/{:?}: origin proof is invalid. 
Expected account: {:?} from signature: {:?}", - bridge, + source_chain, id, target_account, target_signature, ); - Self::deposit_event(RawEvent::MessageSignatureMismatch(bridge, id)); - return; + Self::deposit_event(RawEvent::MessageSignatureMismatch(source_chain, id)); + return dispatch_result; } log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account); target_account } CallOrigin::SourceAccount(source_account_id) => { - let hex_id = derive_account_id(bridge, SourceAccount::Account(source_account_id)); + let hex_id = derive_account_id(source_chain, SourceAccount::Account(source_account_id)); let target_id = T::AccountIdConverter::convert(hex_id); log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id); target_id @@ -208,12 +248,12 @@ impl, I: Instance> MessageDispatch for Pallet { log::trace!( target: "runtime::bridge-dispatch", "Message {:?}/{:?}: the call ({:?}) is rejected by filter", - bridge, + source_chain, id, call, ); - Self::deposit_event(RawEvent::MessageCallRejected(bridge, id)); - return; + Self::deposit_event(RawEvent::MessageCallRejected(source_chain, id)); + return dispatch_result; } // verify weight @@ -225,41 +265,67 @@ impl, I: Instance> MessageDispatch for Pallet { log::trace!( target: "runtime::bridge-dispatch", "Message {:?}/{:?}: passed weight is too low. Expected at least {:?}, got {:?}", - bridge, + source_chain, id, expected_weight, message.weight, ); Self::deposit_event(RawEvent::MessageWeightMismatch( - bridge, + source_chain, id, expected_weight, message.weight, )); - return; + return dispatch_result; } + // pay dispatch fee right before dispatch + let pay_dispatch_fee_at_target_chain = message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; + if pay_dispatch_fee_at_target_chain && pay_dispatch_fee(&origin_account, message.weight).is_err() { + log::trace!( + target: "runtime::bridge-dispatch", + "Failed to pay dispatch fee for dispatching message {:?}/{:?} with weight {}", + source_chain, + id, + message.weight, + ); + Self::deposit_event(RawEvent::MessageDispatchPaymentFailed( + source_chain, + id, + origin_account, + message.weight, + )); + return dispatch_result; + } + dispatch_result.dispatch_fee_paid_during_dispatch = pay_dispatch_fee_at_target_chain; + // finally dispatch message let origin = RawOrigin::Signed(origin_account).into(); + log::trace!(target: "runtime::bridge-dispatch", "Message being dispatched is: {:.4096?}", &call); - let dispatch_result = call.dispatch(origin); - let actual_call_weight = extract_actual_weight(&dispatch_result, &dispatch_info); + let result = call.dispatch(origin); + let actual_call_weight = extract_actual_weight(&result, &dispatch_info); + dispatch_result.dispatch_result = result.is_ok(); + dispatch_result.unspent_weight = message.weight.saturating_sub(actual_call_weight); log::trace!( target: "runtime::bridge-dispatch", - "Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}", - bridge, + "Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}. 
Call dispatch result: {:?}", + source_chain, id, - actual_call_weight, + dispatch_result.unspent_weight, message.weight, dispatch_result, + result, ); Self::deposit_event(RawEvent::MessageDispatched( - bridge, + source_chain, id, - dispatch_result.map(drop).map_err(|e| e.error), + result.map(drop).map_err(|e| e.error), )); + + dispatch_result } } @@ -290,7 +356,7 @@ where } CallOrigin::SourceAccount(ref source_account_id) => { ensure!( - sender_origin == &RawOrigin::Signed(source_account_id.clone()), + sender_origin == &RawOrigin::Signed(source_account_id.clone()) || sender_origin == &RawOrigin::Root, BadOrigin ); Ok(Some(source_account_id.clone())) @@ -303,23 +369,24 @@ where /// The byte vector returned by this function will be signed with a target chain account /// private key. This way, the owner of `source_account_id` on the source chain proves that /// the target chain account private key is also under his control. -pub fn account_ownership_digest( +pub fn account_ownership_digest( call: &Call, source_account_id: AccountId, target_spec_version: SpecVersion, - source_instance_id: BridgeId, + source_chain_id: ChainId, + target_chain_id: ChainId, ) -> Vec where Call: Encode, AccountId: Encode, SpecVersion: Encode, - BridgeId: Encode, { let mut proof = Vec::new(); call.encode_to(&mut proof); source_account_id.encode_to(&mut proof); target_spec_version.encode_to(&mut proof); - source_instance_id.encode_to(&mut proof); + source_chain_id.encode_to(&mut proof); + target_chain_id.encode_to(&mut proof); proof } @@ -342,6 +409,9 @@ mod tests { type AccountId = u64; type MessageId = [u8; 4]; + const SOURCE_CHAIN_ID: ChainId = *b"srce"; + const TARGET_CHAIN_ID: ChainId = *b"trgt"; + #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] pub struct TestAccountPublic(AccountId); @@ -463,31 +533,32 @@ mod tests { fn prepare_message( origin: CallOrigin, call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch::MessageId>>::Message { MessagePayload { spec_version: TEST_SPEC_VERSION, weight: TEST_WEIGHT, origin, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: EncodedCall(call.encode()), } } fn prepare_root_message( call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch::MessageId>>::Message { prepare_message(CallOrigin::SourceRoot, call) } fn prepare_target_message( call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch::MessageId>>::Message { let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1)); prepare_message(origin, call) } fn prepare_source_message( call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch::MessageId>>::Message { let origin = CallOrigin::SourceAccount(1); prepare_message(origin, call) } @@ -495,23 +566,25 @@ mod tests { #[test] fn should_fail_on_spec_version_mismatch() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; const BAD_SPEC_VERSION: SpecVersion = 99; let mut message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + let weight = message.weight; message.spec_version = BAD_SPEC_VERSION; System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert_eq!(result.unspent_weight, weight); + assert!(!result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: 
Event::Dispatch(call_dispatch::Event::::MessageVersionSpecMismatch( - bridge, + SOURCE_CHAIN_ID, id, TEST_SPEC_VERSION, BAD_SPEC_VERSION @@ -525,21 +598,25 @@ mod tests { #[test] fn should_fail_on_weight_mismatch() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; let mut message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); - message.weight = 0; + message.weight = 7; System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert_eq!(result.unspent_weight, 7); + assert!(!result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::Dispatch(call_dispatch::Event::::MessageWeightMismatch( - bridge, id, 1345000, 0, + SOURCE_CHAIN_ID, + id, + 1038000, + 7, )), topics: vec![], }], @@ -550,7 +627,6 @@ mod tests { #[test] fn should_fail_on_signature_mismatch() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99)); @@ -558,16 +634,20 @@ mod tests { call_origin, Call::System(>::remark(vec![1, 2, 3])), ); + let weight = message.weight; System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert_eq!(result.unspent_weight, weight); + assert!(!result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::Dispatch(call_dispatch::Event::::MessageSignatureMismatch( - bridge, id + SOURCE_CHAIN_ID, + id )), topics: vec![], }], @@ -578,17 +658,19 @@ mod tests { #[test] fn should_emit_event_for_rejected_messages() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; System::set_block_number(1); - Dispatch::dispatch(bridge, id, Err(())); + Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Err(()), |_, _| unreachable!()); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageRejected(bridge, id)), + event: Event::Dispatch(call_dispatch::Event::::MessageRejected( + SOURCE_CHAIN_ID, + id + )), topics: vec![], }], ); @@ -598,22 +680,25 @@ mod tests { #[test] fn should_fail_on_call_decode() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; let mut message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + let weight = message.weight; message.call.0 = vec![]; System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert_eq!(result.unspent_weight, weight); + assert!(!result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::Dispatch(call_dispatch::Event::::MessageCallDecodeFailed( - bridge, id + SOURCE_CHAIN_ID, + id )), topics: vec![], }], @@ -624,7 +709,6 @@ mod tests { #[test] fn should_emit_event_for_rejected_calls() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; let call = Call::System(>::fill_block(Perbill::from_percent(75))); @@ -633,35 +717,132 @@ mod tests { message.weight = weight; System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let 
result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert_eq!(result.unspent_weight, weight); + assert!(!result.dispatch_result); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::Dispatch(call_dispatch::Event::::MessageCallRejected( + SOURCE_CHAIN_ID, + id + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_emit_event_for_unpaid_calls() { + new_test_ext().execute_with(|| { + let id = [0; 4]; + + let mut message = + prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + let weight = message.weight; + message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; + + System::set_block_number(1); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Err(())); + assert_eq!(result.unspent_weight, weight); + assert!(!result.dispatch_result); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::Dispatch(call_dispatch::Event::::MessageDispatchPaymentFailed( + SOURCE_CHAIN_ID, + id, + AccountIdConverter::convert(derive_account_id::( + SOURCE_CHAIN_ID, + SourceAccount::Root + )), + TEST_WEIGHT, + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_dispatch_calls_paid_at_target_chain() { + new_test_ext().execute_with(|| { + let id = [0; 4]; + + let mut message = + prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; + + System::set_block_number(1); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Ok(())); + assert!(result.dispatch_fee_paid_during_dispatch); + assert!(result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageCallRejected(bridge, id)), + event: Event::Dispatch(call_dispatch::Event::::MessageDispatched( + SOURCE_CHAIN_ID, + id, + Ok(()) + )), topics: vec![], }], ); }); } + #[test] + fn should_return_dispatch_failed_flag_if_dispatch_happened_but_failed() { + new_test_ext().execute_with(|| { + let id = [0; 4]; + + let call = Call::System(>::set_heap_pages(1)); + let message = prepare_target_message(call); + + System::set_block_number(1); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert!(!result.dispatch_fee_paid_during_dispatch); + assert!(!result.dispatch_result); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::Dispatch(call_dispatch::Event::::MessageDispatched( + SOURCE_CHAIN_ID, + id, + Err(sp_runtime::DispatchError::BadOrigin) + )), + topics: vec![], + }], + ); + }) + } + #[test] fn should_dispatch_bridge_message_from_root_origin() { new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); let id = [0; 4]; let message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert!(!result.dispatch_fee_paid_during_dispatch); + assert!(result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::Dispatch(call_dispatch::Event::::MessageDispatched( - bridge, + SOURCE_CHAIN_ID, id, Ok(()) )), @@ -675,20 +856,21 @@ mod tests { fn 
should_dispatch_bridge_message_from_target_origin() { new_test_ext().execute_with(|| { let id = [0; 4]; - let bridge = b"ethb".to_owned(); let call = Call::System(>::remark(vec![])); let message = prepare_target_message(call); System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert!(!result.dispatch_fee_paid_during_dispatch); + assert!(result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::Dispatch(call_dispatch::Event::::MessageDispatched( - bridge, + SOURCE_CHAIN_ID, id, Ok(()) )), @@ -702,20 +884,21 @@ mod tests { fn should_dispatch_bridge_message_from_source_origin() { new_test_ext().execute_with(|| { let id = [0; 4]; - let bridge = b"ethb".to_owned(); let call = Call::System(>::remark(vec![])); let message = prepare_source_message(call); System::set_block_number(1); - Dispatch::dispatch(bridge, id, Ok(message)); + let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + assert!(!result.dispatch_fee_paid_during_dispatch); + assert!(result.dispatch_result); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::Dispatch(call_dispatch::Event::::MessageDispatched( - bridge, + SOURCE_CHAIN_ID, id, Ok(()) )), @@ -782,10 +965,7 @@ mod tests { Err(BadOrigin) )); - // If we try and send the message from Root, it is also rejected - assert!(matches!( - verify_message_origin(&RawOrigin::Root, &message), - Err(BadOrigin) - )); + // The Root account is allowed to assume any expected origin account + assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(Some(1)))); } } diff --git a/bridges/modules/ethereum-contract-builtin/src/lib.rs b/bridges/modules/ethereum-contract-builtin/src/lib.rs index 47c4452aee6c..a07f838cf8d6 100644 --- a/bridges/modules/ethereum-contract-builtin/src/lib.rs +++ b/bridges/modules/ethereum-contract-builtin/src/lib.rs @@ -151,7 +151,7 @@ pub fn verify_substrate_finality_proof( let best_set = best_set?; let verify_result = sc_finality_grandpa::GrandpaJustification::::decode_and_verify_finalizes( - &raw_finality_proof, + raw_finality_proof, (finality_target_hash, finality_target_number), best_set_id, &best_set, diff --git a/bridges/modules/ethereum/src/lib.rs b/bridges/modules/ethereum/src/lib.rs index aeb7d69f763b..facf377d51b8 100644 --- a/bridges/modules/ethereum/src/lib.rs +++ b/bridges/modules/ethereum/src/lib.rs @@ -1381,15 +1381,12 @@ pub(crate) mod tests { fn verify_transaction_finalized_works_for_best_finalized_header() { run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - true, - ); + assert!(verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(true))], + )); }); } @@ -1400,15 +1397,12 @@ pub(crate) mod tests { insert_header(&mut storage, example_header_parent()); insert_header(&mut storage, example_header()); storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header_parent().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - true, - ); + assert!(verify_transaction_finalized( + 
&storage, + example_header_parent().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(true))], + )); }); } @@ -1416,10 +1410,12 @@ pub(crate) mod tests { fn verify_transaction_finalized_rejects_proof_with_missing_tx() { run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 1, + &[], + ),); }); } @@ -1427,10 +1423,12 @@ pub(crate) mod tests { fn verify_transaction_finalized_rejects_unknown_header() { run_test(TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 1, + &[], + )); }); } @@ -1440,15 +1438,12 @@ pub(crate) mod tests { let mut storage = BridgeStorage::::new(); insert_header(&mut storage, example_header_parent()); insert_header(&mut storage, example_header()); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(true))], + )); }); } @@ -1464,15 +1459,12 @@ pub(crate) mod tests { insert_header(&mut storage, example_header()); insert_header(&mut storage, finalized_header_sibling); storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert_eq!( - verify_transaction_finalized( - &storage, - finalized_header_sibling_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + finalized_header_sibling_hash, + 0, + &[(example_tx(), example_tx_receipt(true))], + )); }); } @@ -1488,15 +1480,12 @@ pub(crate) mod tests { insert_header(&mut storage, finalized_header_uncle); insert_header(&mut storage, example_header()); storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert_eq!( - verify_transaction_finalized( - &storage, - finalized_header_uncle_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + finalized_header_uncle_hash, + 0, + &[(example_tx(), example_tx_receipt(true))], + )); }); } @@ -1504,18 +1493,15 @@ pub(crate) mod tests { fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() { run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[ - (example_tx(), example_tx_receipt(true)), - (example_tx(), example_tx_receipt(true)) - ], - ), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[ + (example_tx(), example_tx_receipt(true)), + (example_tx(), example_tx_receipt(true)) + ], + )); }); } @@ -1523,15 +1509,12 @@ pub(crate) mod tests { fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() { run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), vec![42])], - ), - false, - ); + 
assert!(!verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[(example_tx(), vec![42])], + )); }); } @@ -1539,15 +1522,12 @@ pub(crate) mod tests { fn verify_transaction_finalized_rejects_failed_transaction() { run_test_with_genesis(example_header_with_failed_receipt(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header_with_failed_receipt().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(false))], - ), - false, - ); + assert!(!verify_transaction_finalized( + &storage, + example_header_with_failed_receipt().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(false))], + )); }); } } diff --git a/bridges/modules/ethereum/src/test_utils.rs b/bridges/modules/ethereum/src/test_utils.rs index 18ad6876d68b..41161089ba6d 100644 --- a/bridges/modules/ethereum/src/test_utils.rs +++ b/bridges/modules/ethereum/src/test_utils.rs @@ -214,7 +214,7 @@ impl HeaderBuilder { /// Helper function for getting a genesis header which has been signed by an authority. pub fn build_genesis_header(author: &SecretKey) -> AuraHeader { let genesis = HeaderBuilder::genesis(); - genesis.header.sign_by(&author) + genesis.header.sign_by(author) } /// Helper function for building a custom child header which has been signed by an authority. @@ -222,7 +222,7 @@ pub fn build_custom_header(author: &SecretKey, previous: &AuraHeader, customi where F: FnOnce(AuraHeader) -> AuraHeader, { - let new_header = HeaderBuilder::with_parent(&previous); + let new_header = HeaderBuilder::with_parent(previous); let custom_header = customize_header(new_header.header); custom_header.sign_by(author) } diff --git a/bridges/modules/ethereum/src/verification.rs b/bridges/modules/ethereum/src/verification.rs index c79242d1d4d9..c8c4deca87f1 100644 --- a/bridges/modules/ethereum/src/verification.rs +++ b/bridges/modules/ethereum/src/verification.rs @@ -396,7 +396,7 @@ mod tests { fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result, Error> { run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); - verify_aura_header(&storage, &config, None, header, &ConstChainTime::default()) + verify_aura_header(&storage, config, None, header, &ConstChainTime::default()) }) } @@ -787,7 +787,7 @@ mod tests { fn pool_verifies_future_block_number() { // when header is too far from the future assert_eq!( - default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(&validators), None),), + default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(validators), None),), Err(Error::UnsignedTooFarInTheFuture), ); } @@ -800,7 +800,7 @@ mod tests { default_accept_into_pool(|validators| ( HeaderBuilder::with_parent_number(3) .step(GENESIS_STEP + 3) - .sign_by_set(&validators), + .sign_by_set(validators), None, ),), Err(Error::DoubleVote), diff --git a/bridges/modules/grandpa/src/benchmarking.rs b/bridges/modules/grandpa/src/benchmarking.rs index b7294e918003..bc027e86a4b5 100644 --- a/bridges/modules/grandpa/src/benchmarking.rs +++ b/bridges/modules/grandpa/src/benchmarking.rs @@ -38,24 +38,18 @@ //! //! Note that the worst case scenario here would be a justification where each validator has it's //! own fork which is `SESSION_LENGTH` blocks long. -//! -//! As far as benchmarking results go, the only benchmark that should be used in -//! `pallet-bridge-grandpa` to annotate weights is the `submit_finality_proof` one. 
The others are -//! looking at the effects of specific code paths and do not actually reflect the overall worst case -//! scenario. use crate::*; use bp_test_utils::{ - accounts, authority_list, make_justification_for_header, test_keyring, JustificationGeneratorParams, ALICE, - TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID, + accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID, }; use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; use frame_support::traits::Get; use frame_system::RawOrigin; use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::Zero; -use sp_std::{vec, vec::Vec}; +use sp_std::vec::Vec; // The maximum number of vote ancestries to include in a justification. // @@ -75,124 +69,46 @@ fn header_number, I: 'static, N: From>() -> N { (T::HeadersToKeep::get() + 1).into() } +/// Prepare header and its justification to submit using `submit_finality_proof`. +fn prepare_benchmark_data, I: 'static>( + precommits: u32, + ancestors: u32, +) -> (BridgedHeader, GrandpaJustification>) { + let authority_list = accounts(precommits as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list, + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + bootstrap_bridge::(init_data); + + let header: BridgedHeader = bp_test_utils::test_header(header_number::()); + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: accounts(precommits as u16).iter().map(|k| (*k, 1)).collect::>(), + ancestors, + forks: 1, + }; + let justification = make_justification_for_header(params); + (header, justification) +} + benchmarks_instance_pallet! { // This is the "gold standard" benchmark for this extrinsic, and it's what should be used to // annotate the weight in the pallet. - // - // The other benchmarks related to `submit_finality_proof` are looking at the effect of specific - // parameters and are there mostly for seeing how specific codepaths behave. submit_finality_proof { - let v in 1..MAX_VOTE_ANCESTRIES; let p in 1..MAX_VALIDATOR_SET_SIZE; - - let caller: T::AccountId = whitelisted_caller(); - - let authority_list = accounts(p as u16) - .iter() - .map(|id| (AuthorityId::from(*id), 1)) - .collect::>(); - - let init_data = InitializationData { - header: bp_test_utils::test_header(Zero::zero()), - authority_list, - set_id: TEST_GRANDPA_SET_ID, - is_halted: false, - }; - - bootstrap_bridge::(init_data); - - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - let params = JustificationGeneratorParams { - header: header.clone(), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::>(), - votes: v, - forks: 1, - }; - - let justification = make_justification_for_header(params); - - }: _(RawOrigin::Signed(caller), header, justification) - verify { - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - let expected_hash = header.hash(); - - assert_eq!(>::get(), expected_hash); - assert!(>::contains_key(expected_hash)); - } - - // What we want to check here is the effect of vote ancestries on justification verification - // do this by varying the number of headers between `finality_target` and `header_of_chain`. 
- submit_finality_proof_on_single_fork { let v in 1..MAX_VOTE_ANCESTRIES; - - let caller: T::AccountId = whitelisted_caller(); - - let init_data = InitializationData { - header: bp_test_utils::test_header(Zero::zero()), - authority_list: authority_list(), - set_id: TEST_GRANDPA_SET_ID, - is_halted: false, - }; - - bootstrap_bridge::(init_data); - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - - let params = JustificationGeneratorParams { - header: header.clone(), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: test_keyring(), - votes: v, - forks: 1, - }; - - let justification = make_justification_for_header(params); - - }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) - verify { - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - let expected_hash = header.hash(); - - assert_eq!(>::get(), expected_hash); - assert!(>::contains_key(expected_hash)); - } - - // What we want to check here is the effect of many pre-commits on justification verification. - // We do this by creating many forks, whose head will be used as a signed pre-commit in the - // final justification. - submit_finality_proof_on_many_forks { - let p in 1..MAX_VALIDATOR_SET_SIZE; - let caller: T::AccountId = whitelisted_caller(); - - let authority_list = accounts(p as u16) - .iter() - .map(|id| (AuthorityId::from(*id), 1)) - .collect::>(); - - let init_data = InitializationData { - header: bp_test_utils::test_header(Zero::zero()), - authority_list, - set_id: TEST_GRANDPA_SET_ID, - is_halted: false, - }; - - bootstrap_bridge::(init_data); - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - - let params = JustificationGeneratorParams { - header: header.clone(), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::>(), - votes: p, - forks: p, - }; - - let justification = make_justification_for_header(params); - + let (header, justification) = prepare_benchmark_data::(p, v); }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) verify { let header: BridgedHeader = bp_test_utils::test_header(header_number::()); @@ -201,81 +117,4 @@ benchmarks_instance_pallet! { assert_eq!(>::get(), expected_hash); assert!(>::contains_key(expected_hash)); } - - // Here we want to find out the overheaded of looking through consensus digests found in a - // header. As the number of logs in a header grows, how much more work do we require to look - // through them? - // - // Note that this should be the same for looking through scheduled changes and forces changes, - // which is why we only have one benchmark for this. - find_scheduled_change { - // Not really sure what a good bound for this is. - let n in 1..1000; - - let mut logs = vec![]; - for i in 0..n { - // We chose a non-consensus log on purpose since that way we have to look through all - // the logs in the header - logs.push(sp_runtime::DigestItem::Other(vec![])); - } - - let mut header: BridgedHeader = bp_test_utils::test_header(Zero::zero()); - let digest = header.digest_mut(); - *digest = sp_runtime::Digest { - logs, - }; - - }: { - crate::find_scheduled_change(&header) - } - - // What we want to check here is how long it takes to read and write the authority set tracked - // by the pallet as the number of authorities grows. 
- read_write_authority_sets { - // The current max target number of validators on Polkadot/Kusama - let n in 1..1000; - - let mut authorities = vec![]; - for i in 0..n { - authorities.push((ALICE, 1)); - } - - let authority_set = bp_header_chain::AuthoritySet { - authorities: authorities.iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect(), - set_id: 0 - }; - - >::put(&authority_set); - - }: { - let authority_set = >::get(); - >::put(&authority_set); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::assert_ok; - - #[test] - fn finality_proof_is_valid() { - mock::run_test(|| { - assert_ok!(test_benchmark_submit_finality_proof::()); - }); - } - - #[test] - fn single_fork_finality_proof_is_valid() { - mock::run_test(|| { - assert_ok!(test_benchmark_submit_finality_proof_on_single_fork::()); - }); - } - - #[test] - fn multi_fork_finality_proof_is_valid() { - mock::run_test(|| { - assert_ok!(test_benchmark_submit_finality_proof_on_many_forks::()); - }); - } } diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index d38f61826f98..4cca1c782738 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -46,6 +46,7 @@ use frame_support::{ensure, fail}; use frame_system::{ensure_signed, RawOrigin}; use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero}; +use sp_std::convert::TryInto; #[cfg(test)] mod mock; @@ -124,8 +125,8 @@ pub mod pallet { /// If successful in verification, it will write the target header to the underlying storage /// pallet. #[pallet::weight(T::WeightInfo::submit_finality_proof( - justification.votes_ancestries.len() as u32, - justification.commit.precommits.len() as u32, + justification.commit.precommits.len().try_into().unwrap_or(u32::MAX), + justification.votes_ancestries.len().try_into().unwrap_or(u32::MAX), ))] pub fn submit_finality_proof( origin: OriginFor, @@ -414,9 +415,14 @@ pub mod pallet { let set_id = authority_set.set_id; Ok( - verify_justification::>((hash, number), set_id, &voter_set, &justification).map_err( + verify_justification::>((hash, number), set_id, &voter_set, justification).map_err( |e| { - log::error!(target: "runtime::bridge-grandpa", "Received invalid justification for {:?}: {:?}", hash, e); + log::error!( + target: "runtime::bridge-grandpa", + "Received invalid justification for {:?}: {:?}", + hash, + e, + ); >::InvalidJustification }, )?, @@ -693,7 +699,7 @@ mod tests { CurrentAuthoritySet::::get().authorities, init_data.authority_list ); - assert_eq!(IsHalted::::get(), false); + assert!(!IsHalted::::get()); }) } diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index 9e7c2ebc087e..18d88049f16a 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -17,7 +17,7 @@ //! Autogenerated weights for pallet_bridge_grandpa //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-04-21, STEPS: [50, ], REPEAT: 20 +//! DATE: 2021-06-03, STEPS: [50, ], REPEAT: 20 //! LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled //! CHAIN: Some("dev"), DB CACHE: 128 @@ -48,74 +48,28 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bridge_grandpa. 
pub trait WeightInfo { - fn submit_finality_proof(v: u32, p: u32) -> Weight; - fn submit_finality_proof_on_single_fork(v: u32) -> Weight; - fn submit_finality_proof_on_many_forks(p: u32) -> Weight; - fn find_scheduled_change(n: u32) -> Weight; - fn read_write_authority_sets(n: u32) -> Weight; + fn submit_finality_proof(p: u32, v: u32) -> Weight; } /// Weights for pallet_bridge_grandpa using the Rialto node and recommended hardware. pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { - fn submit_finality_proof(v: u32, p: u32) -> Weight { + fn submit_finality_proof(p: u32, v: u32) -> Weight { (0 as Weight) - .saturating_add((756_462_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((791_236_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((59_692_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((6_876_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } - fn submit_finality_proof_on_single_fork(v: u32) -> Weight { - (280_121_000 as Weight) - .saturating_add((14_098_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) - } - fn submit_finality_proof_on_many_forks(p: u32) -> Weight { - (10_370_940_000 as Weight) - .saturating_add((96_902_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) - } - fn find_scheduled_change(n: u32) -> Weight { - (479_000 as Weight).saturating_add((11_000 as Weight).saturating_mul(n as Weight)) - } - fn read_write_authority_sets(n: u32) -> Weight { - (8_030_000 as Weight) - .saturating_add((232_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } } // For backwards compatibility and tests impl WeightInfo for () { - fn submit_finality_proof(v: u32, p: u32) -> Weight { + fn submit_finality_proof(p: u32, v: u32) -> Weight { (0 as Weight) - .saturating_add((756_462_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((791_236_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((59_692_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((6_876_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } - fn submit_finality_proof_on_single_fork(v: u32) -> Weight { - (280_121_000 as Weight) - .saturating_add((14_098_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) - } - fn submit_finality_proof_on_many_forks(p: u32) -> Weight { - (10_370_940_000 as Weight) - .saturating_add((96_902_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) - } - fn find_scheduled_change(n: u32) -> Weight { - (479_000 as Weight).saturating_add((11_000 as Weight).saturating_mul(n as Weight)) - } - fn read_write_authority_sets(n: u32) -> Weight { - (8_030_000 as Weight) - .saturating_add((232_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } } diff --git 
a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 4a75fa8181f8..a26cf65c028b 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +bitvec = { version = "0.20", default-features = false, features = ["alloc"] } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } log = { version = "0.4.14", default-features = false } num-traits = { version = "0.2", default-features = false } @@ -14,6 +15,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } # Bridge dependencies +bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } bp-messages = { path = "../../primitives/messages", default-features = false } bp-rialto = { path = "../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../primitives/runtime", default-features = false } @@ -36,6 +38,7 @@ pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "m [features] default = ["std"] std = [ + "bp-message-dispatch/std", "bp-messages/std", "bp-runtime/std", "bp-rialto/std", diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md index eda5e28a6c88..be25b3c37f75 100644 --- a/bridges/modules/messages/README.md +++ b/bridges/modules/messages/README.md @@ -101,7 +101,14 @@ the `MessageAccepted` event is emitted in the `send_message()` transaction. The message lane identifier and nonce that has been assigned to the message. When a message is delivered to the target chain, the `MessagesDelivered` event is emitted from the `receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane -identifier and inclusive range of delivered message nonces. +identifier, inclusive range of delivered message nonces and their single-bit dispatch results. + +Please note that the meaning of the 'dispatch result' is determined by the message dispatcher at +the target chain. For example, in case of immediate call dispatcher it will be the `true` if call +has been successfully dispatched and `false` if it has only been delivered. This simple mechanism +built into the messages module allows building basic bridge applications, which only care whether +their messages have been successfully dispatched or not. More sophisticated applications may use +their own dispatch result delivery mechanism to deliver something larger than single bit. ### How to plug-in Messages Module to Send Messages to the Bridged Chain? @@ -152,7 +159,7 @@ all required traits and will simply reject all transactions, related to outbound The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with inbound messages. The `pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the -bridged chain as the source or our inbound messages. When relayer sends us a delivery transaction, +bridged chain as the source or our inbound messages. When relayer sends us a delivery transaction, this implementation must be able to parse and verify the proof of messages wrapped in this transaction. Normally, you would reuse the same (configurable) type on all chains that are sending messages to the same bridged chain. @@ -194,7 +201,7 @@ message needs to be read. 
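As an aside on the 'dispatch result' paragraph earlier in this README diff: a bridge application receiving the `MessagesDelivered` event (or the `OnDeliveryConfirmed` callback added by this change) can read the per-message bits out of `DeliveredMessages`. The following sketch is illustrative only; it relies on the `begin`, `end` and `dispatch_results` fields that appear elsewhere in this change, and the helper name is invented.

// Illustrative sketch, not part of the pallet: count messages whose dispatch succeeded.
fn count_successfully_dispatched(messages: &DeliveredMessages) -> usize {
    (messages.begin..=messages.end)
        .filter(|nonce| messages.dispatch_results[(nonce - messages.begin) as usize])
        .count()
}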
So there's another When choosing values for these parameters, you must also keep in mind that if proof in your scheme is based on finality of headers (and it is the most obvious option for Substrate-based chains with finality notion), then choosing too small values for these parameters may cause significant delays -in message delivery. That's because there too many actors involved in this scheme: 1) authorities +in message delivery. That's because there are too many actors involved in this scheme: 1) authorities that are finalizing headers of the target chain need to finalize header with non-empty map; 2) the headers relayer then needs to submit this header and its finality proof to the source chain; 3) the messages relayer must then send confirmation transaction (storage proof of this map) to the source @@ -347,6 +354,23 @@ Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_corr `pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every runtime's tests. +### Post-dispatch weight refunds of the `receive_messages_proof` call + +Weight formula of the `receive_messages_proof` call assumes that the dispatch fee of every message is +paid at the target chain (where call is executed), that every message will be dispatched and that +dispatch weight of the message will be exactly the weight that is returned from the +`MessageDispatch::dispatch_weight` method call. This isn't true for all messages, so the call returns +actual weight used to dispatch messages. + +This actual weight is the weight, returned by the weight formula, minus: +- the weight of undispatched messages, if we have failed to dispatch because of different issues; +- the unspent dispatch weight if the declared weight of some messages is less than their actual post-dispatch weight; +- the pay-dispatch-fee weight for every message that had dispatch fee paid at the source chain. + +The last component is computed as a difference between two benchmarks results - the `receive_single_message_proof` +benchmark (that assumes that the fee is paid during dispatch) and the `receive_single_prepaid_message_proof` +(that assumes that the dispatch fee is already paid). + ### Weight of `receive_messages_delivery_proof` call #### Related benchmarks diff --git a/bridges/modules/messages/src/benchmarking.rs b/bridges/modules/messages/src/benchmarking.rs index d1ecf7750002..54cb7c26cd3d 100644 --- a/bridges/modules/messages/src/benchmarking.rs +++ b/bridges/modules/messages/src/benchmarking.rs @@ -17,16 +17,25 @@ //! Messages pallet benchmarking. 
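To make the refund rule from the 'Post-dispatch weight refunds' section above concrete, here is a minimal sketch of the arithmetic, assuming hypothetical per-call totals; the real call computes these values per message and the helper below is not part of the pallet.

// Illustrative sketch of the post-dispatch weight returned by `receive_messages_proof`.
fn illustrate_actual_weight(
    declared_weight: u64,           // weight returned by the call's weight formula
    undispatched_weight: u64,       // declared dispatch weight of messages that were not dispatched
    unspent_dispatch_weight: u64,   // declared minus actually used dispatch weight
    prepaid_messages: u64,          // messages whose dispatch fee was paid at the source chain
    pay_dispatch_fee_overhead: u64, // benchmarked weight of paying the fee at this chain
) -> u64 {
    declared_weight
        .saturating_sub(undispatched_weight)
        .saturating_sub(unspent_dispatch_weight)
        .saturating_sub(prepaid_messages.saturating_mul(pay_dispatch_fee_overhead))
}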
use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH; -use crate::{inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, Call, Instance}; +use crate::{ + inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, outbound_lane::ReceivalConfirmationResult, + Call, Instance, +}; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, InboundLaneData, LaneId, MessageData, - MessageNonce, OutboundLaneData, UnrewardedRelayersState, + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, InboundLaneData, LaneId, + MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, }; +use bp_runtime::messages::DispatchFeePayment; use frame_benchmarking::{account, benchmarks_instance}; use frame_support::{traits::Get, weights::Weight}; use frame_system::RawOrigin; -use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, ops::RangeInclusive, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + convert::TryInto, + ops::RangeInclusive, + prelude::*, +}; /// Fee paid by submitter for single message delivery. pub const MESSAGE_FEE: u64 = 10_000_000_000; @@ -67,6 +76,8 @@ pub struct MessageProofParams { pub outbound_lane_data: Option, /// Proof size requirements. pub size: ProofSize, + /// Where the fee for dispatching message is paid? + pub dispatch_fee_payment: DispatchFeePayment, } /// Benchmark-specific message delivery proof parameters. @@ -108,6 +119,8 @@ pub trait Config: crate::Config { fn prepare_message_delivery_proof( params: MessageDeliveryProofParams, ) -> >::MessagesDeliveryProof; + /// Returns true if message has been dispatched (either successfully or not). + fn is_message_dispatched(nonce: MessageNonce) -> bool; } benchmarks_instance! { @@ -242,7 +255,8 @@ benchmarks_instance! { // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. + // * message requires all heavy checks done by dispatcher; + // * message dispatch fee is paid at target (this) chain. // // This is base benchmark for all other message delivery benchmarks. receive_single_message_proof { @@ -257,6 +271,7 @@ benchmarks_instance! { message_nonces: 21..=21, outbound_lane_data: None, size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { @@ -264,13 +279,15 @@ benchmarks_instance! { crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); + assert!(T::is_message_dispatched(21)); } // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. + // * message requires all heavy checks done by dispatcher; + // * message dispatch fee is paid at target (this) chain. // // The weight of single message delivery could be approximated as // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. @@ -288,6 +305,7 @@ benchmarks_instance! 
{ message_nonces: 21..=22, outbound_lane_data: None, size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) verify { @@ -295,13 +313,15 @@ benchmarks_instance! { crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 22, ); + assert!(T::is_message_dispatched(22)); } // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: // * proof includes outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. + // * message requires all heavy checks done by dispatcher; + // * message dispatch fee is paid at target (this) chain. // // The weight of outbound lane state delivery would be // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. @@ -323,6 +343,7 @@ benchmarks_instance! { latest_generated_nonce: 21, }), size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { @@ -334,6 +355,7 @@ benchmarks_instance! { crate::Pallet::::inbound_latest_confirmed_nonce(T::bench_lane_id()), 20, ); + assert!(T::is_message_dispatched(21)); } // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: @@ -357,6 +379,7 @@ benchmarks_instance! { message_nonces: 21..=21, outbound_lane_data: None, size: ProofSize::HasExtraNodes(1024), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { @@ -364,6 +387,7 @@ benchmarks_instance! { crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); + assert!(T::is_message_dispatched(21)); } // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: @@ -389,6 +413,7 @@ benchmarks_instance! { message_nonces: 21..=21, outbound_lane_data: None, size: ProofSize::HasExtraNodes(16 * 1024), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { @@ -396,6 +421,40 @@ benchmarks_instance! { crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); + assert!(T::is_message_dispatched(21)); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher; + // * message dispatch fee is paid at source (bridged) chain. + // + // This benchmark is used to compute extra weight spent at target chain when fee is paid there. Then we use + // this information in two places: (1) to reduce weight of delivery tx if sender pays fee at the source chain + // and (2) to refund relayer with this weight if fee has been paid at the source chain. 
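The benchmark added below pairs with `receive_single_message_proof` as described in the README: the per-message cost of paying the dispatch fee at this chain is taken as the difference between the two results. A hedged sketch of that derivation, with invented parameter names, could look like:

// Illustrative only: how `pay_inbound_dispatch_fee_overhead` could be derived from benchmarks.
fn illustrate_pay_inbound_dispatch_fee_overhead(
    single_message_proof_weight: u64,         // fee paid during dispatch (at this chain)
    single_prepaid_message_proof_weight: u64, // fee already paid at the source chain
) -> u64 {
    single_message_proof_weight.saturating_sub(single_prepaid_message_proof_weight)
}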
+ receive_single_prepaid_message_proof { + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: None, + size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + }); + }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + assert!(T::is_message_dispatched(21)); } // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: @@ -420,7 +479,10 @@ benchmarks_instance! { let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { lane: T::bench_lane_id(), inbound_lane_data: InboundLaneData { - relayers: vec![(1, 1, relayer_id.clone())].into_iter().collect(), + relayers: vec![UnrewardedRelayer { + relayer: relayer_id.clone(), + messages: DeliveredMessages::new(1, true), + }].into_iter().collect(), last_confirmed_nonce: 0, }, size: ProofSize::Minimal(0), @@ -455,10 +517,15 @@ benchmarks_instance! { messages_in_oldest_entry: 2, total_messages: 2, }; + let mut delivered_messages = DeliveredMessages::new(1, true); + delivered_messages.note_dispatched_message(true); let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { lane: T::bench_lane_id(), inbound_lane_data: InboundLaneData { - relayers: vec![(1, 2, relayer_id.clone())].into_iter().collect(), + relayers: vec![UnrewardedRelayer { + relayer: relayer_id.clone(), + messages: delivered_messages, + }].into_iter().collect(), last_confirmed_nonce: 0, }, size: ProofSize::Minimal(0), @@ -496,8 +563,14 @@ benchmarks_instance! { lane: T::bench_lane_id(), inbound_lane_data: InboundLaneData { relayers: vec![ - (1, 1, relayer1_id.clone()), - (2, 2, relayer2_id.clone()), + UnrewardedRelayer { + relayer: relayer1_id.clone(), + messages: DeliveredMessages::new(1, true), + }, + UnrewardedRelayer { + relayer: relayer2_id.clone(), + messages: DeliveredMessages::new(2, true), + }, ].into_iter().collect(), last_confirmed_nonce: 0, }, @@ -569,6 +642,7 @@ benchmarks_instance! { message_nonces: 21..=(20 + i as MessageNonce), outbound_lane_data: None, size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof( RawOrigin::Signed(relayer_id_on_target), @@ -606,6 +680,7 @@ benchmarks_instance! { message_nonces: 21..=21, outbound_lane_data: None, size: ProofSize::HasExtraNodes(i as _), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof( RawOrigin::Signed(relayer_id_on_target), @@ -643,6 +718,7 @@ benchmarks_instance! { message_nonces: 21..=21, outbound_lane_data: None, size: ProofSize::HasLargeLeaf(i as _), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof( RawOrigin::Signed(relayer_id_on_target), @@ -686,6 +762,7 @@ benchmarks_instance! { latest_generated_nonce: 21, }), size: ProofSize::Minimal(0), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, }); }: receive_messages_proof( RawOrigin::Signed(relayer_id_on_target), @@ -728,10 +805,17 @@ benchmarks_instance! 
{ messages_in_oldest_entry: 1, total_messages: i as MessageNonce, }; + let mut delivered_messages = DeliveredMessages::new(1, true); + for nonce in 2..=i { + delivered_messages.note_dispatched_message(true); + } let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { lane: T::bench_lane_id(), inbound_lane_data: InboundLaneData { - relayers: vec![(1, i as MessageNonce, relayer_id.clone())].into_iter().collect(), + relayers: vec![UnrewardedRelayer { + relayer: relayer_id.clone(), + messages: delivered_messages, + }].into_iter().collect(), last_confirmed_nonce: 0, }, size: ProofSize::Minimal(0), @@ -776,7 +860,10 @@ benchmarks_instance! { relayers: relayers .keys() .enumerate() - .map(|(j, relayer_id)| (j as MessageNonce + 1, j as MessageNonce + 1, relayer_id.clone())) + .map(|(j, relayer)| UnrewardedRelayer { + relayer: relayer.clone(), + messages: DeliveredMessages::new(j as MessageNonce + 1, true), + }) .collect(), last_confirmed_nonce: 0, }, @@ -808,13 +895,29 @@ fn send_regular_message_with_payload, I: Instance>(payload: Vec fn confirm_message_delivery, I: Instance>(nonce: MessageNonce) { let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - assert!(outbound_lane.confirm_delivery(nonce).is_some()); + let latest_received_nonce = outbound_lane.data().latest_received_nonce; + let mut relayers = VecDeque::with_capacity((nonce - latest_received_nonce) as usize); + for nonce in latest_received_nonce + 1..=nonce { + relayers.push_back(UnrewardedRelayer { + relayer: (), + messages: DeliveredMessages::new(nonce, true), + }); + } + assert!(matches!( + outbound_lane.confirm_delivery(nonce, &relayers), + ReceivalConfirmationResult::ConfirmedMessages(_), + )); } fn receive_messages, I: Instance>(nonce: MessageNonce) { let mut inbound_lane_storage = inbound_lane_storage::(T::bench_lane_id()); inbound_lane_storage.set_data(InboundLaneData { - relayers: vec![(1, nonce, T::bridged_relayer_id())].into_iter().collect(), + relayers: vec![UnrewardedRelayer { + relayer: T::bridged_relayer_id(), + messages: DeliveredMessages::new(nonce, true), + }] + .into_iter() + .collect(), last_confirmed_nonce: 0, }); } diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index b5576bc30a1e..83d17dc3c06c 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -18,8 +18,10 @@ use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, + DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, UnrewardedRelayer, }; +use bp_runtime::messages::MessageDispatchResult; +use frame_support::RuntimeDebug; use sp_std::prelude::PartialEq; /// Inbound lane storage. @@ -27,7 +29,7 @@ pub trait InboundLaneStorage { /// Delivery and dispatch fee type on source chain. type MessageFee; /// Id of relayer on source chain. - type Relayer: PartialEq; + type Relayer: Clone + PartialEq; /// Lane id. fn id(&self) -> LaneId; @@ -41,6 +43,22 @@ pub trait InboundLaneStorage { fn set_data(&mut self, data: InboundLaneData); } +/// Result of single message receival. +#[derive(RuntimeDebug, PartialEq, Eq)] +pub enum ReceivalResult { + /// Message has been received and dispatched. Note that we don't care whether dispatch has + /// been successful or not - in both case message falls into this category. + /// + /// The message dispatch result is also returned. 
+ Dispatched(MessageDispatchResult), + /// Message has invalid nonce and lane has rejected to accept this message. + InvalidNonce, + /// There are too many unrewarded relayer entires at the lane. + TooManyUnrewardedRelayers, + /// There are too many unconfirmed messages at the lane. + TooManyUnconfirmedMessages, +} + /// Inbound messages lane. pub struct InboundLane { storage: S, @@ -71,7 +89,7 @@ impl InboundLane { while data .relayers .front() - .map(|(_, nonce_high, _)| *nonce_high <= new_confirmed_nonce) + .map(|entry| entry.messages.end <= new_confirmed_nonce) .unwrap_or(false) { data.relayers.pop_front(); @@ -79,8 +97,12 @@ impl InboundLane { // Secondly, update the next record with lower nonce equal to new confirmed nonce if needed. // Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap. match data.relayers.front_mut() { - Some((nonce_low, _, _)) if *nonce_low < new_confirmed_nonce => { - *nonce_low = new_confirmed_nonce + 1; + Some(entry) if entry.messages.begin < new_confirmed_nonce => { + entry.messages.dispatch_results = entry + .messages + .dispatch_results + .split_off((new_confirmed_nonce + 1 - entry.messages.begin) as _); + entry.messages.begin = new_confirmed_nonce + 1; } _ => {} } @@ -90,51 +112,61 @@ impl InboundLane { } /// Receive new message. - pub fn receive_message>( + pub fn receive_message, AccountId>( &mut self, - relayer: S::Relayer, + relayer_at_bridged_chain: &S::Relayer, + relayer_at_this_chain: &AccountId, nonce: MessageNonce, message_data: DispatchMessageData, - ) -> bool { + ) -> ReceivalResult { let mut data = self.storage.data(); let is_correct_message = nonce == data.last_delivered_nonce() + 1; if !is_correct_message { - return false; + return ReceivalResult::InvalidNonce; } // if there are more unrewarded relayer entries than we may accept, reject this message if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return false; + return ReceivalResult::TooManyUnrewardedRelayers; } // if there are more unconfirmed messages than we may accept, reject this message let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return false; + return ReceivalResult::TooManyUnconfirmedMessages; } + // dispatch message before updating anything in the storage. If dispatch would panic, + // (which should not happen in the runtime) then we simply won't consider message as + // delivered (no changes to the inbound lane storage have been made). 
+ let dispatch_result = P::dispatch( + relayer_at_this_chain, + DispatchMessage { + key: MessageKey { + lane_id: self.storage.id(), + nonce, + }, + data: message_data, + }, + ); + + // now let's update inbound lane storage let push_new = match data.relayers.back_mut() { - Some((_, nonce_high, last_relayer)) if last_relayer == &relayer => { - *nonce_high = nonce; + Some(entry) if entry.relayer == *relayer_at_bridged_chain => { + entry.messages.note_dispatched_message(dispatch_result.dispatch_result); false } _ => true, }; if push_new { - data.relayers.push_back((nonce, nonce, relayer)); + data.relayers.push_back(UnrewardedRelayer { + relayer: (*relayer_at_bridged_chain).clone(), + messages: DeliveredMessages::new(nonce, dispatch_result.dispatch_result), + }); } - self.storage.set_data(data); - P::dispatch(DispatchMessage { - key: MessageKey { - lane_id: self.storage.id(), - nonce, - }, - data: message_data, - }); - - true + ReceivalResult::Dispatched(dispatch_result) } } @@ -144,8 +176,8 @@ mod tests { use crate::{ inbound_lane, mock::{ - message_data, run_test, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, - TEST_RELAYER_B, TEST_RELAYER_C, + dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, TestRuntime, + REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, }, DefaultInstance, RuntimeInboundLaneStorage, }; @@ -154,11 +186,15 @@ mod tests { lane: &mut InboundLane>, nonce: MessageNonce, ) { - assert!(lane.receive_message::( - TEST_RELAYER_A, - nonce, - message_data(REGULAR_PAYLOAD).into() - )); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + nonce, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); } #[test] @@ -213,7 +249,10 @@ mod tests { receive_regular_message(&mut lane, 2); receive_regular_message(&mut lane, 3); assert_eq!(lane.storage.data().last_confirmed_nonce, 0); - assert_eq!(lane.storage.data().relayers, vec![(1, 3, TEST_RELAYER_A)]); + assert_eq!( + lane.storage.data().relayers, + vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)] + ); assert_eq!( lane.receive_state_update(OutboundLaneData { @@ -223,7 +262,10 @@ mod tests { Some(2), ); assert_eq!(lane.storage.data().last_confirmed_nonce, 2); - assert_eq!(lane.storage.data().relayers, vec![(3, 3, TEST_RELAYER_A)]); + assert_eq!( + lane.storage.data().relayers, + vec![unrewarded_relayer(3, 3, TEST_RELAYER_A)] + ); assert_eq!( lane.receive_state_update(OutboundLaneData { @@ -244,10 +286,16 @@ mod tests { let mut seed_storage_data = lane.storage.data(); // Prepare data seed_storage_data.last_confirmed_nonce = 0; - seed_storage_data.relayers.push_back((1, 1, TEST_RELAYER_A)); + seed_storage_data + .relayers + .push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A)); // Simulate messages batch (2, 3, 4) from relayer #2 - seed_storage_data.relayers.push_back((2, 4, TEST_RELAYER_B)); - seed_storage_data.relayers.push_back((5, 5, TEST_RELAYER_C)); + seed_storage_data + .relayers + .push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B)); + seed_storage_data + .relayers + .push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C)); lane.storage.set_data(seed_storage_data); // Check assert_eq!( @@ -260,7 +308,10 @@ mod tests { assert_eq!(lane.storage.data().last_confirmed_nonce, 3); assert_eq!( lane.storage.data().relayers, - vec![(4, 4, TEST_RELAYER_B), (5, 5, TEST_RELAYER_C)] + vec![ + unrewarded_relayer(4, 4, TEST_RELAYER_B), + unrewarded_relayer(5, 5, TEST_RELAYER_C) + ] ); }); } @@ 
-269,11 +320,15 @@ mod tests { fn fails_to_receive_message_with_incorrect_nonce() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - assert!(!lane.receive_message::( - TEST_RELAYER_A, - 10, - message_data(REGULAR_PAYLOAD).into() - )); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + 10, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::InvalidNonce + ); assert_eq!(lane.storage.data().last_delivered_nonce(), 0); }); } @@ -284,29 +339,35 @@ mod tests { let mut lane = inbound_lane::(TEST_LANE_ID); let max_nonce = ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); for current_nonce in 1..max_nonce + 1 { - assert!(lane.receive_message::( - TEST_RELAYER_A + current_nonce, - current_nonce, - message_data(REGULAR_PAYLOAD).into() - )); + assert_eq!( + lane.receive_message::( + &(TEST_RELAYER_A + current_nonce), + &(TEST_RELAYER_A + current_nonce), + current_nonce, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); } // Fails to dispatch new message from different than latest relayer. assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_A + max_nonce + 1, + lane.receive_message::( + &(TEST_RELAYER_A + max_nonce + 1), + &(TEST_RELAYER_A + max_nonce + 1), max_nonce + 1, message_data(REGULAR_PAYLOAD).into() - ) + ), + ReceivalResult::TooManyUnrewardedRelayers, ); // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_A + max_nonce, + lane.receive_message::( + &(TEST_RELAYER_A + max_nonce), + &(TEST_RELAYER_A + max_nonce), max_nonce + 1, message_data(REGULAR_PAYLOAD).into() - ) + ), + ReceivalResult::TooManyUnrewardedRelayers, ); }); } @@ -317,29 +378,35 @@ mod tests { let mut lane = inbound_lane::(TEST_LANE_ID); let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); for current_nonce in 1..=max_nonce { - assert!(lane.receive_message::( - TEST_RELAYER_A, - current_nonce, - message_data(REGULAR_PAYLOAD).into() - )); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + current_nonce, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); } // Fails to dispatch new message from different than latest relayer. assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_B, + lane.receive_message::( + &TEST_RELAYER_B, + &TEST_RELAYER_B, max_nonce + 1, message_data(REGULAR_PAYLOAD).into() - ) + ), + ReceivalResult::TooManyUnconfirmedMessages, ); // Fails to dispatch new messages from latest relayer. 
assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_A, + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, max_nonce + 1, message_data(REGULAR_PAYLOAD).into() - ) + ), + ReceivalResult::TooManyUnconfirmedMessages, ); }); } @@ -348,24 +415,40 @@ mod tests { fn correctly_receives_following_messages_from_two_relayers_alternately() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - assert!(lane.receive_message::( - TEST_RELAYER_A, - 1, - message_data(REGULAR_PAYLOAD).into() - )); - assert!(lane.receive_message::( - TEST_RELAYER_B, - 2, - message_data(REGULAR_PAYLOAD).into() - )); - assert!(lane.receive_message::( - TEST_RELAYER_A, - 3, - message_data(REGULAR_PAYLOAD).into() - )); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + 1, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_B, + &TEST_RELAYER_B, + 2, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + 3, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); assert_eq!( lane.storage.data().relayers, - vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B), (3, 3, TEST_RELAYER_A)] + vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + unrewarded_relayer(3, 3, TEST_RELAYER_A) + ] ); }); } @@ -374,14 +457,23 @@ mod tests { fn rejects_same_message_from_two_different_relayers() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - assert!(lane.receive_message::( - TEST_RELAYER_A, - 1, - message_data(REGULAR_PAYLOAD).into() - )); assert_eq!( - false, - lane.receive_message::(TEST_RELAYER_B, 1, message_data(REGULAR_PAYLOAD).into()) + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + 1, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::Dispatched(dispatch_result(0)) + ); + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_B, + &TEST_RELAYER_B, + 1, + message_data(REGULAR_PAYLOAD).into() + ), + ReceivalResult::InvalidNonce, ); }); } @@ -394,4 +486,22 @@ mod tests { assert_eq!(lane.storage.data().last_delivered_nonce(), 1); }); } + + #[test] + fn unspent_weight_is_returned_by_receive_message() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + let mut payload = REGULAR_PAYLOAD; + payload.dispatch_result.unspent_weight = 1; + assert_eq!( + lane.receive_message::( + &TEST_RELAYER_A, + &TEST_RELAYER_A, + 1, + message_data(payload).into() + ), + ReceivalResult::Dispatched(dispatch_result(1)) + ); + }); + } } diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index 15e8c7166baa..a5f94c1eda81 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -34,28 +34,34 @@ //! or some benchmarks assumptions are broken for your runtime. 
#![cfg_attr(not(feature = "std"), no_std)] +// Generated by `decl_event!` +#![allow(clippy::unused_unit)] pub use crate::weights_ext::{ ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, }; -use crate::inbound_lane::{InboundLane, InboundLaneStorage}; -use crate::outbound_lane::{OutboundLane, OutboundLaneStorage}; +use crate::inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult}; +use crate::outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult}; use crate::weights::WeightInfo; use bp_messages::{ - source_chain::{LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, TargetHeaderChain}, + source_chain::{ + LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, TargetHeaderChain, + }, target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - total_unrewarded_messages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, MessagePayload, - OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState, + total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, + OperatingMode, OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState, }; use bp_runtime::Size; use codec::{Decode, Encode}; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, ensure, + decl_error, decl_event, decl_module, decl_storage, + dispatch::DispatchResultWithPostInfo, + ensure, fail, traits::Get, - weights::{DispatchClass, Weight}, + weights::{DispatchClass, Pays, PostDispatchInfo, Weight}, Parameter, StorageMap, }; use frame_system::{ensure_signed, RawOrigin}; @@ -142,13 +148,19 @@ pub trait Config: frame_system::Config { type LaneMessageVerifier: LaneMessageVerifier; /// Message delivery payment. type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment; + /// Handler for delivered messages. + type OnDeliveryConfirmed: OnDeliveryConfirmed; // Types that are used by inbound_lane (on target chain). /// Source header chain, as it is represented on target chain. type SourceHeaderChain: SourceHeaderChain; /// Message dispatch. - type MessageDispatch: MessageDispatch; + type MessageDispatch: MessageDispatch< + Self::AccountId, + Self::InboundMessageFee, + DispatchPayload = Self::InboundPayload, + >; } /// Shortcut to messages proof type for Config. @@ -178,6 +190,8 @@ decl_error! { InvalidMessagesDispatchWeight, /// Invalid messages delivery proof has been submitted. InvalidMessagesDeliveryProof, + /// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane). + InvalidUnrewardedRelayers, /// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call. InvalidUnrewardedRelayersState, /// The message someone is trying to work with (i.e. increase fee) is already-delivered. @@ -196,8 +210,10 @@ decl_storage! { /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt /// flag directly or call the `halt_operations`). pub PalletOwner get(fn module_owner): Option; - /// If true, all pallet transactions are failed immediately. - pub IsHalted get(fn is_halted) config(): bool; + /// The current operating mode of the pallet. + /// + /// Depending on the mode either all, some, or no transactions will be allowed. 
+ pub PalletOperatingMode get(fn operating_mode) config(): OperatingMode; /// Map of lane id => inbound lane data. pub InboundLanes: map hasher(blake2_128_concat) LaneId => InboundLaneData; /// Map of lane id => outbound lane data. @@ -226,8 +242,8 @@ decl_event!( ParameterUpdated(Parameter), /// Message has been accepted and is waiting to be delivered. MessageAccepted(LaneId, MessageNonce), - /// Messages in the inclusive range have been delivered and processed by the bridged chain. - MessagesDelivered(LaneId, MessageNonce, MessageNonce), + /// Messages in the inclusive range have been delivered to the bridged chain. + MessagesDelivered(LaneId, DeliveredMessages), /// Phantom member, never used. Dummy(PhantomData<(AccountId, I)>), } @@ -264,19 +280,18 @@ decl_module! { } } - /// Halt or resume all pallet operations. + /// Halt or resume all/some pallet operations. /// /// May only be called either by root, or by `PalletOwner`. #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn set_operational(origin, operational: bool) { + pub fn set_operating_mode(origin, operating_mode: OperatingMode) { ensure_owner_or_root::(origin)?; - >::put(operational); - - if operational { - log::info!(target: "runtime::bridge-messages", "Resuming pallet operations."); - } else { - log::warn!(target: "runtime::bridge-messages", "Stopping pallet operations."); - } + >::put(operating_mode); + log::info!( + target: "runtime::bridge-messages", + "Setting messages pallet operating mode to {:?}.", + operating_mode, + ); } /// Update pallet parameter. @@ -299,7 +314,7 @@ decl_module! { payload: T::OutboundPayload, delivery_and_dispatch_fee: T::OutboundMessageFee, ) -> DispatchResult { - ensure_operational::()?; + ensure_normal_operating_mode::()?; let submitter = origin.into().map_err(|_| BadOrigin)?; // let's first check if message can be delivered to target chain @@ -382,6 +397,7 @@ decl_module! { nonce: MessageNonce, additional_fee: T::OutboundMessageFee, ) -> DispatchResult { + ensure_not_halted::()?; // if someone tries to pay for already-delivered message, we're rejecting this intention // (otherwise this additional fee will be locked forever in relayers fund) // @@ -434,13 +450,13 @@ decl_module! { #[weight = T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight)] pub fn receive_messages_proof( origin, - relayer_id: T::InboundRelayer, + relayer_id_at_bridged_chain: T::InboundRelayer, proof: MessagesProofOf, messages_count: u32, dispatch_weight: Weight, - ) -> DispatchResult { - ensure_operational::()?; - let _ = ensure_signed(origin)?; + ) -> DispatchResultWithPostInfo { + ensure_not_halted::()?; + let relayer_id_at_this_chain = ensure_signed(origin)?; // reject transactions that are declaring too many messages ensure!( @@ -448,6 +464,23 @@ decl_module! { Error::::TooManyMessagesInTheProof ); + // why do we need to know the weight of this (`receive_messages_proof`) call? Because + // we may want to return some funds for not-dispatching (or partially dispatching) some + // messages to the call origin (relayer). And this is done by returning actual weight + // from the call. But we only know dispatch weight of every messages. So to refund relayer + // because we have not dispatched Message, we need to: + // + // ActualWeight = DeclaredWeight - Message.DispatchWeight + // + // The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible + // to get pre-computed value (and it has been already computed by the executive). 
+ let declared_weight = T::WeightInfo::receive_messages_proof_weight( + &proof, + messages_count, + dispatch_weight, + ); + let mut actual_weight = declared_weight; + // verify messages proof && convert proof into messages let messages = verify_and_decode_messages_proof::< T::SourceHeaderChain, @@ -507,20 +540,57 @@ decl_module! { debug_assert_eq!(message.key.lane_id, lane_id); total_messages += 1; - if lane.receive_message::(relayer_id.clone(), message.key.nonce, message.data) { - valid_messages += 1; - } + let dispatch_weight = T::MessageDispatch::dispatch_weight(&message); + let receival_result = lane.receive_message::( + &relayer_id_at_bridged_chain, + &relayer_id_at_this_chain, + message.key.nonce, + message.data, + ); + + // note that we're returning unspent weight to relayer even if message has been + // rejected by the lane. This allows relayers to submit spam transactions with + // e.g. the same set of already delivered messages over and over again, without + // losing funds for messages dispatch. But keep in mind that relayer pays base + // delivery transaction cost anyway. And base cost covers everything except + // dispatch, so we have a balance here. + let (unspent_weight, refund_pay_dispatch_fee) = match receival_result { + ReceivalResult::Dispatched(dispatch_result) => { + valid_messages += 1; + (dispatch_result.unspent_weight, !dispatch_result.dispatch_fee_paid_during_dispatch) + }, + ReceivalResult::InvalidNonce + | ReceivalResult::TooManyUnrewardedRelayers + | ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true), + }; + actual_weight = actual_weight + .saturating_sub(sp_std::cmp::min(unspent_weight, dispatch_weight)) + .saturating_sub( + // delivery call weight formula assumes that the fee is paid at + // this (target) chain. If the message is prepaid at the source + // chain, let's refund relayer with this extra cost. + if refund_pay_dispatch_fee { + T::WeightInfo::pay_inbound_dispatch_fee_overhead() + } else { + 0 + } + ); } } log::trace!( target: "runtime::bridge-messages", - "Received messages: total={}, valid={}", + "Received messages: total={}, valid={}. Weight used: {}/{}", total_messages, valid_messages, + actual_weight, + declared_weight, ); - Ok(()) + Ok(PostDispatchInfo { + actual_weight: Some(actual_weight), + pays_fee: Pays::Yes, + }) } /// Receive messages delivery proof from bridged chain. @@ -530,7 +600,7 @@ decl_module! { proof: MessagesDeliveryProofOf, relayers_state: UnrewardedRelayersState, ) -> DispatchResult { - ensure_operational::()?; + ensure_not_halted::()?; let confirmation_relayer = ensure_signed(origin)?; let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| { @@ -556,19 +626,36 @@ decl_module! 
{ let mut lane = outbound_lane::(lane_id); let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new(); let last_delivered_nonce = lane_data.last_delivered_nonce(); - let received_range = lane.confirm_delivery(last_delivered_nonce); - if let Some(received_range) = received_range { - Self::deposit_event(RawEvent::MessagesDelivered(lane_id, received_range.0, received_range.1)); + let confirmed_messages = match lane.confirm_delivery(last_delivered_nonce, &lane_data.relayers) { + ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => Some(confirmed_messages), + ReceivalConfirmationResult::NoNewConfirmations => None, + error => { + log::trace!( + target: "runtime::bridge-messages", + "Messages delivery proof contains invalid unrewarded relayers vec: {:?}", + error, + ); + + fail!(Error::::InvalidUnrewardedRelayers); + }, + }; + if let Some(confirmed_messages) = confirmed_messages { + // handle messages delivery confirmation + T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages); + + // emit 'delivered' event + let received_range = confirmed_messages.begin..=confirmed_messages.end; + Self::deposit_event(RawEvent::MessagesDelivered(lane_id, confirmed_messages)); // remember to reward relayers that have delivered messages // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain - for (nonce_low, nonce_high, relayer) in lane_data.relayers { - let nonce_begin = sp_std::cmp::max(nonce_low, received_range.0); - let nonce_end = sp_std::cmp::min(nonce_high, received_range.1); + for entry in lane_data.relayers { + let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); + let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end()); // loop won't proceed if current entry is ahead of received range (begin > end). // this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain - let mut relayer_reward = relayers_rewards.entry(relayer).or_default(); + let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default(); for nonce in nonce_begin..nonce_end + 1 { let message_data = OutboundMessages::::get(MessageKey { lane_id, @@ -603,9 +690,9 @@ decl_module! { } impl, I: Instance> Pallet { - /// Get payload of given outbound message. - pub fn outbound_message_payload(lane: LaneId, nonce: MessageNonce) -> Option { - OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(|message_data| message_data.payload) + /// Get stored data of the outbound message with given nonce. + pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option> { + OutboundMessages::::get(MessageKey { lane_id: lane, nonce }) } /// Get nonce of latest generated message at given outbound lane. @@ -633,7 +720,10 @@ impl, I: Instance> Pallet { let relayers = InboundLanes::::get(&lane).relayers; bp_messages::UnrewardedRelayersState { unrewarded_relayer_entries: relayers.len() as _, - messages_in_oldest_entry: relayers.front().map(|(begin, end, _)| 1 + end - begin).unwrap_or(0), + messages_in_oldest_entry: relayers + .front() + .map(|entry| 1 + entry.messages.end - entry.messages.begin) + .unwrap_or(0), total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), } } @@ -665,24 +755,38 @@ impl, I: Instance> Pallet { /// trying to avoid here) - by using strings like "Instance2", "OutboundMessages", etc. 
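The keys computed by the module below follow the standard FRAME map layout: Twox128(pallet instance prefix) ++ Twox128(storage item name) ++ Blake2_128Concat(SCALE-encoded key). A rough standalone sketch of that composition (not part of this diff; it assumes the hashers re-exported from `frame_support`):

use frame_support::{Blake2_128Concat, StorageHasher, Twox128};

/// Compose the final storage key of a map entry, e.g. with `pallet_prefix = "Instance2"`
/// and `map_name = "OutboundMessages"`.
fn final_map_key(pallet_prefix: &str, map_name: &str, encoded_key: &[u8]) -> Vec<u8> {
    let pallet_prefix_hashed = Twox128::hash(pallet_prefix.as_bytes());
    let map_name_hashed = Twox128::hash(map_name.as_bytes());
    let key_hashed = Blake2_128Concat::hash(encoded_key);

    let mut key = Vec::with_capacity(pallet_prefix_hashed.len() + map_name_hashed.len() + key_hashed.len());
    key.extend_from_slice(&pallet_prefix_hashed[..]);
    key.extend_from_slice(&map_name_hashed[..]);
    key.extend_from_slice(key_hashed.as_ref());
    key
}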
pub mod storage_keys { use super::*; - use frame_support::storage::generator::StorageMap; + use frame_support::{traits::Instance, StorageHasher}; use sp_core::storage::StorageKey; /// Storage key of the outbound message in the runtime storage. - pub fn message_key, I: Instance>(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - let message_key = MessageKey { lane_id: *lane, nonce }; - let raw_storage_key = OutboundMessages::::storage_map_final_key(message_key); - StorageKey(raw_storage_key) + pub fn message_key(lane: &LaneId, nonce: MessageNonce) -> StorageKey { + storage_map_final_key::("OutboundMessages", &MessageKey { lane_id: *lane, nonce }.encode()) } /// Storage key of the outbound message lane state in the runtime storage. pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - StorageKey(OutboundLanes::::storage_map_final_key(*lane)) + storage_map_final_key::("OutboundLanes", lane) } /// Storage key of the inbound message lane state in the runtime storage. - pub fn inbound_lane_data_key, I: Instance>(lane: &LaneId) -> StorageKey { - StorageKey(InboundLanes::::storage_map_final_key(*lane)) + pub fn inbound_lane_data_key(lane: &LaneId) -> StorageKey { + storage_map_final_key::("InboundLanes", lane) + } + + /// This is a copypaste of the `frame_support::storage::generator::StorageMap::storage_map_final_key`. + fn storage_map_final_key(map_name: &str, key: &[u8]) -> StorageKey { + let module_prefix_hashed = frame_support::Twox128::hash(I::PREFIX.as_bytes()); + let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes()); + let key_hashed = frame_support::Blake2_128Concat::hash(key); + + let mut final_key = + Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len()); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key_hashed.as_ref()); + + StorageKey(final_key) } } @@ -695,9 +799,18 @@ fn ensure_owner_or_root, I: Instance>(origin: T::Origin) -> Result< } } -/// Ensure that the pallet is in operational mode (not halted). -fn ensure_operational, I: Instance>() -> Result<(), Error> { - if IsHalted::::get() { +/// Ensure that the pallet is in normal operational mode. +fn ensure_normal_operating_mode, I: Instance>() -> Result<(), Error> { + if PalletOperatingMode::::get() != OperatingMode::Normal { + Err(Error::::Halted) + } else { + Ok(()) + } +} + +/// Ensure that the pallet is not halted. 
+fn ensure_not_halted, I: Instance>() -> Result<(), Error> { + if PalletOperatingMode::::get() == OperatingMode::Halted { Err(Error::::Halted) } else { Ok(()) @@ -847,12 +960,12 @@ fn verify_and_decode_messages_proof, Fee, Dispatch mod tests { use super::*; use crate::mock::{ - message, run_test, Event as TestEvent, Origin, TestMessageDeliveryAndDispatchPayment, - TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof, TestPayload, TestRuntime, - TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, - TEST_RELAYER_B, + message, message_payload, run_test, unrewarded_relayer, Event as TestEvent, Origin, + TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof, + TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, + TEST_RELAYER_A, TEST_RELAYER_B, }; - use bp_messages::UnrewardedRelayersState; + use bp_messages::{UnrewardedRelayer, UnrewardedRelayersState}; use frame_support::{assert_noop, assert_ok}; use frame_system::{EventRecord, Pallet as System, Phase}; use hex_literal::hex; @@ -866,11 +979,15 @@ mod tests { fn send_regular_message() { get_ready_for_events(); + let message_nonce = outbound_lane::(TEST_LANE_ID) + .data() + .latest_generated_nonce + + 1; assert_ok!(Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, - REGULAR_PAYLOAD.1, + REGULAR_PAYLOAD.declared_weight, )); // check event with assigned nonce @@ -878,13 +995,16 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::Messages(RawEvent::MessageAccepted(TEST_LANE_ID, 1)), + event: TestEvent::Messages(RawEvent::MessageAccepted(TEST_LANE_ID, message_nonce)), topics: vec![], }], ); // check that fee has been withdrawn from submitter - assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, REGULAR_PAYLOAD.1)); + assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid( + 1, + REGULAR_PAYLOAD.declared_weight + )); } fn receive_messages_delivery_proof() { @@ -897,17 +1017,29 @@ mod tests { TEST_LANE_ID, InboundLaneData { last_confirmed_nonce: 1, - ..Default::default() + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: DeliveredMessages::new(1, true), + }] + .into_iter() + .collect(), }, ))), - Default::default(), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, )); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::Messages(RawEvent::MessagesDelivered(TEST_LANE_ID, 1, 1)), + event: TestEvent::Messages(RawEvent::MessagesDelivered( + TEST_LANE_ID, + DeliveredMessages::new(1, true), + )), topics: vec![], }], ); @@ -920,29 +1052,41 @@ mod tests { assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); assert_noop!( - Pallet::::set_operational(Origin::signed(2), false), + Pallet::::set_operating_mode(Origin::signed(2), OperatingMode::Halted), DispatchError::BadOrigin, ); - assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_ok!(Pallet::::set_operating_mode( + Origin::root(), + OperatingMode::Halted + )); assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); assert_noop!( - Pallet::::set_operational(Origin::signed(1), true), + Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Normal), DispatchError::BadOrigin, ); assert_noop!( - Pallet::::set_operational(Origin::signed(2), true), + Pallet::::set_operating_mode(Origin::signed(2), 
OperatingMode::Normal), DispatchError::BadOrigin, ); - assert_ok!(Pallet::::set_operational(Origin::root(), true)); + assert_ok!(Pallet::::set_operating_mode( + Origin::root(), + OperatingMode::Normal + )); }); } #[test] fn pallet_may_be_halted_by_root() { run_test(|| { - assert_ok!(Pallet::::set_operational(Origin::root(), false)); - assert_ok!(Pallet::::set_operational(Origin::root(), true)); + assert_ok!(Pallet::::set_operating_mode( + Origin::root(), + OperatingMode::Halted + )); + assert_ok!(Pallet::::set_operating_mode( + Origin::root(), + OperatingMode::Normal + )); }); } @@ -951,21 +1095,30 @@ mod tests { run_test(|| { PalletOwner::::put(2); - assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); - assert_ok!(Pallet::::set_operational(Origin::signed(2), true)); + assert_ok!(Pallet::::set_operating_mode( + Origin::signed(2), + OperatingMode::Halted + )); + assert_ok!(Pallet::::set_operating_mode( + Origin::signed(2), + OperatingMode::Normal + )); assert_noop!( - Pallet::::set_operational(Origin::signed(1), false), + Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Halted), DispatchError::BadOrigin, ); assert_noop!( - Pallet::::set_operational(Origin::signed(1), true), + Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Normal), DispatchError::BadOrigin, ); - assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_ok!(Pallet::::set_operating_mode( + Origin::signed(2), + OperatingMode::Halted + )); assert_noop!( - Pallet::::set_operational(Origin::signed(1), true), + Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Normal), DispatchError::BadOrigin, ); }); @@ -1072,25 +1225,30 @@ mod tests { // send message first to be able to check that delivery_proof fails later send_regular_message(); - IsHalted::::put(true); + PalletOperatingMode::::put(OperatingMode::Halted); assert_noop!( Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, - REGULAR_PAYLOAD.1, + REGULAR_PAYLOAD.declared_weight, ), Error::::Halted, ); + assert_noop!( + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1,), + Error::::Halted, + ); + assert_noop!( Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), 1, - REGULAR_PAYLOAD.1, + REGULAR_PAYLOAD.declared_weight, ), Error::::Halted, ); @@ -1112,6 +1270,53 @@ mod tests { }); } + #[test] + fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(); + + PalletOperatingMode::::put(OperatingMode::RejectingOutboundMessages); + + assert_noop!( + Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + REGULAR_PAYLOAD.declared_weight, + ), + Error::::Halted, + ); + + assert_ok!(Pallet::::increase_message_fee( + Origin::signed(1), + TEST_LANE_ID, + 1, + 1, + )); + + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), + 1, + REGULAR_PAYLOAD.declared_weight, + ),); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + ..Default::default() + }, + ))), + Default::default(), + )); + }); + } + #[test] fn send_message_works() { run_test(|| { @@ -1128,7 +1333,7 @@ mod tests { Origin::signed(1), TEST_LANE_ID, PAYLOAD_REJECTED_BY_TARGET_CHAIN, - 
PAYLOAD_REJECTED_BY_TARGET_CHAIN.1 + PAYLOAD_REJECTED_BY_TARGET_CHAIN.declared_weight ), Error::::MessageRejectedByChainVerifier, ); @@ -1155,7 +1360,7 @@ mod tests { Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, - REGULAR_PAYLOAD.1 + REGULAR_PAYLOAD.declared_weight ), Error::::FailedToWithdrawMessageFee, ); @@ -1170,7 +1375,7 @@ mod tests { TEST_RELAYER_A, Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), 1, - REGULAR_PAYLOAD.1, + REGULAR_PAYLOAD.declared_weight, )); assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1); @@ -1185,9 +1390,12 @@ mod tests { TEST_LANE_ID, InboundLaneData { last_confirmed_nonce: 8, - relayers: vec![(9, 9, TEST_RELAYER_A), (10, 10, TEST_RELAYER_B)] - .into_iter() - .collect(), + relayers: vec![ + unrewarded_relayer(9, 9, TEST_RELAYER_A), + unrewarded_relayer(10, 10, TEST_RELAYER_B), + ] + .into_iter() + .collect(), }, ); assert_eq!( @@ -1211,16 +1419,19 @@ mod tests { TEST_RELAYER_A, message_proof, 1, - REGULAR_PAYLOAD.1, + REGULAR_PAYLOAD.declared_weight, )); assert_eq!( InboundLanes::::get(TEST_LANE_ID), InboundLaneData { last_confirmed_nonce: 9, - relayers: vec![(10, 10, TEST_RELAYER_B), (11, 11, TEST_RELAYER_A)] - .into_iter() - .collect(), + relayers: vec![ + unrewarded_relayer(10, 10, TEST_RELAYER_B), + unrewarded_relayer(11, 11, TEST_RELAYER_A) + ] + .into_iter() + .collect(), }, ); assert_eq!( @@ -1243,7 +1454,7 @@ mod tests { TEST_RELAYER_A, Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), 1, - REGULAR_PAYLOAD.1 - 1, + REGULAR_PAYLOAD.declared_weight - 1, ), Error::::InvalidMessagesDispatchWeight, ); @@ -1317,7 +1528,7 @@ mod tests { TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A)].into_iter().collect(), + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(), ..Default::default() } ))), @@ -1342,9 +1553,12 @@ mod tests { TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] - .into_iter() - .collect(), + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B) + ] + .into_iter() + .collect(), ..Default::default() } ))), @@ -1389,9 +1603,12 @@ mod tests { TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] - .into_iter() - .collect(), + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B) + ] + .into_iter() + .collect(), ..Default::default() } ))), @@ -1411,9 +1628,12 @@ mod tests { TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] - .into_iter() - .collect(), + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B) + ] + .into_iter() + .collect(), ..Default::default() } ))), @@ -1465,7 +1685,7 @@ mod tests { ]) .into(), 3, - REGULAR_PAYLOAD.1 + REGULAR_PAYLOAD.1, + REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, ),); assert_eq!( @@ -1479,7 +1699,7 @@ mod tests { fn storage_message_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking all // previously crafted messages proofs. 
- let storage_key = storage_keys::message_key::(&*b"test", 42).0; + let storage_key = storage_keys::message_key::(&*b"test", 42).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), @@ -1505,7 +1725,7 @@ mod tests { fn inbound_lane_data_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking all // previously crafted inbound lane state proofs. - let storage_key = storage_keys::inbound_lane_data_key::(&*b"test").0; + let storage_key = storage_keys::inbound_lane_data_key::(&*b"test").0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), @@ -1517,9 +1737,9 @@ mod tests { #[test] fn actual_dispatch_weight_does_not_overlow() { run_test(|| { - let message1 = message(1, TestPayload(0, Weight::MAX / 2)); - let message2 = message(2, TestPayload(0, Weight::MAX / 2)); - let message3 = message(2, TestPayload(0, Weight::MAX / 2)); + let message1 = message(1, message_payload(0, Weight::MAX / 2)); + let message2 = message(2, message_payload(0, Weight::MAX / 2)); + let message3 = message(2, message_payload(0, Weight::MAX / 2)); assert_noop!( Pallet::::receive_messages_proof( @@ -1586,4 +1806,127 @@ mod tests { assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, 100)); }); } + + #[test] + fn weight_refund_from_receive_messages_proof_works() { + run_test(|| { + fn submit_with_unspent_weight( + nonce: MessageNonce, + unspent_weight: Weight, + is_prepaid: bool, + ) -> (Weight, Weight) { + let mut payload = REGULAR_PAYLOAD; + payload.dispatch_result.unspent_weight = unspent_weight; + payload.dispatch_result.dispatch_fee_paid_during_dispatch = !is_prepaid; + let proof = Ok(vec![message(nonce, payload)]).into(); + let messages_count = 1; + let pre_dispatch_weight = ::WeightInfo::receive_messages_proof_weight( + &proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ); + let post_dispatch_weight = Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .expect("delivery has failed") + .actual_weight + .expect("receive_messages_proof always returns Some"); + + (pre_dispatch_weight, post_dispatch_weight) + } + + // when dispatch is returning `unspent_weight < declared_weight` + let (pre, post) = submit_with_unspent_weight(1, 1, false); + assert_eq!(post, pre - 1); + + // when dispatch is returning `unspent_weight = declared_weight` + let (pre, post) = submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight, false); + assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight); + + // when dispatch is returning `unspent_weight > declared_weight` + let (pre, post) = submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false); + assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight); + + // when there's no unspent weight + let (pre, post) = submit_with_unspent_weight(4, 0, false); + assert_eq!(post, pre); + + // when dispatch is returning `unspent_weight < declared_weight` AND message is prepaid + let (pre, post) = submit_with_unspent_weight(5, 1, true); + assert_eq!( + post, + pre - 1 - ::WeightInfo::pay_inbound_dispatch_fee_overhead() + ); + }); + } + + #[test] + fn messages_delivered_callbacks_are_called() { + run_test(|| { + send_regular_message(); + send_regular_message(); + send_regular_message(); + + // messages 1+2 are 
confirmed in 1 tx, message 3 in a separate tx + // dispatch of message 2 has failed + let mut delivered_messages_1_and_2 = DeliveredMessages::new(1, true); + delivered_messages_1_and_2.note_dispatched_message(false); + let messages_1_and_2_proof = Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 0, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: delivered_messages_1_and_2.clone(), + }] + .into_iter() + .collect(), + }, + )); + let delivered_message_3 = DeliveredMessages::new(3, true); + let messages_3_proof = Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 0, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: delivered_message_3.clone(), + }] + .into_iter() + .collect(), + }, + )); + + // first tx with messages 1+2 + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(messages_1_and_2_proof), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 2, + ..Default::default() + }, + )); + // second tx with message 3 + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(messages_3_proof), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, + )); + + // ensure that both callbacks have been called twice: for 1+2, then for 3 + crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); + crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3); + crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); + crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3); + }); + } } diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs index e640fa780542..2e184dda1585 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/mock.rs @@ -19,15 +19,17 @@ use crate::Config; +use bitvec::prelude::*; use bp_messages::{ source_chain::{ - LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, Sender, TargetHeaderChain, + LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, Sender, + TargetHeaderChain, }, target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, - Parameter as MessagesParameter, + DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, + Parameter as MessagesParameter, UnrewardedRelayer, }; -use bp_runtime::Size; +use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{parameter_types, weights::Weight}; use sp_core::H256; @@ -41,7 +43,17 @@ use std::collections::BTreeMap; pub type AccountId = u64; pub type Balance = u64; #[derive(Decode, Encode, Clone, Debug, PartialEq, Eq)] -pub struct TestPayload(pub u64, pub Weight); +pub struct TestPayload { + /// Field that may be used to identify messages. + pub id: u64, + /// Dispatch weight that is declared by the message sender. + pub declared_weight: Weight, + /// Message dispatch result. + /// + /// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, but for test + /// purposes we'll be making it larger than `declared_weight` sometimes. 
+ pub dispatch_result: MessageDispatchResult, +} pub type TestMessageFee = u64; pub type TestRelayer = u64; @@ -115,6 +127,8 @@ impl pallet_balances::Config for TestRuntime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type WeightInfo = (); + type MaxReserves = (); + type ReserveIdentifier = (); } parameter_types! { @@ -157,6 +171,7 @@ impl Config for TestRuntime { type TargetHeaderChain = TestTargetHeaderChain; type LaneMessageVerifier = TestLaneMessageVerifier; type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment; + type OnDeliveryConfirmed = (TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2); type SourceHeaderChain = TestSourceHeaderChain; type MessageDispatch = TestMessageDispatch; @@ -187,10 +202,10 @@ pub const TEST_ERROR: &str = "Test error"; pub const TEST_LANE_ID: LaneId = [0, 0, 0, 1]; /// Regular message payload. -pub const REGULAR_PAYLOAD: TestPayload = TestPayload(0, 50); +pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50); /// Payload that is rejected by `TestTargetHeaderChain`. -pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = TestPayload(1, 50); +pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50); /// Vec of proved messages, grouped by lane. pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages>)>; @@ -333,6 +348,44 @@ impl MessageDeliveryAndDispatchPayment for TestMessag } } +/// First on-messages-delivered callback. +#[derive(Debug)] +pub struct TestOnDeliveryConfirmed1; + +impl TestOnDeliveryConfirmed1 { + /// Verify that the callback has been called with given delivered messages. + pub fn ensure_called(lane: &LaneId, messages: &DeliveredMessages) { + let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode(); + assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); + } +} + +impl OnDeliveryConfirmed for TestOnDeliveryConfirmed1 { + fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) { + let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode(); + frame_support::storage::unhashed::put(&key, &true); + } +} + +/// Second on-messages-delivered callback. +#[derive(Debug)] +pub struct TestOnDeliveryConfirmed2; + +impl TestOnDeliveryConfirmed2 { + /// Verify that the callback has been called with given delivered messages. + pub fn ensure_called(lane: &LaneId, messages: &DeliveredMessages) { + let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode(); + assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); + } +} + +impl OnDeliveryConfirmed for TestOnDeliveryConfirmed2 { + fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) { + let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode(); + frame_support::storage::unhashed::put(&key, &true); + } +} + /// Source header chain that is used in tests.
#[derive(Debug)] pub struct TestSourceHeaderChain; @@ -357,17 +410,25 @@ impl SourceHeaderChain for TestSourceHeaderChain { #[derive(Debug)] pub struct TestMessageDispatch; -impl MessageDispatch for TestMessageDispatch { +impl MessageDispatch for TestMessageDispatch { type DispatchPayload = TestPayload; fn dispatch_weight(message: &DispatchMessage) -> Weight { match message.data.payload.as_ref() { - Ok(payload) => payload.1, + Ok(payload) => payload.declared_weight, Err(_) => 0, } } - fn dispatch(_message: DispatchMessage) {} + fn dispatch( + _relayer_account: &AccountId, + message: DispatchMessage, + ) -> MessageDispatchResult { + match message.data.payload.as_ref() { + Ok(payload) => payload.dispatch_result.clone(), + Err(_) => dispatch_result(0), + } + } } /// Return test lane message with given nonce and payload. @@ -381,6 +442,15 @@ pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message TestPayload { + TestPayload { + id, + declared_weight, + dispatch_result: dispatch_result(0), + } +} + /// Return message data with valid fee for given payload. pub fn message_data(payload: TestPayload) -> MessageData { MessageData { @@ -389,6 +459,35 @@ pub fn message_data(payload: TestPayload) -> MessageData { } } +/// Returns message dispatch result with given unspent weight. +pub const fn dispatch_result(unspent_weight: Weight) -> MessageDispatchResult { + MessageDispatchResult { + dispatch_result: true, + unspent_weight, + dispatch_fee_paid_during_dispatch: true, + } +} + +/// Constructs unrewarded relayer entry from nonces range and relayer id. +pub fn unrewarded_relayer( + begin: MessageNonce, + end: MessageNonce, + relayer: TestRelayer, +) -> UnrewardedRelayer { + UnrewardedRelayer { + relayer, + messages: DeliveredMessages { + begin, + end, + dispatch_results: if end >= begin { + bitvec![Msb0, u8; 1; (end - begin + 1) as _] + } else { + Default::default() + }, + }, + } +} + /// Run pallet test. pub fn run_test(test: impl FnOnce() -> T) -> T { let mut t = frame_system::GenesisConfig::default() diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index 47616c33eac8..44061d984e1d 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -16,7 +16,12 @@ //! Everything about outgoing messages sending. -use bp_messages::{LaneId, MessageData, MessageNonce, OutboundLaneData}; +use bitvec::prelude::*; +use bp_messages::{ + DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, +}; +use frame_support::RuntimeDebug; +use sp_std::collections::vec_deque::VecDeque; /// Outbound lane storage. pub trait OutboundLaneStorage { @@ -38,6 +43,28 @@ pub trait OutboundLaneStorage { fn remove_message(&mut self, nonce: &MessageNonce); } +/// Result of messages receival confirmation. +#[derive(RuntimeDebug, PartialEq, Eq)] +pub enum ReceivalConfirmationResult { + /// New messages have been confirmed by the confirmation transaction. + ConfirmedMessages(DeliveredMessages), + /// Confirmation transaction brings no new confirmation. This may be a result of relayer + /// error or several relayers running. + NoNewConfirmations, + /// Bridged chain is trying to confirm more messages than we have generated. May be a result + /// of invalid bridged chain storage. + FailedToConfirmFutureMessages, + /// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged + /// chain storage.
+ EmptyUnrewardedRelayerEntry, + /// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid bridged + /// chain storage. + NonConsecutiveUnrewardedRelayerEntries, + /// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May be + /// a result of invalid bridged chain storage. + InvalidNumberOfDispatchResults, +} + /// Outbound messages lane. pub struct OutboundLane { storage: S, @@ -69,20 +96,34 @@ impl OutboundLane { } /// Confirm messages delivery. - /// - /// Returns `None` if confirmation is wrong/duplicate. - /// Returns `Some` with inclusive ranges of message nonces that have been received. - pub fn confirm_delivery(&mut self, latest_received_nonce: MessageNonce) -> Option<(MessageNonce, MessageNonce)> { + pub fn confirm_delivery( + &mut self, + latest_received_nonce: MessageNonce, + relayers: &VecDeque>, + ) -> ReceivalConfirmationResult { let mut data = self.storage.data(); - if latest_received_nonce <= data.latest_received_nonce || latest_received_nonce > data.latest_generated_nonce { - return None; + if latest_received_nonce <= data.latest_received_nonce { + return ReceivalConfirmationResult::NoNewConfirmations; + } + if latest_received_nonce > data.latest_generated_nonce { + return ReceivalConfirmationResult::FailedToConfirmFutureMessages; } + let dispatch_results = + match extract_dispatch_results(data.latest_received_nonce, latest_received_nonce, relayers) { + Ok(dispatch_results) => dispatch_results, + Err(extract_error) => return extract_error, + }; + let prev_latest_received_nonce = data.latest_received_nonce; data.latest_received_nonce = latest_received_nonce; self.storage.set_data(data); - Some((prev_latest_received_nonce + 1, latest_received_nonce)) + ReceivalConfirmationResult::ConfirmedMessages(DeliveredMessages { + begin: prev_latest_received_nonce + 1, + end: latest_received_nonce, + dispatch_results, + }) } /// Prune at most `max_messages_to_prune` already received messages. @@ -108,13 +149,108 @@ impl OutboundLane { } } +/// Extract new dispatch results from the unrewarded relayers vec. +/// +/// Returns `Err(_)` if unrewarded relayers vec contains invalid data, meaning that the bridged +/// chain has invalid runtime storage. 
+fn extract_dispatch_results( + prev_latest_received_nonce: MessageNonce, + latest_received_nonce: MessageNonce, + relayers: &VecDeque>, +) -> Result { + // the only caller of this function checks that the prev_latest_received_nonce..=latest_received_nonce + // is valid, so we're ready to accept messages in this range + // => with_capacity call must succeed here or we'll be unable to receive confirmations at all + let mut received_dispatch_result = + BitVec::with_capacity((latest_received_nonce - prev_latest_received_nonce + 1) as _); + let mut last_entry_end: Option = None; + for entry in relayers { + // unrewarded relayer entry must have at least 1 unconfirmed message + // (guaranteed by the `InboundLane::receive_message()`) + if entry.messages.end < entry.messages.begin { + return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry); + } + // every entry must confirm range of messages that follows previous entry range + // (guaranteed by the `InboundLane::receive_message()`) + if let Some(last_entry_end) = last_entry_end { + let expected_entry_begin = last_entry_end.checked_add(1); + if expected_entry_begin != Some(entry.messages.begin) { + return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries); + } + } + last_entry_end = Some(entry.messages.end); + // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` + // (guaranteed by the `InboundLane::receive_message()`) + if entry.messages.end > latest_received_nonce { + // technically this will be detected in the next loop iteration as `InvalidNumberOfDispatchResults` + // but to guarantee safety of loop operations below this is detected now + return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages); + } + // entry must have single dispatch result for every message + // (guaranteed by the `InboundLane::receive_message()`) + if entry.messages.dispatch_results.len() as MessageNonce != entry.messages.end - entry.messages.begin + 1 { + return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults); + } + + // now we know that the entry is valid + // => let's check if it brings new confirmations + let new_messages_begin = sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1); + let new_messages_end = sp_std::cmp::min(entry.messages.end, latest_received_nonce); + let new_messages_range = new_messages_begin..=new_messages_end; + if new_messages_range.is_empty() { + continue; + } + + // now we know that entry brings new confirmations + // => let's extract dispatch results + received_dispatch_result.extend_from_bitslice( + &entry.messages.dispatch_results[(new_messages_begin - entry.messages.begin) as usize..], + ); + } + + Ok(received_dispatch_result) +} + #[cfg(test)] mod tests { use super::*; use crate::{ - mock::{message_data, run_test, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID}, + mock::{message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID}, outbound_lane, }; + use sp_std::ops::RangeInclusive; + + fn unrewarded_relayers(nonces: RangeInclusive) -> VecDeque> { + vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)] + .into_iter() + .collect() + } + + fn delivered_messages(nonces: RangeInclusive) -> DeliveredMessages { + DeliveredMessages { + begin: *nonces.start(), + end: *nonces.end(), + dispatch_results: bitvec![Msb0, u8; 1; (nonces.end() - nonces.start() + 1) as _], + } + } + + fn assert_3_messages_confirmation_fails( + latest_received_nonce: MessageNonce, + relayers: &VecDeque>, + ) ->
ReceivalConfirmationResult { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + let result = lane.confirm_delivery(latest_received_nonce, relayers); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + result + }) + } #[test] fn send_message_works() { @@ -136,7 +272,10 @@ mod tests { assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 3); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!(lane.confirm_delivery(3), Some((1, 3))); + assert_eq!( + lane.confirm_delivery(3, &unrewarded_relayers(1..=3)), + ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), + ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); }); @@ -151,12 +290,21 @@ mod tests { lane.send_message(message_data(REGULAR_PAYLOAD)); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!(lane.confirm_delivery(3), Some((1, 3))); - assert_eq!(lane.confirm_delivery(3), None); + assert_eq!( + lane.confirm_delivery(3, &unrewarded_relayers(1..=3)), + ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), + ); + assert_eq!( + lane.confirm_delivery(3, &unrewarded_relayers(1..=3)), + ReceivalConfirmationResult::NoNewConfirmations, + ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); - assert_eq!(lane.confirm_delivery(2), None); + assert_eq!( + lane.confirm_delivery(2, &unrewarded_relayers(1..=1)), + ReceivalConfirmationResult::NoNewConfirmations, + ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); }); @@ -164,17 +312,70 @@ mod tests { #[test] fn confirm_delivery_rejects_nonce_larger_than_last_generated() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!(lane.confirm_delivery(10), None); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - }); + assert_eq!( + assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),), + ReceivalConfirmationResult::FailedToConfirmFutureMessages, + ); + } + + #[test] + fn confirm_delivery_fails_if_entry_confirms_future_messages() { + assert_eq!( + assert_3_messages_confirmation_fails( + 3, + &unrewarded_relayers(1..=1) + .into_iter() + .chain(unrewarded_relayers(2..=30).into_iter()) + .chain(unrewarded_relayers(3..=3).into_iter()) + .collect(), + ), + ReceivalConfirmationResult::FailedToConfirmFutureMessages, + ); + } + + #[test] + #[allow(clippy::reversed_empty_ranges)] + fn confirm_delivery_fails_if_entry_is_empty() { + assert_eq!( + assert_3_messages_confirmation_fails( + 3, + &unrewarded_relayers(1..=1) + .into_iter() + .chain(unrewarded_relayers(2..=1).into_iter()) + 
.chain(unrewarded_relayers(2..=3).into_iter()) + .collect(), + ), + ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry, + ); + } + + #[test] + fn confirm_delivery_fails_if_entries_are_non_consecutive() { + assert_eq!( + assert_3_messages_confirmation_fails( + 3, + &unrewarded_relayers(1..=1) + .into_iter() + .chain(unrewarded_relayers(3..=3).into_iter()) + .chain(unrewarded_relayers(2..=2).into_iter()) + .collect(), + ), + ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries, + ); + } + + #[test] + fn confirm_delivery_fails_if_number_of_dispatch_results_in_entry_is_invalid() { + let mut relayers: VecDeque<_> = unrewarded_relayers(1..=1) + .into_iter() + .chain(unrewarded_relayers(2..=2).into_iter()) + .chain(unrewarded_relayers(3..=3).into_iter()) + .collect(); + relayers[0].messages.dispatch_results.clear(); + assert_eq!( + assert_3_messages_confirmation_fails(3, &relayers), + ReceivalConfirmationResult::InvalidNumberOfDispatchResults, + ); } #[test] @@ -191,11 +392,17 @@ mod tests { assert_eq!(lane.prune_messages(100), 0); assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); // after confirmation, some messages are received - assert_eq!(lane.confirm_delivery(2), Some((1, 2))); + assert_eq!( + lane.confirm_delivery(2, &unrewarded_relayers(1..=2)), + ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=2)), + ); assert_eq!(lane.prune_messages(100), 2); assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3); // after last message is confirmed, everything is pruned - assert_eq!(lane.confirm_delivery(3), Some((3, 3))); + assert_eq!( + lane.confirm_delivery(3, &unrewarded_relayers(3..=3)), + ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(3..=3)), + ); assert_eq!(lane.prune_messages(100), 1); assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4); }); diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index f86a21e3ed90..9b65c8217ad6 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -17,7 +17,7 @@ //! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-04-21, STEPS: [50, ], REPEAT: 20 +//! DATE: 2021-06-18, STEPS: [50, ], REPEAT: 20 //! LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled //! 
CHAIN: Some("dev"), DB CACHE: 128 @@ -57,6 +57,7 @@ pub trait WeightInfo { fn receive_single_message_proof_with_outbound_lane_state() -> Weight; fn receive_single_message_proof_1_kb() -> Weight; fn receive_single_message_proof_16_kb() -> Weight; + fn receive_single_prepaid_message_proof() -> Weight; fn receive_delivery_proof_for_single_message() -> Weight; fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; @@ -73,105 +74,110 @@ pub trait WeightInfo { pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { fn send_minimal_message_worst_case() -> Weight { - (149_643_000 as Weight) + (159_305_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn send_1_kb_message_worst_case() -> Weight { - (153_329_000 as Weight) + (164_394_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn send_16_kb_message_worst_case() -> Weight { - (200_113_000 as Weight) + (223_521_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn increase_message_fee() -> Weight { - (6_407_252_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (6_709_925_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof() -> Weight { - (141_256_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + (206_769_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_two_messages_proof() -> Weight { - (247_723_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + (343_982_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (159_731_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + (223_738_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof_1_kb() -> Weight { - (168_546_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + (235_369_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof_16_kb() -> Weight { - (450_087_000 as Weight) + (510_338_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn receive_single_prepaid_message_proof() -> Weight { + (141_536_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_single_message() -> Weight { - (164_519_000 as Weight) + (128_805_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn 
receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (173_300_000 as Weight) + (137_143_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (246_205_000 as Weight) + (193_108_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn send_messages_of_various_lengths(i: u32) -> Weight { - (149_551_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) + (133_632_000 as Weight) + .saturating_add((4_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn receive_multiple_messages_proof(i: u32) -> Weight { (0 as Weight) - .saturating_add((114_817_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add((145_006_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (437_797_000 as Weight) + (486_301_000 as Weight) .saturating_add((10_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (137_633_000 as Weight) + (178_139_000 as Weight) .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { (0 as Weight) - .saturating_add((118_482_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add((150_844_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (116_036_000 as Weight) - .saturating_add((7_118_000 as Weight).saturating_mul(i as Weight)) + (113_140_000 as Weight) + .saturating_add((7_656_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (172_780_000 as Weight) - .saturating_add((63_718_000 as Weight).saturating_mul(i as Weight)) + (97_424_000 as Weight) + .saturating_add((63_128_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -182,105 +188,110 @@ impl WeightInfo for 
RialtoWeight { // For backwards compatibility and tests impl WeightInfo for () { fn send_minimal_message_worst_case() -> Weight { - (149_643_000 as Weight) + (159_305_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn send_1_kb_message_worst_case() -> Weight { - (153_329_000 as Weight) + (164_394_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn send_16_kb_message_worst_case() -> Weight { - (200_113_000 as Weight) + (223_521_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn increase_message_fee() -> Weight { - (6_407_252_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (6_709_925_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof() -> Weight { - (141_256_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + (206_769_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_two_messages_proof() -> Weight { - (247_723_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + (343_982_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (159_731_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + (223_738_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof_1_kb() -> Weight { - (168_546_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + (235_369_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof_16_kb() -> Weight { - (450_087_000 as Weight) + (510_338_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn receive_single_prepaid_message_proof() -> Weight { + (141_536_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_single_message() -> Weight { - (164_519_000 as Weight) + (128_805_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (173_300_000 as Weight) + (137_143_000 as Weight) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (246_205_000 as Weight) + (193_108_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn 
send_messages_of_various_lengths(i: u32) -> Weight { - (149_551_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) + (133_632_000 as Weight) + .saturating_add((4_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn receive_multiple_messages_proof(i: u32) -> Weight { (0 as Weight) - .saturating_add((114_817_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add((145_006_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (437_797_000 as Weight) + (486_301_000 as Weight) .saturating_add((10_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (137_633_000 as Weight) + (178_139_000 as Weight) .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { (0 as Weight) - .saturating_add((118_482_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add((150_844_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (116_036_000 as Weight) - .saturating_add((7_118_000 as Weight).saturating_mul(i as Weight)) + (113_140_000 as Weight) + .saturating_add((7_656_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (172_780_000 as Weight) - .saturating_add((63_718_000 as Weight).saturating_mul(i as Weight)) + (97_424_000 as Weight) + .saturating_add((63_128_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) diff --git a/bridges/modules/messages/src/weights_ext.rs b/bridges/modules/messages/src/weights_ext.rs index cb754a102310..be440174b4b9 100644 --- a/bridges/modules/messages/src/weights_ext.rs +++ b/bridges/modules/messages/src/weights_ext.rs @@ -34,6 +34,7 @@ pub fn ensure_weights_are_correct( expected_default_message_delivery_tx_weight: Weight, expected_additional_byte_delivery_weight: Weight, expected_messages_delivery_confirmation_tx_weight: Weight, + 
expected_pay_inbound_dispatch_fee_weight: Weight, ) { // verify `send_message` weight components assert_ne!(W::send_message_overhead(), 0); @@ -88,6 +89,15 @@ pub fn ensure_weights_are_correct( actual_messages_delivery_confirmation_tx_weight, expected_messages_delivery_confirmation_tx_weight, ); + + // verify pay-dispatch-fee overhead for inbound messages + let actual_pay_inbound_dispatch_fee_weight = W::pay_inbound_dispatch_fee_overhead(); + assert!( + actual_pay_inbound_dispatch_fee_weight <= expected_pay_inbound_dispatch_fee_weight, + "Weight {} of pay-dispatch-fee overhead for inbound messages is larger than expected weight {}", + actual_pay_inbound_dispatch_fee_weight, + expected_pay_inbound_dispatch_fee_weight, + ); } /// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. @@ -304,6 +314,13 @@ pub trait WeightInfoExt: WeightInfo { (Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024); proof_size_in_bytes * byte_weight } + + /// Returns weight of the pay-dispatch-fee operation for inbound messages. + /// + /// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain option. + fn pay_inbound_dispatch_fee_overhead() -> Weight { + Self::receive_single_message_proof().saturating_sub(Self::receive_single_prepaid_message_proof()) + } } impl WeightInfoExt for () { diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/primitives/chain-kusama/src/lib.rs index b221aff049d4..e5ab47259e54 100644 --- a/bridges/primitives/chain-kusama/src/lib.rs +++ b/bridges/primitives/chain-kusama/src/lib.rs @@ -20,7 +20,7 @@ // Runtime-generated DecodeLimit::decode_all_with_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use sp_std::prelude::*; pub use bp_polkadot_core::*; @@ -31,7 +31,7 @@ pub type Kusama = PolkadotLike; // We use this to get the account on Kusama (target) which is derived from Polkadot's (source) // account. pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_BRIDGE_INSTANCE, id); + let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_CHAIN_ID, id); AccountIdConverter::convert(encoded_id) } @@ -43,8 +43,8 @@ pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_head /// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToKusamaOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_KUSAMA_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToKusamaOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToKusamaOutboundLaneApi::message_details` runtime method. +pub const TO_KUSAMA_MESSAGE_DETAILS_METHOD: &str = "ToKusamaOutboundLaneApi_message_details"; /// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method. pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method. @@ -87,15 +87,16 @@ sp_api::decl_runtime_apis! 
{ lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. diff --git a/bridges/primitives/chain-millau/Cargo.toml b/bridges/primitives/chain-millau/Cargo.toml index 67db08c20860..f4198e35c38c 100644 --- a/bridges/primitives/chain-millau/Cargo.toml +++ b/bridges/primitives/chain-millau/Cargo.toml @@ -23,6 +23,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +max-encoded-len = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, features = ["derive"] } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } @@ -41,6 +42,7 @@ std = [ "hash256-std-hasher/std", "impl-codec/std", "impl-serde", + "max-encoded-len/std", "parity-util-mem/std", "serde", "sp-api/std", diff --git a/bridges/primitives/chain-millau/src/lib.rs b/bridges/primitives/chain-millau/src/lib.rs index a5f3a888f9ad..0efc54e96e6a 100644 --- a/bridges/primitives/chain-millau/src/lib.rs +++ b/bridges/primitives/chain-millau/src/lib.rs @@ -22,7 +22,7 @@ mod millau_hash; -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use frame_support::{ weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, @@ -80,7 +80,7 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024; /// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. /// The message must have dispatch weight set to zero. The result then must be rounded up to account /// possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; +pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; /// Increase of delivery transaction weight on Millau chain with every additional message byte. /// @@ -95,6 +95,13 @@ pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; /// runtime upgrades. pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; +/// Weight of pay-dispatch-fee operation for inbound messages at Millau chain. +/// +/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` +/// call for your chain. 
Don't put too much reserve there, because it is used to **decrease** +/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper. +pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; + /// The target length of a session (how often authorities change) on Millau measured in of number of /// blocks. /// @@ -201,7 +208,7 @@ impl sp_runtime::traits::Convert for AccountIdConverte /// /// Note that this should only be used for testing. pub fn derive_account_from_rialto_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_BRIDGE_INSTANCE, id); + let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_CHAIN_ID, id); AccountIdConverter::convert(encoded_id) } @@ -244,8 +251,8 @@ pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_fi /// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToMillauOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToMillauOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToMillauOutboundLaneApi::message_details` runtime method. +pub const TO_MILLAU_MESSAGE_DETAILS_METHOD: &str = "ToMillauOutboundLaneApi_message_details"; /// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method. pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce"; /// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method. @@ -288,15 +295,16 @@ sp_api::decl_runtime_apis! { lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. diff --git a/bridges/primitives/chain-millau/src/millau_hash.rs b/bridges/primitives/chain-millau/src/millau_hash.rs index 936791217af1..219ceb68a824 100644 --- a/bridges/primitives/chain-millau/src/millau_hash.rs +++ b/bridges/primitives/chain-millau/src/millau_hash.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . +use frame_support::traits::MaxEncodedLen; use parity_util_mem::MallocSizeOf; use sp_runtime::traits::CheckEqual; @@ -22,7 +23,7 @@ use sp_runtime::traits::CheckEqual; fixed_hash::construct_fixed_hash! { /// Hash type used in Millau chain. 
- #[derive(MallocSizeOf)] + #[derive(MallocSizeOf, MaxEncodedLen)] pub struct MillauHash(64); } diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/primitives/chain-polkadot/src/lib.rs index 8398b3d52733..b0ba77c66ffc 100644 --- a/bridges/primitives/chain-polkadot/src/lib.rs +++ b/bridges/primitives/chain-polkadot/src/lib.rs @@ -20,7 +20,7 @@ // Runtime-generated DecodeLimit::decode_all_with_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use sp_std::prelude::*; pub use bp_polkadot_core::*; @@ -31,7 +31,7 @@ pub type Polkadot = PolkadotLike; // We use this to get the account on Polkadot (target) which is derived from Kusama's (source) // account. pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_BRIDGE_INSTANCE, id); + let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_CHAIN_ID, id); AccountIdConverter::convert(encoded_id) } @@ -43,8 +43,8 @@ pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_ /// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToPolkadotOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_POLKADOT_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToPolkadotOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToPolkadotOutboundLaneApi::message_details` runtime method. +pub const TO_POLKADOT_MESSAGE_DETAILS_METHOD: &str = "ToPolkadotOutboundLaneApi_message_details"; /// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method. pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method. @@ -87,15 +87,16 @@ sp_api::decl_runtime_apis! { lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. 
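// Illustrative sketch (not part of the patch above): the arithmetic behind the new
// `pay_inbound_dispatch_fee_overhead` benchmark helper and the `PAY_INBOUND_DISPATCH_FEE_WEIGHT`
// constants introduced in this diff. The concrete weight values below are made-up placeholders.
type Weight = u64; // matches the `Weight` alias used by the bridge primitives

// overhead = "receive one message and pay its dispatch fee" - "receive one prepaid message"
fn pay_inbound_dispatch_fee_overhead(single_message: Weight, single_prepaid_message: Weight) -> Weight {
	single_message.saturating_sub(single_prepaid_message)
}

fn check_constant_is_sane() {
	let measured_overhead = pay_inbound_dispatch_fee_overhead(1_480_000_000, 1_400_000_000);
	// `ensure_weights_are_correct` requires the measured overhead to stay below the chain
	// constant; per the constant's doc comment, the constant is later used to decrease the
	// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` estimate, so over-reserving makes delivery
	// transactions look cheaper than they really are.
	assert!(measured_overhead <= 600_000_000);
}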
diff --git a/bridges/primitives/chain-rialto/src/lib.rs b/bridges/primitives/chain-rialto/src/lib.rs index c063dc72a7f2..8139372959e3 100644 --- a/bridges/primitives/chain-rialto/src/lib.rs +++ b/bridges/primitives/chain-rialto/src/lib.rs @@ -20,7 +20,7 @@ // Runtime-generated DecodeLimit::decode_all_With_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use frame_support::{ weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, @@ -71,7 +71,7 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128; /// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. /// The message must have dispatch weight set to zero. The result then must be rounded up to account /// possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; +pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; /// Increase of delivery transaction weight on Rialto chain with every additional message byte. /// @@ -86,6 +86,13 @@ pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; /// runtime upgrades. pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; +/// Weight of pay-dispatch-fee operation for inbound messages at Rialto chain. +/// +/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` +/// call for your chain. Don't put too much reserve there, because it is used to **decrease** +/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper. +pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; + /// The target length of a session (how often authorities change) on Rialto measured in of number of /// blocks. /// @@ -162,7 +169,7 @@ impl Convert for AccountIdConverter { // // Note that this should only be used for testing. pub fn derive_account_from_millau_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_BRIDGE_INSTANCE, id); + let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_CHAIN_ID, id); AccountIdConverter::convert(encoded_id) } @@ -205,8 +212,8 @@ pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_fi /// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToRialtoOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRialtoOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToRialtoOutboundLaneApi::message_details` runtime method. +pub const TO_RIALTO_MESSAGE_DETAILS_METHOD: &str = "ToRialtoOutboundLaneApi_message_details"; /// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method. pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method. @@ -249,15 +256,16 @@ sp_api::decl_runtime_apis! 
{ lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/primitives/chain-rococo/Cargo.toml index b97e8d9d1ab1..33772c7890a0 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/primitives/chain-rococo/Cargo.toml @@ -8,14 +8,15 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +smallvec = "1.6" # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } bp-messages = { path = "../messages", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -24,10 +25,10 @@ sp-version = { git = "https://github.com/paritytech/substrate", branch = "master [features] default = ["std"] std = [ - "bp-header-chain/std", "bp-messages/std", "bp-polkadot-core/std", "bp-runtime/std", + "frame-support/std", "parity-scale-codec/std", "sp-api/std", "sp-runtime/std", diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/primitives/chain-rococo/src/lib.rs index f5953d3c8e1c..d76ec8e679d3 100644 --- a/bridges/primitives/chain-rococo/src/lib.rs +++ b/bridges/primitives/chain-rococo/src/lib.rs @@ -20,8 +20,8 @@ // Runtime-generated DecodeLimit::decode_all_with_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; -use bp_runtime::Chain; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; +use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial}; use sp_std::prelude::*; use sp_version::RuntimeVersion; @@ -30,57 +30,46 @@ pub use bp_polkadot_core::*; /// Rococo Chain pub type Rococo = PolkadotLike; -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; +/// The target length of a session (how often authorities change) on Westend measured in of number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. 
+pub const SESSION_LENGTH: BlockNumber = 10 * time_units::MINUTES; // NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: sp_version::create_runtime_str!("rococo"), - impl_name: sp_version::create_runtime_str!("parity-rococo-v1.5"), + impl_name: sp_version::create_runtime_str!("parity-rococo-v1.6"), authoring_version: 0, - spec_version: 232, + spec_version: 9004, impl_version: 0, apis: sp_version::create_apis_vec![[]], transaction_version: 0, }; -/// Rococo Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to Rococo chain. -/// Ideally this code would be auto-generated from Metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] -pub enum Call { - /// Wococo bridge pallet. - #[codec(index = 41)] - BridgeGrandpaWococo(BridgeGrandpaWococoCall), -} - -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] -#[allow(non_camel_case_types)] -pub enum BridgeGrandpaWococoCall { - #[codec(index = 0)] - submit_finality_proof( - ::Header, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), +// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. +pub struct WeightToFee; +impl WeightToFeePolynomial for WeightToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + const CENTS: Balance = 1_000_000_000_000 / 100; + let p = CENTS; + let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); + smallvec::smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } } -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } +// We use this to get the account on Rococo (target) which is derived from Wococo's (source) +// account. +pub fn derive_account_from_wococo_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::WOCOCO_CHAIN_ID, id); + AccountIdConverter::convert(encoded_id) } /// Name of the `RococoFinalityApi::best_finalized` runtime method. @@ -91,8 +80,8 @@ pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_head /// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToRococoOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_ROCOCO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRococoOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToRococoOutboundLaneApi::message_details` runtime method. 
+pub const TO_ROCOCO_MESSAGE_DETAILS_METHOD: &str = "ToRococoOutboundLaneApi_message_details"; /// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method. pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method. @@ -135,15 +124,16 @@ sp_api::decl_runtime_apis! { lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs index 08ca9c28c8ce..e3c4d733def9 100644 --- a/bridges/primitives/chain-westend/src/lib.rs +++ b/bridges/primitives/chain-westend/src/lib.rs @@ -20,7 +20,7 @@ // Runtime-generated DecodeLimit::decode_all_with_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use sp_std::prelude::*; use sp_version::RuntimeVersion; @@ -86,7 +86,7 @@ impl sp_runtime::traits::Dispatchable for Call { // We use this to get the account on Westend (target) which is derived from Rococo's (source) // account. pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_BRIDGE_INSTANCE, id); + let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_CHAIN_ID, id); AccountIdConverter::convert(encoded_id) } @@ -98,8 +98,8 @@ pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_he /// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToWestendOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_WESTEND_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToWestendOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToWestendOutboundLaneApi::message_details` runtime method. +pub const TO_WESTEND_MESSAGE_DETAILS_METHOD: &str = "ToWestendOutboundLaneApi_message_details"; /// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method. pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method. @@ -149,15 +149,16 @@ sp_api::decl_runtime_apis! { lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. 
+ /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. diff --git a/bridges/primitives/chain-wococo/Cargo.toml b/bridges/primitives/chain-wococo/Cargo.toml index ecf783a51eeb..88201dde9ac1 100644 --- a/bridges/primitives/chain-wococo/Cargo.toml +++ b/bridges/primitives/chain-wococo/Cargo.toml @@ -10,27 +10,25 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } bp-messages = { path = "../messages", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-rococo = { path = "../chain-rococo", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] std = [ - "bp-header-chain/std", "bp-messages/std", "bp-polkadot-core/std", "bp-runtime/std", + "bp-rococo/std", "parity-scale-codec/std", "sp-api/std", "sp-runtime/std", "sp-std/std", - "sp-version/std", ] diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-wococo/src/lib.rs index f7a472ddf31d..24572e141b20 100644 --- a/bridges/primitives/chain-wococo/src/lib.rs +++ b/bridges/primitives/chain-wococo/src/lib.rs @@ -20,73 +20,20 @@ // Runtime-generated DecodeLimit::decode_all_with_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; -use bp_runtime::Chain; +use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use sp_std::prelude::*; -use sp_version::RuntimeVersion; pub use bp_polkadot_core::*; +// Rococo runtime = Wococo runtime +pub use bp_rococo::{WeightToFee, SESSION_LENGTH, VERSION}; /// Wococo Chain pub type Wococo = PolkadotLike; -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; - -// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("rococo"), - impl_name: sp_version::create_runtime_str!("parity-rococo-v1.5"), - authoring_version: 0, - spec_version: 232, - impl_version: 0, - apis: sp_version::create_apis_vec![[]], - transaction_version: 0, -}; - -/// Wococo Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to Rococo chain. 
-/// Ideally this code would be auto-generated from Metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] -pub enum Call { - /// Rococo bridge pallet. - #[codec(index = 40)] - BridgeGrandpaRococo(BridgeGrandpaRococoCall), -} - -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] -#[allow(non_camel_case_types)] -pub enum BridgeGrandpaRococoCall { - #[codec(index = 0)] - submit_finality_proof( - ::Header, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), -} - -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } -} - // We use this to get the account on Wococo (target) which is derived from Rococo's (source) // account. pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_BRIDGE_INSTANCE, id); + let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_CHAIN_ID, id); AccountIdConverter::convert(encoded_id) } @@ -98,8 +45,8 @@ pub const IS_KNOWN_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_is_known_head /// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToWococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToWococoOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_WOCOCO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToWococoOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToWococoOutboundLaneApi::message_details` runtime method. +pub const TO_WOCOCO_MESSAGE_DETAILS_METHOD: &str = "ToWococoOutboundLaneApi_message_details"; /// Name of the `ToWococoOutboundLaneApi::latest_generated_nonce` runtime method. pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToWococoOutboundLaneApi::latest_received_nonce` runtime method. @@ -142,15 +89,16 @@ sp_api::decl_runtime_apis! { lane_id: LaneId, payload: OutboundPayload, ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all + /// messages in given inclusive range. /// /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( + fn message_details( lane: LaneId, begin: MessageNonce, end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; + ) -> Vec>; /// Returns nonce of the latest message, received by bridged chain. fn latest_received_nonce(lane: LaneId) -> MessageNonce; /// Returns nonce of the latest message, generated by given lane. 
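// Illustrative sketch (not part of the patch above): what the single degree-1 coefficient of
// the Rococo `WeightToFee` polynomial introduced in this diff evaluates to. With `p = CENTS`
// and `q = 10 * ExtrinsicBaseWeight::get()`, the fee charged for a weight `w` is roughly
// `w * p / q`; `coeff_integer` and `coeff_frac` are just that ratio split into its integer
// and fractional (`Perbill`) parts. The numbers below are placeholders, not chain constants.
type Balance = u128;

fn approximate_fee(weight: u64, p: Balance, q: Balance) -> Balance {
	// equivalent to evaluating `coeff_integer * weight + coeff_frac * weight`
	Balance::from(weight).saturating_mul(p) / q
}

fn example() {
	let cents: Balance = 1_000_000_000_000 / 100;
	let extrinsic_base_weight: Balance = 125_000_000; // placeholder, not the real constant
	let fee = approximate_fee(1_000_000, cents, 10 * extrinsic_base_weight);
	assert!(fee > 0);
}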
diff --git a/bridges/primitives/ethereum-poa/src/lib.rs b/bridges/primitives/ethereum-poa/src/lib.rs index 57c539f2e27b..382e6f81ee5d 100644 --- a/bridges/primitives/ethereum-poa/src/lib.rs +++ b/bridges/primitives/ethereum-poa/src/lib.rs @@ -245,7 +245,7 @@ impl AuraHeader { /// Get step this header is generated for. pub fn step(&self) -> Option { - self.seal.get(0).map(|x| Rlp::new(&x)).and_then(|x| x.as_val().ok()) + self.seal.get(0).map(|x| Rlp::new(x)).and_then(|x| x.as_val().ok()) } /// Get header author' signature. @@ -496,7 +496,7 @@ pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result { + /// The round (voting period) this justification is valid for. + pub round: u64, + /// The set of votes for the chain which is to be finalized. + pub commit: finality_grandpa::Commit, + /// A proof that the chain of blocks in the commit are related to each other. + pub votes_ancestries: Vec
, +} + +impl crate::FinalityProof for GrandpaJustification { + fn target_header_number(&self) -> H::Number { + self.commit.target_number + } +} + /// Justification verification error. #[derive(RuntimeDebug, PartialEq)] pub enum Error { @@ -34,14 +55,15 @@ pub enum Error { JustificationDecode, /// Justification is finalizing unexpected header. InvalidJustificationTarget, - /// Invalid commit in justification. - InvalidJustificationCommit, - /// Justification has invalid authority singature. + /// The authority has provided an invalid signature. InvalidAuthoritySignature, - /// The justification has precommit for the header that has no route from the target header. - InvalidPrecommitAncestryProof, - /// The justification has 'unused' headers in its precommit ancestries. - InvalidPrecommitAncestries, + /// The justification contains precommit for header that is not a descendant of the commit header. + PrecommitIsNotCommitDescendant, + /// The cumulative weight of all votes in the justification is not enough to justify commit + /// header finalization. + TooLowCumulativeWeight, + /// The justification contains extra (unused) headers in its `votes_ancestries` field. + ExtraHeadersInVotesAncestries, } /// Decode justification target. @@ -63,123 +85,135 @@ pub fn verify_justification( where Header::Number: finality_grandpa::BlockNumberOps, { - // Ensure that it is justification for the expected header + // ensure that it is justification for the expected header if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { return Err(Error::InvalidJustificationTarget); } - // Validate commit of the justification. Note that `validate_commit()` assumes that all - // signatures are valid. We'll check the validity of the signatures later since they're more - // resource intensive to verify. - let ancestry_chain = AncestryChain::new(&justification.votes_ancestries); - match finality_grandpa::validate_commit(&justification.commit, authorities_set, &ancestry_chain) { - Ok(ref result) if result.ghost().is_some() => {} - _ => return Err(Error::InvalidJustificationCommit), - } - - // Now that we know that the commit is correct, check authorities signatures - let mut buf = Vec::new(); - let mut visited_hashes = BTreeSet::new(); + let mut chain = AncestryChain::new(&justification.votes_ancestries); + let mut signature_buffer = Vec::new(); + let mut votes = BTreeSet::new(); + let mut cumulative_weight = 0u64; for signed in &justification.commit.precommits { + // authority must be in the set + let authority_info = match authorities_set.get(&signed.id) { + Some(authority_info) => authority_info, + None => { + // just ignore precommit from unknown authority as `finality_grandpa::import_precommit` does + continue; + } + }; + + // check if authority has already voted in the same round. + // + // there's a lot of code in `validate_commit` and `import_precommit` functions inside + // `finality-grandpa` crate (mostly related to reporing equivocations). 
But the only thing that we + // care about is that only first vote from the authority is accepted + if !votes.insert(signed.id.clone()) { + continue; + } + + // everything below this line can't just `continue`, because state is already altered + + // all precommits must be for block higher than the target + if signed.precommit.target_number < justification.commit.target_number { + return Err(Error::PrecommitIsNotCommitDescendant); + } + // all precommits must be for target block descendents + chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?; + // since we know now that the precommit target is the descendant of the justification target, + // we may increase 'weight' of the justification target + // + // there's a lot of code in the `VoteGraph::insert` method inside `finality-grandpa` crate, + // but in the end it is only used to find GHOST, which we don't care about. The only thing + // that we care about is that the justification target has enough weight + cumulative_weight = cumulative_weight.checked_add(authority_info.weight().0.into()).expect( + "sum of weights of ALL authorities is expected not to overflow - this is guaranteed by\ + existence of VoterSet;\ + the order of loop conditions guarantees that we can account vote from same authority\ + multiple times;\ + thus we'll never overflow the u64::MAX;\ + qed", + ); + // verify authority signature if !sp_finality_grandpa::check_message_signature_with_buffer( &finality_grandpa::Message::Precommit(signed.precommit.clone()), &signed.id, &signed.signature, justification.round, authorities_set_id, - &mut buf, + &mut signature_buffer, ) { return Err(Error::InvalidAuthoritySignature); } - - if justification.commit.target_hash == signed.precommit.target_hash { - continue; - } - - match ancestry_chain.ancestry(justification.commit.target_hash, signed.precommit.target_hash) { - Ok(route) => { - // ancestry starts from parent hash but the precommit target hash has been visited - visited_hashes.insert(signed.precommit.target_hash); - visited_hashes.extend(route); - } - _ => { - // could this happen in practice? I don't think so, but original code has this check - return Err(Error::InvalidPrecommitAncestryProof); - } - } } - let ancestry_hashes = justification - .votes_ancestries - .iter() - .map(|h: &Header| h.hash()) - .collect(); - if visited_hashes != ancestry_hashes { - return Err(Error::InvalidPrecommitAncestries); + // check that there are no extra headers in the justification + if !chain.unvisited.is_empty() { + return Err(Error::ExtraHeadersInVotesAncestries); } - Ok(()) -} - -/// A GRANDPA Justification is a proof that a given header was finalized -/// at a certain height and with a certain set of authorities. -/// -/// This particular proof is used to prove that headers on a bridged chain -/// (so not our chain) have been finalized correctly. -#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq)] -pub struct GrandpaJustification { - /// The round (voting period) this justification is valid for. - pub round: u64, - /// The set of votes for the chain which is to be finalized. - pub commit: finality_grandpa::Commit, - /// A proof that the chain of blocks in the commit are related to each other. - pub votes_ancestries: Vec
, -} - -impl crate::FinalityProof for GrandpaJustification { - fn target_header_number(&self) -> H::Number { - self.commit.target_number + // check that the cumulative weight of validators voted for the justification target (or one + // of its descendents) is larger than required threshold. + let threshold = authorities_set.threshold().0.into(); + if cumulative_weight >= threshold { + Ok(()) + } else { + Err(Error::TooLowCumulativeWeight) } } -/// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. +/// Votes ancestries with useful methods. #[derive(RuntimeDebug)] -struct AncestryChain { - ancestry: BTreeMap, +pub struct AncestryChain { + /// Header hash => parent header hash mapping. + pub parents: BTreeMap, + /// Hashes of headers that weren't visited by `is_ancestor` method. + pub unvisited: BTreeSet, } impl AncestryChain
{ - fn new(ancestry: &[Header]) -> AncestryChain<Header>
{ - AncestryChain { - ancestry: ancestry - .iter() - .map(|header| (header.hash(), *header.parent_hash())) - .collect(), + /// Create new ancestry chain. + pub fn new(ancestry: &[Header]) -> AncestryChain<Header>
{ + let mut parents = BTreeMap::new(); + let mut unvisited = BTreeSet::new(); + for ancestor in ancestry { + let hash = ancestor.hash(); + let parent_hash = *ancestor.parent_hash(); + parents.insert(hash, parent_hash); + unvisited.insert(hash); } + AncestryChain { parents, unvisited } } -} -impl finality_grandpa::Chain for AncestryChain
-where - Header::Number: finality_grandpa::BlockNumberOps, -{ - fn ancestry(&self, base: Header::Hash, block: Header::Hash) -> Result, GrandpaError> { - let mut route = Vec::new(); - let mut current_hash = block; + /// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and `Ok(_)` otherwise. + pub fn ensure_descendant( + mut self, + commit_target: &Header::Hash, + precommit_target: &Header::Hash, + ) -> Result { + let mut current_hash = *precommit_target; loop { - if current_hash == base { + if current_hash == *commit_target { break; } - match self.ancestry.get(¤t_hash).cloned() { + + let is_visited_before = !self.unvisited.remove(¤t_hash); + current_hash = match self.parents.get(¤t_hash) { Some(parent_hash) => { - current_hash = parent_hash; - route.push(current_hash); + if is_visited_before { + // `Some(parent_hash)` means that the `current_hash` is in the `parents` container + // `is_visited_before` means that it has been visited before in some of previous calls + // => since we assume that previous call has finished with `true`, this also will + // be finished with `true` + return Ok(self); + } + + *parent_hash } - _ => return Err(GrandpaError::NotDescendent), - } + None => return Err(Error::PrecommitIsNotCommitDescendant), + }; } - route.pop(); // remove the base - - Ok(route) + Ok(self) } } diff --git a/bridges/primitives/header-chain/tests/implementation_match.rs b/bridges/primitives/header-chain/tests/implementation_match.rs new file mode 100644 index 000000000000..0b55c1903528 --- /dev/null +++ b/bridges/primitives/header-chain/tests/implementation_match.rs @@ -0,0 +1,317 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests inside this module are made to ensure that our custom justification verification +//! implementation works exactly as `fn finality_grandpa::validate_commit`. +//! +//! Some of tests in this module may partially duplicate tests from `justification.rs`, +//! but their purpose is different. + +use assert_matches::assert_matches; +use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; +use bp_test_utils::{ + header_id, make_justification_for_header, signed_precommit, test_header, Account, JustificationGeneratorParams, + ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID, +}; +use finality_grandpa::voter_set::VoterSet; +use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; +use sp_runtime::traits::Header as HeaderT; + +type TestHeader = sp_runtime::testing::Header; +type TestHash = ::Hash; +type TestNumber = ::Number; + +/// Implementation of `finality_grandpa::Chain` that is used in tests. 
+struct AncestryChain(bp_header_chain::justification::AncestryChain); + +impl AncestryChain { + fn new(ancestry: &[TestHeader]) -> Self { + Self(bp_header_chain::justification::AncestryChain::new(ancestry)) + } +} + +impl finality_grandpa::Chain for AncestryChain { + fn ancestry(&self, base: TestHash, block: TestHash) -> Result, finality_grandpa::Error> { + let mut route = Vec::new(); + let mut current_hash = block; + loop { + if current_hash == base { + break; + } + match self.0.parents.get(¤t_hash).cloned() { + Some(parent_hash) => { + current_hash = parent_hash; + route.push(current_hash); + } + _ => return Err(finality_grandpa::Error::NotDescendent), + } + } + route.pop(); // remove the base + + Ok(route) + } +} + +/// Get a full set of accounts. +fn full_accounts_set() -> Vec<(Account, AuthorityWeight)> { + vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)] +} + +/// Get a full set of GRANDPA authorities. +fn full_voter_set() -> VoterSet { + VoterSet::new(full_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w))).unwrap() +} + +/// Get a minimal set of accounts. +fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> { + // there are 5 accounts in the full set => we need 2/3 + 1 accounts, which results in 4 accounts + vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)] +} + +/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a header finality. +pub fn minimal_voter_set() -> VoterSet { + VoterSet::new( + minimal_accounts_set() + .iter() + .map(|(id, w)| (AuthorityId::from(*id), *w)), + ) + .unwrap() +} + +/// Make a valid GRANDPA justification with sensible defaults. +pub fn make_default_justification(header: &TestHeader) -> GrandpaJustification { + make_justification_for_header(JustificationGeneratorParams { + header: header.clone(), + authorities: minimal_accounts_set(), + ..Default::default() + }) +} + +// the `finality_grandpa::validate_commit` function has two ways to report an unsuccessful +// commit validation: +// +// 1) to return `Err()` (which only may happen if `finality_grandpa::Chain` implementation +// returns an error); +// 2) to return `Ok(validation_result) if validation_result.ghost().is_none()`. +// +// Our implementation would just return error in both cases. 
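// Illustrative sketch (not part of the patch above): the parent-hash walk that
// `AncestryChain::ensure_descendant` performs, reduced to plain `u32` "hashes" so it can be
// read in isolation. A precommit target is accepted if following parent links from it reaches
// the commit target; every header visited along the way is removed from the `unvisited` set,
// which is what later lets `verify_justification` reject justifications carrying extra
// headers in `votes_ancestries`.
use std::collections::{BTreeMap, BTreeSet};

fn is_descendant(
	parents: &BTreeMap<u32, u32>,
	unvisited: &mut BTreeSet<u32>,
	commit_target: u32,
	precommit_target: u32,
) -> bool {
	let mut current = precommit_target;
	loop {
		if current == commit_target {
			return true;
		}
		let visited_before = !unvisited.remove(&current);
		current = match parents.get(&current) {
			// a header visited by a previous successful call already lies on a route to the
			// commit target, so the walk can stop early
			Some(_) if visited_before => return true,
			Some(parent) => *parent,
			// no recorded parent: the precommit target is not a descendant of the commit target
			None => return false,
		};
	}
}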
+ +#[test] +fn same_result_when_precommit_target_has_lower_number_than_commit_target() { + let mut justification = make_default_justification(&test_header(1)); + // the number of header in precommit (0) is lower than number of header in commit (1) + justification.commit.precommits[0].precommit.target_number = 0; + + // our implementation returns an error + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::PrecommitIsNotCommitDescendant), + ); + // original implementation returns empty GHOST + assert_matches!( + finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .map(|result| result.ghost().cloned()), + Ok(None) + ); +} + +#[test] +fn same_result_when_precommit_target_is_not_descendant_of_commit_target() { + let not_descendant = test_header::(10); + let mut justification = make_default_justification(&test_header(1)); + // the route from header of commit (1) to header of precommit (10) is missing from + // the votes ancestries + justification.commit.precommits[0].precommit.target_number = *not_descendant.number(); + justification.commit.precommits[0].precommit.target_hash = not_descendant.hash(); + justification.votes_ancestries.push(not_descendant); + + // our implementation returns an error + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::PrecommitIsNotCommitDescendant), + ); + // original implementation returns empty GHOST + assert_matches!( + finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .map(|result| result.ghost().cloned()), + Ok(None) + ); +} + +#[test] +fn same_result_when_justification_contains_duplicate_vote() { + let mut justification = make_default_justification(&test_header(1)); + // the justification may contain exactly the same vote (i.e. 
same precommit and same signature) + // multiple times && it isn't treated as an error by original implementation + justification + .commit + .precommits + .push(justification.commit.precommits[0].clone()); + justification + .commit + .precommits + .push(justification.commit.precommits[0].clone()); + + // our implementation succeeds + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Ok(()), + ); + // original implementation returns non-empty GHOST + assert_matches!( + finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .map(|result| result.ghost().cloned()), + Ok(Some(_)) + ); +} + +#[test] +fn same_result_when_authority_equivocates_once_in_a_round() { + let mut justification = make_default_justification(&test_header(1)); + // the justification original implementation allows authority to submit two different + // votes in a single round, of which only first is 'accepted' + justification.commit.precommits.push(signed_precommit::( + &ALICE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); + + // our implementation succeeds + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Ok(()), + ); + // original implementation returns non-empty GHOST + assert_matches!( + finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .map(|result| result.ghost().cloned()), + Ok(Some(_)) + ); +} + +#[test] +fn same_result_when_authority_equivocates_twice_in_a_round() { + let mut justification = make_default_justification(&test_header(1)); + // there's some code in the original implementation that should return an error when + // same authority submits more than two different votes in a single round: + // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/lib.rs#L473 + // but there's also a code that prevents this from happening: + // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287 + // => so now we are also just ignoring all votes from the same authority, except the first one + justification.commit.precommits.push(signed_precommit::( + &ALICE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); + justification.commit.precommits.push(signed_precommit::( + &ALICE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); + + // our implementation succeeds + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Ok(()), + ); + // original implementation returns non-empty GHOST + assert_matches!( + finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .map(|result| result.ghost().cloned()), + Ok(Some(_)) + ); +} + +#[test] +fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { + // just remove one authority from the minimal set and we shall not reach the threshold + let mut authorities_set = minimal_accounts_set(); + authorities_set.pop(); + let justification = make_justification_for_header(JustificationGeneratorParams { + header: test_header(1), + authorities: authorities_set, + ..Default::default() + }); + + // our implementation returns an 
error + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::TooLowCumulativeWeight), + ); + // original implementation returns empty GHOST + assert_matches!( + finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .map(|result| result.ghost().cloned()), + Ok(None) + ); +} diff --git a/bridges/primitives/header-chain/tests/justification.rs b/bridges/primitives/header-chain/tests/justification.rs index 1ce739e4536f..85846c6d50f9 100644 --- a/bridges/primitives/header-chain/tests/justification.rs +++ b/bridges/primitives/header-chain/tests/justification.rs @@ -23,13 +23,13 @@ type TestHeader = sp_runtime::testing::Header; #[test] fn valid_justification_accepted() { - let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)]; + let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]; let params = JustificationGeneratorParams { header: test_header(1), round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, authorities: authorities.clone(), - votes: 7, + ancestors: 7, forks: 3, }; @@ -45,7 +45,7 @@ fn valid_justification_accepted() { ); assert_eq!(justification.commit.precommits.len(), authorities.len()); - assert_eq!(justification.votes_ancestries.len(), params.votes as usize); + assert_eq!(justification.votes_ancestries.len(), params.ancestors as usize); } #[test] @@ -55,7 +55,7 @@ fn valid_justification_accepted_with_single_fork() { round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)], - votes: 5, + ancestors: 5, forks: 1, }; @@ -83,7 +83,7 @@ fn valid_justification_accepted_with_arbitrary_number_of_authorities() { round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, authorities: authorities.clone(), - votes: n.into(), + ancestors: n.into(), forks: n.into(), }; @@ -129,7 +129,7 @@ fn justification_with_invalid_commit_rejected() { &voter_set(), &justification, ), - Err(Error::InvalidJustificationCommit), + Err(Error::ExtraHeadersInVotesAncestries), ); } @@ -161,7 +161,7 @@ fn justification_with_invalid_precommit_ancestry() { &voter_set(), &justification, ), - Err(Error::InvalidPrecommitAncestries), + Err(Error::ExtraHeadersInVotesAncestries), ); } @@ -175,7 +175,7 @@ fn justification_is_invalid_if_we_dont_meet_threshold() { round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, authorities: authorities.clone(), - votes: 2 * authorities.len() as u32, + ancestors: 2 * authorities.len() as u32, forks: 2, }; @@ -186,6 +186,6 @@ fn justification_is_invalid_if_we_dont_meet_threshold() { &voter_set(), &make_justification_for_header::(params) ), - Err(Error::InvalidJustificationCommit), + Err(Error::TooLowCumulativeWeight), ); } diff --git a/bridges/primitives/message-dispatch/src/lib.rs b/bridges/primitives/message-dispatch/src/lib.rs index 124437123964..859dc5e469ad 100644 --- a/bridges/primitives/message-dispatch/src/lib.rs +++ b/bridges/primitives/message-dispatch/src/lib.rs @@ -19,7 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use bp_runtime::{InstanceId, Size}; +use bp_runtime::{ + messages::{DispatchFeePayment, MessageDispatchResult}, + ChainId, Size, +}; use codec::{Decode, Encode}; use frame_support::RuntimeDebug; use sp_std::prelude::*; @@ -31,7 +34,7 @@ pub type Weight = u64; pub type SpecVersion = u32; /// A generic trait to dispatch arbitrary messages delivered over the 
bridge. -pub trait MessageDispatch { +pub trait MessageDispatch { /// A type of the message to be dispatched. type Message: codec::Decode; @@ -43,7 +46,8 @@ pub trait MessageDispatch { /// Dispatches the message internally. /// - /// `bridge` indicates instance of deployed bridge where the message came from. + /// `source_chain` indicates the chain where the message came from. + /// `target_chain` indicates the chain where message dispatch happens. /// /// `id` is a short unique identifier of the message. /// @@ -51,7 +55,15 @@ pub trait MessageDispatch { /// a sign that some other component has rejected the message even before it has /// reached `dispatch` method (right now this may only be caused if we fail to decode /// the whole message). - fn dispatch(bridge: InstanceId, id: MessageId, message: Result); + /// + /// Returns unspent dispatch weight. + fn dispatch Result<(), ()>>( + source_chain: ChainId, + target_chain: ChainId, + id: MessageId, + message: Result, + pay_dispatch_fee: P, + ) -> MessageDispatchResult; } /// Origin of a Call when it is dispatched on the target chain. @@ -90,7 +102,7 @@ pub enum CallOrigin, + /// Where the fee for dispatching message is paid? + pub dispatch_fee_payment: DispatchFeePayment, /// The call itself. pub call: Call, } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 9cb037a34ce8..b5b68220a409 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -7,7 +7,10 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +bitvec = { version = "0.20", default-features = false, features = ["alloc"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive", "bit-vec"] } +impl-trait-for-tuples = "0.2" +serde = { version = "1.0.101", optional = true, features = ["derive"] } # Bridge dependencies @@ -26,5 +29,6 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", + "serde", "sp-std/std" ] diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index c3ffce8baa52..963543ec3213 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -22,6 +22,8 @@ // Generated by `DecodeLimit::decode_with_depth_limit` #![allow(clippy::unnecessary_mut_passed)] +use bitvec::prelude::*; +use bp_runtime::messages::DispatchFeePayment; use codec::{Decode, Encode}; use frame_support::RuntimeDebug; use sp_std::{collections::vec_deque::VecDeque, prelude::*}; @@ -32,12 +34,40 @@ pub mod target_chain; // Weight is reexported to avoid additional frame-support dependencies in related crates. pub use frame_support::weights::Weight; +/// Messages pallet operating mode. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub enum OperatingMode { + /// Normal mode, when all operations are allowed. + Normal, + /// The pallet is not accepting outbound messages. Inbound messages and receival proofs + /// are still accepted. + /// + /// This mode may be used e.g. when bridged chain expects upgrade. Then to avoid dispatch + /// failures, the pallet owner may stop accepting new messages, while continuing to deliver + /// queued messages to the bridged chain. 
Once upgrade is completed, the mode may be switched + /// back to `Normal`. + RejectingOutboundMessages, + /// The pallet is halted. All operations (except operating mode change) are prohibited. + Halted, +} + +impl Default for OperatingMode { + fn default() -> Self { + OperatingMode::Normal + } +} + /// Messages pallet parameter. pub trait Parameter: frame_support::Parameter { /// Save parameter value in the runtime storage. fn save(&self); } +impl Parameter for () { + fn save(&self) {} +} + /// Lane identifier. pub type LaneId = [u8; 4]; @@ -96,7 +126,7 @@ pub struct InboundLaneData { /// When a relayer sends a single message, both of MessageNonces are the same. /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce. /// Multiple dispatches from the same relayer are allowed. - pub relayers: VecDeque<(MessageNonce, MessageNonce, RelayerId)>, + pub relayers: VecDeque>, /// Nonce of the last message that /// a) has been delivered to the target (this) chain and @@ -123,22 +153,106 @@ impl InboundLaneData { /// size of each entry. /// /// Returns `None` if size overflows `u32` limits. - pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32) -> Option { + pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32, messages_count: u32) -> Option { let message_nonce_size = 8; let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?; let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?; - relayers_size.checked_add(message_nonce_size) + let dispatch_results_per_byte = 8; + let dispatch_result_size = sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte); + relayers_size + .checked_add(message_nonce_size) + .and_then(|result| result.checked_add(dispatch_result_size)) } /// Nonce of the last message that has been delivered to this (target) chain. pub fn last_delivered_nonce(&self) -> MessageNonce { self.relayers .back() - .map(|(_, last_nonce, _)| *last_nonce) + .map(|entry| entry.messages.end) .unwrap_or(self.last_confirmed_nonce) } } +/// Message details, returned by runtime APIs. +#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] +pub struct MessageDetails { + /// Nonce assigned to the message. + pub nonce: MessageNonce, + /// Message dispatch weight, declared by the submitter. + pub dispatch_weight: Weight, + /// Size of the encoded message. + pub size: u32, + /// Delivery+dispatch fee paid by the message submitter at the source chain. + pub delivery_and_dispatch_fee: OutboundMessageFee, + /// Where the fee for dispatching message is paid? + pub dispatch_fee_payment: DispatchFeePayment, +} + +/// Bit vector of message dispatch results. +pub type DispatchResultsBitVec = BitVec; + +/// Unrewarded relayer entry stored in the inbound lane data. +/// +/// This struct represents a continuous range of messages that have been delivered by the same relayer +/// and whose confirmations are still pending. +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] +pub struct UnrewardedRelayer { + /// Identifier of the relayer. + pub relayer: RelayerId, + /// Messages range, delivered by this relayer. + pub messages: DeliveredMessages, +} + +/// Delivered messages with their dispatch result. +#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq)] +pub struct DeliveredMessages { + /// Nonce of the first message that has been delivered (inclusive). 
+ pub begin: MessageNonce, + /// Nonce of the last message that has been delivered (inclusive). + pub end: MessageNonce, + /// Dispatch result (`false`/`true`), returned by the message dispatcher for every + /// message in the `[begin; end]` range. See `dispatch_result` field of the + /// `bp_runtime::messages::MessageDispatchResult` structure for more information. + pub dispatch_results: DispatchResultsBitVec, +} + +impl DeliveredMessages { + /// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given dispatch result. + pub fn new(nonce: MessageNonce, dispatch_result: bool) -> Self { + DeliveredMessages { + begin: nonce, + end: nonce, + dispatch_results: bitvec![Msb0, u8; if dispatch_result { 1 } else { 0 }], + } + } + + /// Note new dispatched message. + pub fn note_dispatched_message(&mut self, dispatch_result: bool) { + self.end += 1; + self.dispatch_results.push(dispatch_result); + } + + /// Returns true if delivered messages contain message with given nonce. + pub fn contains_message(&self, nonce: MessageNonce) -> bool { + (self.begin..=self.end).contains(&nonce) + } + + /// Get dispatch result flag by message nonce. + /// + /// Dispatch result flag must be interpreted using the knowledge of dispatch mechanism + /// at the target chain. See `dispatch_result` field of the + /// `bp_runtime::messages::MessageDispatchResult` structure for more information. + /// + /// Panics if message nonce is not in the `begin..=end` range. Typically you'll first + /// check if message is within the range by calling `contains_message`. + pub fn message_dispatch_result(&self, nonce: MessageNonce) -> bool { + const INVALID_NONCE: &str = "Invalid nonce used to index dispatch_results"; + + let index = nonce.checked_sub(self.begin).expect(INVALID_NONCE) as usize; + *self.dispatch_results.get(index).expect(INVALID_NONCE) + } +} + /// Gist of `InboundLaneData::relayers` field used by runtime APIs. #[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq)] pub struct UnrewardedRelayersState { @@ -177,12 +291,10 @@ impl Default for OutboundLaneData { /// Returns total number of messages in the `InboundLaneData::relayers` vector. /// /// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`). 
-pub fn total_unrewarded_messages( - relayers: &VecDeque<(MessageNonce, MessageNonce, RelayerId)>, -) -> Option { +pub fn total_unrewarded_messages(relayers: &VecDeque>) -> Option { match (relayers.front(), relayers.back()) { - (Some((begin, _, _)), Some((_, end, _))) => { - if let Some(difference) = end.checked_sub(*begin) { + (Some(front), Some(back)) => { + if let Some(difference) = back.messages.end.checked_sub(front.messages.begin) { difference.checked_add(1) } else { Some(0) @@ -200,9 +312,18 @@ mod tests { fn total_unrewarded_messages_does_not_overflow() { assert_eq!( total_unrewarded_messages( - &vec![(0, 0, 1), (MessageNonce::MAX, MessageNonce::MAX, 2)] - .into_iter() - .collect() + &vec![ + UnrewardedRelayer { + relayer: 1, + messages: DeliveredMessages::new(0, true) + }, + UnrewardedRelayer { + relayer: 2, + messages: DeliveredMessages::new(MessageNonce::MAX, true) + }, + ] + .into_iter() + .collect() ), None, ); @@ -210,19 +331,60 @@ mod tests { #[test] fn inbound_lane_data_returns_correct_hint() { - let expected_size = InboundLaneData::::encoded_size_hint(1, 13); - let actual_size = InboundLaneData { - relayers: (1u8..=13u8).map(|i| (i as _, i as _, i)).collect(), - last_confirmed_nonce: 13, + let test_cases = vec![ + // single relayer, multiple messages + (1, 128u8), + // multiple relayers, single message per relayer + (128u8, 128u8), + // several messages per relayer + (13u8, 128u8), + ]; + for (relayer_entries, messages_count) in test_cases { + let expected_size = InboundLaneData::::encoded_size_hint(1, relayer_entries as _, messages_count as _); + let actual_size = InboundLaneData { + relayers: (1u8..=relayer_entries) + .map(|i| { + let mut entry = UnrewardedRelayer { + relayer: i, + messages: DeliveredMessages::new(i as _, true), + }; + entry.messages.dispatch_results = bitvec![ + Msb0, u8; + 1; + (messages_count / relayer_entries) as _ + ]; + entry + }) + .collect(), + last_confirmed_nonce: messages_count as _, + } + .encode() + .len(); + let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs(); + assert!( + difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1, + "Too large difference between actual ({}) and expected ({:?}) inbound lane data size. Test case: {}+{}", + actual_size, + expected_size, + relayer_entries, + messages_count, + ); } - .encode() - .len(); - let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs(); - assert!( - difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1, - "Too large difference between actual ({}) and expected ({:?}) inbound lane data size", - actual_size, - expected_size, - ); + } + + #[test] + fn message_dispatch_result_works() { + let delivered_messages = DeliveredMessages { + begin: 100, + end: 150, + dispatch_results: bitvec![Msb0, u8; 1; 151], + }; + + assert!(!delivered_messages.contains_message(99)); + assert!(delivered_messages.contains_message(100)); + assert!(delivered_messages.contains_message(150)); + assert!(!delivered_messages.contains_message(151)); + + assert!(delivered_messages.message_dispatch_result(125)); } } diff --git a/bridges/primitives/messages/src/source_chain.rs b/bridges/primitives/messages/src/source_chain.rs index 1d313634bcba..392331eda672 100644 --- a/bridges/primitives/messages/src/source_chain.rs +++ b/bridges/primitives/messages/src/source_chain.rs @@ -16,7 +16,7 @@ //! Primitives of messages module, that are used on the source chain. 
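A minimal sketch (not part of the patch) of how the new `DeliveredMessages`/`UnrewardedRelayer` types introduced above fit together with `total_unrewarded_messages`. It assumes the `bp-messages` API exactly as added in this diff; the `u64` relayer id and the `main` wrapper are illustrative only.

use std::collections::VecDeque;
use bp_messages::{total_unrewarded_messages, DeliveredMessages, UnrewardedRelayer};

fn main() {
    // A relayer delivers messages 1..=3; the dispatcher reports a per-message result bit.
    let mut delivered = DeliveredMessages::new(1, true); // message 1 dispatched successfully
    delivered.note_dispatched_message(false); // message 2 failed to dispatch
    delivered.note_dispatched_message(true); // message 3 dispatched successfully

    assert!(delivered.contains_message(2));
    assert!(!delivered.message_dispatch_result(2));

    // The inbound lane keeps one `UnrewardedRelayer` entry per delivery until rewards are confirmed.
    let relayers: VecDeque<UnrewardedRelayer<u64>> =
        vec![UnrewardedRelayer { relayer: 42, messages: delivered }].into_iter().collect();
    assert_eq!(total_unrewarded_messages(&relayers), Some(3));
}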
-use crate::{InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; +use crate::{DeliveredMessages, InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; use bp_runtime::Size; use frame_support::{Parameter, RuntimeDebug}; @@ -135,6 +135,15 @@ pub trait MessageDeliveryAndDispatchPayment { } } +/// Handler for messages delivery confirmation. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait OnDeliveryConfirmed { + /// Called when we receive confirmation that our messages have been delivered to the + /// target chain. The confirmation also has single bit dispatch result for every + /// confirmed message (see `DeliveredMessages` for details). + fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) {} +} + /// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and /// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. pub struct ForbidOutboundMessages; diff --git a/bridges/primitives/messages/src/target_chain.rs b/bridges/primitives/messages/src/target_chain.rs index 676e919bc619..d1b87fd02323 100644 --- a/bridges/primitives/messages/src/target_chain.rs +++ b/bridges/primitives/messages/src/target_chain.rs @@ -18,7 +18,7 @@ use crate::{LaneId, Message, MessageData, MessageKey, OutboundLaneData}; -use bp_runtime::Size; +use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode, Error as CodecError}; use frame_support::{weights::Weight, Parameter, RuntimeDebug}; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, prelude::*}; @@ -84,7 +84,7 @@ pub trait SourceHeaderChain { } /// Called when inbound message is received. -pub trait MessageDispatch { +pub trait MessageDispatch { /// Decoded message payload type. Valid message may contain invalid payload. In this case /// message is delivered, but dispatch fails. Therefore, two separate types of payload /// (opaque `MessagePayload` used in delivery and this `DispatchPayload` used in dispatch). @@ -100,7 +100,13 @@ pub trait MessageDispatch { /// /// It is up to the implementers of this trait to determine whether the message /// is invalid (i.e. improperly encoded, has too large weight, ...) or not. - fn dispatch(message: DispatchMessage); + /// + /// If your configuration allows paying dispatch fee at the target chain, then + /// it must be paid inside this method to the `relayer_account`. 
+ fn dispatch( + relayer_account: &AccountId, + message: DispatchMessage, + ) -> MessageDispatchResult; } impl Default for ProvedLaneMessages { @@ -149,12 +155,18 @@ impl SourceHeaderChain for ForbidInboundMessages { } } -impl MessageDispatch for ForbidInboundMessages { +impl MessageDispatch for ForbidInboundMessages { type DispatchPayload = (); fn dispatch_weight(_message: &DispatchMessage) -> Weight { Weight::MAX } - fn dispatch(_message: DispatchMessage) {} + fn dispatch(_: &AccountId, _: DispatchMessage) -> MessageDispatchResult { + MessageDispatchResult { + dispatch_result: false, + unspent_weight: 0, + dispatch_fee_paid_during_dispatch: false, + } + } } diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index c9858c0820d7..a1619b27bcf4 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -22,7 +22,7 @@ use frame_support::{ dispatch::Dispatchable, parameter_types, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, WEIGHT_PER_SECOND}, DispatchClass, Weight, }, Blake2_128Concat, RuntimeDebug, StorageHasher, Twox128, @@ -33,13 +33,13 @@ use sp_core::Hasher as HasherT; use sp_runtime::{ generic, traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiAddress, MultiSignature, OpaqueExtrinsic, Perbill, + MultiAddress, MultiSignature, OpaqueExtrinsic, }; use sp_std::prelude::Vec; // Re-export's to avoid extra substrate dependencies in chain-specific crates. -pub use frame_support::Parameter; -pub use sp_runtime::traits::Convert; +pub use frame_support::{weights::constants::ExtrinsicBaseWeight, Parameter}; +pub use sp_runtime::{traits::Convert, Perbill}; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at /// Polkadot-like chain. This mostly depends on number of entries in the storage trie. diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index e12f484417dd..a4bb400a93c4 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -29,29 +29,31 @@ pub use storage_proof::{Error as StorageProofError, StorageProofChecker}; #[cfg(feature = "std")] pub use storage_proof::craft_valid_storage_proof; +pub mod messages; + mod chain; mod storage_proof; /// Use this when something must be shared among all instances. -pub const NO_INSTANCE_ID: InstanceId = [0, 0, 0, 0]; +pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; /// Bridge-with-Rialto instance id. -pub const RIALTO_BRIDGE_INSTANCE: InstanceId = *b"rlto"; +pub const RIALTO_CHAIN_ID: ChainId = *b"rlto"; /// Bridge-with-Millau instance id. -pub const MILLAU_BRIDGE_INSTANCE: InstanceId = *b"mlau"; +pub const MILLAU_CHAIN_ID: ChainId = *b"mlau"; /// Bridge-with-Polkadot instance id. -pub const POLKADOT_BRIDGE_INSTANCE: InstanceId = *b"pdot"; +pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot"; /// Bridge-with-Kusama instance id. -pub const KUSAMA_BRIDGE_INSTANCE: InstanceId = *b"ksma"; +pub const KUSAMA_CHAIN_ID: ChainId = *b"ksma"; /// Bridge-with-Rococo instance id. -pub const ROCOCO_BRIDGE_INSTANCE: InstanceId = *b"roco"; +pub const ROCOCO_CHAIN_ID: ChainId = *b"roco"; /// Bridge-with-Wococo instance id. -pub const WOCOCO_BRIDGE_INSTANCE: InstanceId = *b"woco"; +pub const WOCOCO_CHAIN_ID: ChainId = *b"woco"; /// Call-dispatch module prefix. 
pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/dispatch"; @@ -62,11 +64,13 @@ pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/ /// A unique prefix for entropy when generating a cross-chain account ID for the Root account. pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root"; -/// Id of deployed module instance. We have a bunch of pallets that may be used in -/// different bridges. E.g. messages pallet may be deployed twice in the same -/// runtime to bridge ThisChain with Chain1 and Chain2. Sometimes we need to be able -/// to identify deployed instance dynamically. This type is used for that. -pub type InstanceId = [u8; 4]; +/// Unique identifier of the chain. +/// +/// In addition to its main function (identifying the chain), this type may also be used to +/// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g. +/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and Chain2. +/// Sometimes we need to be able to identify deployed instance dynamically. This type may be used for that. +pub type ChainId = [u8; 4]; /// Type of accounts on the source chain. pub enum SourceAccount { @@ -90,7 +94,7 @@ pub enum SourceAccount { /// Note: If the same `bridge_id` is used across different chains (for example, if one source chain /// is bridged to multiple target chains), then all the derived accounts would be the same across /// the different chains. This could negatively impact users' privacy across chains. -pub fn derive_account_id(bridge_id: InstanceId, id: SourceAccount) -> H256 +pub fn derive_account_id(bridge_id: ChainId, id: SourceAccount) -> H256 where AccountId: Encode, { @@ -107,7 +111,7 @@ where /// /// The account ID can be the same across different instances of `pallet-bridge-messages` if the same /// `bridge_id` is used. -pub fn derive_relayer_fund_account_id(bridge_id: InstanceId) -> H256 { +pub fn derive_relayer_fund_account_id(bridge_id: ChainId) -> H256 { ("relayer-fund-account", bridge_id).using_encoded(blake2_256).into() } diff --git a/bridges/primitives/runtime/src/messages.rs b/bridges/primitives/runtime/src/messages.rs new file mode 100644 index 000000000000..f6e04619c723 --- /dev/null +++ b/bridges/primitives/runtime/src/messages.rs @@ -0,0 +1,56 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives that may be used by different message delivery and dispatch mechanisms. + +use codec::{Decode, Encode}; +use frame_support::{weights::Weight, RuntimeDebug}; + +/// Where message dispatch fee is paid? +#[derive(Encode, Decode, RuntimeDebug, Clone, Copy, PartialEq, Eq)] +pub enum DispatchFeePayment { + /// The dispacth fee is paid at the source chain. 
+ AtSourceChain, + /// The dispatch fee is paid at the target chain. + /// + /// The fee will be paid right before the message is dispatched. So in case of any other + /// issues (like invalid call encoding, invalid signature, ...) the dispatch module won't + /// do any direct transfers. Instead, it'll return fee related to this message dispatch to the + /// relayer. + AtTargetChain, +} + +/// Message dispatch result. +#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct MessageDispatchResult { + /// Dispatch result flag. This flag is relayed back to the source chain and, generally + /// speaking, may bring any (that fits in single bit) information from the dispatcher at + /// the target chain to the message submitter at the source chain. If you're using immediate + /// call dispatcher, then it'll be result of the dispatch - `true` if dispatch has succeeded + /// and `false` otherwise. + pub dispatch_result: bool, + /// Unspent dispatch weight. This weight that will be deducted from total delivery transaction + /// weight, thus reducing the transaction cost. This shall not be zero in (at least) two cases: + /// + /// 1) if message has been dispatched successfully, but post-dispatch weight is less than + /// the weight, declared by the message sender; + /// 2) if message has not been dispatched at all. + pub unspent_weight: Weight, + /// Whether the message dispatch fee has been paid during dispatch. This will be true if your + /// configuration supports pay-dispatch-fee-at-target-chain option and message sender has enabled + /// this option. + pub dispatch_fee_paid_during_dispatch: bool, +} diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index 37bcc622fbdd..fe6a68087873 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -7,9 +7,9 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] bp-header-chain = { path = "../header-chain", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] } finality-grandpa = { version = "0.14.1", default-features = false } -parity-scale-codec = { version = "2.0.0", default-features = false } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -19,9 +19,9 @@ sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", d default = ["std"] std = [ "bp-header-chain/std", + "codec/std", "ed25519-dalek/std", "finality-grandpa/std", - "parity-scale-codec/std", "sp-application-crypto/std", "sp-finality-grandpa/std", "sp-runtime/std", diff --git a/bridges/primitives/test-utils/src/keyring.rs b/bridges/primitives/test-utils/src/keyring.rs index 6c5b1cae9114..b83678cae5e5 100644 --- a/bridges/primitives/test-utils/src/keyring.rs +++ b/bridges/primitives/test-utils/src/keyring.rs @@ -16,9 +16,9 @@ //! Utilities for working with test accounts. 
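A hedged sketch of how a dispatcher might fill the `MessageDispatchResult` defined above: the declared weight is refunded in full when the message cannot be dispatched at all, and the fee-paid flag is only set when the sender opted for `DispatchFeePayment::AtTargetChain`. The `dispatch_outcome` helper and its parameters are assumptions for illustration; only the two `bp_runtime::messages` types come from this diff.

use bp_runtime::messages::{DispatchFeePayment, MessageDispatchResult};
use frame_support::weights::Weight;

// Hypothetical helper; `actual_weight` is `None` when the message could not be dispatched.
fn dispatch_outcome(
    declared_weight: Weight,
    actual_weight: Option<Weight>,
    fee_payment: DispatchFeePayment,
) -> MessageDispatchResult {
    match actual_weight {
        Some(actual) => MessageDispatchResult {
            dispatch_result: true,
            // Refund the difference between declared and actually used weight.
            unspent_weight: declared_weight.saturating_sub(actual),
            dispatch_fee_paid_during_dispatch: fee_payment == DispatchFeePayment::AtTargetChain,
        },
        None => MessageDispatchResult {
            dispatch_result: false,
            // Nothing was dispatched, so the whole declared weight is unspent.
            unspent_weight: declared_weight,
            dispatch_fee_paid_during_dispatch: false,
        },
    }
}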
+use codec::Encode; use ed25519_dalek::{Keypair, PublicKey, SecretKey, Signature}; use finality_grandpa::voter_set::VoterSet; -use parity_scale_codec::Encode; use sp_application_crypto::Public; use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight}; use sp_runtime::RuntimeDebug; diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index 0fcc263763c3..64109754086c 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -19,6 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; use sp_application_crypto::TryFrom; use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; use sp_finality_grandpa::{AuthoritySignature, SetId}; @@ -46,10 +47,10 @@ pub struct JustificationGeneratorParams { /// /// The size of the set will determine the number of pre-commits in our justification. pub authorities: Vec<(Account, AuthorityWeight)>, - /// The total number of vote ancestries in our justification. + /// The total number of precommit ancestors in the `votes_ancestries` field our justification. /// /// These may be distributed among many different forks. - pub votes: u32, + pub ancestors: u32, /// The number of forks. /// /// Useful for creating a "worst-case" scenario in which each authority is on its own fork. @@ -63,7 +64,7 @@ impl Default for JustificationGeneratorParams { round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, authorities: test_keyring(), - votes: 2, + ancestors: 2, forks: 1, } } @@ -94,35 +95,33 @@ pub fn make_justification_for_header(params: JustificationGeneratorP round, set_id, authorities, - mut votes, + mut ancestors, forks, } = params; - let (target_hash, target_number) = (header.hash(), *header.number()); - let mut precommits = vec![]; let mut votes_ancestries = vec![]; + let mut precommits = vec![]; assert!(forks != 0, "Need at least one fork to have a chain.."); - assert!(votes >= forks, "Need at least one header per fork."); assert!( forks as usize <= authorities.len(), "If we have more forks than authorities we can't create valid pre-commits for all the forks." ); // Roughly, how many vote ancestries do we want per fork - let target_depth = (votes + forks - 1) / forks; + let target_depth = (ancestors + forks - 1) / forks; let mut unsigned_precommits = vec![]; for i in 0..forks { - let depth = if votes >= target_depth { - votes -= target_depth; + let depth = if ancestors >= target_depth { + ancestors -= target_depth; target_depth } else { - votes + ancestors }; // Note: Adding 1 to account for the target header - let chain = generate_chain(i as u8, depth + 1, &header); + let chain = generate_chain(i as u32, depth + 1, &header); // We don't include our finality target header in the vote ancestries for child in &chain[1..] 
{ @@ -138,7 +137,7 @@ pub fn make_justification_for_header(params: JustificationGeneratorP for (i, (id, _weight)) in authorities.iter().enumerate() { // Assign authorities to sign pre-commits in a round-robin fashion let target = unsigned_precommits[i % forks as usize]; - let precommit = signed_precommit::(&id, target, round, set_id); + let precommit = signed_precommit::(id, target, round, set_id); precommits.push(precommit); } @@ -154,7 +153,7 @@ pub fn make_justification_for_header(params: JustificationGeneratorP } } -fn generate_chain(fork_id: u8, depth: u32, ancestor: &H) -> Vec { +fn generate_chain(fork_id: u32, depth: u32, ancestor: &H) -> Vec { let mut headers = vec![ancestor.clone()]; for i in 1..depth { @@ -169,7 +168,7 @@ fn generate_chain(fork_id: u8, depth: u32, ancestor: &H) -> Vec { header .digest_mut() .logs - .push(sp_runtime::DigestItem::Other(vec![fork_id])); + .push(sp_runtime::DigestItem::Other(fork_id.encode())); headers.push(header); } @@ -177,7 +176,8 @@ fn generate_chain(fork_id: u8, depth: u32, ancestor: &H) -> Vec { headers } -fn signed_precommit( +/// Create signed precommit with given target. +pub fn signed_precommit( signer: &Account, target: (H::Hash, H::Number), round: u64, diff --git a/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs index 84c12be7a708..3f9076f6db22 100644 --- a/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs +++ b/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs @@ -60,8 +60,8 @@ pub async fn run(params: EthereumDeployContractParams) { } = params; let result = async move { - let eth_client = EthereumClient::new(eth_params).await.map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::::new(sub_params).await.map_err(RpcError::Substrate)?; + let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?; + let sub_client = SubstrateClient::::try_connect(sub_params).await.map_err(RpcError::Substrate)?; let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?; let initial_set_id = sub_initial_authorities_set_id.unwrap_or(0); diff --git a/bridges/relays/bin-ethereum/src/ethereum_exchange.rs b/bridges/relays/bin-ethereum/src/ethereum_exchange.rs index 18470512b5d6..3111aa2de436 100644 --- a/bridges/relays/bin-ethereum/src/ethereum_exchange.rs +++ b/bridges/relays/bin-ethereum/src/ethereum_exchange.rs @@ -335,8 +335,10 @@ async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_has .. } = params; - let eth_client = EthereumClient::new(eth_params).await.map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::::new(sub_params) + let eth_client = EthereumClient::try_connect(eth_params) + .await + .map_err(RpcError::Ethereum)?; + let sub_client = SubstrateClient::::try_connect(sub_params) .await .map_err(RpcError::Substrate)?; @@ -363,12 +365,8 @@ async fn run_auto_transactions_relay_loop( .. 
} = params; - let eth_client = EthereumClient::new(eth_params) - .await - .map_err(|err| format!("Error starting Ethereum client: {:?}", err))?; - let sub_client = SubstrateClient::::new(sub_params) - .await - .map_err(|err| format!("Error starting Substrate client: {:?}", err))?; + let eth_client = EthereumClient::new(eth_params).await; + let sub_client = SubstrateClient::::new(sub_params).await; let eth_start_with_block_number = match eth_start_with_block_number { Some(eth_start_with_block_number) => eth_start_with_block_number, diff --git a/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs index 09871a0fc786..602d4f14e4f0 100644 --- a/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs +++ b/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs @@ -52,7 +52,7 @@ pub async fn run(params: EthereumExchangeSubmitParams) { } = params; let result: Result<_, String> = async move { - let eth_client = EthereumClient::new(eth_params) + let eth_client = EthereumClient::try_connect(eth_params) .await .map_err(|err| format!("error connecting to Ethereum node: {:?}", err))?; diff --git a/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs b/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs index 3dcd27e18f6b..111abcd86e71 100644 --- a/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs +++ b/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs @@ -270,8 +270,8 @@ pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> { instance, } = params; - let eth_client = EthereumClient::new(eth_params).await?; - let sub_client = SubstrateClient::::new(sub_params).await?; + let eth_client = EthereumClient::new(eth_params).await; + let sub_client = SubstrateClient::::new(sub_params).await; let sign_sub_transactions = match sync_params.target_tx_mode { TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, diff --git a/bridges/relays/bin-ethereum/src/instances.rs b/bridges/relays/bin-ethereum/src/instances.rs index 30752cc59e23..2ade8632a92c 100644 --- a/bridges/relays/bin-ethereum/src/instances.rs +++ b/bridges/relays/bin-ethereum/src/instances.rs @@ -53,7 +53,7 @@ impl BridgeInstance for RialtoPoA { .into_iter() .map(|header| { ( - into_substrate_ethereum_header(&header.header()), + into_substrate_ethereum_header(header.header()), into_substrate_ethereum_receipts(header.extra()), ) }) @@ -65,7 +65,7 @@ impl BridgeInstance for RialtoPoA { fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( - into_substrate_ethereum_header(&header.header()), + into_substrate_ethereum_header(header.header()), into_substrate_ethereum_receipts(header.extra()), ); diff --git a/bridges/relays/bin-ethereum/src/main.rs b/bridges/relays/bin-ethereum/src/main.rs index 234e1237fcf5..bcdae353d3dc 100644 --- a/bridges/relays/bin-ethereum/src/main.rs +++ b/bridges/relays/bin-ethereum/src/main.rs @@ -60,7 +60,7 @@ async fn run_command(matches: &clap::ArgMatches<'_>) { match matches.subcommand() { ("eth-to-sub", Some(eth_to_sub_matches)) => { log::info!(target: "bridge", "Starting ETH âž¡ SUB relay."); - if ethereum_sync_loop::run(match ethereum_sync_params(ð_to_sub_matches) { + if ethereum_sync_loop::run(match ethereum_sync_params(eth_to_sub_matches) { Ok(ethereum_sync_params) => ethereum_sync_params, Err(err) => { log::error!(target: "bridge", "Error parsing parameters: {}", err); @@ -75,7 +75,7 @@ async fn 
run_command(matches: &clap::ArgMatches<'_>) { } ("sub-to-eth", Some(sub_to_eth_matches)) => { log::info!(target: "bridge", "Starting SUB âž¡ ETH relay."); - if substrate_sync_loop::run(match substrate_sync_params(&sub_to_eth_matches) { + if substrate_sync_loop::run(match substrate_sync_params(sub_to_eth_matches) { Ok(substrate_sync_params) => substrate_sync_params, Err(err) => { log::error!(target: "bridge", "Error parsing parameters: {}", err); @@ -90,7 +90,7 @@ async fn run_command(matches: &clap::ArgMatches<'_>) { } ("eth-deploy-contract", Some(eth_deploy_matches)) => { log::info!(target: "bridge", "Deploying ETH contracts."); - ethereum_deploy_contract::run(match ethereum_deploy_contract_params(ð_deploy_matches) { + ethereum_deploy_contract::run(match ethereum_deploy_contract_params(eth_deploy_matches) { Ok(ethereum_deploy_params) => ethereum_deploy_params, Err(err) => { log::error!(target: "bridge", "Error during contract deployment: {}", err); @@ -101,7 +101,7 @@ async fn run_command(matches: &clap::ArgMatches<'_>) { } ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction."); - ethereum_exchange_submit::run(match ethereum_exchange_submit_params(ð_exchange_submit_matches) { + ethereum_exchange_submit::run(match ethereum_exchange_submit_params(eth_exchange_submit_matches) { Ok(eth_exchange_submit_params) => eth_exchange_submit_params, Err(err) => { log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err); @@ -112,7 +112,7 @@ async fn run_command(matches: &clap::ArgMatches<'_>) { } ("eth-exchange-sub", Some(eth_exchange_matches)) => { log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay."); - ethereum_exchange::run(match ethereum_exchange_params(ð_exchange_matches) { + ethereum_exchange::run(match ethereum_exchange_params(eth_exchange_matches) { Ok(eth_exchange_params) => eth_exchange_params, Err(err) => { log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); @@ -285,7 +285,7 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result Result<(), RpcError> { metrics_params, } = params; - let eth_client = EthereumClient::new(eth_params).await?; - let sub_client = SubstrateClient::::new(sub_params).await?; + let eth_client = EthereumClient::new(eth_params).await; + let sub_client = SubstrateClient::::new(sub_params).await; let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); let source = SubstrateHeadersSource::new(sub_client); diff --git a/bridges/relays/bin-substrate/Cargo.toml b/bridges/relays/bin-substrate/Cargo.toml index bf047e5b9992..c2f30546f30f 100644 --- a/bridges/relays/bin-substrate/Cargo.toml +++ b/bridges/relays/bin-substrate/Cargo.toml @@ -59,5 +59,6 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } [dev-dependencies] -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } hex-literal = "0.3" +pallet-bridge-grandpa = { path = "../../modules/grandpa" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/bridges/relays/bin-substrate/src/chains/millau.rs b/bridges/relays/bin-substrate/src/chains/millau.rs index 41ac76082807..3cba16ea32f4 100644 --- a/bridges/relays/bin-substrate/src/chains/millau.rs +++ b/bridges/relays/bin-substrate/src/chains/millau.rs @@ -23,7 
+23,7 @@ use crate::cli::{ }; use bp_message_dispatch::{CallOrigin, MessagePayload}; use codec::Decode; -use frame_support::weights::{GetDispatchInfo, Weight}; +use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; use relay_millau_client::Millau; use sp_version::RuntimeVersion; @@ -62,6 +62,10 @@ impl CliEncodeCall for Millau { }, }) } + + fn get_dispatch_info(call: &millau_runtime::Call) -> anyhow::Result { + Ok(call.get_dispatch_info()) + } } impl CliChain for Millau { diff --git a/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs index d96fa7b79720..31dc51e9c27b 100644 --- a/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs +++ b/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs @@ -23,7 +23,7 @@ use crate::messages_source::SubstrateMessagesSource; use crate::messages_target::SubstrateMessagesTarget; use bp_messages::MessageNonce; -use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; +use bp_runtime::{MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; use frame_support::dispatch::GetDispatchInfo; @@ -42,8 +42,7 @@ pub type MillauMessagesToRialto = SubstrateMessageLaneToSubstrate; impl SubstrateMessageLane for MillauMessagesToRialto { - const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = - bp_rialto::TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD; + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD; const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_GENERATED_NONCE_METHOD; const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD; @@ -59,7 +58,7 @@ impl SubstrateMessageLane for MillauMessagesToRialto { type SourceChain = Millau; type TargetChain = Rialto; - fn source_transactions_author(&self) -> bp_rialto::AccountId { + fn source_transactions_author(&self) -> bp_millau::AccountId { (*self.source_sign.public().as_array_ref()).into() } @@ -127,20 +126,12 @@ impl SubstrateMessageLane for MillauMessagesToRialto { } /// Millau node as messages source. -type MillauSourceClient = SubstrateMessagesSource< - Millau, - MillauMessagesToRialto, - millau_runtime::Runtime, - millau_runtime::WithRialtoMessagesInstance, ->; +type MillauSourceClient = + SubstrateMessagesSource; /// Rialto node as messages target. -type RialtoTargetClient = SubstrateMessagesTarget< - Rialto, - MillauMessagesToRialto, - rialto_runtime::Runtime, - rialto_runtime::WithMillauMessagesInstance, ->; +type RialtoTargetClient = + SubstrateMessagesTarget; /// Run Millau-to-Rialto messages sync. 
pub async fn run( @@ -160,7 +151,7 @@ pub async fn run( }; // 2/3 is reserved for proofs and tx overhead - let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() as usize / 3; + let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() / 3; // TODO: use Millau weights after https://github.com/paritytech/parity-bridges-common/issues/390 let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = select_delivery_transaction_limits::>( @@ -194,20 +185,21 @@ pub async fn run( max_messages_in_single_batch, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, + relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, }, }, MillauSourceClient::new( source_client.clone(), lane.clone(), lane_id, - RIALTO_BRIDGE_INSTANCE, + RIALTO_CHAIN_ID, params.target_to_source_headers_relay, ), RialtoTargetClient::new( params.target_client, lane, lane_id, - MILLAU_BRIDGE_INSTANCE, + MILLAU_CHAIN_ID, params.source_to_target_headers_relay, ), relay_utils::relay_metrics( diff --git a/bridges/relays/bin-substrate/src/chains/mod.rs b/bridges/relays/bin-substrate/src/chains/mod.rs index b4061ced37f7..09d3c3e9c060 100644 --- a/bridges/relays/bin-substrate/src/chains/mod.rs +++ b/bridges/relays/bin-substrate/src/chains/mod.rs @@ -21,8 +21,10 @@ pub mod millau_messages_to_rialto; pub mod rialto_headers_to_millau; pub mod rialto_messages_to_millau; pub mod rococo_headers_to_wococo; +pub mod rococo_messages_to_wococo; pub mod westend_headers_to_millau; pub mod wococo_headers_to_rococo; +pub mod wococo_messages_to_rococo; mod millau; mod rialto; @@ -86,7 +88,7 @@ mod tests { let millau_public: bp_millau::AccountSigner = millau_sign.public().into(); let millau_account_id: bp_millau::AccountId = millau_public.into_account(); - let digest = millau_runtime::rialto_account_ownership_digest( + let digest = millau_runtime::millau_to_rialto_account_ownership_digest( &call, millau_account_id, rialto_runtime::VERSION.spec_version, @@ -107,7 +109,7 @@ mod tests { let rialto_public: bp_rialto::AccountSigner = rialto_sign.public().into(); let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); - let digest = rialto_runtime::millau_account_ownership_digest( + let digest = rialto_runtime::rialto_to_millau_account_ownership_digest( &call, rialto_account_id, millau_runtime::VERSION.spec_version, @@ -271,7 +273,10 @@ mod rococo_tests { votes_ancestries: vec![], }; - let actual = bp_rococo::BridgeGrandpaWococoCall::submit_finality_proof(header.clone(), justification.clone()); + let actual = relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof( + header.clone(), + justification.clone(), + ); let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( header, justification, diff --git a/bridges/relays/bin-substrate/src/chains/rialto.rs b/bridges/relays/bin-substrate/src/chains/rialto.rs index 77eeb06da0d7..9a6185b4fc7d 100644 --- a/bridges/relays/bin-substrate/src/chains/rialto.rs +++ b/bridges/relays/bin-substrate/src/chains/rialto.rs @@ -23,7 +23,7 @@ use crate::cli::{ }; use bp_message_dispatch::{CallOrigin, MessagePayload}; use codec::Decode; -use frame_support::weights::{GetDispatchInfo, Weight}; +use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; use relay_rialto_client::Rialto; use sp_version::RuntimeVersion; @@ -60,6 +60,10 @@ impl CliEncodeCall for Rialto { }, }) } + + fn get_dispatch_info(call: &rialto_runtime::Call) -> anyhow::Result { + Ok(call.get_dispatch_info()) + } } 
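The `max_extrinsic_size() / 3` expressions in the relay entrypoints above reserve two thirds of the target chain's extrinsic size for the messages proof and transaction overhead, leaving one third for message payloads. A small illustrative sketch of that arithmetic; the helper name and the 4 MiB figure are assumptions, not values from the patch.

// Illustrative only: recomputes the batch size limit used by the message relays above.
// `max_extrinsic_size` stands in for e.g. `bp_rialto::max_extrinsic_size()`.
fn max_messages_size_in_single_batch(max_extrinsic_size: u32) -> u32 {
    // 2/3 is reserved for the storage proof and transaction overhead.
    max_extrinsic_size / 3
}

fn main() {
    // With a 4 MiB extrinsic limit, at most ~1.33 MiB of messages fit into one delivery batch.
    assert_eq!(max_messages_size_in_single_batch(4 * 1024 * 1024), 1_398_101);
}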
impl CliChain for Rialto { diff --git a/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs index ec39a4caa3fc..89f9dd7e997e 100644 --- a/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs +++ b/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs @@ -23,7 +23,7 @@ use crate::messages_source::SubstrateMessagesSource; use crate::messages_target::SubstrateMessagesTarget; use bp_messages::MessageNonce; -use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; +use bp_runtime::{MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; use frame_support::dispatch::GetDispatchInfo; @@ -42,8 +42,7 @@ pub type RialtoMessagesToMillau = SubstrateMessageLaneToSubstrate; impl SubstrateMessageLane for RialtoMessagesToMillau { - const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = - bp_millau::TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD; + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD; const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_GENERATED_NONCE_METHOD; const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD; @@ -86,7 +85,7 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { Bytes(transaction.encode()) } - fn target_transactions_author(&self) -> bp_rialto::AccountId { + fn target_transactions_author(&self) -> bp_millau::AccountId { (*self.target_sign.public().as_array_ref()).into() } @@ -127,20 +126,12 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { } /// Rialto node as messages source. -type RialtoSourceClient = SubstrateMessagesSource< - Rialto, - RialtoMessagesToMillau, - rialto_runtime::Runtime, - rialto_runtime::WithMillauMessagesInstance, ->; +type RialtoSourceClient = + SubstrateMessagesSource; /// Millau node as messages target. -type MillauTargetClient = SubstrateMessagesTarget< - Millau, - RialtoMessagesToMillau, - millau_runtime::Runtime, - millau_runtime::WithRialtoMessagesInstance, ->; +type MillauTargetClient = + SubstrateMessagesTarget; /// Run Rialto-to-Millau messages sync. 
pub async fn run( @@ -160,7 +151,7 @@ pub async fn run( }; // 2/3 is reserved for proofs and tx overhead - let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() as usize / 3; + let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() / 3; let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = select_delivery_transaction_limits::>( bp_millau::max_extrinsic_weight(), @@ -193,20 +184,21 @@ pub async fn run( max_messages_in_single_batch, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, + relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, }, }, RialtoSourceClient::new( source_client.clone(), lane.clone(), lane_id, - MILLAU_BRIDGE_INSTANCE, + MILLAU_CHAIN_ID, params.target_to_source_headers_relay, ), MillauTargetClient::new( params.target_client, lane, lane_id, - RIALTO_BRIDGE_INSTANCE, + RIALTO_CHAIN_ID, params.source_to_target_headers_relay, ), relay_utils::relay_metrics( diff --git a/bridges/relays/bin-substrate/src/chains/rococo.rs b/bridges/relays/bin-substrate/src/chains/rococo.rs index 0bcf388c3462..ec94450a63de 100644 --- a/bridges/relays/bin-substrate/src/chains/rococo.rs +++ b/bridges/relays/bin-substrate/src/chains/rococo.rs @@ -14,11 +14,70 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::{encode_message, CliChain}; -use frame_support::weights::Weight; +use codec::Decode; +use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use relay_rococo_client::Rococo; use sp_version::RuntimeVersion; +use crate::cli::{ + bridge, + encode_call::{Call, CliEncodeCall}, + encode_message, CliChain, +}; + +/// Weight of the `system::remark` call at Rococo. +/// +/// This weight is larger (x2) than actual weight at current Rooco runtime to avoid unsuccessful +/// calls in the future. But since it is used only in tests (and on test chains), this is ok. +pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; + +impl CliEncodeCall for Rococo { + fn max_extrinsic_size() -> u32 { + bp_rococo::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Remark { remark_payload, .. 
} => { + relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + )) + } + Call::BridgeSendMessage { + lane, + payload, + fee, + bridge_instance_index, + } => match *bridge_instance_index { + bridge::ROCOCO_TO_WOCOCO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + relay_rococo_client::runtime::Call::BridgeMessagesWococo( + relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message(lane.0, payload, fee.0), + ) + } + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + _ => anyhow::bail!("The call is not supported"), + }) + } + + fn get_dispatch_info(call: &relay_rococo_client::runtime::Call) -> anyhow::Result { + match *call { + relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark(_)) => { + Ok(DispatchInfo { + weight: SYSTEM_REMARK_CALL_WEIGHT, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }) + } + _ => anyhow::bail!("Unsupported Rococo call: {:?}", call), + } + } +} + impl CliChain for Rococo { const RUNTIME_VERSION: RuntimeVersion = bp_rococo::VERSION; @@ -30,7 +89,7 @@ impl CliChain for Rococo { } fn max_extrinsic_weight() -> Weight { - 0 + bp_wococo::max_extrinsic_weight() } fn encode_message(_message: encode_message::MessagePayload) -> Result { diff --git a/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs index 5247c6068b5e..c7f60100f13c 100644 --- a/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs +++ b/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs @@ -16,6 +16,7 @@ //! Rococo-to-Wococo headers sync entrypoint. 
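For reference, a short sketch of the hard-coded dispatch info that `get_dispatch_info` returns above for the only call supported on the Rococo/Wococo CLI path (`system::remark`). It assumes `Weight` is the `u64` weight type used throughout this diff; the standalone `remark_dispatch_info` helper and the `main` wrapper are illustrative only.

use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight};

// Same value as `SYSTEM_REMARK_CALL_WEIGHT` in the patch: twice the measured Rococo weight,
// so that future runtime upgrades do not make test transactions underweight.
const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000;

fn remark_dispatch_info() -> DispatchInfo {
    DispatchInfo {
        weight: SYSTEM_REMARK_CALL_WEIGHT,
        class: DispatchClass::Normal,
        pays_fee: Pays::Yes,
    }
}

fn main() {
    assert_eq!(remark_dispatch_info().weight, 2_690_000);
}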
+use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY; use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; use bp_header_chain::justification::GrandpaJustification; @@ -38,6 +39,18 @@ impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo { crate::chains::add_polkadot_kusama_price_metrics::(params) } + fn start_relay_guards(&self) { + relay_substrate_client::guard::abort_on_spec_version_change( + self.target_client.clone(), + bp_wococo::VERSION.spec_version, + ); + relay_substrate_client::guard::abort_when_account_balance_decreased( + self.target_client.clone(), + self.transactions_author(), + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } + fn transactions_author(&self) -> bp_wococo::AccountId { (*self.target_sign.public().as_array_ref()).into() } @@ -48,10 +61,9 @@ impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo { header: RococoSyncHeader, proof: GrandpaJustification, ) -> Bytes { - let call = bp_wococo::Call::BridgeGrandpaRococo(bp_wococo::BridgeGrandpaRococoCall::submit_finality_proof( - header.into_inner(), - proof, - )); + let call = relay_wococo_client::runtime::Call::BridgeGrandpaRococo( + relay_wococo_client::runtime::BridgeGrandpaRococoCall::submit_finality_proof(header.into_inner(), proof), + ); let genesis_hash = *self.target_client.genesis_hash(); let transaction = Wococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); diff --git a/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs b/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs new file mode 100644 index 000000000000..be5f91116ec3 --- /dev/null +++ b/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs @@ -0,0 +1,227 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rococo-to-Wococo messages sync entrypoint. + +use crate::messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, +}; +use crate::messages_source::SubstrateMessagesSource; +use crate::messages_target::SubstrateMessagesTarget; + +use bp_messages::MessageNonce; +use bp_runtime::{ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID}; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use codec::Encode; +use messages_relay::message_lane::MessageLane; +use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams}; +use relay_substrate_client::{metrics::StorageProofOverheadMetric, Chain, TransactionSignScheme}; +use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo}; +use sp_core::{Bytes, Pair}; +use std::{ops::RangeInclusive, time::Duration}; + +/// Rococo-to-Wococo message lane. 
+pub type RococoMessagesToWococo = + SubstrateMessageLaneToSubstrate; + +impl SubstrateMessageLane for RococoMessagesToWococo { + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = + bp_wococo::TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = + bp_rococo::FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; + + type SourceChain = Rococo; + type TargetChain = Wococo; + + fn source_transactions_author(&self) -> bp_rococo::AccountId { + (*self.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_block: WococoHeaderId, + proof: ::MessagesReceivingProof, + ) -> Bytes { + let (relayers_state, proof) = proof; + let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo( + relay_rococo_client::runtime::BridgeMessagesWococoCall::receive_messages_delivery_proof( + proof, + relayers_state, + ), + ); + let genesis_hash = *self.source_client.genesis_hash(); + let transaction = Rococo::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Wococo -> Rococo confirmation transaction. Weight: /{}, size: {}/{}", + bp_rococo::max_extrinsic_weight(), + transaction.encode().len(), + bp_rococo::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } + + fn target_transactions_author(&self) -> bp_wococo::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_header: RococoHeaderId, + _nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes { + let (dispatch_weight, proof) = proof; + let FromBridgedChainMessagesProof { + ref nonces_start, + ref nonces_end, + .. + } = proof; + let messages_count = nonces_end - nonces_start + 1; + + let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo( + relay_wococo_client::runtime::BridgeMessagesRococoCall::receive_messages_proof( + self.relayer_id_at_source.clone(), + proof, + messages_count as _, + dispatch_weight, + ), + ); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Wococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Rococo -> Wococo delivery transaction. Weight: /{}, size: {}/{}", + bp_wococo::max_extrinsic_weight(), + transaction.encode().len(), + bp_wococo::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } +} + +/// Rococo node as messages source. +type RococoSourceClient = + SubstrateMessagesSource; + +/// Wococo node as messages target. +type WococoTargetClient = + SubstrateMessagesTarget; + +/// Run Rococo-to-Wococo messages sync. 
+pub async fn run( + params: MessagesRelayParams, +) -> Result<(), String> { + let stall_timeout = Duration::from_secs(5 * 60); + let relayer_id_at_rococo = (*params.source_sign.public().as_array_ref()).into(); + + let lane_id = params.lane_id; + let source_client = params.source_client; + let lane = RococoMessagesToWococo { + source_client: source_client.clone(), + source_sign: params.source_sign, + target_client: params.target_client.clone(), + target_sign: params.target_sign, + relayer_id_at_source: relayer_id_at_rococo, + }; + + // 2/3 is reserved for proofs and tx overhead + let max_messages_size_in_single_batch = bp_wococo::max_extrinsic_size() / 3; + // we don't know exact weights of the Wococo runtime. So to guess weights we'll be using + // weights from Rialto and then simply dividing it by x2. + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + select_delivery_transaction_limits::>( + bp_wococo::max_extrinsic_weight(), + bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = ( + max_messages_in_single_batch / 2, + max_messages_weight_in_single_batch / 2, + ); + + log::info!( + target: "bridge", + "Starting Rococo -> Wococo messages relay.\n\t\ + Rococo relayer account id: {:?}\n\t\ + Max messages in single transaction: {}\n\t\ + Max messages size in single transaction: {}\n\t\ + Max messages weight in single transaction: {}", + lane.relayer_id_at_source, + max_messages_in_single_batch, + max_messages_size_in_single_batch, + max_messages_weight_in_single_batch, + ); + + messages_relay::message_lane_loop::run( + messages_relay::message_lane_loop::Params { + lane: lane_id, + source_tick: Rococo::AVERAGE_BLOCK_INTERVAL, + target_tick: Wococo::AVERAGE_BLOCK_INTERVAL, + reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, + stall_timeout, + delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_messages_in_single_batch, + max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, + }, + }, + RococoSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + WOCOCO_CHAIN_ID, + params.target_to_source_headers_relay, + ), + WococoTargetClient::new( + params.target_client, + lane, + lane_id, + ROCOCO_CHAIN_ID, + params.source_to_target_headers_relay, + ), + relay_utils::relay_metrics( + Some(messages_relay::message_lane_loop::metrics_prefix::< + RococoMessagesToWococo, + >(&lane_id)), + params.metrics_params, + ) + .standalone_metric(|registry, prefix| { + StorageProofOverheadMetric::new( + registry, + prefix, + source_client.clone(), + "rococo_storage_proof_overhead".into(), + "Rococo storage proof overhead".into(), + ) + })? + .into_params(), + futures::future::pending(), + ) + .await +} diff --git a/bridges/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs b/bridges/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs deleted file mode 100644 index f1b390215748..000000000000 --- a/bridges/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Westend-to-Rococo headers sync entrypoint. - -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; - -use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; -use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams}; -use relay_substrate_client::{Chain, TransactionSignScheme}; -use relay_utils::metrics::MetricsParams; -use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend}; -use sp_core::{Bytes, Pair}; - -/// Westend-to-Rococo finality sync pipeline. -pub(crate) type WestendFinalityToRococo = SubstrateFinalityToSubstrate; - -impl SubstrateFinalitySyncPipeline for WestendFinalityToRococo { - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; - - type TargetChain = Rococo; - - fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::(params) - } - - fn transactions_author(&self) -> bp_rococo::AccountId { - (*self.target_sign.public().as_array_ref()).into() - } - - fn make_submit_finality_proof_transaction( - &self, - transaction_nonce: ::Index, - header: WestendSyncHeader, - proof: GrandpaJustification, - ) -> Bytes { - let call = bp_rococo::Call::BridgeGrandpaWestend(bp_rococo::BridgeGrandpaCall::submit_finality_proof( - header.into_inner(), - proof, - )); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); - - Bytes(transaction.encode()) - } -} diff --git a/bridges/relays/bin-substrate/src/chains/wococo.rs b/bridges/relays/bin-substrate/src/chains/wococo.rs index 549aa8a882c7..9b944d781685 100644 --- a/bridges/relays/bin-substrate/src/chains/wococo.rs +++ b/bridges/relays/bin-substrate/src/chains/wococo.rs @@ -14,11 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::{encode_message, CliChain}; -use frame_support::weights::Weight; +use codec::Decode; +use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use relay_wococo_client::Wococo; use sp_version::RuntimeVersion; +use crate::cli::{ + bridge, + encode_call::{Call, CliEncodeCall}, + encode_message, CliChain, +}; + +impl CliEncodeCall for Wococo { + fn max_extrinsic_size() -> u32 { + bp_wococo::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Remark { remark_payload, .. 
} => { + relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + )) + } + Call::BridgeSendMessage { + lane, + payload, + fee, + bridge_instance_index, + } => match *bridge_instance_index { + bridge::WOCOCO_TO_ROCOCO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + relay_wococo_client::runtime::Call::BridgeMessagesRococo( + relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message(lane.0, payload, fee.0), + ) + } + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + _ => anyhow::bail!("The call is not supported"), + }) + } + + fn get_dispatch_info(call: &relay_wococo_client::runtime::Call) -> anyhow::Result { + match *call { + relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark(_)) => { + Ok(DispatchInfo { + weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }) + } + _ => anyhow::bail!("Unsupported Rococo call: {:?}", call), + } + } +} + impl CliChain for Wococo { const RUNTIME_VERSION: RuntimeVersion = bp_wococo::VERSION; @@ -30,7 +83,7 @@ impl CliChain for Wococo { } fn max_extrinsic_weight() -> Weight { - 0 + bp_wococo::max_extrinsic_weight() } fn encode_message(_message: encode_message::MessagePayload) -> Result { diff --git a/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs index 9dfae5294e29..8ee30d3ff492 100644 --- a/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs +++ b/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs @@ -26,6 +26,13 @@ use relay_utils::metrics::MetricsParams; use relay_wococo_client::{SyncHeader as WococoSyncHeader, Wococo}; use sp_core::{Bytes, Pair}; +/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat +/// relay as gone wild. +/// +/// See `maximal_balance_decrease_per_day_is_sane` test for details. +/// Note that this is in plancks, so this corresponds to `1500 UNITS`. +pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_rococo::Balance = 1_500_000_000_000_000; + /// Wococo-to-Rococo finality sync pipeline. 
pub(crate) type WococoFinalityToRococo = SubstrateFinalityToSubstrate; @@ -38,6 +45,18 @@ impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { crate::chains::add_polkadot_kusama_price_metrics::(params) } + fn start_relay_guards(&self) { + relay_substrate_client::guard::abort_on_spec_version_change( + self.target_client.clone(), + bp_rococo::VERSION.spec_version, + ); + relay_substrate_client::guard::abort_when_account_balance_decreased( + self.target_client.clone(), + self.transactions_author(), + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } + fn transactions_author(&self) -> bp_rococo::AccountId { (*self.target_sign.public().as_array_ref()).into() } @@ -48,13 +67,50 @@ impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { header: WococoSyncHeader, proof: GrandpaJustification, ) -> Bytes { - let call = bp_rococo::Call::BridgeGrandpaWococo(bp_rococo::BridgeGrandpaWococoCall::submit_finality_proof( - header.into_inner(), - proof, - )); + let call = relay_rococo_client::runtime::Call::BridgeGrandpaWococo( + relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof(header.into_inner(), proof), + ); let genesis_hash = *self.target_client.genesis_hash(); let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); Bytes(transaction.encode()) } } + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::weights::WeightToFeePolynomial; + use pallet_bridge_grandpa::weights::WeightInfo; + + #[test] + fn maximal_balance_decrease_per_day_is_sane() { + // Rococo/Wococo GRANDPA pallet weights. They're now using Rialto weights => using `RialtoWeight` is justified. + // + // Using Rialto runtime this is slightly incorrect, because `DbWeight` of Rococo/Wococo runtime may differ + // from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is the same. + type RococoGrandpaPalletWeights = pallet_bridge_grandpa::weights::RialtoWeight; + + // The following formula shall not be treated as super-accurate - guard is to protect from mad relays, + // not to protect from over-average loses. + // + // Worst case: we're submitting proof for every source header. Since we submit every header, the number of + // headers in ancestry proof is near to 0 (let's round up to 2). And the number of authorities is 1024, + // which is (now) larger than on any existing chain => normally there'll be ~1024*2/3+1 commits. 
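+		// (with integer arithmetic, 1024 * 2 / 3 + 1 works out to 683 precommits per justification)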
+ const AVG_VOTES_ANCESTRIES_LEN: u32 = 2; + const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1; + let number_of_source_headers_per_day: bp_wococo::Balance = bp_wococo::DAYS as _; + let single_source_header_submit_call_weight = + RococoGrandpaPalletWeights::submit_finality_proof(AVG_VOTES_ANCESTRIES_LEN, AVG_PRECOMMITS_LEN); + // for simplicity - add extra weight for base tx fee + fee that is paid for the tx size + adjusted fee + let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2; + let single_source_header_tx_cost = bp_rococo::WeightToFee::calc(&single_source_header_submit_tx_weight); + let maximal_expected_decrease = single_source_header_tx_cost * number_of_source_headers_per_day; + assert!( + MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_expected_decrease, + "Maximal expected loss per day {} is larger than hardcoded {}", + maximal_expected_decrease, + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } +} diff --git a/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs b/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs new file mode 100644 index 000000000000..b696801569e8 --- /dev/null +++ b/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs @@ -0,0 +1,227 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Wococo-to-Rococo messages sync entrypoint. + +use crate::messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, +}; +use crate::messages_source::SubstrateMessagesSource; +use crate::messages_target::SubstrateMessagesTarget; + +use bp_messages::MessageNonce; +use bp_runtime::{ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID}; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use codec::Encode; +use messages_relay::message_lane::MessageLane; +use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams}; +use relay_substrate_client::{metrics::StorageProofOverheadMetric, Chain, TransactionSignScheme}; +use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo}; +use sp_core::{Bytes, Pair}; +use std::{ops::RangeInclusive, time::Duration}; + +/// Wococo-to-Rococo message lane. 
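+///
+/// (Mirror image of the Rococo-to-Wococo lane: the same wiring with the source and target chains
+/// swapped, so runtime method names and limits come from the opposite `bp_rococo`/`bp_wococo`
+/// constants.)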
+pub type WococoMessagesToRococo = + SubstrateMessageLaneToSubstrate; + +impl SubstrateMessageLane for WococoMessagesToRococo { + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = + bp_rococo::TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = + bp_wococo::FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + + type SourceChain = Wococo; + type TargetChain = Rococo; + + fn source_transactions_author(&self) -> bp_wococo::AccountId { + (*self.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_block: RococoHeaderId, + proof: ::MessagesReceivingProof, + ) -> Bytes { + let (relayers_state, proof) = proof; + let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo( + relay_wococo_client::runtime::BridgeMessagesRococoCall::receive_messages_delivery_proof( + proof, + relayers_state, + ), + ); + let genesis_hash = *self.source_client.genesis_hash(); + let transaction = Wococo::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Rococo -> Wococo confirmation transaction. Weight: /{}, size: {}/{}", + bp_wococo::max_extrinsic_weight(), + transaction.encode().len(), + bp_wococo::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } + + fn target_transactions_author(&self) -> bp_rococo::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_header: WococoHeaderId, + _nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes { + let (dispatch_weight, proof) = proof; + let FromBridgedChainMessagesProof { + ref nonces_start, + ref nonces_end, + .. + } = proof; + let messages_count = nonces_end - nonces_start + 1; + + let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo( + relay_rococo_client::runtime::BridgeMessagesWococoCall::receive_messages_proof( + self.relayer_id_at_source.clone(), + proof, + messages_count as _, + dispatch_weight, + ), + ); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Wococo -> Rococo delivery transaction. Weight: /{}, size: {}/{}", + bp_rococo::max_extrinsic_weight(), + transaction.encode().len(), + bp_rococo::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } +} + +/// Wococo node as messages source. +type WococoSourceClient = + SubstrateMessagesSource; + +/// Rococo node as messages target. +type RococoTargetClient = + SubstrateMessagesTarget; + +/// Run Wococo-to-Rococo messages sync. 
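+///
+/// (Same flow as the Rococo -> Wococo direction, but with `bp_rococo` limits applied to the
+/// delivery transactions, since Rococo is the target chain here.)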
+pub async fn run( + params: MessagesRelayParams, +) -> Result<(), String> { + let stall_timeout = Duration::from_secs(5 * 60); + let relayer_id_at_wococo = (*params.source_sign.public().as_array_ref()).into(); + + let lane_id = params.lane_id; + let source_client = params.source_client; + let lane = WococoMessagesToRococo { + source_client: source_client.clone(), + source_sign: params.source_sign, + target_client: params.target_client.clone(), + target_sign: params.target_sign, + relayer_id_at_source: relayer_id_at_wococo, + }; + + // 2/3 is reserved for proofs and tx overhead + let max_messages_size_in_single_batch = bp_rococo::max_extrinsic_size() / 3; + // we don't know exact weights of the Rococo runtime. So to guess weights we'll be using + // weights from Rialto and then simply dividing it by x2. + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + select_delivery_transaction_limits::>( + bp_rococo::max_extrinsic_weight(), + bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = ( + max_messages_in_single_batch / 2, + max_messages_weight_in_single_batch / 2, + ); + + log::info!( + target: "bridge", + "Starting Wococo -> Rococo messages relay.\n\t\ + Wococo relayer account id: {:?}\n\t\ + Max messages in single transaction: {}\n\t\ + Max messages size in single transaction: {}\n\t\ + Max messages weight in single transaction: {}", + lane.relayer_id_at_source, + max_messages_in_single_batch, + max_messages_size_in_single_batch, + max_messages_weight_in_single_batch, + ); + + messages_relay::message_lane_loop::run( + messages_relay::message_lane_loop::Params { + lane: lane_id, + source_tick: Wococo::AVERAGE_BLOCK_INTERVAL, + target_tick: Rococo::AVERAGE_BLOCK_INTERVAL, + reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, + stall_timeout, + delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_messages_in_single_batch, + max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, + }, + }, + WococoSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + ROCOCO_CHAIN_ID, + params.target_to_source_headers_relay, + ), + RococoTargetClient::new( + params.target_client, + lane, + lane_id, + WOCOCO_CHAIN_ID, + params.source_to_target_headers_relay, + ), + relay_utils::relay_metrics( + Some(messages_relay::message_lane_loop::metrics_prefix::< + WococoMessagesToRococo, + >(&lane_id)), + params.metrics_params, + ) + .standalone_metric(|registry, prefix| { + StorageProofOverheadMetric::new( + registry, + prefix, + source_client.clone(), + "wococo_storage_proof_overhead".into(), + "Wococo storage proof overhead".into(), + ) + })? + .into_params(), + futures::future::pending(), + ) + .await +} diff --git a/bridges/relays/bin-substrate/src/cli/bridge.rs b/bridges/relays/bin-substrate/src/cli/bridge.rs index 996edb49942a..1feb3dcb1a46 100644 --- a/bridges/relays/bin-substrate/src/cli/bridge.rs +++ b/bridges/relays/bin-substrate/src/cli/bridge.rs @@ -22,6 +22,8 @@ arg_enum! 
{ pub enum FullBridge { MillauToRialto, RialtoToMillau, + RococoToWococo, + WococoToRococo, } } @@ -31,12 +33,16 @@ impl FullBridge { match self { Self::MillauToRialto => MILLAU_TO_RIALTO_INDEX, Self::RialtoToMillau => RIALTO_TO_MILLAU_INDEX, + Self::RococoToWococo => ROCOCO_TO_WOCOCO_INDEX, + Self::WococoToRococo => WOCOCO_TO_ROCOCO_INDEX, } } } pub const RIALTO_TO_MILLAU_INDEX: u8 = 0; pub const MILLAU_TO_RIALTO_INDEX: u8 = 0; +pub const ROCOCO_TO_WOCOCO_INDEX: u8 = 0; +pub const WOCOCO_TO_ROCOCO_INDEX: u8 = 0; /// The macro allows executing bridge-specific code without going fully generic. /// @@ -64,7 +70,7 @@ macro_rules! select_full_bridge { use bp_rialto::TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; // Send-message #[allow(unused_imports)] - use millau_runtime::rialto_account_ownership_digest as account_ownership_digest; + use millau_runtime::millau_to_rialto_account_ownership_digest as account_ownership_digest; $generic } @@ -87,7 +93,51 @@ macro_rules! select_full_bridge { // Send-message #[allow(unused_imports)] - use rialto_runtime::millau_account_ownership_digest as account_ownership_digest; + use rialto_runtime::rialto_to_millau_account_ownership_digest as account_ownership_digest; + + $generic + } + FullBridge::RococoToWococo => { + type Source = relay_rococo_client::Rococo; + #[allow(dead_code)] + type Target = relay_wococo_client::Wococo; + + // Derive-account + #[allow(unused_imports)] + use bp_wococo::derive_account_from_rococo_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use crate::chains::rococo_messages_to_wococo::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_wococo::TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + // Send-message + #[allow(unused_imports)] + use relay_rococo_client::runtime::rococo_to_wococo_account_ownership_digest as account_ownership_digest; + + $generic + } + FullBridge::WococoToRococo => { + type Source = relay_wococo_client::Wococo; + #[allow(dead_code)] + type Target = relay_rococo_client::Rococo; + + // Derive-account + #[allow(unused_imports)] + use bp_rococo::derive_account_from_wococo_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use crate::chains::wococo_messages_to_rococo::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_rococo::TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + // Send-message + #[allow(unused_imports)] + use relay_wococo_client::runtime::wococo_to_rococo_account_ownership_digest as account_ownership_digest; $generic } diff --git a/bridges/relays/bin-substrate/src/cli/encode_call.rs b/bridges/relays/bin-substrate/src/cli/encode_call.rs index 6e1130cffc1a..cfe6d99a4eb9 100644 --- a/bridges/relays/bin-substrate/src/cli/encode_call.rs +++ b/bridges/relays/bin-substrate/src/cli/encode_call.rs @@ -17,7 +17,7 @@ use crate::cli::bridge::FullBridge; use crate::cli::{AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId}; use crate::select_full_bridge; -use frame_support::dispatch::GetDispatchInfo; +use frame_support::weights::DispatchInfo; use relay_substrate_client::Chain; use structopt::StructOpt; @@ -85,6 +85,9 @@ pub trait CliEncodeCall: Chain { /// Encode a CLI call. fn encode_call(call: &Call) -> anyhow::Result; + + /// Get dispatch info for the call. 
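+	///
+	/// (Added so the CLI can estimate a call's weight per chain; the hand-written relay-side
+	/// `Call` enums may not implement `GetDispatchInfo`, so `encode-call` and `send-message`
+	/// use this hook instead of `call.get_dispatch_info()`.)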
+ fn get_dispatch_info(call: &Self::Call) -> anyhow::Result; } impl EncodeCall { @@ -96,7 +99,7 @@ impl EncodeCall { let encoded = HexBytes::encode(&call); log::info!(target: "bridge", "Generated {} call: {:#?}", Source::NAME, call); - log::info!(target: "bridge", "Weight of {} call: {}", Source::NAME, call.get_dispatch_info().weight); + log::info!(target: "bridge", "Weight of {} call: {}", Source::NAME, Source::get_dispatch_info(&call)?.weight); log::info!(target: "bridge", "Encoded {} call: {:?}", Source::NAME, encoded); Ok(encoded) @@ -129,7 +132,7 @@ pub(crate) fn preprocess_call { if remark_payload.is_none() { *remark_payload = Some(HexBytes(generate_remark_payload( - &remark_size, + remark_size, compute_maximal_message_arguments_size(Source::max_extrinsic_size(), Target::max_extrinsic_size()), ))); } diff --git a/bridges/relays/bin-substrate/src/cli/encode_message.rs b/bridges/relays/bin-substrate/src/cli/encode_message.rs index a29aa8597d63..213c8377678f 100644 --- a/bridges/relays/bin-substrate/src/cli/encode_message.rs +++ b/bridges/relays/bin-substrate/src/cli/encode_message.rs @@ -72,7 +72,7 @@ mod tests { #[test] fn should_encode_raw_message() { // given - let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d3c040130000000000000000000000000"; + let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000"; let encode_message = EncodeMessage::from_iter(vec!["encode-message", "MillauToRialto", "raw", msg]); // when @@ -101,6 +101,6 @@ mod tests { let hex = encode_message.encode().unwrap(); // then - assert_eq!(format!("{:?}", hex), "0x01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d3c040130000000000000000000000000"); + assert_eq!(format!("{:?}", hex), "0x01000000b0d60f000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000"); } } diff --git a/bridges/relays/bin-substrate/src/cli/estimate_fee.rs b/bridges/relays/bin-substrate/src/cli/estimate_fee.rs index 4e39ad351ede..129699c26917 100644 --- a/bridges/relays/bin-substrate/src/cli/estimate_fee.rs +++ b/bridges/relays/bin-substrate/src/cli/estimate_fee.rs @@ -18,7 +18,7 @@ use crate::cli::bridge::FullBridge; use crate::cli::{Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams}; use crate::select_full_bridge; use codec::{Decode, Encode}; -use relay_substrate_client::{Chain, ChainWithBalances}; +use relay_substrate_client::Chain; use structopt::StructOpt; /// Estimate Delivery & Dispatch Fee command. @@ -52,7 +52,7 @@ impl EstimateFee { let lane = lane.into(); let payload = Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?; - let fee: ::NativeBalance = + let fee: ::Balance = estimate_message_delivery_and_dispatch_fee(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload) .await?; diff --git a/bridges/relays/bin-substrate/src/cli/init_bridge.rs b/bridges/relays/bin-substrate/src/cli/init_bridge.rs index b5590b9e5a44..20e6daa500e5 100644 --- a/bridges/relays/bin-substrate/src/cli/init_bridge.rs +++ b/bridges/relays/bin-substrate/src/cli/init_bridge.rs @@ -109,7 +109,9 @@ macro_rules! 
select_bridge { fn encode_init_bridge( init_data: InitializationData<::Header>, ) -> ::Call { - bp_wococo::Call::BridgeGrandpaRococo(bp_wococo::BridgeGrandpaRococoCall::initialize(init_data)) + relay_wococo_client::runtime::Call::BridgeGrandpaRococo( + relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize(init_data), + ) } $generic @@ -121,7 +123,9 @@ macro_rules! select_bridge { fn encode_init_bridge( init_data: InitializationData<::Header>, ) -> ::Call { - bp_rococo::Call::BridgeGrandpaWococo(bp_rococo::BridgeGrandpaWococoCall::initialize(init_data)) + relay_rococo_client::runtime::Call::BridgeGrandpaWococo( + relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize(init_data), + ) } $generic diff --git a/bridges/relays/bin-substrate/src/cli/mod.rs b/bridges/relays/bin-substrate/src/cli/mod.rs index 341051de5c27..49bc5dc8c837 100644 --- a/bridges/relays/bin-substrate/src/cli/mod.rs +++ b/bridges/relays/bin-substrate/src/cli/mod.rs @@ -406,7 +406,7 @@ macro_rules! declare_chain_options { port: self.[<$chain_prefix _port>], secure: self.[<$chain_prefix _secure>], }) - .await? + .await ) } } diff --git a/bridges/relays/bin-substrate/src/cli/relay_headers.rs b/bridges/relays/bin-substrate/src/cli/relay_headers.rs index e8459b58bf60..ec521c2918d8 100644 --- a/bridges/relays/bin-substrate/src/cli/relay_headers.rs +++ b/bridges/relays/bin-substrate/src/cli/relay_headers.rs @@ -24,6 +24,9 @@ pub struct RelayHeaders { /// A bridge instance to relay headers for. #[structopt(possible_values = &RelayHeadersBridge::variants(), case_insensitive = true)] bridge: RelayHeadersBridge, + /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) are relayed. + #[structopt(long)] + only_mandatory_headers: bool, #[structopt(flatten)] source: SourceConnectionParams, #[structopt(flatten)] @@ -97,12 +100,14 @@ impl RelayHeaders { let target_client = self.target.to_client::().await?; let target_sign = self.target_sign.to_keypair::()?; let metrics_params = Finality::customize_metrics(self.prometheus_params.into())?; + let finality = Finality::new(target_client.clone(), target_sign); + finality.start_relay_guards(); crate::finality_pipeline::run( - Finality::new(target_client.clone(), target_sign), + finality, source_client, target_client, - false, + self.only_mandatory_headers, metrics_params, ) .await diff --git a/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs index f2238ba5763c..e71ea6aeaa2f 100644 --- a/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs +++ b/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs @@ -35,14 +35,15 @@ use structopt::StructOpt; #[derive(StructOpt)] pub enum RelayHeadersAndMessages { MillauRialto(MillauRialtoHeadersAndMessages), + RococoWococo(RococoWococoHeadersAndMessages), } /// Parameters that have the same names across all bridges. #[derive(StructOpt)] pub struct HeadersAndMessagesSharedParams { - /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + /// Hex-encoded lane identifiers that should be served by the complex relay. #[structopt(long, default_value = "00000000")] - lane: HexLaneId, + lane: Vec, #[structopt(flatten)] prometheus_params: PrometheusParams, } @@ -102,6 +103,26 @@ macro_rules! 
select_bridge { use crate::chains::millau_messages_to_rialto::run as left_to_right_messages; use crate::chains::rialto_messages_to_millau::run as right_to_left_messages; + $generic + } + RelayHeadersAndMessages::RococoWococo(_) => { + type Params = RococoWococoHeadersAndMessages; + + type Left = relay_rococo_client::Rococo; + type Right = relay_wococo_client::Wococo; + + type LeftToRightFinality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo; + type RightToLeftFinality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; + + type LeftToRightMessages = crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo; + type RightToLeftMessages = crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo; + + const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber = bp_rococo::SESSION_LENGTH; + const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber = bp_wococo::SESSION_LENGTH; + + use crate::chains::rococo_messages_to_wococo::run as left_to_right_messages; + use crate::chains::wococo_messages_to_rococo::run as right_to_left_messages; + $generic } } @@ -111,8 +132,11 @@ macro_rules! select_bridge { // All supported chains. declare_chain_options!(Millau, millau); declare_chain_options!(Rialto, rialto); +declare_chain_options!(Rococo, rococo); +declare_chain_options!(Wococo, wococo); // All supported bridges. declare_bridge_options!(Millau, Rialto); +declare_bridge_options!(Rococo, Wococo); impl RelayHeadersAndMessages { /// Run the command. @@ -125,7 +149,7 @@ impl RelayHeadersAndMessages { let right_client = params.right.to_client::().await?; let right_sign = params.right_sign.to_keypair::()?; - let lane = params.shared.lane.into(); + let lanes = params.shared.lane; let metrics_params: MetricsParams = params.shared.prometheus_params.into(); let metrics_params = relay_utils::relay_metrics(None, metrics_params).into_params(); @@ -143,46 +167,49 @@ impl RelayHeadersAndMessages { MAX_MISSING_RIGHT_HEADERS_AT_LEFT, ); - let left_to_right_messages = left_to_right_messages(MessagesRelayParams { - source_client: left_client.clone(), - source_sign: left_sign.clone(), - target_client: right_client.clone(), - target_sign: right_sign.clone(), - source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), - target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), - lane_id: lane, - metrics_params: metrics_params - .clone() - .disable() - .metrics_prefix(messages_relay::message_lane_loop::metrics_prefix::(&lane)), - }) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); - let right_to_left_messages = right_to_left_messages(MessagesRelayParams { - source_client: right_client, - source_sign: right_sign, - target_client: left_client.clone(), - target_sign: left_sign.clone(), - source_to_target_headers_relay: Some(right_to_left_on_demand_headers), - target_to_source_headers_relay: Some(left_to_right_on_demand_headers), - lane_id: lane, - metrics_params: metrics_params - .clone() - .disable() - .metrics_prefix(messages_relay::message_lane_loop::metrics_prefix::(&lane)), - }) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + // Need 2x capacity since we consider both directions for each lane + let mut message_relays = Vec::with_capacity(lanes.len() * 2); + for lane in lanes { + let lane = lane.into(); + let left_to_right_messages = left_to_right_messages(MessagesRelayParams { + source_client: left_client.clone(), + source_sign: left_sign.clone(), + target_client: right_client.clone(), + target_sign: 
right_sign.clone(), + source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), + target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), + lane_id: lane, + metrics_params: metrics_params.clone().disable().metrics_prefix( + messages_relay::message_lane_loop::metrics_prefix::(&lane), + ), + }) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); + let right_to_left_messages = right_to_left_messages(MessagesRelayParams { + source_client: right_client.clone(), + source_sign: right_sign.clone(), + target_client: left_client.clone(), + target_sign: left_sign.clone(), + source_to_target_headers_relay: Some(right_to_left_on_demand_headers.clone()), + target_to_source_headers_relay: Some(left_to_right_on_demand_headers.clone()), + lane_id: lane, + metrics_params: metrics_params.clone().disable().metrics_prefix( + messages_relay::message_lane_loop::metrics_prefix::(&lane), + ), + }) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); + + message_relays.push(left_to_right_messages); + message_relays.push(right_to_left_messages); + } relay_utils::relay_metrics(None, metrics_params) .expose() .await .map_err(|e| anyhow::format_err!("{}", e))?; - futures::future::select(left_to_right_messages, right_to_left_messages) - .await - .factor_first() - .0 + futures::future::select_all(message_relays).await.0 }) } } diff --git a/bridges/relays/bin-substrate/src/cli/send_message.rs b/bridges/relays/bin-substrate/src/cli/send_message.rs index 6fa82a8cdb5e..f710f814e41d 100644 --- a/bridges/relays/bin-substrate/src/cli/send_message.rs +++ b/bridges/relays/bin-substrate/src/cli/send_message.rs @@ -22,8 +22,9 @@ use crate::cli::{ TargetSigningParams, }; use bp_message_dispatch::{CallOrigin, MessagePayload}; +use bp_runtime::messages::DispatchFeePayment; use codec::Encode; -use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; +use frame_support::weights::Weight; use relay_substrate_client::{Chain, TransactionSignScheme}; use sp_core::{Bytes, Pair}; use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner}; @@ -40,9 +41,12 @@ pub struct SendMessage { source: SourceConnectionParams, #[structopt(flatten)] source_sign: SourceSigningParams, - // TODO [#885] Move TargetSign to origins - #[structopt(flatten)] - target_sign: TargetSigningParams, + /// The SURI of secret key to use when transactions are submitted to the Target node. + #[structopt(long, required_if("origin", "Target"))] + target_signer: Option, + /// The password for the SURI of secret key to use when transactions are submitted to the Target node. + #[structopt(long)] + target_signer_password: Option, /// Hex-encoded lane id. Defaults to `00000000`. 
#[structopt(long, default_value = "00000000")] lane: HexLaneId, @@ -68,7 +72,8 @@ impl SendMessage { crate::select_full_bridge!(self.bridge, { let SendMessage { source_sign, - target_sign, + target_signer, + target_signer_password, ref mut message, dispatch_weight, origin, @@ -77,15 +82,14 @@ impl SendMessage { } = self; let source_sign = source_sign.to_keypair::()?; - let target_sign = target_sign.to_keypair::()?; encode_call::preprocess_call::(message, bridge.bridge_instance_index()); - let target_call = Target::encode_call(&message)?; + let target_call = Target::encode_call(message)?; let payload = { let target_call_weight = prepare_call_dispatch_weight( dispatch_weight, - ExplicitOrMaximal::Explicit(target_call.get_dispatch_info().weight), + ExplicitOrMaximal::Explicit(Target::get_dispatch_info(&target_call)?.weight), compute_maximal_message_dispatch_weight(Target::max_extrinsic_weight()), ); let source_sender_public: MultiSigner = source_sign.public().into(); @@ -97,6 +101,13 @@ impl SendMessage { match origin { Origins::Source => CallOrigin::SourceAccount(source_account_id), Origins::Target => { + let target_sign = TargetSigningParams { + target_signer: target_signer.clone().ok_or_else(|| { + anyhow::format_err!("The argument target_signer is not available") + })?, + target_signer_password: target_signer_password.clone(), + }; + let target_sign = target_sign.to_keypair::()?; let digest = account_ownership_digest( &target_call, source_account_id.clone(), @@ -130,11 +141,12 @@ impl SendMessage { let fee = match self.fee { Some(fee) => fee, None => Balance( - estimate_message_delivery_and_dispatch_fee::< - ::NativeBalance, - _, - _, - >(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload.clone()) + estimate_message_delivery_and_dispatch_fee::<::Balance, _, _>( + &source_client, + ESTIMATE_MESSAGE_FEE_METHOD, + lane, + payload.clone(), + ) .await? 
as _, ), }; @@ -210,6 +222,7 @@ where spec_version, weight, origin, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: HexBytes::encode(call), }; @@ -221,12 +234,14 @@ where spec_version, weight, origin, + dispatch_fee_payment, call, } = payload; MessagePayload { spec_version, weight, origin, + dispatch_fee_payment, call: call.0, } } @@ -250,8 +265,6 @@ mod tests { "1234", "--source-signer", "//Alice", - "--target-signer", - "//Bob", "remark", "--remark-payload", "1234", @@ -265,8 +278,9 @@ mod tests { payload, MessagePayload { spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, - weight: 1345000, + weight: 1038000, origin: CallOrigin::SourceAccount(sp_keyring::AccountKeyring::Alice.to_account_id()), + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: hex!("0401081234").to_vec(), } ); @@ -304,14 +318,35 @@ mod tests { payload, MessagePayload { spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, - weight: 1345000, + weight: 1038000, origin: CallOrigin::TargetAccount( sp_keyring::AccountKeyring::Alice.to_account_id(), sp_keyring::AccountKeyring::Bob.into(), signature, ), + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: hex!("0701081234").to_vec(), } ); } + + #[test] + fn target_signer_must_exist_if_origin_is_target() { + // given + let send_message = SendMessage::from_iter_safe(vec![ + "send-message", + "MillauToRialto", + "--source-port", + "1234", + "--source-signer", + "//Alice", + "--origin", + "Target", + "remark", + "--remark-payload", + "1234", + ]); + + assert!(send_message.is_err()); + } } diff --git a/bridges/relays/bin-substrate/src/finality_pipeline.rs b/bridges/relays/bin-substrate/src/finality_pipeline.rs index dad69b1576b0..19fa0917df39 100644 --- a/bridges/relays/bin-substrate/src/finality_pipeline.rs +++ b/bridges/relays/bin-substrate/src/finality_pipeline.rs @@ -26,12 +26,12 @@ use sp_core::Bytes; use std::{fmt::Debug, marker::PhantomData, time::Duration}; /// Default synchronization loop timeout. -const STALL_TIMEOUT: Duration = Duration::from_secs(120); +pub(crate) const STALL_TIMEOUT: Duration = Duration::from_secs(120); /// Default limit of recent finality proofs. /// /// Finality delay of 4096 blocks is unlikely to happen in practice in /// Substrate+GRANDPA based chains (good to know). -const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; +pub(crate) const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; /// Headers sync pipeline for Substrate <-> Substrate relays. pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { @@ -46,6 +46,13 @@ pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { Ok(params) } + /// Start finality relay guards. + /// + /// Different finality bridges may have different set of guards - e.g. on ephemeral chains we + /// don't need version guards, on test chains we don't care that much about relayer account + /// balance, ... So the implementation is left to the specific bridges. + fn start_relay_guards(&self) {} + /// Returns id of account that we're using to sign transactions at target chain. 
fn transactions_author(&self) -> ::AccountId; @@ -112,7 +119,7 @@ pub async fn run( pipeline: P, source_client: Client, target_client: Client, - is_on_demand_task: bool, + only_mandatory_headers: bool, metrics_params: MetricsParams, ) -> anyhow::Result<()> where @@ -135,13 +142,13 @@ where ); finality_relay::run( - FinalitySource::new(source_client), + FinalitySource::new(source_client, None), SubstrateFinalityTarget::new(target_client, pipeline), FinalitySyncParams { - is_on_demand_task, tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL), recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, stall_timeout: STALL_TIMEOUT, + only_mandatory_headers, }, metrics_params, futures::future::pending(), diff --git a/bridges/relays/bin-substrate/src/messages_lane.rs b/bridges/relays/bin-substrate/src/messages_lane.rs index 616e2253a6b0..7efea545f9a6 100644 --- a/bridges/relays/bin-substrate/src/messages_lane.rs +++ b/bridges/relays/bin-substrate/src/messages_lane.rs @@ -49,7 +49,7 @@ pub struct MessagesRelayParams { /// Message sync pipeline for Substrate <-> Substrate relays. pub trait SubstrateMessageLane: MessageLane { /// Name of the runtime method that returns dispatch weight of outbound messages at the source chain. - const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str; + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str; /// Name of the runtime method that returns latest generated nonce at the source chain. const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str; /// Name of the runtime method that returns latest received (confirmed) nonce at the the source chain. @@ -139,6 +139,7 @@ where type MessagesProof = SubstrateMessagesProof; type MessagesReceivingProof = SubstrateMessagesReceivingProof; + type SourceChainBalance = Source::Balance; type SourceHeaderNumber = BlockNumberOf; type SourceHeaderHash = HashOf; @@ -203,7 +204,7 @@ mod tests { // reserved for messages dispatch allows dispatch of non-trivial messages. // // Any significant change in this values should attract additional attention. 
- (1013, 216_583_333_334), + (782, 216_583_333_334), ); } } diff --git a/bridges/relays/bin-substrate/src/messages_source.rs b/bridges/relays/bin-substrate/src/messages_source.rs index 0ccf8bbde885..88c8b529dcc6 100644 --- a/bridges/relays/bin-substrate/src/messages_source.rs +++ b/bridges/relays/bin-substrate/src/messages_source.rs @@ -23,17 +23,16 @@ use crate::on_demand_headers::OnDemandHeadersRelay; use async_trait::async_trait; use bp_messages::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; +use bp_runtime::{messages::DispatchFeePayment, ChainId}; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::{Decode, Encode}; use frame_support::{traits::Instance, weights::Weight}; use messages_relay::{ message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{ - ClientState, MessageProofParameters, MessageWeights, MessageWeightsMap, SourceClient, SourceClientState, + ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient, SourceClientState, }, }; -use pallet_bridge_messages::Config as MessagesConfig; use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf}; use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId}; use sp_core::Bytes; @@ -46,22 +45,22 @@ use std::{marker::PhantomData, ops::RangeInclusive}; pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { +pub struct SubstrateMessagesSource { client: Client, lane: P, lane_id: LaneId, - instance: InstanceId, + instance: ChainId, target_to_source_headers_relay: Option>, - _phantom: PhantomData<(R, I)>, + _phantom: PhantomData, } -impl SubstrateMessagesSource { +impl SubstrateMessagesSource { /// Create new Substrate headers source. pub fn new( client: Client, lane: P, lane_id: LaneId, - instance: InstanceId, + instance: ChainId, target_to_source_headers_relay: Option>, ) -> Self { SubstrateMessagesSource { @@ -75,7 +74,7 @@ impl SubstrateMessagesSource Clone for SubstrateMessagesSource { +impl Clone for SubstrateMessagesSource { fn clone(&self) -> Self { Self { client: self.client.clone(), @@ -89,11 +88,10 @@ impl Clone for SubstrateMessagesSource< } #[async_trait] -impl RelayClient for SubstrateMessagesSource +impl RelayClient for SubstrateMessagesSource where C: Chain, P: SubstrateMessageLane, - R: 'static + Send + Sync, I: Send + Sync + Instance, { type Error = SubstrateError; @@ -104,7 +102,7 @@ where } #[async_trait] -impl SourceClient
<P>
for SubstrateMessagesSource +impl SourceClient
<P>
for SubstrateMessagesSource where C: Chain, C::Header: DeserializeOwned, @@ -112,6 +110,7 @@ where C::BlockNumber: BlockNumberBase, P: SubstrateMessageLane< MessagesProof = SubstrateMessagesProof, + SourceChainBalance = C::Balance, SourceHeaderNumber = ::Number, SourceHeaderHash = ::Hash, SourceChain = C, @@ -119,7 +118,6 @@ where P::TargetChain: Chain, P::TargetHeaderNumber: Decode, P::TargetHeaderHash: Decode, - R: Send + Sync + MessagesConfig, I: Send + Sync + Instance, { async fn state(&self) -> Result, SubstrateError> { @@ -168,21 +166,21 @@ where Ok((id, latest_received_nonce)) } - async fn generated_messages_weights( + async fn generated_message_details( &self, id: SourceHeaderIdOf
<P>
, nonces: RangeInclusive, - ) -> Result { + ) -> Result, SubstrateError> { let encoded_response = self .client .state_call( - P::OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD.into(), + P::OUTBOUND_LANE_MESSAGE_DETAILS_METHOD.into(), Bytes((self.lane_id, nonces.start(), nonces.end()).encode()), Some(id.1), ) .await?; - make_message_weights_map::( + make_message_details_map::( Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?, nonces, ) @@ -197,7 +195,7 @@ where let mut storage_keys = Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); let mut message_nonce = *nonces.start(); while message_nonce <= *nonces.end() { - let message_key = pallet_bridge_messages::storage_keys::message_key::(&self.lane_id, message_nonce); + let message_key = pallet_bridge_messages::storage_keys::message_key::(&self.lane_id, message_nonce); storage_keys.push(message_key); message_nonce += 1; } @@ -239,9 +237,13 @@ where async fn require_target_header_on_source(&self, id: TargetHeaderIdOf
<P>
) { if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay { - target_to_source_headers_relay.require_finalized_header(id); + target_to_source_headers_relay.require_finalized_header(id).await; } } + + async fn estimate_confirmation_transaction(&self) -> P::SourceChainBalance { + num_traits::Zero::zero() // TODO: https://github.com/paritytech/parity-bridges-common/issues/997 + } } pub async fn read_client_state( @@ -287,10 +289,10 @@ where }) } -fn make_message_weights_map( - weights: Vec<(MessageNonce, Weight, u32)>, +fn make_message_details_map( + weights: Vec>, nonces: RangeInclusive, -) -> Result { +) -> Result, SubstrateError> { let make_missing_nonce_error = |expected_nonce| { Err(SubstrateError::Custom(format!( "Missing nonce {} in messages_dispatch_weight call result. Expected all nonces from {:?}", @@ -298,7 +300,7 @@ fn make_message_weights_map( ))) }; - let mut weights_map = MessageWeightsMap::new(); + let mut weights_map = MessageDetailsMap::new(); // this is actually prevented by external logic if nonces.is_empty() { @@ -308,7 +310,7 @@ fn make_message_weights_map( // check if last nonce is missing - loop below is not checking this let last_nonce_is_missing = weights .last() - .map(|(last_nonce, _, _)| last_nonce != nonces.end()) + .map(|details| details.nonce != *nonces.end()) .unwrap_or(true); if last_nonce_is_missing { return make_missing_nonce_error(*nonces.end()); @@ -317,8 +319,8 @@ fn make_message_weights_map( let mut expected_nonce = *nonces.start(); let mut is_at_head = true; - for (nonce, weight, size) in weights { - match (nonce == expected_nonce, is_at_head) { + for details in weights { + match (details.nonce == expected_nonce, is_at_head) { (true, _) => (), (false, true) => { // this may happen if some messages were already pruned from the source node @@ -328,7 +330,7 @@ fn make_message_weights_map( target: "bridge", "Some messages are missing from the {} node: {:?}. 
Target node may be out of sync?", C::NAME, - expected_nonce..nonce, + expected_nonce..details.nonce, ); } (false, false) => { @@ -340,13 +342,16 @@ fn make_message_weights_map( } weights_map.insert( - nonce, - MessageWeights { - weight, - size: size as _, + details.nonce, + MessageDetails { + dispatch_weight: details.dispatch_weight, + size: details.size as _, + // TODO: https://github.com/paritytech/parity-bridges-common/issues/997 + reward: num_traits::Zero::zero(), + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, }, ); - expected_nonce = nonce + 1; + expected_nonce = details.nonce + 1; is_at_head = false; } @@ -357,15 +362,53 @@ fn make_message_weights_map( mod tests { use super::*; + fn message_details_from_rpc( + nonces: RangeInclusive, + ) -> Vec> { + nonces + .into_iter() + .map(|nonce| bp_messages::MessageDetails { + nonce, + dispatch_weight: 0, + size: 0, + delivery_and_dispatch_fee: 0, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + }) + .collect() + } + #[test] - fn make_message_weights_map_succeeds_if_no_messages_are_missing() { + fn make_message_details_map_succeeds_if_no_messages_are_missing() { assert_eq!( - make_message_weights_map::(vec![(1, 0, 0), (2, 0, 0), (3, 0, 0)], 1..=3,) - .unwrap(), + make_message_details_map::(message_details_from_rpc(1..=3), 1..=3,).unwrap(), vec![ - (1, MessageWeights { weight: 0, size: 0 }), - (2, MessageWeights { weight: 0, size: 0 }), - (3, MessageWeights { weight: 0, size: 0 }), + ( + 1, + MessageDetails { + dispatch_weight: 0, + size: 0, + reward: 0, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + } + ), + ( + 2, + MessageDetails { + dispatch_weight: 0, + size: 0, + reward: 0, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + } + ), + ( + 3, + MessageDetails { + dispatch_weight: 0, + size: 0, + reward: 0, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + } + ), ] .into_iter() .collect(), @@ -373,12 +416,28 @@ mod tests { } #[test] - fn make_message_weights_map_succeeds_if_head_messages_are_missing() { + fn make_message_details_map_succeeds_if_head_messages_are_missing() { assert_eq!( - make_message_weights_map::(vec![(2, 0, 0), (3, 0, 0)], 1..=3,).unwrap(), + make_message_details_map::(message_details_from_rpc(2..=3), 1..=3,).unwrap(), vec![ - (2, MessageWeights { weight: 0, size: 0 }), - (3, MessageWeights { weight: 0, size: 0 }), + ( + 2, + MessageDetails { + dispatch_weight: 0, + size: 0, + reward: 0, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + } + ), + ( + 3, + MessageDetails { + dispatch_weight: 0, + size: 0, + reward: 0, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + } + ), ] .into_iter() .collect(), @@ -386,25 +445,27 @@ mod tests { } #[test] - fn make_message_weights_map_fails_if_mid_messages_are_missing() { + fn make_message_details_map_fails_if_mid_messages_are_missing() { + let mut message_details_from_rpc = message_details_from_rpc(1..=3); + message_details_from_rpc.remove(1); assert!(matches!( - make_message_weights_map::(vec![(1, 0, 0), (3, 0, 0)], 1..=3,), + make_message_details_map::(message_details_from_rpc, 1..=3,), Err(SubstrateError::Custom(_)) )); } #[test] - fn make_message_weights_map_fails_if_tail_messages_are_missing() { + fn make_message_details_map_fails_if_tail_messages_are_missing() { assert!(matches!( - make_message_weights_map::(vec![(1, 0, 0), (2, 0, 0)], 1..=3,), + make_message_details_map::(message_details_from_rpc(1..=2), 1..=3,), Err(SubstrateError::Custom(_)) )); } #[test] - fn 
make_message_weights_map_fails_if_all_messages_are_missing() { + fn make_message_details_map_fails_if_all_messages_are_missing() { assert!(matches!( - make_message_weights_map::(vec![], 1..=3), + make_message_details_map::(vec![], 1..=3), Err(SubstrateError::Custom(_)) )); } diff --git a/bridges/relays/bin-substrate/src/messages_target.rs b/bridges/relays/bin-substrate/src/messages_target.rs index 39f638d7e91c..f74efbe61b5a 100644 --- a/bridges/relays/bin-substrate/src/messages_target.rs +++ b/bridges/relays/bin-substrate/src/messages_target.rs @@ -24,15 +24,14 @@ use crate::on_demand_headers::OnDemandHeadersRelay; use async_trait::async_trait; use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; -use bp_runtime::InstanceId; +use bp_runtime::ChainId; use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; use codec::{Decode, Encode}; -use frame_support::traits::Instance; +use frame_support::{traits::Instance, weights::Weight}; use messages_relay::{ message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{TargetClient, TargetClientState}, }; -use pallet_bridge_messages::Config as MessagesConfig; use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf}; use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase}; use sp_core::Bytes; @@ -46,22 +45,22 @@ pub type SubstrateMessagesReceivingProof = ( ); /// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { +pub struct SubstrateMessagesTarget { client: Client, lane: P, lane_id: LaneId, - instance: InstanceId, + instance: ChainId, source_to_target_headers_relay: Option>, - _phantom: PhantomData<(R, I)>, + _phantom: PhantomData, } -impl SubstrateMessagesTarget { +impl SubstrateMessagesTarget { /// Create new Substrate headers target. pub fn new( client: Client, lane: P, lane_id: LaneId, - instance: InstanceId, + instance: ChainId, source_to_target_headers_relay: Option>, ) -> Self { SubstrateMessagesTarget { @@ -75,7 +74,7 @@ impl SubstrateMessagesTarget Clone for SubstrateMessagesTarget { +impl Clone for SubstrateMessagesTarget { fn clone(&self) -> Self { Self { client: self.client.clone(), @@ -89,11 +88,10 @@ impl Clone for SubstrateMessagesTarget< } #[async_trait] -impl RelayClient for SubstrateMessagesTarget +impl RelayClient for SubstrateMessagesTarget where C: Chain, P: SubstrateMessageLane, - R: 'static + Send + Sync, I: Send + Sync + Instance, { type Error = SubstrateError; @@ -104,7 +102,7 @@ where } #[async_trait] -impl TargetClient
<P>
for SubstrateMessagesTarget +impl TargetClient
<P>
for SubstrateMessagesTarget where C: Chain, C::Header: DeserializeOwned, @@ -119,7 +117,6 @@ where P::SourceChain: Chain, P::SourceHeaderNumber: Decode, P::SourceHeaderHash: Decode, - R: Send + Sync + MessagesConfig, I: Send + Sync + Instance, { async fn state(&self) -> Result, SubstrateError> { @@ -190,7 +187,7 @@ where id: TargetHeaderIdOf
<P>
, ) -> Result<(TargetHeaderIdOf
<P>
, P::MessagesReceivingProof), SubstrateError> { let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&self.lane_id); + let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&self.lane_id); let proof = self .client .prove_storage(vec![inbound_data_key], id.1) @@ -226,7 +223,16 @@ where async fn require_source_header_on_target(&self, id: SourceHeaderIdOf
<P>
) { if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { - source_to_target_headers_relay.require_finalized_header(id); + source_to_target_headers_relay.require_finalized_header(id).await; } } + + async fn estimate_delivery_transaction_in_source_tokens( + &self, + _nonces: RangeInclusive, + _total_dispatch_weight: Weight, + _total_size: u32, + ) -> P::SourceChainBalance { + num_traits::Zero::zero() // TODO: https://github.com/paritytech/parity-bridges-common/issues/997 + } } diff --git a/bridges/relays/bin-substrate/src/on_demand_headers.rs b/bridges/relays/bin-substrate/src/on_demand_headers.rs index 77d2b3705410..58ef268a29f7 100644 --- a/bridges/relays/bin-substrate/src/on_demand_headers.rs +++ b/bridges/relays/bin-substrate/src/on_demand_headers.rs @@ -16,39 +16,38 @@ //! On-demand Substrate -> Substrate headers relay. -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use crate::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT, STALL_TIMEOUT, +}; use crate::finality_target::SubstrateFinalityTarget; +use async_std::sync::{Arc, Mutex}; use bp_header_chain::justification::GrandpaJustification; use finality_relay::{ - FinalitySyncPipeline, SourceClient as FinalitySourceClient, TargetClient as FinalityTargetClient, -}; -use futures::{ - channel::{mpsc, oneshot}, - select, FutureExt, StreamExt, + FinalitySyncParams, FinalitySyncPipeline, SourceClient as FinalitySourceClient, SourceHeader, + TargetClient as FinalityTargetClient, }; -use num_traits::{CheckedSub, Zero}; +use futures::{select, FutureExt}; +use num_traits::{CheckedSub, One, Zero}; use relay_substrate_client::{ - finality_source::FinalitySource as SubstrateFinalitySource, BlockNumberOf, Chain, Client, HashOf, HeaderIdOf, - SyncHeader, + finality_source::{FinalitySource as SubstrateFinalitySource, RequiredHeaderNumberRef}, + BlockNumberOf, Chain, Client, HashOf, HeaderIdOf, SyncHeader, }; use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, HeaderId, - MaybeConnectionError, + metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, MaybeConnectionError, }; use std::fmt::Debug; /// On-demand Substrate <-> Substrate headers relay. /// -/// This relay may be started by messages whenever some other relay (e.g. messages relay) needs more -/// headers to be relayed to continue its regular work. When enough headers are relayed, on-demand -/// relay may be deactivated. +/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages relay) needs +/// it to continue its regular work. When enough headers are relayed, on-demand stops syncing headers. #[derive(Clone)] pub struct OnDemandHeadersRelay { - /// Background task name. - background_task_name: String, - /// Required headers to background sender. - required_header_tx: mpsc::Sender>, + /// Relay task name. + relay_task_name: String, + /// Shared reference to maximal required finalized header number. 
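+	///
+	/// (Shared `Arc<Mutex<_>>` with the finality source: callers of `require_finalized_header`
+	/// simply bump this number and the background sync task picks it up, replacing the old
+	/// mpsc-channel based signalling.)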
+ required_header_number: RequiredHeaderNumberRef, } impl OnDemandHeadersRelay { @@ -75,49 +74,49 @@ impl OnDemandHeadersRelay { SubstrateFinalityTarget>: FinalityTargetClient>, { - let (required_header_tx, required_header_rx) = mpsc::channel(1); + let required_header_number = Arc::new(Mutex::new(Zero::zero())); + let this = OnDemandHeadersRelay { + relay_task_name: on_demand_headers_relay_name::(), + required_header_number: required_header_number.clone(), + }; async_std::task::spawn(async move { background_task( source_client, target_client, pipeline, maximal_headers_difference, - required_header_rx, + required_header_number, ) .await; }); - let background_task_name = format!( - "{}-background", - on_demand_headers_relay_name::() - ); - OnDemandHeadersRelay { - background_task_name, - required_header_tx, - } + this } /// Someone is asking us to relay given finalized header. - pub fn require_finalized_header(&self, header_id: HeaderIdOf) { - if let Err(error) = self.required_header_tx.clone().try_send(header_id) { - log::error!( + pub async fn require_finalized_header(&self, header_id: HeaderIdOf) { + let mut required_header_number = self.required_header_number.lock().await; + if header_id.0 > *required_header_number { + log::trace!( target: "bridge", - "Failed to send require header id {:?} to {:?}: {:?}", - header_id, - self.background_task_name, - error, + "More {} headers required in {} relay. Going to sync up to the {}", + SourceChain::NAME, + self.relay_task_name, + header_id.0, ); + + *required_header_number = header_id.0; } } } -/// Background task that is responsible for starting and stopping headers relay when required. +/// Background task that is responsible for starting headers relay. async fn background_task( source_client: Client, target_client: Client, pipeline: SubstrateFinalityToSubstrate, maximal_headers_difference: SourceChain::BlockNumber, - mut required_header_rx: mpsc::Receiver>, + required_header_number: RequiredHeaderNumberRef, ) where SourceChain: Chain + Debug, SourceChain::BlockNumber: BlockNumberBase, @@ -138,36 +137,20 @@ async fn background_task( let mut finality_source = SubstrateFinalitySource::< _, SubstrateFinalityToSubstrate, - >::new(source_client.clone()); + >::new(source_client.clone(), Some(required_header_number.clone())); let mut finality_target = SubstrateFinalityTarget::new(target_client.clone(), pipeline.clone()); + let mut latest_non_mandatory_at_source = Zero::zero(); - let mut active_headers_relay = None; - let mut required_header_number = Zero::zero(); - let mut relay_exited_rx = futures::future::pending().left_future(); + let mut restart_relay = true; + let finality_relay_task = futures::future::Fuse::terminated(); + futures::pin_mut!(finality_relay_task); loop { - // wait for next target block or for new required header select! { _ = async_std::task::sleep(TargetChain::AVERAGE_BLOCK_INTERVAL).fuse() => {}, - required_header_id = required_header_rx.next() => { - match required_header_id { - Some(required_header_id) => { - if required_header_id.0 > required_header_number { - required_header_number = required_header_id.0; - } - }, - None => { - // that's the only way to exit background task - to drop `required_header_tx` - break - }, - } - }, - _ = relay_exited_rx => { - // there could be a situation when we're receiving exit signals after we - // have already stopped relay or when we have already started new relay. 
- // but it isn't critical, because even if we'll accidentally stop new relay - // we'll restart it almost immediately - stop_on_demand_headers_relay(active_headers_relay.take()).await; + _ = finality_relay_task => { + // this should never happen in practice given the current code + restart_relay = true; }, } @@ -199,35 +182,161 @@ async fn background_task( continue; } - // start or stop headers relay if required - let action = select_on_demand_relay_action::( + // submit mandatory header if some headers are missing + let best_finalized_source_header_at_target_fmt = format!("{:?}", best_finalized_source_header_at_target); + let mandatory_scan_range = mandatory_headers_scan_range::( best_finalized_source_header_at_source.ok(), best_finalized_source_header_at_target.ok(), - required_header_number, maximal_headers_difference, - &relay_task_name, - active_headers_relay.is_some(), - ); - match action { - OnDemandRelayAction::Start => { - let (relay_exited_tx, new_relay_exited_rx) = oneshot::channel(); - active_headers_relay = start_on_demand_headers_relay( - relay_task_name.clone(), - relay_exited_tx, - source_client.clone(), - target_client.clone(), - pipeline.clone(), - ); - if active_headers_relay.is_some() { - relay_exited_rx = new_relay_exited_rx.right_future(); + &required_header_number, + ) + .await; + if let Some(mandatory_scan_range) = mandatory_scan_range { + let relay_mandatory_header_result = relay_mandatory_header_from_range( + &finality_source, + &required_header_number, + best_finalized_source_header_at_target_fmt, + ( + std::cmp::max(mandatory_scan_range.0, latest_non_mandatory_at_source), + mandatory_scan_range.1, + ), + &relay_task_name, + ) + .await; + match relay_mandatory_header_result { + Ok(true) => (), + Ok(false) => { + // there are no (or we don't need to relay them) mandatory headers in the range + // => to avoid scanning the same headers over and over again, remember that + latest_non_mandatory_at_source = mandatory_scan_range.1; + } + Err(e) => { + if e.is_connection_error() { + relay_utils::relay_loop::reconnect_failed_client( + FailedClient::Source, + relay_utils::relay_loop::RECONNECT_DELAY, + &mut finality_source, + &mut finality_target, + ) + .await; + continue; + } } } - OnDemandRelayAction::Stop => { - stop_on_demand_headers_relay(active_headers_relay.take()).await; - } - OnDemandRelayAction::None => (), } + + // start/restart relay + if restart_relay { + finality_relay_task.set( + finality_relay::run( + finality_source.clone(), + finality_target.clone(), + FinalitySyncParams { + tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL), + recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, + stall_timeout: STALL_TIMEOUT, + only_mandatory_headers: false, + }, + MetricsParams::disabled(), + futures::future::pending(), + ) + .fuse(), + ); + + restart_relay = false; + } + } +} + +/// Returns `Some()` with inclusive range of headers which must be scanned for manadatory headers +/// and the first of such headers must be submitted to the target node. 
+async fn mandatory_headers_scan_range( + best_finalized_source_header_at_source: Option, + best_finalized_source_header_at_target: Option, + maximal_headers_difference: C::BlockNumber, + required_header_number: &RequiredHeaderNumberRef, +) -> Option<(C::BlockNumber, C::BlockNumber)> { + let required_header_number = *required_header_number.lock().await; + + // if we have been unable to read header number from the target, then let's assume + // that it is the same as required header number. Otherwise we risk submitting + // unneeded transactions + let best_finalized_source_header_at_target = + best_finalized_source_header_at_target.unwrap_or(required_header_number); + + // if we have been unable to read header number from the source, then let's assume + // that it is the same as at the target + let best_finalized_source_header_at_source = + best_finalized_source_header_at_source.unwrap_or(best_finalized_source_header_at_target); + + // if there are too many source headers missing from the target node, sync mandatory + // headers to target + // + // why do we need that? When complex headers+messages relay is used, it'll normally only relay + // headers when there are undelivered messages/confirmations. But security model of the + // `pallet-bridge-grandpa` module relies on the fact that headers are synced in real-time and + // that it'll see authorities-change header before unbonding period will end for previous + // authorities set. + let current_headers_difference = best_finalized_source_header_at_source + .checked_sub(&best_finalized_source_header_at_target) + .unwrap_or_else(Zero::zero); + if current_headers_difference <= maximal_headers_difference { + return None; + } + + // if relay is already asked to sync headers, don't do anything yet + if required_header_number > best_finalized_source_header_at_target { + return None; } + + Some(( + best_finalized_source_header_at_target + One::one(), + best_finalized_source_header_at_source, + )) +} + +/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay it. +/// +/// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. +async fn relay_mandatory_header_from_range( + finality_source: &SubstrateFinalitySource, + required_header_number: &RequiredHeaderNumberRef, + best_finalized_source_header_at_target: String, + range: (SourceChain::BlockNumber, SourceChain::BlockNumber), + relay_task_name: &str, +) -> Result +where + SubstrateFinalitySource: FinalitySourceClient

, + P: FinalitySyncPipeline, +{ + // search for mandatory header first + let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?; + + // if there are no mandatory headers - we have nothing to do + let mandatory_source_header_number = match mandatory_source_header_number { + Some(mandatory_source_header_number) => mandatory_source_header_number, + None => return Ok(false), + }; + + // `find_mandatory_header` call may take a while => check if `required_header_number` is still + // less than our `mandatory_source_header_number` before logging anything + let mut required_header_number = required_header_number.lock().await; + if *required_header_number >= mandatory_source_header_number { + return Ok(false); + } + + log::trace!( + target: "bridge", + "Too many {} headers missing at target in {} relay ({} vs {}). Going to sync up to the mandatory {}", + SourceChain::NAME, + relay_task_name, + best_finalized_source_header_at_target, + range.1, + mandatory_source_header_number, + ); + + *required_header_number = mandatory_source_header_number; + Ok(true) } /// Read best finalized source block number from source client. @@ -236,21 +345,24 @@ async fn background_task( async fn best_finalized_source_header_at_source( finality_source: &SubstrateFinalitySource, relay_task_name: &str, -) -> Result as RelayClient>::Error> +) -> Result where SubstrateFinalitySource: FinalitySourceClient

, P: FinalitySyncPipeline, { - finality_source.best_finalized_block_number().await.map_err(|error| { - log::error!( - target: "bridge", - "Failed to read best finalized source header from source in {} relay: {:?}", - relay_task_name, - error, - ); + finality_source + .on_chain_best_finalized_block_number() + .await + .map_err(|error| { + log::error!( + target: "bridge", + "Failed to read best finalized source header from source in {} relay: {:?}", + relay_task_name, + error, + ); - error - }) + error + }) } /// Read best finalized source block number from target client. @@ -279,68 +391,28 @@ where }) } -/// What to do with the on-demand relay task? -#[derive(Debug, PartialEq)] -enum OnDemandRelayAction { - Start, - Stop, - None, -} - -fn select_on_demand_relay_action( - best_finalized_source_header_at_source: Option, - best_finalized_source_header_at_target: Option, - mut required_source_header_at_target: C::BlockNumber, - maximal_headers_difference: C::BlockNumber, - relay_task_name: &str, - is_active: bool, -) -> OnDemandRelayAction { - // if we have been unable to read header number from the target, then let's assume - // that it is the same as required header number. Otherwise we risk submitting - // unneeded transactions - let best_finalized_source_header_at_target = - best_finalized_source_header_at_target.unwrap_or(required_source_header_at_target); - - // if we have been unable to read header number from the source, then let's assume - // that it is the same as at the target - let best_finalized_source_header_at_source = - best_finalized_source_header_at_source.unwrap_or(best_finalized_source_header_at_target); - - // if there are too many source headers missing from the target node, require some - // new headers at target - // - // why do we need that? When complex headers+messages relay is used, it'll normally only relay - // headers when there are undelivered messages/confirmations. But security model of the - // `pallet-bridge-grandpa` module relies on the fact that headers are synced in real-time and - // that it'll see authorities-change header before unbonding period will end for previous - // authorities set. - let current_headers_difference = best_finalized_source_header_at_source - .checked_sub(&best_finalized_source_header_at_target) - .unwrap_or_else(Zero::zero); - if current_headers_difference > maximal_headers_difference { - required_source_header_at_target = best_finalized_source_header_at_source; - - // don't log if relay is already running - if !is_active { - log::trace!( - target: "bridge", - "Too many {} headers missing at target in {} relay ({} vs {}). Going to sync up to the {}", - C::NAME, - relay_task_name, - best_finalized_source_header_at_source, - best_finalized_source_header_at_target, - best_finalized_source_header_at_source, - ); +/// Read first mandatory header in given inclusive range. +/// +/// Returns `Ok(None)` if there were no mandatory headers in the range. +async fn find_mandatory_header_in_range( + finality_source: &SubstrateFinalitySource, + range: (SourceChain::BlockNumber, SourceChain::BlockNumber), +) -> Result, relay_substrate_client::Error> +where + SubstrateFinalitySource: FinalitySourceClient

, + P: FinalitySyncPipeline, +{ + let mut current = range.0; + while current <= range.1 { + let header: SyncHeader = finality_source.client().header_by_number(current).await?.into(); + if header.is_mandatory() { + return Ok(Some(current)); } - } - // now let's select what to do with relay - let needs_to_be_active = required_source_header_at_target > best_finalized_source_header_at_target; - match (needs_to_be_active, is_active) { - (true, false) => OnDemandRelayAction::Start, - (false, true) => OnDemandRelayAction::Stop, - _ => OnDemandRelayAction::None, + current += One::one(); } + + Ok(None) } /// On-demand headers relay task name. @@ -348,61 +420,6 @@ fn on_demand_headers_relay_name() -> Str format!("on-demand-{}-to-{}", SourceChain::NAME, TargetChain::NAME) } -/// Start on-demand headers relay task. -fn start_on_demand_headers_relay( - task_name: String, - relay_exited_tx: oneshot::Sender<()>, - source_client: Client, - target_client: Client, - pipeline: SubstrateFinalityToSubstrate, -) -> Option> -where - SourceChain::BlockNumber: BlockNumberBase, - SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< - Hash = HashOf, - Number = BlockNumberOf, - Header = SyncHeader, - FinalityProof = GrandpaJustification, - TargetChain = TargetChain, - >, - TargetSign: 'static, -{ - let headers_relay_future = - crate::finality_pipeline::run(pipeline, source_client, target_client, true, MetricsParams::disabled()); - let closure_task_name = task_name.clone(); - async_std::task::Builder::new() - .name(task_name.clone()) - .spawn(async move { - log::info!(target: "bridge", "Starting {} headers relay", closure_task_name); - let result = headers_relay_future.await; - log::trace!(target: "bridge", "{} headers relay has exited. Result: {:?}", closure_task_name, result); - let _ = relay_exited_tx.send(()); - }) - .map_err(|error| { - log::error!( - target: "bridge", - "Failed to start {} relay: {:?}", - task_name, - error, - ); - }) - .ok() -} - -/// Stop on-demand headers relay task. 
-async fn stop_on_demand_headers_relay(task: Option>) { - if let Some(task) = task { - let task_name = task - .task() - .name() - .expect("on-demand tasks are always started with name; qed") - .to_string(); - log::trace!(target: "bridge", "Cancelling {} headers relay", task_name); - task.cancel().await; - log::info!(target: "bridge", "Cancelled {} headers relay", task_name); - } -} - #[cfg(test)] mod tests { use super::*; @@ -412,42 +429,19 @@ mod tests { const AT_SOURCE: Option = Some(10); const AT_TARGET: Option = Some(1); - #[test] - fn starts_relay_when_headers_are_required() { - assert_eq!( - select_on_demand_relay_action::(AT_SOURCE, AT_TARGET, 5, 100, "test", false), - OnDemandRelayAction::Start, - ); - - assert_eq!( - select_on_demand_relay_action::(AT_SOURCE, AT_TARGET, 5, 100, "test", true), - OnDemandRelayAction::None, - ); - } - - #[test] - fn starts_relay_when_too_many_headers_missing() { + #[async_std::test] + async fn mandatory_headers_scan_range_selects_range_if_too_many_headers_are_missing() { assert_eq!( - select_on_demand_relay_action::(AT_SOURCE, AT_TARGET, 0, 5, "test", false), - OnDemandRelayAction::Start, - ); - - assert_eq!( - select_on_demand_relay_action::(AT_SOURCE, AT_TARGET, 0, 5, "test", true), - OnDemandRelayAction::None, + mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, 5, &Arc::new(Mutex::new(0))).await, + Some((AT_TARGET.unwrap() + 1, AT_SOURCE.unwrap())), ); } - #[test] - fn stops_relay_if_required_header_is_synced() { - assert_eq!( - select_on_demand_relay_action::(AT_SOURCE, AT_TARGET, AT_TARGET.unwrap(), 100, "test", true), - OnDemandRelayAction::Stop, - ); - + #[async_std::test] + async fn mandatory_headers_scan_range_selects_nothing_if_enough_headers_are_relayed() { assert_eq!( - select_on_demand_relay_action::(AT_SOURCE, AT_TARGET, AT_TARGET.unwrap(), 100, "test", false), - OnDemandRelayAction::None, + mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, 10, &Arc::new(Mutex::new(0))).await, + None, ); } } diff --git a/bridges/relays/client-ethereum/Cargo.toml b/bridges/relays/client-ethereum/Cargo.toml index da4e7ef59efe..64a76a6b5dae 100644 --- a/bridges/relays/client-ethereum/Cargo.toml +++ b/bridges/relays/client-ethereum/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +async-std = "1.6.5" bp-eth-poa = { path = "../../primitives/ethereum-poa" } codec = { package = "parity-scale-codec", version = "2.0.0" } headers-relay = { path = "../headers" } diff --git a/bridges/relays/client-ethereum/src/client.rs b/bridges/relays/client-ethereum/src/client.rs index e2def5fb03d1..71dac5df6d48 100644 --- a/bridges/relays/client-ethereum/src/client.rs +++ b/bridges/relays/client-ethereum/src/client.rs @@ -22,6 +22,7 @@ use crate::types::{ use crate::{ConnectionParams, Error, Result}; use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; +use relay_utils::relay_loop::RECONNECT_DELAY; use std::sync::Arc; /// Number of headers missing from the Ethereum node for us to consider node not synced. @@ -36,7 +37,28 @@ pub struct Client { impl Client { /// Create a new Ethereum RPC Client. - pub async fn new(params: ConnectionParams) -> Result { + /// + /// This function will keep connecting to given Ethereum node until connection is established + /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. 
+ pub async fn new(params: ConnectionParams) -> Self { + loop { + match Self::try_connect(params.clone()).await { + Ok(client) => return client, + Err(error) => log::error!( + target: "bridge", + "Failed to connect to Ethereum node: {:?}. Going to retry in {}s", + error, + RECONNECT_DELAY.as_secs(), + ), + } + + async_std::task::sleep(RECONNECT_DELAY).await; + } + } + + /// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been established + /// or error otherwise. + pub async fn try_connect(params: ConnectionParams) -> Result { Ok(Self { client: Self::build_client(¶ms).await?, params, diff --git a/bridges/relays/client-kusama/src/lib.rs b/bridges/relays/client-kusama/src/lib.rs index 3c3b1cd4c5d8..f2fba32dc1ed 100644 --- a/bridges/relays/client-kusama/src/lib.rs +++ b/bridges/relays/client-kusama/src/lib.rs @@ -41,6 +41,7 @@ impl Chain for Kusama { type Index = bp_kusama::Nonce; type SignedBlock = bp_kusama::SignedBlock; type Call = (); + type Balance = bp_kusama::Balance; } /// Kusama header type used in headers sync. diff --git a/bridges/relays/client-millau/src/lib.rs b/bridges/relays/client-millau/src/lib.rs index 1708a8efa121..8597d9e59200 100644 --- a/bridges/relays/client-millau/src/lib.rs +++ b/bridges/relays/client-millau/src/lib.rs @@ -44,11 +44,10 @@ impl Chain for Millau { type Index = millau_runtime::Index; type SignedBlock = millau_runtime::SignedBlock; type Call = millau_runtime::Call; + type Balance = millau_runtime::Balance; } impl ChainWithBalances for Millau { - type NativeBalance = millau_runtime::Balance; - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { use frame_support::storage::generator::StorageMap; StorageKey(frame_system::Account::::storage_map_final_key( diff --git a/bridges/relays/client-polkadot/src/lib.rs b/bridges/relays/client-polkadot/src/lib.rs index 2c117c6d3d1c..e502463187d2 100644 --- a/bridges/relays/client-polkadot/src/lib.rs +++ b/bridges/relays/client-polkadot/src/lib.rs @@ -41,6 +41,7 @@ impl Chain for Polkadot { type Index = bp_polkadot::Nonce; type SignedBlock = bp_polkadot::SignedBlock; type Call = (); + type Balance = bp_polkadot::Balance; } /// Polkadot header type used in headers sync. 
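The Ethereum and Substrate RPC clients above now construct themselves by retrying `try_connect` in a loop, sleeping `RECONNECT_DELAY` between failed attempts, instead of failing fast. Below is a minimal, standalone sketch of that retry-until-connected shape; the `connect_with_retry` helper and the concrete delay value are assumptions for illustration only (the real constant lives in `relay_utils::relay_loop`).

use std::time::Duration;

// Assumed value for illustration; the relays import `relay_utils::relay_loop::RECONNECT_DELAY`.
const RECONNECT_DELAY: Duration = Duration::from_secs(10);

// Hypothetical helper mirroring the loop body of `Client::new` in this patch:
// call a fallible async connect routine until it succeeds, logging and sleeping
// between failed attempts.
async fn connect_with_retry<T, E, F, Fut>(mut try_connect: F) -> T
where
    E: std::fmt::Debug,
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    loop {
        match try_connect().await {
            Ok(client) => return client,
            Err(error) => log::error!(
                target: "bridge",
                "Failed to connect: {:?}. Going to retry in {}s",
                error,
                RECONNECT_DELAY.as_secs(),
            ),
        }
        async_std::task::sleep(RECONNECT_DELAY).await;
    }
}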
diff --git a/bridges/relays/client-rialto/src/lib.rs b/bridges/relays/client-rialto/src/lib.rs index 0ddc03681d29..4a0023a87c4f 100644 --- a/bridges/relays/client-rialto/src/lib.rs +++ b/bridges/relays/client-rialto/src/lib.rs @@ -44,11 +44,10 @@ impl Chain for Rialto { type Index = rialto_runtime::Index; type SignedBlock = rialto_runtime::SignedBlock; type Call = rialto_runtime::Call; + type Balance = rialto_runtime::Balance; } impl ChainWithBalances for Rialto { - type NativeBalance = rialto_runtime::Balance; - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { use frame_support::storage::generator::StorageMap; StorageKey(frame_system::Account::::storage_map_final_key( diff --git a/bridges/relays/client-rococo/Cargo.toml b/bridges/relays/client-rococo/Cargo.toml index 095f365374a8..5611ac27b1ce 100644 --- a/bridges/relays/client-rococo/Cargo.toml +++ b/bridges/relays/client-rococo/Cargo.toml @@ -12,7 +12,16 @@ relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Bridge dependencies +bridge-runtime-common = { path = "../../bin/runtime-common" } +bp-header-chain = { path = "../../primitives/header-chain" } +bp-message-dispatch = { path = "../../primitives/message-dispatch" } +bp-messages = { path = "../../primitives/messages" } +bp-polkadot-core = { path = "../../primitives/polkadot-core" } bp-rococo = { path = "../../primitives/chain-rococo" } +bp-runtime = { path = "../../primitives/runtime" } +bp-wococo = { path = "../../primitives/chain-wococo" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } +pallet-bridge-messages = { path = "../../modules/messages" } # Substrate Dependencies frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/bridges/relays/client-rococo/src/lib.rs b/bridges/relays/client-rococo/src/lib.rs index 09d205f06e91..5a7d8999f7f1 100644 --- a/bridges/relays/client-rococo/src/lib.rs +++ b/bridges/relays/client-rococo/src/lib.rs @@ -22,6 +22,8 @@ use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; +pub mod runtime; + /// Rococo header id. pub type HeaderId = relay_utils::HeaderId; @@ -46,12 +48,11 @@ impl Chain for Rococo { type AccountId = bp_rococo::AccountId; type Index = bp_rococo::Index; type SignedBlock = bp_rococo::SignedBlock; - type Call = bp_rococo::Call; + type Call = crate::runtime::Call; + type Balance = bp_rococo::Balance; } impl ChainWithBalances for Rococo { - type NativeBalance = bp_rococo::Balance; - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { StorageKey(bp_rococo::account_info_storage_key(account_id)) } @@ -60,7 +61,7 @@ impl ChainWithBalances for Rococo { impl TransactionSignScheme for Rococo { type Chain = Rococo; type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = bp_rococo::UncheckedExtrinsic; + type SignedTransaction = crate::runtime::UncheckedExtrinsic; fn sign_transaction( genesis_hash: ::Hash, diff --git a/bridges/relays/client-rococo/src/runtime.rs b/bridges/relays/client-rococo/src/runtime.rs new file mode 100644 index 000000000000..6dbd40bee560 --- /dev/null +++ b/bridges/relays/client-rococo/src/runtime.rs @@ -0,0 +1,135 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types that are specific to the Rococo runtime. + +use bp_messages::{LaneId, UnrewardedRelayersState}; +use bp_polkadot_core::PolkadotLike; +use bp_runtime::Chain; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; + +/// Instance of messages pallet that is used to bridge with Wococo chain. +pub type WithWococoMessagesInstance = pallet_bridge_messages::Instance1; + +/// Unchecked Rococo extrinsic. +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +/// Wococo account ownership digest from Rococo. +/// +/// The byte vector returned by this function should be signed with a Wococo account private key. +/// This way, the owner of `rococo_account_id` on Rococo proves that the Wococo account private key +/// is also under his control. +pub fn rococo_to_wococo_account_ownership_digest( + wococo_call: &Call, + rococo_account_id: AccountId, + wococo_spec_version: SpecVersion, +) -> Vec +where + Call: codec::Encode, + AccountId: codec::Encode, + SpecVersion: codec::Encode, +{ + pallet_bridge_dispatch::account_ownership_digest( + wococo_call, + rococo_account_id, + wococo_spec_version, + bp_runtime::ROCOCO_CHAIN_ID, + bp_runtime::WOCOCO_CHAIN_ID, + ) +} + +/// Rococo Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Rococo chain. +/// Ideally this code would be auto-generated from Metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo +/// `construct_runtime`, so that we maintain SCALE-compatibility. +/// +/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs +#[allow(clippy::large_enum_variant)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +pub enum Call { + /// System pallet. + #[codec(index = 0)] + System(SystemCall), + /// Wococo bridge pallet. + #[codec(index = 41)] + BridgeGrandpaWococo(BridgeGrandpaWococoCall), + /// Wococo messages pallet. 
+ #[codec(index = 44)] + BridgeMessagesWococo(BridgeMessagesWococoCall), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum SystemCall { + #[codec(index = 1)] + remark(Vec), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeGrandpaWococoCall { + #[codec(index = 0)] + submit_finality_proof( + ::Header, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeMessagesWococoCall { + #[codec(index = 3)] + send_message( + LaneId, + bp_message_dispatch::MessagePayload< + bp_rococo::AccountId, + bp_wococo::AccountId, + bp_wococo::AccountPublic, + Vec, + >, + bp_rococo::Balance, + ), + #[codec(index = 5)] + receive_messages_proof( + bp_wococo::AccountId, + bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, + u32, + Weight, + ), + #[codec(index = 6)] + receive_messages_delivery_proof( + bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof, + UnrewardedRelayersState, + ), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index 699c3da400ff..f5c2e2656059 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -async-std = "1.6.5" +async-std = { version = "1.6.5", features = ["attributes"] } async-trait = "0.1.40" codec = { package = "parity-scale-codec", version = "2.0.0" } jsonrpsee-proc-macros = "=0.2.0-alpha.6" diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 64c0d6af52b2..4cc8a0394d9a 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -17,7 +17,7 @@ use bp_runtime::Chain as ChainBase; use frame_support::Parameter; use jsonrpsee_ws_client::{DeserializeOwned, Serialize}; -use num_traits::{CheckedSub, Zero}; +use num_traits::{CheckedSub, SaturatingAdd, Zero}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{ generic::SignedBlock, @@ -54,14 +54,16 @@ pub trait Chain: ChainBase + Clone { type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; /// The aggregated `Call` type. type Call: Dispatchable + Debug; + /// Balance of an account in native tokens. + /// + /// The chain may suport multiple tokens, but this particular type is for token that is used + /// to pay for transaction dispatch, to reward different relayers (headers, messages), etc. + type Balance: Parameter + Member + DeserializeOwned + Clone + Copy + CheckedSub + PartialOrd + SaturatingAdd + Zero; } /// Substrate-based chain with `frame_system::Config::AccountData` set to -/// the `pallet_balances::AccountData`. +/// the `pallet_balances::AccountData`. pub trait ChainWithBalances: Chain { - /// Balance of an account in native tokens. 
- type NativeBalance: Parameter + Member + DeserializeOwned + Clone + Copy + CheckedSub + PartialOrd + Zero; - /// Return runtime storage key for getting `frame_system::AccountInfo` of given account. fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey; } diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs index 892a63d6d5b0..f0b7158ecbe3 100644 --- a/bridges/relays/client-substrate/src/client.rs +++ b/bridges/relays/client-substrate/src/client.rs @@ -27,6 +27,7 @@ use jsonrpsee_ws_client::{traits::SubscriptionClient, v2::params::JsonRpcParams, use jsonrpsee_ws_client::{Subscription, WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; use num_traits::Zero; use pallet_balances::AccountData; +use relay_utils::relay_loop::RECONNECT_DELAY; use sp_core::{storage::StorageKey, Bytes}; use sp_trie::StorageProof; use sp_version::RuntimeVersion; @@ -77,7 +78,29 @@ impl std::fmt::Debug for Client { impl Client { /// Returns client that is able to call RPCs on Substrate node over websocket connection. - pub async fn new(params: ConnectionParams) -> Result { + /// + /// This function will keep connecting to given Sustrate node until connection is established + /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. + pub async fn new(params: ConnectionParams) -> Self { + loop { + match Self::try_connect(params.clone()).await { + Ok(client) => return client, + Err(error) => log::error!( + target: "bridge", + "Failed to connect to {} node: {:?}. Going to retry in {}s", + C::NAME, + error, + RECONNECT_DELAY.as_secs(), + ), + } + + async_std::task::sleep(RECONNECT_DELAY).await; + } + } + + /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection + /// has been established or error otherwise. + pub async fn try_connect(params: ConnectionParams) -> Result { let client = Self::build_client(params.clone()).await?; let number: C::BlockNumber = Zero::zero(); @@ -185,7 +208,7 @@ impl Client { } /// Return native tokens balance of the account. - pub async fn free_native_balance(&self, account: C::AccountId) -> Result + pub async fn free_native_balance(&self, account: C::AccountId) -> Result where C: ChainWithBalances, { @@ -194,7 +217,7 @@ impl Client { .await? .ok_or(Error::AccountDoesNotExist)?; let decoded_account_data = - AccountInfo::>::decode(&mut &encoded_account_data.0[..]) + AccountInfo::>::decode(&mut &encoded_account_data.0[..]) .map_err(Error::ResponseParseFailed)?; Ok(decoded_account_data.data.free) } diff --git a/bridges/relays/client-substrate/src/finality_source.rs b/bridges/relays/client-substrate/src/finality_source.rs index 38500934191d..72a11ae99003 100644 --- a/bridges/relays/client-substrate/src/finality_source.rs +++ b/bridges/relays/client-substrate/src/finality_source.rs @@ -21,6 +21,7 @@ use crate::client::Client; use crate::error::Error; use crate::sync_header::SyncHeader; +use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; use bp_header_chain::justification::GrandpaJustification; use codec::Decode; @@ -30,26 +31,46 @@ use relay_utils::relay_loop::Client as RelayClient; use sp_runtime::traits::Header as HeaderT; use std::{marker::PhantomData, pin::Pin}; +/// Shared updatable reference to the maximal header number that we want to sync from the source. +pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; + /// Substrate node as finality source. 
 pub struct FinalitySource {
 	client: Client,
+	maximal_header_number: Option>,
 	_phantom: PhantomData

, } impl FinalitySource { /// Create new headers source using given client. - pub fn new(client: Client) -> Self { + pub fn new(client: Client, maximal_header_number: Option>) -> Self { FinalitySource { client, + maximal_header_number, _phantom: Default::default(), } } + + /// Returns reference to the underlying RPC client. + pub fn client(&self) -> &Client { + &self.client + } + + /// Returns best finalized block number. + pub async fn on_chain_best_finalized_block_number(&self) -> Result { + // we **CAN** continue to relay finality proofs if source node is out of sync, because + // target node may be missing proofs that are already available at the source + let finalized_header_hash = self.client.best_finalized_header_hash().await?; + let finalized_header = self.client.header_by_hash(finalized_header_hash).await?; + Ok(*finalized_header.number()) + } } impl Clone for FinalitySource { fn clone(&self) -> Self { FinalitySource { client: self.client.clone(), + maximal_header_number: self.maximal_header_number.clone(), _phantom: Default::default(), } } @@ -80,11 +101,16 @@ where type FinalityProofsStream = Pin> + Send>>; async fn best_finalized_block_number(&self) -> Result { - // we **CAN** continue to relay finality proofs if source node is out of sync, because - // target node may be missing proofs that are already available at the source - let finalized_header_hash = self.client.best_finalized_header_hash().await?; - let finalized_header = self.client.header_by_hash(finalized_header_hash).await?; - Ok(*finalized_header.number()) + let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; + // never return block number larger than requested. This way we'll never sync headers + // past `maximal_header_number` + if let Some(ref maximal_header_number) = self.maximal_header_number { + let maximal_header_number = *maximal_header_number.lock().await; + if finalized_header_number > maximal_header_number { + finalized_header_number = maximal_header_number; + } + } + Ok(finalized_header_number) } async fn header_and_finality_proof( diff --git a/bridges/relays/client-substrate/src/guard.rs b/bridges/relays/client-substrate/src/guard.rs index 68fef1c4c9c2..c6e191ce078f 100644 --- a/bridges/relays/client-substrate/src/guard.rs +++ b/bridges/relays/client-substrate/src/guard.rs @@ -33,7 +33,7 @@ pub trait Environment: Send + Sync + 'static { /// Return current runtime version. async fn runtime_version(&mut self) -> Result; /// Return free native balance of the account on the chain. - async fn free_native_balance(&mut self, account: C::AccountId) -> Result; + async fn free_native_balance(&mut self, account: C::AccountId) -> Result; /// Return current time. 
fn now(&self) -> Instant { @@ -85,7 +85,7 @@ pub fn abort_on_spec_version_change(mut env: impl Environm pub fn abort_when_account_balance_decreased( mut env: impl Environment, account_id: C::AccountId, - maximal_decrease: C::NativeBalance, + maximal_decrease: C::Balance, ) { const DAY: Duration = Duration::from_secs(60 * 60 * 24); @@ -155,7 +155,7 @@ impl Environment for Client { Client::::runtime_version(self).await.map_err(|e| e.to_string()) } - async fn free_native_balance(&mut self, account: C::AccountId) -> Result { + async fn free_native_balance(&mut self, account: C::AccountId) -> Result { Client::::free_native_balance(self, account) .await .map_err(|e| e.to_string()) @@ -191,11 +191,10 @@ mod tests { type SignedBlock = sp_runtime::generic::SignedBlock>; type Call = (); + type Balance = u32; } impl ChainWithBalances for TestChain { - type NativeBalance = u32; - fn account_info_storage_key(_account_id: &u32) -> sp_core::storage::StorageKey { unreachable!() } diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs index 0f1bfb481e71..44895dcdc6e4 100644 --- a/bridges/relays/client-substrate/src/lib.rs +++ b/bridges/relays/client-substrate/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Tools to interact with (Open) Ethereum node using RPC methods. +//! Tools to interact with Substrate node using RPC methods. #![warn(missing_docs)] diff --git a/bridges/relays/client-westend/src/lib.rs b/bridges/relays/client-westend/src/lib.rs index 417938ccf5a9..6768b81f10f8 100644 --- a/bridges/relays/client-westend/src/lib.rs +++ b/bridges/relays/client-westend/src/lib.rs @@ -47,11 +47,10 @@ impl Chain for Westend { type Index = bp_westend::Nonce; type SignedBlock = bp_westend::SignedBlock; type Call = bp_westend::Call; + type Balance = bp_westend::Balance; } impl ChainWithBalances for Westend { - type NativeBalance = bp_westend::Balance; - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { StorageKey(bp_westend::account_info_storage_key(account_id)) } diff --git a/bridges/relays/client-wococo/Cargo.toml b/bridges/relays/client-wococo/Cargo.toml index 22d710ca3d7b..c1b9aafd95e7 100644 --- a/bridges/relays/client-wococo/Cargo.toml +++ b/bridges/relays/client-wococo/Cargo.toml @@ -12,7 +12,16 @@ relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Bridge dependencies +bridge-runtime-common = { path = "../../bin/runtime-common" } +bp-header-chain = { path = "../../primitives/header-chain" } +bp-message-dispatch = { path = "../../primitives/message-dispatch" } +bp-messages = { path = "../../primitives/messages" } +bp-polkadot-core = { path = "../../primitives/polkadot-core" } +bp-rococo = { path = "../../primitives/chain-rococo" } +bp-runtime = { path = "../../primitives/runtime" } bp-wococo = { path = "../../primitives/chain-wococo" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } +pallet-bridge-messages = { path = "../../modules/messages" } # Substrate Dependencies frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/bridges/relays/client-wococo/src/lib.rs b/bridges/relays/client-wococo/src/lib.rs index be2f872b7d77..8ceba7c7c436 100644 --- a/bridges/relays/client-wococo/src/lib.rs +++ b/bridges/relays/client-wococo/src/lib.rs @@ -22,6 +22,8 @@ use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, 
traits::IdentifyAccount}; use std::time::Duration; +pub mod runtime; + /// Wococo header id. pub type HeaderId = relay_utils::HeaderId; @@ -46,12 +48,11 @@ impl Chain for Wococo { type AccountId = bp_wococo::AccountId; type Index = bp_wococo::Index; type SignedBlock = bp_wococo::SignedBlock; - type Call = bp_wococo::Call; + type Call = crate::runtime::Call; + type Balance = bp_wococo::Balance; } impl ChainWithBalances for Wococo { - type NativeBalance = bp_wococo::Balance; - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { StorageKey(bp_wococo::account_info_storage_key(account_id)) } @@ -60,7 +61,7 @@ impl ChainWithBalances for Wococo { impl TransactionSignScheme for Wococo { type Chain = Wococo; type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = bp_wococo::UncheckedExtrinsic; + type SignedTransaction = crate::runtime::UncheckedExtrinsic; fn sign_transaction( genesis_hash: ::Hash, diff --git a/bridges/relays/client-wococo/src/runtime.rs b/bridges/relays/client-wococo/src/runtime.rs new file mode 100644 index 000000000000..e973c3a6d028 --- /dev/null +++ b/bridges/relays/client-wococo/src/runtime.rs @@ -0,0 +1,135 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types that are specific to the Wococo runtime. + +use bp_messages::{LaneId, UnrewardedRelayersState}; +use bp_polkadot_core::PolkadotLike; +use bp_runtime::Chain; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; + +/// Instance of messages pallet that is used to bridge with Rococo chain. +pub type WithRococoMessagesInstance = pallet_bridge_messages::DefaultInstance; + +/// Unchecked Wococo extrinsic. +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +/// Rococo account ownership digest from Wococo. +/// +/// The byte vector returned by this function should be signed with a Rococo account private key. +/// This way, the owner of `wococo_account_id` on Rococo proves that the Rococo account private key +/// is also under his control. +pub fn wococo_to_rococo_account_ownership_digest( + rococo_call: &Call, + wococo_account_id: AccountId, + rococo_spec_version: SpecVersion, +) -> Vec +where + Call: codec::Encode, + AccountId: codec::Encode, + SpecVersion: codec::Encode, +{ + pallet_bridge_dispatch::account_ownership_digest( + rococo_call, + wococo_account_id, + rococo_spec_version, + bp_runtime::WOCOCO_CHAIN_ID, + bp_runtime::ROCOCO_CHAIN_ID, + ) +} + +/// Wococo Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Rococo chain. +/// Ideally this code would be auto-generated from Metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. 
+/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo +/// `construct_runtime`, so that we maintain SCALE-compatibility. +/// +/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs +#[allow(clippy::large_enum_variant)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +pub enum Call { + /// System pallet. + #[codec(index = 0)] + System(SystemCall), + /// Rococo bridge pallet. + #[codec(index = 40)] + BridgeGrandpaRococo(BridgeGrandpaRococoCall), + /// Rococo messages pallet. + #[codec(index = 43)] + BridgeMessagesRococo(BridgeMessagesRococoCall), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum SystemCall { + #[codec(index = 1)] + remark(Vec), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeGrandpaRococoCall { + #[codec(index = 0)] + submit_finality_proof( + ::Header, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeMessagesRococoCall { + #[codec(index = 3)] + send_message( + LaneId, + bp_message_dispatch::MessagePayload< + bp_rococo::AccountId, + bp_wococo::AccountId, + bp_wococo::AccountPublic, + Vec, + >, + bp_rococo::Balance, + ), + #[codec(index = 5)] + receive_messages_proof( + bp_rococo::AccountId, + bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, + u32, + Weight, + ), + #[codec(index = 6)] + receive_messages_delivery_proof( + bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof, + UnrewardedRelayersState, + ), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} diff --git a/bridges/relays/exchange/src/exchange.rs b/bridges/relays/exchange/src/exchange.rs index 4a2f07fa7f97..b87b99ee4207 100644 --- a/bridges/relays/exchange/src/exchange.rs +++ b/bridges/relays/exchange/src/exchange.rs @@ -324,7 +324,7 @@ async fn wait_transaction_mined( source_tx_hash: &TransactionHashOf

, ) -> Result<(HeaderId

, usize), String> { loop { - let source_header_and_tx = source_client.transaction_block(&source_tx_hash).await.map_err(|err| { + let source_header_and_tx = source_client.transaction_block(source_tx_hash).await.map_err(|err| { format!( "Error retrieving transaction {} from {} node: {:?}", source_tx_hash, @@ -363,7 +363,7 @@ async fn wait_header_imported( source_header_id: &HeaderId

, ) -> Result<(), String> { loop { - let is_header_known = target_client.is_header_known(&source_header_id).await.map_err(|err| { + let is_header_known = target_client.is_header_known(source_header_id).await.map_err(|err| { format!( "Failed to check existence of header {}/{} on {} node: {:?}", source_header_id.0, @@ -406,7 +406,7 @@ async fn wait_header_finalized( ) -> Result<(), String> { loop { let is_header_finalized = target_client - .is_header_finalized(&source_header_id) + .is_header_finalized(source_header_id) .await .map_err(|err| { format!( diff --git a/bridges/relays/exchange/src/exchange_loop.rs b/bridges/relays/exchange/src/exchange_loop.rs index 4525c33e36a1..8da4c3f45687 100644 --- a/bridges/relays/exchange/src/exchange_loop.rs +++ b/bridges/relays/exchange/src/exchange_loop.rs @@ -215,7 +215,7 @@ async fn run_loop_iteration( state.best_processed_header_number = state.best_processed_header_number + One::one(); storage.set_state(state); - if let Some(ref exchange_loop_metrics) = exchange_loop_metrics { + if let Some(exchange_loop_metrics) = exchange_loop_metrics { exchange_loop_metrics.update::

( state.best_processed_header_number, best_finalized_header_id.0, diff --git a/bridges/relays/finality/src/finality_loop.rs b/bridges/relays/finality/src/finality_loop.rs index 3aa55a8ac591..3ea729d123e7 100644 --- a/bridges/relays/finality/src/finality_loop.rs +++ b/bridges/relays/finality/src/finality_loop.rs @@ -39,8 +39,6 @@ use std::{ /// Finality proof synchronization loop parameters. #[derive(Debug, Clone)] pub struct FinalitySyncParams { - /// If `true`, then the separate async task for running finality loop is NOT spawned. - pub is_on_demand_task: bool, /// Interval at which we check updates on both clients. Normally should be larger than /// `min(source_block_time, target_block_time)`. /// @@ -60,6 +58,8 @@ pub struct FinalitySyncParams { pub recent_finality_proofs_limit: usize, /// Timeout before we treat our transactions as lost and restart the whole sync process. pub stall_timeout: Duration, + /// If true, only mandatory headers are relayed. + pub only_mandatory_headers: bool, } /// Source client used in finality synchronization loop. @@ -107,7 +107,6 @@ pub async fn run( ) -> Result<(), String> { let exit_signal = exit_signal.shared(); relay_utils::relay_loop(source_client, target_client) - .spawn_loop_task(!sync_params.is_on_demand_task) .with_metrics(Some(metrics_prefix::

()), metrics_params) .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? @@ -367,7 +366,7 @@ where } } -async fn select_header_to_submit( +pub(crate) async fn select_header_to_submit( source_client: &SC, target_client: &TC, finality_proofs_stream: &mut RestartableFinalityProofsStream, @@ -400,6 +399,11 @@ where .await?; let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof { SelectedFinalityProof::Mandatory(header, finality_proof) => return Ok(Some((header, finality_proof))), + _ if sync_params.only_mandatory_headers => { + // we are not reading finality proofs from the stream, so eventually it'll break + // but we don't care about transient proofs at all, so it is acceptable + return Ok(None); + } SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => { (unjustified_headers, Some((header, finality_proof))) } diff --git a/bridges/relays/finality/src/finality_loop_tests.rs b/bridges/relays/finality/src/finality_loop_tests.rs index 645aeb1777c6..e7e0cdb39fb3 100644 --- a/bridges/relays/finality/src/finality_loop_tests.rs +++ b/bridges/relays/finality/src/finality_loop_tests.rs @@ -20,7 +20,8 @@ use crate::finality_loop::{ prune_recent_finality_proofs, read_finality_proofs_from_stream, run, select_better_recent_finality_proof, - FinalityProofs, FinalitySyncParams, SourceClient, TargetClient, + select_header_to_submit, FinalityProofs, FinalitySyncParams, RestartableFinalityProofsStream, SourceClient, + TargetClient, }; use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; @@ -165,8 +166,11 @@ impl TargetClient for TestTargetClient { } } -fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData { - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); +fn prepare_test_clients( + exit_sender: futures::channel::mpsc::UnboundedSender<()>, + state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, + source_headers: HashMap)>, +) -> (TestSourceClient, TestTargetClient) { let internal_state_function: Arc = Arc::new(move |data| { if state_function(data) { exit_sender.unbounded_send(()).unwrap(); @@ -174,7 +178,30 @@ fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync }); let clients_data = Arc::new(Mutex::new(ClientsData { source_best_block_number: 10, - source_headers: vec![ + source_headers, + source_proofs: vec![TestFinalityProof(12), TestFinalityProof(14)], + + target_best_block_number: 5, + target_headers: vec![], + })); + ( + TestSourceClient { + on_method_call: internal_state_function.clone(), + data: clients_data.clone(), + }, + TestTargetClient { + on_method_call: internal_state_function, + data: clients_data, + }, + ) +} + +fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData { + let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); + let (source_client, target_client) = prepare_test_clients( + exit_sender, + state_function, + vec![ (6, (TestSourceHeader(false, 6), None)), (7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))), (8, (TestSourceHeader(true, 8), Some(TestFinalityProof(8)))), @@ -183,26 +210,15 @@ fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync ] .into_iter() .collect(), - source_proofs: vec![TestFinalityProof(12), TestFinalityProof(14)], - - target_best_block_number: 5, - target_headers: 
vec![], - })); - let source_client = TestSourceClient { - on_method_call: internal_state_function.clone(), - data: clients_data.clone(), - }; - let target_client = TestTargetClient { - on_method_call: internal_state_function, - data: clients_data.clone(), - }; + ); let sync_params = FinalitySyncParams { - is_on_demand_task: false, tick: Duration::from_secs(0), recent_finality_proofs_limit: 1024, stall_timeout: Duration::from_secs(1), + only_mandatory_headers: false, }; + let clients_data = source_client.data.clone(); let _ = async_std::task::block_on(run( source_client, target_client, @@ -260,6 +276,65 @@ fn finality_sync_loop_works() { ); } +fn run_only_mandatory_headers_mode_test( + only_mandatory_headers: bool, + has_mandatory_headers: bool, +) -> Option<(TestSourceHeader, TestFinalityProof)> { + let (exit_sender, _) = futures::channel::mpsc::unbounded(); + let (source_client, target_client) = prepare_test_clients( + exit_sender, + |_| false, + vec![ + (6, (TestSourceHeader(false, 6), Some(TestFinalityProof(6)))), + (7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))), + ( + 8, + (TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8))), + ), + (9, (TestSourceHeader(false, 9), Some(TestFinalityProof(9)))), + (10, (TestSourceHeader(false, 10), Some(TestFinalityProof(10)))), + ] + .into_iter() + .collect(), + ); + async_std::task::block_on(select_header_to_submit( + &source_client, + &target_client, + &mut RestartableFinalityProofsStream::from(futures::stream::empty().boxed()), + &mut vec![], + 10, + 5, + &FinalitySyncParams { + tick: Duration::from_secs(0), + recent_finality_proofs_limit: 0, + stall_timeout: Duration::from_secs(0), + only_mandatory_headers, + }, + )) + .unwrap() +} + +#[test] +fn select_header_to_submit_skips_non_mandatory_headers_when_only_mandatory_headers_are_required() { + assert_eq!(run_only_mandatory_headers_mode_test(true, false), None); + assert_eq!( + run_only_mandatory_headers_mode_test(false, false), + Some((TestSourceHeader(false, 10), TestFinalityProof(10))), + ); +} + +#[test] +fn select_header_to_submit_selects_mandatory_headers_when_only_mandatory_headers_are_required() { + assert_eq!( + run_only_mandatory_headers_mode_test(true, true), + Some((TestSourceHeader(true, 8), TestFinalityProof(8))), + ); + assert_eq!( + run_only_mandatory_headers_mode_test(false, true), + Some((TestSourceHeader(true, 8), TestFinalityProof(8))), + ); +} + #[test] fn select_better_recent_finality_proof_works() { // if there are no unjustified headers, nothing is changed @@ -343,7 +418,7 @@ fn read_finality_proofs_from_stream_works() { let mut stream = futures::stream::pending().into(); read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]); - assert_eq!(stream.needs_restart, false); + assert!(!stream.needs_restart); // when stream has entry with target, it is added to the recent proofs container let mut stream = futures::stream::iter(vec![TestFinalityProof(4)]) @@ -354,7 +429,7 @@ fn read_finality_proofs_from_stream_works() { recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] ); - assert_eq!(stream.needs_restart, false); + assert!(!stream.needs_restart); // when stream has ended, we'll need to restart it let mut stream = futures::stream::empty().into(); @@ -363,7 +438,7 @@ fn read_finality_proofs_from_stream_works() { recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] ); - 
assert_eq!(stream.needs_restart, true); + assert!(stream.needs_restart); } #[test] diff --git a/bridges/relays/headers/src/headers.rs b/bridges/relays/headers/src/headers.rs index be3e2cb6e6d8..0b948d9da4cc 100644 --- a/bridges/relays/headers/src/headers.rs +++ b/bridges/relays/headers/src/headers.rs @@ -295,7 +295,7 @@ impl QueuedHeaders

{ &mut self.orphan, &mut self.known_headers, HeaderStatus::Orphan, - &id, + id, ); return; } @@ -305,7 +305,7 @@ impl QueuedHeaders

{ &mut self.maybe_extra, &mut self.known_headers, HeaderStatus::MaybeExtra, - &id, + id, ); } @@ -324,7 +324,7 @@ impl QueuedHeaders

{ destination_queue, &mut self.known_headers, destination_status, - &id, + id, |header| header, ); } @@ -654,7 +654,7 @@ impl QueuedHeaders

{ // remember that the header itself is synced // (condition is here to avoid duplicate log messages) if !id_processed { - set_header_status::

(&mut self.known_headers, &id, HeaderStatus::Synced); + set_header_status::

(&mut self.known_headers, id, HeaderStatus::Synced); } // now let's move all descendants from maybe_orphan && orphan queues to @@ -1505,7 +1505,7 @@ pub(crate) mod tests { let mut queue = QueuedHeaders::::default(); // when we do not know header itself - assert_eq!(queue.is_parent_incomplete(&id(50)), false); + assert!(!queue.is_parent_incomplete(&id(50))); // when we do not know parent queue @@ -1514,7 +1514,7 @@ pub(crate) mod tests { .or_default() .insert(hash(100), HeaderStatus::Incomplete); queue.incomplete.entry(100).or_default().insert(hash(100), header(100)); - assert_eq!(queue.is_parent_incomplete(&id(100)), false); + assert!(!queue.is_parent_incomplete(&id(100))); // when parent is inside incomplete queue (i.e. some other ancestor is actually incomplete) queue @@ -1523,7 +1523,7 @@ pub(crate) mod tests { .or_default() .insert(hash(101), HeaderStatus::Submitted); queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - assert_eq!(queue.is_parent_incomplete(&id(101)), true); + assert!(queue.is_parent_incomplete(&id(101))); // when parent is the incomplete header and we do not have completion data queue.incomplete_headers.insert(id(199), None); @@ -1533,7 +1533,7 @@ pub(crate) mod tests { .or_default() .insert(hash(200), HeaderStatus::Submitted); queue.submitted.entry(200).or_default().insert(hash(200), header(200)); - assert_eq!(queue.is_parent_incomplete(&id(200)), true); + assert!(queue.is_parent_incomplete(&id(200))); // when parent is the incomplete header and we have completion data queue.completion_data.insert(id(299), 299_299); @@ -1543,7 +1543,7 @@ pub(crate) mod tests { .or_default() .insert(hash(300), HeaderStatus::Submitted); queue.submitted.entry(300).or_default().insert(hash(300), header(300)); - assert_eq!(queue.is_parent_incomplete(&id(300)), true); + assert!(queue.is_parent_incomplete(&id(300))); } #[test] diff --git a/bridges/relays/messages/Cargo.toml b/bridges/relays/messages/Cargo.toml index e02f8ccc8682..ea5d46845c5a 100644 --- a/bridges/relays/messages/Cargo.toml +++ b/bridges/relays/messages/Cargo.toml @@ -6,14 +6,16 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -async-std = "1.6.5" +async-std = { version = "1.6.5", features = ["attributes"] } async-trait = "0.1.40" futures = "0.3.5" hex = "0.4" log = "0.4.11" +num-traits = "0.2" parking_lot = "0.11.0" # Bridge Dependencies bp-messages = { path = "../../primitives/messages" } +bp-runtime = { path = "../../primitives/runtime" } relay-utils = { path = "../utils" } diff --git a/bridges/relays/messages/src/message_lane.rs b/bridges/relays/messages/src/message_lane.rs index 6473ec987500..8757e9322ce4 100644 --- a/bridges/relays/messages/src/message_lane.rs +++ b/bridges/relays/messages/src/message_lane.rs @@ -19,6 +19,7 @@ //! 1) relay new messages from source to target node; //! 2) relay proof-of-delivery from target to source node. +use num_traits::{SaturatingAdd, Zero}; use relay_utils::{BlockNumberBase, HeaderId}; use std::fmt::Debug; @@ -34,6 +35,12 @@ pub trait MessageLane: 'static + Clone + Send + Sync { /// Messages receiving proof. type MessagesReceivingProof: Clone + Debug + Send + Sync; + /// The type of the source chain token balance, that is used to: + /// + /// 1) pay transaction fees; + /// 2) pay message delivery and dispatch fee; + /// 3) pay relayer rewards. + type SourceChainBalance: Clone + Copy + Debug + PartialOrd + SaturatingAdd + Zero + Send + Sync; /// Number of the source header. 
type SourceHeaderNumber: BlockNumberBase; /// Hash of the source header. diff --git a/bridges/relays/messages/src/message_lane_loop.rs b/bridges/relays/messages/src/message_lane_loop.rs index af04bf984e1f..c87c024f3976 100644 --- a/bridges/relays/messages/src/message_lane_loop.rs +++ b/bridges/relays/messages/src/message_lane_loop.rs @@ -31,6 +31,7 @@ use crate::metrics::MessageLaneLoopMetrics; use async_trait::async_trait; use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_runtime::messages::DispatchFeePayment; use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; use relay_utils::{ interval, @@ -58,6 +59,15 @@ pub struct Params { pub delivery_params: MessageDeliveryParams, } +/// Relayer operating mode. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum RelayerMode { + /// The relayer doesn't care about rewards. + Altruistic, + /// The relayer will deliver all messages and confirmations as long as he's not losing any funds. + NoLosses, +} + /// Message delivery race parameters. #[derive(Debug, Clone)] pub struct MessageDeliveryParams { @@ -74,20 +84,26 @@ pub struct MessageDeliveryParams { /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. pub max_messages_weight_in_single_batch: Weight, /// Maximal cumulative size of relayed messages in single delivery transaction. - pub max_messages_size_in_single_batch: usize, + pub max_messages_size_in_single_batch: u32, + /// Relayer operating mode. + pub relayer_mode: RelayerMode, } -/// Message weights. +/// Message details. #[derive(Debug, Clone, Copy, PartialEq)] -pub struct MessageWeights { +pub struct MessageDetails { /// Message dispatch weight. - pub weight: Weight, + pub dispatch_weight: Weight, /// Message size (number of bytes in encoded payload). - pub size: usize, + pub size: u32, + /// The relayer reward paid in the source chain tokens. + pub reward: SourceChainBalance, + /// Where the fee for dispatching message is paid? + pub dispatch_fee_payment: DispatchFeePayment, } -/// Messages weights map. -pub type MessageWeightsMap = BTreeMap; +/// Messages details map. +pub type MessageDetailsMap = BTreeMap>; /// Message delivery race proof parameters. #[derive(Debug, PartialEq)] @@ -117,13 +133,13 @@ pub trait SourceClient: RelayClient { /// Returns mapping of message nonces, generated on this client, to their weights. /// - /// Some weights may be missing from returned map, if corresponding messages were pruned at + /// Some messages may be missing from returned map, if corresponding messages were pruned at /// the source chain. - async fn generated_messages_weights( + async fn generated_message_details( &self, id: SourceHeaderIdOf

, nonces: RangeInclusive, - ) -> Result; + ) -> Result, Self::Error>; /// Prove messages in inclusive range [begin; end]. async fn prove_messages( @@ -142,6 +158,9 @@ pub trait SourceClient: RelayClient { /// We need given finalized target header on source to continue synchronization. async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

); + + /// Estimate cost of single message confirmation transaction in source chain tokens. + async fn estimate_confirmation_transaction(&self) -> P::SourceChainBalance; } /// Target client trait. @@ -183,6 +202,17 @@ pub trait TargetClient: RelayClient { /// We need given finalized source header on target to continue synchronization. async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

); + + /// Estimate cost of messages delivery transaction in source chain tokens. + /// + /// Please keep in mind that the returned cost must be converted to the source chain + /// tokens, even though the transaction fee will be paid in the target chain tokens. + async fn estimate_delivery_transaction_in_source_tokens( + &self, + nonces: RangeInclusive, + total_dispatch_weight: Weight, + total_size: u32, + ) -> P::SourceChainBalance; } /// State of the client. @@ -426,6 +456,10 @@ pub(crate) mod tests { HeaderId(number, number) } + pub const CONFIRMATION_TRANSACTION_COST: TestSourceChainBalance = 1; + pub const BASE_MESSAGE_DELIVERY_TRANSACTION_COST: TestSourceChainBalance = 1; + + pub type TestSourceChainBalance = u64; pub type TestSourceHeaderId = HeaderId; pub type TestTargetHeaderId = HeaderId; @@ -457,6 +491,7 @@ pub(crate) mod tests { type MessagesProof = TestMessagesProof; type MessagesReceivingProof = TestMessagesReceivingProof; + type SourceChainBalance = TestSourceChainBalance; type SourceHeaderNumber = TestSourceHeaderNumber; type SourceHeaderHash = TestSourceHeaderHash; @@ -490,6 +525,15 @@ pub(crate) mod tests { tick: Arc, } + impl Default for TestSourceClient { + fn default() -> Self { + TestSourceClient { + data: Arc::new(Mutex::new(TestClientData::default())), + tick: Arc::new(|_| {}), + } + } + } + #[async_trait] impl RelayClient for TestSourceClient { type Error = TestError; @@ -536,13 +580,23 @@ pub(crate) mod tests { Ok((id, data.source_latest_confirmed_received_nonce)) } - async fn generated_messages_weights( + async fn generated_message_details( &self, _id: SourceHeaderIdOf, nonces: RangeInclusive, - ) -> Result { + ) -> Result, TestError> { Ok(nonces - .map(|nonce| (nonce, MessageWeights { weight: 1, size: 1 })) + .map(|nonce| { + ( + nonce, + MessageDetails { + dispatch_weight: 1, + size: 1, + reward: 1, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + }, + ) + }) .collect()) } @@ -596,6 +650,10 @@ pub(crate) mod tests { data.target_to_source_header_requirements.push(id); (self.tick)(&mut *data); } + + async fn estimate_confirmation_transaction(&self) -> TestSourceChainBalance { + CONFIRMATION_TRANSACTION_COST + } } #[derive(Clone)] @@ -604,6 +662,15 @@ pub(crate) mod tests { tick: Arc, } + impl Default for TestTargetClient { + fn default() -> Self { + TestTargetClient { + data: Arc::new(Mutex::new(TestClientData::default())), + tick: Arc::new(|_| {}), + } + } + } + #[async_trait] impl RelayClient for TestTargetClient { type Error = TestError; @@ -702,6 +769,17 @@ pub(crate) mod tests { data.source_to_target_header_requirements.push(id); (self.tick)(&mut *data); } + + async fn estimate_delivery_transaction_in_source_tokens( + &self, + nonces: RangeInclusive, + total_dispatch_weight: Weight, + total_size: u32, + ) -> TestSourceChainBalance { + BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1) + + total_dispatch_weight + + total_size as TestSourceChainBalance + } } fn run_loop_test( @@ -734,6 +812,7 @@ pub(crate) mod tests { max_messages_in_single_batch: 4, max_messages_weight_in_single_batch: 4, max_messages_size_in_single_batch: 4, + relayer_mode: RelayerMode::Altruistic, }, }, source_client, diff --git a/bridges/relays/messages/src/message_race_delivery.rs b/bridges/relays/messages/src/message_race_delivery.rs index b50b0ffe31ba..bde09af7068f 100644 --- a/bridges/relays/messages/src/message_race_delivery.rs +++ b/bridges/relays/messages/src/message_race_delivery.rs @@ -15,24 +15,27 @@ use crate::message_lane::{MessageLane, 
SourceHeaderIdOf, TargetHeaderIdOf}; use crate::message_lane_loop::{ - MessageDeliveryParams, MessageProofParameters, MessageWeightsMap, SourceClient as MessageLaneSourceClient, - SourceClientState, TargetClient as MessageLaneTargetClient, TargetClientState, + MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, RelayerMode, + SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient, + TargetClientState, }; use crate::message_race_loop::{ MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, TargetClient, TargetClientNonces, }; -use crate::message_race_strategy::BasicStrategy; +use crate::message_race_strategy::{BasicStrategy, SourceRangesQueue}; use crate::metrics::MessageLaneLoopMetrics; use async_trait::async_trait; use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; +use bp_runtime::messages::DispatchFeePayment; use futures::stream::FusedStream; +use num_traits::{SaturatingAdd, Zero}; use relay_utils::FailedClient; use std::{ - collections::{BTreeMap, VecDeque}, + collections::VecDeque, marker::PhantomData, - ops::RangeInclusive, + ops::{Range, RangeInclusive}, time::Duration, }; @@ -48,24 +51,27 @@ pub async fn run( ) -> Result<(), FailedClient> { crate::message_race_loop::run( MessageDeliveryRaceSource { - client: source_client, + client: source_client.clone(), metrics_msg: metrics_msg.clone(), _phantom: Default::default(), }, source_state_updates, MessageDeliveryRaceTarget { - client: target_client, + client: target_client.clone(), metrics_msg, _phantom: Default::default(), }, target_state_updates, stall_timeout, - MessageDeliveryStrategy::

{ + MessageDeliveryStrategy:: { + lane_source_client: source_client, + lane_target_client: target_client, max_unrewarded_relayer_entries_at_target: params.max_unrewarded_relayer_entries_at_target, max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, max_messages_in_single_batch: params.max_messages_in_single_batch, max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch, max_messages_size_in_single_batch: params.max_messages_size_in_single_batch, + relayer_mode: params.relayer_mode, latest_confirmed_nonces_at_source: VecDeque::new(), target_nonces: None, strategy: BasicStrategy::new(), @@ -107,7 +113,7 @@ where C: MessageLaneSourceClient

, { type Error = C::Error; - type NoncesRange = MessageWeightsMap; + type NoncesRange = MessageDetailsMap; type ProofParameters = MessageProofParameters; async fn nonces( @@ -125,10 +131,10 @@ where let new_nonces = if latest_generated_nonce > prev_latest_nonce { self.client - .generated_messages_weights(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce) + .generated_message_details(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce) .await? } else { - MessageWeightsMap::new() + MessageDetailsMap::new() }; Ok(( @@ -222,7 +228,11 @@ struct DeliveryRaceTargetNoncesData { } /// Messages delivery strategy. -struct MessageDeliveryStrategy { +struct MessageDeliveryStrategy { + /// The client that is connected to the message lane source node. + lane_source_client: SC, + /// The client that is connected to the message lane target node. + lane_target_client: TC, /// Maximal unrewarded relayer entries at target client. max_unrewarded_relayer_entries_at_target: MessageNonce, /// Maximal unconfirmed nonces at target client. @@ -232,7 +242,9 @@ struct MessageDeliveryStrategy { /// Maximal cumulative messages weight in the single delivery transaction. max_messages_weight_in_single_batch: Weight, /// Maximal messages size in the single delivery transaction. - max_messages_size_in_single_batch: usize, + max_messages_size_in_single_batch: u32, + /// Relayer operating mode. + relayer_mode: RelayerMode, /// Latest confirmed nonces at the source client + the header id where we have first met this nonce. latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf

, MessageNonce)>, /// Target nonces from the source client. @@ -246,11 +258,11 @@ type MessageDeliveryStrategyBase

= BasicStrategy<

::SourceHeaderHash,

::TargetHeaderNumber,

::TargetHeaderHash, - MessageWeightsMap, + MessageDetailsMap<

::SourceChainBalance>,

::MessagesProof, >; -impl std::fmt::Debug for MessageDeliveryStrategy

{ +impl std::fmt::Debug for MessageDeliveryStrategy { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("MessageDeliveryStrategy") .field( @@ -280,10 +292,26 @@ impl std::fmt::Debug for MessageDeliveryStrategy

{ } } -impl RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> - for MessageDeliveryStrategy

+impl MessageDeliveryStrategy { + /// Returns total weight of all undelivered messages. + fn total_queued_dispatch_weight(&self) -> Weight { + self.strategy + .source_queue() + .iter() + .flat_map(|(_, range)| range.values().map(|details| details.dispatch_weight)) + .fold(0, |total, weight| total.saturating_add(weight)) + } +} + +#[async_trait] +impl RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> + for MessageDeliveryStrategy +where + P: MessageLane, + SC: MessageLaneSourceClient

, + TC: MessageLaneTargetClient

, { - type SourceNoncesRange = MessageWeightsMap; + type SourceNoncesRange = MessageDetailsMap; type ProofParameters = MessageProofParameters; type TargetNoncesData = DeliveryRaceTargetNoncesData; @@ -383,9 +411,9 @@ impl RaceStrategy, TargetHeaderIdOf

, P::M ) } - fn select_nonces_to_deliver( + async fn select_nonces_to_deliver( &mut self, - race_state: &RaceState, TargetHeaderIdOf

, P::MessagesProof>, + race_state: RaceState, TargetHeaderIdOf

, P::MessagesProof>, ) -> Option<(RangeInclusive, Self::ProofParameters)> { let best_finalized_source_header_id_at_best_target = race_state.best_finalized_source_header_id_at_best_target.clone()?; @@ -473,87 +501,236 @@ impl RaceStrategy, TargetHeaderIdOf

, P::M let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let mut selected_weight: Weight = 0; - let mut selected_size: usize = 0; - let mut selected_count: MessageNonce = 0; + let relayer_mode = self.relayer_mode; + let lane_source_client = self.lane_source_client.clone(); + let lane_target_client = self.lane_target_client.clone(); + + let maximal_source_queue_index = self.strategy.maximal_available_source_queue_index(race_state)?; + let previous_total_dispatch_weight = self.total_queued_dispatch_weight(); + let source_queue = self.strategy.source_queue(); + let range_end = select_nonces_for_delivery_transaction( + relayer_mode, + max_nonces, + max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + lane_source_client.clone(), + lane_target_client.clone(), + source_queue, + 0..maximal_source_queue_index + 1, + ) + .await?; - let selected_nonces = self - .strategy - .select_nonces_to_deliver_with_selector(race_state, |range| { - let to_requeue = range - .into_iter() - .skip_while(|(_, weight)| { - // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` - // and `max_messages_size_in_single_batch`, we may still try to submit transaction - // with single message if message overflows these limits. The worst case would be if - // transaction will be rejected by the target runtime, but at least we have tried. - - // limit messages in the batch by weight - let new_selected_weight = match selected_weight.checked_add(weight.weight) { - Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => { - new_selected_weight - } - new_selected_weight if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with declared dispatch \ - weight {:?} that overflows maximal configured weight {}", - new_selected_weight, - max_messages_weight_in_single_batch, - ); - new_selected_weight.unwrap_or(Weight::MAX) - } - _ => return false, - }; - - // limit messages in the batch by size - let new_selected_size = match selected_size.checked_add(weight.size) { - Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => { - new_selected_size - } - new_selected_size if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with message \ - size {:?} that overflows maximal configured size {}", - new_selected_size, - max_messages_size_in_single_batch, - ); - new_selected_size.unwrap_or(usize::MAX) - } - _ => return false, - }; - - // limit number of messages in the batch - let new_selected_count = selected_count + 1; - if new_selected_count > max_nonces { - return false; - } - - selected_weight = new_selected_weight; - selected_size = new_selected_size; - selected_count = new_selected_count; - true - }) - .collect::>(); - if to_requeue.is_empty() { - None - } else { - Some(to_requeue) - } - })?; + let range_begin = source_queue[0].1.begin(); + let selected_nonces = range_begin..=range_end; + self.strategy.remove_le_nonces_from_source_queue(range_end); + + let new_total_dispatch_weight = self.total_queued_dispatch_weight(); + let dispatch_weight = previous_total_dispatch_weight - new_total_dispatch_weight; Some(( selected_nonces, MessageProofParameters { outbound_state_proof_required, - dispatch_weight: selected_weight, + 
dispatch_weight, }, )) } } -impl NoncesRange for MessageWeightsMap { +/// From the given set of source nonces that are ready to be delivered, select the nonces +/// that fit into a single delivery transaction. +/// +/// The function returns the nonce of the last message selected for the current batch, +/// or `None` if no messages can be delivered right now. +#[allow(clippy::too_many_arguments)] +async fn select_nonces_for_delivery_transaction( + relayer_mode: RelayerMode, + max_messages_in_this_batch: MessageNonce, + max_messages_weight_in_single_batch: Weight, + max_messages_size_in_single_batch: u32, + lane_source_client: impl MessageLaneSourceClient

, + lane_target_client: impl MessageLaneTargetClient

, + nonces_queue: &SourceRangesQueue< + P::SourceHeaderHash, + P::SourceHeaderNumber, + MessageDetailsMap, + >, + nonces_queue_range: Range, +) -> Option { + let mut hard_selected_count = 0; + let mut soft_selected_count = 0; + + let mut selected_weight: Weight = 0; + let mut selected_unpaid_weight: Weight = 0; + let mut selected_size: u32 = 0; + let mut selected_count: MessageNonce = 0; + + let mut total_reward = P::SourceChainBalance::zero(); + let mut total_confirmations_cost = P::SourceChainBalance::zero(); + let mut total_cost = P::SourceChainBalance::zero(); + + // technically, multiple confirmations will be delivered in a single transaction, + // meaning less loses for relayer. But here we don't know the final relayer yet, so + // we're adding a separate transaction for every message. Normally, this cost is covered + // by the message sender. Probably reconsider this? + let confirmation_transaction_cost = if relayer_mode != RelayerMode::Altruistic { + lane_source_client.estimate_confirmation_transaction().await + } else { + Zero::zero() + }; + + let all_ready_nonces = nonces_queue + .range(nonces_queue_range.clone()) + .flat_map(|(_, ready_nonces)| ready_nonces.iter()) + .enumerate(); + for (index, (nonce, details)) in all_ready_nonces { + // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` + // and `max_messages_size_in_single_batch`, we may still try to submit transaction + // with single message if message overflows these limits. The worst case would be if + // transaction will be rejected by the target runtime, but at least we have tried. + + // limit messages in the batch by weight + let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) { + Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => { + new_selected_weight + } + new_selected_weight if selected_count == 0 => { + log::warn!( + target: "bridge", + "Going to submit message delivery transaction with declared dispatch \ + weight {:?} that overflows maximal configured weight {}", + new_selected_weight, + max_messages_weight_in_single_batch, + ); + new_selected_weight.unwrap_or(Weight::MAX) + } + _ => break, + }; + + // limit messages in the batch by size + let new_selected_size = match selected_size.checked_add(details.size) { + Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => new_selected_size, + new_selected_size if selected_count == 0 => { + log::warn!( + target: "bridge", + "Going to submit message delivery transaction with message \ + size {:?} that overflows maximal configured size {}", + new_selected_size, + max_messages_size_in_single_batch, + ); + new_selected_size.unwrap_or(u32::MAX) + } + _ => break, + }; + + // limit number of messages in the batch + let new_selected_count = selected_count + 1; + if new_selected_count > max_messages_in_this_batch { + break; + } + + // If dispatch fee has been paid at the source chain, it means that it is **relayer** who's + // paying for dispatch at the target chain AND reward must cover this dispatch fee. + // + // If dispatch fee is paid at the target chain, it means that it'll be withdrawn from the + // dispatch origin account AND reward is not covering this fee. + // + // So in the latter case we're not adding the dispatch weight to the delivery transaction weight. 
+ let new_selected_unpaid_weight = match details.dispatch_fee_payment { + DispatchFeePayment::AtSourceChain => selected_unpaid_weight.saturating_add(details.dispatch_weight), + DispatchFeePayment::AtTargetChain => selected_unpaid_weight, + }; + + // now the message has passed all 'strong' checks, and we CAN deliver it. But do we WANT + // to deliver it? It depends on the relayer strategy. + match relayer_mode { + RelayerMode::Altruistic => { + soft_selected_count = index + 1; + } + RelayerMode::NoLosses => { + let delivery_transaction_cost = lane_target_client + .estimate_delivery_transaction_in_source_tokens( + 0..=(new_selected_count as MessageNonce - 1), + new_selected_unpaid_weight, + new_selected_size as u32, + ) + .await; + + // if it is the first message that makes reward less than cost, let's log it + // if this message makes batch profitable again, let's log it + let is_total_reward_less_than_cost = total_reward < total_cost; + let prev_total_cost = total_cost; + let prev_total_reward = total_reward; + total_confirmations_cost = total_confirmations_cost.saturating_add(&confirmation_transaction_cost); + total_reward = total_reward.saturating_add(&details.reward); + total_cost = total_confirmations_cost.saturating_add(&delivery_transaction_cost); + if !is_total_reward_less_than_cost && total_reward < total_cost { + log::debug!( + target: "bridge", + "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it larger than \ + total reward {:?}->{:?}", + nonce, + details.reward, + prev_total_cost, + total_cost, + prev_total_reward, + total_reward, + ); + } else if is_total_reward_less_than_cost && total_reward >= total_cost { + log::debug!( + target: "bridge", + "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it less than or \ + equal to the total reward {:?}->{:?} (again)", + nonce, + details.reward, + prev_total_cost, + total_cost, + prev_total_reward, + total_reward, + ); + } + + // NoLosses relayer never want to lose his funds + if total_reward >= total_cost { + soft_selected_count = index + 1; + } + } + } + + hard_selected_count = index + 1; + selected_weight = new_selected_weight; + selected_unpaid_weight = new_selected_unpaid_weight; + selected_size = new_selected_size; + selected_count = new_selected_count; + } + + let hard_selected_begin_nonce = nonces_queue[nonces_queue_range.start].1.begin(); + if hard_selected_count != soft_selected_count { + let hard_selected_end_nonce = hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1; + let soft_selected_begin_nonce = hard_selected_begin_nonce; + let soft_selected_end_nonce = soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1; + log::warn!( + target: "bridge", + "Relayer may deliver nonces [{:?}; {:?}], but because of its strategy ({:?}) it has selected \ + nonces [{:?}; {:?}].", + hard_selected_begin_nonce, + hard_selected_end_nonce, + relayer_mode, + soft_selected_begin_nonce, + soft_selected_end_nonce, + ); + + hard_selected_count = soft_selected_count; + } + + if hard_selected_count != 0 { + Some(hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1) + } else { + None + } +} + +impl NoncesRange for MessageDetailsMap { fn begin(&self) -> MessageNonce { self.keys().next().cloned().unwrap_or_default() } @@ -576,12 +753,50 @@ impl NoncesRange for MessageWeightsMap { mod tests { use super::*; use crate::message_lane_loop::{ - tests::{header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderId, TestTargetHeaderId}, - MessageWeights, 
+ tests::{ + header_id, TestMessageLane, TestMessagesProof, TestSourceChainBalance, TestSourceClient, + TestSourceHeaderId, TestTargetClient, TestTargetHeaderId, BASE_MESSAGE_DELIVERY_TRANSACTION_COST, + CONFIRMATION_TRANSACTION_COST, + }, + MessageDetails, }; + use bp_runtime::messages::DispatchFeePayment::*; + + const DEFAULT_DISPATCH_WEIGHT: Weight = 1; + const DEFAULT_SIZE: u32 = 1; + const DEFAULT_REWARD: TestSourceChainBalance = CONFIRMATION_TRANSACTION_COST + + BASE_MESSAGE_DELIVERY_TRANSACTION_COST + + DEFAULT_DISPATCH_WEIGHT + + (DEFAULT_SIZE as TestSourceChainBalance); type TestRaceState = RaceState; - type TestStrategy = MessageDeliveryStrategy; + type TestStrategy = MessageDeliveryStrategy; + + fn source_nonces( + new_nonces: RangeInclusive, + confirmed_nonce: MessageNonce, + reward: TestSourceChainBalance, + dispatch_fee_payment: DispatchFeePayment, + ) -> SourceClientNonces> { + SourceClientNonces { + new_nonces: new_nonces + .into_iter() + .map(|nonce| { + ( + nonce, + MessageDetails { + dispatch_weight: DEFAULT_DISPATCH_WEIGHT, + size: DEFAULT_SIZE, + reward, + dispatch_fee_payment, + }, + ) + }) + .into_iter() + .collect(), + confirmed_nonce: Some(confirmed_nonce), + } + } fn prepare_strategy() -> (TestRaceState, TestStrategy) { let mut race_state = RaceState { @@ -594,12 +809,15 @@ mod tests { }; let mut race_strategy = TestStrategy { + relayer_mode: RelayerMode::Altruistic, max_unrewarded_relayer_entries_at_target: 4, max_unconfirmed_nonces_at_target: 4, max_messages_in_single_batch: 4, max_messages_weight_in_single_batch: 4, max_messages_size_in_single_batch: 4, latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), + lane_source_client: TestSourceClient::default(), + lane_target_client: TestTargetClient::default(), target_nonces: Some(TargetClientNonces { latest_nonce: 19, nonces_data: DeliveryRaceTargetNoncesData { @@ -614,20 +832,9 @@ mod tests { strategy: BasicStrategy::new(), }; - race_strategy.strategy.source_nonces_updated( - header_id(1), - SourceClientNonces { - new_nonces: vec![ - (20, MessageWeights { weight: 1, size: 1 }), - (21, MessageWeights { weight: 1, size: 1 }), - (22, MessageWeights { weight: 1, size: 1 }), - (23, MessageWeights { weight: 1, size: 1 }), - ] - .into_iter() - .collect(), - confirmed_nonce: Some(19), - }, - ); + race_strategy + .strategy + .source_nonces_updated(header_id(1), source_nonces(20..=23, 19, DEFAULT_REWARD, AtSourceChain)); let target_nonces = TargetClientNonces { latest_nonce: 19, @@ -652,14 +859,16 @@ mod tests { #[test] fn weights_map_works_as_nonces_range() { - fn build_map(range: RangeInclusive) -> MessageWeightsMap { + fn build_map(range: RangeInclusive) -> MessageDetailsMap { range .map(|idx| { ( idx, - MessageWeights { - weight: idx, + MessageDetails { + dispatch_weight: idx, size: idx as _, + reward: idx as _, + dispatch_fee_payment: AtSourceChain, }, ) }) @@ -678,19 +887,19 @@ mod tests { assert_eq!(map.greater_than(30), None); } - #[test] - fn message_delivery_strategy_selects_messages_to_deliver() { + #[async_std::test] + async fn message_delivery_strategy_selects_messages_to_deliver() { let (state, mut strategy) = prepare_strategy(); // both sides are ready to relay new messages assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=23), proof_parameters(false, 4))) ); } - #[test] - fn message_delivery_strategy_selects_nothing_if_too_many_confirmations_missing() { + #[async_std::test] + async fn 
message_delivery_strategy_selects_nothing_if_too_many_confirmations_missing() { let (state, mut strategy) = prepare_strategy(); // if there are already `max_unconfirmed_nonces_at_target` messages on target, @@ -701,11 +910,11 @@ mod tests { )] .into_iter() .collect(); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); + assert_eq!(strategy.select_nonces_to_deliver(state).await, None); } - #[test] - fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() { + #[async_std::test] + async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() { let (state, mut strategy) = prepare_strategy(); // if there are new confirmed nonces on source, we want to relay this information @@ -713,13 +922,13 @@ mod tests { let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=23), proof_parameters(true, 4))) ); } - #[test] - fn message_delivery_strategy_selects_nothing_if_there_are_too_many_unrewarded_relayers() { + #[async_std::test] + async fn message_delivery_strategy_selects_nothing_if_there_are_too_many_unrewarded_relayers() { let (state, mut strategy) = prepare_strategy(); // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, @@ -729,11 +938,12 @@ mod tests { unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; unrewarded_relayers.messages_in_oldest_entry = 4; } - assert_eq!(strategy.select_nonces_to_deliver(&state), None); + assert_eq!(strategy.select_nonces_to_deliver(state).await, None); } - #[test] - fn message_delivery_strategy_selects_nothing_if_proved_rewards_is_not_enough_to_remove_oldest_unrewarded_entry() { + #[async_std::test] + async fn message_delivery_strategy_selects_nothing_if_proved_rewards_is_not_enough_to_remove_oldest_unrewarded_entry( + ) { let (state, mut strategy) = prepare_strategy(); // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, @@ -746,11 +956,11 @@ mod tests { unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; unrewarded_relayers.messages_in_oldest_entry = 4; } - assert_eq!(strategy.select_nonces_to_deliver(&state), None); + assert_eq!(strategy.select_nonces_to_deliver(state).await, None); } - #[test] - fn message_delivery_strategy_includes_outbound_state_proof_if_proved_rewards_is_enough() { + #[async_std::test] + async fn message_delivery_strategy_includes_outbound_state_proof_if_proved_rewards_is_enough() { let (state, mut strategy) = prepare_strategy(); // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, @@ -764,73 +974,77 @@ mod tests { unrewarded_relayers.messages_in_oldest_entry = 3; } assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=23), proof_parameters(true, 4))) ); } - #[test] - fn message_delivery_strategy_limits_batch_by_messages_weight() { + #[async_std::test] + async fn message_delivery_strategy_limits_batch_by_messages_weight() { let (state, mut strategy) = prepare_strategy(); // not all queued messages may fit in the batch, because batch has max weight strategy.max_messages_weight_in_single_batch = 3; assert_eq!( - 
strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) ); } - #[test] - fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() { + #[async_std::test] + async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() { let (state, mut strategy) = prepare_strategy(); // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().weight = 10; + strategy.strategy.source_queue_mut()[0] + .1 + .get_mut(&20) + .unwrap() + .dispatch_weight = 10; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=20), proof_parameters(false, 10))) ); } - #[test] - fn message_delivery_strategy_limits_batch_by_messages_size() { + #[async_std::test] + async fn message_delivery_strategy_limits_batch_by_messages_size() { let (state, mut strategy) = prepare_strategy(); // not all queued messages may fit in the batch, because batch has max weight strategy.max_messages_size_in_single_batch = 3; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) ); } - #[test] - fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() { + #[async_std::test] + async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() { let (state, mut strategy) = prepare_strategy(); // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=20), proof_parameters(false, 1))) ); } - #[test] - fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { + #[async_std::test] + async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { let (state, mut strategy) = prepare_strategy(); // not all queued messages may fit in the batch, because batch has max number of messages limit strategy.max_messages_in_single_batch = 3; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) ); } - #[test] - fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() { + #[async_std::test] + async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() { let (state, mut strategy) = prepare_strategy(); // 1 delivery confirmation from target to source is still missing, so we may only @@ -841,13 +1055,13 @@ mod tests { .collect(); strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) ); } - #[test] - fn message_delivery_strategy_waits_for_confirmed_nonce_header_to_appear_on_target() { + #[async_std::test] + async fn message_delivery_strategy_waits_for_confirmed_nonce_header_to_appear_on_target() { // 1 delivery confirmation from target to source is still missing, so we may deliver // reward confirmation 
with our message delivery transaction. But the problem is that // the reward has been paid at header 2 && this header is still unknown to target node. @@ -864,7 +1078,7 @@ mod tests { strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) ); @@ -881,13 +1095,13 @@ mod tests { state.best_finalized_source_header_id_at_source = Some(header_id(2)); state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state).await, Some(((20..=23), proof_parameters(true, 4))) ); } - #[test] - fn source_header_is_requied_when_confirmations_are_required() { + #[async_std::test] + async fn source_header_is_required_when_confirmations_are_required() { // let's prepare situation when: // - all messages [20; 23] have been generated at source block#1; let (mut state, mut strategy) = prepare_strategy(); @@ -895,7 +1109,7 @@ mod tests { // relayers vector capacity; strategy.max_unconfirmed_nonces_at_target = 2; assert_eq!( - strategy.select_nonces_to_deliver(&state), + strategy.select_nonces_to_deliver(state.clone()).await, Some(((20..=21), proof_parameters(false, 2))) ); strategy.finalized_target_nonces_updated( @@ -912,12 +1126,12 @@ mod tests { }, &mut state, ); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); + assert_eq!(strategy.select_nonces_to_deliver(state).await, None); // - messages [1; 10] receiving confirmation has been delivered at source block#2; strategy.source_nonces_updated( header_id(2), SourceClientNonces { - new_nonces: BTreeMap::new(), + new_nonces: MessageDetailsMap::new(), confirmed_nonce: Some(21), }, ); @@ -927,4 +1141,107 @@ mod tests { Some(header_id(2)) ); } + + #[async_std::test] + async fn no_losses_relayer_is_delivering_messages_if_cost_is_equal_to_reward() { + let (state, mut strategy) = prepare_strategy(); + strategy.relayer_mode = RelayerMode::NoLosses; + + // so now we have: + // - 20..=23 with reward = cost + // => strategy shall select all 20..=23 + assert_eq!( + strategy.select_nonces_to_deliver(state).await, + Some(((20..=23), proof_parameters(false, 4))) + ); + } + + #[async_std::test] + async fn no_losses_relayer_is_not_delivering_messages_if_cost_is_larger_than_reward() { + let (mut state, mut strategy) = prepare_strategy(); + let nonces = source_nonces( + 24..=25, + 19, + DEFAULT_REWARD - BASE_MESSAGE_DELIVERY_TRANSACTION_COST, + AtSourceChain, + ); + strategy.strategy.source_nonces_updated(header_id(2), nonces); + state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); + strategy.relayer_mode = RelayerMode::NoLosses; + + // so now we have: + // - 20..=23 with reward = cost + // - 24..=25 with reward less than cost + // => strategy shall only select 20..=23 + assert_eq!( + strategy.select_nonces_to_deliver(state).await, + Some(((20..=23), proof_parameters(false, 4))) + ); + } + + #[async_std::test] + async fn no_losses_relayer_is_delivering_unpaid_messages() { + async fn test_with_dispatch_fee_payment( + dispatch_fee_payment: DispatchFeePayment, + ) -> Option<(RangeInclusive, MessageProofParameters)> { + let (mut state, mut strategy) = prepare_strategy(); + let nonces = source_nonces( + 24..=24, + 19, + DEFAULT_REWARD - DEFAULT_DISPATCH_WEIGHT, + 
dispatch_fee_payment, + ); + strategy.strategy.source_nonces_updated(header_id(2), nonces); + state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); + strategy.max_unrewarded_relayer_entries_at_target = 100; + strategy.max_unconfirmed_nonces_at_target = 100; + strategy.max_messages_in_single_batch = 100; + strategy.max_messages_weight_in_single_batch = 100; + strategy.max_messages_size_in_single_batch = 100; + strategy.relayer_mode = RelayerMode::NoLosses; + + // so now we have: + // - 20..=23 with reward = cost + // - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT` from the + // cost, so it should be fine; + // => when MSG#24 fee is paid at the target chain, strategy shall select all 20..=24 + // => when MSG#25 fee is paid at the source chain, strategy shall only select 20..=23 + strategy.select_nonces_to_deliver(state).await + } + + assert_eq!( + test_with_dispatch_fee_payment(AtTargetChain).await, + Some(((20..=24), proof_parameters(false, 5))) + ); + assert_eq!( + test_with_dispatch_fee_payment(AtSourceChain).await, + Some(((20..=23), proof_parameters(false, 4))) + ); + } + + #[async_std::test] + async fn relayer_uses_flattened_view_of_the_source_queue_to_select_nonces() { + // Real scenario that has happened on test deployments: + // 1) relayer witnessed M1 at block 1 => it has separate entry in the `source_queue` + // 2) relayer witnessed M2 at block 2 => it has separate entry in the `source_queue` + // 3) if block 2 is known to the target node, then both M1 and M2 are selected for single delivery, + // even though weight(M1+M2) > larger than largest allowed weight + // + // This was happening because selector (`select_nonces_for_delivery_transaction`) has been called + // for every `source_queue` entry separately without preserving any context. + let (mut state, mut strategy) = prepare_strategy(); + let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, AtSourceChain); + strategy.strategy.source_nonces_updated(header_id(2), nonces); + strategy.max_unrewarded_relayer_entries_at_target = 100; + strategy.max_unconfirmed_nonces_at_target = 100; + strategy.max_messages_in_single_batch = 5; + strategy.max_messages_weight_in_single_batch = 100; + strategy.max_messages_size_in_single_batch = 100; + state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); + + assert_eq!( + strategy.select_nonces_to_deliver(state).await, + Some(((20..=24), proof_parameters(false, 5))) + ); + } } diff --git a/bridges/relays/messages/src/message_race_loop.rs b/bridges/relays/messages/src/message_race_loop.rs index 41f5ede1033a..3b427a2d0e27 100644 --- a/bridges/relays/messages/src/message_race_loop.rs +++ b/bridges/relays/messages/src/message_race_loop.rs @@ -143,6 +143,7 @@ pub trait TargetClient { } /// Race strategy. +#[async_trait] pub trait RaceStrategy: Debug { /// Type of nonces range expected from the source client. type SourceNoncesRange: NoncesRange; @@ -182,14 +183,14 @@ pub trait RaceStrategy: Debug { /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated /// data) from source to target node. /// Additionally, parameters required to generate proof are returned. - fn select_nonces_to_deliver( + async fn select_nonces_to_deliver( &mut self, - race_state: &RaceState, + race_state: RaceState, ) -> Option<(RangeInclusive, Self::ProofParameters)>; } /// State of the race. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct RaceState { /// Best finalized source header id at the source client. 
pub best_finalized_source_header_id_at_source: Option, @@ -438,7 +439,7 @@ pub async fn run, TC: TargetClient

>( if source_client_is_online { source_client_is_online = false; - let nonces_to_deliver = select_nonces_to_deliver(&race_state, &mut strategy); + let nonces_to_deliver = select_nonces_to_deliver(race_state.clone(), &mut strategy).await; let best_at_source = strategy.best_at_source(); if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver { @@ -554,27 +555,25 @@ where now_time } -fn select_nonces_to_deliver( - race_state: &RaceState, +async fn select_nonces_to_deliver( + race_state: RaceState, strategy: &mut Strategy, ) -> Option<(SourceHeaderId, RangeInclusive, Strategy::ProofParameters)> where SourceHeaderId: Clone, Strategy: RaceStrategy, { - race_state - .best_finalized_source_header_id_at_best_target - .as_ref() - .and_then(|best_finalized_source_header_id_at_best_target| { - strategy - .select_nonces_to_deliver(&race_state) - .map(|(nonces_range, proof_parameters)| { - ( - best_finalized_source_header_id_at_best_target.clone(), - nonces_range, - proof_parameters, - ) - }) + let best_finalized_source_header_id_at_best_target = + race_state.best_finalized_source_header_id_at_best_target.clone()?; + strategy + .select_nonces_to_deliver(race_state) + .await + .map(|(nonces_range, proof_parameters)| { + ( + best_finalized_source_header_id_at_best_target, + nonces_range, + proof_parameters, + ) }) } @@ -584,8 +583,8 @@ mod tests { use crate::message_race_strategy::BasicStrategy; use relay_utils::HeaderId; - #[test] - fn proof_is_generated_at_best_block_known_to_target_node() { + #[async_std::test] + async fn proof_is_generated_at_best_block_known_to_target_node() { const GENERATED_AT: u64 = 6; const BEST_AT_SOURCE: u64 = 10; const BEST_AT_TARGET: u64 = 8; @@ -620,7 +619,7 @@ mod tests { // the proof will be generated on source, but using BEST_AT_TARGET block assert_eq!( - select_nonces_to_deliver(&race_state, &mut strategy), + select_nonces_to_deliver(race_state, &mut strategy).await, Some((HeaderId(BEST_AT_TARGET, BEST_AT_TARGET), 6..=10, (),)) ); } diff --git a/bridges/relays/messages/src/message_race_strategy.rs b/bridges/relays/messages/src/message_race_strategy.rs index 7088f8d74b55..ed4a276e1429 100644 --- a/bridges/relays/messages/src/message_race_strategy.rs +++ b/bridges/relays/messages/src/message_race_strategy.rs @@ -19,10 +19,15 @@ use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces}; +use async_trait::async_trait; use bp_messages::MessageNonce; use relay_utils::HeaderId; use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, ops::RangeInclusive}; +/// Queue of nonces known to the source node. +pub type SourceRangesQueue = + VecDeque<(HeaderId, SourceNoncesRange)>; + /// Nonces delivery strategy. #[derive(Debug)] pub struct BasicStrategy< @@ -34,7 +39,7 @@ pub struct BasicStrategy< Proof, > { /// All queued nonces. - source_queue: VecDeque<(HeaderId, SourceNoncesRange)>, + source_queue: SourceRangesQueue, /// Best nonce known to target node (at its best block). `None` if it has not been received yet. best_target_nonce: Option, /// Unused generic types dump. @@ -57,6 +62,13 @@ where } } + /// Reference to source queue. + pub(crate) fn source_queue( + &self, + ) -> &VecDeque<(HeaderId, SourceNoncesRange)> { + &self.source_queue + } + /// Mutable reference to source queue to use in tests. 
#[cfg(test)] pub(crate) fn source_queue_mut( @@ -65,25 +77,21 @@ where &mut self.source_queue } - /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated - /// data) from source to target node. + /// Returns index of the latest source queue entry, that may be delivered to the target node. /// - /// The `selector` function receives range of nonces and should return `None` if the whole - /// range needs to be delivered. If there are some nonces in the range that can't be delivered - /// right now, it should return `Some` with 'undeliverable' nonces. Please keep in mind that - /// this should be the sub-range that the passed range ends with, because nonces are always - /// delivered in-order. Otherwise the function will panic. - pub fn select_nonces_to_deliver_with_selector( - &mut self, - race_state: &RaceState< + /// Returns `None` if no entries may be delivered. All entries before and including the `Some(_)` + /// index are guaranteed to be witnessed at source blocks that are known to be finalized at the + /// target node. + pub fn maximal_available_source_queue_index( + &self, + race_state: RaceState< HeaderId, HeaderId, Proof, >, - mut selector: impl FnMut(SourceNoncesRange) -> Option, - ) -> Option> { + ) -> Option { // if we do not know best nonce at target node, we can't select anything - let target_nonce = self.best_target_nonce?; + let _ = self.best_target_nonce?; // if we have already selected nonces that we want to submit, do nothing if race_state.nonces_to_submit.is_some() { @@ -99,60 +107,40 @@ where // 2) we can't deliver new nonce until header, that has emitted this nonce, is finalized // by target client // 3) selector is used for more complicated logic - let best_header_at_target = &race_state.best_finalized_source_header_id_at_best_target.as_ref()?; - let mut nonces_end = None; + // + // => let's first select range of entries inside deque that are already finalized at + // the target client and pass this range to the selector + let best_header_at_target = race_state.best_finalized_source_header_id_at_best_target?; + self.source_queue + .iter() + .enumerate() + .take_while(|(_, (queued_at, _))| queued_at.0 <= best_header_at_target.0) + .map(|(index, _)| index) + .last() + } + + /// Remove all nonces that are less than or equal to given nonce from the source queue. + pub fn remove_le_nonces_from_source_queue(&mut self, nonce: MessageNonce) { while let Some((queued_at, queued_range)) = self.source_queue.pop_front() { - // select (sub) range to deliver - let queued_range_begin = queued_range.begin(); - let queued_range_end = queued_range.end(); - let range_to_requeue = if queued_at.0 > best_header_at_target.0 { - // if header that has queued the range is not yet finalized at bridged chain, - // we can't prove anything - Some(queued_range) - } else { - // selector returns `Some(range)` if this `range` needs to be requeued - selector(queued_range) - }; - - // requeue (sub) range and update range to deliver - match range_to_requeue { - Some(range_to_requeue) => { - assert!( - range_to_requeue.begin() <= range_to_requeue.end() - && range_to_requeue.begin() >= queued_range_begin - && range_to_requeue.end() == queued_range_end, - "Incorrect implementation of internal `selector` function. 
Expected original \
-						range {:?} to end with returned range {:?}",
-						queued_range_begin..=queued_range_end,
-						range_to_requeue,
-					);
-
-					if range_to_requeue.begin() != queued_range_begin {
-						nonces_end = Some(range_to_requeue.begin() - 1);
-					}
-					self.source_queue.push_front((queued_at, range_to_requeue));
-					break;
-				}
-				None => {
-					nonces_end = Some(queued_range_end);
-				}
+			if let Some(range_to_requeue) = queued_range.greater_than(nonce) {
+				self.source_queue.push_front((queued_at, range_to_requeue));
+				break;
+			}
 			}
 		}
-
-		nonces_end.map(|nonces_end| RangeInclusive::new(target_nonce + 1, nonces_end))
 	}
 }
 
+#[async_trait]
 impl RaceStrategy, HeaderId, Proof> for BasicStrategy
 where
-	SourceHeaderHash: Clone + Debug,
-	SourceHeaderNumber: Clone + Ord + Debug,
-	SourceNoncesRange: NoncesRange + Debug,
-	TargetHeaderHash: Debug,
-	TargetHeaderNumber: Debug,
-	Proof: Debug,
+	SourceHeaderHash: Clone + Debug + Send,
+	SourceHeaderNumber: Clone + Ord + Debug + Send,
+	SourceNoncesRange: NoncesRange + Debug + Send,
+	TargetHeaderHash: Debug + Send,
+	TargetHeaderNumber: Debug + Send,
+	Proof: Debug + Send,
 {
 	type SourceNoncesRange = SourceNoncesRange;
 	type ProofParameters = ();
@@ -271,16 +259,19 @@ where
 		));
 	}
 
-	fn select_nonces_to_deliver(
+	async fn select_nonces_to_deliver(
 		&mut self,
-		race_state: &RaceState<
+		race_state: RaceState<
 			HeaderId,
 			HeaderId,
 			Proof,
 		>,
 	) -> Option<(RangeInclusive, Self::ProofParameters)> {
-		self.select_nonces_to_deliver_with_selector(race_state, |_| None)
-			.map(|range| (range, ()))
+		let maximal_source_queue_index = self.maximal_available_source_queue_index(race_state)?;
+		let range_begin = self.source_queue[0].1.begin();
+		let range_end = self.source_queue[maximal_source_queue_index].1.end();
+		self.remove_le_nonces_from_source_queue(range_end);
+		Some((range_begin..=range_end, ()))
 	}
 }
 
@@ -288,7 +279,9 @@ where
 mod tests {
 	use super::*;
 	use crate::message_lane::MessageLane;
-	use crate::message_lane_loop::tests::{header_id, TestMessageLane, TestMessagesProof};
+	use crate::message_lane_loop::tests::{
+		header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, TestSourceHeaderNumber,
+	};
 
 	type SourceNoncesRange = RangeInclusive;
 
@@ -318,9 +311,9 @@ mod tests {
 	#[test]
 	fn strategy_is_empty_works() {
 		let mut strategy = BasicStrategy::::new();
-		assert_eq!(strategy.is_empty(), true);
+		assert!(strategy.is_empty());
 		strategy.source_nonces_updated(header_id(1), source_nonces(1..=1));
-		assert_eq!(strategy.is_empty(), false);
+		assert!(!strategy.is_empty());
 	}
 
 	#[test]
@@ -396,28 +389,28 @@ mod tests {
 		assert!(state.nonces_submitted.is_none());
 	}
 
-	#[test]
-	fn nothing_is_selected_if_something_is_already_selected() {
+	#[async_std::test]
+	async fn nothing_is_selected_if_something_is_already_selected() {
 		let mut state = RaceState::default();
 		let mut strategy = BasicStrategy::::new();
 		state.nonces_to_submit = Some((header_id(1), 1..=10, (1..=10, None)));
 		strategy.best_target_nonces_updated(target_nonces(0), &mut state);
 		strategy.source_nonces_updated(header_id(1), source_nonces(1..=10));
-		assert_eq!(strategy.select_nonces_to_deliver(&state), None);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 	}
 
-	#[test]
-	fn nothing_is_selected_if_something_is_already_submitted() {
+	#[async_std::test]
+	async fn nothing_is_selected_if_something_is_already_submitted() {
 		let mut state = RaceState::default();
 		let mut strategy = BasicStrategy::::new();
 		state.nonces_submitted = Some(1..=10);
 		strategy.best_target_nonces_updated(target_nonces(0), &mut state);
 		strategy.source_nonces_updated(header_id(1), source_nonces(1..=10));
-		assert_eq!(strategy.select_nonces_to_deliver(&state), None);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 	}
 
-	#[test]
-	fn select_nonces_to_deliver_works() {
+	#[async_std::test]
+	async fn select_nonces_to_deliver_works() {
 		let mut state = RaceState::<_, _, TestMessagesProof>::default();
 		let mut strategy = BasicStrategy::::new();
 		strategy.best_target_nonces_updated(target_nonces(0), &mut state);
@@ -427,62 +420,75 @@ mod tests {
 		strategy.source_nonces_updated(header_id(5), source_nonces(7..=8));
 
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(4));
-		assert_eq!(strategy.select_nonces_to_deliver(&state), Some((1..=6, ())));
+		assert_eq!(
+			strategy.select_nonces_to_deliver(state.clone()).await,
+			Some((1..=6, ()))
+		);
 
 		strategy.best_target_nonces_updated(target_nonces(6), &mut state);
-		assert_eq!(strategy.select_nonces_to_deliver(&state), None);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(5));
-		assert_eq!(strategy.select_nonces_to_deliver(&state), Some((7..=8, ())));
+		assert_eq!(
+			strategy.select_nonces_to_deliver(state.clone()).await,
+			Some((7..=8, ()))
+		);
 
 		strategy.best_target_nonces_updated(target_nonces(8), &mut state);
-		assert_eq!(strategy.select_nonces_to_deliver(&state), None);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 	}
 
 	#[test]
-	fn select_nonces_to_deliver_able_to_split_ranges_with_selector() {
+	fn maximal_available_source_queue_index_works() {
 		let mut state = RaceState::<_, _, TestMessagesProof>::default();
 		let mut strategy = BasicStrategy::::new();
 		strategy.best_target_nonces_updated(target_nonces(0), &mut state);
-		strategy.source_nonces_updated(header_id(1), source_nonces(1..=100));
+		strategy.source_nonces_updated(header_id(1), source_nonces(1..=3));
+		strategy.source_nonces_updated(header_id(2), source_nonces(4..=6));
+		strategy.source_nonces_updated(header_id(3), source_nonces(7..=9));
+
+		state.best_finalized_source_header_id_at_best_target = Some(header_id(0));
+		assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), None);
 
-		state.best_finalized_source_header_id_at_source = Some(header_id(1));
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(1));
-		state.best_target_header_id = Some(header_id(1));
+		assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), Some(0));
 
-		assert_eq!(
-			strategy.select_nonces_to_deliver_with_selector(&state, |_| Some(50..=100)),
-			Some(1..=49),
-		);
+		state.best_finalized_source_header_id_at_best_target = Some(header_id(2));
+		assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), Some(1));
+
+		state.best_finalized_source_header_id_at_best_target = Some(header_id(3));
+		assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), Some(2));
+
+		state.best_finalized_source_header_id_at_best_target = Some(header_id(4));
+		assert_eq!(strategy.maximal_available_source_queue_index(state), Some(2));
 	}
 
-	fn run_panic_test_for_incorrect_selector(
-		invalid_selector: impl Fn(SourceNoncesRange) -> Option,
-	) {
+	#[test]
+	fn remove_le_nonces_from_source_queue_works() {
 		let mut state = RaceState::<_, _, TestMessagesProof>::default();
 		let mut strategy = BasicStrategy::::new();
-		strategy.source_nonces_updated(header_id(1), source_nonces(1..=100));
-		strategy.best_target_nonces_updated(target_nonces(50), &mut state);
-		state.best_finalized_source_header_id_at_source = Some(header_id(1));
-		state.best_finalized_source_header_id_at_best_target = Some(header_id(1));
-		state.best_target_header_id = Some(header_id(1));
-		strategy.select_nonces_to_deliver_with_selector(&state, invalid_selector);
-	}
+		strategy.best_target_nonces_updated(target_nonces(0), &mut state);
+		strategy.source_nonces_updated(header_id(1), source_nonces(1..=3));
+		strategy.source_nonces_updated(header_id(2), source_nonces(4..=6));
+		strategy.source_nonces_updated(header_id(3), source_nonces(7..=9));
+
+		fn source_queue_nonces(
+			source_queue: &SourceRangesQueue,
+		) -> Vec {
+			source_queue.iter().flat_map(|(_, range)| range.clone()).collect()
+		}
 
-	#[test]
-	#[should_panic]
-	fn select_nonces_to_deliver_panics_if_selector_returns_empty_range() {
-		#[allow(clippy::reversed_empty_ranges)]
-		run_panic_test_for_incorrect_selector(|_| Some(2..=1))
-	}
+		strategy.remove_le_nonces_from_source_queue(1);
+		assert_eq!(
+			source_queue_nonces(&strategy.source_queue),
+			vec![2, 3, 4, 5, 6, 7, 8, 9],
+		);
 
-	#[test]
-	#[should_panic]
-	fn select_nonces_to_deliver_panics_if_selector_returns_range_that_starts_before_passed_range() {
-		run_panic_test_for_incorrect_selector(|range| Some(range.begin() - 1..=*range.end()))
-	}
+		strategy.remove_le_nonces_from_source_queue(5);
+		assert_eq!(source_queue_nonces(&strategy.source_queue), vec![6, 7, 8, 9],);
 
-	#[test]
-	#[should_panic]
-	fn select_nonces_to_deliver_panics_if_selector_returns_range_with_mismatched_end() {
-		run_panic_test_for_incorrect_selector(|range| Some(range.begin()..=*range.end() + 1))
+		strategy.remove_le_nonces_from_source_queue(9);
+		assert_eq!(source_queue_nonces(&strategy.source_queue), Vec::::new(),);
+
+		strategy.remove_le_nonces_from_source_queue(100);
+		assert_eq!(source_queue_nonces(&strategy.source_queue), Vec::::new(),);
 	}
 }
diff --git a/bridges/relays/utils/src/relay_loop.rs b/bridges/relays/utils/src/relay_loop.rs
index 8fcaabe4430c..938136658bd3 100644
--- a/bridges/relays/utils/src/relay_loop.rs
+++ b/bridges/relays/utils/src/relay_loop.rs
@@ -38,7 +38,6 @@ pub trait Client: 'static + Clone + Send + Sync {
 pub fn relay_loop(source_client: SC, target_client: TC) -> Loop {
 	Loop {
 		reconnect_delay: RECONNECT_DELAY,
-		spawn_loop_task: true,
 		source_client,
 		target_client,
 		loop_metric: None,
@@ -50,7 +49,6 @@ pub fn relay_metrics(prefix: Option, params: MetricsParams) -> LoopMetri
 	LoopMetrics {
 		relay_loop: Loop {
 			reconnect_delay: RECONNECT_DELAY,
-			spawn_loop_task: true,
 			source_client: (),
 			target_client: (),
 			loop_metric: None,
@@ -65,7 +63,6 @@ pub fn relay_metrics(prefix: Option, params: MetricsParams) -> LoopMetri
 /// Generic relay loop.
 pub struct Loop {
 	reconnect_delay: Duration,
-	spawn_loop_task: bool,
 	source_client: SC,
 	target_client: TC,
 	loop_metric: Option,
@@ -87,23 +84,11 @@ impl Loop {
 		self
 	}
 
-	/// Set spawn-dedicated-loop-task flag.
-	///
-	/// If `true` (default), separate async task is spawned to run relay loop. This is the default
-	/// behavior for all loops. If `false`, then loop is executed as a part of the current
-	/// task. The `false` is used for on-demand tasks, which are cancelled from time to time
-	/// and there's already a dedicated on-demand task for running such loops.
-	pub fn spawn_loop_task(mut self, spawn_loop_task: bool) -> Self {
-		self.spawn_loop_task = spawn_loop_task;
-		self
-	}
-
 	/// Start building loop metrics using given prefix.
 	pub fn with_metrics(self, prefix: Option, params: MetricsParams) -> LoopMetrics {
 		LoopMetrics {
 			relay_loop: Loop {
 				reconnect_delay: self.reconnect_delay,
-				spawn_loop_task: self.spawn_loop_task,
 				source_client: self.source_client,
 				target_client: self.target_client,
 				loop_metric: None,
@@ -128,7 +113,6 @@ impl Loop {
 		TC: 'static + Client,
 		LM: 'static + Send + Clone,
 	{
-		let spawn_loop_task = self.spawn_loop_task;
 		let run_loop_task = async move {
 			crate::initialize::initialize_loop(loop_name);
 
@@ -156,11 +140,7 @@ impl Loop {
 			Ok(())
 		};
 
-		if spawn_loop_task {
-			async_std::task::spawn(run_loop_task).await
-		} else {
-			run_loop_task.await
-		}
+		async_std::task::spawn(run_loop_task).await
 	}
 }
 
@@ -236,7 +216,6 @@ impl LoopMetrics {
 
 		Ok(Loop {
 			reconnect_delay: self.relay_loop.reconnect_delay,
-			spawn_loop_task: self.relay_loop.spawn_loop_task,
 			source_client: self.relay_loop.source_client,
 			target_client: self.relay_loop.target_client,
 			loop_metric: self.loop_metric,
diff --git a/bridges/scripts/update-weights-setup.sh b/bridges/scripts/update-weights-setup.sh
new file mode 100644
index 000000000000..72534423d63c
--- /dev/null
+++ b/bridges/scripts/update-weights-setup.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -exu
+
+# Set up the standardized machine and run `update-weights.sh` script.
+# The system is assumed to be pristine Ubuntu 20.04 and we install
+# all required dependencies.
+
+# To avoid interruptions you might want to run this script in `screen` cause it will take a while
+# to finish.
+
+# We start off with upgrading the system
+apt update && apt dist-upgrade
+
+# and installing `git` and other required deps.
+apt install -y git clang curl libssl-dev llvm libudev-dev screen
+
+# Now we clone the repository
+git clone https://github.com/paritytech/parity-bridges-common.git
+cd parity-bridges-common
+
+# Install rustup & toolchain
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y
+
+# Source config
+source ~/.cargo/env
+
+# Add nightly and WASM
+rustup install nightly
+rustup target add wasm32-unknown-unknown --toolchain nightly
+
+# Update the weights
+./scripts/update-weights.sh