diff --git a/.github/workflows/extrinsic-ordering-check-from-bin.yml b/.github/workflows/extrinsic-ordering-check-from-bin.yml index 0d2ffc6f2b18..199b3be6fe66 100644 --- a/.github/workflows/extrinsic-ordering-check-from-bin.yml +++ b/.github/workflows/extrinsic-ordering-check-from-bin.yml @@ -6,7 +6,7 @@ on: inputs: reference_url: description: The WebSocket url of the reference node - default: wss://rpc.polkadot.io + default: wss://kusama-rpc.polkadot.io required: true binary_url: description: A url to a Linux binary for the node containing the runtime to test @@ -14,7 +14,7 @@ on: required: true chain: description: The name of the chain under test. Usually, you would pass a local chain - default: polkadot-local + default: kusama-local required: true jobs: @@ -27,6 +27,8 @@ jobs: REF_URL: ${{github.event.inputs.reference_url}} steps: + - uses: actions/checkout@v2 + - name: Fetch binary run: | echo Fetching $BIN_URL @@ -46,17 +48,26 @@ jobs: echo "Date: $(date)" >> output.txt echo "Reference: $REF_URL" >> output.txt echo "Target version: $VERSION" >> output.txt - echo "-------------------------------------------" >> output.txt + echo "Chain: $CHAIN" >> output.txt + echo "----------------------------------------------------------------------" >> output.txt + + - name: Pull polkadot-js-tools image + run: docker pull jacogr/polkadot-js-tools - name: Compare the metadata run: | - CMD="docker run --network host jacogr/polkadot-js-tools metadata $REF_URL ws://localhost:9944" + CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata $REF_URL ws://localhost:9944" echo -e "Running:\n$CMD" $CMD >> output.txt sed -z -i 's/\n\n/\n/g' output.txt + cat output.txt | egrep -n -i '' + SUMMARY=$(./scripts/github/extrinsic-ordering-filter.sh output.txt) + echo -e $SUMMARY + echo -e $SUMMARY >> output.txt - name: Show result - run: cat output.txt + run: | + cat output.txt - name: Stop our local node run: pkill polkadot diff --git 
a/.github/workflows/honggfuzz.yml b/.github/workflows/honggfuzz.yml index ab1cdf0d2ef2..c04e6c12b4de 100644 --- a/.github/workflows/honggfuzz.yml +++ b/.github/workflows/honggfuzz.yml @@ -5,6 +5,45 @@ on: - cron: '0 0 * * *' jobs: + xcm-fuzzer: + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v2 + with: + fetch-depth: 1 + + - name: Install minimal stable Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Install minimal nightly Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + target: wasm32-unknown-unknown + + - name: Install honggfuzz deps + run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev + + - name: Install honggfuzz + uses: actions-rs/cargo@v1 + with: + command: install + args: honggfuzz --version "0.5.54" + + - name: Build fuzzer binaries + working-directory: xcm/xcm-simulator/fuzzer/ + run: cargo hfuzz build + + - name: Run fuzzer + working-directory: xcm/xcm-simulator/fuzzer/ + run: bash $GITHUB_WORKSPACE/scripts/github/run_fuzzer.sh xcm-fuzzer + erasure-coding-round-trip: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/publish-draft-release.yml b/.github/workflows/publish-draft-release.yml index 5b1855c32cc3..e3c995e9bce1 100644 --- a/.github/workflows/publish-draft-release.yml +++ b/.github/workflows/publish-draft-release.yml @@ -55,7 +55,6 @@ jobs: with: name: ${{ matrix.runtime }}-runtime path: | - ${{ steps.srtool_build.outputs.wasm }} ${{ steps.srtool_build.outputs.wasm_compressed }} publish-draft-release: @@ -114,15 +113,6 @@ jobs: ls "${{ matrix.runtime }}-runtime" runtime_ver="$(ruby -e 'require "./scripts/github/lib.rb"; puts get_runtime("${{ matrix.runtime }}")')" echo "::set-output name=runtime_ver::$runtime_ver" - - name: Upload compact ${{ matrix.runtime }} wasm - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - 
upload_url: ${{ needs.publish-draft-release.outputs.asset_upload_url }} - asset_path: "${{ matrix.runtime }}-runtime/${{ matrix.runtime }}_runtime.compact.wasm" - asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.wasm - asset_content_type: application/wasm - name: Upload compressed ${{ matrix.runtime }} wasm uses: actions/upload-release-asset@v1 env: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 22a0407fabcc..f12de91ea6bc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -241,8 +241,9 @@ spellcheck: - cargo spellcheck --version # compare with the commit parent to the PR, given it's from a default branch - git fetch origin +${CI_DEFAULT_BRANCH}:${CI_DEFAULT_BRANCH} + - cargo spellcheck list-files -vvv $(git diff --diff-filter=AM --name-only $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH} -- :^bridges)) - time cargo spellcheck check -vvv --cfg=scripts/gitlab/spellcheck.toml --checkers hunspell --code 1 - -r $(git diff --name-only ${CI_COMMIT_SHA} $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH})) + $(git diff --diff-filter=AM --name-only $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH} -- :^bridges)) allow_failure: true build-adder-collator: @@ -268,6 +269,27 @@ build-adder-collator: #### stage: build +.check-dependent-project: &check-dependent-project + stage: build + <<: *docker-env + <<: *vault-secrets + script: + - git clone + --depth=1 + "--branch=$PIPELINE_SCRIPTS_TAG" + https://github.com/paritytech/pipeline-scripts + - ./pipeline-scripts/check_dependent_project.sh + paritytech + polkadot + --polkadot + "$DEPENDENT_REPO" + "$GITHUB_PR_TOKEN" + +check-dependent-cumulus: + <<: *check-dependent-project + variables: + DEPENDENT_REPO: cumulus + check-transaction-versions: # image must be ubuntu:20.04 based to match the linkers, this image has npm installed image: paritytech/contracts-ci-linux:production diff --git a/Cargo.lock b/Cargo.lock index 0af8ca3fb4fc..0ba829544a62 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -44,7 +44,7 @@ checksum = "495ee669413bfbe9e8cace80f4d3d78e6d8c8d99579f97fb93bde351b185f2d4" dependencies = [ "cfg-if 1.0.0", "cipher", - "cpufeatures", + "cpufeatures 0.1.4", "opaque-debug 0.3.0", ] @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.13" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] @@ -121,6 +121,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "arbitrary" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "577b08a4acd7b99869f863c50011b01eb73424ccc798ecd996f2e24817adfca7" + [[package]] name = "arrayref" version = "0.3.6" @@ -433,9 +439,9 @@ checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base58" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base64" @@ -461,14 +467,14 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "beefy-primitives", "fnv", "futures 0.3.17", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-client-api", "sc-keystore", "sc-network", @@ -489,7 +495,7 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -509,12 +515,12 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -1015,7 +1021,7 @@ checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412" dependencies = [ "cfg-if 1.0.0", "cipher", - "cpufeatures", + "cpufeatures 0.1.4", "zeroize", ] @@ -1100,15 +1106,6 @@ dependencies = [ "vec_map", ] -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - [[package]] name = "cloudabi" version = "0.1.0" @@ -1188,25 +1185,28 @@ dependencies = [ ] [[package]] -name = "cpuid-bool" -version = "0.1.0" +name = "cpufeatures" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d375c433320f6c5057ae04a04376eef4d04ce2801448cf8863a78da99107be4" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +dependencies = [ + "libc", +] [[package]] name = "cranelift-bforest" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7e6bea67967505247f54fa2c85cf4f6e0e31c4e5692c9b70e4ae58e339067333" +checksum = "15013642ddda44eebcf61365b2052a23fd8b7314f90ba44aa059ec02643c5139" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48194035d2752bdd5bdae429e3ab88676e95f52a2b1355a5d4e809f9e39b1d74" +checksum = "298f2a7ed5fdcb062d8e78b7496b0f4b95265d20245f2d0ca88f846dd192a3a3" dependencies = [ "cranelift-bforest", "cranelift-codegen-meta", @@ -1215,16 +1215,15 @@ dependencies = [ "gimli", "log", "regalloc", - "serde", "smallvec", "target-lexicon", ] [[package]] name = "cranelift-codegen-meta" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976efb22fcab4f2cd6bd4e9913764616a54d895c1a23530128d04e03633c555f" +checksum = "5cf504261ac62dfaf4ffb3f41d88fd885e81aba947c1241275043885bc5f0bac" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -1232,27 +1231,24 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" -dependencies = [ - "serde", -] +checksum = "1cd2a72db4301dbe7e5a4499035eedc1e82720009fb60603e20504d8691fa9cd" [[package]] name = "cranelift-entity" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" +checksum = "48868faa07cacf948dc4a1773648813c0e453ff9467e800ff10f6a78c021b546" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279afcc0d3e651b773f94837c3d581177b348c8d69e928104b2e9fccb226f921" +checksum = 
"351c9d13b4ecd1a536215ec2fd1c3ee9ee8bc31af172abf1e45ed0adb7a931df" dependencies = [ "cranelift-codegen", "log", @@ -1262,9 +1258,9 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c04d1fe6a5abb5bb0edc78baa8ef238370fb8e389cc88b6d153f7c3e9680425" +checksum = "6df8b556663d7611b137b24db7f6c8d9a8a27d7f29c7ea7835795152c94c1b75" dependencies = [ "cranelift-codegen", "libc", @@ -1273,19 +1269,18 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.76.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d260ad44f6fd2c91f7f5097191a2a9e3edcbb36df1fb787b600dad5ea148ec" +checksum = "7a69816d90db694fa79aa39b89dda7208a4ac74b6f2b8f3c4da26ee1c8bdfc5e" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "itertools", "log", - "serde", "smallvec", - "thiserror", "wasmparser", + "wasmtime-types", ] [[package]] @@ -1497,6 +1492,12 @@ dependencies = [ "syn", ] +[[package]] +name = "diff" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" + [[package]] name = "difference" version = "2.0.0" @@ -1639,7 +1640,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.2", + "sha2 0.9.8", "zeroize", ] @@ -1724,15 +1725,6 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" -[[package]] -name = "erased-serde" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ca8b296792113e1500fd935ae487be6e00ce318952a6880555554824d6ebf38" -dependencies = [ - "serde", -] - [[package]] name = "errno" version = "0.2.5" @@ -1874,7 +1866,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 
0.11.1", + "parking_lot", "scale-info", ] @@ -1918,7 +1910,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", ] @@ -1936,7 +1928,7 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -1956,7 +1948,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "Inflector", "chrono", @@ -1982,7 +1974,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -1996,7 +1988,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -2024,7 +2016,7 @@ dependencies = [ 
[[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "bitflags", "frame-metadata", @@ -2051,7 +2043,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -2063,7 +2055,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 1.1.0", @@ -2075,7 +2067,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro2", "quote", @@ -2085,7 +2077,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-support-test-pallet", @@ -2108,7 +2100,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -2119,7 +2111,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "log", @@ -2136,7 +2128,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -2151,7 +2143,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "sp-api", @@ -2160,7 +2152,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "sp-api", @@ -2379,7 +2371,7 @@ checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" [[package]] name = "generate-bags" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "chrono", "frame-election-provider-support", @@ -2433,6 +2425,7 @@ dependencies = [ "cfg-if 0.1.10", "libc", "wasi 0.9.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -2617,6 +2610,17 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "honggfuzz" +version = "0.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2990,7 +2994,7 @@ dependencies = [ "jsonrpc-server-utils", "log", "net2", - "parking_lot 0.11.1", + "parking_lot", "unicase", ] @@ -3005,7 +3009,7 @@ dependencies = [ "jsonrpc-server-utils", "log", "parity-tokio-ipc", - "parking_lot 0.11.1", + "parking_lot", "tower-service", ] @@ -3019,7 +3023,7 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log", - "parking_lot 0.11.1", + "parking_lot", "rand 0.7.3", "serde", ] @@ -3053,15 +3057,15 @@ dependencies = [ "jsonrpc-server-utils", "log", "parity-ws", - "parking_lot 0.11.1", + "parking_lot", "slab", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f37924e16300e249a52a22cabb5632f846dc9760b39355f5e8bc70cd23dc6300" +checksum = "8edb341d35279b59c79d7fe9e060a51aec29d45af99cc7c72ea7caa350fa71a4" dependencies = [ "Inflector", "bae", @@ -3073,9 +3077,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d67724d368c59e08b557a516cf8fcc51100e7a708850f502e1044b151fe89788" +checksum = 
"4cc738fd55b676ada3271ef7c383a14a0867a2a88b0fa941311bf5fc0a29d498" dependencies = [ "async-trait", "beef", @@ -3091,9 +3095,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2834b6e7f57ce9a4412ed4d6dc95125d2c8612e68f86b9d9a07369164e4198" +checksum = "9841352dbecf4c2ed5dc71698df9f1660262ae4e0b610e968602529bdbcf7b30" dependencies = [ "async-trait", "fnv", @@ -3131,7 +3135,7 @@ dependencies = [ [[package]] name = "kusama-runtime" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "bitvec 0.20.1", @@ -3246,7 +3250,7 @@ checksum = "c3b6b85fc643f5acd0bffb2cc8a6d150209379267af0d41db72170021841f9f5" dependencies = [ "kvdb", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot", ] [[package]] @@ -3261,7 +3265,7 @@ dependencies = [ "num_cpus", "owning_ref", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot", "regex", "rocksdb", "smallvec", @@ -3281,9 +3285,9 @@ checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" [[package]] name = "libc" -version = "0.2.103" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" +checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" [[package]] name = "libgit2-sys" @@ -3356,7 +3360,7 @@ dependencies = [ "libp2p-websocket", "libp2p-yamux", "multiaddr", - "parking_lot 0.11.1", + "parking_lot", "pin-project 1.0.8", "smallvec", "wasm-timer", @@ -3381,14 +3385,14 @@ dependencies = [ "multiaddr", "multihash 0.14.0", "multistream-select", - "parking_lot 0.11.1", + "parking_lot", "pin-project 1.0.8", "prost", "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.2", + "sha2 0.9.8", "smallvec", "thiserror", "unsigned-varint 0.7.0", @@ -3459,7 +3463,7 @@ dependencies = [ "prost-build", "rand 0.7.3", "regex", - 
"sha2 0.9.2", + "sha2 0.9.8", "smallvec", "unsigned-varint 0.7.0", "wasm-timer", @@ -3499,7 +3503,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.2", + "sha2 0.9.8", "smallvec", "uint", "unsigned-varint 0.7.0", @@ -3540,7 +3544,7 @@ dependencies = [ "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.11.1", + "parking_lot", "rand 0.7.3", "smallvec", "unsigned-varint 0.7.0", @@ -3561,7 +3565,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.4", - "sha2 0.9.2", + "sha2 0.9.8", "snow", "static_assertions", "x25519-dalek", @@ -3649,7 +3653,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "lru", + "lru 0.6.6", "minicbor", "rand 0.7.3", "smallvec", @@ -3752,7 +3756,7 @@ checksum = "214cc0dd9c37cbed27f0bb1eba0c41bbafdb93a8be5e9d6ae1e6b4b42cd044bf" dependencies = [ "futures 0.3.17", "libp2p-core", - "parking_lot 0.11.1", + "parking_lot", "thiserror", "yamux", ] @@ -3784,7 +3788,7 @@ dependencies = [ "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2 0.9.2", + "sha2 0.9.8", "typenum", ] @@ -3803,7 +3807,7 @@ dependencies = [ "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2 0.9.2", + "sha2 0.9.8", "typenum", ] @@ -3822,7 +3826,7 @@ dependencies = [ "libsecp256k1-gen-genmult 0.3.0", "rand 0.8.4", "serde", - "sha2 0.9.2", + "sha2 0.9.8", "typenum", ] @@ -3921,15 +3925,6 @@ dependencies = [ "statrs", ] -[[package]] -name = "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - [[package]] name = "lock_api" version = "0.4.1" @@ -3971,6 +3966,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "lru" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c748cfe47cb8da225c37595b3108bea1c198c84aaae8ea0ba76d01dda9fc803" +dependencies = [ + "hashbrown", +] + [[package]] name = "lru-cache" version = 
"0.1.2" @@ -4057,6 +4061,16 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "memmap2" version = "0.2.0" @@ -4101,7 +4115,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beeb98b3d1ed2c0054bd81b5ba949a0243c3ccad751d45ea898fa8059fa2860a" dependencies = [ - "lru", + "lru 0.6.6", ] [[package]] @@ -4124,7 +4138,7 @@ dependencies = [ [[package]] name = "metered-channel" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "derive_more", @@ -4288,7 +4302,7 @@ dependencies = [ "digest 0.9.0", "generic-array 0.14.4", "multihash-derive", - "sha2 0.9.2", + "sha2 0.9.8", "sha3", "unsigned-varint 0.5.1", ] @@ -4302,7 +4316,7 @@ dependencies = [ "digest 0.9.0", "generic-array 0.14.4", "multihash-derive", - "sha2 0.9.2", + "sha2 0.9.8", "unsigned-varint 0.7.0", ] @@ -4540,12 +4554,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad167a2f54e832b82dbe003a046280dceffe5227b5f79e08e363a29638cfddd" -dependencies = [ - "parking_lot 0.11.1", -] +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "opaque-debug" @@ -4601,7 +4612,7 @@ checksum = "13370dae44474229701bb69b90b4f4dca6404cb0357a2d50d635f1171dc3aa7b" [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4615,7 +4626,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -4631,7 +4642,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -4646,7 +4657,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4670,7 +4681,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4690,7 +4701,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" 
dependencies = [ "frame-benchmarking", "frame-support", @@ -4705,7 +4716,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "beefy-primitives", "frame-support", @@ -4721,7 +4732,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "beefy-merkle-tree", "beefy-primitives", @@ -4746,7 +4757,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4831,7 +4842,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4848,7 +4859,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4864,7 +4875,7 @@ dependencies = [ [[package]] name = 
"pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4888,7 +4899,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4906,7 +4917,7 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4921,7 +4932,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4944,7 +4955,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4960,7 +4971,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4980,7 +4991,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -4997,7 +5008,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5014,7 +5025,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", @@ -5032,7 +5043,7 @@ dependencies = [ [[package]] name = "pallet-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5048,7 +5059,7 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -5065,7 +5076,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5080,7 +5091,7 @@ dependencies = [ [[package]] name = "pallet-nicks" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5094,7 +5105,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5111,7 +5122,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5134,7 +5145,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" 
dependencies = [ "frame-benchmarking", "frame-support", @@ -5149,7 +5160,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5163,7 +5174,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5179,7 +5190,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5200,7 +5211,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5216,7 +5227,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5230,7 +5241,7 @@ dependencies = [ [[package]] name = 
"pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5253,7 +5264,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -5264,7 +5275,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "log", "sp-arithmetic", @@ -5273,7 +5284,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5287,7 +5298,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5305,7 +5316,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5324,7 +5335,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-support", "frame-system", @@ -5341,7 +5352,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -5358,7 +5369,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5369,7 +5380,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5386,7 +5397,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5402,7 +5413,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-benchmarking", "frame-support", @@ -5416,7 +5427,7 @@ dependencies = [ [[package]] name = "pallet-xcm" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-support", "frame-system", @@ -5463,9 +5474,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241f9c5d25063080f2c02846221f13e1d0e5e18fa00c32c234aad585b744ee55" +checksum = "91b679c6acc14fac74382942e2b73bea441686a33430b951ea03b5aeb6a7f254" dependencies = [ "blake2-rfc", "crc32fast", @@ -5475,7 +5486,7 @@ dependencies = [ "log", "lz4", "memmap2", - "parking_lot 0.11.1", + "parking_lot", "rand 0.8.4", "snap", ] @@ -5536,9 +5547,9 @@ dependencies = [ "ethereum-types", "hashbrown", "impl-trait-for-tuples", - "lru", + "lru 0.6.6", "parity-util-mem-derive", - "parking_lot 0.11.1", + "parking_lot", "primitive-types", "smallvec", "tikv-jemalloc-ctl", @@ -5596,16 +5607,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "parking_lot" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.7.2", -] 
- [[package]] name = "parking_lot" version = "0.11.1" @@ -5613,22 +5614,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", - "lock_api 0.4.1", - "parking_lot_core 0.8.0", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi 0.0.3", - "libc", - "redox_syscall 0.1.56", - "smallvec", - "winapi 0.3.9", + "lock_api", + "parking_lot_core", ] [[package]] @@ -5638,7 +5625,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" dependencies = [ "cfg-if 0.1.10", - "cloudabi 0.1.0", + "cloudabi", "instant", "libc", "redox_syscall 0.1.56", @@ -5814,7 +5801,7 @@ checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" [[package]] name = "polkadot" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_cmd", "color-eyre", @@ -5826,7 +5813,7 @@ dependencies = [ [[package]] name = "polkadot-approval-distribution" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "env_logger 0.9.0", @@ -5846,7 +5833,7 @@ dependencies = [ [[package]] name = "polkadot-availability-bitfield-distribution" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "bitvec 0.20.1", @@ -5867,13 +5854,13 @@ dependencies = [ [[package]] name = "polkadot-availability-distribution" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "derive_more", "futures 0.3.17", "futures-timer 3.0.2", - "lru", + "lru 0.7.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -5894,14 +5881,14 @@ dependencies = [ [[package]] name = "polkadot-availability-recovery" -version = 
"0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "env_logger 0.9.0", "futures 0.3.17", "futures-timer 3.0.2", "log", - "lru", + "lru 0.7.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -5921,7 +5908,7 @@ dependencies = [ [[package]] name = "polkadot-cli" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-benchmarking-cli", "futures 0.3.17", @@ -5940,7 +5927,7 @@ dependencies = [ [[package]] name = "polkadot-client" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "frame-benchmarking", @@ -5972,7 +5959,7 @@ dependencies = [ [[package]] name = "polkadot-collator-protocol" -version = "0.9.11" +version = "0.9.12" dependencies = [ "always-assert", "assert_matches", @@ -5999,7 +5986,7 @@ dependencies = [ [[package]] name = "polkadot-core-primitives" -version = "0.9.11" +version = "0.9.12" dependencies = [ "parity-scale-codec", "parity-util-mem", @@ -6011,7 +5998,7 @@ dependencies = [ [[package]] name = "polkadot-dispute-distribution" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", @@ -6019,7 +6006,7 @@ dependencies = [ "futures 0.3.17", "futures-timer 3.0.2", "lazy_static", - "lru", + "lru 0.7.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -6040,7 +6027,7 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" -version = "0.9.11" +version = "0.9.12" dependencies = [ "parity-scale-codec", "polkadot-node-primitives", @@ -6053,7 +6040,7 @@ dependencies = [ [[package]] name = "polkadot-gossip-support" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", @@ -6079,14 +6066,14 @@ dependencies = [ [[package]] name = "polkadot-network-bridge" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", "futures 0.3.17", "futures-timer 3.0.2", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", 
"polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6102,7 +6089,7 @@ dependencies = [ [[package]] name = "polkadot-node-collation-generation" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "parity-scale-codec", @@ -6120,7 +6107,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "bitvec 0.20.1", @@ -6129,10 +6116,10 @@ dependencies = [ "futures-timer 3.0.2", "kvdb", "kvdb-memorydb", - "lru", + "lru 0.7.0", "merlin", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6157,7 +6144,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-av-store" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "bitvec 0.20.1", @@ -6168,7 +6155,7 @@ dependencies = [ "kvdb-memorydb", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6184,7 +6171,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-backing" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "bitvec 0.20.1", @@ -6209,7 +6196,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-bitfield-signing" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "polkadot-node-subsystem", @@ -6224,7 +6211,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-candidate-validation" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", @@ -6245,7 +6232,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-api" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "maplit", @@ -6264,7 +6251,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-selection" -version = "0.9.11" +version = 
"0.9.12" dependencies = [ "assert_matches", "futures 0.3.17", @@ -6272,7 +6259,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6285,7 +6272,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "bitvec 0.20.1", @@ -6309,7 +6296,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-participation" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "futures 0.3.17", @@ -6325,7 +6312,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-parachains-inherent" -version = "0.9.11" +version = "0.9.12" dependencies = [ "async-trait", "futures 0.3.17", @@ -6341,7 +6328,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-provisioner" -version = "0.9.11" +version = "0.9.12" dependencies = [ "bitvec 0.20.1", "futures 0.3.17", @@ -6358,7 +6345,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf" -version = "0.9.11" +version = "0.9.12" dependencies = [ "always-assert", "assert_matches", @@ -6392,7 +6379,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-runtime-api" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "memory-lru", @@ -6411,14 +6398,14 @@ dependencies = [ [[package]] name = "polkadot-node-jaeger" -version = "0.9.11" +version = "0.9.12" dependencies = [ "async-std", "lazy_static", "log", "mick-jaeger", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "polkadot-node-primitives", "polkadot-primitives", "sc-network", @@ -6428,7 +6415,7 @@ dependencies = [ [[package]] name = "polkadot-node-metrics" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "futures-timer 3.0.2", @@ -6438,7 +6425,7 @@ dependencies = [ [[package]] name = "polkadot-node-network-protocol" -version = 
"0.9.11" +version = "0.9.12" dependencies = [ "async-trait", "derive_more", @@ -6449,13 +6436,13 @@ dependencies = [ "polkadot-primitives", "sc-authority-discovery", "sc-network", - "strum 0.21.0", + "strum 0.22.0", "thiserror", ] [[package]] name = "polkadot-node-primitives" -version = "0.9.11" +version = "0.9.12" dependencies = [ "bounded-vec", "futures 0.3.17", @@ -6477,7 +6464,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem" -version = "0.9.11" +version = "0.9.12" dependencies = [ "polkadot-node-jaeger", "polkadot-node-subsystem-types", @@ -6486,11 +6473,11 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-test-helpers" -version = "0.9.11" +version = "0.9.12" dependencies = [ "async-trait", "futures 0.3.17", - "parking_lot 0.11.1", + "parking_lot", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-overseer", @@ -6504,7 +6491,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-types" -version = "0.9.11" +version = "0.9.12" dependencies = [ "derive_more", "futures 0.3.17", @@ -6522,7 +6509,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-util" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", @@ -6531,7 +6518,7 @@ dependencies = [ "futures 0.3.17", "itertools", "log", - "lru", + "lru 0.7.0", "metered-channel", "parity-scale-codec", "pin-project 1.0.8", @@ -6552,16 +6539,16 @@ dependencies = [ [[package]] name = "polkadot-overseer" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "femme", "futures 0.3.17", "futures-timer 3.0.2", - "lru", + "lru 0.7.0", "metered-channel", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6576,7 +6563,7 @@ dependencies = [ [[package]] name = "polkadot-overseer-gen" -version = "0.9.11" +version = "0.9.12" dependencies = [ "async-trait", "futures 0.3.17", @@ -6593,7 +6580,7 @@ dependencies 
= [ [[package]] name = "polkadot-overseer-gen-proc-macro" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "proc-macro-crate 1.1.0", @@ -6604,7 +6591,7 @@ dependencies = [ [[package]] name = "polkadot-parachain" -version = "0.9.11" +version = "0.9.12" dependencies = [ "derive_more", "frame-support", @@ -6620,7 +6607,7 @@ dependencies = [ [[package]] name = "polkadot-primitives" -version = "0.9.11" +version = "0.9.12" dependencies = [ "bitvec 0.20.1", "frame-system", @@ -6649,7 +6636,7 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-gadget", "beefy-gadget-rpc", @@ -6679,7 +6666,7 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "bitvec 0.20.1", @@ -6696,6 +6683,7 @@ dependencies = [ "pallet-authority-discovery", "pallet-authorship", "pallet-babe", + "pallet-bags-list", "pallet-balances", "pallet-bounties", "pallet-collective", @@ -6761,7 +6749,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-common" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "bitvec 0.20.1", @@ -6811,7 +6799,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" -version = "0.9.11" +version = "0.9.12" dependencies = [ "bitflags", "bitvec 0.20.1", @@ -6854,7 +6842,7 @@ dependencies = [ [[package]] name = "polkadot-service" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", @@ -6868,7 +6856,7 @@ dependencies = [ "kvdb", "kvdb-rocksdb", "log", - "lru", + "lru 0.7.0", "pallet-babe", "pallet-im-online", "pallet-mmr-primitives", @@ -6956,7 +6944,7 @@ dependencies = [ [[package]] name = "polkadot-simnet" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-benchmarking", "frame-support", @@ -6988,14 +6976,14 @@ dependencies = [ [[package]] name = "polkadot-simnet-node" -version = "0.9.11" +version = "0.9.12" 
dependencies = [ "polkadot-simnet", ] [[package]] name = "polkadot-simnet-test" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-system", "pallet-balances", @@ -7009,7 +6997,7 @@ dependencies = [ [[package]] name = "polkadot-statement-distribution" -version = "0.9.11" +version = "0.9.12" dependencies = [ "arrayvec 0.5.2", "assert_matches", @@ -7038,7 +7026,7 @@ dependencies = [ [[package]] name = "polkadot-statement-table" -version = "0.9.11" +version = "0.9.12" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -7047,7 +7035,7 @@ dependencies = [ [[package]] name = "polkadot-test-client" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "parity-scale-codec", @@ -7073,7 +7061,7 @@ dependencies = [ [[package]] name = "polkadot-test-malus" -version = "0.9.11" +version = "0.9.12" dependencies = [ "assert_matches", "async-trait", @@ -7092,7 +7080,7 @@ dependencies = [ [[package]] name = "polkadot-test-runtime" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "bitvec 0.20.1", @@ -7157,7 +7145,7 @@ dependencies = [ [[package]] name = "polkadot-test-service" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-benchmarking", "frame-system", @@ -7240,7 +7228,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fcffab1f78ebbdf4b93b68c1ffebc24037eedf271edaca795732b24e5e4e349" dependencies = [ - "cpufeatures", + "cpufeatures 0.1.4", "opaque-debug 0.3.0", "universal-hash", ] @@ -7252,7 +7240,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e597450cbf209787f0e6de80bf3795c6b2356a380ee87837b545aded8dbc1823" dependencies = [ "cfg-if 1.0.0", - "cpufeatures", + "cpufeatures 0.1.4", "opaque-debug 0.3.0", "universal-hash", ] @@ -7291,13 +7279,13 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "0.6.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427" +checksum = "ec0cfe1b2403f172ba0f234e500906ee0a3e493fb81092dac23ebefe129301cc" dependencies = [ - "ansi_term 0.11.0", + "ansi_term 0.12.1", "ctor", - "difference", + "diff", "output_vt100", ] @@ -7398,24 +7386,24 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70" dependencies = [ "unicode-xid", ] [[package]] name = "prometheus" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" +checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c" dependencies = [ "cfg-if 1.0.0", "fnv", "lazy_static", - "parking_lot 0.11.1", - "regex", + "memchr", + "parking_lot", "thiserror", ] @@ -7515,9 +7503,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" dependencies = [ "proc-macro2", ] @@ -7760,20 +7748,18 @@ checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ "log", "rustc-hash", - "serde", "smallvec", ] [[package]] name = "regex" -version = "1.4.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - 
"thread_local", ] [[package]] @@ -7788,9 +7774,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "region" @@ -7806,7 +7792,7 @@ dependencies = [ [[package]] name = "remote-ext-tests-bags-list" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-election-provider-support", "frame-support", @@ -7829,7 +7815,7 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "env_logger 0.9.0", "jsonrpsee-proc-macros", @@ -7865,9 +7851,9 @@ dependencies = [ [[package]] name = "retain_mut" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b" +checksum = "448296241d034b96c11173591deaa1302f2c17b56092106c1f92c1bc0183a8c9" [[package]] name = "ring" @@ -7906,7 +7892,7 @@ dependencies = [ [[package]] name = "rococo-runtime" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "bp-messages", @@ -8092,7 +8078,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "log", "sp-core", @@ -8103,7 +8089,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "derive_more", @@ -8130,7 +8116,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "futures-timer 3.0.2", @@ -8153,7 +8139,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -8169,7 +8155,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8185,7 +8171,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -8196,7 +8182,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "chrono", "fdlimit", @@ -8234,14 +8220,14 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "fnv", "futures 0.3.17", "hash-db", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -8262,7 +8248,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "hash-db", "kvdb", @@ -8272,7 +8258,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-client-api", "sc-state-db", "sp-arithmetic", @@ -8287,14 +8273,14 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "futures 0.3.17", "futures-timer 3.0.2", "libp2p", "log", - "parking_lot 0.11.1", + "parking_lot", "sc-client-api", "sc-utils", "serde", @@ -8311,7 +8297,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ 
"async-trait", "derive_more", @@ -8323,7 +8309,7 @@ dependencies = [ "num-rational 0.2.4", "num-traits", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "rand 0.7.3", "retain_mut", "sc-client-api", @@ -8354,7 +8340,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "derive_more", "futures 0.3.17", @@ -8378,7 +8364,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "fork-tree", "parity-scale-codec", @@ -8391,7 +8377,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "assert_matches", "async-trait", @@ -8425,7 +8411,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "futures 0.3.17", @@ -8451,7 +8437,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "sc-client-api", "sp-authorship", @@ -8462,13 +8448,13 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "lazy_static", "libsecp256k1 0.6.0", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-executor-common", "sc-executor-wasmi", "sc-executor-wasmtime", @@ -8488,7 +8474,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "derive_more", "environmental", @@ -8506,7 +8492,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "log", "parity-scale-codec", @@ -8522,7 +8508,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "cfg-if 1.0.0", "libc", @@ -8540,7 +8526,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "derive_more", @@ -8551,7 +8537,7 @@ dependencies = [ "futures-timer 3.0.2", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "rand 0.8.4", "sc-block-builder", "sc-client-api", @@ -8577,7 +8563,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "derive_more", "finality-grandpa", @@ -8601,7 +8587,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "ansi_term 0.12.1", "futures 0.3.17", @@ -8618,12 +8604,12 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "derive_more", "hex", - "parking_lot 0.11.1", + "parking_lot", "serde_json", "sp-application-crypto", "sp-core", @@ -8633,11 +8619,11 @@ dependencies = [ [[package]] name = "sc-light" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "hash-db", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-client-api", "sc-executor", "sp-api", @@ 
-8651,7 +8637,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-std", "async-trait", @@ -8671,9 +8657,9 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru", + "lru 0.6.6", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "pin-project 1.0.8", "prost", "prost-build", @@ -8702,13 +8688,13 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "futures-timer 3.0.2", "libp2p", "log", - "lru", + "lru 0.6.6", "sc-network", "sp-runtime", "substrate-prometheus-endpoint", @@ -8718,7 +8704,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "bytes 1.0.1", "fnv", @@ -8730,7 +8716,7 @@ dependencies = [ "log", "num_cpus", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "rand 0.7.3", "sc-client-api", "sc-network", @@ -8745,7 +8731,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "libp2p", @@ -8758,7 +8744,7 @@ dependencies = [ [[package]] name = 
"sc-proposer-metrics" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -8767,7 +8753,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "hash-db", @@ -8775,7 +8761,7 @@ dependencies = [ "jsonrpc-pubsub", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -8798,7 +8784,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "jsonrpc-core", @@ -8807,7 +8793,7 @@ dependencies = [ "jsonrpc-pubsub", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sc-chain-spec", "sc-transaction-pool-api", "serde", @@ -8823,7 +8809,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "jsonrpc-core", @@ -8840,7 +8826,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "directories", @@ -8853,7 +8839,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot", "pin-project 1.0.8", "rand 0.7.3", "sc-block-builder", @@ -8905,13 +8891,13 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "log", "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", - "parking_lot 0.11.1", + "parking_lot", "sc-client-api", "sp-core", ] @@ -8919,7 +8905,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -8941,13 +8927,13 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "chrono", "futures 0.3.17", "libp2p", "log", - "parking_lot 0.11.1", + "parking_lot", "pin-project 1.0.8", "rand 0.7.3", "serde", @@ -8959,14 +8945,15 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ 
"ansi_term 0.12.1", "atty", + "chrono", "lazy_static", "log", "once_cell", - "parking_lot 0.11.1", + "parking_lot", "regex", "rustc-hash", "sc-client-api", @@ -8988,7 +8975,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -8999,7 +8986,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "intervalier", @@ -9007,7 +8994,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot", "retain_mut", "sc-client-api", "sc-transaction-pool-api", @@ -9026,7 +9013,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "derive_more", "futures 0.3.17", @@ -9040,7 +9027,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "futures-timer 3.0.2", @@ -9114,26 +9101,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scroll" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "sct" version = "0.6.0" @@ -9146,9 +9113,9 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ "zeroize", ] @@ -9276,7 +9243,7 @@ checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", - "cpufeatures", + "cpufeatures 0.1.4", "digest 0.9.0", "opaque-debug 0.3.0", ] @@ -9295,13 +9262,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", - "cpuid-bool", + "cpufeatures 0.2.1", "digest 0.9.0", "opaque-debug 0.3.0", ] @@ -9377,18 +9344,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -[[package]] -name = "slog" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" -dependencies = [ - "erased-serde", -] - [[package]] name = "slot-range-helper" -version = "0.9.11" +version = "0.9.12" dependencies = [ "enumn", "parity-scale-codec", @@ -9431,7 +9389,7 @@ dependencies = [ "rand_core 0.6.1", "ring", "rustc_version 0.3.3", - "sha2 0.9.2", + "sha2 0.9.8", "subtle 2.4.1", "x25519-dalek", ] @@ -9492,7 +9450,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "hash-db", "log", @@ -9509,7 +9467,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "blake2-rfc", "proc-macro-crate 1.1.0", @@ -9521,7 +9479,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -9534,7 +9492,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "integer-sqrt", "num-traits", @@ -9549,7 +9507,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -9562,7 +9520,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "parity-scale-codec", @@ -9574,7 +9532,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "sp-api", @@ -9586,13 +9544,13 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "log", - "lru", + "lru 0.6.6", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sp-api", "sp-consensus", "sp-database", @@ -9604,7 +9562,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "futures 0.3.17", @@ -9623,7 +9581,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "merlin", @@ -9646,7 +9604,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -9657,7 +9615,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -9669,7 +9627,7 @@ dependencies = [ [[package]] name = "sp-core" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "base58", "blake2-rfc", @@ -9688,7 +9646,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot", "primitive-types", "rand 0.7.3", "regex", @@ -9696,12 +9654,13 @@ dependencies = [ "schnorrkel", "secrecy", "serde", - "sha2 0.9.2", + "sha2 0.9.8", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", "sp-std", "sp-storage", + "ss58-registry", "substrate-bip39", "thiserror", "tiny-bip39", @@ -9714,16 +9673,16 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "kvdb", - "parking_lot 0.11.1", + "parking_lot", ] [[package]] name = "sp-debug-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro2", "quote", @@ -9733,7 +9692,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "environmental", "parity-scale-codec", @@ -9744,7 +9703,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "finality-grandpa", "log", @@ -9762,7 +9721,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -9776,14 +9735,14 @@ dependencies = [ [[package]] name = "sp-io" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "hash-db", "libsecp256k1 0.6.0", "log", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "sp-core", "sp-externalities", "sp-keystore", @@ -9800,7 +9759,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "lazy_static", "sp-core", @@ -9811,14 +9770,14 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "derive_more", "futures 0.3.17", "merlin", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "schnorrkel", "serde", "sp-core", @@ -9828,7 +9787,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "zstd", ] @@ -9836,7 +9795,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -9851,7 +9810,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-solution-type" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -9862,7 +9821,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "sp-api", "sp-core", @@ -9872,7 +9831,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "backtrace", ] @@ -9880,7 +9839,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "rustc-hash", "serde", @@ -9890,7 +9849,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "either", "hash256-std-hasher", @@ -9912,7 +9871,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -9929,7 +9888,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "Inflector", "proc-macro-crate 1.1.0", @@ -9941,7 +9900,7 @@ dependencies = [ [[package]] name = "sp-serializer" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "serde", "serde_json", @@ -9950,7 +9909,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -9964,7 +9923,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "scale-info", @@ -9975,13 +9934,13 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "hash-db", 
"log", "num-traits", "parity-scale-codec", - "parking_lot 0.11.1", + "parking_lot", "rand 0.7.3", "smallvec", "sp-core", @@ -9998,12 +9957,12 @@ dependencies = [ [[package]] name = "sp-std" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" [[package]] name = "sp-storage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10016,7 +9975,7 @@ dependencies = [ [[package]] name = "sp-tasks" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "log", "sp-core", @@ -10029,7 +9988,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "futures-timer 3.0.2", @@ -10045,15 +10004,9 @@ dependencies = [ [[package]] name = "sp-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ - "erased-serde", - "log", "parity-scale-codec", - "parking_lot 0.10.2", - "serde", - "serde_json", - "slog", "sp-std", "tracing", "tracing-core", @@ -10063,7 +10016,7 @@ 
dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "sp-api", "sp-runtime", @@ -10072,7 +10025,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "log", @@ -10088,7 +10041,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "hash-db", "memory-db", @@ -10103,7 +10056,7 @@ dependencies = [ [[package]] name = "sp-version" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10119,7 +10072,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -10130,7 +10083,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10144,6 +10097,20 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "ss58-registry" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2413ecc7946ca99368862851dc1359f1477bc654ecfb135cf3efcb85ceca5f" +dependencies = [ + "Inflector", + "proc-macro2", + "quote", + "serde", + "serde_json", + "unicode-xid", +] + [[package]] name = "stable_deref_trait" version = "1.1.1" @@ -10152,7 +10119,7 @@ checksum = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" [[package]] name = "staking-miner" -version = "0.9.11" +version = "0.9.12" dependencies = [ "env_logger 0.9.0", "frame-election-provider-support", @@ -10200,7 +10167,7 @@ checksum = "11b73400442027c4adedda20a9f9b7945234a5bd8d5f7e86da22bd5d0622369c" dependencies = [ "cfg_aliases", "libc", - "parking_lot 0.11.1", + "parking_lot", "static_init_macro", ] @@ -10238,9 +10205,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" +checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" dependencies = [ "clap", "lazy_static", @@ -10249,9 +10216,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.16" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" +checksum = 
"dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck", "proc-macro-error 1.0.4", @@ -10274,8 +10241,14 @@ name = "strum" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" + +[[package]] +name = "strum" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7ac893c7d471c8a21f31cfe213ec4f6d9afeed25537c772e08ef3f005f8729e" dependencies = [ - "strum_macros 0.21.1", + "strum_macros 0.22.0", ] [[package]] @@ -10302,6 +10275,18 @@ dependencies = [ "syn", ] +[[package]] +name = "strum_macros" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339f799d8b549e3744c7ac7feb216383e4005d94bdb22561b3ab8f3b808ae9fb" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sub-tokens" version = "0.1.0" @@ -10326,7 +10311,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "platforms", ] @@ -10334,7 +10319,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-system-rpc-runtime-api", "futures 0.3.17", @@ -10356,7 +10341,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = 
"git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-std", "derive_more", @@ -10370,7 +10355,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "async-trait", "futures 0.3.17", @@ -10397,7 +10382,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "futures 0.3.17", "substrate-test-utils-derive", @@ -10407,7 +10392,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -10418,7 +10403,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "ansi_term 0.12.1", "build-helper", @@ -10444,9 +10429,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.77" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5239bc68e0fef57495900cfea4e8dc75596d9a319d7e16b1e0a440d24e6fe0a0" 
+checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" dependencies = [ "proc-macro2", "quote", @@ -10513,7 +10498,7 @@ dependencies = [ [[package]] name = "test-parachain-adder" -version = "0.9.11" +version = "0.9.12" dependencies = [ "dlmalloc", "parity-scale-codec", @@ -10526,7 +10511,7 @@ dependencies = [ [[package]] name = "test-parachain-adder-collator" -version = "0.9.11" +version = "0.9.12" dependencies = [ "futures 0.3.17", "futures-timer 3.0.2", @@ -10552,14 +10537,14 @@ dependencies = [ [[package]] name = "test-parachain-halt" -version = "0.9.11" +version = "0.9.12" dependencies = [ "substrate-wasm-builder", ] [[package]] name = "test-parachains" -version = "0.9.11" +version = "0.9.12" dependencies = [ "parity-scale-codec", "sp-core", @@ -10571,7 +10556,7 @@ dependencies = [ [[package]] name = "test-runner" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "frame-system", "futures 0.3.17", @@ -10624,18 +10609,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602eca064b2d83369e2b2f34b09c70b605402801927c65c11071ac911d299b88" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad553cc2c78e8de258400763a647e80e6d1b31ee237275d756f6836d204494c" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -10717,9 +10702,9 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.8.0" +version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" dependencies = [ "anyhow", "hmac 0.8.1", @@ -10727,9 +10712,10 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.2", + "sha2 0.9.8", "thiserror", "unicode-normalization", + "wasm-bindgen", "zeroize", ] @@ -10847,9 +10833,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f96e095c0c82419687c20ddf5cb3eadb61f4e1405923c9dc8e53a1adacbda8" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", "log", @@ -10860,9 +10846,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98863d0dd09fa59a1b79c6750ad80dbda6b75f4e71c437a6a1a8cb91a8bcbd77" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2", "quote", @@ -10871,9 +10857,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46125608c26121c81b0c6d693eab5a420e416da7e43c426d2e8f7df8da8a3acf" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] @@ -10995,7 +10981,7 @@ dependencies = [ "lazy_static", "log", "lru-cache", - "parking_lot 0.11.1", + "parking_lot", "resolv-conf", "smallvec", "thiserror", @@ -11011,7 +10997,7 @@ checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#9be8fdd36a70e19cd286bec00df740331b637546" +source = "git+https://github.com/paritytech/substrate?branch=master#afd572f208b25312f984987b7bb752e71fbf86d7" dependencies = [ "jsonrpsee-ws-client", "log", @@ -11034,9 +11020,9 @@ dependencies = [ [[package]] name = "trybuild" -version = "1.0.45" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bdaf2a1d317f3d58b44b31c7f6436b9b9acafe7bddfeace50897c2b804d7792" +checksum = "dbaccfa9796293406a02ec790614628c88d0b3246249a620ac1ee7076274716b" dependencies = [ "glob", "lazy_static", @@ -11048,12 +11034,12 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" +checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e" dependencies = [ - "cfg-if 0.1.10", - "rand 0.7.3", + "cfg-if 1.0.0", + "rand 0.8.4", "static_assertions", ] @@ -11369,7 +11355,7 @@ checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ "futures 0.3.17", "js-sys", - "parking_lot 0.11.1", + "parking_lot", "pin-utils", "wasm-bindgen", "wasm-bindgen-futures", @@ -11402,15 +11388,15 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.79.0" +version = "0.80.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5894be15a559c85779254700e1d35f02f843b5a69152e5c82c626d9fd66c0e" +checksum = "449167e2832691a1bff24cde28d2804e90e09586a448c8e76984792c44334a6b" [[package]] name = "wasmtime" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bbb8a082a8ef50f7eeb8b82dda9709ef1e68963ea3c94e45581644dd4041835" +checksum = "899b1e5261e3d3420860dacfb952871ace9d7ba9f953b314f67aaf9f8e2a4d89" dependencies = [ "anyhow", "backtrace", @@ 
-11421,27 +11407,28 @@ dependencies = [ "lazy_static", "libc", "log", + "object", "paste", "psm", + "rayon", "region", "rustc-demangle", "serde", - "smallvec", "target-lexicon", "wasmparser", "wasmtime-cache", + "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", - "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.9", ] [[package]] name = "wasmtime-cache" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d73391579ca7f24573138ef768b73b2aed5f9d542385c64979b65d60d0912399" +checksum = "e2493b81d7a9935f7af15e06beec806f256bc974a90a843685f3d61f2fc97058" dependencies = [ "anyhow", "base64 0.13.0", @@ -11452,7 +11439,7 @@ dependencies = [ "libc", "log", "serde", - "sha2 0.9.2", + "sha2 0.9.8", "toml", "winapi 0.3.9", "zstd", @@ -11460,26 +11447,16 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81c6f5ae9205382345c7cd7454932a906186836999a2161c385e38a15f52e1fe" +checksum = "99706bacdf5143f7f967d417f0437cce83a724cf4518cb1a3ff40e519d793021" dependencies = [ + "anyhow", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", + "cranelift-native", "cranelift-wasm", - "target-lexicon", - "wasmparser", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-debug" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c69e08f55e12f15f50b1b533bc3626723e7224254a065de6576934c86258c9e8" -dependencies = [ - "anyhow", "gimli", "more-asserts", "object", @@ -11491,94 +11468,55 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "005d93174040af37fb8625f891cd9827afdad314261f7ec4ee61ec497d6e9d3c" +checksum = "ac42cb562a2f98163857605f02581d719a410c5abe93606128c59a10e84de85b" dependencies = [ + "anyhow", "cfg-if 
1.0.0", - "cranelift-codegen", "cranelift-entity", - "cranelift-wasm", "gimli", "indexmap", "log", "more-asserts", + "object", "serde", + "target-lexicon", "thiserror", "wasmparser", + "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0bf1dfb213a35d8f21aefae40e597fe72778a907011ffdff7affb029a02af9a" +checksum = "24f46dd757225f29a419be415ea6fb8558df9b0194f07e3a6a9c99d0e14dd534" dependencies = [ "addr2line", "anyhow", + "bincode", "cfg-if 1.0.0", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-native", - "cranelift-wasm", "gimli", + "libc", "log", "more-asserts", "object", - "rayon", "region", "serde", "target-lexicon", "thiserror", "wasmparser", - "wasmtime-cranelift", - "wasmtime-debug", "wasmtime-environ", - "wasmtime-obj", - "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.9", ] -[[package]] -name = "wasmtime-obj" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231491878e710c68015228c9f9fc5955fe5c96dbf1485c15f7bed55b622c83c" -dependencies = [ - "anyhow", - "more-asserts", - "object", - "target-lexicon", - "wasmtime-debug", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-profiling" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21486cfb5255c2069666c1f116f9e949d4e35c9a494f11112fa407879e42198d" -dependencies = [ - "anyhow", - "cfg-if 1.0.0", - "gimli", - "lazy_static", - "libc", - "object", - "scroll", - "serde", - "target-lexicon", - "wasmtime-environ", - "wasmtime-runtime", -] - [[package]] name = "wasmtime-runtime" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ddfdf32e0a20d81f48be9dacd31612bc61de5a174d1356fef806d300f507de" +checksum = "0122215a44923f395487048cb0a1d60b5b32c73aab15cf9364b798dbaff0996f" dependencies = [ 
"anyhow", "backtrace", @@ -11598,6 +11536,18 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "wasmtime-types" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9b01caf8a204ef634ebac99700e77ba716d3ebbb68a1abbc2ceb6b16dbec9e4" +dependencies = [ + "cranelift-entity", + "serde", + "thiserror", + "wasmparser", +] + [[package]] name = "web-sys" version = "0.3.46" @@ -11638,7 +11588,7 @@ dependencies = [ [[package]] name = "westend-runtime" -version = "0.9.11" +version = "0.9.12" dependencies = [ "beefy-primitives", "bitvec 0.20.1", @@ -11820,7 +11770,7 @@ dependencies = [ [[package]] name = "xcm" -version = "0.9.11" +version = "0.9.12" dependencies = [ "derivative", "impl-trait-for-tuples", @@ -11832,7 +11782,7 @@ dependencies = [ [[package]] name = "xcm-builder" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-support", "frame-system", @@ -11855,7 +11805,7 @@ dependencies = [ [[package]] name = "xcm-executor" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-benchmarking", "frame-support", @@ -11872,7 +11822,7 @@ dependencies = [ [[package]] name = "xcm-executor-integration-tests" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-support", "frame-system", @@ -11901,7 +11851,7 @@ dependencies = [ [[package]] name = "xcm-simulator" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-support", "parity-scale-codec", @@ -11917,7 +11867,7 @@ dependencies = [ [[package]] name = "xcm-simulator-example" -version = "0.9.11" +version = "0.9.12" dependencies = [ "frame-support", "frame-system", @@ -11938,6 +11888,30 @@ dependencies = [ "xcm-simulator", ] +[[package]] +name = "xcm-simulator-fuzzer" +version = "0.9.9" +dependencies = [ + "frame-support", + "frame-system", + "honggfuzz", + "pallet-balances", + "pallet-xcm", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain", + "polkadot-runtime-parachains", + "scale-info", + "sp-core", + 
"sp-io", + "sp-runtime", + "sp-std", + "xcm", + "xcm-builder", + "xcm-executor", + "xcm-simulator", +] + [[package]] name = "yamux" version = "0.9.0" @@ -11947,16 +11921,16 @@ dependencies = [ "futures 0.3.17", "log", "nohash-hasher", - "parking_lot 0.11.1", + "parking_lot", "rand 0.8.4", "static_assertions", ] [[package]] name = "zeroize" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 114d7ab2e986..8ab176c69b13 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ path = "src/main.rs" name = "polkadot" description = "Implementation of a https://polkadot.network node in Rust based on the Substrate framework." license = "GPL-3.0-only" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" readme = "README.md" @@ -42,6 +42,7 @@ members = [ "xcm/xcm-executor/integration-tests", "xcm/xcm-simulator", "xcm/xcm-simulator/example", + "xcm/xcm-simulator/fuzzer", "xcm/pallet-xcm", "xcm/pallet-xcm-benchmarks", "xcm/procedural", diff --git a/bridges/bin/millau/runtime/src/lib.rs b/bridges/bin/millau/runtime/src/lib.rs index 6e561e8719e4..65c54c3046c1 100644 --- a/bridges/bin/millau/runtime/src/lib.rs +++ b/bridges/bin/millau/runtime/src/lib.rs @@ -307,7 +307,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_shift_session_manager::Pallet; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = (); // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) type WeightInfo = (); } diff --git a/bridges/bin/rialto/runtime/src/lib.rs b/bridges/bin/rialto/runtime/src/lib.rs index e0a1e176c3ae..3a8b8651e346 100644 --- a/bridges/bin/rialto/runtime/src/lib.rs 
+++ b/bridges/bin/rialto/runtime/src/lib.rs @@ -423,7 +423,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_shift_session_manager::Pallet; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = (); // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) type WeightInfo = (); } diff --git a/bridges/modules/shift-session-manager/src/lib.rs b/bridges/modules/shift-session-manager/src/lib.rs index 7cf05e99f3a0..3635e6223d7f 100644 --- a/bridges/modules/shift-session-manager/src/lib.rs +++ b/bridges/modules/shift-session-manager/src/lib.rs @@ -162,7 +162,6 @@ mod tests { type SessionManager = (); type SessionHandler = TestSessionHandler; type Keys = UintAuthorityId; - type DisabledValidatorsThreshold = (); type WeightInfo = (); } @@ -176,7 +175,7 @@ mod tests { fn on_new_session(_: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)]) {} - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } fn new_test_ext() -> TestExternalities { diff --git a/cli/Cargo.toml b/cli/Cargo.toml index d52b547a9b3f..899bd4cd0013 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-cli" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Polkadot Relay-chain Client Node" edition = "2018" @@ -15,8 +15,8 @@ crate-type = ["cdylib", "rlib"] [dependencies] log = "0.4.13" -thiserror = "1.0.26" -structopt = { version = "0.3.23", optional = true } +thiserror = "1.0.30" +structopt = { version = "0.3.25", optional = true } futures = "0.3.17" service = { package = "polkadot-service", path = "../node/service", default-features = false, optional = true } diff --git a/cli/src/command.rs b/cli/src/command.rs index c9ba571e828c..d9460dd5ac63 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -19,6 +19,7 @@ use futures::future::TryFutureExt; use log::info; use sc_cli::{Role, RuntimeVersion, SubstrateCli}; use 
service::{self, IdentifyVariant}; +use sp_core::crypto::Ss58AddressFormatRegistry; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -191,15 +192,14 @@ impl SubstrateCli for Cli { } fn set_default_ss58_version(spec: &Box) { - use sp_core::crypto::Ss58AddressFormat; - let ss58_version = if spec.is_kusama() { - Ss58AddressFormat::KusamaAccount + Ss58AddressFormatRegistry::KusamaAccount } else if spec.is_westend() { - Ss58AddressFormat::SubstrateAccount + Ss58AddressFormatRegistry::SubstrateAccount } else { - Ss58AddressFormat::PolkadotAccount - }; + Ss58AddressFormatRegistry::PolkadotAccount + } + .into(); sp_core::crypto::set_default_ss58_version(ss58_version); } diff --git a/core-primitives/Cargo.toml b/core-primitives/Cargo.toml index e3c1a5e8d478..352070cfbc70 100644 --- a/core-primitives/Cargo.toml +++ b/core-primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-core-primitives" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/doc/shell-completion.md b/doc/shell-completion.md index 965a722308c3..986609392e34 100644 --- a/doc/shell-completion.md +++ b/doc/shell-completion.md @@ -10,6 +10,7 @@ source target/release/completion-scripts/polkadot.bash ``` You can find completion scripts for: + - bash - fish - zsh diff --git a/erasure-coding/Cargo.toml b/erasure-coding/Cargo.toml index 2cf311f022eb..76983ed75c1c 100644 --- a/erasure-coding/Cargo.toml +++ b/erasure-coding/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-erasure-coding" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -11,4 +11,4 @@ novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" } parity-scale-codec = { version = "2.3.1", default-features = false, features = ["std", "derive"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" } 
-thiserror = "1.0.26" +thiserror = "1.0.30" diff --git a/erasure-coding/fuzzer/Cargo.toml b/erasure-coding/fuzzer/Cargo.toml index cf2e512e861e..845622eb4823 100644 --- a/erasure-coding/fuzzer/Cargo.toml +++ b/erasure-coding/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "erasure_coding_fuzzer" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/client/Cargo.toml b/node/client/Cargo.toml index 077429838c38..424ff89b209f 100644 --- a/node/client/Cargo.toml +++ b/node/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-client" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index 9ff9621fcbf8..673c9732979b 100644 --- a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-node-collation-generation" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } @@ -14,7 +14,7 @@ polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.26" +thiserror = "1.0.30" parity-scale-codec = { version = "2.3.1", default-features = false, features = ["bit-vec", "derive"] } [dev-dependencies] diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index c4b73b8b717a..2d6ef8e6b5e0 100644 --- a/node/collation-generation/src/lib.rs +++ 
b/node/collation-generation/src/lib.rs @@ -62,7 +62,7 @@ impl CollationGenerationSubsystem { /// Conceptually, this is very simple: it just loops forever. /// /// - On incoming overseer messages, it starts or stops jobs as appropriate. - /// - On other incoming messages, if they can be converted into Job::ToJob and + /// - On other incoming messages, if they can be converted into `Job::ToJob` and /// include a hash, then they're forwarded to the appropriate individual job. /// - On outgoing messages from the jobs, it forwards them to the overseer. /// diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index da12f27eb6a0..7c73dd2fbb4a 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-approval-voting" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -9,9 +9,9 @@ rand = "0.5" futures = "0.3.17" futures-timer = "3.0.2" parity-scale-codec = { version = "2.3.1", default-features = false, features = ["bit-vec", "derive"] } -tracing = "0.1.28" +tracing = "0.1.29" bitvec = { version = "0.20.1", default-features = false, features = ["alloc"] } -lru = "0.6" +lru = "0.7" merlin = "2.0" schnorrkel = "0.9.1" kvdb = "0.10.0" diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index 644d820f38e5..d681e67eb853 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -315,6 +315,11 @@ fn filled_tranche_iterator<'a>( /// and tick parameters. This method also returns the next tick at which a `no_show` will occur /// amongst the set of validators that have not submitted an approval. 
/// +/// This also bounds the earliest tick of all assignments to be equal to the +/// block tick for the purposes of the calculation, so no assignment can be treated +/// as being received before the block itself. This is unlikely if not impossible +/// in practice, but can occur during test code. +/// /// If the returned `next_no_show` is not None, there are two possible cases for the value of /// based on the earliest assignment `tick` of a non-approving, yet-to-be-no-show validator: /// - if `tick` <= `clock_drift`: the value will always be `clock_drift` + `no_show_duration`. @@ -323,13 +328,16 @@ fn count_no_shows( assignments: &[(ValidatorIndex, Tick)], approvals: &BitSlice, clock_drift: Tick, + block_tick: Tick, no_show_duration: Tick, drifted_tick_now: Tick, ) -> (usize, Option) { let mut next_no_show = None; let no_shows = assignments .iter() - .map(|(v_index, tick)| (v_index, tick.saturating_sub(clock_drift) + no_show_duration)) + .map(|(v_index, tick)| { + (v_index, tick.max(&block_tick).saturating_sub(clock_drift) + no_show_duration) + }) .filter(|&(v_index, no_show_at)| { let has_approved = if let Some(approved) = approvals.get(v_index.0 as usize) { *approved @@ -418,6 +426,7 @@ pub fn tranches_to_approve( assignments, approvals, clock_drift, + block_tick, no_show_duration, drifted_tick_now, ); @@ -635,7 +644,7 @@ mod tests { #[test] fn tranches_to_approve_everyone_present() { - let block_tick = 0; + let block_tick = 20; let no_show_duration = 10; let needed_approvals = 4; @@ -672,7 +681,7 @@ mod tests { needed: 1, tolerated_missing: 0, next_no_show: None, - last_assignment_tick: Some(1) + last_assignment_tick: Some(21) }, ); } @@ -1127,6 +1136,7 @@ mod tests { fn test_count_no_shows(test: NoShowTest) { let n_validators = 4; + let block_tick = 20; let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators]; for &v_index in &test.approvals { @@ -1137,6 +1147,7 @@ mod tests { &test.assignments, &approvals, test.clock_drift, + block_tick, 
test.no_show_duration, test.drifted_tick_now, ); @@ -1160,13 +1171,13 @@ mod tests { #[test] fn count_no_shows_single_validator_is_next_no_show() { test_count_no_shows(NoShowTest { - assignments: vec![(ValidatorIndex(1), 21)], + assignments: vec![(ValidatorIndex(1), 31)], approvals: vec![], clock_drift: 10, no_show_duration: 10, drifted_tick_now: 20, exp_no_shows: 0, - exp_next_no_show: Some(31), + exp_next_no_show: Some(41), }) } @@ -1199,26 +1210,26 @@ mod tests { #[test] fn count_no_shows_two_validators_next_no_show_ordered_first() { test_count_no_shows(NoShowTest { - assignments: vec![(ValidatorIndex(1), 21), (ValidatorIndex(2), 22)], + assignments: vec![(ValidatorIndex(1), 31), (ValidatorIndex(2), 32)], approvals: vec![], clock_drift: 10, no_show_duration: 10, drifted_tick_now: 20, exp_no_shows: 0, - exp_next_no_show: Some(31), + exp_next_no_show: Some(41), }) } #[test] fn count_no_shows_two_validators_next_no_show_ordered_last() { test_count_no_shows(NoShowTest { - assignments: vec![(ValidatorIndex(1), 22), (ValidatorIndex(2), 21)], + assignments: vec![(ValidatorIndex(1), 32), (ValidatorIndex(2), 31)], approvals: vec![], clock_drift: 10, no_show_duration: 10, drifted_tick_now: 20, exp_no_shows: 0, - exp_next_no_show: Some(31), + exp_next_no_show: Some(41), }) } @@ -1226,16 +1237,16 @@ mod tests { fn count_no_shows_three_validators_one_almost_late_one_no_show_one_approving() { test_count_no_shows(NoShowTest { assignments: vec![ - (ValidatorIndex(1), 21), - (ValidatorIndex(2), 20), - (ValidatorIndex(3), 20), + (ValidatorIndex(1), 31), + (ValidatorIndex(2), 19), + (ValidatorIndex(3), 19), ], approvals: vec![3], clock_drift: 10, no_show_duration: 10, drifted_tick_now: 20, exp_no_shows: 1, - exp_next_no_show: Some(31), + exp_next_no_show: Some(41), }) } @@ -1282,7 +1293,7 @@ mod tests { no_show_duration: 20, drifted_tick_now: 0, exp_no_shows: 0, - exp_next_no_show: Some(30), + exp_next_no_show: Some(40), }) } @@ -1295,7 +1306,7 @@ mod tests { no_show_duration: 20, 
drifted_tick_now: 0, exp_no_shows: 0, - exp_next_no_show: Some(30), + exp_next_no_show: Some(40), }) } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 956056b504c5..4c37f244ba33 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -33,7 +33,7 @@ use polkadot_node_primitives::{ approval::{ BlockApprovalMeta, DelayTranche, IndirectAssignmentCert, IndirectSignedApprovalVote, }, - SignedDisputeStatement, ValidationResult, + SignedDisputeStatement, ValidationResult, APPROVAL_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ errors::RecoveryError, @@ -2318,6 +2318,7 @@ async fn launch_approval( validation_code, candidate.descriptor.clone(), available_data.pov, + APPROVAL_EXECUTION_TIMEOUT, val_tx, ) .into(), diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 586bbe2876ba..4ee746ac2fb6 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -1610,7 +1610,7 @@ fn subsystem_process_wakeup_schedules_wakeup() { futures_timer::Delay::new(Duration::from_millis(100)).await; // The wakeup should have been rescheduled. 
- assert!(clock.inner.lock().current_wakeup_is(20)); + assert!(clock.inner.lock().current_wakeup_is(30)); virtual_overseer }); @@ -2235,8 +2235,8 @@ fn subsystem_process_wakeup_trigger_assignment_launch_approval() { futures_timer::Delay::new(Duration::from_millis(200)).await; - assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot + 1))); - clock.inner.lock().wakeup_all(slot_to_tick(slot + 1)); + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot + 2))); + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); assert_matches!( overseer_recv(&mut virtual_overseer).await, @@ -2468,7 +2468,7 @@ fn subsystem_assignment_triggered_by_all_with_less_than_threshold() { approvals_to_import: vec![2, 4], ticks: vec![ 2, // APPROVAL_DELAY - 20, // Check for no shows + 21, // Check for no shows ], should_be_triggered: |t| t == 20, }); @@ -2484,7 +2484,7 @@ fn subsystem_assignment_not_triggered_by_all_with_threshold() { approvals_to_import: vec![1, 3, 5], ticks: vec![ 2, // APPROVAL_DELAY - 20, // Check no shows + 21, // Check no shows ], should_be_triggered: |_| false, }); @@ -2499,8 +2499,8 @@ fn subsystem_assignment_triggered_if_below_maximum_and_clock_is_equal() { assignments_to_import: vec![1], approvals_to_import: vec![], ticks: vec![ - 20, // Check no shows - 21, // Alice wakeup, assignment triggered + 21, // Check no shows + 23, // Alice wakeup, assignment triggered ], should_be_triggered: |tick| tick >= 21, }); @@ -2517,7 +2517,7 @@ fn subsystem_assignment_not_triggered_more_than_maximum() { ticks: vec![ 2, // APPROVAL_DELAY 13, // Alice wakeup - 20, // Check no shows + 30, // Check no shows ], should_be_triggered: |_| false, }); @@ -2525,16 +2525,15 @@ fn subsystem_assignment_not_triggered_more_than_maximum() { #[test] fn subsystem_assignment_triggered_if_at_maximum() { - // TODO(ladi): is this possible? 
triggers_assignment_test(TriggersAssignmentConfig { - our_assigned_tranche: 11, + our_assigned_tranche: 21, assign_validator_tranche: |_| Ok(2), no_show_slots: 2, assignments_to_import: vec![1], approvals_to_import: vec![], ticks: vec![ 12, // Bob wakeup - 20, // Check no shows + 30, // Check no shows ], should_be_triggered: |_| false, }); @@ -2584,7 +2583,7 @@ fn subsystem_assignment_not_triggered_if_at_maximum_but_clock_is_before_with_dri 12, // Charlie wakeup 13, // Dave wakeup 15, // Alice wakeup, noop - 20, // Check no shows + 30, // Check no shows 34, // Eve wakeup ], should_be_triggered: |_| false, @@ -2756,7 +2755,7 @@ fn pre_covers_dont_stall_approval() { // Wait for the no-show timer to observe the approval from // tranche 0 and set a wakeup for tranche 1. - clock.inner.lock().set_tick(20); + clock.inner.lock().set_tick(30); // Sleep to ensure we get a consistent read on the database. futures_timer::Delay::new(Duration::from_millis(100)).await; diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index ebd6529d85c5..c6a2d3a76681 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-av-store" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -8,8 +8,8 @@ edition = "2018" futures = "0.3.17" futures-timer = "3.0.2" kvdb = "0.10.0" -thiserror = "1.0.26" -tracing = "0.1.28" +thiserror = "1.0.30" +tracing = "0.1.29" bitvec = "0.20.1" parity-scale-codec = { version = "2.3.1", features = ["derive"] } diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index 0161e828084d..0efd0b8c3444 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-backing" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -16,8 +16,8 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = 
"master" } erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } bitvec = { version = "0.20.1", default-features = false, features = ["alloc"] } -tracing = "0.1.28" -thiserror = "1.0.26" +tracing = "0.1.29" +thiserror = "1.0.30" [dev-dependencies] sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 8fc7bbd35e32..2807f0a84ea4 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -37,6 +37,7 @@ use futures::{ use polkadot_node_primitives::{ AvailableData, PoV, SignedDisputeStatement, SignedFullStatement, Statement, ValidationResult, + BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem_util::{ self as util, @@ -415,7 +416,12 @@ async fn request_candidate_validation( let (tx, rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromChainState(candidate, pov, tx)) + .send_message(CandidateValidationMessage::ValidateFromChainState( + candidate, + pov, + BACKING_EXECUTION_TIMEOUT, + tx, + )) .await; match rx.await { diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 68c1e30dfd31..a262dd45d470 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -317,9 +317,10 @@ fn backing_second_works() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate.descriptor() => { + ) if pov == pov && &c == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -476,9 +477,10 @@ fn backing_works() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate_a.descriptor() => { + ) if pov == pov 
&& &c == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -669,9 +671,10 @@ fn backing_works_while_validation_ongoing() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate_a.descriptor() => { + ) if pov == pov && &c == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { // we never validate the candidate. our local node // shouldn't issue any statements. std::mem::forget(tx); @@ -834,9 +837,10 @@ fn backing_misbehavior_works() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate_a.descriptor() => { + ) if pov == pov && &c == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -980,9 +984,10 @@ fn backing_dont_second_invalid() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate_a.descriptor() => { + ) if pov == pov && &c == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1008,9 +1013,10 @@ fn backing_dont_second_invalid() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate_b.descriptor() => { + ) if pov == pov && &c == candidate_b.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -1138,9 +1144,10 @@ fn backing_second_after_first_fails_works() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate.descriptor() => { + ) if pov == pov && &c == candidate.descriptor() && timeout == 
BACKING_EXECUTION_TIMEOUT => { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1186,6 +1193,7 @@ fn backing_second_after_first_fails_works() { _, pov, _, + _, ) ) => { assert_eq!(&*pov, &pov_to_second); @@ -1270,9 +1278,10 @@ fn backing_works_after_failed_validation() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, tx, ) - ) if pov == pov && &c == candidate.descriptor() => { + ) if pov == pov && &c == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); } ); @@ -1646,9 +1655,10 @@ fn retry_works() { CandidateValidationMessage::ValidateFromChainState( c, pov, + timeout, _tx, ) - ) if pov == pov && &c == candidate.descriptor() + ) if pov == pov && &c == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT ); virtual_overseer }); diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml index eae0881d2da3..b274f3796aa3 100644 --- a/node/core/bitfield-signing/Cargo.toml +++ b/node/core/bitfield-signing/Cargo.toml @@ -1,18 +1,18 @@ [package] name = "polkadot-node-core-bitfield-signing" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } wasm-timer = "0.2.5" -thiserror = "1.0.26" +thiserror = "1.0.30" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index 334166bbfb2a..103cd8698963 100644 --- a/node/core/candidate-validation/Cargo.toml +++ 
b/node/core/candidate-validation/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "polkadot-node-core-candidate-validation" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] async-trait = "0.1.51" futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", git = "https://github.com/paritytech/substrate", branch = "master" } parity-scale-codec = { version = "2.3.1", default-features = false, features = ["bit-vec", "derive"] } diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 2862d4c3f30e..c9e78db77c47 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -35,7 +35,7 @@ use polkadot_node_subsystem::{ CandidateValidationMessage, RuntimeApiMessage, RuntimeApiRequest, ValidationFailed, }, overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, - SubsystemResult, + SubsystemResult, SubsystemSender, }; use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_parachain::primitives::{ValidationParams, ValidationResult as WasmValidationResult}; @@ -48,7 +48,7 @@ use parity_scale_codec::Encode; use futures::{channel::oneshot, prelude::*}; -use std::{path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc, time::Duration}; use async_trait::async_trait; @@ -120,7 +120,7 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext, { - let (mut validation_host, task) = polkadot_node_core_pvf::start( + let (validation_host, task) = polkadot_node_core_pvf::start( polkadot_node_core_pvf::Config::new(cache_path, program_path), pvf_metrics, ); @@ -135,78 +135,104 @@ where CandidateValidationMessage::ValidateFromChainState( descriptor, pov, + timeout, response_sender, ) => { - let _timer = metrics.time_validate_from_chain_state(); - - let res = spawn_validate_from_chain_state( - &mut ctx, - 
&mut validation_host, - descriptor, - pov, - &metrics, - ) - .await; - - match res { - Ok(x) => { - metrics.on_validation_event(&x); - let _ = response_sender.send(x); - }, - Err(e) => return Err(e), - } + let bg = { + let mut sender = ctx.sender().clone(); + let metrics = metrics.clone(); + let validation_host = validation_host.clone(); + + async move { + let _timer = metrics.time_validate_from_chain_state(); + let res = validate_from_chain_state( + &mut sender, + validation_host, + descriptor, + pov, + timeout, + &metrics, + ) + .await; + + metrics.on_validation_event(&res); + let _ = response_sender.send(res); + } + }; + + ctx.spawn("validate-from-chain-state", bg.boxed())?; }, CandidateValidationMessage::ValidateFromExhaustive( persisted_validation_data, validation_code, descriptor, pov, + timeout, response_sender, ) => { - let _timer = metrics.time_validate_from_exhaustive(); - - let res = validate_candidate_exhaustive( - &mut validation_host, - persisted_validation_data, - validation_code, - descriptor, - pov, - &metrics, - ) - .await; - - match res { - Ok(x) => { - metrics.on_validation_event(&x); - - if let Err(_e) = response_sender.send(x) { - tracing::warn!( - target: LOG_TARGET, - "Requester of candidate validation dropped", - ) - } - }, - Err(e) => return Err(e), - } + let bg = { + let metrics = metrics.clone(); + let validation_host = validation_host.clone(); + + async move { + let _timer = metrics.time_validate_from_exhaustive(); + let res = validate_candidate_exhaustive( + validation_host, + persisted_validation_data, + validation_code, + descriptor, + pov, + timeout, + &metrics, + ) + .await; + + metrics.on_validation_event(&res); + let _ = response_sender.send(res); + } + }; + + ctx.spawn("validate-from-exhaustive", bg.boxed())?; }, }, } } } -async fn runtime_api_request( - ctx: &mut Context, +struct RuntimeRequestFailed; + +async fn runtime_api_request( + sender: &mut Sender, relay_parent: Hash, request: RuntimeApiRequest, receiver: 
oneshot::Receiver>, -) -> SubsystemResult> +) -> Result where - Context: SubsystemContext, - Context: overseer::SubsystemContext, + Sender: SubsystemSender, { - ctx.send_message(RuntimeApiMessage::Request(relay_parent, request)).await; + sender + .send_message(RuntimeApiMessage::Request(relay_parent, request).into()) + .await; - receiver.await.map_err(Into::into) + receiver + .await + .map_err(|_| { + tracing::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped"); + + RuntimeRequestFailed + }) + .and_then(|res| { + res.map_err(|e| { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + err = ?e, + "Runtime API request internal error" + ); + + RuntimeRequestFailed + }) + }) } #[derive(Debug)] @@ -216,61 +242,57 @@ enum AssumptionCheckOutcome { BadRequest, } -async fn check_assumption_validation_data( - ctx: &mut Context, +async fn check_assumption_validation_data( + sender: &mut Sender, descriptor: &CandidateDescriptor, assumption: OccupiedCoreAssumption, -) -> SubsystemResult +) -> AssumptionCheckOutcome where - Context: SubsystemContext, - Context: overseer::SubsystemContext, + Sender: SubsystemSender, { let validation_data = { let (tx, rx) = oneshot::channel(); let d = runtime_api_request( - ctx, + sender, descriptor.relay_parent, RuntimeApiRequest::PersistedValidationData(descriptor.para_id, assumption, tx), rx, ) - .await?; + .await; match d { - Ok(None) | Err(_) => return Ok(AssumptionCheckOutcome::BadRequest), + Ok(None) | Err(RuntimeRequestFailed) => return AssumptionCheckOutcome::BadRequest, Ok(Some(d)) => d, } }; let persisted_validation_data_hash = validation_data.hash(); - SubsystemResult::Ok( - if descriptor.persisted_validation_data_hash == persisted_validation_data_hash { - let (code_tx, code_rx) = oneshot::channel(); - let validation_code = runtime_api_request( - ctx, - descriptor.relay_parent, - RuntimeApiRequest::ValidationCode(descriptor.para_id, assumption, code_tx), - code_rx, - ) - .await?; + if 
descriptor.persisted_validation_data_hash == persisted_validation_data_hash { + let (code_tx, code_rx) = oneshot::channel(); + let validation_code = runtime_api_request( + sender, + descriptor.relay_parent, + RuntimeApiRequest::ValidationCode(descriptor.para_id, assumption, code_tx), + code_rx, + ) + .await; - match validation_code { - Ok(None) | Err(_) => AssumptionCheckOutcome::BadRequest, - Ok(Some(v)) => AssumptionCheckOutcome::Matches(validation_data, v), - } - } else { - AssumptionCheckOutcome::DoesNotMatch - }, - ) + match validation_code { + Ok(None) | Err(RuntimeRequestFailed) => AssumptionCheckOutcome::BadRequest, + Ok(Some(v)) => AssumptionCheckOutcome::Matches(validation_data, v), + } + } else { + AssumptionCheckOutcome::DoesNotMatch + } } -async fn find_assumed_validation_data( - ctx: &mut Context, +async fn find_assumed_validation_data( + sender: &mut Sender, descriptor: &CandidateDescriptor, -) -> SubsystemResult +) -> AssumptionCheckOutcome where - Context: SubsystemContext, - Context: overseer::SubsystemContext, + Sender: SubsystemSender, { // The candidate descriptor has a `persisted_validation_data_hash` which corresponds to // one of up to two possible values that we can derive from the state of the @@ -287,41 +309,41 @@ where // Consider running these checks in parallel to reduce validation latency. 
for assumption in ASSUMPTIONS { - let outcome = check_assumption_validation_data(ctx, descriptor, *assumption).await?; + let outcome = check_assumption_validation_data(sender, descriptor, *assumption).await; match outcome { - AssumptionCheckOutcome::Matches(_, _) => return Ok(outcome), - AssumptionCheckOutcome::BadRequest => return Ok(outcome), + AssumptionCheckOutcome::Matches(_, _) => return outcome, + AssumptionCheckOutcome::BadRequest => return outcome, AssumptionCheckOutcome::DoesNotMatch => continue, } } - Ok(AssumptionCheckOutcome::DoesNotMatch) + AssumptionCheckOutcome::DoesNotMatch } -async fn spawn_validate_from_chain_state( - ctx: &mut Context, - validation_host: &mut ValidationHost, +async fn validate_from_chain_state( + sender: &mut Sender, + validation_host: ValidationHost, descriptor: CandidateDescriptor, pov: Arc, + timeout: Duration, metrics: &Metrics, -) -> SubsystemResult> +) -> Result where - Context: SubsystemContext, - Context: overseer::SubsystemContext, + Sender: SubsystemSender, { let (validation_data, validation_code) = - match find_assumed_validation_data(ctx, &descriptor).await? { + match find_assumed_validation_data(sender, &descriptor).await { AssumptionCheckOutcome::Matches(validation_data, validation_code) => (validation_data, validation_code), AssumptionCheckOutcome::DoesNotMatch => { // If neither the assumption of the occupied core having the para included or the assumption // of the occupied core timing out are valid, then the persisted_validation_data_hash in the descriptor // is not based on the relay parent and is thus invalid. 
- return Ok(Ok(ValidationResult::Invalid(InvalidCandidate::BadParent))) + return Ok(ValidationResult::Invalid(InvalidCandidate::BadParent)) }, AssumptionCheckOutcome::BadRequest => - return Ok(Err(ValidationFailed("Assumption Check: Bad request".into()))), + return Err(ValidationFailed("Assumption Check: Bad request".into())), }; let validation_result = validate_candidate_exhaustive( @@ -330,24 +352,25 @@ where validation_code, descriptor.clone(), pov, + timeout, metrics, ) .await; - if let Ok(Ok(ValidationResult::Valid(ref outputs, _))) = validation_result { + if let Ok(ValidationResult::Valid(ref outputs, _)) = validation_result { let (tx, rx) = oneshot::channel(); match runtime_api_request( - ctx, + sender, descriptor.relay_parent, RuntimeApiRequest::CheckValidationOutputs(descriptor.para_id, outputs.clone(), tx), rx, ) - .await? + .await { Ok(true) => {}, - Ok(false) => return Ok(Ok(ValidationResult::Invalid(InvalidCandidate::InvalidOutputs))), - Err(_) => - return Ok(Err(ValidationFailed("Check Validation Outputs: Bad request".into()))), + Ok(false) => return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidOutputs)), + Err(RuntimeRequestFailed) => + return Err(ValidationFailed("Check Validation Outputs: Bad request".into())), } } @@ -360,8 +383,9 @@ async fn validate_candidate_exhaustive( validation_code: ValidationCode, descriptor: CandidateDescriptor, pov: Arc, + timeout: Duration, metrics: &Metrics, -) -> SubsystemResult> { +) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); let validation_code_hash = validation_code.hash(); @@ -378,7 +402,7 @@ async fn validate_candidate_exhaustive( &*pov, &validation_code_hash, ) { - return Ok(Ok(ValidationResult::Invalid(e))) + return Ok(ValidationResult::Invalid(e)) } let raw_validation_code = match sp_maybe_compressed_blob::decompress( @@ -390,7 +414,7 @@ async fn validate_candidate_exhaustive( tracing::debug!(target: LOG_TARGET, err=?e, "Invalid validation code"); // If the validation code 
is invalid, the candidate certainly is. - return Ok(Ok(ValidationResult::Invalid(InvalidCandidate::CodeDecompressionFailure))) + return Ok(ValidationResult::Invalid(InvalidCandidate::CodeDecompressionFailure)) }, }; @@ -401,7 +425,7 @@ async fn validate_candidate_exhaustive( tracing::debug!(target: LOG_TARGET, err=?e, "Invalid PoV code"); // If the PoV is invalid, the candidate certainly is. - return Ok(Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure))) + return Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure)) }, }; @@ -413,7 +437,7 @@ async fn validate_candidate_exhaustive( }; let result = validation_backend - .validate_candidate(raw_validation_code.to_vec(), params) + .validate_candidate(raw_validation_code.to_vec(), timeout, params) .await; if let Err(ref e) = result { @@ -424,7 +448,7 @@ async fn validate_candidate_exhaustive( ); } - let result = match result { + match result { Err(ValidationError::InternalError(e)) => Err(ValidationFailed(e)), Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) => @@ -450,9 +474,7 @@ async fn validate_candidate_exhaustive( }; Ok(ValidationResult::Valid(outputs, persisted_validation_data)) }, - }; - - Ok(result) + } } #[async_trait] @@ -460,21 +482,24 @@ trait ValidationBackend { async fn validate_candidate( &mut self, raw_validation_code: Vec, + timeout: Duration, params: ValidationParams, ) -> Result; } #[async_trait] -impl ValidationBackend for &'_ mut ValidationHost { +impl ValidationBackend for ValidationHost { async fn validate_candidate( &mut self, raw_validation_code: Vec, + timeout: Duration, params: ValidationParams, ) -> Result { let (tx, rx) = oneshot::channel(); if let Err(err) = self .execute_pvf( Pvf::from_code(raw_validation_code), + timeout, params.encode(), polkadot_node_core_pvf::Priority::Normal, tx, diff --git a/node/core/candidate-validation/src/tests.rs b/node/core/candidate-validation/src/tests.rs index 15314c35ed2f..30fa96ccc398 100644 
--- a/node/core/candidate-validation/src/tests.rs +++ b/node/core/candidate-validation/src/tests.rs @@ -19,6 +19,7 @@ use assert_matches::assert_matches; use futures::executor; use polkadot_node_subsystem::messages::AllMessages; use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_primitives::v1::{HeadData, UpwardMessage}; use sp_core::testing::TaskExecutor; use sp_keyring::Sr25519Keyring; @@ -52,11 +53,15 @@ fn correctly_checks_included_assumption() { candidate.para_id = para_id; let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + let (mut ctx, mut ctx_handle) = + test_helpers::make_subsystem_context::(pool.clone()); - let (check_fut, check_result) = - check_assumption_validation_data(&mut ctx, &candidate, OccupiedCoreAssumption::Included) - .remote_handle(); + let (check_fut, check_result) = check_assumption_validation_data( + ctx.sender(), + &candidate, + OccupiedCoreAssumption::Included, + ) + .remote_handle(); let test_fut = async move { assert_matches!( @@ -89,7 +94,7 @@ fn correctly_checks_included_assumption() { } ); - assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::Matches(o, v) => { + assert_matches!(check_result.await, AssumptionCheckOutcome::Matches(o, v) => { assert_eq!(o, validation_data); assert_eq!(v, validation_code); }); @@ -114,11 +119,15 @@ fn correctly_checks_timed_out_assumption() { candidate.para_id = para_id; let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + let (mut ctx, mut ctx_handle) = + test_helpers::make_subsystem_context::(pool.clone()); - let (check_fut, check_result) = - check_assumption_validation_data(&mut ctx, &candidate, OccupiedCoreAssumption::TimedOut) - .remote_handle(); + let (check_fut, check_result) = check_assumption_validation_data( + ctx.sender(), + &candidate, + 
OccupiedCoreAssumption::TimedOut, + ) + .remote_handle(); let test_fut = async move { assert_matches!( @@ -151,7 +160,7 @@ fn correctly_checks_timed_out_assumption() { } ); - assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::Matches(o, v) => { + assert_matches!(check_result.await, AssumptionCheckOutcome::Matches(o, v) => { assert_eq!(o, validation_data); assert_eq!(v, validation_code); }); @@ -174,11 +183,15 @@ fn check_is_bad_request_if_no_validation_data() { candidate.para_id = para_id; let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + let (mut ctx, mut ctx_handle) = + test_helpers::make_subsystem_context::(pool.clone()); - let (check_fut, check_result) = - check_assumption_validation_data(&mut ctx, &candidate, OccupiedCoreAssumption::Included) - .remote_handle(); + let (check_fut, check_result) = check_assumption_validation_data( + ctx.sender(), + &candidate, + OccupiedCoreAssumption::Included, + ) + .remote_handle(); let test_fut = async move { assert_matches!( @@ -198,7 +211,7 @@ fn check_is_bad_request_if_no_validation_data() { } ); - assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::BadRequest); + assert_matches!(check_result.await, AssumptionCheckOutcome::BadRequest); }; let test_fut = future::join(test_fut, check_fut); @@ -218,11 +231,15 @@ fn check_is_bad_request_if_no_validation_code() { candidate.para_id = para_id; let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + let (mut ctx, mut ctx_handle) = + test_helpers::make_subsystem_context::(pool.clone()); - let (check_fut, check_result) = - check_assumption_validation_data(&mut ctx, &candidate, OccupiedCoreAssumption::TimedOut) - .remote_handle(); + let (check_fut, check_result) = check_assumption_validation_data( + ctx.sender(), + &candidate, + OccupiedCoreAssumption::TimedOut, + ) + .remote_handle(); let test_fut = async move { 
assert_matches!( @@ -255,7 +272,7 @@ fn check_is_bad_request_if_no_validation_code() { } ); - assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::BadRequest); + assert_matches!(check_result.await, AssumptionCheckOutcome::BadRequest); }; let test_fut = future::join(test_fut, check_fut); @@ -274,11 +291,15 @@ fn check_does_not_match() { candidate.para_id = para_id; let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + let (mut ctx, mut ctx_handle) = + test_helpers::make_subsystem_context::(pool.clone()); - let (check_fut, check_result) = - check_assumption_validation_data(&mut ctx, &candidate, OccupiedCoreAssumption::Included) - .remote_handle(); + let (check_fut, check_result) = check_assumption_validation_data( + ctx.sender(), + &candidate, + OccupiedCoreAssumption::Included, + ) + .remote_handle(); let test_fut = async move { assert_matches!( @@ -298,7 +319,7 @@ fn check_does_not_match() { } ); - assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::DoesNotMatch); + assert_matches!(check_result.await, AssumptionCheckOutcome::DoesNotMatch); }; let test_fut = future::join(test_fut, check_fut); @@ -320,6 +341,7 @@ impl ValidationBackend for MockValidatorBackend { async fn validate_candidate( &mut self, _raw_validation_code: Vec, + _timeout: Duration, _params: ValidationParams, ) -> Result { self.result.clone() @@ -363,9 +385,9 @@ fn candidate_validation_ok_is_ok() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), )) - .unwrap() .unwrap(); assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { @@ -406,9 +428,9 @@ fn candidate_validation_bad_return_is_invalid() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), )) - .unwrap() .unwrap(); assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::ExecutionError(_))); @@ -442,9 +464,9 @@ fn 
candidate_validation_timeout_is_internal_error() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), - )) - .unwrap(); + )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))); } @@ -477,9 +499,9 @@ fn candidate_validation_code_mismatch_is_invalid() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), )) - .unwrap() .unwrap(); assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::CodeHashMismatch)); @@ -517,9 +539,9 @@ fn compressed_code_works() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), - )) - .unwrap(); + )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); } @@ -557,9 +579,9 @@ fn code_decompression_failure_is_invalid() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), - )) - .unwrap(); + )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::CodeDecompressionFailure))); } @@ -598,9 +620,9 @@ fn pov_decompression_failure_is_invalid() { validation_code, descriptor, Arc::new(pov), + Duration::from_secs(0), &Default::default(), - )) - .unwrap(); + )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure))); } diff --git a/node/core/chain-api/Cargo.toml b/node/core/chain-api/Cargo.toml index ffea3a4e559a..6ce2f7b2b7e8 100644 --- a/node/core/chain-api/Cargo.toml +++ b/node/core/chain-api/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-node-core-chain-api" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } diff --git a/node/core/chain-api/src/tests.rs 
b/node/core/chain-api/src/tests.rs index 22b6dbed546a..3f28a3e269f3 100644 --- a/node/core/chain-api/src/tests.rs +++ b/node/core/chain-api/src/tests.rs @@ -96,6 +96,7 @@ impl HeaderBackend for TestClient { finalized_number, number_leaves: 0, finalized_state: None, + block_gap: None, } } fn number(&self, hash: Hash) -> sp_blockchain::Result> { diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index 64a84cdf29be..2e0ab2c89e32 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "polkadot-node-core-chain-selection" description = "Chain Selection Subsystem" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" futures-timer = "3" -tracing = "0.1.28" +tracing = "0.1.29" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } kvdb = "0.10.0" -thiserror = "1.0.26" +thiserror = "1.0.30" parity-scale-codec = "2" [dev-dependencies] diff --git a/node/core/dispute-coordinator/Cargo.toml b/node/core/dispute-coordinator/Cargo.toml index 0cb6bcc2bf3f..747b06ec0c14 100644 --- a/node/core/dispute-coordinator/Cargo.toml +++ b/node/core/dispute-coordinator/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] bitvec = { version = "0.20.1", default-features = false, features = ["alloc"] } futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" parity-scale-codec = "2" kvdb = "0.10.0" derive_more = "0.99.14" -thiserror = "1.0.26" +thiserror = "1.0.30" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } diff --git 
a/node/core/dispute-coordinator/src/real/mod.rs b/node/core/dispute-coordinator/src/real/mod.rs index 8b41915bf39f..eb8d5738226d 100644 --- a/node/core/dispute-coordinator/src/real/mod.rs +++ b/node/core/dispute-coordinator/src/real/mod.rs @@ -31,6 +31,9 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +use futures::{channel::oneshot, prelude::*}; +use kvdb::KeyValueDB; +use parity_scale_codec::{Decode, Encode, Error as CodecError}; use polkadot_node_primitives::{ CandidateVotes, DisputeMessage, DisputeMessageCheckError, SignedDisputeStatement, DISPUTE_WINDOW, @@ -39,7 +42,7 @@ use polkadot_node_subsystem::{ errors::{ChainApiError, RuntimeApiError}, messages::{ BlockDescription, DisputeCoordinatorMessage, DisputeDistributionMessage, - DisputeParticipationMessage, ImportStatementsResult, + DisputeParticipationMessage, ImportStatementsResult, RuntimeApiMessage, RuntimeApiRequest, }, overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, }; @@ -47,13 +50,10 @@ use polkadot_node_subsystem_util::rolling_session_window::{ RollingSessionWindow, SessionWindowUpdate, }; use polkadot_primitives::v1::{ - BlockNumber, CandidateHash, CandidateReceipt, DisputeStatement, Hash, SessionIndex, - SessionInfo, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, + DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; - -use futures::{channel::oneshot, prelude::*}; -use kvdb::KeyValueDB; -use parity_scale_codec::{Decode, Encode, Error as CodecError}; use sc_keystore::LocalKeystore; use crate::metrics::Metrics; @@ -350,6 +350,8 @@ where &mut overlay_db, &mut state, update.activated.into_iter().map(|a| a.hash), + clock.now(), + &metrics, ) .await?; if !state.recovery_state.complete() { @@ -478,6 +480,8 @@ async fn handle_new_activations( 
overlay_db: &mut OverlayedBackend<'_, impl Backend>, state: &mut State, new_activations: impl IntoIterator, + now: u64, + metrics: &Metrics, ) -> Result<(), Error> { for new_leaf in new_activations { match state.rolling_session_window.cache_session_info_for_head(ctx, new_leaf).await { @@ -487,7 +491,6 @@ async fn handle_new_activations( err = ?e, "Failed to update session cache for disputes", ); - continue }, Ok(SessionWindowUpdate::Initialized { window_end, .. }) | @@ -502,10 +505,217 @@ async fn handle_new_activations( db::v1::note_current_session(overlay_db, session)?; } }, - _ => {}, + Ok(SessionWindowUpdate::Unchanged) => {}, + }; + scrape_on_chain_votes(ctx, overlay_db, state, new_leaf, now, metrics).await?; + } + + Ok(()) +} + +/// Scrapes on-chain votes (backing votes and concluded disputes) for a active leaf of the relay chain. +async fn scrape_on_chain_votes( + ctx: &mut (impl SubsystemContext + + overseer::SubsystemContext), + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + state: &mut State, + new_leaf: Hash, + now: u64, + metrics: &Metrics, +) -> Result<(), Error> { + // obtain the concluded disputes as well as the candidate backing votes + // from the new leaf + let ScrapedOnChainVotes { session, backing_validators_per_candidate, disputes } = { + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request( + new_leaf, + RuntimeApiRequest::FetchOnChainVotes(tx), + )) + .await; + match rx.await { + Ok(Ok(Some(val))) => val, + Ok(Ok(None)) => { + tracing::trace!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + "No on chain votes stored for relay chain leaf"); + return Ok(()) + }, + Ok(Err(e)) => { + tracing::debug!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + error = ?e, + "Could not retrieve on chain votes due to an API error"); + return Ok(()) + }, + Err(e) => { + tracing::debug!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + error = ?e, + "Could not retrieve onchain votes due to oneshot cancellation"); + 
return Ok(()) + }, } + }; + + if backing_validators_per_candidate.is_empty() && disputes.is_empty() { + return Ok(()) } + // Obtain the session info, for sake of `ValidatorId`s + // either from the rolling session window. + // Must be called _after_ `fn cache_session_info_for_head` + // which guarantees that the session info is available + // for the current session. + let session_info: SessionInfo = + if let Some(session_info) = state.rolling_session_window.session_info(session) { + session_info.clone() + } else { + tracing::warn!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + "Could not retrieve session info from rolling session window"); + return Ok(()) + }; + + // Scraped on-chain backing votes for the candidates with + // the new active leaf as if we received them via gossip. + for (candidate_receipt, backers) in backing_validators_per_candidate { + let candidate_hash = candidate_receipt.hash(); + let statements = backers.into_iter().filter_map(|(validator_index, attestation)| { + let validator_public: ValidatorId = session_info + .validators + .get(validator_index.0 as usize) + .or_else(|| { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + "Missing public key for validator {:?}", + &validator_index); + None + }) + .cloned()?; + let validator_signature = attestation.signature().clone(); + let valid_statement_kind = match attestation.to_compact_statement(candidate_hash) { + CompactStatement::Seconded(_) => + ValidDisputeStatementKind::BackingSeconded(new_leaf), + CompactStatement::Valid(_) => ValidDisputeStatementKind::BackingValid(new_leaf), + }; + let signed_dispute_statement = + SignedDisputeStatement::new_unchecked_from_trusted_source( + DisputeStatement::Valid(valid_statement_kind), + candidate_hash, + session, + validator_public, + validator_signature, + ); + Some((signed_dispute_statement, validator_index)) + }); + let import_result = handle_import_statements( + ctx, + overlay_db, + state, + candidate_hash, + 
MaybeCandidateReceipt::Provides(candidate_receipt), + session, + statements, + now, + metrics, + ) + .await?; + match import_result { + ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET, + relay_parent = ?new_leaf, + ?session, + "Imported backing vote from on-chain"), + ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET, + relay_parent = ?new_leaf, + ?session, + "Attempted import of on-chain backing votes failed"), + } + } + + if disputes.is_empty() { + return Ok(()) + } + + // Import concluded disputes from on-chain, this already went through a vote so it's assumed + // as verified. This will only be stored, gossiping it is not necessary. + + // First try to obtain all the backings which ultimately contain the candidate + // receipt which we need. + + for DisputeStatementSet { candidate_hash, session, statements } in disputes { + let statements = statements + .into_iter() + .filter_map(|(dispute_statement, validator_index, validator_signature)| { + let session_info: SessionInfo = if let Some(session_info) = + state.rolling_session_window.session_info(session) + { + session_info.clone() + } else { + tracing::warn!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + ?session, + "Could not retrieve session info from rolling session window for recently concluded dispute"); + return None + }; + + let validator_public: ValidatorId = session_info + .validators + .get(validator_index.0 as usize) + .or_else(|| { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?new_leaf, + ?session, + "Missing public key for validator {:?} that participated in concluded dispute", + &validator_index); + None + }) + .cloned()?; + + Some(( + SignedDisputeStatement::new_unchecked_from_trusted_source( + dispute_statement, + candidate_hash, + session, + validator_public, + validator_signature, + ), + validator_index, + )) + }) + .collect::>(); + let import_result = handle_import_statements( + ctx, + overlay_db, + state, + candidate_hash, 
+ // TODO + MaybeCandidateReceipt::AssumeBackingVotePresent, + session, + statements, + now, + metrics, + ) + .await?; + match import_result { + ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET, + relay_parent = ?new_leaf, + ?candidate_hash, + ?session, + "Imported statement of conlcuded dispute from on-chain"), + ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET, + relay_parent = ?new_leaf, + ?candidate_hash, + ?session, + "Attempted import of on-chain statement of concluded dispute failed"), + } + } Ok(()) } @@ -530,7 +740,7 @@ async fn handle_incoming( overlay_db, state, candidate_hash, - candidate_receipt, + MaybeCandidateReceipt::Provides(candidate_receipt), session, statements, now, @@ -648,14 +858,22 @@ fn insert_into_statement_vec( vec.insert(pos, (tag, val_index, val_signature)); } +#[derive(Debug, Clone)] +enum MaybeCandidateReceipt { + /// Directly provides the candiate receipt. + Provides(CandidateReceipt), + /// Assumes it was seen before by means of seconded message. 
+ AssumeBackingVotePresent, +} + async fn handle_import_statements( ctx: &mut impl SubsystemContext, overlay_db: &mut OverlayedBackend<'_, impl Backend>, state: &mut State, candidate_hash: CandidateHash, - candidate_receipt: CandidateReceipt, + candidate_receipt: MaybeCandidateReceipt, session: SessionIndex, - statements: Vec<(SignedDisputeStatement, ValidatorIndex)>, + statements: impl IntoIterator, now: Timestamp, metrics: &Metrics, ) -> Result { @@ -664,7 +882,7 @@ async fn handle_import_statements( return Ok(ImportStatementsResult::InvalidImport) } - let validators = match state.rolling_session_window.session_info(session) { + let session_info = match state.rolling_session_window.session_info(session) { None => { tracing::debug!( target: LOG_TARGET, @@ -675,29 +893,40 @@ async fn handle_import_statements( return Ok(ImportStatementsResult::InvalidImport) }, - Some(info) => { - tracing::debug!( - target: DEBUG_LOG_TARGET, - "SessionInfo received for SessionIndex {:?} CandidateHash {:?}.", - session, - candidate_hash, - ); - info.validators.clone() - }, + Some(info) => info, }; + let validators = session_info.validators.clone(); let n_validators = validators.len(); let supermajority_threshold = polkadot_primitives::v1::supermajority_threshold(n_validators); - let mut votes = overlay_db + // In case we are not provided with a candidate receipt + // we operate under the assumption, that a previous vote + // which included a `CandidateReceipt` was seen. + // This holds since every block is preceeded by the `Backing`-phase. + // + // There is one exception: A sufficiently sophisticated attacker could prevent + // us from seeing the backing votes by witholding arbitrary blocks, and hence we do + // not have a `CandidateReceipt` available. + let mut votes = match overlay_db .load_candidate_votes(session, &candidate_hash)? 
.map(CandidateVotes::from) - .unwrap_or_else(|| CandidateVotes { - candidate_receipt: candidate_receipt.clone(), - valid: Vec::new(), - invalid: Vec::new(), - }); + { + Some(votes) => votes, + None => + if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt { + CandidateVotes { candidate_receipt, valid: Vec::new(), invalid: Vec::new() } + } else { + tracing::warn!( + target: LOG_TARGET, + session, + "Missing info for session which has an active dispute", + ); + return Ok(ImportStatementsResult::InvalidImport) + }, + }; + let candidate_receipt = votes.candidate_receipt.clone(); // Update candidate votes. for (statement, val_index) in statements { @@ -941,7 +1170,7 @@ async fn issue_local_statement( overlay_db, state, candidate_hash, - candidate_receipt, + MaybeCandidateReceipt::Provides(candidate_receipt), session, statements, now, @@ -962,7 +1191,7 @@ async fn issue_local_statement( target: LOG_TARGET, ?candidate_hash, ?session, - "handle_import_statements` considers our own votes invalid!" + "`handle_import_statements` considers our own votes invalid!" ); }, Ok(ImportStatementsResult::ValidImport) => { @@ -970,7 +1199,7 @@ async fn issue_local_statement( target: LOG_TARGET, ?candidate_hash, ?session, - "handle_import_statements` successfully imported our vote!" + "`handle_import_statements` successfully imported our vote!" 
); }, } diff --git a/node/core/dispute-coordinator/src/real/tests.rs b/node/core/dispute-coordinator/src/real/tests.rs index c8709da92916..147fcede8a58 100644 --- a/node/core/dispute-coordinator/src/real/tests.rs +++ b/node/core/dispute-coordinator/src/real/tests.rs @@ -203,6 +203,17 @@ impl TestState { } ) } + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _new_leaf, + RuntimeApiRequest::FetchOnChainVotes(tx), + )) => { + // add some `BackedCandidates` or resolved disputes here as needed + tx.send(Ok(Some(ScrapedOnChainVotes::default()))).unwrap(); + } + ) } async fn handle_resume_sync( diff --git a/node/core/dispute-participation/Cargo.toml b/node/core/dispute-participation/Cargo.toml index 5b1843d02b9c..0813197eaa41 100644 --- a/node/core/dispute-participation/Cargo.toml +++ b/node/core/dispute-participation/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "polkadot-node-core-dispute-participation" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -thiserror = "1.0.26" -tracing = "0.1.28" +thiserror = "1.0.30" +tracing = "0.1.29" polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/node/core/dispute-participation/src/lib.rs b/node/core/dispute-participation/src/lib.rs index 22416ae00721..ba311c379c32 100644 --- a/node/core/dispute-participation/src/lib.rs +++ b/node/core/dispute-participation/src/lib.rs @@ -22,7 +22,7 @@ use futures::{channel::oneshot, prelude::*}; -use polkadot_node_primitives::ValidationResult; +use polkadot_node_primitives::{ValidationResult, APPROVAL_EXECUTION_TIMEOUT}; use polkadot_node_subsystem::{ errors::{RecoveryError, RuntimeApiError}, messages::{ @@ -321,11 +321,16 @@ async fn participate( // we issue a request to validate the candidate with the provided exhaustive // parameters + // + // We use the approval execution timeout 
because this is intended to + // be run outside of backing and therefore should be subject to the + // same level of leeway. ctx.send_message(CandidateValidationMessage::ValidateFromExhaustive( available_data.validation_data, validation_code, candidate_receipt.descriptor.clone(), available_data.pov, + APPROVAL_EXECUTION_TIMEOUT, validation_tx, )) .await; diff --git a/node/core/dispute-participation/src/tests.rs b/node/core/dispute-participation/src/tests.rs index e2c98af53b5e..513f673f81a2 100644 --- a/node/core/dispute-participation/src/tests.rs +++ b/node/core/dispute-participation/src/tests.rs @@ -295,8 +295,8 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, tx) - ) => { + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == APPROVAL_EXECUTION_TIMEOUT => { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); }, "overseer did not receive candidate validation message", @@ -331,8 +331,8 @@ fn cast_invalid_vote_if_validation_passes_but_commitments_dont_match() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, tx) - ) => { + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == APPROVAL_EXECUTION_TIMEOUT => { let mut commitments = CandidateCommitments::default(); // this should lead to a commitments hash mismatch commitments.processed_downward_messages = 42; @@ -371,8 +371,8 @@ fn cast_valid_vote_if_validation_passes() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, tx) - ) => { + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == APPROVAL_EXECUTION_TIMEOUT => { 
tx.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))).unwrap(); }, "overseer did not receive candidate validation message", @@ -408,8 +408,8 @@ fn failure_to_store_available_data_does_not_preclude_participation() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, tx) - ) => { + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == APPROVAL_EXECUTION_TIMEOUT => { tx.send(Err(ValidationFailed("fail".to_string()))).unwrap(); }, "overseer did not receive candidate validation message", diff --git a/node/core/parachains-inherent/Cargo.toml b/node/core/parachains-inherent/Cargo.toml index 4a40d3f1e383..9a81398e7594 100644 --- a/node/core/parachains-inherent/Cargo.toml +++ b/node/core/parachains-inherent/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "polkadot-node-core-parachains-inherent" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" futures-timer = "3.0.2" -tracing = "0.1.28" -thiserror = "1.0.26" +tracing = "0.1.29" +thiserror = "1.0.30" async-trait = "0.1.51" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index a5313e348226..bc057e87624e 100644 --- a/node/core/provisioner/Cargo.toml +++ b/node/core/provisioner/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "polkadot-node-core-provisioner" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] bitvec = { version = "0.20.1", default-features = false, features = ["alloc"] } futures = "0.3.17" -tracing = "0.1.28" -thiserror = "1.0.26" +tracing = "0.1.29" +thiserror = "1.0.30" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } 
polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 51f4c8a7b173..a235fcb6d032 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-pvf" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -15,9 +15,9 @@ async-process = "1.1.0" assert_matches = "1.4.0" futures = "0.3.17" futures-timer = "3.0.2" -libc = "0.2.103" +libc = "0.2.105" slotmap = "1.0" -tracing = "0.1.28" +tracing = "0.1.29" pin-project = "1.0.8" rand = "0.8.3" parity-scale-codec = { version = "2.3.1", default-features = false, features = ["derive"] } diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs index dfe08afe1a70..e69478479efc 100644 --- a/node/core/pvf/src/artifacts.rs +++ b/node/core/pvf/src/artifacts.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use crate::error::PrepareError; use always_assert::always; use async_std::path::{Path, PathBuf}; use parity_scale_codec::{Decode, Encode}; @@ -23,30 +24,19 @@ use std::{ time::{Duration, SystemTime}, }; -/// A final product of preparation process. Contains either a ready to run compiled artifact or -/// a description what went wrong. +/// A wrapper for the compiled PVF code. #[derive(Encode, Decode)] -pub enum Artifact { - /// During the prevalidation stage of preparation an issue was found with the PVF. - PrevalidationErr(String), - /// Compilation failed for the given PVF. - PreparationErr(String), - /// This state indicates that the process assigned to prepare the artifact wasn't responsible - /// or were killed. This state is reported by the validation host (not by the worker). - DidntMakeIt, - /// The PVF passed all the checks and is ready for execution. 
- Compiled { compiled_artifact: Vec }, -} +pub struct CompiledArtifact(Vec); -impl Artifact { - /// Serializes this struct into a byte buffer. - pub fn serialize(&self) -> Vec { - self.encode() +impl CompiledArtifact { + pub fn new(code: Vec) -> Self { + Self(code) } +} - /// Deserialize the given byte buffer to an artifact. - pub fn deserialize(mut bytes: &[u8]) -> Result { - Artifact::decode(&mut bytes).map_err(|e| format!("{:?}", e)) +impl AsRef<[u8]> for CompiledArtifact { + fn as_ref(&self) -> &[u8] { + self.0.as_slice() } } @@ -117,6 +107,9 @@ pub enum ArtifactState { }, /// A task to prepare this artifact is scheduled. Preparing, + /// The code couldn't be compiled due to an error. Such artifacts + /// never reach the executor and stay in the host's memory. + FailedToProcess(PrepareError), } /// A container of all known artifact ids and their states. diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs index f0ba95515054..8afd0ddddb4b 100644 --- a/node/core/pvf/src/error.rs +++ b/node/core/pvf/src/error.rs @@ -14,6 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use parity_scale_codec::{Decode, Encode}; + +/// An error that occurred during the prepare part of the PVF pipeline. +#[derive(Debug, Clone, Encode, Decode)] +pub enum PrepareError { + /// During the prevalidation stage of preparation an issue was found with the PVF. + Prevalidation(String), + /// Compilation failed for the given PVF. + Preparation(String), + /// This state indicates that the process assigned to prepare the artifact wasn't responsible + /// or were killed. This state is reported by the validation host (not by the worker). + DidNotMakeIt, +} + /// A error raised during validation of the candidate. #[derive(Debug, Clone)] pub enum ValidationError { @@ -54,3 +68,14 @@ pub enum InvalidCandidate { /// PVF execution (compilation is not included) took more time than was allotted. 
HardTimeout, } + +impl From for ValidationError { + fn from(error: PrepareError) -> Self { + let error_str = match error { + PrepareError::Prevalidation(err) => err, + PrepareError::Preparation(err) => err, + PrepareError::DidNotMakeIt => "preparation timeout".to_owned(), + }; + ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedError(error_str)) + } +} diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index 09e848196820..9376d7d76f78 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -38,11 +38,17 @@ slotmap::new_key_type! { struct Worker; } #[derive(Debug)] pub enum ToQueue { - Enqueue { artifact: ArtifactPathId, params: Vec, result_tx: ResultSender }, + Enqueue { + artifact: ArtifactPathId, + execution_timeout: Duration, + params: Vec, + result_tx: ResultSender, + }, } struct ExecuteJob { artifact: ArtifactPathId, + execution_timeout: Duration, params: Vec, result_tx: ResultSender, } @@ -167,14 +173,14 @@ async fn purge_dead(metrics: &Metrics, workers: &mut Workers) { } fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) { - let ToQueue::Enqueue { artifact, params, result_tx } = to_queue; + let ToQueue::Enqueue { artifact, execution_timeout, params, result_tx } = to_queue; tracing::debug!( target: LOG_TARGET, validation_code_hash = ?artifact.id.code_hash, "enqueueing an artifact for execution", ); queue.metrics.execute_enqueued(); - let job = ExecuteJob { artifact, params, result_tx }; + let job = ExecuteJob { artifact, execution_timeout, params, result_tx }; if let Some(available) = queue.workers.find_available() { assign(queue, available, job); @@ -326,7 +332,13 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { queue.mux.push( async move { let _timer = execution_timer; - let outcome = super::worker::start_work(idle, job.artifact.clone(), job.params).await; + let outcome = super::worker::start_work( + idle, + job.artifact.clone(), + 
job.execution_timeout, + job.params, + ) + .await; QueueEvent::StartWork(worker, outcome, job.artifact.id, job.result_tx) } .boxed(), diff --git a/node/core/pvf/src/execute/worker.rs b/node/core/pvf/src/execute/worker.rs index 97fe5aec3dbf..a1c4e6d55788 100644 --- a/node/core/pvf/src/execute/worker.rs +++ b/node/core/pvf/src/execute/worker.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use crate::{ - artifacts::{Artifact, ArtifactPathId}, + artifacts::{ArtifactPathId, CompiledArtifact}, executor_intf::TaskExecutor, worker_common::{ bytes_to_path, framed_recv, framed_send, path_to_bytes, spawn_with_program_path, @@ -34,8 +34,6 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::ValidationResult; use std::time::{Duration, Instant}; -const EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); - /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. /// /// The program should be able to handle ` execute-worker ` invocation. @@ -51,8 +49,8 @@ pub enum Outcome { /// PVF execution completed successfully and the result is returned. The worker is ready for /// another job. Ok { result_descriptor: ValidationResult, duration_ms: u64, idle_worker: IdleWorker }, - /// The candidate validation failed. It may be for example because the preparation process - /// produced an error or the wasm execution triggered a trap. + /// The candidate validation failed. It may be for example because the wasm execution triggered a trap. + /// Errors related to the preparation process are not expected to be encountered by the execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. 
@@ -69,6 +67,7 @@ pub enum Outcome { pub async fn start_work( worker: IdleWorker, artifact: ArtifactPathId, + execution_timeout: Duration, validation_params: Vec, ) -> Outcome { let IdleWorker { mut stream, pid } = worker; @@ -108,7 +107,7 @@ pub async fn start_work( Ok(response) => response, } }, - _ = Delay::new(EXECUTION_TIMEOUT).fuse() => { + _ = Delay::new(execution_timeout).fuse() => { tracing::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -217,18 +216,12 @@ async fn validate_using_artifact( Ok(b) => b, }; - let artifact = match Artifact::deserialize(&artifact_bytes) { + let artifact = match CompiledArtifact::decode(&mut artifact_bytes.as_slice()) { Err(e) => return Response::InternalError(format!("artifact deserialization: {:?}", e)), Ok(a) => a, }; - let compiled_artifact = match &artifact { - Artifact::PrevalidationErr(msg) => return Response::format_invalid("prevalidation", msg), - Artifact::PreparationErr(msg) => return Response::format_invalid("preparation", msg), - Artifact::DidntMakeIt => return Response::format_invalid("preparation timeout", ""), - - Artifact::Compiled { compiled_artifact } => compiled_artifact, - }; + let compiled_artifact = artifact.as_ref(); let validation_started_at = Instant::now(); let descriptor_bytes = match unsafe { diff --git a/node/core/pvf/src/executor_intf.rs b/node/core/pvf/src/executor_intf.rs index 732894546745..0cdfd40e6414 100644 --- a/node/core/pvf/src/executor_intf.rs +++ b/node/core/pvf/src/executor_intf.rs @@ -41,7 +41,10 @@ const CONFIG: Config = Config { // wasm pages. // // Thus let's assume that 32 pages or 2 MiB are used for these needs. - max_memory_pages: Some(2048 + 32), + // + // Note that the memory limit is specified in bytes, so we multiply this value + // by wasm page size -- 64 KiB. 
+ max_memory_size: Some((2048 + 32) * 65536), heap_pages: 2048, allow_missing_func_imports: true, @@ -65,6 +68,7 @@ const CONFIG: Config = Config { native_stack_max: 256 * 1024 * 1024, }), canonicalize_nans: true, + parallel_compilation: true, }, }; diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index 89b230bc90d7..292e37cdc30d 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -48,8 +48,8 @@ pub struct ValidationHost { } impl ValidationHost { - /// Execute PVF with the given code, parameters and priority. The result of execution will be sent - /// to the provided result sender. + /// Execute PVF with the given code, execution timeout, parameters and priority. + /// The result of execution will be sent to the provided result sender. /// /// This is async to accommodate the fact a possibility of back-pressure. In the vast majority of /// situations this function should return immediately. @@ -58,12 +58,13 @@ impl ValidationHost { pub async fn execute_pvf( &mut self, pvf: Pvf, + execution_timeout: Duration, params: Vec, priority: Priority, result_tx: ResultSender, ) -> Result<(), String> { self.to_host_tx - .send(ToHost::ExecutePvf { pvf, params, priority, result_tx }) + .send(ToHost::ExecutePvf { pvf, execution_timeout, params, priority, result_tx }) .await .map_err(|_| "the inner loop hung up".to_string()) } @@ -83,8 +84,16 @@ impl ValidationHost { } enum ToHost { - ExecutePvf { pvf: Pvf, params: Vec, priority: Priority, result_tx: ResultSender }, - HeadsUp { active_pvfs: Vec }, + ExecutePvf { + pvf: Pvf, + execution_timeout: Duration, + params: Vec, + priority: Priority, + result_tx: ResultSender, + }, + HeadsUp { + active_pvfs: Vec, + }, } /// Configuration for the validation host. 
@@ -200,6 +209,7 @@ pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future, result_tx: ResultSender, } @@ -210,11 +220,18 @@ struct PendingExecutionRequest { struct AwaitingPrepare(HashMap>); impl AwaitingPrepare { - fn add(&mut self, artifact_id: ArtifactId, params: Vec, result_tx: ResultSender) { - self.0 - .entry(artifact_id) - .or_default() - .push(PendingExecutionRequest { params, result_tx }); + fn add( + &mut self, + artifact_id: ArtifactId, + execution_timeout: Duration, + params: Vec, + result_tx: ResultSender, + ) { + self.0.entry(artifact_id).or_default().push(PendingExecutionRequest { + execution_timeout, + params, + result_tx, + }); } fn take(&mut self, artifact_id: &ArtifactId) -> Vec { @@ -327,8 +344,7 @@ async fn run( .await); }, from_prepare_queue = from_prepare_queue_rx.next() => { - let prepare::FromQueue::Prepared(artifact_id) - = break_if_fatal!(from_prepare_queue.ok_or(Fatal)); + let from_queue = break_if_fatal!(from_prepare_queue.ok_or(Fatal)); // Note that preparation always succeeds. 
// @@ -344,7 +360,7 @@ async fn run( &mut artifacts, &mut to_execute_queue_tx, &mut awaiting_prepare, - artifact_id, + from_queue, ).await); }, } @@ -360,7 +376,7 @@ async fn handle_to_host( to_host: ToHost, ) -> Result<(), Fatal> { match to_host { - ToHost::ExecutePvf { pvf, params, priority, result_tx } => { + ToHost::ExecutePvf { pvf, execution_timeout, params, priority, result_tx } => { handle_execute_pvf( cache_path, artifacts, @@ -368,6 +384,7 @@ async fn handle_to_host( execute_queue, awaiting_prepare, pvf, + execution_timeout, params, priority, result_tx, @@ -389,6 +406,7 @@ async fn handle_execute_pvf( execute_queue: &mut mpsc::Sender, awaiting_prepare: &mut AwaitingPrepare, pvf: Pvf, + execution_timeout: Duration, params: Vec, priority: Priority, result_tx: ResultSender, @@ -404,6 +422,7 @@ async fn handle_execute_pvf( execute_queue, execute::ToQueue::Enqueue { artifact: ArtifactPathId::new(artifact_id, cache_path), + execution_timeout, params, result_tx, }, @@ -417,7 +436,10 @@ async fn handle_execute_pvf( ) .await?; - awaiting_prepare.add(artifact_id, params, result_tx); + awaiting_prepare.add(artifact_id, execution_timeout, params, result_tx); + }, + ArtifactState::FailedToProcess(error) => { + let _ = result_tx.send(Err(ValidationError::from(error.clone()))); }, } } else { @@ -426,7 +448,7 @@ async fn handle_execute_pvf( artifacts.insert_preparing(artifact_id.clone()); send_prepare(prepare_queue, prepare::ToQueue::Enqueue { priority, pvf }).await?; - awaiting_prepare.add(artifact_id, params, result_tx); + awaiting_prepare.add(artifact_id, execution_timeout, params, result_tx); } return Ok(()) @@ -450,6 +472,7 @@ async fn handle_heads_up( // Already preparing. We don't need to send a priority amend either because // it can't get any lower than the background. }, + ArtifactState::FailedToProcess(_) => {}, } } else { // The artifact is unknown: register it and put a background job into the prepare queue. 
@@ -471,8 +494,10 @@ async fn handle_prepare_done( artifacts: &mut Artifacts, execute_queue: &mut mpsc::Sender, awaiting_prepare: &mut AwaitingPrepare, - artifact_id: ArtifactId, + from_queue: prepare::FromQueue, ) -> Result<(), Fatal> { + let prepare::FromQueue { artifact_id, result } = from_queue; + // Make some sanity checks and extract the current state. let state = match artifacts.artifact_state_mut(&artifact_id) { None => { @@ -493,23 +518,36 @@ async fn handle_prepare_done( never!("the artifact is already prepared: {:?}", artifact_id); return Ok(()) }, + Some(ArtifactState::FailedToProcess(_)) => { + // The reasoning is similar to the above, the artifact cannot be + // processed at this point. + never!("the artifact is already processed unsuccessfully: {:?}", artifact_id); + return Ok(()) + }, Some(state @ ArtifactState::Preparing) => state, }; // It's finally time to dispatch all the execution requests that were waiting for this artifact // to be prepared. let pending_requests = awaiting_prepare.take(&artifact_id); - for PendingExecutionRequest { params, result_tx } in pending_requests { + for PendingExecutionRequest { execution_timeout, params, result_tx } in pending_requests { if result_tx.is_canceled() { // Preparation could've taken quite a bit of time and the requester may be not interested // in execution anymore, in which case we just skip the request. continue } + // Don't send failed artifacts to the execution's queue. + if let Err(ref error) = result { + let _ = result_tx.send(Err(ValidationError::from(error.clone()))); + continue + } + send_execute( execute_queue, execute::ToQueue::Enqueue { artifact: ArtifactPathId::new(artifact_id.clone(), cache_path), + execution_timeout, params, result_tx, }, @@ -517,8 +555,10 @@ async fn handle_prepare_done( .await?; } - // Now consider the artifact prepared. 
- *state = ArtifactState::Prepared { last_time_needed: SystemTime::now() }; + *state = match result { + Ok(()) => ArtifactState::Prepared { last_time_needed: SystemTime::now() }, + Err(error) => ArtifactState::FailedToProcess(error.clone()), + }; Ok(()) } @@ -597,6 +637,8 @@ mod tests { use assert_matches::assert_matches; use futures::future::BoxFuture; + const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); + #[async_std::test] async fn pulse_test() { let pulse = pulse_every(Duration::from_millis(100)); @@ -840,9 +882,15 @@ mod tests { .await; let (result_tx, _result_rx) = oneshot::channel(); - host.execute_pvf(Pvf::from_discriminator(1), vec![], Priority::Critical, result_tx) - .await - .unwrap(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + vec![], + Priority::Critical, + result_tx, + ) + .await + .unwrap(); run_until( &mut test.run, @@ -862,13 +910,20 @@ mod tests { let mut host = test.host_handle(); let (result_tx, result_rx_pvf_1_1) = oneshot::channel(); - host.execute_pvf(Pvf::from_discriminator(1), b"pvf1".to_vec(), Priority::Normal, result_tx) - .await - .unwrap(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf1".to_vec(), + Priority::Normal, + result_tx, + ) + .await + .unwrap(); let (result_tx, result_rx_pvf_1_2) = oneshot::channel(); host.execute_pvf( Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, b"pvf1".to_vec(), Priority::Critical, result_tx, @@ -877,9 +932,15 @@ mod tests { .unwrap(); let (result_tx, result_rx_pvf_2) = oneshot::channel(); - host.execute_pvf(Pvf::from_discriminator(2), b"pvf2".to_vec(), Priority::Normal, result_tx) - .await - .unwrap(); + host.execute_pvf( + Pvf::from_discriminator(2), + TEST_EXECUTION_TIMEOUT, + b"pvf2".to_vec(), + Priority::Normal, + result_tx, + ) + .await + .unwrap(); assert_matches!( test.poll_and_recv_to_prepare_queue().await, @@ -895,7 +956,7 @@ mod tests { ); test.from_prepare_queue_tx - 
.send(prepare::FromQueue::Prepared(artifact_id(1))) + .send(prepare::FromQueue { artifact_id: artifact_id(1), result: Ok(()) }) .await .unwrap(); let result_tx_pvf_1_1 = assert_matches!( @@ -908,7 +969,7 @@ mod tests { ); test.from_prepare_queue_tx - .send(prepare::FromQueue::Prepared(artifact_id(2))) + .send(prepare::FromQueue { artifact_id: artifact_id(2), result: Ok(()) }) .await .unwrap(); let result_tx_pvf_2 = assert_matches!( @@ -947,9 +1008,15 @@ mod tests { let mut host = test.host_handle(); let (result_tx, result_rx) = oneshot::channel(); - host.execute_pvf(Pvf::from_discriminator(1), b"pvf1".to_vec(), Priority::Normal, result_tx) - .await - .unwrap(); + host.execute_pvf( + Pvf::from_discriminator(1), + TEST_EXECUTION_TIMEOUT, + b"pvf1".to_vec(), + Priority::Normal, + result_tx, + ) + .await + .unwrap(); assert_matches!( test.poll_and_recv_to_prepare_queue().await, @@ -957,7 +1024,7 @@ mod tests { ); test.from_prepare_queue_tx - .send(prepare::FromQueue::Prepared(artifact_id(1))) + .send(prepare::FromQueue { artifact_id: artifact_id(1), result: Ok(()) }) .await .unwrap(); diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs index 035d799ac594..729f813432f9 100644 --- a/node/core/pvf/src/prepare/pool.rs +++ b/node/core/pvf/src/prepare/pool.rs @@ -16,6 +16,7 @@ use super::worker::{self, Outcome}; use crate::{ + error::PrepareError, metrics::Metrics, worker_common::{IdleWorker, WorkerHandle}, LOG_TARGET, @@ -78,9 +79,16 @@ pub enum FromPool { /// The given worker was just spawned and is ready to be used. Spawned(Worker), - /// The given worker either succeeded or failed the given job. Under any circumstances the - /// artifact file has been written. The `bool` says whether the worker ripped. - Concluded(Worker, bool), + /// The given worker either succeeded or failed the given job. + Concluded { + /// A key for retrieving the worker data from the pool. + worker: Worker, + /// Indicates whether the worker process was killed. 
+ rip: bool, + /// [`Ok`] indicates that compiled artifact is successfully stored on disk. + /// Otherwise, an [error](PrepareError) is supplied. + result: Result<(), PrepareError>, + }, /// The given worker ceased to exist. Rip(Worker), @@ -295,7 +303,7 @@ fn handle_mux( }, PoolEvent::StartWork(worker, outcome) => { match outcome { - Outcome::Concluded(idle) => { + Outcome::Concluded { worker: idle, result } => { let data = match spawned.get_mut(worker) { None => { // Perhaps the worker was killed meanwhile and the result is no longer @@ -310,7 +318,7 @@ fn handle_mux( let old = data.idle.replace(idle); assert_matches!(old, None, "attempt to overwrite an idle worker"); - reply(from_pool, FromPool::Concluded(worker, false))?; + reply(from_pool, FromPool::Concluded { worker, rip: false, result })?; Ok(()) }, @@ -321,9 +329,16 @@ fn handle_mux( Ok(()) }, - Outcome::DidntMakeIt => { + Outcome::DidNotMakeIt => { if attempt_retire(metrics, spawned, worker) { - reply(from_pool, FromPool::Concluded(worker, true))?; + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::DidNotMakeIt), + }, + )?; } Ok(()) diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index 4ffa21de435b..d85e6b8a1422 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -17,7 +17,9 @@ //! A queue that handles requests for PVF preparation. use super::pool::{self, Worker}; -use crate::{artifacts::ArtifactId, metrics::Metrics, Priority, Pvf, LOG_TARGET}; +use crate::{ + artifacts::ArtifactId, error::PrepareError, metrics::Metrics, Priority, Pvf, LOG_TARGET, +}; use always_assert::{always, never}; use async_std::path::PathBuf; use futures::{channel::mpsc, stream::StreamExt as _, Future, SinkExt}; @@ -29,7 +31,7 @@ pub enum ToQueue { /// This schedules preparation of the given PVF. 
/// /// Note that it is incorrect to enqueue the same PVF again without first receiving the - /// [`FromQueue::Prepared`] response. In case there is a need to bump the priority, use + /// [`FromQueue`] response. In case there is a need to bump the priority, use /// [`ToQueue::Amend`]. Enqueue { priority: Priority, pvf: Pvf }, /// Amends the priority for the given [`ArtifactId`] if it is running. If it's not, then it's noop. @@ -37,9 +39,13 @@ pub enum ToQueue { } /// A response from queue. -#[derive(Debug, PartialEq, Eq)] -pub enum FromQueue { - Prepared(ArtifactId), +#[derive(Debug)] +pub struct FromQueue { + /// Identifier of an artifact. + pub(crate) artifact_id: ArtifactId, + /// Outcome of the PVF processing. [`Ok`] indicates that compiled artifact + /// is successfully stored on disk. Otherwise, an [error](PrepareError) is supplied. + pub(crate) result: Result<(), PrepareError>, } #[derive(Default)] @@ -299,7 +305,8 @@ async fn handle_from_pool(queue: &mut Queue, from_pool: pool::FromPool) -> Resul use pool::FromPool::*; match from_pool { Spawned(worker) => handle_worker_spawned(queue, worker).await?, - Concluded(worker, rip) => handle_worker_concluded(queue, worker, rip).await?, + Concluded { worker, rip, result } => + handle_worker_concluded(queue, worker, rip, result).await?, Rip(worker) => handle_worker_rip(queue, worker).await?, } Ok(()) @@ -320,6 +327,7 @@ async fn handle_worker_concluded( queue: &mut Queue, worker: Worker, rip: bool, + result: Result<(), PrepareError>, ) -> Result<(), Fatal> { queue.metrics.prepare_concluded(); @@ -377,7 +385,7 @@ async fn handle_worker_concluded( "prepare worker concluded", ); - reply(&mut queue.from_queue_tx, FromQueue::Prepared(artifact_id))?; + reply(&mut queue.from_queue_tx, FromQueue { artifact_id, result })?; // Figure out what to do with the worker. 
if rip { @@ -641,12 +649,9 @@ mod tests { let w = test.workers.insert(()); test.send_from_pool(pool::FromPool::Spawned(w)); - test.send_from_pool(pool::FromPool::Concluded(w, false)); + test.send_from_pool(pool::FromPool::Concluded { worker: w, rip: false, result: Ok(()) }); - assert_eq!( - test.poll_and_recv_from_queue().await, - FromQueue::Prepared(pvf(1).as_artifact_id()) - ); + assert_eq!(test.poll_and_recv_from_queue().await.artifact_id, pvf(1).as_artifact_id()); } #[async_std::test] @@ -671,7 +676,7 @@ mod tests { assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); - test.send_from_pool(pool::FromPool::Concluded(w1, false)); + test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, result: Ok(()) }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); @@ -704,7 +709,7 @@ mod tests { // That's a bit silly in this context, but in production there will be an entire pool up // to the `soft_capacity` of workers and it doesn't matter which one to cull. Either way, // we just check that edge case of an edge case works. - test.send_from_pool(pool::FromPool::Concluded(w1, false)); + test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, result: Ok(()) }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Kill(w1)); } @@ -749,15 +754,12 @@ mod tests { assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); // Conclude worker 1 and rip it. - test.send_from_pool(pool::FromPool::Concluded(w1, true)); + test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: true, result: Ok(()) }); // Since there is still work, the queue requested one extra worker to spawn to handle the // remaining enqueued work items. 
assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); - assert_eq!( - test.poll_and_recv_from_queue().await, - FromQueue::Prepared(pvf(1).as_artifact_id()) - ); + assert_eq!(test.poll_and_recv_from_queue().await.artifact_id, pvf(1).as_artifact_id()); } #[async_std::test] @@ -773,7 +775,11 @@ mod tests { assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); - test.send_from_pool(pool::FromPool::Concluded(w1, true)); + test.send_from_pool(pool::FromPool::Concluded { + worker: w1, + rip: true, + result: Err(PrepareError::DidNotMakeIt), + }); test.poll_ensure_to_pool_is_empty().await; } diff --git a/node/core/pvf/src/prepare/worker.rs b/node/core/pvf/src/prepare/worker.rs index 510d582f7e03..a8bb3516e296 100644 --- a/node/core/pvf/src/prepare/worker.rs +++ b/node/core/pvf/src/prepare/worker.rs @@ -15,7 +15,8 @@ // along with Polkadot. If not, see . use crate::{ - artifacts::Artifact, + artifacts::CompiledArtifact, + error::PrepareError, worker_common::{ bytes_to_path, framed_recv, framed_send, path_to_bytes, spawn_with_program_path, tmpfile_in, worker_event_loop, IdleWorker, SpawnErr, WorkerHandle, @@ -29,6 +30,8 @@ use async_std::{ }; use futures::FutureExt as _; use futures_timer::Delay; +use parity_scale_codec::{Decode, Encode}; +use sp_core::hexdisplay::HexDisplay; use std::{sync::Arc, time::Duration}; const NICENESS_BACKGROUND: i32 = 10; @@ -48,7 +51,7 @@ pub async fn spawn( pub enum Outcome { /// The worker has finished the work assigned to it. - Concluded(IdleWorker), + Concluded { worker: IdleWorker, result: Result<(), PrepareError> }, /// The host tried to reach the worker but failed. This is most likely because the worked was /// killed by the system. Unreachable, @@ -59,7 +62,7 @@ pub enum Outcome { /// the artifact). /// /// This doesn't return an idle worker instance, thus this worker is no longer usable. 
- DidntMakeIt, + DidNotMakeIt, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -99,13 +102,11 @@ pub async fn start_work( // Wait for the result from the worker, keeping in mind that there may be a timeout, the // worker may get killed, or something along these lines. // - // In that case we should handle these gracefully by writing the artifact file by ourselves. - // We may potentially overwrite the artifact in rare cases where the worker didn't make - // it to report back the result. + // In that case we should propagate the error to the pool. #[derive(Debug)] enum Selected { - Done, + Done(Result<(), PrepareError>), IoErr, Deadline, } @@ -113,41 +114,48 @@ pub async fn start_work( let selected = futures::select! { res = framed_recv(&mut stream).fuse() => { match res { - Ok(x) if x == &[1u8] => { - tracing::debug!( - target: LOG_TARGET, - worker_pid = %pid, - "promoting WIP artifact {} to {}", - tmp_file.display(), - artifact_path.display(), - ); - - async_std::fs::rename(&tmp_file, &artifact_path) - .await - .map(|_| Selected::Done) - .unwrap_or_else(|err| { - tracing::warn!( + Ok(response_bytes) => { + // By convention we expect encoded `Result<(), PrepareError>`. 
+ if let Ok(result) = + >::decode(&mut response_bytes.clone().as_slice()) + { + if result.is_ok() { + tracing::debug!( target: LOG_TARGET, worker_pid = %pid, - "failed to rename the artifact from {} to {}: {:?}", + "promoting WIP artifact {} to {}", tmp_file.display(), artifact_path.display(), - err, ); - Selected::IoErr - }) - } - Ok(response_bytes) => { - use sp_core::hexdisplay::HexDisplay; - let bound_bytes = - &response_bytes[..response_bytes.len().min(4)]; - tracing::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "received unexpected response from the prepare worker: {}", - HexDisplay::from(&bound_bytes), - ); - Selected::IoErr + + async_std::fs::rename(&tmp_file, &artifact_path) + .await + .map(|_| Selected::Done(result)) + .unwrap_or_else(|err| { + tracing::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "failed to rename the artifact from {} to {}: {:?}", + tmp_file.display(), + artifact_path.display(), + err, + ); + Selected::IoErr + }) + } else { + Selected::Done(result) + } + } else { + // We received invalid bytes from the worker. + let bound_bytes = &response_bytes[..response_bytes.len().min(4)]; + tracing::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "received unexpected response from the prepare worker: {}", + HexDisplay::from(&bound_bytes), + ); + Selected::IoErr + } }, Err(err) => { tracing::warn!( @@ -164,24 +172,11 @@ pub async fn start_work( }; match selected { - Selected::Done => { + Selected::Done(result) => { renice(pid, NICENESS_FOREGROUND); - Outcome::Concluded(IdleWorker { stream, pid }) - }, - Selected::IoErr | Selected::Deadline => { - let bytes = Artifact::DidntMakeIt.serialize(); - // best effort: there is nothing we can do here if the write fails. 
- if let Err(err) = async_std::fs::write(&artifact_path, &bytes).await { - tracing::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "preparation didn't make it, because of `{:?}`: {:?}", - selected, - err, - ); - } - Outcome::DidntMakeIt + Outcome::Concluded { worker: IdleWorker { stream, pid }, result } }, + Selected::IoErr | Selected::Deadline => Outcome::DidNotMakeIt, } }) .await @@ -205,7 +200,7 @@ where "failed to create a temp file for the artifact: {:?}", err, ); - return Outcome::DidntMakeIt + return Outcome::DidNotMakeIt }, }; @@ -288,31 +283,47 @@ pub fn worker_entrypoint(socket_path: &str) { worker_pid = %std::process::id(), "worker: preparing artifact", ); - let artifact_bytes = prepare_artifact(&code).serialize(); - // Write the serialized artifact into into a temp file. - tracing::debug!( - target: LOG_TARGET, - worker_pid = %std::process::id(), - "worker: writing artifact to {}", - dest.display(), - ); - async_std::fs::write(&dest, &artifact_bytes).await?; + let result = match prepare_artifact(&code) { + Err(err) => { + // Serialized error will be written into the socket. + Err(err) + }, + Ok(compiled_artifact) => { + // Write the serialized artifact into a temp file. + // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). The prepare + // worker is only required to send an empty `Ok` to the pool + // to indicate the success. + + let artifact_bytes = compiled_artifact.encode(); + + tracing::debug!( + target: LOG_TARGET, + worker_pid = %std::process::id(), + "worker: writing artifact to {}", + dest.display(), + ); + async_std::fs::write(&dest, &artifact_bytes).await?; + + Ok(()) + }, + }; - // Return back a byte that signals finishing the work. 
- framed_send(&mut stream, &[1u8]).await?; + framed_send(&mut stream, result.encode().as_slice()).await?; } }); } -fn prepare_artifact(code: &[u8]) -> Artifact { +fn prepare_artifact(code: &[u8]) -> Result { let blob = match crate::executor_intf::prevalidate(code) { - Err(err) => return Artifact::PrevalidationErr(format!("{:?}", err)), + Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), Ok(b) => b, }; match crate::executor_intf::prepare(blob) { - Ok(compiled_artifact) => Artifact::Compiled { compiled_artifact }, - Err(err) => Artifact::PreparationErr(format!("{:?}", err)), + Ok(compiled_artifact) => Ok(CompiledArtifact::new(compiled_artifact)), + Err(err) => Err(PrepareError::Preparation(format!("{:?}", err))), } } diff --git a/node/core/pvf/tests/it/main.rs b/node/core/pvf/tests/it/main.rs index 3689217880ef..e8fd7b665aa3 100644 --- a/node/core/pvf/tests/it/main.rs +++ b/node/core/pvf/tests/it/main.rs @@ -20,11 +20,13 @@ use polkadot_node_core_pvf::{ start, Config, InvalidCandidate, Metrics, Pvf, ValidationError, ValidationHost, }; use polkadot_parachain::primitives::{BlockData, ValidationParams, ValidationResult}; +use std::time::Duration; mod adder; mod worker_common; const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_puppet_worker"); +const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); struct TestHost { _cache_dir: tempfile::TempDir, @@ -64,6 +66,7 @@ impl TestHost { .await .execute_pvf( Pvf::from_code(code.into()), + TEST_EXECUTION_TIMEOUT, params.encode(), polkadot_node_core_pvf::Priority::Normal, result_tx, diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml index ed730f70d492..fe593248a2e9 100644 --- a/node/core/runtime-api/Cargo.toml +++ b/node/core/runtime-api/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-node-core-runtime-api" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = 
"0.1.29" memory-lru = "0.1.0" parity-util-mem = { version = "0.10.0", default-features = false } diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 962959a23841..88b579402e64 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -24,7 +24,8 @@ use polkadot_primitives::v1::{ AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, }; const AUTHORITIES_CACHE_SIZE: usize = 128 * 1024; @@ -32,6 +33,7 @@ const VALIDATORS_CACHE_SIZE: usize = 64 * 1024; const VALIDATOR_GROUPS_CACHE_SIZE: usize = 64 * 1024; const AVAILABILITY_CORES_CACHE_SIZE: usize = 64 * 1024; const PERSISTED_VALIDATION_DATA_CACHE_SIZE: usize = 64 * 1024; +const ASSUMED_VALIDATION_DATA_CACHE_SIZE: usize = 64 * 1024; const CHECK_VALIDATION_OUTPUTS_CACHE_SIZE: usize = 64 * 1024; const SESSION_INDEX_FOR_CHILD_CACHE_SIZE: usize = 64 * 1024; const VALIDATION_CODE_CACHE_SIZE: usize = 10 * 1024 * 1024; @@ -41,6 +43,7 @@ const SESSION_INFO_CACHE_SIZE: usize = 64 * 1024; const DMQ_CONTENTS_CACHE_SIZE: usize = 64 * 1024; const INBOUND_HRMP_CHANNELS_CACHE_SIZE: usize = 64 * 1024; const CURRENT_BABE_EPOCH_CACHE_SIZE: usize = 64 * 1024; +const ON_CHAIN_VOTES_CACHE_SIZE: usize = 3 * 1024; struct ResidentSizeOf(T); @@ -78,6 +81,10 @@ pub(crate) struct RequestResultCache { (Hash, ParaId, OccupiedCoreAssumption), ResidentSizeOf>, >, + assumed_validation_data: MemoryLruCache< + (ParaId, Hash), + ResidentSizeOf>, + >, check_validation_outputs: MemoryLruCache<(Hash, ParaId, CandidateCommitments), ResidentSizeOf>, session_index_for_child: MemoryLruCache>, @@ 
-98,6 +105,7 @@ pub(crate) struct RequestResultCache { ResidentSizeOf>>>, >, current_babe_epoch: MemoryLruCache>, + on_chain_votes: MemoryLruCache>>, } impl Default for RequestResultCache { @@ -108,6 +116,7 @@ impl Default for RequestResultCache { validator_groups: MemoryLruCache::new(VALIDATOR_GROUPS_CACHE_SIZE), availability_cores: MemoryLruCache::new(AVAILABILITY_CORES_CACHE_SIZE), persisted_validation_data: MemoryLruCache::new(PERSISTED_VALIDATION_DATA_CACHE_SIZE), + assumed_validation_data: MemoryLruCache::new(ASSUMED_VALIDATION_DATA_CACHE_SIZE), check_validation_outputs: MemoryLruCache::new(CHECK_VALIDATION_OUTPUTS_CACHE_SIZE), session_index_for_child: MemoryLruCache::new(SESSION_INDEX_FOR_CHILD_CACHE_SIZE), validation_code: MemoryLruCache::new(VALIDATION_CODE_CACHE_SIZE), @@ -120,6 +129,7 @@ impl Default for RequestResultCache { dmq_contents: MemoryLruCache::new(DMQ_CONTENTS_CACHE_SIZE), inbound_hrmp_channels_contents: MemoryLruCache::new(INBOUND_HRMP_CHANNELS_CACHE_SIZE), current_babe_epoch: MemoryLruCache::new(CURRENT_BABE_EPOCH_CACHE_SIZE), + on_chain_votes: MemoryLruCache::new(ON_CHAIN_VOTES_CACHE_SIZE), } } } @@ -186,6 +196,21 @@ impl RequestResultCache { self.persisted_validation_data.insert(key, ResidentSizeOf(data)); } + pub(crate) fn assumed_validation_data( + &mut self, + key: (Hash, ParaId, Hash), + ) -> Option<&Option<(PersistedValidationData, ValidationCodeHash)>> { + self.assumed_validation_data.get(&(key.1, key.2)).map(|v| &v.0) + } + + pub(crate) fn cache_assumed_validation_data( + &mut self, + key: (ParaId, Hash), + data: Option<(PersistedValidationData, ValidationCodeHash)>, + ) { + self.assumed_validation_data.insert(key, ResidentSizeOf(data)); + } + pub(crate) fn check_validation_outputs( &mut self, key: (Hash, ParaId, CandidateCommitments), @@ -320,6 +345,21 @@ impl RequestResultCache { pub(crate) fn cache_current_babe_epoch(&mut self, relay_parent: Hash, epoch: Epoch) { self.current_babe_epoch.insert(relay_parent, 
DoesNotAllocate(epoch)); } + + pub(crate) fn on_chain_votes( + &mut self, + relay_parent: &Hash, + ) -> Option<&Option> { + self.on_chain_votes.get(relay_parent).map(|v| &v.0) + } + + pub(crate) fn cache_on_chain_votes( + &mut self, + relay_parent: Hash, + scraped: Option, + ) { + self.on_chain_votes.insert(relay_parent, ResidentSizeOf(scraped)); + } } pub(crate) enum RequestResult { @@ -328,6 +368,12 @@ pub(crate) enum RequestResult { ValidatorGroups(Hash, (Vec>, GroupRotationInfo)), AvailabilityCores(Hash, Vec), PersistedValidationData(Hash, ParaId, OccupiedCoreAssumption, Option), + AssumedValidationData( + Hash, + ParaId, + Hash, + Option<(PersistedValidationData, ValidationCodeHash)>, + ), CheckValidationOutputs(Hash, ParaId, CandidateCommitments, bool), SessionIndexForChild(Hash, SessionIndex), ValidationCode(Hash, ParaId, OccupiedCoreAssumption, Option), @@ -342,4 +388,5 @@ pub(crate) enum RequestResult { BTreeMap>>, ), CurrentBabeEpoch(Hash, Epoch), + FetchOnChainVotes(Hash, Option), } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index cf2ec719c8e5..4aadae4cfa5f 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -119,6 +119,15 @@ where PersistedValidationData(relay_parent, para_id, assumption, data) => self .requests_cache .cache_persisted_validation_data((relay_parent, para_id, assumption), data), + AssumedValidationData( + _relay_parent, + para_id, + expected_persisted_validation_data_hash, + data, + ) => self.requests_cache.cache_assumed_validation_data( + (para_id, expected_persisted_validation_data_hash), + data, + ), CheckValidationOutputs(relay_parent, para_id, commitments, b) => self .requests_cache .cache_check_validation_outputs((relay_parent, para_id, commitments), b), @@ -143,6 +152,8 @@ where .cache_inbound_hrmp_channel_contents((relay_parent, para_id), contents), CurrentBabeEpoch(relay_parent, epoch) => self.requests_cache.cache_current_babe_epoch(relay_parent, epoch), + 
FetchOnChainVotes(relay_parent, scraped) => + self.requests_cache.cache_on_chain_votes(relay_parent, scraped), } } @@ -184,6 +195,21 @@ where Request::PersistedValidationData(para, assumption, sender) => query!(persisted_validation_data(para, assumption), sender) .map(|sender| Request::PersistedValidationData(para, assumption, sender)), + Request::AssumedValidationData( + para, + expected_persisted_validation_data_hash, + sender, + ) => query!( + assumed_validation_data(para, expected_persisted_validation_data_hash), + sender + ) + .map(|sender| { + Request::AssumedValidationData( + para, + expected_persisted_validation_data_hash, + sender, + ) + }), Request::CheckValidationOutputs(para, commitments, sender) => query!(check_validation_outputs(para, commitments), sender) .map(|sender| Request::CheckValidationOutputs(para, commitments, sender)), @@ -209,6 +235,8 @@ where .map(|sender| Request::InboundHrmpChannelsContents(id, sender)), Request::CurrentBabeEpoch(sender) => query!(current_babe_epoch(), sender).map(|sender| Request::CurrentBabeEpoch(sender)), + Request::FetchOnChainVotes(sender) => + query!(on_chain_votes(), sender).map(|sender| Request::FetchOnChainVotes(sender)), } } @@ -326,6 +354,12 @@ where query!(AvailabilityCores, availability_cores(), sender), Request::PersistedValidationData(para, assumption, sender) => query!(PersistedValidationData, persisted_validation_data(para, assumption), sender), + Request::AssumedValidationData(para, expected_persisted_validation_data_hash, sender) => + query!( + AssumedValidationData, + assumed_validation_data(para, expected_persisted_validation_data_hash), + sender + ), Request::CheckValidationOutputs(para, commitments, sender) => query!(CheckValidationOutputs, check_validation_outputs(para, commitments), sender), Request::SessionIndexForChild(sender) => @@ -342,6 +376,7 @@ where Request::InboundHrmpChannelsContents(id, sender) => query!(InboundHrmpChannelsContents, inbound_hrmp_channels_contents(id), sender), 
Request::CurrentBabeEpoch(sender) => query!(CurrentBabeEpoch, current_epoch(), sender), + Request::FetchOnChainVotes(sender) => query!(FetchOnChainVotes, on_chain_votes(), sender), } } diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 18906e196b82..ab2156551949 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -22,8 +22,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::v1::{ AuthorityDiscoveryId, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, + PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, }; use sp_core::testing::TaskExecutor; use std::{ @@ -49,6 +49,7 @@ struct MockRuntimeApi { dmq_contents: HashMap>, hrmp_channels: HashMap>>, babe_epoch: Option, + on_chain_votes: Option, } impl ProvideRuntimeApi for MockRuntimeApi { @@ -89,6 +90,17 @@ sp_api::mock_impl_runtime_apis! { self.validation_data.get(¶).cloned() } + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + self.validation_data + .get(¶_id) + .cloned() + .filter(|data| data.hash() == expected_persisted_validation_data_hash) + .zip(self.validation_code.get(¶_id).map(|code| code.hash())) + } + fn check_validation_outputs( &self, para_id: ParaId, @@ -149,6 +161,10 @@ sp_api::mock_impl_runtime_apis! 
{ ) -> Option { self.validation_code_by_hash.get(&hash).map(|c| c.clone()) } + + fn on_chain_votes(&self) -> Option { + self.on_chain_votes.clone() + } } impl BabeApi for MockRuntimeApi { @@ -340,6 +356,58 @@ fn requests_persisted_validation_data() { futures::executor::block_on(future::join(subsystem_task, test_task)); } +#[test] +fn requests_assumed_validation_data() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let relay_parent = [1; 32].into(); + let para_a = 5.into(); + let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); + + let validation_code = ValidationCode(vec![1, 2, 3]); + let expected_data_hash = ::default().hash(); + let expected_code_hash = validation_code.hash(); + + let mut runtime_api = MockRuntimeApi::default(); + runtime_api.validation_data.insert(para_a, Default::default()); + runtime_api.validation_code.insert(para_a, validation_code); + runtime_api.validation_data.insert(para_b, Default::default()); + let runtime_api = Arc::new(runtime_api); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle + .send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::AssumedValidationData(para_a, expected_data_hash, tx), + ), + }) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), Some((Default::default(), expected_code_hash))); + + let (tx, rx) = oneshot::channel(); + ctx_handle + .send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::AssumedValidationData(para_a, Hash::zero(), tx), + ), + }) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), None); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); +} + #[test] fn 
requests_check_validation_outputs() { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); diff --git a/node/jaeger/Cargo.toml b/node/jaeger/Cargo.toml index 308a76d494f8..059043ed6318 100644 --- a/node/jaeger/Cargo.toml +++ b/node/jaeger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-jaeger" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Polkadot Jaeger primitives" @@ -14,6 +14,6 @@ polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.26" +thiserror = "1.0.30" log = "0.4.13" parity-scale-codec = { version = "2.3.1", default-features = false } diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index 94ab2d33ac5b..49c7d7a3f599 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -10,7 +10,7 @@ path = "src/variant-a.rs" name = "polkadot-test-malus" description = "Misbehaving nodes for local testnets, system and Simnet tests." 
license = "GPL-3.0-only" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" readme = "README.md" @@ -25,7 +25,7 @@ polkadot-node-core-pvf = { path = "../core/pvf" } parity-util-mem = { version = "0.10.0", default-features = false, features = ["jemalloc-global"] } color-eyre = { version = "0.5.11", default-features = false } assert_matches = "1.5" -structopt = "0.3.23" +structopt = "0.3.25" async-trait = "0.1.51" [dev-dependencies] diff --git a/node/metered-channel/Cargo.toml b/node/metered-channel/Cargo.toml index dfbea6ba9586..8e955f88386f 100644 --- a/node/metered-channel/Cargo.toml +++ b/node/metered-channel/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "metered-channel" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Channels with attached Meters" @@ -9,12 +9,12 @@ description = "Channels with attached Meters" futures = "0.3.17" futures-timer = "3.0.2" derive_more = "0.99" -tracing = "0.1.28" -thiserror = "1.0.29" +tracing = "0.1.29" +thiserror = "1.0.30" [dev-dependencies] futures = { version = "0.3.17", features = ["thread-pool"] } assert_matches = "1.5" env_logger = "0.9" log = "0.4" -tracing = { version = "0.1.28", features = ["log"] } +tracing = { version = "0.1.29", features = ["log"] } diff --git a/node/metered-channel/src/oneshot.rs b/node/metered-channel/src/oneshot.rs index bf1a52682462..5f2c72cb701e 100644 --- a/node/metered-channel/src/oneshot.rs +++ b/node/metered-channel/src/oneshot.rs @@ -95,7 +95,7 @@ pub fn channel( #[allow(missing_docs)] #[derive(thiserror::Error, Debug)] pub enum Error { - #[error("Oneshot was cancelled.")] + #[error("Oneshot was canceled.")] Canceled(#[source] Canceled, Measurements), #[error("Oneshot did not receive a response within {}", Duration::as_secs_f64(.0))] HardTimeout(Duration, Measurements), @@ -124,7 +124,7 @@ impl MeteredSender { inner.send((Instant::now(), t)).map_err(|(_, t)| t) } - /// Poll if the thing is 
already cancelled. + /// Poll if the thing is already canceled. pub fn poll_canceled(&mut self, ctx: &mut Context<'_>) -> Poll<()> { self.inner.poll_canceled(ctx) } diff --git a/node/metrics/Cargo.toml b/node/metrics/Cargo.toml index d12a290c17a1..3e74dbdd4caa 100644 --- a/node/metrics/Cargo.toml +++ b/node/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-metrics" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Subsystem traits and message definitions" diff --git a/node/network/approval-distribution/Cargo.toml b/node/network/approval-distribution/Cargo.toml index cf3c71faf2cf..15a9cf08f47c 100644 --- a/node/network/approval-distribution/Cargo.toml +++ b/node/network/approval-distribution/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-approval-distribution" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -12,7 +12,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-primitives = { path = "../../../primitives" } futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" [dev-dependencies] sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] } diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml index 1f7d89c5ddb3..36c0569ba87e 100644 --- a/node/network/availability-distribution/Cargo.toml +++ b/node/network/availability-distribution/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-availability-distribution" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" parity-scale-codec = { version = "2.3.1", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } @@ -16,10 +16,10 @@ polkadot-node-subsystem-util = { 
path = "../../subsystem-util" } polkadot-node-primitives = { path = "../../primitives" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.26" +thiserror = "1.0.30" rand = "0.8.3" derive_more = "0.99.11" -lru = "0.6.6" +lru = "0.7.0" [dev-dependencies] polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" } diff --git a/node/network/availability-distribution/src/error.rs b/node/network/availability-distribution/src/error.rs index 881e00ac28a0..d9db0ec42fae 100644 --- a/node/network/availability-distribution/src/error.rs +++ b/node/network/availability-distribution/src/error.rs @@ -117,7 +117,18 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<( match result { Err(Error::Fatal(f)) => Err(f), Err(Error::NonFatal(error)) => { - tracing::warn!(target: LOG_TARGET, error = ?error, ctx); + match error { + NonFatal::UnexpectedPoV | + NonFatal::InvalidValidatorIndex | + NonFatal::NoSuchCachedSession | + NonFatal::QueryAvailableDataResponseChannel(_) | + NonFatal::QueryChunkResponseChannel(_) => + tracing::warn!(target: LOG_TARGET, error = %error, ctx), + NonFatal::FetchPoV(_) | + NonFatal::SendResponse | + NonFatal::NoSuchPoV | + NonFatal::Runtime(_) => tracing::debug!(target: LOG_TARGET, error = ?error, ctx), + } Ok(()) }, Ok(()) => Ok(()), diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index f1615d1f33ae..0c5dd6e684c1 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -344,7 +344,7 @@ impl RunningTask { Err(TaskError::PeerError) }, Err(RequestError::NetworkError(err)) => { - tracing::warn!( + 
tracing::debug!( target: LOG_TARGET, origin= ?validator, err= ?err, @@ -353,7 +353,7 @@ impl RunningTask { Err(TaskError::PeerError) }, Err(RequestError::Canceled(oneshot::Canceled)) => { - tracing::warn!(target: LOG_TARGET, + tracing::debug!(target: LOG_TARGET, origin= ?validator, "Erasure chunk request got canceled"); Err(TaskError::PeerError) diff --git a/node/network/availability-recovery/Cargo.toml b/node/network/availability-recovery/Cargo.toml index d23d29a8dd29..469834f18c8a 100644 --- a/node/network/availability-recovery/Cargo.toml +++ b/node/network/availability-recovery/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "polkadot-availability-recovery" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -lru = "0.6.6" +lru = "0.7.0" rand = "0.8.3" -thiserror = "1.0.26" -tracing = "0.1.28" +thiserror = "1.0.30" +tracing = "0.1.29" polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-primitives = { path = "../../../primitives" } diff --git a/node/network/availability-recovery/src/futures_undead.rs b/node/network/availability-recovery/src/futures_undead.rs index 550f41a9b3cf..aa91c39f8ed1 100644 --- a/node/network/availability-recovery/src/futures_undead.rs +++ b/node/network/availability-recovery/src/futures_undead.rs @@ -18,7 +18,7 @@ //! futures will still get polled, but will not count towards length. So length will only count //! futures, which are still considered live. //! -//! Usecase: If futures take longer than we would like them too, we may be able to request the data +//! Use case: If futures take longer than we would like them too, we may be able to request the data //! from somewhere else as well. We don't really want to cancel the old future, because maybe it //! was almost done, thus we would have wasted time with our impatience. By simply making them //! 
not count towards length, we can make sure to have enough "live" requests ongoing, while at the diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index 5b951bd8e030..8c5ff49a8add 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-availability-bitfield-distribution" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml index 7758fd7148c7..500e713113c3 100644 --- a/node/network/bridge/Cargo.toml +++ b/node/network/bridge/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "polkadot-network-bridge" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] async-trait = "0.1.51" futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" polkadot-primitives = { path = "../../../primitives" } parity-scale-codec = { version = "2.3.1", default-features = false, features = ["derive"] } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 036254cb558e..d5114f72b452 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -81,18 +81,14 @@ pub trait Network: Clone + Send + 'static { /// Ask the network to keep a substream open with these nodes and not disconnect from them /// until removed from the protocol's peer set. /// Note that `out_peers` setting has no effect on this. 
- async fn add_to_peers_set( + async fn set_reserved_peers( &mut self, protocol: Cow<'static, str>, multiaddresses: HashSet, ) -> Result<(), String>; - /// Cancels the effects of `add_to_peers_set`. - async fn remove_from_peers_set( - &mut self, - protocol: Cow<'static, str>, - multiaddresses: HashSet, - ) -> Result<(), String>; + /// Removes the peers for the protocol's peer set (both reserved and non-reserved). + async fn remove_from_peers_set(&mut self, protocol: Cow<'static, str>, peers: Vec); /// Send a request to a remote peer. async fn start_request( @@ -118,25 +114,16 @@ impl Network for Arc> { NetworkService::event_stream(self, "polkadot-network-bridge").boxed() } - async fn add_to_peers_set( + async fn set_reserved_peers( &mut self, protocol: Cow<'static, str>, multiaddresses: HashSet, ) -> Result<(), String> { - sc_network::NetworkService::add_peers_to_reserved_set(&**self, protocol, multiaddresses) + sc_network::NetworkService::set_reserved_peers(&**self, protocol, multiaddresses) } - async fn remove_from_peers_set( - &mut self, - protocol: Cow<'static, str>, - multiaddresses: HashSet, - ) -> Result<(), String> { - sc_network::NetworkService::remove_peers_from_reserved_set( - &**self, - protocol.clone(), - multiaddresses.clone(), - )?; - sc_network::NetworkService::remove_from_peers_set(&**self, protocol, multiaddresses) + async fn remove_from_peers_set(&mut self, protocol: Cow<'static, str>, peers: Vec) { + sc_network::NetworkService::remove_peers_from_reserved_set(&**self, protocol, peers); } fn report_peer(&self, who: PeerId, cost_benefit: Rep) { diff --git a/node/network/bridge/src/tests.rs b/node/network/bridge/src/tests.rs index 40ade08df2b0..3be01dd8ae76 100644 --- a/node/network/bridge/src/tests.rs +++ b/node/network/bridge/src/tests.rs @@ -98,7 +98,7 @@ impl Network for TestNetwork { .boxed() } - async fn add_to_peers_set( + async fn set_reserved_peers( &mut self, _protocol: Cow<'static, str>, _: HashSet, @@ -106,13 +106,7 @@ impl Network for 
TestNetwork { Ok(()) } - async fn remove_from_peers_set( - &mut self, - _protocol: Cow<'static, str>, - _: HashSet, - ) -> Result<(), String> { - Ok(()) - } + async fn remove_from_peers_set(&mut self, _protocol: Cow<'static, str>, _: Vec) {} async fn start_request( &self, diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 7f768044ad50..c338e0e62f70 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -23,10 +23,13 @@ use std::collections::HashSet; use futures::channel::oneshot; -use sc_network::multiaddr::Multiaddr; +use sc_network::multiaddr::{self, Multiaddr}; pub use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery; -use polkadot_node_network_protocol::peer_set::{PeerSet, PerPeerSet}; +use polkadot_node_network_protocol::{ + peer_set::{PeerSet, PerPeerSet}, + PeerId, +}; use polkadot_primitives::v1::AuthorityDiscoveryId; const LOG_TARGET: &str = "parachain::validator-discovery"; @@ -39,7 +42,7 @@ pub(super) struct Service { #[derive(Default)] struct StatePerPeerSet { - previously_requested: HashSet, + previously_requested: HashSet, } impl Service { @@ -47,7 +50,7 @@ impl Service { Self { state: Default::default(), _phantom: PhantomData } } - /// Connect to already resolved addresses: + /// Connect to already resolved addresses. 
pub async fn on_resolved_request( &mut self, newly_requested: HashSet, @@ -55,31 +58,32 @@ impl Service { mut network_service: N, ) -> N { let state = &mut self.state[peer_set]; - // clean up revoked requests - let multiaddr_to_remove: HashSet<_> = - state.previously_requested.difference(&newly_requested).cloned().collect(); - let multiaddr_to_add: HashSet<_> = - newly_requested.difference(&state.previously_requested).cloned().collect(); - state.previously_requested = newly_requested; + let new_peer_ids: HashSet = extract_peer_ids(newly_requested.iter().cloned()); + let num_peers = new_peer_ids.len(); + + let peers_to_remove: Vec = + state.previously_requested.difference(&new_peer_ids).cloned().collect(); + let removed = peers_to_remove.len(); + state.previously_requested = new_peer_ids; tracing::debug!( target: LOG_TARGET, ?peer_set, - added = multiaddr_to_add.len(), - removed = multiaddr_to_remove.len(), + ?num_peers, + ?removed, "New ConnectToValidators resolved request", ); // ask the network to connect to these nodes and not disconnect // from them until removed from the set if let Err(e) = network_service - .add_to_peers_set(peer_set.into_protocol_name(), multiaddr_to_add) + .set_reserved_peers(peer_set.into_protocol_name(), newly_requested) .await { tracing::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); } // the addresses are known to be valid let _ = network_service - .remove_from_peers_set(peer_set.into_protocol_name(), multiaddr_to_remove) + .remove_from_peers_set(peer_set.into_protocol_name(), peers_to_remove) .await; network_service @@ -136,6 +140,15 @@ impl Service { } } +fn extract_peer_ids(multiaddr: impl Iterator) -> HashSet { + multiaddr + .filter_map(|mut addr| match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key).ok(), + _ => None, + }) + .collect() +} + #[cfg(test)] mod tests { use super::*; @@ -158,7 +171,7 @@ mod tests { #[derive(Default, Clone)] struct 
TestNetwork { - peers_set: HashSet, + peers_set: HashSet, } #[derive(Default, Clone, Debug)] @@ -171,9 +184,14 @@ mod tests { fn new() -> Self { let peer_ids = known_peer_ids(); let authorities = known_authorities(); - let multiaddr = known_multiaddr(); + let multiaddr = known_multiaddr().into_iter().zip(peer_ids.iter().cloned()).map( + |(mut addr, peer_id)| { + addr.push(multiaddr::Protocol::P2p(peer_id.into())); + addr + }, + ); Self { - by_authority_id: authorities.iter().cloned().zip(multiaddr.into_iter()).collect(), + by_authority_id: authorities.iter().cloned().zip(multiaddr).collect(), by_peer_id: peer_ids.into_iter().zip(authorities.into_iter()).collect(), } } @@ -185,22 +203,21 @@ mod tests { panic!() } - async fn add_to_peers_set( + async fn set_reserved_peers( &mut self, _protocol: Cow<'static, str>, multiaddresses: HashSet, ) -> Result<(), String> { - self.peers_set.extend(multiaddresses.into_iter()); + self.peers_set = extract_peer_ids(multiaddresses.into_iter()); Ok(()) } async fn remove_from_peers_set( &mut self, _protocol: Cow<'static, str>, - multiaddresses: HashSet, - ) -> Result<(), String> { - self.peers_set.retain(|elem| !multiaddresses.contains(elem)); - Ok(()) + peers: Vec, + ) { + self.peers_set.retain(|elem| !peers.contains(elem)); } async fn start_request( @@ -281,9 +298,14 @@ mod tests { let state = &service.state[PeerSet::Validation]; assert_eq!(state.previously_requested.len(), 1); - assert!(state - .previously_requested - .contains(ads.by_authority_id.get(&authority_ids[1]).unwrap())); + let peer_1 = extract_peer_ids( + vec![ads.by_authority_id.get(&authority_ids[1]).unwrap().clone()].into_iter(), + ) + .iter() + .cloned() + .next() + .unwrap(); + assert!(state.previously_requested.contains(&peer_1)); }); } @@ -310,9 +332,14 @@ mod tests { let state = &service.state[PeerSet::Validation]; assert_eq!(state.previously_requested.len(), 1); - assert!(state - .previously_requested - 
.contains(ads.by_authority_id.get(&authority_ids[0]).unwrap())); + let peer_0 = extract_peer_ids( + vec![ads.by_authority_id.get(&authority_ids[0]).unwrap().clone()].into_iter(), + ) + .iter() + .cloned() + .next() + .unwrap(); + assert!(state.previously_requested.contains(&peer_0)); let failed = failed_rx.await.unwrap(); assert_eq!(failed, 1); diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index 295a296911db..8eb98cfff59e 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-collator-protocol" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -9,8 +9,8 @@ always-assert = "0.1.2" derive_more = "0.99.14" futures = "0.3.17" futures-timer = "3" -thiserror = "1.0.26" -tracing = "0.1.28" +thiserror = "1.0.30" +tracing = "0.1.29" sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 3f77e499de08..d9e1de062552 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1362,7 +1362,7 @@ where .await; }, Err(RequestError::NetworkError(err)) => { - tracing::warn!( + tracing::debug!( target: LOG_TARGET, hash = ?pending_collation.relay_parent, para_id = ?pending_collation.para_id, @@ -1377,7 +1377,7 @@ where modify_reputation(ctx, pending_collation.peer_id.clone(), COST_NETWORK_ERROR).await; }, Err(RequestError::Canceled(_)) => { - tracing::warn!( + tracing::debug!( target: LOG_TARGET, hash = ?pending_collation.relay_parent, para_id = ?pending_collation.para_id, diff --git a/node/network/dispute-distribution/Cargo.toml b/node/network/dispute-distribution/Cargo.toml index 
11443b069a71..17eb33884d91 100644 --- a/node/network/dispute-distribution/Cargo.toml +++ b/node/network/dispute-distribution/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-dispute-distribution" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" derive_more = "0.99.14" parity-scale-codec = { version = "2.3.1", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } @@ -18,8 +18,8 @@ polkadot-node-primitives = { path = "../../primitives" } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.26" -lru = "0.6.6" +thiserror = "1.0.30" +lru = "0.7.0" [dev-dependencies] async-trait = "0.1.51" diff --git a/node/network/gossip-support/Cargo.toml b/node/network/gossip-support/Cargo.toml index 71df67b3fa9f..943ae17f2e9e 100644 --- a/node/network/gossip-support/Cargo.toml +++ b/node/network/gossip-support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-gossip-support" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -19,7 +19,7 @@ futures = "0.3.17" futures-timer = "3.0.2" rand = { version = "0.8.3", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } -tracing = "0.1.28" +tracing = "0.1.29" [dev-dependencies] sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index 9046c232fca4..4fdc2eb4082f 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-network-protocol" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies 
"] edition = "2018" description = "Primitives types for the Node-side" @@ -13,7 +13,7 @@ polkadot-node-jaeger = { path = "../../jaeger" } parity-scale-codec = { version = "2.3.1", default-features = false, features = ["derive"] } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } -strum = { version = "0.21", features = ["derive"] } +strum = { version = "0.22", features = ["derive"] } derive_more = "0.99.11" futures = "0.3.17" -thiserror = "1.0.26" +thiserror = "1.0.30" diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 5573363956f7..d5b76fff9a75 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "polkadot-statement-distribution" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Statement Distribution Subsystem" edition = "2018" [dependencies] futures = "0.3.17" -tracing = "0.1.28" +tracing = "0.1.29" polkadot-primitives = { path = "../../../primitives" } sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -19,7 +19,7 @@ arrayvec = "0.5.2" indexmap = "1.7.0" parity-scale-codec = { version = "2.3.1", default-features = false, features = ["derive"] } derive_more = "0.99.11" -thiserror = "1.0.26" +thiserror = "1.0.30" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index 819440e6f29c..ac997544b5c8 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -87,8 +87,12 @@ pub enum 
NonFatal { #[error("Relay parent could not be found in active heads")] NoSuchHead(Hash), + /// Received message from actually disconnected peer. + #[error("Message from not connected peer")] + NoSuchPeer(PeerId), + /// Peer requested statement data for candidate that was never announced to it. - #[error("Peer requested data for candidate it never received a notification for")] + #[error("Peer requested data for candidate it never received a notification for (malicious?)")] RequestedUnannouncedCandidate(PeerId, CandidateHash), /// A large statement status was requested, which could not be found. @@ -112,7 +116,11 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<( match result { Err(Error::Fatal(f)) => Err(f), Err(Error::NonFatal(error)) => { - tracing::warn!(target: LOG_TARGET, error = ?error, ctx); + match error { + NonFatal::RequestedUnannouncedCandidate(_, _) => + tracing::warn!(target: LOG_TARGET, error = %error, ctx), + _ => tracing::debug!(target: LOG_TARGET, error = %error, ctx), + } Ok(()) }, Ok(()) => Ok(()), diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index e2c08c8216bc..a810a663a87d 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -1625,7 +1625,7 @@ impl StatementDistribution { &requesting_peer, &relay_parent, &candidate_hash, - ) { + )? { return Err(NonFatal::RequestedUnannouncedCandidate( requesting_peer, candidate_hash, @@ -1896,27 +1896,15 @@ fn requesting_peer_knows_about_candidate( requesting_peer: &PeerId, relay_parent: &Hash, candidate_hash: &CandidateHash, -) -> bool { - requesting_peer_knows_about_candidate_inner( - peers, - requesting_peer, - relay_parent, - candidate_hash, - ) - .is_some() -} - -/// Helper function for `requesting_peer_knows_about_statement`. 
-fn requesting_peer_knows_about_candidate_inner( - peers: &HashMap, - requesting_peer: &PeerId, - relay_parent: &Hash, - candidate_hash: &CandidateHash, -) -> Option<()> { - let peer_data = peers.get(requesting_peer)?; - let knowledge = peer_data.view_knowledge.get(relay_parent)?; - knowledge.sent_candidates.get(&candidate_hash)?; - Some(()) +) -> NonFatalResult { + let peer_data = peers + .get(requesting_peer) + .ok_or_else(|| NonFatal::NoSuchPeer(*requesting_peer))?; + let knowledge = peer_data + .view_knowledge + .get(relay_parent) + .ok_or_else(|| NonFatal::NoSuchHead(*relay_parent))?; + Ok(knowledge.sent_candidates.get(&candidate_hash).is_some()) } #[derive(Clone)] diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 5345fb031811..d0f67c111cdd 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-overseer" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -16,8 +16,8 @@ polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-metrics = { path = "../metrics" } polkadot-primitives = { path = "../../primitives" } polkadot-overseer-gen = { path = "./overseer-gen" } -tracing = "0.1.28" -lru = "0.6" +tracing = "0.1.29" +lru = "0.7" parity-util-mem = { version = ">= 0.10.1", default-features = false } [dev-dependencies] diff --git a/node/overseer/examples/minimal-example.rs b/node/overseer/examples/minimal-example.rs index 0ff8201594fb..41ec66642976 100644 --- a/node/overseer/examples/minimal-example.rs +++ b/node/overseer/examples/minimal-example.rs @@ -75,6 +75,7 @@ impl Subsystem1 { let msg = CandidateValidationMessage::ValidateFromChainState( Default::default(), PoV { block_data: BlockData(Vec::new()) }.into(), + Default::default(), tx, ); ctx.send_message(::AllMessages::from(msg)) diff --git a/node/overseer/overseer-gen/Cargo.toml b/node/overseer/overseer-gen/Cargo.toml index dfa1d8a508f4..98f040efcf11 100644 --- 
a/node/overseer/overseer-gen/Cargo.toml +++ b/node/overseer/overseer-gen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-overseer-gen" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Generate an overseer including builder pattern and message wrapper from a single struct." @@ -19,4 +19,4 @@ futures-timer = "3.0.2" pin-project = "1.0" [dev-dependencies] -trybuild = "1.0.45" +trybuild = "1.0.50" diff --git a/node/overseer/overseer-gen/proc-macro/Cargo.toml b/node/overseer/overseer-gen/proc-macro/Cargo.toml index aa972b0e6a56..11bdf036952b 100644 --- a/node/overseer/overseer-gen/proc-macro/Cargo.toml +++ b/node/overseer/overseer-gen/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-overseer-gen-proc-macro" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." @@ -12,9 +12,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.77", features = ["full", "extra-traits"] } -quote = "1.0.9" -proc-macro2 = "1.0.26" +syn = { version = "1.0.80", features = ["full", "extra-traits"] } +quote = "1.0.10" +proc-macro2 = "1.0.30" proc-macro-crate = "1.1.0" [dev-dependencies] diff --git a/node/overseer/overseer-gen/proc-macro/build.rs b/node/overseer/overseer-gen/proc-macro/build.rs new file mode 100644 index 000000000000..ebc4e5d75ab5 --- /dev/null +++ b/node/overseer/overseer-gen/proc-macro/build.rs @@ -0,0 +1,2 @@ +/// A dummy build script, so `OUT_DIR` is set. 
+fn main() {} diff --git a/node/overseer/overseer-gen/proc-macro/src/lib.rs b/node/overseer/overseer-gen/proc-macro/src/lib.rs index 1a0634b29720..1c7d32d6449f 100644 --- a/node/overseer/overseer-gen/proc-macro/src/lib.rs +++ b/node/overseer/overseer-gen/proc-macro/src/lib.rs @@ -98,12 +98,12 @@ pub(crate) fn impl_overseer_gen( additive.extend(impl_message_wrapper_enum(&info)?); additive.extend(impl_dispatch(&info)); - #[cfg(feature = "expansion")] - { + if cfg!(feature = "expansion") { use std::io::Write; - let cwd = std::env::current_dir().unwrap(); - let path: std::path::PathBuf = cwd.join("overlord-expansion.rs"); + let out = env!("OUT_DIR"); + let out = std::path::PathBuf::from(out); + let path = out.join("overlord-expansion.rs"); let mut f = std::fs::OpenOptions::new() .write(true) .create(true) @@ -117,9 +117,15 @@ pub(crate) fn impl_overseer_gen( std::process::Command::new("rustfmt") .arg("--edition=2018") .arg(&path) - .current_dir(cwd) + .current_dir(out) .spawn() .expect("Running rustfmt works. qed"); + + let path = path.display().to_string(); + Ok(quote! 
{ + include!( #path ); + }) + } else { + Ok(additive) } - Ok(additive) } diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 39eb91e0f6d6..7fe1ed701a83 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -112,6 +112,7 @@ where ctx.send_message(CandidateValidationMessage::ValidateFromChainState( Default::default(), PoV { block_data: BlockData(Vec::new()) }.into(), + Default::default(), tx, )) .await; @@ -791,7 +792,12 @@ where fn test_candidate_validation_msg() -> CandidateValidationMessage { let (sender, _) = oneshot::channel(); let pov = Arc::new(PoV { block_data: BlockData(Vec::new()) }); - CandidateValidationMessage::ValidateFromChainState(Default::default(), pov, sender) + CandidateValidationMessage::ValidateFromChainState( + Default::default(), + pov, + Default::default(), + sender, + ) } fn test_candidate_backing_msg() -> CandidateBackingMessage { diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index 4a6d9abb6880..eade6bf1e26c 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-primitives" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Primitives types for the Node-side" @@ -18,7 +18,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" } polkadot-parachain = { path = "../../parachain", default-features = false } schnorrkel = "0.9.1" -thiserror = "1.0.26" +thiserror = "1.0.30" serde = { version = "1.0.130", features = ["derive"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] diff --git a/node/primitives/src/disputes/mod.rs b/node/primitives/src/disputes/mod.rs index a787fdd4dd0a..fe5df710c76c 100644 --- a/node/primitives/src/disputes/mod.rs +++ b/node/primitives/src/disputes/mod.rs @@ -66,6 +66,26 @@ impl CandidateVotes { } impl 
SignedDisputeStatement { + /// Create a new `SignedDisputeStatement` from information + /// that is available on-chain, and hence already can be trusted. + /// + /// Attention: Not to be used other than with guaranteed fetches. + pub fn new_unchecked_from_trusted_source( + dispute_statement: DisputeStatement, + candidate_hash: CandidateHash, + session_index: SessionIndex, + validator_public: ValidatorId, + validator_signature: ValidatorSignature, + ) -> Self { + SignedDisputeStatement { + dispute_statement, + candidate_hash, + validator_public, + validator_signature, + session_index, + } + } + /// Create a new `SignedDisputeStatement`, which is only possible by checking the signature. pub fn new_checked( dispute_statement: DisputeStatement, diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 6e8490b67b91..adc8846f4298 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -22,7 +22,7 @@ #![deny(missing_docs)] -use std::{convert::TryFrom, pin::Pin}; +use std::{convert::TryFrom, pin::Pin, time::Duration}; use bounded_vec::BoundedVec; use futures::Future; @@ -71,6 +71,17 @@ pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize; /// Number of sessions we want to consider in disputes. pub const DISPUTE_WINDOW: SessionIndex = 6; +/// The amount of time to spend on execution during backing. +pub const BACKING_EXECUTION_TIMEOUT: Duration = Duration::from_secs(2); + +/// The amount of time to spend on execution during approval or disputes. +/// +/// This is deliberately much longer than the backing execution timeout to +/// ensure that in the absence of extremely large disparities between hardware, +/// blocks that pass backing are considerd executable by approval checkers or +/// dispute participants. +pub const APPROVAL_EXECUTION_TIMEOUT: Duration = Duration::from_secs(6); + /// The cumulative weight of a block in a fork-choice rule. 
pub type BlockWeight = u32; diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 4b2162fa29ee..ba15a841e8a5 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-service" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -62,13 +62,13 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https: # External Crates futures = "0.3.17" hex-literal = "0.3.3" -tracing = "0.1.28" +tracing = "0.1.29" serde = { version = "1.0.130", features = ["derive"] } -thiserror = "1.0.26" +thiserror = "1.0.30" kvdb = "0.10.0" kvdb-rocksdb = { version = "0.14.0", optional = true } async-trait = "0.1.51" -lru = "0.6" +lru = "0.7" # Polkadot polkadot-node-core-parachains-inherent = { path = "../core/parachains-inherent" } diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 502c9dbf7217..84c6e767f0ea 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -558,6 +558,7 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi registrar: westend_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::v1::LOWEST_PUBLIC_ID, }, + xcm_pallet: westend_runtime::XcmPalletConfig { safe_xcm_version: Some(2) }, } } @@ -755,6 +756,7 @@ fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisC }, gilt: Default::default(), paras: Default::default(), + xcm_pallet: kusama::XcmPalletConfig { safe_xcm_version: Some(2) }, } } @@ -1054,6 +1056,7 @@ fn rococo_staging_testnet_config_genesis(wasm_binary: &[u8]) -> rococo_runtime:: registrar: rococo_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::v1::LOWEST_PUBLIC_ID, }, + xcm_pallet: rococo_runtime::XcmPalletConfig { safe_xcm_version: Some(2) }, // bridge_rococo_grandpa: rococo_runtime::BridgeRococoGrandpaConfig { // owner: Some(endowed_accounts[0].clone()), // ..Default::default() @@ 
-1411,6 +1414,7 @@ pub fn kusama_testnet_genesis( }, gilt: Default::default(), paras: Default::default(), + xcm_pallet: kusama::XcmPalletConfig { safe_xcm_version: Some(2) }, } } @@ -1492,6 +1496,7 @@ pub fn westend_testnet_genesis( registrar: westend_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::v1::LOWEST_PUBLIC_ID, }, + xcm_pallet: westend_runtime::XcmPalletConfig { safe_xcm_version: Some(2) }, } } @@ -1568,6 +1573,7 @@ pub fn rococo_testnet_genesis( registrar: rococo_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::v1::LOWEST_PUBLIC_ID, }, + xcm_pallet: rococo_runtime::XcmPalletConfig { safe_xcm_version: Some(2) }, // bridge_rococo_grandpa: rococo_runtime::BridgeRococoGrandpaConfig { // owner: Some(root_key.clone()), // ..Default::default() diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index a1005f284686..7f004c49f211 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -723,17 +723,9 @@ where let chain_spec = config.chain_spec.cloned_box(); - // we should remove this check before we deploy parachains on polkadot - // TODO: https://github.com/paritytech/polkadot/issues/3326 - let is_relay_chain = chain_spec.is_kusama() || - chain_spec.is_westend() || - chain_spec.is_rococo() || - chain_spec.is_wococo(); - let local_keystore = basics.keystore_container.local_keystore(); - let requires_overseer_for_chain_sel = local_keystore.is_some() && - is_relay_chain && - (role.is_authority() || is_collator.is_collator()); + let requires_overseer_for_chain_sel = + local_keystore.is_some() && (role.is_authority() || is_collator.is_collator()); let select_chain = SelectRelayChain::new( basics.backend.clone(), @@ -788,9 +780,16 @@ where let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(); config.network.request_response_protocols.push(cfg); + let grandpa_hard_forks = if config.chain_spec.is_kusama() { + grandpa_support::kusama_hard_forks() + } else { + Vec::new() + }; + let warp_sync = 
Arc::new(grandpa::warp_proof::NetworkProvider::new( backend.clone(), import_setup.1.shared_authority_set().clone(), + grandpa_hard_forks, )); let (network, system_rpc_tx, network_starter) = diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index e17ce65a763f..0e196d582d47 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem-test-helpers" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Subsystem traits and message definitions" diff --git a/node/subsystem-types/Cargo.toml b/node/subsystem-types/Cargo.toml index 0fa9a96f1a43..8b7e493decf1 100644 --- a/node/subsystem-types/Cargo.toml +++ b/node/subsystem-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem-types" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Subsystem traits and message definitions" @@ -17,4 +17,4 @@ polkadot-overseer-gen = { path = "../overseer/overseer-gen" } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } smallvec = "1.6.1" substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.26" +thiserror = "1.0.30" diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 6cc535bedb60..801d5c595a01 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -51,6 +51,7 @@ use polkadot_statement_table::v1::Misbehavior; use std::{ collections::{BTreeMap, HashSet}, sync::Arc, + time::Duration, }; /// Network events as transmitted to other subsystems, wrapped in their message types. 
@@ -114,6 +115,8 @@ pub enum CandidateValidationMessage { ValidateFromChainState( CandidateDescriptor, Arc, + /// Execution timeout + Duration, oneshot::Sender>, ), /// Validate a candidate with provided, exhaustive parameters for validation. @@ -130,6 +133,8 @@ pub enum CandidateValidationMessage { ValidationCode, CandidateDescriptor, Arc, + /// Execution timeout + Duration, oneshot::Sender>, ), } @@ -138,8 +143,8 @@ impl CandidateValidationMessage { /// If the current variant contains the relay parent hash, return it. pub fn relay_parent(&self) -> Option { match self { - Self::ValidateFromChainState(_, _, _) => None, - Self::ValidateFromExhaustive(_, _, _, _, _) => None, + Self::ValidateFromChainState(_, _, _, _) => None, + Self::ValidateFromExhaustive(_, _, _, _, _, _) => None, } } } @@ -614,6 +619,13 @@ pub enum RuntimeApiRequest { OccupiedCoreAssumption, RuntimeApiSender>, ), + /// Get the persisted validation data for a particular para along with the current validation code + /// hash, matching the data hash against an expected one. + AssumedValidationData( + ParaId, + Hash, + RuntimeApiSender>, + ), /// Sends back `true` if the validation outputs pass all acceptance criteria checks. CheckValidationOutputs( ParaId, @@ -646,6 +658,8 @@ pub enum RuntimeApiRequest { ), /// Get information about the BABE epoch the block was included in. CurrentBabeEpoch(RuntimeApiSender), + /// Get all disputes in relation to a relay parent. + FetchOnChainVotes(RuntimeApiSender>), } /// A message to the Runtime API subsystem. 
diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index 8fb43e7946f7..749b0bd4b1ba 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem-util" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Subsystem traits and message definitions" @@ -12,10 +12,10 @@ itertools = "0.10" parity-scale-codec = { version = "2.3.1", default-features = false, features = ["derive"] } pin-project = "1.0.8" rand = "0.8.3" -thiserror = "1.0.26" -tracing = "0.1.28" +thiserror = "1.0.30" +tracing = "0.1.29" derive_more = "0.99.11" -lru = "0.6.6" +lru = "0.7.0" polkadot-node-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" } polkadot-node-jaeger = { path = "../jaeger" } diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 0c5e35d1abc8..7ef0fccffc4e 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -53,7 +53,7 @@ use polkadot_primitives::v1::{ AuthorityDiscoveryId, CandidateEvent, CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, - ValidatorId, ValidatorIndex, + ValidationCodeHash, ValidatorId, ValidatorIndex, }; use sp_application_crypto::AppKey; use sp_core::{traits::SpawnNamed, Public}; @@ -206,6 +206,7 @@ specialize_requests! 
{ fn request_validator_groups() -> (Vec>, GroupRotationInfo); ValidatorGroups; fn request_availability_cores() -> Vec; AvailabilityCores; fn request_persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; PersistedValidationData; + fn request_assumed_validation_data(para_id: ParaId, expected_persisted_validation_data_hash: Hash) -> Option<(PersistedValidationData, ValidationCodeHash)>; AssumedValidationData; fn request_session_index_for_child() -> SessionIndex; SessionIndexForChild; fn request_validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCode; fn request_candidate_pending_availability(para_id: ParaId) -> Option; CandidatePendingAvailability; @@ -638,7 +639,7 @@ struct JobSubsystemParams { /// Conceptually, this is very simple: it just loops forever. /// /// - On incoming overseer messages, it starts or stops jobs as appropriate. -/// - On other incoming messages, if they can be converted into Job::ToJob and +/// - On other incoming messages, if they can be converted into `Job::ToJob` and /// include a hash, then they're forwarded to the appropriate individual job. /// - On outgoing messages from the jobs, it forwards them to the overseer. 
pub struct JobSubsystem { diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml index 5f6f1f2ca4a7..63925c5c328d 100644 --- a/node/subsystem/Cargo.toml +++ b/node/subsystem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" description = "Subsystem traits and message definitions and the generated overseer" diff --git a/node/test/client/Cargo.toml b/node/test/client/Cargo.toml index b6aa140fece6..d7ae86a39490 100644 --- a/node/test/client/Cargo.toml +++ b/node/test/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-test-client" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/test/polkadot-simnet/common/Cargo.toml b/node/test/polkadot-simnet/common/Cargo.toml index 70a788e33640..c4d0cb0f22a6 100644 --- a/node/test/polkadot-simnet/common/Cargo.toml +++ b/node/test/polkadot-simnet/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-simnet" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -33,7 +33,7 @@ polkadot-runtime = { path = "../../../../runtime/polkadot" } polkadot-runtime-common = { path = "../../../../runtime/common" } codec = { package = "parity-scale-codec", version = "2.0.0" } -structopt = "0.3.23" +structopt = "0.3.25" [dev-dependencies] log = "0.4.14" diff --git a/node/test/polkadot-simnet/node/Cargo.toml b/node/test/polkadot-simnet/node/Cargo.toml index 6cf6ed9990c6..e64ed09b567d 100644 --- a/node/test/polkadot-simnet/node/Cargo.toml +++ b/node/test/polkadot-simnet/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-simnet-node" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/test/polkadot-simnet/test/Cargo.toml b/node/test/polkadot-simnet/test/Cargo.toml index 18b41010f6ac..85d60968d6d0 100644 --- a/node/test/polkadot-simnet/test/Cargo.toml 
+++ b/node/test/polkadot-simnet/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-simnet-test" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 97b885f9dc49..bfd478797df3 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-test-service" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -8,7 +8,7 @@ edition = "2018" futures = "0.3.17" futures01 = { package = "futures", version = "0.1.29" } hex = "0.4.3" -tracing = "0.1.28" +tracing = "0.1.29" rand = "0.8.3" tempfile = "3.2.0" tokio = "1.12.0" diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index d52b9ecc3284..103be52c5fd7 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -177,6 +177,7 @@ pub fn node_config( rpc_ws_max_connections: None, rpc_cors: None, rpc_methods: Default::default(), + ws_max_out_buffer_capacity: None, prometheus_config: None, telemetry_endpoints: None, default_heap_pages: None, @@ -190,7 +191,6 @@ pub fn node_config( announce_block: true, base_path: Some(base_path), informant_output_format: Default::default(), - disable_log_reloading: false, } } diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml index 5a09573f7926..77dcd13b5ce5 100644 --- a/parachain/Cargo.toml +++ b/parachain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-parachain" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Types and utilities for creating and working with parachains" edition = "2018" diff --git a/parachain/test-parachains/Cargo.toml b/parachain/test-parachains/Cargo.toml index cf2f4415628d..8d43827b1a2c 100644 --- a/parachain/test-parachains/Cargo.toml +++ b/parachain/test-parachains/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachains" -version = "0.9.11" +version = 
"0.9.12" authors = ["Parity Technologies "] description = "Integration tests using the test-parachains" edition = "2018" diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index fbc6bc24613d..52e9da3a5536 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-adder" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Test parachain which adds to a number as its state transition" edition = "2018" diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 4315051ff9a9..eb3840bf5d6e 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-adder-collator" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Collator for the adder test parachain" edition = "2018" @@ -18,7 +18,7 @@ parity-scale-codec = { version = "2.3.1", default-features = false, features = [ futures = "0.3.17" futures-timer = "3.0.2" log = "0.4.13" -structopt = "0.3.23" +structopt = "0.3.25" test-parachain-adder = { path = ".." 
} polkadot-primitives = { path = "../../../../primitives" } diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index dd9a087928cf..1f3dc1db4c0f 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-halt" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Test parachain which executes forever" edition = "2018" diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 4aa5d00a3a7c..c84c0f8f77a2 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-primitives" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/primitives/src/v0.rs b/primitives/src/v0.rs index 7e15a0b87308..2000c173b879 100644 --- a/primitives/src/v0.rs +++ b/primitives/src/v0.rs @@ -735,6 +735,7 @@ impl CompactStatement { /// An either implicit or explicit attestation to the validity of a parachain /// candidate. #[derive(Clone, Eq, PartialEq, Decode, Encode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub enum ValidityAttestation { /// Implicit validity attestation by issuing. /// This corresponds to issuance of a `Candidate` statement. @@ -747,6 +748,18 @@ pub enum ValidityAttestation { } impl ValidityAttestation { + /// Produce the underlying signed payload of the attestation, given the hash of the candidate, + /// which should be known in context. + pub fn to_compact_statement(&self, candidate_hash: CandidateHash) -> CompactStatement { + // Explicit and implicit map directly from + // `ValidityVote::Valid` and `ValidityVote::Issued`, and hence there is a + // `1:1` relationshow which enables the conversion. 
+ match *self { + ValidityAttestation::Implicit(_) => CompactStatement::Seconded(candidate_hash), + ValidityAttestation::Explicit(_) => CompactStatement::Valid(candidate_hash), + } + } + /// Get a reference to the signature. pub fn signature(&self) -> &ValidatorSignature { match *self { diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index 40ca368fc6e9..a4b598dd6c6a 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -945,6 +945,22 @@ pub struct SessionInfo { pub needed_approvals: u32, } +/// Scraped runtime backing votes and resolved disputes. +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq, Default, MallocSizeOf))] +pub struct ScrapedOnChainVotes { + /// The session in which the block was included. + pub session: SessionIndex, + /// Set of backing validators for each candidate, represented by its candidate + /// receipt. + pub backing_validators_per_candidate: + Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, + /// On-chain-recorded set of disputes. + /// Note that the above `backing_validators` are + /// unrelated to the backers of the disputes candidates. + pub disputes: MultiDisputeStatementSet, +} + /// A vote of approval on a candidate. #[derive(Clone, RuntimeDebug)] pub struct ApprovalVote(pub CandidateHash); @@ -960,7 +976,7 @@ impl ApprovalVote { sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. - pub trait ParachainHost { + pub trait ParachainHost { /// Get the current validators. fn validators() -> Vec; @@ -981,6 +997,14 @@ sp_api::decl_runtime_apis! { fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) -> Option>; + /// Returns the persisted validation data for the given `ParaId` along with the corresponding + /// validation code hash. 
Instead of accepting assumption about the para, matches the validation + /// data hash against an expected one and yields `None` if they're not equal. + fn assumed_validation_data( + para_id: Id, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)>; + /// Checks if the given validation outputs pass the acceptance criteria. fn check_validation_outputs(para_id: Id, outputs: CandidateCommitments) -> bool; @@ -1017,6 +1041,9 @@ sp_api::decl_runtime_apis! { /// Get the validation code from its hash. fn validation_code_by_hash(hash: ValidationCodeHash) -> Option; + + /// Scrape dispute relevant from on-chain, backing votes and resolved disputes. + fn on_chain_votes() -> Option>; } } @@ -1182,6 +1209,7 @@ impl From for runtime_primitives::DigestItem { /// /// Statements are either in favor of the candidate's validity or against it. #[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub enum DisputeStatement { /// A valid statement, of the given kind. #[codec(index = 0)] @@ -1251,6 +1279,7 @@ impl DisputeStatement { /// Different kinds of statements of validity on a candidate. #[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub enum ValidDisputeStatementKind { /// An explicit statement issued as part of a dispute. #[codec(index = 0)] @@ -1268,6 +1297,7 @@ pub enum ValidDisputeStatementKind { /// Different kinds of statements of invalidity on a candidate. #[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub enum InvalidDisputeStatementKind { /// An explicit statement issued as part of a dispute. #[codec(index = 0)] @@ -1296,6 +1326,7 @@ impl ExplicitDisputeStatement { /// A set of statements about a specific candidate. 
#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct DisputeStatementSet { /// The candidate referenced by this set. pub candidate_hash: CandidateHash, diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index c6367c050a04..adb95e1f6389 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -281,7 +281,7 @@ On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`: * Load the historical validation code of the parachain by dispatching a `RuntimeApiRequest::ValidationCodeByHash(descriptor.validation_code_hash)` against the state of `block_hash`. * Spawn a background task with a clone of `background_tx` * Wait for the available data - * Issue a `CandidateValidationMessage::ValidateFromExhaustive` message + * Issue a `CandidateValidationMessage::ValidateFromExhaustive` message with `APPROVAL_EXECUTION_TIMEOUT` as the timeout parameter. * Wait for the result of validation * Check that the result of validation, if valid, matches the commitments in the receipt. * If valid, issue a message on `background_tx` detailing the request. diff --git a/roadmap/implementers-guide/src/node/backing/candidate-backing.md b/roadmap/implementers-guide/src/node/backing/candidate-backing.md index c39ffabdcd98..4b25a89cb1a5 100644 --- a/roadmap/implementers-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementers-guide/src/node/backing/candidate-backing.md @@ -123,7 +123,7 @@ Dispatch a [`AvailabilityDistributionMessage`][ADM]`::FetchPoV{ validator_index, ### Validate PoV Block Create a `(sender, receiver)` pair. -Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, pov, sender)` and listen on the receiver for a response. 
+Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, pov, BACKING_EXECUTION_TIMEOUT, sender)` and listen on the receiver for a response. ### Distribute Signed Statement diff --git a/roadmap/implementers-guide/src/node/collators/collation-generation.md b/roadmap/implementers-guide/src/node/collators/collation-generation.md index 34be8ea7c139..0a17a8619ab1 100644 --- a/roadmap/implementers-guide/src/node/collators/collation-generation.md +++ b/roadmap/implementers-guide/src/node/collators/collation-generation.md @@ -21,27 +21,49 @@ Output: `CollationDistributionMessage` The process of generating a collation for a parachain is very parachain-specific. As such, the details of how to do so are left beyond the scope of this description. The subsystem should be implemented as an abstract wrapper, which is aware of this configuration: ```rust +/// The output of a collator. +/// +/// This differs from `CandidateCommitments` in two ways: +/// +/// - does not contain the erasure root; that's computed at the Polkadot level, not at Cumulus +/// - contains a proof of validity. pub struct Collation { /// Messages destined to be interpreted by the Relay chain itself. pub upward_messages: Vec, + /// The horizontal messages sent by the parachain. + pub horizontal_messages: Vec>, /// New validation code. pub new_validation_code: Option, /// The head-data produced as a result of execution. pub head_data: HeadData, /// Proof to verify the state transition of the parachain. pub proof_of_validity: PoV, + /// The number of messages processed from the DMQ. + pub processed_downward_messages: u32, + /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + pub hrmp_watermark: BlockNumber, } /// Result of the [`CollatorFn`] invocation. pub struct CollationResult { - /// The collation that was build. - collation: Collation, - /// An optional result sender that should be informed about a successfully seconded collation. 
- /// - /// There is no guarantee that this sender is informed ever about any result, it is completly okay to just drop it. - /// However, if it is called, it should be called with the signed statement of a parachain validator seconding the - /// collation. - result_sender: Option>, + /// The collation that was build. + pub collation: Collation, + /// An optional result sender that should be informed about a successfully seconded collation. + /// + /// There is no guarantee that this sender is informed ever about any result, it is completely okay to just drop it. + /// However, if it is called, it should be called with the signed statement of a parachain validator seconding the + /// collation. + pub result_sender: Option>, +} + +/// Signal that is being returned when a collation was seconded by a validator. +pub struct CollationSecondedSignal { + /// The hash of the relay chain block that was used as context to sign [`Self::statement`]. + pub relay_parent: Hash, + /// The statement about seconding the collation. + /// + /// Anything else than `Statement::Seconded` is forbidden here. + pub statement: SignedFullStatement, } /// Collation function. @@ -51,18 +73,22 @@ pub struct CollationResult { /// /// Returns an optional [`CollationResult`]. pub type CollatorFn = Box< - dyn Fn(Hash, &PersistedValidationData) -> Pin> + Send>> - + Send - + Sync, + dyn Fn( + Hash, + &PersistedValidationData, + ) -> Pin> + Send>> + + Send + + Sync, >; -struct CollationGenerationConfig { - key: CollatorPair, - /// Collate will be called with the relay chain hash the parachain should build - /// a block on and the `ValidationData` that provides information about the state - /// of the parachain on the relay chain. - collator: CollatorFn, - para_id: ParaId, +/// Configuration for the collation generator +pub struct CollationGenerationConfig { + /// Collator's authentication key, so it can sign things. + pub key: CollatorPair, + /// Collation function. See [`CollatorFn`] for more details. 
+ pub collator: CollatorFn, + /// The parachain that this collator collates for + pub para_id: ParaId, } ``` diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index 2b4f936f1b6e..c8bf0153f494 100644 --- a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -89,6 +89,8 @@ For each leaf in the leaves update: * Use `iter_with_prefix` to remove everything from `"earliest-session"` up to `state.highest_session - DISPUTE_WINDOW` from the DB under `"candidate-votes"`. * Update `"earliest-session"` to be equal to `state.highest_session - DISPUTE_WINDOW`. * For each new block, explicitly or implicitly, under the new leaf, scan for a dispute digest which indicates a rollback. If a rollback is detected, use the `ChainApi` subsystem to blacklist the chain. +* For each new block, use the `RuntimeApi` to obtain a `ScrapedOnChainVotes` and handle them as if they were provided by means of a incoming `DisputeCoordinatorMessage::ImportStatement` message. + * In the case of a concluded dispute, there are some cases that do not guarantee the presence of a `CandidateReceipt`, where handling has to be defered . ### On `OverseerSignal::Conclude` diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-participation.md b/roadmap/implementers-guide/src/node/disputes/dispute-participation.md index b3e1c11fa2be..fc0517fa4e1e 100644 --- a/roadmap/implementers-guide/src/node/disputes/dispute-participation.md +++ b/roadmap/implementers-guide/src/node/disputes/dispute-participation.md @@ -48,7 +48,7 @@ Conclude. * If the data is recovered, dispatch a [`RuntimeApiMessage::ValidationCodeByHash`][RuntimeApiMessage] with the parameters `(candidate_receipt.descriptor.validation_code_hash)` at `state.recent_block.hash`. 
* Dispatch a [`AvailabilityStoreMessage::StoreAvailableData`][AvailabilityStoreMessage] with the data. * If the code is not fetched from the chain, return. This should be impossible with correct relay chain configuration, at least if chain synchronization is working correctly. -* Dispatch a [`CandidateValidationMessage::ValidateFromExhaustive`][CandidateValidationMessage] with the available data and the validation code. +* Dispatch a [`CandidateValidationMessage::ValidateFromExhaustive`][CandidateValidationMessage] with the available data and the validation code and `APPROVAL_EXECUTION_TIMEOUT` as the timeout parameter. * If the validation result is `Invalid`, [cast invalid votes](#cast-votes) and return. * If the validation fails, [cast invalid votes](#cast-votes) and return. * If the validation succeeds, compute the `CandidateCommitments` based on the validation result and compare against the candidate receipt's `commitments_hash`. If they match, [cast valid votes](#cast-votes) and if not, [cast invalid votes](#cast-votes). diff --git a/roadmap/implementers-guide/src/runtime/parainherent.md b/roadmap/implementers-guide/src/runtime/parainherent.md index cb5bb45d8d81..cc5e209362e9 100644 --- a/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/roadmap/implementers-guide/src/runtime/parainherent.md @@ -5,6 +5,7 @@ This module is responsible for providing all data given to the runtime by the bl This module does not have the same initialization/finalization concerns as the others, as it only requires that entry points be triggered after all modules have initialized and that finalization happens after entry points are triggered. Both of these are assumptions we have already made about the runtime's order of operations, so this module doesn't need to be initialized or finalized by the `Initializer`. There are a couple of important notes to the operations in this inherent as they relate to disputes. + 1. 
We don't accept bitfields or backed candidates if in "governance-only" mode from having a local dispute conclude on this fork. 1. When disputes are initiated, we remove the block from pending availability. This allows us to roll back chains to the block before blocks are included as opposed to backing. It's important to do this before processing bitfields. 1. `Inclusion::collect_disputed` is kind of expensive so it's important to gate this on whether there are actually any new disputes. Which should be never. @@ -13,9 +14,15 @@ There are a couple of important notes to the operations in this inherent as they ## Storage ```rust +/// Whether the para inherent was included or not. Included: Option<()>, ``` +```rust +/// Scraped on chain votes to be used in disputes off-chain. +OnChainVotes: Option, +``` + ## Finalization 1. Take (get and clear) the value of `Included`. If it is not `Some`, throw an unrecoverable error. @@ -26,16 +33,18 @@ Included: Option<()>, 1. Hash the parent header and make sure that it corresponds to the block hash of the parent (tracked by the `frame_system` FRAME module), 1. Invoke `Disputes::provide_multi_dispute_data`. 1. If `Disputes::is_frozen`, return and set `Included` to `Some(())`. - 1. If there are any concluded disputes from the current session, invoke `Inclusion::collect_disputed` with the disputed candidates. Annotate each returned core with `FreedReason::Concluded`. - 1. The `Bitfields` are first forwarded to the `Inclusion::process_bitfields` routine, returning a set of freed cores. Provide the number of availability cores (`Scheduler::availability_cores().len()`) as the expected number of bits and a `Scheduler::core_para` as a core-lookup to the `process_bitfields` routine. Annotate each of these freed cores with `FreedReason::Concluded`. + 1. If there are any concluded disputes from the current session, invoke `Inclusion::collect_disputed` with the disputed candidates. 
Annotate each returned core with `FreedReason::Concluded`, sort them, and invoke `Scheduler::free_cores` with them. + 1. The `Bitfields` are first forwarded to the `Inclusion::process_bitfields` routine, returning a set of included candidates and the respective freed cores. Provide the number of availability cores (`Scheduler::availability_cores().len()`) as the expected number of bits and a `Scheduler::core_para` as a core-lookup to the `process_bitfields` routine. Annotate each of these freed cores with `FreedReason::Concluded`. 1. For each freed candidate from the `Inclusion::process_bitfields` call, invoke `Disputes::note_included(current_session, candidate)`. 1. If `Scheduler::availability_timeout_predicate` is `Some`, invoke `Inclusion::collect_pending` using it and annotate each of those freed cores with `FreedReason::TimedOut`. - 1. Combine and sort the dispute-freed cores, the bitfield-freed cores, and the timed-out cores. + 1. Combine and sort the bitfield-freed cores and the timed-out cores. 1. Invoke `Scheduler::clear` 1. Invoke `Scheduler::schedule(freed_cores, System::current_block())` 1. Extract `parent_storage_root` from the parent header, 1. If `Disputes::concluded_invalid(current_session, candidate)` is true for any of the `backed_candidates`, fail. 1. Invoke the `Inclusion::process_candidates` routine with the parameters `(parent_storage_root, backed_candidates, Scheduler::scheduled(), Scheduler::group_validators)`. - 1. Call `Scheduler::occupied` using the return value of the `Inclusion::process_candidates` call above, first sorting the list of assigned core indices. + 1. Deconstruct the returned `ProcessedCandidates` value into `occupied` core indices, and backing validators by candidate `backing_validators_per_candidate` represented by `Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>`. + 1. 
Set `OnChainVotes` to `ScrapedOnChainVotes`, based on the `current_session`, concluded `disputes`, and `backing_validators_per_candidate`. + 1. Call `Scheduler::occupied` using the `occupied` core indices of the returned above, first sorting the list of assigned core indices. 1. Call the `Ump::process_pending_upward_messages` routine to execute all messages in upward dispatch queues. 1. If all of the above succeeds, set `Included` to `Some(())`. diff --git a/roadmap/implementers-guide/src/runtime/scheduler.md b/roadmap/implementers-guide/src/runtime/scheduler.md index 68b1a8abb722..16c3280d1808 100644 --- a/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/roadmap/implementers-guide/src/runtime/scheduler.md @@ -82,7 +82,7 @@ digraph { ## Validator Groups -Validator group assignments do not need to change very quickly. The security benefits of fast rotation are redundant with the challenge mechanism in the [Approval process](../protocol-approval.md). Because of this, we only divide validators into groups at the beginning of the session and do not shuffle membership during the session. However, we do take steps to ensure that no particular validator group has dominance over a single parachain or parathread-multiplexer for an entire session to provide better guarantees of liveness. +Validator group assignments do not need to change very quickly. The security benefits of fast rotation are redundant with the challenge mechanism in the [Approval process](../protocol-approval.md). Because of this, we only divide validators into groups at the beginning of the session and do not shuffle membership during the session. However, we do take steps to ensure that no particular validator group has dominance over a single parachain or parathread-multiplexer for an entire session to provide better guarantees of live-ness. Validator groups rotate across availability cores in a round-robin fashion, with rotation occurring at fixed intervals. 
The i'th group will be assigned to the `(i+k)%n`'th core at any point in time, where `k` is the number of rotations that have occurred in the session, and `n` is the number of cores. This makes upcoming rotations within the same session predictable. @@ -185,7 +185,7 @@ Actions: 1. Resize `AvailabilityCores` to have length `n_cores` with all `None` entries. 1. Compute new validator groups by shuffling using a secure randomness beacon - Note that the total number of validators `V` in AV may not be evenly divided by `n_cores`. - - The groups are selected by partitioning AV. The first V % N groups will have (V / n_cores) + 1 members, while the remaining groups will have (V / N) members each. + - The groups are selected by partitioning AV. The first `V % N` groups will have `(V / n_cores) + 1` members, while the remaining groups will have `(V / N)` members each. - Instead of using the indices within AV, which point to the broader set, indices _into_ AV should be used. This implies that groups should have simply ascending validator indices. 1. Prune the parathread queue to remove all retries beyond `configuration.parathread_retries`. - Also prune all parathread claims corresponding to de-registered parathreads. @@ -209,11 +209,13 @@ No finalization routine runs for this module. - The core used for the parathread claim is the `next_core` field of the `ParathreadQueue` and adding `Paras::parachains().len()` to it. - `next_core` is then updated by adding 1 and taking it modulo `config.parathread_cores`. - The claim is then added to the claim index. -- `schedule(Vec<(CoreIndex, FreedReason)>, now: BlockNumber)`: schedule new core assignments, with a parameter indicating previously-occupied cores which are to be considered returned and why they are being returned. +- `free_cores(Vec<(CoreIndex, FreedReason)>)`: indicate previously-occupied cores which are to be considered returned and why they are being returned. 
- All freed parachain cores should be assigned to their respective parachain - All freed parathread cores whose reason for freeing was `FreedReason::Concluded` should have the claim removed from the claim index. - All freed parathread cores whose reason for freeing was `FreedReason::TimedOut` should have the claim added to the parathread queue again without retries incremented - All freed parathread cores should take the next parathread entry from the queue. +- `schedule(Vec<(CoreIndex, FreedReason)>, now: BlockNumber)`: schedule new core assignments, with a parameter indicating previously-occupied cores which are to be considered returned and why they are being returned. + - Invoke `free_cores(freed_cores)` - The i'th validator group will be assigned to the `(i+k)%n`'th core at any point in time, where `k` is the number of rotations that have occurred in the session, and `n` is the total number of cores. This makes upcoming rotations within the same session predictable. Rotations are based off of `now`. - `scheduled() -> Vec`: Get currently scheduled core assignments. - `occupied(Vec)`. Note that the given cores have become occupied. diff --git a/roadmap/implementers-guide/src/types/disputes.md b/roadmap/implementers-guide/src/types/disputes.md index 3043b7615abd..24f152b1308f 100644 --- a/roadmap/implementers-guide/src/types/disputes.md +++ b/roadmap/implementers-guide/src/types/disputes.md @@ -71,3 +71,20 @@ struct DisputeState { concluded_at: Option, } ``` + +## `ScrapedOnChainVotes` + +```rust +/// Type for transcending recorded on-chain +/// dispute relevant votes and conclusions to +/// the off-chain `DisputesCoordinator`. +struct ScrapedOnChainVotes { + /// The session index at which the block was included. + session: SessionIndex, + /// The backing and seconding validity attestations for all candidates, providing the full candidate receipt. 
+ backing_validators_per_candidate: Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)> + /// Set of concluded disputes that were recorded + /// on chain within the inherent. + disputes: MultiDisputeStatementSet, +} +``` diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 78d536f1a21c..f3195e713399 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -785,6 +785,9 @@ enum ValidationResult { Invalid, } +const BACKING_EXECUTION_TIMEOUT: Duration = 2 seconds; +const APPROVAL_EXECUTION_TIMEOUT: Duration = 6 seconds; + /// Messages received by the Validation subsystem. /// /// ## Validation Requests @@ -807,6 +810,7 @@ pub enum CandidateValidationMessage { ValidateFromChainState( CandidateDescriptor, Arc, + Duration, // Execution timeout. oneshot::Sender>, ), /// Validate a candidate with provided, exhaustive parameters for validation. @@ -823,6 +827,7 @@ pub enum CandidateValidationMessage { ValidationCode, CandidateDescriptor, Arc, + Duration, // Execution timeout. 
oneshot::Sender>, ), } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index a6a49f333562..9d2339c0db08 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-rpc" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 48ba8d36055e..38b4092c6fe4 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-common" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/runtime/common/slot_range_helper/Cargo.toml b/runtime/common/slot_range_helper/Cargo.toml index aec6b69355fe..d4bbb9bd60c7 100644 --- a/runtime/common/slot_range_helper/Cargo.toml +++ b/runtime/common/slot_range_helper/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "slot-range-helper" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/runtime/common/src/auctions.rs b/runtime/common/src/auctions.rs index a3866fed2ba2..1dddacc92c1f 100644 --- a/runtime/common/src/auctions.rs +++ b/runtime/common/src/auctions.rs @@ -34,8 +34,9 @@ use primitives::v1::Id as ParaId; use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; use sp_std::{mem::swap, prelude::*}; -type CurrencyOf = <::Leaser as Leaser>::Currency; -type BalanceOf = <<::Leaser as Leaser>::Currency as Currency< +type CurrencyOf = + <::Leaser as Leaser<::BlockNumber>>::Currency; +type BalanceOf = <<::Leaser as Leaser<::BlockNumber>>::Currency as Currency< ::AccountId, >>::Balance; @@ -65,7 +66,9 @@ impl WeightInfo for TestWeightInfo { /// An auction index. We count auctions in this type. pub type AuctionIndex = u32; -type LeasePeriodOf = <::Leaser as Leaser>::LeasePeriod; +type LeasePeriodOf = + <::Leaser as Leaser<::BlockNumber>>::LeasePeriod; + // Winning data type. This encodes the top bidders of each range together with their bid. 
type WinningData = [Option<(::AccountId, ParaId, BalanceOf)>; SlotRange::SLOT_RANGE_COUNT]; @@ -91,7 +94,11 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// The type representing the leasing system. - type Leaser: Leaser; + type Leaser: Leaser< + Self::BlockNumber, + AccountId = Self::AccountId, + LeasePeriod = Self::BlockNumber, + >; /// The parachain registrar type. type Registrar: Registrar; @@ -299,9 +306,8 @@ pub mod pallet { } } -impl Auctioneer for Pallet { +impl Auctioneer for Pallet { type AccountId = T::AccountId; - type BlockNumber = T::BlockNumber; type LeasePeriod = T::BlockNumber; type Currency = CurrencyOf; @@ -313,7 +319,7 @@ impl Auctioneer for Pallet { } // Returns the status of the auction given the current block number. - fn auction_status(now: Self::BlockNumber) -> AuctionStatus { + fn auction_status(now: T::BlockNumber) -> AuctionStatus { let early_end = match AuctionInfo::::get() { Some((_, early_end)) => early_end, None => return AuctionStatus::NotStarted, @@ -346,12 +352,13 @@ impl Auctioneer for Pallet { Self::handle_bid(bidder, para, AuctionCounter::::get(), first_slot, last_slot, amount) } - fn lease_period_index() -> Self::LeasePeriod { - T::Leaser::lease_period_index() + fn lease_period_index(b: T::BlockNumber) -> Option<(Self::LeasePeriod, bool)> { + T::Leaser::lease_period_index(b) } - fn lease_period() -> Self::LeasePeriod { - T::Leaser::lease_period() + #[cfg(any(feature = "runtime-benchmarks", test))] + fn lease_period_length() -> (T::BlockNumber, T::BlockNumber) { + T::Leaser::lease_period_length() } fn has_won_an_auction(para: ParaId, bidder: &T::AccountId) -> bool { @@ -374,10 +381,11 @@ impl Pallet { ) -> DispatchResult { let maybe_auction = AuctionInfo::::get(); ensure!(maybe_auction.is_none(), Error::::AuctionInProgress); - ensure!( - lease_period_index >= T::Leaser::lease_period_index(), - Error::::LeasePeriodInPast - ); + let now = frame_system::Pallet::::block_number(); + if let Some((current_lease_period, _)) 
= T::Leaser::lease_period_index(now) { + // If there is no active lease period, then we don't need to make this check. + ensure!(lease_period_index >= current_lease_period, Error::::LeasePeriodInPast); + } // Bump the counter. let n = AuctionCounter::::mutate(|n| { @@ -567,7 +575,9 @@ impl Pallet { let period_count = LeasePeriodOf::::from(range.len() as u32); match T::Leaser::lease_out(para, &leaser, amount, period_begin, period_count) { - Err(LeaseError::ReserveFailed) | Err(LeaseError::AlreadyEnded) => { + Err(LeaseError::ReserveFailed) | + Err(LeaseError::AlreadyEnded) | + Err(LeaseError::NoLeasePeriod) => { // Should never happen since we just unreserved this amount (and our offset is from the // present period). But if it does, there's not much we can do. }, @@ -735,7 +745,7 @@ mod tests { } pub struct TestLeaser; - impl Leaser for TestLeaser { + impl Leaser for TestLeaser { type AccountId = u64; type LeasePeriod = BlockNumber; type Currency = Balances; @@ -749,7 +759,10 @@ mod tests { ) -> Result<(), LeaseError> { LEASES.with(|l| { let mut leases = l.borrow_mut(); - if period_begin < Self::lease_period_index() { + let now = System::block_number(); + let (current_lease_period, _) = + Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; + if period_begin < current_lease_period { return Err(LeaseError::AlreadyEnded) } for period in period_begin..(period_begin + period_count) { @@ -779,12 +792,18 @@ mod tests { .unwrap_or_default() } - fn lease_period() -> Self::LeasePeriod { - 10 + fn lease_period_length() -> (BlockNumber, BlockNumber) { + (10, 0) } - fn lease_period_index() -> Self::LeasePeriod { - (System::block_number() / Self::lease_period()).into() + fn lease_period_index(b: BlockNumber) -> Option<(Self::LeasePeriod, bool)> { + let (lease_period_length, offset) = Self::lease_period_length(); + let b = b.checked_sub(offset)?; + + let lease_period = b / lease_period_length; + let first_block = (b % lease_period_length).is_zero(); + + 
Some((lease_period, first_block)) } fn already_leased( @@ -1689,7 +1708,7 @@ mod benchmarking { use frame_system::RawOrigin; use sp_runtime::{traits::Bounded, SaturatedConversion}; - use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; + use frame_benchmarking::{account, benchmarks, whitelisted_caller}; fn assert_last_event(generic_event: ::Event) { let events = frame_system::Pallet::::events(); @@ -1864,11 +1883,11 @@ mod benchmarking { verify { assert!(AuctionInfo::::get().is_none()); } - } - impl_benchmark_test_suite!( - Auctions, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); + impl_benchmark_test_suite!( + Auctions, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); + } } diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index a89bd0b4d03a..66a9e12d71d7 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -1388,6 +1388,7 @@ mod benchmarking { use super::*; use crate::claims::Call; use frame_benchmarking::{account, benchmarks}; + use frame_support::dispatch::UnfilteredDispatchable; use frame_system::RawOrigin; use secp_utils::*; use sp_runtime::{traits::ValidateUnsigned, DispatchResult}; @@ -1443,10 +1444,15 @@ mod benchmarking { super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, None)?; assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); let source = sp_runtime::transaction_validity::TransactionSource::External; - let call = Call::::claim { dest: account.clone(), ethereum_signature: signature.clone() }; + let call_enc = Call::::claim { + dest: account.clone(), + ethereum_signature: signature.clone() + }.encode(); }: { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); super::Pallet::::validate_unsigned(source, &call).map_err(|e| -> &'static str { e.into() })?; - 
super::Pallet::::claim(RawOrigin::None.into(), account, signature)?; + call.dispatch_bypass_filter(RawOrigin::None.into())?; } verify { assert_eq!(Claims::::get(eth_address), None); @@ -1488,11 +1494,17 @@ mod benchmarking { let signature = sig::(&secret_key, &account.encode(), statement.to_text()); super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, Some(statement))?; assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let call = Call::::claim_attest { dest: account.clone(), ethereum_signature: signature.clone(), statement: StatementKind::Regular.to_text().to_vec() }; + let call_enc = Call::::claim_attest { + dest: account.clone(), + ethereum_signature: signature.clone(), + statement: StatementKind::Regular.to_text().to_vec() + }.encode(); let source = sp_runtime::transaction_validity::TransactionSource::External; }: { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); super::Pallet::::validate_unsigned(source, &call).map_err(|e| -> &'static str { e.into() })?; - super::Pallet::::claim_attest(RawOrigin::None.into(), account, signature, statement.to_text().to_vec())?; + call.dispatch_bypass_filter(RawOrigin::None.into())?; } verify { assert_eq!(Claims::::get(eth_address), None); @@ -1518,7 +1530,7 @@ mod benchmarking { Preclaims::::insert(&account, eth_address); assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let call = super::Call::attest { statement: StatementKind::Regular.to_text().to_vec() }; + let call = super::Call::::attest { statement: StatementKind::Regular.to_text().to_vec() }; // We have to copy the validate statement here because of trait issues... 
:( let validate = |who: &T::AccountId, call: &super::Call| -> DispatchResult { if let Call::attest{ statement: attested_statement } = call { @@ -1529,9 +1541,12 @@ mod benchmarking { } Ok(()) }; + let call_enc = call.encode(); }: { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); validate(&account, &call)?; - super::Pallet::::attest(RawOrigin::Signed(account).into(), statement.to_text().to_vec())?; + call.dispatch_bypass_filter(RawOrigin::Signed(account).into())?; } verify { assert_eq!(Claims::::get(eth_address), None); @@ -1589,13 +1604,8 @@ mod benchmarking { assert!(super::Pallet::::eth_recover(&signature, &data, extra).is_some()); } } - } - - #[cfg(test)] - mod tests { - use super::*; - frame_benchmarking::impl_benchmark_test_suite!( + impl_benchmark_test_suite!( Pallet, crate::claims::tests::new_test_ext(), crate::claims::tests::Test, diff --git a/runtime/common/src/crowdloan.rs b/runtime/common/src/crowdloan.rs index c45f9750f631..d496617730fa 100644 --- a/runtime/common/src/crowdloan.rs +++ b/runtime/common/src/crowdloan.rs @@ -72,8 +72,11 @@ use sp_runtime::{ }; use sp_std::vec::Vec; -type CurrencyOf = <::Auctioneer as Auctioneer>::Currency; -type LeasePeriodOf = <::Auctioneer as Auctioneer>::LeasePeriod; +type CurrencyOf = + <::Auctioneer as Auctioneer<::BlockNumber>>::Currency; +type LeasePeriodOf = <::Auctioneer as Auctioneer< + ::BlockNumber, +>>::LeasePeriod; type BalanceOf = as Currency<::AccountId>>::Balance; #[allow(dead_code)] @@ -203,8 +206,8 @@ pub mod pallet { /// The type representing the auctioning system. type Auctioneer: Auctioneer< + Self::BlockNumber, AccountId = Self::AccountId, - BlockNumber = Self::BlockNumber, LeasePeriod = Self::BlockNumber, >; @@ -313,6 +316,8 @@ pub mod pallet { AlreadyInNewRaise, /// No contributions allowed during the VRF delay VrfDelayInProgress, + /// A lease period has not started yet, due to an offset in the starting block. 
+ NoLeasePeriod, } #[pallet::hooks] @@ -365,20 +370,31 @@ pub mod pallet { verifier: Option, ) -> DispatchResult { let depositor = ensure_signed(origin)?; + let now = frame_system::Pallet::::block_number(); ensure!(first_period <= last_period, Error::::LastPeriodBeforeFirstPeriod); let last_period_limit = first_period .checked_add(&((SlotRange::LEASE_PERIODS_PER_SLOT as u32) - 1).into()) .ok_or(Error::::FirstPeriodTooFarInFuture)?; ensure!(last_period <= last_period_limit, Error::::LastPeriodTooFarInFuture); - ensure!(end > >::block_number(), Error::::CannotEndInPast); - let last_possible_win_date = (first_period.saturating_add(One::one())) - .saturating_mul(T::Auctioneer::lease_period()); - ensure!(end <= last_possible_win_date, Error::::EndTooFarInFuture); - ensure!( - first_period >= T::Auctioneer::lease_period_index(), - Error::::FirstPeriodInPast - ); + ensure!(end > now, Error::::CannotEndInPast); + + // Here we check the lease period on the ending block is at most the first block of the + // period after `first_period`. If it would be larger, there is no way we could win an + // active auction, thus it would make no sense to have a crowdloan this long. + let (lease_period_at_end, is_first_block) = + T::Auctioneer::lease_period_index(end).ok_or(Error::::NoLeasePeriod)?; + let adjusted_lease_period_at_end = if is_first_block { + lease_period_at_end.saturating_sub(One::one()) + } else { + lease_period_at_end + }; + ensure!(adjusted_lease_period_at_end <= first_period, Error::::EndTooFarInFuture); + + // Can't start a crowdloan for a lease period that already passed. + if let Some((current_lease_period, _)) = T::Auctioneer::lease_period_index(now) { + ensure!(first_period >= current_lease_period, Error::::FirstPeriodInPast); + } // There should not be an existing fund. 
ensure!(!Funds::::contains_key(index), Error::::FundNotEnded); @@ -439,7 +455,9 @@ pub mod pallet { ensure!(now < fund.end, Error::::ContributionPeriodOver); // Make sure crowdloan is in a valid lease period - let current_lease_period = T::Auctioneer::lease_period_index(); + let now = frame_system::Pallet::::block_number(); + let (current_lease_period, _) = + T::Auctioneer::lease_period_index(now).ok_or(Error::::NoLeasePeriod)?; ensure!(current_lease_period <= fund.first_period, Error::::ContributionPeriodOver); // Make sure crowdloan has not already won. @@ -751,7 +769,8 @@ impl Pallet { // `fund.end` can represent the end of a failed crowdloan or the beginning of retirement // If the current lease period is past the first period they are trying to bid for, then // it is already too late to win the bid. - let current_lease_period = T::Auctioneer::lease_period_index(); + let (current_lease_period, _) = + T::Auctioneer::lease_period_index(now).ok_or(Error::::NoLeasePeriod)?; ensure!( now >= fund.end || current_lease_period > fund.first_period, Error::::FundNotEnded @@ -931,14 +950,16 @@ mod tests { } pub struct TestAuctioneer; - impl Auctioneer for TestAuctioneer { + impl Auctioneer for TestAuctioneer { type AccountId = u64; - type BlockNumber = BlockNumber; type LeasePeriod = u64; type Currency = Balances; fn new_auction(duration: u64, lease_period_index: u64) -> DispatchResult { - assert!(lease_period_index >= Self::lease_period_index()); + let now = System::block_number(); + let (current_lease_period, _) = + Self::lease_period_index(now).ok_or("no lease period yet")?; + assert!(lease_period_index >= current_lease_period); let ending = System::block_number().saturating_add(duration); AUCTION.with(|p| *p.borrow_mut() = Some((lease_period_index, ending))); @@ -991,12 +1012,17 @@ mod tests { Ok(()) } - fn lease_period_index() -> u64 { - System::block_number() / Self::lease_period() + fn lease_period_index(b: BlockNumber) -> Option<(u64, bool)> { + let 
(lease_period_length, offset) = Self::lease_period_length(); + let b = b.checked_sub(offset)?; + + let lease_period = b / lease_period_length; + let first_block = (b % lease_period_length).is_zero(); + Some((lease_period, first_block)) } - fn lease_period() -> u64 { - 20 + fn lease_period_length() -> (u64, u64) { + (20, 0) } fn has_won_an_auction(para: ParaId, bidder: &u64) -> bool { @@ -1367,7 +1393,8 @@ mod tests { let para_3 = new_para(); assert_ok!(Crowdloan::create(Origin::signed(1), para_3, 1000, 1, 4, 40, None)); run_to_block(40); - assert_eq!(TestAuctioneer::lease_period_index(), 2); + let now = System::block_number(); + assert_eq!(TestAuctioneer::lease_period_index(now).unwrap().0, 2); assert_noop!( Crowdloan::contribute(Origin::signed(1), para_3, 49, None), Error::::ContributionPeriodOver @@ -1830,7 +1857,7 @@ mod benchmarking { use sp_runtime::traits::{Bounded, CheckedSub}; use sp_std::prelude::*; - use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; + use frame_benchmarking::{account, benchmarks, whitelisted_caller}; fn assert_last_event(generic_event: ::Event) { let events = frame_system::Pallet::::events(); @@ -1842,7 +1869,11 @@ mod benchmarking { fn create_fund(id: u32, end: T::BlockNumber) -> ParaId { let cap = BalanceOf::::max_value(); - let lease_period_index = T::Auctioneer::lease_period_index(); + let (_, offset) = T::Auctioneer::lease_period_length(); + // Set to the very beginning of lease period index 0. 
+ frame_system::Pallet::::set_block_number(offset); + let now = frame_system::Pallet::::block_number(); + let (lease_period_index, _) = T::Auctioneer::lease_period_index(now).unwrap_or_default(); let first_period = lease_period_index; let last_period = lease_period_index + ((SlotRange::LEASE_PERIODS_PER_SLOT as u32) - 1).into(); @@ -1894,7 +1925,8 @@ mod benchmarking { let cap = BalanceOf::::max_value(); let first_period = 0u32.into(); let last_period = 3u32.into(); - let end = T::Auctioneer::lease_period(); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; let caller: T::AccountId = whitelisted_caller(); let head_data = T::Registrar::worst_head_data(); @@ -1913,7 +1945,9 @@ mod benchmarking { // Contribute has two arms: PreEnding and Ending, but both are equal complexity. contribute { - let fund_index = create_fund::(1, 100u32.into()); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; + let fund_index = create_fund::(1, end); let caller: T::AccountId = whitelisted_caller(); let contribution = T::MinContribution::get(); CurrencyOf::::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -1931,7 +1965,9 @@ mod benchmarking { } withdraw { - let fund_index = create_fund::(1337, 100u32.into()); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; + let fund_index = create_fund::(1337, end); let caller: T::AccountId = whitelisted_caller(); let contributor = account("contributor", 0, 0); contribute_fund::(&contributor, fund_index); @@ -1945,7 +1981,9 @@ mod benchmarking { #[skip_meta] refund { let k in 0 .. T::RemoveKeysLimit::get(); - let fund_index = create_fund::(1337, 100u32.into()); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; + let fund_index = create_fund::(1337, end); // Dissolve will remove at most `RemoveKeysLimit` at once. for i in 0 .. 
k { @@ -1960,7 +1998,9 @@ mod benchmarking { } dissolve { - let fund_index = create_fund::(1337, 100u32.into()); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; + let fund_index = create_fund::(1337, end); let caller: T::AccountId = whitelisted_caller(); frame_system::Pallet::::set_block_number(T::BlockNumber::max_value()); }: _(RawOrigin::Signed(caller.clone()), fund_index) @@ -1973,7 +2013,8 @@ mod benchmarking { let cap = BalanceOf::::max_value(); let first_period = 0u32.into(); let last_period = 3u32.into(); - let end = T::Auctioneer::lease_period(); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; let caller: T::AccountId = whitelisted_caller(); let head_data = T::Registrar::worst_head_data(); @@ -1997,7 +2038,9 @@ mod benchmarking { } add_memo { - let fund_index = create_fund::(1, 100u32.into()); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; + let fund_index = create_fund::(1, end); let caller: T::AccountId = whitelisted_caller(); contribute_fund::(&caller, fund_index); let worst_memo = vec![42; T::MaxMemoLength::get().into()]; @@ -2011,7 +2054,9 @@ mod benchmarking { } poke { - let fund_index = create_fund::(1, 100u32.into()); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end = lpl + offset; + let fund_index = create_fund::(1, end); let caller: T::AccountId = whitelisted_caller(); contribute_fund::(&caller, fund_index); NewRaise::::kill(); @@ -2028,7 +2073,8 @@ mod benchmarking { on_initialize { // We test the complexity over different number of new raise let n in 2 .. 
100; - let end_block: T::BlockNumber = 100u32.into(); + let (lpl, offset) = T::Auctioneer::lease_period_length(); + let end_block = lpl + offset - 1u32.into(); let pubkey = crypto::create_ed25519_pubkey(b"//verifier".to_vec()); @@ -2043,7 +2089,8 @@ mod benchmarking { Crowdloan::::contribute(RawOrigin::Signed(contributor).into(), fund_index, contribution, Some(sig))?; } - let lease_period_index = T::Auctioneer::lease_period_index(); + let now = frame_system::Pallet::::block_number(); + let (lease_period_index, _) = T::Auctioneer::lease_period_index(now).unwrap_or_default(); let duration = end_block .checked_sub(&frame_system::Pallet::::block_number()) .ok_or("duration of auction less than zero")?; @@ -2058,11 +2105,11 @@ mod benchmarking { assert_eq!(EndingsCount::::get(), old_endings_count + 1); assert_last_event::(Event::::HandleBidResult((n - 1).into(), Ok(())).into()); } - } - impl_benchmark_test_suite!( - Crowdloan, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); + impl_benchmark_test_suite!( + Crowdloan, + crate::integration_tests::new_test_ext_with_offset(10), + crate::integration_tests::Test, + ); + } } diff --git a/runtime/common/src/elections.rs b/runtime/common/src/elections.rs index 03989a64b439..ae0ec401f63a 100644 --- a/runtime/common/src/elections.rs +++ b/runtime/common/src/elections.rs @@ -70,7 +70,7 @@ pub type GenesisElectionOf = /// pallet-election-provider-multi-phase. pub const MINER_MAX_ITERATIONS: u32 = 10; -/// A source of random balance for the NPoS Solver, which is meant to be run by the offchain worker +/// A source of random balance for the NPoS Solver, which is meant to be run by the off-chain worker /// election miner. 
pub struct OffchainRandomBalancing; impl frame_support::pallet_prelude::Get> diff --git a/runtime/common/src/integration_tests.rs b/runtime/common/src/integration_tests.rs index 5653100815aa..ad750602b0db 100644 --- a/runtime/common/src/integration_tests.rs +++ b/runtime/common/src/integration_tests.rs @@ -159,7 +159,7 @@ impl pallet_balances::Config for Test { } impl configuration::Config for Test { - type WeightInfo = configuration::weights::WeightInfo; + type WeightInfo = configuration::TestWeightInfo; } impl shared::Config for Test {} @@ -167,7 +167,7 @@ impl shared::Config for Test {} impl paras::Config for Test { type Origin = Origin; type Event = Event; - type WeightInfo = paras::weights::WeightInfo; + type WeightInfo = paras::TestWeightInfo; } parameter_types! { @@ -203,6 +203,7 @@ impl auctions::Config for Test { parameter_types! { pub const LeasePeriod: BlockNumber = 100; + pub static LeaseOffset: BlockNumber = 0; } impl slots::Config for Test { @@ -210,6 +211,7 @@ impl slots::Config for Test { type Currency = Balances; type Registrar = Registrar; type LeasePeriod = LeasePeriod; + type LeaseOffset = LeaseOffset; type WeightInfo = crate::slots::TestWeightInfo; } @@ -254,6 +256,12 @@ pub fn new_test_ext() -> TestExternalities { ext } +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext_with_offset(n: BlockNumber) -> TestExternalities { + LeaseOffset::set(n); + new_test_ext() +} + const BLOCKS_PER_SESSION: u32 = 10; fn maybe_new_session(n: u32) { @@ -298,171 +306,176 @@ fn last_event() -> Event { System::events().pop().expect("Event expected").event } +// Runs an end to end test of the auction, crowdloan, slots, and onboarding process over varying +// lease period offsets. 
#[test] fn basic_end_to_end_works() { - new_test_ext().execute_with(|| { - let para_1 = LOWEST_PUBLIC_ID; - let para_2 = LOWEST_PUBLIC_ID + 1; - assert!(System::block_number().is_one()); - // User 1 and 2 will own parachains - Balances::make_free_balance_be(&1, 1_000_000_000); - Balances::make_free_balance_be(&2, 1_000_000_000); - // First register 2 parathreads - let genesis_head = Registrar::worst_head_data(); - let validation_code = Registrar::worst_validation_code(); - assert_ok!(Registrar::reserve(Origin::signed(1))); - assert_ok!(Registrar::register( - Origin::signed(1), - ParaId::from(para_1), - genesis_head.clone(), - validation_code.clone(), - )); - assert_ok!(Registrar::reserve(Origin::signed(2))); - assert_ok!(Registrar::register( - Origin::signed(2), - ParaId::from(2001), - genesis_head, - validation_code, - )); - - // Paras should be onboarding - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Onboarding)); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Onboarding)); - - // Start a new auction in the future - let duration = 99u32; - let lease_period_index_start = 4u32; - assert_ok!(Auctions::new_auction(Origin::root(), duration, lease_period_index_start)); - - // 2 sessions later they are parathreads - run_to_session(2); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); - - // Para 1 will bid directly for slot 1, 2 - // Open a crowdloan for Para 2 for slot 3, 4 - assert_ok!(Crowdloan::create( - Origin::signed(2), - ParaId::from(para_2), - 1_000, // Cap - lease_period_index_start + 2, // First Slot - lease_period_index_start + 3, // Last Slot - 200, // Block End - None, - )); - let crowdloan_account = Crowdloan::fund_account_id(ParaId::from(para_2)); - - // Auction ending begins on block 100, so we make a bid before then. 
- run_to_block(90); - - Balances::make_free_balance_be(&10, 1_000_000_000); - Balances::make_free_balance_be(&20, 1_000_000_000); + for offset in [0u32, 50, 100, 200].iter() { + LeaseOffset::set(*offset); + new_test_ext().execute_with(|| { + let para_1 = LOWEST_PUBLIC_ID; + let para_2 = LOWEST_PUBLIC_ID + 1; + assert!(System::block_number().is_one()); + // User 1 and 2 will own parachains + Balances::make_free_balance_be(&1, 1_000_000_000); + Balances::make_free_balance_be(&2, 1_000_000_000); + // First register 2 parathreads + let genesis_head = Registrar::worst_head_data(); + let validation_code = Registrar::worst_validation_code(); + assert_ok!(Registrar::reserve(Origin::signed(1))); + assert_ok!(Registrar::register( + Origin::signed(1), + ParaId::from(para_1), + genesis_head.clone(), + validation_code.clone(), + )); + assert_ok!(Registrar::reserve(Origin::signed(2))); + assert_ok!(Registrar::register( + Origin::signed(2), + ParaId::from(2001), + genesis_head, + validation_code, + )); - // User 10 will bid directly for parachain 1 - assert_ok!(Auctions::bid( - Origin::signed(10), - ParaId::from(para_1), - 1, // Auction Index - lease_period_index_start + 0, // First Slot - lease_period_index_start + 1, // Last slot - 910, // Amount - )); + // Paras should be onboarding + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Onboarding)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Onboarding)); - // User 2 will be a contribute to crowdloan for parachain 2 - Balances::make_free_balance_be(&2, 1_000_000_000); - assert_ok!(Crowdloan::contribute(Origin::signed(2), ParaId::from(para_2), 920, None)); + // Start a new auction in the future + let duration = 99u32 + offset; + let lease_period_index_start = 4u32; + assert_ok!(Auctions::new_auction(Origin::root(), duration, lease_period_index_start)); - // Auction ends at block 110 - run_to_block(109); - assert_eq!( - last_event(), - 
crowdloan::Event::::HandleBidResult(ParaId::from(para_2), Ok(())).into(), - ); - run_to_block(110); - assert_eq!(last_event(), auctions::Event::::AuctionClosed(1).into()); + // 2 sessions later they are parathreads + run_to_session(2); + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); - // Paras should have won slots - assert_eq!( - slots::Leases::::get(ParaId::from(para_1)), - // -- 1 --- 2 --- 3 --------- 4 ------------ 5 -------- - vec![None, None, None, Some((10, 910)), Some((10, 910))], - ); - assert_eq!( - slots::Leases::::get(ParaId::from(para_2)), - // -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7 ---------------- - vec![ - None, - None, - None, - None, + // Para 1 will bid directly for slot 1, 2 + // Open a crowdloan for Para 2 for slot 3, 4 + assert_ok!(Crowdloan::create( + Origin::signed(2), + ParaId::from(para_2), + 1_000, // Cap + lease_period_index_start + 2, // First Slot + lease_period_index_start + 3, // Last Slot + 200 + offset, // Block End None, - Some((crowdloan_account, 920)), - Some((crowdloan_account, 920)) - ], - ); - - // Should not be able to contribute to a winning crowdloan - Balances::make_free_balance_be(&3, 1_000_000_000); - assert_noop!( - Crowdloan::contribute(Origin::signed(3), ParaId::from(2001), 10, None), - CrowdloanError::::BidOrLeaseActive - ); - - // New leases will start on block 400 - let lease_start_block = 400; - run_to_block(lease_start_block); - - // First slot, Para 1 should be transitioning to Parachain - assert_eq!( - Paras::lifecycle(ParaId::from(para_1)), - Some(ParaLifecycle::UpgradingParathread) - ); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); - - // Two sessions later, it has upgraded - run_to_block(lease_start_block + 20); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parachain)); - 
assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); - - // Second slot nothing happens :) - run_to_block(lease_start_block + 100); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parachain)); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); - - // Third slot, Para 2 should be upgrading, and Para 1 is downgrading - run_to_block(lease_start_block + 200); - assert_eq!( - Paras::lifecycle(ParaId::from(para_1)), - Some(ParaLifecycle::DowngradingParachain) - ); - assert_eq!( - Paras::lifecycle(ParaId::from(para_2)), - Some(ParaLifecycle::UpgradingParathread) - ); + )); + let crowdloan_account = Crowdloan::fund_account_id(ParaId::from(para_2)); - // Two sessions later, they have transitioned - run_to_block(lease_start_block + 220); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parachain)); + // Auction ending begins on block 100 + offset, so we make a bid before then. 
+ run_to_block(90 + offset); - // Fourth slot nothing happens :) - run_to_block(lease_start_block + 300); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parachain)); + Balances::make_free_balance_be(&10, 1_000_000_000); + Balances::make_free_balance_be(&20, 1_000_000_000); - // Fifth slot, Para 2 is downgrading - run_to_block(lease_start_block + 400); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); - assert_eq!( - Paras::lifecycle(ParaId::from(para_2)), - Some(ParaLifecycle::DowngradingParachain) - ); + // User 10 will bid directly for parachain 1 + assert_ok!(Auctions::bid( + Origin::signed(10), + ParaId::from(para_1), + 1, // Auction Index + lease_period_index_start + 0, // First Slot + lease_period_index_start + 1, // Last slot + 910, // Amount + )); - // Two sessions later, Para 2 is downgraded - run_to_block(lease_start_block + 420); - assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); - assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); - }); + // User 2 will be a contribute to crowdloan for parachain 2 + Balances::make_free_balance_be(&2, 1_000_000_000); + assert_ok!(Crowdloan::contribute(Origin::signed(2), ParaId::from(para_2), 920, None)); + + // Auction ends at block 110 + offset + run_to_block(109 + offset); + assert_eq!( + last_event(), + crowdloan::Event::::HandleBidResult(ParaId::from(para_2), Ok(())).into(), + ); + run_to_block(110 + offset); + assert_eq!(last_event(), auctions::Event::::AuctionClosed(1).into()); + + // Paras should have won slots + assert_eq!( + slots::Leases::::get(ParaId::from(para_1)), + // -- 1 --- 2 --- 3 --------- 4 ------------ 5 -------- + vec![None, None, None, Some((10, 910)), Some((10, 910))], + ); + assert_eq!( + slots::Leases::::get(ParaId::from(para_2)), + // -- 1 --- 2 --- 3 --- 4 --- 5 
---------------- 6 --------------------------- 7 ---------------- + vec![ + None, + None, + None, + None, + None, + Some((crowdloan_account, 920)), + Some((crowdloan_account, 920)) + ], + ); + + // Should not be able to contribute to a winning crowdloan + Balances::make_free_balance_be(&3, 1_000_000_000); + assert_noop!( + Crowdloan::contribute(Origin::signed(3), ParaId::from(2001), 10, None), + CrowdloanError::::BidOrLeaseActive + ); + + // New leases will start on block 400 + let lease_start_block = 400 + offset; + run_to_block(lease_start_block); + + // First slot, Para 1 should be transitioning to Parachain + assert_eq!( + Paras::lifecycle(ParaId::from(para_1)), + Some(ParaLifecycle::UpgradingParathread) + ); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); + + // Two sessions later, it has upgraded + run_to_block(lease_start_block + 20); + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parachain)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); + + // Second slot nothing happens :) + run_to_block(lease_start_block + 100); + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parachain)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); + + // Third slot, Para 2 should be upgrading, and Para 1 is downgrading + run_to_block(lease_start_block + 200); + assert_eq!( + Paras::lifecycle(ParaId::from(para_1)), + Some(ParaLifecycle::DowngradingParachain) + ); + assert_eq!( + Paras::lifecycle(ParaId::from(para_2)), + Some(ParaLifecycle::UpgradingParathread) + ); + + // Two sessions later, they have transitioned + run_to_block(lease_start_block + 220); + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parachain)); + + // Fourth slot nothing happens :) + run_to_block(lease_start_block + 300); + 
assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parachain)); + + // Fifth slot, Para 2 is downgrading + run_to_block(lease_start_block + 400); + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); + assert_eq!( + Paras::lifecycle(ParaId::from(para_2)), + Some(ParaLifecycle::DowngradingParachain) + ); + + // Two sessions later, Para 2 is downgraded + run_to_block(lease_start_block + 420); + assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); + assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); + }); + } } #[test] diff --git a/runtime/common/src/lib.rs b/runtime/common/src/lib.rs index 704924a411db..ac32b64f85f1 100644 --- a/runtime/common/src/lib.rs +++ b/runtime/common/src/lib.rs @@ -150,7 +150,7 @@ impl OneSessionHandler { } - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } /// A placeholder since there is currently no provided session key handler for parachain validator @@ -179,7 +179,7 @@ impl OneSessionHandler { } - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } #[cfg(test)] diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 448bc5092403..a714a45a5577 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -657,11 +657,11 @@ mod tests { impl paras::Config for Test { type Origin = Origin; type Event = Event; - type WeightInfo = paras::weights::WeightInfo; + type WeightInfo = paras::TestWeightInfo; } impl configuration::Config for Test { - type WeightInfo = configuration::weights::WeightInfo; + type WeightInfo = configuration::TestWeightInfo; } parameter_types! 
{ @@ -1048,7 +1048,7 @@ mod benchmarking { use runtime_parachains::{paras, shared, Origin as ParaOrigin}; use sp_runtime::traits::Bounded; - use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; + use frame_benchmarking::{account, benchmarks, whitelisted_caller}; fn assert_last_event(generic_event: ::Event) { let events = frame_system::Pallet::::events(); @@ -1160,11 +1160,11 @@ mod benchmarking { assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parathread)); assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parachain)); } - } - impl_benchmark_test_suite!( - Registrar, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); + impl_benchmark_test_suite!( + Registrar, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); + } } diff --git a/runtime/common/src/slots.rs b/runtime/common/src/slots.rs index 8fe4a77f187e..26ada547c7fe 100644 --- a/runtime/common/src/slots.rs +++ b/runtime/common/src/slots.rs @@ -83,6 +83,10 @@ pub mod pallet { #[pallet::constant] type LeasePeriod: Get; + /// The number of blocks to offset each lease period by. + #[pallet::constant] + type LeaseOffset: Get; + /// Weight Information for the Extrinsics in the Pallet type WeightInfo: WeightInfo; } @@ -138,14 +142,15 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(n: T::BlockNumber) -> Weight { - // If we're beginning a new lease period then handle that. - let lease_period = T::LeasePeriod::get(); - if (n % lease_period).is_zero() { - let lease_period_index = n / lease_period; - Self::manage_lease_period_start(lease_period_index) - } else { - 0 + if let Some((lease_period, first_block)) = Self::lease_period_index(n) { + // If we're beginning a new lease period then handle that. + if first_block { + return Self::manage_lease_period_start(lease_period) + } } + + // We didn't return early above, so we didn't do anything. 
+ 0 } } @@ -321,7 +326,7 @@ impl crate::traits::OnSwap for Pallet { } } -impl Leaser for Pallet { +impl Leaser for Pallet { type AccountId = T::AccountId; type LeasePeriod = T::BlockNumber; type Currency = T::Currency; @@ -333,7 +338,9 @@ impl Leaser for Pallet { period_begin: Self::LeasePeriod, period_count: Self::LeasePeriod, ) -> Result<(), LeaseError> { - let current_lease_period = Self::lease_period_index(); + let now = frame_system::Pallet::::block_number(); + let (current_lease_period, _) = + Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; // Finally, we update the deposit held so it is `amount` for the new lease period // indices that were won in the auction. let offset = period_begin @@ -427,12 +434,18 @@ impl Leaser for Pallet { .unwrap_or_else(Zero::zero) } - fn lease_period() -> Self::LeasePeriod { - T::LeasePeriod::get() + #[cfg(any(feature = "runtime-benchmarks", test))] + fn lease_period_length() -> (T::BlockNumber, T::BlockNumber) { + (T::LeasePeriod::get(), T::LeaseOffset::get()) } - fn lease_period_index() -> Self::LeasePeriod { - >::block_number() / T::LeasePeriod::get() + fn lease_period_index(b: T::BlockNumber) -> Option<(Self::LeasePeriod, bool)> { + // Note that blocks before `LeaseOffset` do not count as any lease period. + let offset_block_now = b.checked_sub(&T::LeaseOffset::get())?; + let lease_period = offset_block_now / T::LeasePeriod::get(); + let first_block = (offset_block_now % T::LeasePeriod::get()).is_zero(); + + Some((lease_period, first_block)) } fn already_leased( @@ -440,7 +453,11 @@ impl Leaser for Pallet { first_period: Self::LeasePeriod, last_period: Self::LeasePeriod, ) -> bool { - let current_lease_period = Self::lease_period_index(); + let now = frame_system::Pallet::::block_number(); + let (current_lease_period, _) = match Self::lease_period_index(now) { + Some(clp) => clp, + None => return true, + }; // Can't look in the past, so we pick whichever is the biggest. 
let start_period = first_period.max(current_lease_period); @@ -545,6 +562,7 @@ mod tests { parameter_types! { pub const LeasePeriod: BlockNumber = 10; + pub static LeaseOffset: BlockNumber = 0; pub const ParaDeposit: u64 = 1; } @@ -553,6 +571,7 @@ mod tests { type Currency = Balances; type Registrar = TestRegistrar; type LeasePeriod = LeasePeriod; + type LeaseOffset = LeaseOffset; type WeightInfo = crate::slots::TestWeightInfo; } @@ -584,12 +603,14 @@ mod tests { fn basic_setup_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_eq!(Slots::lease_period(), 10); - assert_eq!(Slots::lease_period_index(), 0); + assert_eq!(Slots::lease_period_length(), (10, 0)); + let now = System::block_number(); + assert_eq!(Slots::lease_period_index(now).unwrap().0, 0); assert_eq!(Slots::deposit_held(1.into(), &1), 0); run_to_block(10); - assert_eq!(Slots::lease_period_index(), 1); + let now = System::block_number(); + assert_eq!(Slots::lease_period_index(now).unwrap().0, 1); }); } @@ -850,7 +871,8 @@ mod tests { )); run_to_block(20); - assert_eq!(Slots::lease_period_index(), 2); + let now = System::block_number(); + assert_eq!(Slots::lease_period_index(now).unwrap().0, 2); // Can't lease from the past assert!(Slots::lease_out(1.into(), &1, 1, 1, 1).is_err()); // Lease in the current period triggers onboarding @@ -913,6 +935,37 @@ mod tests { assert_eq!(TestRegistrar::::operations(), vec![(2.into(), 1, true),]); }); } + + #[test] + fn lease_period_offset_works() { + new_test_ext().execute_with(|| { + let (lpl, offset) = Slots::lease_period_length(); + assert_eq!(offset, 0); + assert_eq!(Slots::lease_period_index(0), Some((0, true))); + assert_eq!(Slots::lease_period_index(1), Some((0, false))); + assert_eq!(Slots::lease_period_index(lpl - 1), Some((0, false))); + assert_eq!(Slots::lease_period_index(lpl), Some((1, true))); + assert_eq!(Slots::lease_period_index(lpl + 1), Some((1, false))); + assert_eq!(Slots::lease_period_index(2 * lpl - 1), Some((1, false))); + 
assert_eq!(Slots::lease_period_index(2 * lpl), Some((2, true))); + assert_eq!(Slots::lease_period_index(2 * lpl + 1), Some((2, false))); + + // Lease period is 10, and we add an offset of 5. + LeaseOffset::set(5); + let (lpl, offset) = Slots::lease_period_length(); + assert_eq!(offset, 5); + assert_eq!(Slots::lease_period_index(0), None); + assert_eq!(Slots::lease_period_index(1), None); + assert_eq!(Slots::lease_period_index(offset), Some((0, true))); + assert_eq!(Slots::lease_period_index(lpl), Some((0, false))); + assert_eq!(Slots::lease_period_index(lpl - 1 + offset), Some((0, false))); + assert_eq!(Slots::lease_period_index(lpl + offset), Some((1, true))); + assert_eq!(Slots::lease_period_index(lpl + offset + 1), Some((1, false))); + assert_eq!(Slots::lease_period_index(2 * lpl - 1 + offset), Some((1, false))); + assert_eq!(Slots::lease_period_index(2 * lpl + offset), Some((2, true))); + assert_eq!(Slots::lease_period_index(2 * lpl + offset + 1), Some((2, false))); + }); + } } #[cfg(feature = "runtime-benchmarks")] @@ -922,7 +975,7 @@ mod benchmarking { use frame_system::RawOrigin; use sp_runtime::traits::Bounded; - use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; + use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use crate::slots::Pallet as Slots; @@ -1059,11 +1112,11 @@ mod benchmarking { T::Registrar::execute_pending_transitions(); assert!(T::Registrar::is_parachain(para)); } - } - impl_benchmark_test_suite!( - Slots, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); + impl_benchmark_test_suite!( + Slots, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); + } } diff --git a/runtime/common/src/traits.rs b/runtime/common/src/traits.rs index 938fefd963c0..98abe62c2a11 100644 --- a/runtime/common/src/traits.rs +++ b/runtime/common/src/traits.rs @@ -94,10 +94,12 @@ pub enum LeaseError { AlreadyLeased, /// The period to be leased has 
already ended. AlreadyEnded, + /// A lease period has not started yet, due to an offset in the starting block. + NoLeasePeriod, } /// Lease manager. Used by the auction module to handle parachain slot leases. -pub trait Leaser { +pub trait Leaser { /// An account identifier for a leaser. type AccountId; @@ -133,11 +135,16 @@ pub trait Leaser { leaser: &Self::AccountId, ) -> >::Balance; - /// The lease period. This is constant, but can't be a `const` due to it being a runtime configurable quantity. - fn lease_period() -> Self::LeasePeriod; + /// The length of a lease period, and any offset which may be introduced. + /// This is only used in benchmarking to automate certain calls. + #[cfg(any(feature = "runtime-benchmarks", test))] + fn lease_period_length() -> (BlockNumber, BlockNumber); - /// Returns the current lease period. - fn lease_period_index() -> Self::LeasePeriod; + /// Returns the lease period at `block`, and if this is the first block of a new lease period. + /// + /// Will return `None` if the first lease period has not started yet, for example when an offset + /// is placed. + fn lease_period_index(block: BlockNumber) -> Option<(Self::LeasePeriod, bool)>; /// Returns true if the parachain already has a lease in any of lease periods in the inclusive /// range `[first_period, last_period]`, intersected with the unbounded range [`current_lease_period`..] . @@ -189,13 +196,10 @@ impl AuctionStatus { } } -pub trait Auctioneer { +pub trait Auctioneer { /// An account identifier for a leaser. type AccountId; - /// The measurement type for counting blocks. - type BlockNumber; - /// The measurement type for counting lease periods (generally the same as `BlockNumber`). type LeasePeriod; @@ -207,13 +211,10 @@ pub trait Auctioneer { /// This can only happen when there isn't already an auction in progress. Accepts the `duration` /// of this auction and the `lease_period_index` of the initial lease period of the four that /// are to be auctioned. 
- fn new_auction( - duration: Self::BlockNumber, - lease_period_index: Self::LeasePeriod, - ) -> DispatchResult; + fn new_auction(duration: BlockNumber, lease_period_index: Self::LeasePeriod) -> DispatchResult; /// Given the current block number, return the current auction status. - fn auction_status(now: Self::BlockNumber) -> AuctionStatus; + fn auction_status(now: BlockNumber) -> AuctionStatus; /// Place a bid in the current auction. /// @@ -234,11 +235,16 @@ pub trait Auctioneer { amount: >::Balance, ) -> DispatchResult; - /// Returns the current lease period. - fn lease_period_index() -> Self::LeasePeriod; + /// The length of a lease period, and any offset which may be introduced. + /// This is only used in benchmarking to automate certain calls. + #[cfg(any(feature = "runtime-benchmarks", test))] + fn lease_period_length() -> (BlockNumber, BlockNumber); - /// Returns the length of a lease period. - fn lease_period() -> Self::LeasePeriod; + /// Returns the lease period at `block`, and if this is the first block of a new lease period. + /// + /// Will return `None` if the first lease period has not started yet, for example when an offset + /// is placed. + fn lease_period_index(block: BlockNumber) -> Option<(Self::LeasePeriod, bool)>; /// Check if the para and user combination has won an auction in the past. 
fn has_won_an_auction(para: ParaId, bidder: &Self::AccountId) -> bool; diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index 1e04a113bdfa..57657b817d0e 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kusama-runtime" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -37,6 +37,7 @@ sp-npos-elections = { git = "https://github.com/paritytech/substrate", branch = pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-bags-list = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -72,7 +73,6 @@ pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "m pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -pallet-bags-list = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-election-provider-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { 
git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } @@ -124,6 +124,7 @@ std = [ "frame-executive/std", "pallet-authority-discovery/std", "pallet-authorship/std", + "pallet-bags-list/std", "pallet-balances/std", "pallet-bounties/std", "pallet-transaction-payment/std", @@ -175,7 +176,6 @@ std = [ "xcm-executor/std", "xcm-builder/std", "frame-election-provider-support/std", - "pallet-bags-list/std", ] runtime-benchmarks = [ "runtime-common/runtime-benchmarks", diff --git a/runtime/kusama/src/voter_bags.rs b/runtime/kusama/src/bag_thresholds.rs similarity index 100% rename from runtime/kusama/src/voter_bags.rs rename to runtime/kusama/src/bag_thresholds.rs diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index ad97e499e958..e773febefb97 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -25,8 +25,8 @@ use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, }; use runtime_common::{ auctions, claims, crowdloan, impls::DealWithFees, paras_registrar, slots, xcm_sender, @@ -48,11 +48,8 @@ use runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::crypto::AuthorityId as BeefyId; use frame_support::{ - construct_runtime, parameter_types, - traits::{ - Contains, Everything, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Nothing, - OnRuntimeUpgrade, - }, + construct_runtime, 
match_type, parameter_types, + traits::{Contains, Everything, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Nothing}, weights::Weight, PalletId, RuntimeDebug, }; @@ -80,11 +77,12 @@ use sp_version::RuntimeVersion; use static_assertions::const_assert; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, - BackingToPlurality, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, - IsChildSystemParachain, IsConcrete, LocationInverter, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, + AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, BackingToPlurality, + ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, + CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, IsChildSystemParachain, IsConcrete, + LocationInverter, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, + TakeWeightCredit, UsingComponents, }; use xcm_executor::XcmExecutor; @@ -104,7 +102,7 @@ use constants::{currency::*, fee::*, time::*}; mod weights; // Voter bag threshold definitions. -mod voter_bags; +mod bag_thresholds; #[cfg(test)] mod tests; @@ -118,7 +116,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("kusama"), impl_name: create_runtime_str!("parity-kusama"), authoring_version: 2, - spec_version: 9110, + spec_version: 9130, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -276,11 +274,15 @@ impl pallet_balances::Config for Runtime { parameter_types! 
{ pub const TransactionByteFee: Balance = 10 * MILLICENTS; + /// This value increases the priority of `Operational` transactions by adding + /// a "virtual tip" that's equal to the `OperationalFeeMultiplier * final_fee`. + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; } @@ -322,10 +324,6 @@ impl_opaque_keys! { } } -parameter_types! { - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); -} - impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; @@ -335,7 +333,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = weights::pallet_session::WeightInfo; } @@ -357,8 +354,8 @@ parameter_types! { pub SignedRewardBase: Balance = UNITS / 10; pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(5u32, 10_000); - // miner configs - pub OffchainRepeat: BlockNumber = 5; + // 1 hour session, 15 minutes unsigned phase, 8 offchain executions. + pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 8; /// Whilst `UseNominatorsAndUpdateBagsList` or `UseNominatorsMap` is in use, this can still be a /// very large value. Once the `BagsList` is in full motion, staking might open its door to many @@ -413,7 +410,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { } parameter_types! 
{ - pub const BagThresholds: &'static [u64] = &voter_bags::THRESHOLDS; + pub const BagThresholds: &'static [u64] = &bag_thresholds::THRESHOLDS; } impl pallet_bags_list::Config for Runtime { @@ -493,6 +490,7 @@ parameter_types! { // 27 eras in which slashes can be cancelled (slightly less than 7 days). pub const SlashDeferDuration: pallet_staking::EraIndex = 27; pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); } type SlashCancelOrigin = EnsureOneOf< @@ -527,8 +525,8 @@ impl pallet_staking::Config for Runtime { type EraPayout = EraPayout; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - // Use the nominators map to iter voters, but also perform the bags-list migration and keep - // it up-to-date. + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; + // Use the nominators map to iter voters, but also keep bags-list up-to-date. type SortedListProvider = runtime_common::elections::UseNominatorsAndUpdateBagsList; type WeightInfo = weights::pallet_staking::WeightInfo; } @@ -1081,7 +1079,8 @@ impl InstanceFilter for ProxyType { Call::Registrar(paras_registrar::Call::reserve {..}) | Call::Crowdloan(..) | Call::Slots(..) | - Call::Auctions(..) // Specifically omitting the entire XCM Pallet + Call::Auctions(..) | // Specifically omitting the entire XCM Pallet + Call::BagsList(..) ), ProxyType::Governance => matches!( c, @@ -1208,6 +1207,7 @@ impl slots::Config for Runtime { type Currency = Balances; type Registrar = Registrar; type LeasePeriod = LeasePeriod; + type LeaseOffset = (); type WeightInfo = weights::runtime_common_slots::WeightInfo; } @@ -1321,14 +1321,20 @@ parameter_types! { /// individual routers. pub type XcmRouter = ( // Only one router so far - use DMP to communicate with child parachains. - xcm_sender::ChildParachainRouter, + xcm_sender::ChildParachainRouter, ); parameter_types! 
{ pub const Kusama: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(KsmLocation::get()) }); - pub const KusamaForStatemint: (MultiAssetFilter, MultiLocation) = (Kusama::get(), Parachain(1000).into()); + pub const KusamaForStatemine: (MultiAssetFilter, MultiLocation) = (Kusama::get(), Parachain(1000).into()); +} +pub type TrustedTeleporters = (xcm_builder::Case,); + +match_type! { + pub type OnlyParachains: impl Contains = { + MultiLocation { parents: 0, interior: X1(Parachain(_)) } + }; } -pub type TrustedTeleporters = (xcm_builder::Case,); /// The barriers one of which must be passed for an XCM message to be executed. pub type Barrier = ( @@ -1338,6 +1344,10 @@ pub type Barrier = ( AllowTopLevelPaidExecutionFrom, // Messages coming from system parachains need not pay for execution. AllowUnpaidExecutionFrom>, + // Expected responses are OK. + AllowKnownQueryResponses, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, ); pub struct XcmConfig; @@ -1522,7 +1532,7 @@ construct_runtime! { Crowdloan: crowdloan::{Pallet, Call, Storage, Event} = 73, // Pallet for sending XCM. - XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 99, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, } } @@ -1555,186 +1565,11 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPallets, - ( - CouncilStoragePrefixMigration, - TechnicalCommitteeStoragePrefixMigration, - TechnicalMembershipStoragePrefixMigration, - MigrateTipsPalletPrefix, - BountiesPrefixMigration, - StakingBagsListMigrationV8, - ), + (), >; /// The payload being signed in the transactions. 
pub type SignedPayload = generic::SignedPayload; -const BOUNTIES_OLD_PREFIX: &str = "Treasury"; - -/// Migrate from 'Treasury' to the new prefix 'Bounties' -pub struct BountiesPrefixMigration; - -impl OnRuntimeUpgrade for BountiesPrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("Bounties is part of runtime, so it has a name; qed"); - pallet_bounties::migrations::v4::migrate::(BOUNTIES_OLD_PREFIX, name) - } - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("Bounties is part of runtime, so it has a name; qed"); - pallet_bounties::migrations::v4::pre_migration::( - BOUNTIES_OLD_PREFIX, - name, - ); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("Bounties is part of runtime, so it has a name; qed"); - pallet_bounties::migrations::v4::post_migration::( - BOUNTIES_OLD_PREFIX, - name, - ); - Ok(()) - } -} - -const COUNCIL_OLD_PREFIX: &str = "Instance1Collective"; -/// Migrate from `Instance1Collective` to the new pallet prefix `Council` -pub struct CouncilStoragePrefixMigration; - -impl OnRuntimeUpgrade for CouncilStoragePrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_collective::migrations::v4::migrate::(COUNCIL_OLD_PREFIX) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::pre_migrate::(COUNCIL_OLD_PREFIX); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::post_migrate::(COUNCIL_OLD_PREFIX); - Ok(()) - } -} - -const TECHNICAL_COMMITTEE_OLD_PREFIX: &str = "Instance2Collective"; -/// Migrate from 
`Instance2Collective` to the new pallet prefix `TechnicalCommittee` -pub struct TechnicalCommitteeStoragePrefixMigration; - -impl OnRuntimeUpgrade for TechnicalCommitteeStoragePrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_collective::migrations::v4::migrate::( - TECHNICAL_COMMITTEE_OLD_PREFIX, - ) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::pre_migrate::( - TECHNICAL_COMMITTEE_OLD_PREFIX, - ); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::post_migrate::( - TECHNICAL_COMMITTEE_OLD_PREFIX, - ); - Ok(()) - } -} - -const TECHNICAL_MEMBERSHIP_OLD_PREFIX: &str = "Instance1Membership"; -/// Migrate from `Instance1Membership` to the new pallet prefix `TechnicalMembership` -pub struct TechnicalMembershipStoragePrefixMigration; - -impl OnRuntimeUpgrade for TechnicalMembershipStoragePrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("TechnicalMembership is part of runtime, so it has a name; qed"); - pallet_membership::migrations::v4::migrate::( - TECHNICAL_MEMBERSHIP_OLD_PREFIX, - name, - ) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("TechnicalMembership is part of runtime, so it has a name; qed"); - pallet_membership::migrations::v4::pre_migrate::( - TECHNICAL_MEMBERSHIP_OLD_PREFIX, - name, - ); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("TechnicalMembership is part of runtime, so it has a name; qed"); - pallet_membership::migrations::v4::post_migrate::( - 
TECHNICAL_MEMBERSHIP_OLD_PREFIX, - name, - ); - Ok(()) - } -} - -const TIPS_OLD_PREFIX: &str = "Treasury"; -/// Migrate pallet-tips from `Treasury` to the new pallet prefix `Tips` -pub struct MigrateTipsPalletPrefix; - -impl OnRuntimeUpgrade for MigrateTipsPalletPrefix { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_tips::migrations::v4::migrate::(TIPS_OLD_PREFIX) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - pallet_tips::migrations::v4::pre_migrate::(TIPS_OLD_PREFIX); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - pallet_tips::migrations::v4::post_migrate::(TIPS_OLD_PREFIX); - Ok(()) - } -} - -// Migration to generate pallet staking's `SortedListProvider` from pre-existing nominators. -pub struct StakingBagsListMigrationV8; - -impl OnRuntimeUpgrade for StakingBagsListMigrationV8 { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_staking::migrations::v8::migrate::() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - pallet_staking::migrations::v8::pre_migrate::() - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - pallet_staking::migrations::v8::post_migrate::() - } -} - #[cfg(not(feature = "disable-runtime-api"))] sp_api::impl_runtime_apis! { impl sp_api::Core for Runtime { @@ -1812,6 +1647,16 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) } + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + parachains_runtime_api_impl::assumed_validation_data::( + para_id, + expected_persisted_validation_data_hash, + ) + } + fn check_validation_outputs( para_id: ParaId, outputs: primitives::v1::CandidateCommitments, @@ -1860,6 +1705,10 @@ sp_api::impl_runtime_apis! 
{ fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { parachains_runtime_api_impl::validation_code_by_hash::(hash) } + + fn on_chain_votes() -> Option> { + parachains_runtime_api_impl::on_chain_votes::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/kusama/src/weights/runtime_common_claims.rs b/runtime/kusama/src/weights/runtime_common_claims.rs index 9b2ea68299d0..26e37dcf27d3 100644 --- a/runtime/kusama/src/weights/runtime_common_claims.rs +++ b/runtime/kusama/src/weights/runtime_common_claims.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// Copyright 2017-2021 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -15,8 +15,8 @@ // along with Polkadot. If not, see . //! Autogenerated weights for `runtime_common::claims` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-10-06, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 // Executed Command: @@ -31,8 +31,10 @@ // --wasm-execution=compiled // --heap-pages=4096 // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ +// --output=./runtime/kusama/src/weights/runtime_common_claims.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -42,28 +44,58 @@ use sp_std::marker::PhantomData; /// Weight functions for `runtime_common::claims`. 
pub struct WeightInfo(PhantomData); impl runtime_common::claims::WeightInfo for WeightInfo { + // Storage: Claims Claims (r:1 w:1) + // Storage: Claims Signing (r:1 w:1) + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:1 w:1) + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn claim() -> Weight { - (438_457_000 as Weight) + (442_992_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:0 w:1) + // Storage: Claims Claims (r:0 w:1) + // Storage: Claims Signing (r:0 w:1) fn mint_claim() -> Weight { - (13_035_000 as Weight) + (12_098_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } + // Storage: Claims Claims (r:1 w:1) + // Storage: Claims Signing (r:1 w:1) + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:1 w:1) + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn claim_attest() -> Weight { - (439_894_000 as Weight) + (442_328_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } + // Storage: Claims Preclaims (r:1 w:1) + // Storage: Claims Signing (r:1 w:1) + // Storage: Claims Claims (r:1 w:1) + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:1 w:1) + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn attest() -> Weight { - (132_760_000 as Weight) + (126_506_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(7 as Weight)) } + // Storage: Claims Claims (r:1 w:2) + // Storage: Claims Vesting (r:1 w:2) + // Storage: Claims Signing (r:1 w:2) + // Storage: Claims 
Preclaims (r:1 w:1) fn move_claim() -> Weight { - (28_459_000 as Weight) + (26_618_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(7 as Weight)) } diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index c281269b8682..da5e695b71fc 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-parachains" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 69ff43615678..f2a70a1b0635 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -28,12 +28,12 @@ use sp_std::prelude::*; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod weights; pub use pallet::*; pub mod migration; +#[allow(dead_code)] const LOG_TARGET: &str = "runtime::configuration"; /// All configuration of the runtime with respect to parachains and parathreads. @@ -278,6 +278,28 @@ pub trait WeightInfo { fn set_hrmp_open_request_ttl() -> Weight; } +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn set_config_with_block_number() -> Weight { + Weight::MAX + } + fn set_config_with_u32() -> Weight { + Weight::MAX + } + fn set_config_with_option_u32() -> Weight { + Weight::MAX + } + fn set_config_with_weight() -> Weight { + Weight::MAX + } + fn set_config_with_balance() -> Weight { + Weight::MAX + } + fn set_hrmp_open_request_ttl() -> Weight { + Weight::MAX + } +} + #[frame_support::pallet] pub mod pallet { use super::*; diff --git a/runtime/parachains/src/configuration/benchmarking.rs b/runtime/parachains/src/configuration/benchmarking.rs index 8b319cb8fabb..4b98f22c7f76 100644 --- a/runtime/parachains/src/configuration/benchmarking.rs +++ b/runtime/parachains/src/configuration/benchmarking.rs @@ -15,7 +15,7 @@ // along with Polkadot. 
If not, see . use crate::configuration::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, BenchmarkError, BenchmarkResult}; +use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; use frame_system::RawOrigin; use sp_runtime::traits::One; @@ -35,10 +35,10 @@ benchmarks! { } set_config_with_balance {}: set_hrmp_sender_deposit(RawOrigin::Root, 100_000_000_000) -} -impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(Default::default()), - crate::mock::Test -); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/runtime/parachains/src/configuration/migration.rs b/runtime/parachains/src/configuration/migration.rs index 6909eecc15a9..ce7959053d1b 100644 --- a/runtime/parachains/src/configuration/migration.rs +++ b/runtime/parachains/src/configuration/migration.rs @@ -16,299 +16,13 @@ //! A module that is responsible for migration of storage. -use crate::configuration::{self, Config, Pallet, Store}; -use frame_support::{pallet_prelude::*, traits::StorageVersion, weights::Weight}; -use frame_system::pallet_prelude::BlockNumberFor; +use crate::configuration::Config; +use frame_support::{traits::StorageVersion, weights::Weight}; /// The current storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); /// Migrates the pallet storage to the most recent version, checking and setting the `StorageVersion`. 
pub fn migrate_to_latest() -> Weight { - let mut weight = 0; - - if StorageVersion::get::>() == 0 { - weight += migrate_to_v1::(); - StorageVersion::new(1).put::>(); - } - - weight -} - -mod v0 { - use super::*; - use primitives::v1::{Balance, SessionIndex}; - - #[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug)] - pub struct HostConfiguration { - pub max_code_size: u32, - pub max_head_data_size: u32, - pub max_upward_queue_count: u32, - pub max_upward_queue_size: u32, - pub max_upward_message_size: u32, - pub max_upward_message_num_per_candidate: u32, - pub hrmp_max_message_num_per_candidate: u32, - pub validation_upgrade_frequency: BlockNumber, - pub validation_upgrade_delay: BlockNumber, - pub max_pov_size: u32, - pub max_downward_message_size: u32, - pub ump_service_total_weight: Weight, - pub hrmp_max_parachain_outbound_channels: u32, - pub hrmp_max_parathread_outbound_channels: u32, - pub _hrmp_open_request_ttl: u32, - pub hrmp_sender_deposit: Balance, - pub hrmp_recipient_deposit: Balance, - pub hrmp_channel_max_capacity: u32, - pub hrmp_channel_max_total_size: u32, - pub hrmp_max_parachain_inbound_channels: u32, - pub hrmp_max_parathread_inbound_channels: u32, - pub hrmp_channel_max_message_size: u32, - pub code_retention_period: BlockNumber, - pub parathread_cores: u32, - pub parathread_retries: u32, - pub group_rotation_frequency: BlockNumber, - pub chain_availability_period: BlockNumber, - pub thread_availability_period: BlockNumber, - pub scheduling_lookahead: u32, - pub max_validators_per_core: Option, - pub max_validators: Option, - pub dispute_period: SessionIndex, - pub dispute_post_conclusion_acceptance_period: BlockNumber, - pub dispute_max_spam_slots: u32, - pub dispute_conclusion_by_time_out_period: BlockNumber, - pub no_show_slots: u32, - pub n_delay_tranches: u32, - pub zeroth_delay_tranche_width: u32, - pub needed_approvals: u32, - pub relay_vrf_modulo_samples: u32, - } - - impl> Default for HostConfiguration { - fn 
default() -> Self { - HostConfiguration { - group_rotation_frequency: 1u32.into(), - chain_availability_period: 1u32.into(), - thread_availability_period: 1u32.into(), - no_show_slots: 1u32.into(), - validation_upgrade_frequency: Default::default(), - validation_upgrade_delay: Default::default(), - code_retention_period: Default::default(), - max_code_size: Default::default(), - max_pov_size: Default::default(), - max_head_data_size: Default::default(), - parathread_cores: Default::default(), - parathread_retries: Default::default(), - scheduling_lookahead: Default::default(), - max_validators_per_core: Default::default(), - max_validators: None, - dispute_period: 6, - dispute_post_conclusion_acceptance_period: 100.into(), - dispute_max_spam_slots: 2, - dispute_conclusion_by_time_out_period: 200.into(), - n_delay_tranches: Default::default(), - zeroth_delay_tranche_width: Default::default(), - needed_approvals: Default::default(), - relay_vrf_modulo_samples: Default::default(), - max_upward_queue_count: Default::default(), - max_upward_queue_size: Default::default(), - max_downward_message_size: Default::default(), - ump_service_total_weight: Default::default(), - max_upward_message_size: Default::default(), - max_upward_message_num_per_candidate: Default::default(), - _hrmp_open_request_ttl: Default::default(), - hrmp_sender_deposit: Default::default(), - hrmp_recipient_deposit: Default::default(), - hrmp_channel_max_capacity: Default::default(), - hrmp_channel_max_total_size: Default::default(), - hrmp_max_parachain_inbound_channels: Default::default(), - hrmp_max_parathread_inbound_channels: Default::default(), - hrmp_channel_max_message_size: Default::default(), - hrmp_max_parachain_outbound_channels: Default::default(), - hrmp_max_parathread_outbound_channels: Default::default(), - hrmp_max_message_num_per_candidate: Default::default(), - } - } - } -} - -/// Migrates the `HostConfiguration` from v0 (with deprecated `hrmp_open_request_ttl` and without -/// 
`ump_max_individual_weight`) to v1 (without HRMP TTL and with max individual weight). -/// Uses the `Default` implementation of `HostConfiguration` to choose a value for `ump_max_individual_weight`. -/// -/// NOTE: Only use this function if you know what you are doing. Default to using `migrate_to_latest`. -pub fn migrate_to_v1() -> Weight { - // Unusual formatting is justified: - // - make it easier to verify that fields assign what they supposed to assign. - // - this code is transient and will be removed after all migrations are done. - // - this code is important enough to optimize for legibility sacrificing consistency. - #[rustfmt::skip] - let translate = - |pre: v0::HostConfiguration>| -> configuration::HostConfiguration> - { - super::HostConfiguration { - -max_code_size : pre.max_code_size, -max_head_data_size : pre.max_head_data_size, -max_upward_queue_count : pre.max_upward_queue_count, -max_upward_queue_size : pre.max_upward_queue_size, -max_upward_message_size : pre.max_upward_message_size, -max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, -hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, -validation_upgrade_frequency : pre.validation_upgrade_frequency, -validation_upgrade_delay : pre.validation_upgrade_delay, -max_pov_size : pre.max_pov_size, -max_downward_message_size : pre.max_downward_message_size, -ump_service_total_weight : pre.ump_service_total_weight, -hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, -hrmp_max_parathread_outbound_channels : pre.hrmp_max_parathread_outbound_channels, -hrmp_sender_deposit : pre.hrmp_sender_deposit, -hrmp_recipient_deposit : pre.hrmp_recipient_deposit, -hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, -hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, -hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, -hrmp_max_parathread_inbound_channels : 
pre.hrmp_max_parathread_inbound_channels, -hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, -code_retention_period : pre.code_retention_period, -parathread_cores : pre.parathread_cores, -parathread_retries : pre.parathread_retries, -group_rotation_frequency : pre.group_rotation_frequency, -chain_availability_period : pre.chain_availability_period, -thread_availability_period : pre.thread_availability_period, -scheduling_lookahead : pre.scheduling_lookahead, -max_validators_per_core : pre.max_validators_per_core, -max_validators : pre.max_validators, -dispute_period : pre.dispute_period, -dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, -dispute_max_spam_slots : pre.dispute_max_spam_slots, -dispute_conclusion_by_time_out_period : pre.dispute_conclusion_by_time_out_period, -no_show_slots : pre.no_show_slots, -n_delay_tranches : pre.n_delay_tranches, -zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, -needed_approvals : pre.needed_approvals, -relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, - -ump_max_individual_weight: >>::default().ump_max_individual_weight, - } - }; - - if let Err(_) = as Store>::ActiveConfig::translate(|pre| pre.map(translate)) { - // `Err` is returned when the pre-migration type cannot be deserialized. This - // cannot happen if the migration runs correctly, i.e. against the expected version. - // - // This happening almost surely will lead to a panic somewhere else. Corruption seems - // to be unlikely to be caused by this. So we just log. Maybe it'll work out still? - log::error!( - target: configuration::LOG_TARGET, - "unexpected error when performing translation of the configuration type during storage upgrade to v1." 
- ); - } - - T::DbWeight::get().reads_writes(1, 1) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - - #[test] - fn v0_deserialized_from_actual_data() { - // Fetched at Kusama 9,207,703 (0xbfe5227324c08b3ab67e0473a360acbce43efbd7b42041d0033adaf9ff2c5330) - // - // This exceeds the maximal line width length, but that's fine, since this is not code and - // doesn't need to be read and also leaving it as one line allows to easily copy it. - let raw_config = hex_literal::hex!["0000a000005000000a00000000c8000000c800000a0000000a00000040380000580200000000500000c8000000e87648170000000a0000000000000048000000c09e5d9a2f3d00000000000000000000c09e5d9a2f3d00000000000000000000e8030000009001000a00000000000000009001008070000000000000000000000a0000000a0000000a00000001000000010500000001c8000000060000005802000002000000580200000200000059000000000000001e00000028000000"]; - - let v0 = v0::HostConfiguration::::decode(&mut &raw_config[..]) - .unwrap(); - - // We check only a sample of the values here. If we missed any fields or messed up data types - // that would skew all the fields coming after. - assert_eq!(v0.max_code_size, 10_485_760); - assert_eq!(v0.validation_upgrade_frequency, 14_400); - assert_eq!(v0.max_pov_size, 5_242_880); - assert_eq!(v0._hrmp_open_request_ttl, 72); - assert_eq!(v0.hrmp_channel_max_message_size, 102_400); - assert_eq!(v0.dispute_max_spam_slots, 2); - assert_eq!(v0.n_delay_tranches, 89); - assert_eq!(v0.relay_vrf_modulo_samples, 40); - } - - #[test] - fn test_migrate_to_v1() { - // Host configuration has lots of fields. However, in this migration we add one and remove one - // field. The most important part to check are a couple of the last fields. We also pick - // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and - // also their type. - // - // We specify only the picked fields and the rest should be provided by the `Default` - // implementation. 
That implementation is copied over between the two types and should work - // fine. - let v0 = v0::HostConfiguration:: { - relay_vrf_modulo_samples: 0xFEEDBEEFu32, - needed_approvals: 69, - thread_availability_period: 55, - hrmp_recipient_deposit: 1337, - max_pov_size: 1111, - ..Default::default() - }; - - new_test_ext(Default::default()).execute_with(|| { - // Implant the v0 version in the state. - frame_support::storage::unhashed::put_raw( - &configuration::ActiveConfig::::hashed_key(), - &v0.encode(), - ); - - migrate_to_v1::(); - - let v1 = configuration::ActiveConfig::::get(); - - // The same motivation as for the migration code. See `migrate_to_v1`. - #[rustfmt::skip] - { - assert_eq!(v0.max_code_size , v1.max_code_size); - assert_eq!(v0.max_head_data_size , v1.max_head_data_size); - assert_eq!(v0.max_upward_queue_count , v1.max_upward_queue_count); - assert_eq!(v0.max_upward_queue_size , v1.max_upward_queue_size); - assert_eq!(v0.max_upward_message_size , v1.max_upward_message_size); - assert_eq!(v0.max_upward_message_num_per_candidate , v1.max_upward_message_num_per_candidate); - assert_eq!(v0.hrmp_max_message_num_per_candidate , v1.hrmp_max_message_num_per_candidate); - assert_eq!(v0.validation_upgrade_frequency , v1.validation_upgrade_frequency); - assert_eq!(v0.validation_upgrade_delay , v1.validation_upgrade_delay); - assert_eq!(v0.max_pov_size , v1.max_pov_size); - assert_eq!(v0.max_downward_message_size , v1.max_downward_message_size); - assert_eq!(v0.ump_service_total_weight , v1.ump_service_total_weight); - assert_eq!(v0.hrmp_max_parachain_outbound_channels , v1.hrmp_max_parachain_outbound_channels); - assert_eq!(v0.hrmp_max_parathread_outbound_channels , v1.hrmp_max_parathread_outbound_channels); - assert_eq!(v0.hrmp_sender_deposit , v1.hrmp_sender_deposit); - assert_eq!(v0.hrmp_recipient_deposit , v1.hrmp_recipient_deposit); - assert_eq!(v0.hrmp_channel_max_capacity , v1.hrmp_channel_max_capacity); - assert_eq!(v0.hrmp_channel_max_total_size , 
v1.hrmp_channel_max_total_size); - assert_eq!(v0.hrmp_max_parachain_inbound_channels , v1.hrmp_max_parachain_inbound_channels); - assert_eq!(v0.hrmp_max_parathread_inbound_channels , v1.hrmp_max_parathread_inbound_channels); - assert_eq!(v0.hrmp_channel_max_message_size , v1.hrmp_channel_max_message_size); - assert_eq!(v0.code_retention_period , v1.code_retention_period); - assert_eq!(v0.parathread_cores , v1.parathread_cores); - assert_eq!(v0.parathread_retries , v1.parathread_retries); - assert_eq!(v0.group_rotation_frequency , v1.group_rotation_frequency); - assert_eq!(v0.chain_availability_period , v1.chain_availability_period); - assert_eq!(v0.thread_availability_period , v1.thread_availability_period); - assert_eq!(v0.scheduling_lookahead , v1.scheduling_lookahead); - assert_eq!(v0.max_validators_per_core , v1.max_validators_per_core); - assert_eq!(v0.max_validators , v1.max_validators); - assert_eq!(v0.dispute_period , v1.dispute_period); - assert_eq!(v0.dispute_post_conclusion_acceptance_period, v1.dispute_post_conclusion_acceptance_period); - assert_eq!(v0.dispute_max_spam_slots , v1.dispute_max_spam_slots); - assert_eq!(v0.dispute_conclusion_by_time_out_period , v1.dispute_conclusion_by_time_out_period); - assert_eq!(v0.no_show_slots , v1.no_show_slots); - assert_eq!(v0.n_delay_tranches , v1.n_delay_tranches); - assert_eq!(v0.zeroth_delay_tranche_width , v1.zeroth_delay_tranche_width); - assert_eq!(v0.needed_approvals , v1.needed_approvals); - assert_eq!(v0.relay_vrf_modulo_samples , v1.relay_vrf_modulo_samples); - - assert_eq!(v1.ump_max_individual_weight, 20_000_000_000); - }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression. 
- }); - } + 0 } diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 1ec5515667fb..7dde5f2b515a 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -22,7 +22,7 @@ use crate::{ session_info, }; use bitvec::{bitvec, order::Lsb0 as BitOrderLsb0}; -use frame_support::{ensure, traits::Get, weights::Weight}; +use frame_support::{ensure, storage::TransactionOutcome, traits::Get, weights::Weight}; use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use primitives::v1::{ @@ -708,7 +708,7 @@ impl Pallet { /// /// This functions modifies the state when failing. It is expected to be called in inherent, /// and to fail the extrinsic on error. As invalid inherents are not allowed, the dirty state - /// is not commited. + /// is not committed. pub(crate) fn provide_multi_dispute_data( statement_sets: MultiDisputeStatementSet, ) -> Result, DispatchError> { @@ -739,26 +739,29 @@ impl Pallet { } fn filter_multi_dispute_data(statement_sets: &mut MultiDisputeStatementSet) { - let config = >::config(); - - let old_statement_sets = sp_std::mem::take(statement_sets); - - // Deduplicate. - let dedup_iter = { - let mut targets = BTreeSet::new(); - old_statement_sets.into_iter().filter(move |set| { - let target = (set.candidate_hash, set.session); - targets.insert(target) - }) - }; + frame_support::storage::with_transaction(|| { + let config = >::config(); + + let old_statement_sets = sp_std::mem::take(statement_sets); + + // Deduplicate. 
+ let dedup_iter = { + let mut targets = BTreeSet::new(); + old_statement_sets.into_iter().filter(move |set| { + let target = (set.candidate_hash, set.session); + targets.insert(target) + }) + }; - *statement_sets = dedup_iter - .filter_map(|set| { - let filter = Self::filter_dispute_data(&config, &set); + *statement_sets = dedup_iter + .filter_map(|set| { + let filter = Self::filter_dispute_data(&config, &set); - filter.filter_statement_set(set) - }) - .collect(); + filter.filter_statement_set(set) + }) + .collect(); + TransactionOutcome::Rollback(()) + }) } // Given a statement set, this produces a filter to be applied to the statement set. @@ -1210,7 +1213,7 @@ mod tests { REWARD_VALIDATORS, }; use frame_support::{ - assert_err, assert_noop, assert_ok, + assert_err, assert_noop, assert_ok, assert_storage_noop, traits::{OnFinalize, OnInitialize}, }; use frame_system::InitKind; @@ -2836,7 +2839,7 @@ mod tests { ], }]; - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert_eq!( statements, @@ -2918,7 +2921,7 @@ mod tests { ], }]; - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert!(statements.is_empty()); }) @@ -3059,7 +3062,7 @@ mod tests { ]; let old_statements = statements.clone(); - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert_eq!(statements, old_statements); }) @@ -3096,7 +3099,7 @@ mod tests { )], }]; - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert!(statements.is_empty()); }) @@ -3188,7 +3191,7 @@ mod tests { }, ]; - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert_eq!( statements, @@ -3278,7 +3281,7 @@ mod tests { 
}, ]; - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert_eq!( statements, @@ -3333,7 +3336,7 @@ mod tests { )], }]; - Pallet::::filter_multi_dispute_data(&mut statements); + assert_storage_noop!(Pallet::::filter_multi_dispute_data(&mut statements)); assert!(statements.is_empty()); }) diff --git a/runtime/parachains/src/disputes/benchmarking.rs b/runtime/parachains/src/disputes/benchmarking.rs index b884d1a6e799..fc54c4929323 100644 --- a/runtime/parachains/src/disputes/benchmarking.rs +++ b/runtime/parachains/src/disputes/benchmarking.rs @@ -16,7 +16,7 @@ use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_system::RawOrigin; use sp_runtime::traits::One; @@ -27,10 +27,10 @@ benchmarks! { verify { assert!(Frozen::::get().is_none()) } -} -impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(Default::default()), - crate::mock::Test -); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 7e489ba73cf0..87ba4ad861b8 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -480,7 +480,7 @@ pub mod pallet { /// This cancels a pending open channel request. It can be canceled be either of the sender /// or the recipient for that request. The origin must be either of those. /// - /// The cancelling happens immediately. It is not possible to cancel the request if it is + /// The cancellation happens immediately. It is not possible to cancel the request if it is /// already accepted. 
#[pallet::weight(0)] pub fn hrmp_cancel_open_request( diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index 1c37bce3bd27..dd865bc8572b 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -25,8 +25,9 @@ use frame_support::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use primitives::v1::{ AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, HeadData, - Id as ParaId, SigningContext, UncheckedSignedAvailabilityBitfields, ValidatorIndex, + CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, + HeadData, Id as ParaId, SigningContext, UncheckedSignedAvailabilityBitfields, ValidatorIndex, + ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -110,6 +111,24 @@ pub trait RewardValidators { fn reward_bitfields(validators: impl IntoIterator); } +/// Helper return type for `process_candidates`. +#[derive(Encode, Decode, PartialEq, TypeInfo)] +#[cfg_attr(test, derive(Debug))] +pub(crate) struct ProcessedCandidates { + pub(crate) core_indices: Vec, + pub(crate) candidate_receipt_with_backing_validator_indices: + Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, +} + +impl Default for ProcessedCandidates { + fn default() -> Self { + Self { + core_indices: Vec::new(), + candidate_receipt_with_backing_validator_indices: Vec::new(), + } + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -180,8 +199,6 @@ pub mod pallet { NotCollatorSigned, /// The validation data hash does not match expected. ValidationDataHashMismatch, - /// Internal error only returned when compiled with debug assertions. - InternalError, /// The downward message queue is not processed correctly. IncorrectDownwardMessageHandling, /// At least one upward message sent does not pass the acceptance criteria. 
@@ -328,8 +345,6 @@ impl Pallet { candidate_pending_availability.availability_votes.get_mut(val_idx) }) { *bit = true; - } else if cfg!(debug_assertions) { - ensure!(false, Error::::InternalError); } } @@ -396,11 +411,11 @@ impl Pallet { candidates: Vec>, scheduled: Vec, group_validators: impl Fn(GroupIndex) -> Option>, - ) -> Result, DispatchError> { + ) -> Result, DispatchError> { ensure!(candidates.len() <= scheduled.len(), Error::::UnscheduledCandidate); if scheduled.is_empty() { - return Ok(Vec::new()) + return Ok(ProcessedCandidates::default()) } let validators = shared::Pallet::::active_validator_keys(); @@ -412,7 +427,11 @@ impl Pallet { let relay_parent_number = now - One::one(); let check_cx = CandidateCheckContext::::new(now, relay_parent_number); - // do all checks before writing storage. + // Collect candidate receipts with backers. + let mut candidate_receipt_with_backing_validator_indices = + Vec::with_capacity(candidates.len()); + + // Do all checks before writing storage. let core_indices_and_backers = { let mut skip = 0; let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); @@ -441,17 +460,17 @@ impl Pallet { // // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. - 'a: for (candidate_idx, candidate) in candidates.iter().enumerate() { - let para_id = candidate.descriptor().para_id; + 'a: for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { + let para_id = backed_candidate.descriptor().para_id; let mut backers = bitvec::bitvec![BitOrderLsb0, u8; 0; validators.len()]; // we require that the candidate is in the context of the parent block. 
ensure!( - candidate.descriptor().relay_parent == parent_hash, + backed_candidate.descriptor().relay_parent == parent_hash, Error::::CandidateNotInParentContext, ); ensure!( - candidate.descriptor().check_collator_signature().is_ok(), + backed_candidate.descriptor().check_collator_signature().is_ok(), Error::::NotCollatorSigned, ); @@ -460,24 +479,24 @@ impl Pallet { // A candidate for a parachain without current validation code is not scheduled. .ok_or_else(|| Error::::UnscheduledCandidate)?; ensure!( - candidate.descriptor().validation_code_hash == validation_code_hash, + backed_candidate.descriptor().validation_code_hash == validation_code_hash, Error::::InvalidValidationCodeHash, ); ensure!( - candidate.descriptor().para_head == - candidate.candidate.commitments.head_data.hash(), + backed_candidate.descriptor().para_head == + backed_candidate.candidate.commitments.head_data.hash(), Error::::ParaHeadMismatch, ); if let Err(err) = check_cx.check_validation_outputs( para_id, - &candidate.candidate.commitments.head_data, - &candidate.candidate.commitments.new_validation_code, - candidate.candidate.commitments.processed_downward_messages, - &candidate.candidate.commitments.upward_messages, - T::BlockNumber::from(candidate.candidate.commitments.hrmp_watermark), - &candidate.candidate.commitments.horizontal_messages, + &backed_candidate.candidate.commitments.head_data, + &backed_candidate.candidate.commitments.new_validation_code, + backed_candidate.candidate.commitments.processed_downward_messages, + &backed_candidate.candidate.commitments.upward_messages, + T::BlockNumber::from(backed_candidate.candidate.commitments.hrmp_watermark), + &backed_candidate.candidate.commitments.horizontal_messages, ) { log::debug!( target: LOG_TARGET, @@ -495,7 +514,7 @@ impl Pallet { if para_id == assignment.para_id { if let Some(required_collator) = assignment.required_collator() { ensure!( - required_collator == &candidate.descriptor().collator, + required_collator == 
&backed_candidate.descriptor().collator, Error::::WrongCollator, ); } @@ -513,14 +532,15 @@ impl Pallet { // We don't want to error out here because it will // brick the relay-chain. So we return early without // doing anything. - return Ok(Vec::new()) + return Ok(ProcessedCandidates::default()) }, }; let expected = persisted_validation_data.hash(); ensure!( - expected == candidate.descriptor().persisted_validation_data_hash, + expected == + backed_candidate.descriptor().persisted_validation_data_hash, Error::::ValidationDataHashMismatch, ); } @@ -540,7 +560,7 @@ impl Pallet { // check the signatures in the backing and that it is a majority. { let maybe_amount_validated = primitives::v1::check_candidate_backing( - &candidate, + &backed_candidate, &signing_context, group_vals.len(), |idx| { @@ -561,17 +581,28 @@ impl Pallet { }, } - for (bit_idx, _) in candidate + let mut backer_idx_and_attestation = + Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( + backed_candidate.validator_indices.count_ones(), + ); + let candidate_receipt = backed_candidate.receipt(); + + for ((bit_idx, _), attestation) in backed_candidate .validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes.iter().cloned()) { - let val_idx = - group_vals.get(bit_idx).expect("this query done above; qed"); + let val_idx = group_vals + .get(bit_idx) + .expect("this query succeeded above; qed"); + backer_idx_and_attestation.push((*val_idx, attestation)); backers.set(val_idx.0 as _, true); } + candidate_receipt_with_backing_validator_indices + .push((candidate_receipt, backer_idx_and_attestation)); } core_indices_and_backers.push(( @@ -629,7 +660,7 @@ impl Pallet { descriptor, availability_votes, relay_parent_number, - backers, + backers: backers.to_bitvec(), backed_in_number: check_cx.now, backing_group: group, }, @@ -637,7 +668,10 @@ impl Pallet { >::insert(¶_id, commitments); } - Ok(core_indices) + Ok(ProcessedCandidates:: { + core_indices, 
+ candidate_receipt_with_backing_validator_indices, + }) } /// Run the acceptance criteria checks on the given candidate commitments. @@ -1412,13 +1446,14 @@ mod tests { bare_bitfield, &signing_context, )); - - assert!(ParaInclusion::process_bitfields( - expected_bits(), - vec![signed.into()], - &core_lookup, - ) - .is_err()); + assert_eq!( + ParaInclusion::process_bitfields( + expected_bits(), + vec![signed.into()], + &core_lookup, + ), + Ok(vec![]) + ); } // empty bitfield signed: always OK, but kind of useless. @@ -2389,9 +2424,32 @@ mod tests { BackingKind::Threshold, )); - let occupied_cores = ParaInclusion::process_candidates( + let backed_candidates = vec![backed_a, backed_b, backed_c]; + let get_backing_group_idx = { + // the order defines the group implicitly for this test case + let backed_candidates_with_groups = backed_candidates + .iter() + .enumerate() + .map(|(idx, backed_candidate)| (backed_candidate.hash(), GroupIndex(idx as _))) + .collect::>(); + + move |candidate_hash_x: CandidateHash| -> Option { + backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { + if *candidate_hash == candidate_hash_x { + Some(*grp) + } else { + None + } + }) + } + }; + + let ProcessedCandidates { + core_indices: occupied_cores, + candidate_receipt_with_backing_validator_indices, + } = ParaInclusion::process_candidates( Default::default(), - vec![backed_a, backed_b, backed_c], + backed_candidates.clone(), vec![ chain_a_assignment.clone(), chain_b_assignment.clone(), @@ -2406,6 +2464,55 @@ mod tests { vec![CoreIndex::from(0), CoreIndex::from(1), CoreIndex::from(2)] ); + // Transform the votes into the setup we expect + let expected = { + let mut intermediate = std::collections::HashMap::< + CandidateHash, + (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), + >::new(); + backed_candidates.into_iter().for_each(|backed_candidate| { + let candidate_receipt_with_backers = intermediate + .entry(backed_candidate.hash()) + .or_insert_with(|| 
(backed_candidate.receipt(), Vec::new())); + + assert_eq!( + backed_candidate.validity_votes.len(), + backed_candidate.validator_indices.count_ones() + ); + candidate_receipt_with_backers.1.extend( + backed_candidate + .validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes.iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = + get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + }); + intermediate.into_values().collect::>() + }; + + // sort, since we use a hashmap above + let assure_candidate_sorting = |mut candidate_receipts_with_backers: Vec<( + CandidateReceipt, + Vec<(ValidatorIndex, ValidityAttestation)>, + )>| { + candidate_receipts_with_backers.sort_by(|(cr1, _), (cr2, _)| { + cr1.descriptor().para_id.cmp(&cr2.descriptor().para_id) + }); + candidate_receipts_with_backers + }; + assert_eq!( + assure_candidate_sorting(expected), + assure_candidate_sorting(candidate_receipt_with_backing_validator_indices) + ); + assert_eq!( >::get(&chain_a), Some(CandidatePendingAvailability { @@ -2533,13 +2640,14 @@ mod tests { BackingKind::Threshold, )); - let occupied_cores = ParaInclusion::process_candidates( - Default::default(), - vec![backed_a], - vec![chain_a_assignment.clone()], - &group_validators, - ) - .expect("candidates scheduled, in order, and backed"); + let ProcessedCandidates { core_indices: occupied_cores, .. 
} = + ParaInclusion::process_candidates( + Default::default(), + vec![backed_a], + vec![chain_a_assignment.clone()], + &group_validators, + ) + .expect("candidates scheduled, in order, and backed"); assert_eq!(occupied_cores, vec![CoreIndex::from(0)]); diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index ebaa68b46ba5..95e3310e37fe 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -329,7 +329,7 @@ impl OneSessionHandler for Pal >::on_new_session(changed, session_index, validators, Some(queued)); } - fn on_disabled(_i: usize) {} + fn on_disabled(_i: u32) {} } #[cfg(test)] diff --git a/runtime/parachains/src/initializer/benchmarking.rs b/runtime/parachains/src/initializer/benchmarking.rs index d73c427f133d..c6fb38d68f86 100644 --- a/runtime/parachains/src/initializer/benchmarking.rs +++ b/runtime/parachains/src/initializer/benchmarking.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_system::{DigestItemOf, RawOrigin}; use primitives::v1::ConsensusLog; @@ -35,10 +35,10 @@ benchmarks! 
{ &>::from(ConsensusLog::ForceApprove(d + 1)), ); } -} -impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(Default::default()), - crate::mock::Test -); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index 9e646dbbc1ca..a5b58cc54b00 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -116,7 +116,7 @@ impl crate::initializer::Config for Test { } impl crate::configuration::Config for Test { - type WeightInfo = crate::configuration::weights::WeightInfo; + type WeightInfo = crate::configuration::TestWeightInfo; } impl crate::shared::Config for Test {} @@ -124,7 +124,7 @@ impl crate::shared::Config for Test {} impl crate::paras::Config for Test { type Origin = Origin; type Event = Event; - type WeightInfo = crate::paras::weights::WeightInfo; + type WeightInfo = crate::paras::TestWeightInfo; } impl crate::dmp::Config for Test {} diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index 666012a3b4e3..d7bc9e7a7619 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -43,7 +43,6 @@ pub use crate::Origin as ParachainOrigin; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod weights; pub use pallet::*; @@ -279,6 +278,25 @@ pub trait WeightInfo { fn force_queue_action() -> Weight; } +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn force_set_current_code(_c: u32) -> Weight { + Weight::MAX + } + fn force_set_current_head(_s: u32) -> Weight { + Weight::MAX + } + fn force_schedule_code_upgrade(_c: u32) -> Weight { + Weight::MAX + } + fn force_note_new_head(_s: u32) -> Weight { + Weight::MAX + } + fn force_queue_action() -> Weight { + Weight::MAX + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -347,6 +365,7 @@ pub mod pallet { /// /// Corresponding code can be retrieved with 
[`CodeByHash`]. #[pallet::storage] + #[pallet::getter(fn current_code_hash)] pub(super) type CurrentCodeHash = StorageMap<_, Twox64Concat, ParaId, ValidationCodeHash>; @@ -605,7 +624,7 @@ impl Pallet { /// The validation code of live para. pub(crate) fn current_code(para_id: &ParaId) -> Option { - CurrentCodeHash::::get(para_id).and_then(|code_hash| { + Self::current_code_hash(para_id).and_then(|code_hash| { let code = CodeByHash::::get(&code_hash); if code.is_none() { log::error!( diff --git a/runtime/parachains/src/paras/benchmarking.rs b/runtime/parachains/src/paras/benchmarking.rs index 2e9a26c260a3..b37ee5b83618 100644 --- a/runtime/parachains/src/paras/benchmarking.rs +++ b/runtime/parachains/src/paras/benchmarking.rs @@ -16,7 +16,7 @@ use super::*; use crate::{configuration::HostConfiguration, shared}; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_system::RawOrigin; use primitives::v1::{HeadData, Id as ParaId, ValidationCode, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE}; use sp_runtime::traits::{One, Saturating}; @@ -126,10 +126,10 @@ benchmarks! 
{ let next_session = crate::shared::Pallet::::session_index().saturating_add(One::one()); assert_last_event::(Event::ActionQueued(para_id, next_session).into()); } -} -impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(Default::default()), - crate::mock::Test -); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/runtime/parachains/src/paras_inherent.rs b/runtime/parachains/src/paras_inherent.rs index ea480ad7c96a..c866a077ccb2 100644 --- a/runtime/parachains/src/paras_inherent.rs +++ b/runtime/parachains/src/paras_inherent.rs @@ -33,7 +33,8 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use primitives::v1::{ - BackedCandidate, InherentData as ParachainsInherentData, PARACHAINS_INHERENT_IDENTIFIER, + BackedCandidate, InherentData as ParachainsInherentData, ScrapedOnChainVotes, + PARACHAINS_INHERENT_IDENTIFIER, }; use sp_runtime::traits::Header as HeaderT; use sp_std::prelude::*; @@ -79,6 +80,11 @@ pub mod pallet { #[pallet::storage] pub(crate) type Included = StorageValue<_, ()>; + /// Scraped on chain data for extracting resolved disputes as well as backing votes. + #[pallet::storage] + #[pallet::getter(fn on_chain_votes)] + pub(crate) type OnChainVotes = StorageValue<_, ScrapedOnChainVotes>; + #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_: T::BlockNumber) -> Weight { @@ -173,21 +179,21 @@ pub mod pallet { // Handle disputes logic. let current_session = >::session_index(); - let freed_disputed: Vec<(_, FreedReason)> = { + { let new_current_dispute_sets: Vec<_> = disputes .iter() .filter(|s| s.session == current_session) .map(|s| (s.session, s.candidate_hash)) .collect(); - let _ = T::DisputesHandler::provide_multi_dispute_data(disputes)?; + let _ = T::DisputesHandler::provide_multi_dispute_data(disputes.clone())?; if T::DisputesHandler::is_frozen() { // The relay chain we are currently on is invalid. Proceed no further on parachains. 
Included::::set(Some(())); return Ok(Some(MINIMAL_INCLUSION_INHERENT_WEIGHT).into()) } - if !new_current_dispute_sets.is_empty() { + let mut freed_disputed = if !new_current_dispute_sets.is_empty() { let concluded_invalid_disputes: Vec<_> = new_current_dispute_sets .iter() .filter(|(s, c)| T::DisputesHandler::concluded_invalid(*s, *c)) @@ -200,6 +206,13 @@ pub mod pallet { .collect() } else { Vec::new() + }; + + if !freed_disputed.is_empty() { + // unstable sort is fine, because core indices are unique + // i.e. the same candidate can't occupy 2 cores at once. + freed_disputed.sort_unstable_by_key(|pair| pair.0); // sort by core index + >::free_cores(freed_disputed); } }; @@ -227,12 +240,13 @@ pub mod pallet { }; // Schedule paras again, given freed cores, and reasons for freeing. - let mut freed = freed_disputed + let mut freed = freed_concluded .into_iter() - .chain(freed_concluded.into_iter().map(|(c, _hash)| (c, FreedReason::Concluded))) + .map(|(c, _hash)| (c, FreedReason::Concluded)) .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) .collect::>(); + // unstable sort is fine, because core indices are unique. freed.sort_unstable_by_key(|pair| pair.0); // sort by core index >::clear(); @@ -254,13 +268,24 @@ pub mod pallet { // Process backed candidates according to scheduled cores. let parent_storage_root = parent_header.state_root().clone(); - let occupied = >::process_candidates( + let inclusion::ProcessedCandidates::<::Hash> { + core_indices: occupied, + candidate_receipt_with_backing_validator_indices, + } = >::process_candidates( parent_storage_root, backed_candidates, >::scheduled(), >::group_validators, )?; + // The number of disputes included in a block is + // limited by the weight as well as the number of candidate blocks. 
+ OnChainVotes::::put(ScrapedOnChainVotes::<::Hash> { + session: current_session, + backing_validators_per_candidate: candidate_receipt_with_backing_validator_indices, + disputes, + }); + // Note which of the scheduled cores were actually occupied by a backed candidate. >::occupied(&occupied); diff --git a/runtime/parachains/src/runtime_api_impl/v1.rs b/runtime/parachains/src/runtime_api_impl/v1.rs index 3100a324f3c5..d544ec4ed646 100644 --- a/runtime/parachains/src/runtime_api_impl/v1.rs +++ b/runtime/parachains/src/runtime_api_impl/v1.rs @@ -18,14 +18,15 @@ //! functions. use crate::{ - configuration, dmp, hrmp, inclusion, initializer, paras, scheduler, session_info, shared, + configuration, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler, + session_info, shared, }; use primitives::v1::{ AuthorityDiscoveryId, CandidateEvent, CommittedCandidateReceipt, CoreIndex, CoreOccupied, - CoreState, GroupIndex, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, + CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, - ScheduledCore, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, + ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, }; use sp_runtime::traits::One; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -172,6 +173,16 @@ pub fn availability_cores() -> Vec( +) -> (::BlockNumber, ::Hash) { + use parity_scale_codec::Decode as _; + let relay_parent_number = >::block_number(); + let relay_parent_storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) + .expect("storage root must decode to the Hash type; qed"); + (relay_parent_number, relay_parent_storage_root) +} + fn with_assumption( para_id: ParaId, assumption: OccupiedCoreAssumption, @@ -202,10 +213,7 @@ pub fn 
persisted_validation_data( para_id: ParaId, assumption: OccupiedCoreAssumption, ) -> Option> { - use parity_scale_codec::Decode as _; - let relay_parent_number = >::block_number(); - let relay_parent_storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) - .expect("storage root must decode to the Hash type; qed"); + let (relay_parent_number, relay_parent_storage_root) = current_relay_parent::(); with_assumption::(para_id, assumption, || { crate::util::make_persisted_validation_data::( para_id, @@ -215,6 +223,35 @@ pub fn persisted_validation_data( }) } +/// Implementation for the `assumed_validation_data` function of the runtime API. +pub fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, +) -> Option<(PersistedValidationData, ValidationCodeHash)> { + let (relay_parent_number, relay_parent_storage_root) = current_relay_parent::(); + // This closure obtains the `persisted_validation_data` for the given `para_id` and matches + // its hash against an expected one. + let make_validation_data = || { + crate::util::make_persisted_validation_data::( + para_id, + relay_parent_number, + relay_parent_storage_root, + ) + .filter(|validation_data| validation_data.hash() == expected_persisted_validation_data_hash) + }; + + let persisted_validation_data = make_validation_data().or_else(|| { + // Try again with force enacting the core. This check only makes sense if + // the core is occupied. + >::pending_availability(para_id).and_then(|_| { + >::force_enact(para_id); + make_validation_data() + }) + }); + // If we were successful, also query current validation code hash. + persisted_validation_data.zip(>::current_code_hash(¶_id)) +} + /// Implementation for the `check_validation_outputs` function of the runtime API. pub fn check_validation_outputs( para_id: ParaId, @@ -329,3 +366,8 @@ pub fn validation_code_by_hash( ) -> Option { >::code_by_hash(hash) } + +/// Disputes imported via means of on-chain imports. 
+pub fn on_chain_votes() -> Option> { + >::on_chain_votes() +} diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index e6772c19d910..8e948e3b5529 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -369,6 +369,43 @@ impl Pallet { }) } + /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along with the reason + /// for them being freed. The list is assumed to be sorted in ascending order by core index. + pub(crate) fn free_cores(just_freed_cores: impl IntoIterator) { + let config = >::config(); + + AvailabilityCores::::mutate(|cores| { + for (freed_index, freed_reason) in just_freed_cores { + if (freed_index.0 as usize) < cores.len() { + match cores[freed_index.0 as usize].take() { + None => continue, + Some(CoreOccupied::Parachain) => {}, + Some(CoreOccupied::Parathread(entry)) => { + match freed_reason { + FreedReason::Concluded => { + // After a parathread candidate has successfully been included, + // open it up for further claims! + ParathreadClaimIndex::::mutate(|index| { + if let Ok(i) = index.binary_search(&entry.claim.0) { + index.remove(i); + } + }) + }, + FreedReason::TimedOut => { + // If a parathread candidate times out, it's not the collator's fault, + // so we don't increment retries. + ParathreadQueue::::mutate(|queue| { + queue.enqueue_entry(entry, config.parathread_cores); + }) + }, + } + }, + } + } + } + }) + } + /// Schedule all unassigned cores, where possible. Provide a list of cores that should be considered /// newly-freed along with the reason for them being freed. The list is assumed to be sorted in /// ascending order by core index. 
@@ -376,38 +413,9 @@ impl Pallet { just_freed_cores: impl IntoIterator, now: T::BlockNumber, ) { - let mut cores = AvailabilityCores::::get(); - let config = >::config(); - - for (freed_index, freed_reason) in just_freed_cores { - if (freed_index.0 as usize) < cores.len() { - match cores[freed_index.0 as usize].take() { - None => continue, - Some(CoreOccupied::Parachain) => {}, - Some(CoreOccupied::Parathread(entry)) => { - match freed_reason { - FreedReason::Concluded => { - // After a parathread candidate has successfully been included, - // open it up for further claims! - ParathreadClaimIndex::::mutate(|index| { - if let Ok(i) = index.binary_search(&entry.claim.0) { - index.remove(i); - } - }) - }, - FreedReason::TimedOut => { - // If a parathread candidate times out, it's not the collator's fault, - // so we don't increment retries. - ParathreadQueue::::mutate(|queue| { - queue.enqueue_entry(entry, config.parathread_cores); - }) - }, - } - }, - } - } - } + Self::free_cores(just_freed_cores); + let cores = AvailabilityCores::::get(); let parachains = >::parachains(); let mut scheduled = Scheduled::::get(); let mut parathread_queue = ParathreadQueue::::get(); @@ -510,7 +518,6 @@ impl Pallet { Scheduled::::set(scheduled); ParathreadQueue::::set(parathread_queue); - AvailabilityCores::::set(cores); } /// Note that the given cores have become occupied. 
Behavior undefined if any of the given cores were not scheduled diff --git a/runtime/parachains/src/session_info.rs b/runtime/parachains/src/session_info.rs index 93f0ba736577..d69f71e827b9 100644 --- a/runtime/parachains/src/session_info.rs +++ b/runtime/parachains/src/session_info.rs @@ -166,7 +166,7 @@ impl OneSessionHandler for Pal AssignmentKeysUnsafe::::set(assignment_keys); } - fn on_disabled(_i: usize) {} + fn on_disabled(_i: u32) {} } #[cfg(test)] diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs index b7a1c9f508fd..47111e357db9 100644 --- a/runtime/parachains/src/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -202,20 +202,20 @@ pub mod pallet { /// Upward message executed with the given outcome. /// \[ id, outcome \] ExecutedUpward(MessageId, Outcome), - /// The weight limit for handling downward messages was reached. + /// The weight limit for handling upward messages was reached. /// \[ id, remaining, required \] WeightExhausted(MessageId, Weight, Weight), - /// Some downward messages have been received and will be processed. + /// Some upward messages have been received and will be processed. /// \[ para, count, size \] UpwardMessagesReceived(ParaId, u32, u32), - /// The weight budget was exceeded for an individual downward message. + /// The weight budget was exceeded for an individual upward message. /// /// This message can be later dispatched manually using `service_overweight` dispatchable /// using the assigned `overweight_index`. /// /// \[ para, id, overweight_index, required \] OverweightEnqueued(ParaId, MessageId, OverweightIndex, Weight), - /// Downward message from the overweight queue was executed with the given actual weight + /// Upward message from the overweight queue was executed with the given actual weight /// used. 
/// /// \[ overweight_index, used \] diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index 07558ef8881d..0217cc334428 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -36,6 +36,7 @@ sp-npos-elections = { git = "https://github.com/paritytech/substrate", branch = pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-bags-list = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-bounties = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -114,6 +115,7 @@ std = [ "frame-executive/std", "pallet-authority-discovery/std", "pallet-authorship/std", + "pallet-bags-list/std", "pallet-balances/std", "pallet-bounties/std", "pallet-transaction-payment/std", @@ -164,6 +166,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "pallet-babe/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", diff --git a/runtime/polkadot/src/bag_thresholds.rs b/runtime/polkadot/src/bag_thresholds.rs new file mode 100644 index 000000000000..2eb2158d8f13 --- /dev/null +++ 
b/runtime/polkadot/src/bag_thresholds.rs @@ -0,0 +1,234 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated bag thresholds. +//! +//! Generated on 2021-10-14T08:36:33.156699497+00:00 +//! for the polkadot runtime. + +/// Existential weight for this runtime. +#[cfg(any(test, feature = "std"))] +#[allow(unused)] +pub const EXISTENTIAL_WEIGHT: u64 = 10_000_000_000; + +/// Constant ratio between bags for this runtime. +#[cfg(any(test, feature = "std"))] +#[allow(unused)] +pub const CONSTANT_RATIO: f64 = 1.1131723507077667; + +/// Upper thresholds delimiting the bag list. 
+pub const THRESHOLDS: [u64; 200] = [ + 10_000_000_000, + 11_131_723_507, + 12_391_526_824, + 13_793_905_044, + 15_354_993_703, + 17_092_754_435, + 19_027_181_634, + 21_180_532_507, + 23_577_583_160, + 26_245_913_670, + 29_216_225_417, + 32_522_694_326, + 36_203_364_094, + 40_300_583_912, + 44_861_495_728, + 49_938_576_656, + 55_590_242_767, + 61_881_521_217, + 68_884_798_439, + 76_680_653_006, + 85_358_782_760, + 95_019_036_859, + 105_772_564_622, + 117_743_094_401, + 131_068_357_174, + 145_901_671_259, + 162_413_706_368, + 180_794_447_305, + 201_255_379_901, + 224_031_924_337, + 249_386_143_848, + 277_609_759_981, + 309_027_509_097, + 344_000_878_735, + 382_932_266_827, + 426_269_611_626, + 474_511_545_609, + 528_213_132_664, + 587_992_254_562, + 654_536_720_209, + 728_612_179_460, + 811_070_932_564, + 902_861_736_593, + 1_005_040_721_687, + 1_118_783_542_717, + 1_245_398_906_179, + 1_386_343_627_960, + 1_543_239_395_225, + 1_717_891_425_287, + 1_912_309_236_147, + 2_128_729_767_682, + 2_369_643_119_512, + 2_637_821_201_686, + 2_936_349_627_828, + 3_268_663_217_709, + 3_638_585_517_729, + 4_050_372_794_022, + 4_508_763_004_364, + 5_019_030_312_352, + 5_587_045_771_074, + 6_219_344_874_498, + 6_923_202_753_807, + 7_706_717_883_882, + 8_578_905_263_043, + 9_549_800_138_161, + 10_630_573_468_586, + 11_833_660_457_397, + 13_172_903_628_838, + 14_663_712_098_160, + 16_323_238_866_411, + 18_170_578_180_087, + 20_226_985_226_447, + 22_516_120_692_255, + 25_064_322_999_817, + 27_900_911_352_605, + 31_058_523_077_268, + 34_573_489_143_434, + 38_486_252_181_966, + 42_841_831_811_331, + 47_690_342_626_046, + 53_087_570_807_094, + 59_095_615_988_698, + 65_783_605_766_662, + 73_228_491_069_308, + 81_515_931_542_404, + 90_741_281_135_191, + 101_010_685_227_495, + 112_442_301_921_293, + 125_167_661_548_718, + 139_333_180_038_781, + 155_101_843_555_358, + 172_655_083_789_626, + 192_194_865_483_744, + 213_946_010_204_502, + 238_158_783_103_893, + 265_111_772_429_462, + 
295_115_094_915_607, + 328_513_963_936_552, + 365_692_661_475_578, + 407_078_959_611_349, + 453_149_042_394_237, + 504_432_984_742_966, + 561_520_851_400_862, + 625_069_486_125_324, + 695_810_069_225_823, + 774_556_530_406_243, + 862_214_913_708_369, + 959_793_802_308_039, + 1_068_415_923_109_985, + 1_189_331_064_661_951, + 1_323_930_457_019_515, + 1_473_762_779_014_021, + 1_640_551_977_100_649, + 1_826_217_100_807_404, + 2_032_894_383_008_501, + 2_262_961_819_074_188, + 2_519_066_527_700_738, + 2_804_155_208_229_882, + 3_121_508_044_894_685, + 3_474_776_448_088_622, + 3_868_025_066_902_796, + 4_305_778_556_320_752, + 4_793_073_637_166_665, + 5_335_517_047_800_242, + 5_939_350_054_341_159, + 6_611_520_261_667_250, + 7_359_761_551_432_161, + 8_192_683_066_856_378, + 9_119_868_268_136_230, + 10_151_985_198_186_376, + 11_300_909_227_415_580, + 12_579_859_689_817_292, + 14_003_551_982_487_792, + 15_588_366_878_604_342, + 17_352_539_001_951_086, + 19_316_366_631_550_092, + 21_502_445_250_375_680, + 23_935_927_525_325_748, + 26_644_812_709_737_600, + 29_660_268_798_266_784, + 33_016_991_140_790_860, + 36_753_601_641_491_664, + 40_913_093_136_236_104, + 45_543_324_061_189_736, + 50_697_569_104_240_168, + 56_435_132_174_936_472, + 62_822_028_745_677_552, + 69_931_745_415_056_768, + 77_846_085_432_775_824, + 86_656_109_914_600_688, + 96_463_185_576_826_656, + 107_380_151_045_315_664, + 119_532_615_158_469_088, + 133_060_402_202_199_856, + 148_119_160_705_543_712, + 164_882_154_307_451_552, + 183_542_255_300_186_560, + 204_314_163_786_713_728, + 227_436_877_985_347_776, + 253_176_444_104_585_088, + 281_829_017_427_734_464, + 313_724_269_827_691_328, + 349_229_182_918_168_832, + 388_752_270_484_770_624, + 432_748_278_778_513_664, + 481_723_418_752_617_984, + 536_241_190_443_833_600, + 596_928_866_512_693_376, + 664_484_709_541_257_600, + 739_686_006_129_409_280, + 823_398_010_228_713_984, + 916_583_898_614_395_264, + 1_020_315_853_041_475_584, + 1_135_787_396_594_579_584, + 
1_264_327_126_171_442_688, + 1_407_413_999_103_859_968, + 1_566_694_349_801_462_272, + 1_744_000_832_209_069_824, + 1_941_373_506_026_471_680, + 2_161_083_309_305_266_176, + 2_405_658_187_494_662_656, + 2_677_912_179_572_818_944, + 2_980_977_795_924_034_048, + 3_318_342_060_496_414_208, + 3_693_886_631_935_247_360, + 4_111_932_465_319_354_368, + 4_577_289_528_371_127_808, + 5_095_312_144_166_932_480, + 5_671_960_597_112_134_656, + 6_313_869_711_009_142_784, + 7_028_425_188_266_614_784, + 7_823_848_588_596_424_704, + 8_709_291_924_949_524_480, + 9_694_942_965_096_232_960, + 10_792_142_450_433_898_496, + 12_013_514_580_722_579_456, + 13_373_112_266_084_982_784, + 14_886_578_817_516_689_408, + 16_571_327_936_291_497_984, + 18_446_744_073_709_551_615, +]; diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 3addbf6932d7..df0b85c80a3a 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -54,8 +54,8 @@ use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, }; use sp_core::{ u32_trait::{_1, _2, _3, _4, _5}, @@ -95,6 +95,8 @@ use frame_support::traits::InstanceFilter; // Weights used in the runtime. mod weights; +mod bag_thresholds; + // Make the WASM binary available. 
#[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -105,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("polkadot"), impl_name: create_runtime_str!("parity-polkadot"), authoring_version: 0, - spec_version: 9110, + spec_version: 9130, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -167,9 +169,13 @@ impl Contains for BaseFilter { Call::Dmp(_) | Call::Ump(_) | Call::Hrmp(_) | - Call::Slots(_) => true, - // Disable paras registration, crowdloans, and auctions for now. - Call::Registrar(_) | Call::Auctions(_) | Call::Crowdloan(_) => false, + Call::Slots(_) | + Call::Registrar(_) | + Call::Auctions(_) | + Call::Crowdloan(_) | + Call::BagsList(_) => true, + // All pallets are allowed, but exhaustive match is defensive + // in the case of adding new pallets. } } } @@ -302,11 +308,15 @@ impl pallet_balances::Config for Runtime { parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; + /// This value increases the priority of `Operational` transactions by adding + /// a "virtual tip" that's equal to the `OperationalFeeMultiplier * final_fee`. + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; } @@ -344,10 +354,6 @@ impl_opaque_keys! { } } -parameter_types! 
{ - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); -} - impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; @@ -357,7 +363,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = weights::pallet_session::WeightInfo; } @@ -381,8 +386,8 @@ parameter_types! { pub SignedRewardBase: Balance = 1 * UNITS; pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(5u32, 10_000); - // miner configs - pub OffchainRepeat: BlockNumber = 5; + // 4 hour session, 1 hour unsigned phase, 32 offchain executions. + pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 32; /// Whilst `UseNominatorsAndUpdateBagsList` or `UseNominatorsMap` is in use, this can still be a /// very large value. Once the `BagsList` is in full motion, staking might open its door to many @@ -436,6 +441,17 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type VoterSnapshotPerBlock = VoterSnapshotPerBlock; } +parameter_types! { + pub const BagThresholds: &'static [u64] = &bag_thresholds::THRESHOLDS; +} + +impl pallet_bags_list::Config for Runtime { + type Event = Event; + type VoteWeightProvider = Staking; + type WeightInfo = weights::pallet_bags_list::WeightInfo; + type BagThresholds = BagThresholds; +} + // TODO #6469: This shouldn't be static, but a lazily cached value, not built unless needed, and // re-built in case input parameters have changed. The `ideal_stake` should be determined by the // amount of parachain slots being bid on: this should be around `(75 - 25.min(slots / 4))%`. @@ -460,6 +476,7 @@ parameter_types! 
{ pub const SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); } type SlashCancelOrigin = EnsureOneOf< @@ -491,12 +508,12 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = runtime_common::elections::GenesisElectionOf; - // Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The migration - // to bags-list is a no-op, but the storage version will be updated. - type SortedListProvider = pallet_staking::UseNominatorsMap; + // Use the nominators map to iter voters, but also keep bags-list up-to-date. + type SortedListProvider = runtime_common::elections::UseNominatorsAndUpdateBagsList; type WeightInfo = weights::pallet_staking::WeightInfo; } @@ -1046,7 +1063,8 @@ impl InstanceFilter for ProxyType { Call::Registrar(paras_registrar::Call::reserve {..}) | Call::Crowdloan(..) | Call::Slots(..) | - Call::Auctions(..) // Specifically omitting the entire XCM Pallet + Call::Auctions(..) | // Specifically omitting the entire XCM Pallet + Call::BagsList(..) ), ProxyType::Governance => matches!( c, @@ -1169,6 +1187,12 @@ impl paras_registrar::Config for Runtime { parameter_types! { // 12 weeks = 3 months per lease period -> 8 lease periods ~ 2 years pub const LeasePeriod: BlockNumber = 12 * WEEKS; + // Polkadot Genesis was on May 26, 2020. + // Target Parachain Onboarding Date: Dec 15, 2021. + // Difference is 568 days. + // We want a lease period to start on the target onboarding date. 
+ // 568 % (12 * 7) = 64 day offset + pub const LeaseOffset: BlockNumber = 64 * DAYS; } impl slots::Config for Runtime { @@ -1176,6 +1200,7 @@ impl slots::Config for Runtime { type Currency = Balances; type Registrar = Registrar; type LeasePeriod = LeasePeriod; + type LeaseOffset = LeaseOffset; type WeightInfo = weights::runtime_common_slots::WeightInfo; } @@ -1289,6 +1314,9 @@ construct_runtime! { // Election pallet. Only works with staking, but placed here to maintain indices. ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Pallet, Call, Storage, Event, ValidateUnsigned} = 36, + // Provides a semi-sorted list of nominators for staking. + BagsList: pallet_bags_list::{Pallet, Call, Storage, Event} = 37, + // Parachains pallets. Start indices at 50 to leave room. ParachainsOrigin: parachains_origin::{Pallet, Origin} = 50, Configuration: parachains_configuration::{Pallet, Call, Storage, Config} = 51, @@ -1341,143 +1369,27 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPallets, - ( - SetInitialHostConfiguration, - BountiesPrefixMigration, - CouncilStoragePrefixMigration, - TechnicalCommitteeStoragePrefixMigration, - TechnicalMembershipStoragePrefixMigration, - MigrateTipsPalletPrefix, - ), + StakingBagsListMigrationV8, >; /// The payload being signed in transactions. 
pub type SignedPayload = generic::SignedPayload; -const BOUNTIES_OLD_PREFIX: &str = "Treasury"; - -/// Migrate from 'Treasury' to the new prefix 'Bounties' -pub struct BountiesPrefixMigration; - -impl OnRuntimeUpgrade for BountiesPrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("Bounties is part of runtime, so it has a name; qed"); - pallet_bounties::migrations::v4::migrate::(BOUNTIES_OLD_PREFIX, name) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("Bounties is part of runtime, so it has a name; qed"); - pallet_bounties::migrations::v4::pre_migration::( - BOUNTIES_OLD_PREFIX, - name, - ); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("Bounties is part of runtime, so it has a name; qed"); - pallet_bounties::migrations::v4::post_migration::( - BOUNTIES_OLD_PREFIX, - name, - ); - Ok(()) - } -} - -const COUNCIL_OLD_PREFIX: &str = "Instance1Collective"; -/// Migrate from `Instance1Collective` to the new pallet prefix `Council` -pub struct CouncilStoragePrefixMigration; - -impl OnRuntimeUpgrade for CouncilStoragePrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_collective::migrations::v4::migrate::(COUNCIL_OLD_PREFIX) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::pre_migrate::(COUNCIL_OLD_PREFIX); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::post_migrate::(COUNCIL_OLD_PREFIX); - Ok(()) - } -} - -const TECHNICAL_COMMITTEE_OLD_PREFIX: &str = "Instance2Collective"; -/// Migrate from 
`Instance2Collective` to the new pallet prefix `TechnicalCommittee` -pub struct TechnicalCommitteeStoragePrefixMigration; +// Migration to generate pallet staking's `SortedListProvider` from pre-existing nominators. +pub struct StakingBagsListMigrationV8; -impl OnRuntimeUpgrade for TechnicalCommitteeStoragePrefixMigration { +impl OnRuntimeUpgrade for StakingBagsListMigrationV8 { fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_collective::migrations::v4::migrate::( - TECHNICAL_COMMITTEE_OLD_PREFIX, - ) + pallet_staking::migrations::v8::migrate::() } #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::pre_migrate::( - TECHNICAL_COMMITTEE_OLD_PREFIX, - ); - Ok(()) + pallet_staking::migrations::v8::pre_migrate::() } #[cfg(feature = "try-runtime")] fn post_upgrade() -> Result<(), &'static str> { - pallet_collective::migrations::v4::post_migrate::( - TECHNICAL_COMMITTEE_OLD_PREFIX, - ); - Ok(()) - } -} - -const TECHNICAL_MEMBERSHIP_OLD_PREFIX: &str = "Instance1Membership"; -/// Migrate from `Instance1Membership` to the new pallet prefix `TechnicalMembership` -pub struct TechnicalMembershipStoragePrefixMigration; - -impl OnRuntimeUpgrade for TechnicalMembershipStoragePrefixMigration { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("TechnialMembership is part of runtime, so it has a name; qed"); - pallet_membership::migrations::v4::migrate::( - TECHNICAL_MEMBERSHIP_OLD_PREFIX, - name, - ) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("TechnicalMembership is part of runtime, so it has a name; qed"); - pallet_membership::migrations::v4::pre_migrate::( - TECHNICAL_MEMBERSHIP_OLD_PREFIX, - name, - ); - Ok(()) - } - - #[cfg(feature = "try-runtime")] - fn 
post_upgrade() -> Result<(), &'static str> { - use frame_support::traits::PalletInfo; - let name = ::PalletInfo::name::() - .expect("TechnicalMembership is part of runtime, so it has a name; qed"); - pallet_membership::migrations::v4::post_migrate::( - TECHNICAL_MEMBERSHIP_OLD_PREFIX, - name, - ); - Ok(()) + pallet_staking::migrations::v8::post_migrate::() } } @@ -1535,6 +1447,26 @@ impl OnRuntimeUpgrade for SetInitialHostConfiguration { Configuration::force_set_active_config(active_config); } + { + // At the moment, the `parachains_configuration` crate has already had one runtime + // storage migration (performed as part of [#3575]). As a result a call to + // `StorageVersion::get::` will return `Some(1)` + // + // However, Polkadot is just about to have its first version of parachains runtime + // pallets and thus there is no existing storage which needs to be migrated. Above + // we just have set the active configuration of the actual version, i.e. the same as the + // version 1 on Kusama. + // + // The caveat here is when we deploy a module for the first time, its runtime version + // will be empty and thus it will be considered as version 0. Since we want to avoid + // the situation where the same storage structure has version 0 on Polkadot and + // version 1 on Kusama we need to set the storage version explicitly. + // + // [#3575]: https://github.com/paritytech/polkadot/pull/3575 + use frame_support::traits::StorageVersion; + StorageVersion::new(1).put::(); + } + RocksDbWeight::get().reads(1) + RocksDbWeight::get().writes(1) } } @@ -1638,6 +1570,16 @@ sp_api::impl_runtime_apis! 
{ parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) } + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + parachains_runtime_api_impl::assumed_validation_data::( + para_id, + expected_persisted_validation_data_hash, + ) + } + fn check_validation_outputs( para_id: ParaId, outputs: primitives::v1::CandidateCommitments, @@ -1686,6 +1628,10 @@ sp_api::impl_runtime_apis! { fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { parachains_runtime_api_impl::validation_code_by_hash::(hash) } + + fn on_chain_votes() -> Option> { + parachains_runtime_api_impl::on_chain_votes::() + } } impl beefy_primitives::BeefyApi for Runtime { @@ -1881,7 +1827,11 @@ sp_api::impl_runtime_apis! { list_benchmark!(list, extra, runtime_common::claims, Claims); list_benchmark!(list, extra, runtime_common::slots, Slots); list_benchmark!(list, extra, runtime_common::paras_registrar, Registrar); + list_benchmark!(list, extra, runtime_parachains::configuration, Configuration); + list_benchmark!(list, extra, runtime_parachains::initializer, Initializer); + list_benchmark!(list, extra, runtime_parachains::paras, Paras); // Substrate + list_benchmark!(list, extra, pallet_bags_list, BagsList); list_benchmark!(list, extra, pallet_balances, Balances); list_benchmark!(list, extra, pallet_bounties, Bounties); list_benchmark!(list, extra, pallet_collective, Council); @@ -1953,7 +1903,11 @@ sp_api::impl_runtime_apis! 
{ add_benchmark!(params, batches, runtime_common::claims, Claims); add_benchmark!(params, batches, runtime_common::slots, Slots); add_benchmark!(params, batches, runtime_common::paras_registrar, Registrar); + add_benchmark!(params, batches, runtime_parachains::configuration, Configuration); + add_benchmark!(params, batches, runtime_parachains::initializer, Initializer); + add_benchmark!(params, batches, runtime_parachains::paras, Paras); // Substrate + add_benchmark!(params, batches, pallet_bags_list, BagsList); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_bounties, Bounties); add_benchmark!(params, batches, pallet_collective, Council); diff --git a/runtime/polkadot/src/weights/mod.rs b/runtime/polkadot/src/weights/mod.rs index 19842799a533..c913094df553 100644 --- a/runtime/polkadot/src/weights/mod.rs +++ b/runtime/polkadot/src/weights/mod.rs @@ -16,6 +16,7 @@ //! A list of the different weight modules for our runtime. pub mod frame_system; +pub mod pallet_bags_list; pub mod pallet_balances; pub mod pallet_bounties; pub mod pallet_collective_council; diff --git a/runtime/polkadot/src/weights/pallet_bags_list.rs b/runtime/polkadot/src/weights/pallet_bags_list.rs new file mode 100644 index 000000000000..38285f4c2145 --- /dev/null +++ b/runtime/polkadot/src/weights/pallet_bags_list.rs @@ -0,0 +1,65 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! Autogenerated weights for `pallet_bags_list` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-09-17, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bags_list +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_bags_list`. +pub struct WeightInfo(PhantomData); +impl pallet_bags_list::WeightInfo for WeightInfo { + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: BagsList ListNodes (r:4 w:4) + // Storage: BagsList ListBags (r:1 w:1) + fn rebag_non_terminal() -> Weight { + (65_491_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) + fn rebag_terminal() -> Weight { + (64_253_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } +} diff --git a/runtime/polkadot/src/weights/runtime_common_claims.rs b/runtime/polkadot/src/weights/runtime_common_claims.rs index 1086ebdee551..7f352a3661cb 100644 --- a/runtime/polkadot/src/weights/runtime_common_claims.rs +++ 
b/runtime/polkadot/src/weights/runtime_common_claims.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// Copyright 2017-2021 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -15,8 +15,8 @@ // along with Polkadot. If not, see . //! Autogenerated weights for `runtime_common::claims` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-07-01, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-10-06, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 // Executed Command: @@ -31,8 +31,10 @@ // --wasm-execution=compiled // --heap-pages=4096 // --header=./file_header.txt -// --output=./runtime/polkadot/src/weights/ +// --output=./runtime/polkadot/src/weights/runtime_common_claims.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -42,28 +44,58 @@ use sp_std::marker::PhantomData; /// Weight functions for `runtime_common::claims`. 
pub struct WeightInfo(PhantomData); impl runtime_common::claims::WeightInfo for WeightInfo { + // Storage: Claims Claims (r:1 w:1) + // Storage: Claims Signing (r:1 w:1) + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:1 w:1) + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn claim() -> Weight { - (447_705_000 as Weight) + (440_159_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:0 w:1) + // Storage: Claims Claims (r:0 w:1) + // Storage: Claims Signing (r:0 w:1) fn mint_claim() -> Weight { - (11_995_000 as Weight) + (11_988_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } + // Storage: Claims Claims (r:1 w:1) + // Storage: Claims Signing (r:1 w:1) + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:1 w:1) + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn claim_attest() -> Weight { - (439_703_000 as Weight) + (443_037_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } + // Storage: Claims Preclaims (r:1 w:1) + // Storage: Claims Signing (r:1 w:1) + // Storage: Claims Claims (r:1 w:1) + // Storage: Claims Total (r:1 w:1) + // Storage: Claims Vesting (r:1 w:1) + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn attest() -> Weight { - (128_588_000 as Weight) + (125_350_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(7 as Weight)) } + // Storage: Claims Claims (r:1 w:2) + // Storage: Claims Vesting (r:1 w:2) + // Storage: Claims Signing (r:1 w:2) + // Storage: Claims 
Preclaims (r:1 w:1) fn move_claim() -> Weight { - (26_297_000 as Weight) + (26_261_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(7 as Weight)) } diff --git a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs index ae840f37752d..ad84a780a4d0 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs @@ -16,13 +16,13 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-09-17, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 +//! DATE: 2021-10-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 // Executed Command: // target/release/polkadot // benchmark -// --chain=kusama-dev +// --chain=polkadot-dev // --steps=50 // --repeat=20 // --pallet=runtime_parachains::configuration @@ -31,7 +31,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/polkadot/src/weights/runtime_parachains_configuration.rs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -48,7 +48,7 @@ impl runtime_parachains::configuration::WeightInfo for // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_block_number() -> Weight { - (12_378_000 as Weight) + (12_506_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -56,7 +56,7 @@ impl runtime_parachains::configuration::WeightInfo for 
// Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_u32() -> Weight { - (12_384_000 as Weight) + (12_550_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -64,7 +64,7 @@ impl runtime_parachains::configuration::WeightInfo for // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_option_u32() -> Weight { - (12_746_000 as Weight) + (12_521_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -72,7 +72,7 @@ impl runtime_parachains::configuration::WeightInfo for // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_weight() -> Weight { - (12_563_000 as Weight) + (12_867_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -84,7 +84,7 @@ impl runtime_parachains::configuration::WeightInfo for // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_balance() -> Weight { - (12_644_000 as Weight) + (12_852_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/polkadot/src/weights/runtime_parachains_initializer.rs b/runtime/polkadot/src/weights/runtime_parachains_initializer.rs index 6b41892d3e45..b74f6056e8de 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_initializer.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_initializer.rs @@ -16,13 +16,13 @@ //! Autogenerated weights for `runtime_parachains::initializer` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2021-09-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 +//! DATE: 2021-10-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 // Executed Command: // target/release/polkadot // benchmark -// --chain=kusama-dev +// --chain=polkadot-dev // --steps=50 // --repeat=20 // --pallet=runtime_parachains::initializer @@ -31,7 +31,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/runtime_parachains_initializer.rs +// --output=./runtime/polkadot/src/weights/runtime_parachains_initializer.rs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -46,7 +46,7 @@ pub struct WeightInfo(PhantomData); impl runtime_parachains::initializer::WeightInfo for WeightInfo { // Storage: System Digest (r:1 w:1) fn force_approve(d: u32, ) -> Weight { - (5_156_000 as Weight) + (5_698_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) diff --git a/runtime/polkadot/src/weights/runtime_parachains_paras.rs b/runtime/polkadot/src/weights/runtime_parachains_paras.rs index 728d95561bfc..75c20b3301c4 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_paras.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_paras.rs @@ -16,13 +16,13 @@ //! Autogenerated weights for `runtime_parachains::paras` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-09-21, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 +//! DATE: 2021-10-11, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 // Executed Command: // target/release/polkadot // benchmark -// --chain=kusama-dev +// --chain=polkadot-dev // --steps=50 // --repeat=20 // --pallet=runtime_parachains::paras @@ -31,7 +31,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/runtime_parachains_paras.rs +// --output=./runtime/polkadot/src/weights/runtime_parachains_paras.rs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -59,7 +59,7 @@ impl runtime_parachains::paras::WeightInfo for WeightIn } // Storage: Paras Heads (r:0 w:1) fn force_set_current_head(s: u32, ) -> Weight { - (16_088_000 as Weight) + (16_226_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -90,7 +90,7 @@ impl runtime_parachains::paras::WeightInfo for WeightIn // Storage: Paras PastCodeHash (r:0 w:1) // Storage: Paras UpgradeGoAheadSignal (r:0 w:1) fn force_note_new_head(s: u32, ) -> Weight { - (69_114_000 as Weight) + (68_208_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) @@ -99,7 +99,7 @@ impl runtime_parachains::paras::WeightInfo for WeightIn // Storage: ParasShared CurrentSessionIndex (r:1 w:0) // Storage: Paras ActionsQueue (r:1 w:1) fn force_queue_action() -> Weight { - (26_752_000 as Weight) + (26_462_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index 294cb062a225..388d49101c16 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rococo-runtime" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" diff 
--git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 900d53150ef6..2f7bc9ffbc28 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -38,8 +38,9 @@ use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, Moment, - Nonce, OccupiedCoreAssumption, PersistedValidationData, SessionInfo as SessionInfoData, - Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo as SessionInfoData, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, }; use runtime_common::{ auctions, crowdloan, impls::ToAuthor, paras_registrar, paras_sudo_wrapper, slots, xcm_sender, @@ -55,7 +56,7 @@ use sp_runtime::{ OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, KeyTypeId, Perbill, + ApplyExtrinsicResult, KeyTypeId, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -83,10 +84,10 @@ use constants::{currency::*, fee::*, time::*}; use frame_support::traits::InstanceFilter; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, BackingToPlurality, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, - IsConcrete, LocationInverter, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, UsingComponents, + AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, BackingToPlurality, + ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, + CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, IsConcrete, LocationInverter, 
+ SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, UsingComponents, }; use xcm_executor::XcmExecutor; @@ -254,7 +255,7 @@ construct_runtime! { Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, // Pallet for sending XCM. - XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 99, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, } } @@ -452,19 +453,19 @@ impl pallet_timestamp::Config for Runtime { parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; + /// This value increases the priority of `Operational` transactions by adding + /// a "virtual tip" that's equal to the `OperationalFeeMultiplier * final_fee`. + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; } -parameter_types! { - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); -} - /// Special `ValidatorIdOf` implementation that is just returning the input as result. 
pub struct ValidatorIdOf; impl sp_runtime::traits::Convert> for ValidatorIdOf { @@ -482,7 +483,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = (); } @@ -574,7 +574,7 @@ impl pallet_authorship::Config for Runtime { impl parachains_origin::Config for Runtime {} impl parachains_configuration::Config for Runtime { - type WeightInfo = parachains_configuration::weights::WeightInfo; + type WeightInfo = weights::runtime_parachains_configuration::WeightInfo; } impl parachains_shared::Config for Runtime {} @@ -595,7 +595,7 @@ impl parachains_inclusion::Config for Runtime { impl parachains_paras::Config for Runtime { type Origin = Origin; type Event = Event; - type WeightInfo = parachains_paras::weights::WeightInfo; + type WeightInfo = weights::runtime_parachains_paras::WeightInfo; } parameter_types! { @@ -636,7 +636,7 @@ parameter_types! { /// individual routers. pub type XcmRouter = ( // Only one router so far - use DMP to communicate with child parachains. - xcm_sender::ChildParachainRouter, + xcm_sender::ChildParachainRouter, ); parameter_types! { @@ -644,7 +644,7 @@ parameter_types! 
{ pub const RococoForTick: (MultiAssetFilter, MultiLocation) = (Rococo::get(), Parachain(100).into()); pub const RococoForTrick: (MultiAssetFilter, MultiLocation) = (Rococo::get(), Parachain(110).into()); pub const RococoForTrack: (MultiAssetFilter, MultiLocation) = (Rococo::get(), Parachain(120).into()); - pub const RococoForStatemint: (MultiAssetFilter, MultiLocation) = (Rococo::get(), Parachain(1001).into()); + pub const RococoForRockmine: (MultiAssetFilter, MultiLocation) = (Rococo::get(), Parachain(1001).into()); pub const RococoForCanvas: (MultiAssetFilter, MultiLocation) = (Rococo::get(), Parachain(1002).into()); pub const MaxInstructions: u32 = 100; } @@ -652,7 +652,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, - xcm_builder::Case, + xcm_builder::Case, xcm_builder::Case, ); @@ -672,6 +672,10 @@ pub type Barrier = ( TakeWeightCredit, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom>, // <- Trusted parachains get free execution + // Expected responses are OK. + AllowKnownQueryResponses, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, ); pub struct XcmConfig; @@ -999,6 +1003,7 @@ impl slots::Config for Runtime { type Currency = Balances; type Registrar = Registrar; type LeasePeriod = LeasePeriod; + type LeaseOffset = (); type WeightInfo = slots::TestWeightInfo; } @@ -1237,6 +1242,16 @@ sp_api::impl_runtime_apis! { runtime_api_impl::persisted_validation_data::(para_id, assumption) } + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + runtime_api_impl::assumed_validation_data::( + para_id, + expected_persisted_validation_data_hash, + ) + } + fn check_validation_outputs( para_id: Id, outputs: primitives::v1::CandidateCommitments, @@ -1285,6 +1300,10 @@ sp_api::impl_runtime_apis! 
{ fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { runtime_api_impl::validation_code_by_hash::(hash) } + + fn on_chain_votes() -> Option> { + runtime_api_impl::on_chain_votes::() + } } impl fg_primitives::GrandpaApi for Runtime { @@ -1594,7 +1613,9 @@ sp_api::impl_runtime_apis! { let mut list = Vec::::new(); + list_benchmark!(list, extra, runtime_parachains::configuration, Configuration); list_benchmark!(list, extra, runtime_parachains::disputes, ParasDisputes); + list_benchmark!(list, extra, runtime_parachains::paras, Paras); let storage_info = AllPalletsWithSystem::storage_info(); @@ -1624,7 +1645,9 @@ sp_api::impl_runtime_apis! { ]; let params = (&config, &whitelist); + add_benchmark!(params, batches, runtime_parachains::configuration, Configuration); add_benchmark!(params, batches, runtime_parachains::disputes, ParasDisputes); + add_benchmark!(params, batches, runtime_parachains::paras, Paras); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) diff --git a/runtime/rococo/src/weights/mod.rs b/runtime/rococo/src/weights/mod.rs index bcd0ec248a5f..0c18d1ff9201 100644 --- a/runtime/rococo/src/weights/mod.rs +++ b/runtime/rococo/src/weights/mod.rs @@ -1 +1,20 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A list of the different weight modules for our runtime. 
+ +pub mod runtime_parachains_configuration; pub mod runtime_parachains_disputes; +pub mod runtime_parachains_paras; diff --git a/runtime/parachains/src/configuration/weights.rs b/runtime/rococo/src/weights/runtime_parachains_configuration.rs similarity index 64% rename from runtime/parachains/src/configuration/weights.rs rename to runtime/rococo/src/weights/runtime_parachains_configuration.rs index da70d6cb29d1..c783fb4131a3 100644 --- a/runtime/parachains/src/configuration/weights.rs +++ b/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -1,28 +1,37 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-09-17, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 +//! DATE: 2021-09-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 // Executed Command: -// ./target/release/polkadot +// target/release/polkadot // benchmark -// --chain -// westend-dev +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --pallet=runtime_parachains::configuration +// --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --pallet -// runtime_parachains::configuration -// --steps -// 50 -// --repeat -// 20 -// --raw -// --extrinsic -// * -// --output -// runtime/parachains/src/configuration/weights.rs +// --heap-pages=4096 +// --output=./runtime/rococo/src/weights/runtime_parachains_configuration.rs +// --header=./file_header.txt #![cfg_attr(rustfmt, rustfmt_skip)] @@ -34,12 +43,12 @@ use sp_std::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. pub struct WeightInfo(PhantomData); -impl super::WeightInfo for WeightInfo { +impl runtime_parachains::configuration::WeightInfo for WeightInfo { // Storage: ParasShared CurrentSessionIndex (r:1 w:0) // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_block_number() -> Weight { - (16_730_000 as Weight) + (13_098_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -47,7 +56,7 @@ impl super::WeightInfo for WeightInfo { // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_u32() -> Weight { - (16_592_000 as Weight) + (13_216_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -55,7 +64,7 @@ impl super::WeightInfo for WeightInfo { // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_option_u32() -> Weight { - (16_419_000 as Weight) + (13_080_000 as Weight) 
.saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -63,7 +72,7 @@ impl super::WeightInfo for WeightInfo { // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_weight() -> Weight { - (16_732_000 as Weight) + (13_178_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -75,7 +84,7 @@ impl super::WeightInfo for WeightInfo { // Storage: Configuration PendingConfig (r:1 w:1) // Storage: Configuration ActiveConfig (r:1 w:0) fn set_config_with_balance() -> Weight { - (16_752_000 as Weight) + (13_080_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/parachains/src/paras/weights.rs b/runtime/rococo/src/weights/runtime_parachains_paras.rs similarity index 67% rename from runtime/parachains/src/paras/weights.rs rename to runtime/rococo/src/weights/runtime_parachains_paras.rs index d02acf4bc687..b3d93dd5c60d 100644 --- a/runtime/parachains/src/paras/weights.rs +++ b/runtime/rococo/src/weights/runtime_parachains_paras.rs @@ -1,28 +1,37 @@ +// Copyright 2017-2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . //! 
Autogenerated weights for `runtime_parachains::paras` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-09-21, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 +//! DATE: 2021-09-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 // Executed Command: -// ./target/release/polkadot +// target/release/polkadot // benchmark -// --chain -// westend-dev +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --pallet=runtime_parachains::paras +// --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --pallet -// runtime_parachains::paras -// --steps -// 50 -// --repeat -// 20 -// --raw -// --extrinsic -// * -// --output -// runtime/parachains/src/paras/weights.rs +// --heap-pages=4096 +// --output=./runtime/rococo/src/weights/runtime_parachains_paras.rs +// --header=./file_header.txt #![cfg_attr(rustfmt, rustfmt_skip)] @@ -34,7 +43,7 @@ use sp_std::marker::PhantomData; /// Weight functions for `runtime_parachains::paras`. 
pub struct WeightInfo(PhantomData); -impl super::WeightInfo for WeightInfo { +impl runtime_parachains::paras::WeightInfo for WeightInfo { // Storage: Paras CurrentCodeHash (r:1 w:1) // Storage: Paras CodeByHashRefs (r:1 w:1) // Storage: Paras PastCodeMeta (r:1 w:1) @@ -42,7 +51,7 @@ impl super::WeightInfo for WeightInfo { // Storage: Paras PastCodeHash (r:0 w:1) // Storage: Paras CodeByHash (r:0 w:1) fn force_set_current_code(c: u32, ) -> Weight { - (14_669_000 as Weight) + (0 as Weight) // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) @@ -50,9 +59,9 @@ impl super::WeightInfo for WeightInfo { } // Storage: Paras Heads (r:0 w:1) fn force_set_current_head(s: u32, ) -> Weight { - (0 as Weight) + (18_653_000 as Weight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Configuration ActiveConfig (r:1 w:0) @@ -65,7 +74,7 @@ impl super::WeightInfo for WeightInfo { // Storage: Paras FutureCodeHash (r:0 w:1) // Storage: Paras UpgradeRestrictionSignal (r:0 w:1) fn force_schedule_code_upgrade(c: u32, ) -> Weight { - (37_383_000 as Weight) + (0 as Weight) // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) @@ -81,16 +90,16 @@ impl super::WeightInfo for WeightInfo { // Storage: Paras PastCodeHash (r:0 w:1) // Storage: Paras UpgradeGoAheadSignal (r:0 w:1) fn force_note_new_head(s: u32, ) -> Weight { - (60_855_000 as Weight) + (69_515_000 as Weight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(9 as Weight)) } // Storage: ParasShared 
CurrentSessionIndex (r:1 w:0) // Storage: Paras ActionsQueue (r:1 w:1) fn force_queue_action() -> Weight { - (32_014_000 as Weight) + (26_804_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index c621fbd3ca12..8c303041bdf9 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-test-runtime" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 842f451c40ed..773c50920c52 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -48,8 +48,8 @@ use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash as HashT, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, - SessionInfo as SessionInfoData, Signature, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, + ScrapedOnChainVotes, SessionInfo as SessionInfoData, Signature, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, }; use runtime_common::{ claims, paras_sudo_wrapper, BlockHashCount, BlockLength, BlockWeights, SlowAdjustingFeeUpdate, @@ -223,11 +223,15 @@ impl pallet_balances::Config for Runtime { parameter_types! { pub storage TransactionByteFee: Balance = 10 * MILLICENTS; + /// This value increases the priority of `Operational` transactions by adding + /// a "virtual tip" that's equal to the `OperationalFeeMultiplier * final_fee`. 
+ pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; } @@ -269,10 +273,6 @@ impl_opaque_keys! { } } -parameter_types! { - pub storage DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); -} - impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; @@ -282,7 +282,6 @@ impl pallet_session::Config for Runtime { type SessionManager = Staking; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = (); } @@ -311,6 +310,7 @@ parameter_types! { pub storage SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub storage MaxNominatorRewardedPerValidator: u32 = 64; + pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; } @@ -336,6 +336,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = frame_election_provider_support::onchain::OnChainSequentialPhragmen; @@ -457,7 +458,7 @@ impl pallet_sudo::Config for Runtime { } impl parachains_configuration::Config for Runtime { - type WeightInfo = parachains_configuration::weights::WeightInfo; + type WeightInfo = parachains_configuration::TestWeightInfo; } impl parachains_shared::Config for Runtime {} @@ -488,7 +489,7 @@ impl parachains_session_info::Config for Runtime {} impl parachains_paras::Config for Runtime { 
type Origin = Origin; type Event = Event; - type WeightInfo = parachains_paras::weights::WeightInfo; + type WeightInfo = parachains_paras::TestWeightInfo; } impl parachains_dmp::Config for Runtime {} @@ -800,6 +801,16 @@ sp_api::impl_runtime_apis! { runtime_impl::persisted_validation_data::(para_id, assumption) } + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + runtime_impl::assumed_validation_data::( + para_id, + expected_persisted_validation_data_hash, + ) + } + fn check_validation_outputs( para_id: ParaId, outputs: primitives::v1::CandidateCommitments, @@ -845,6 +856,10 @@ sp_api::impl_runtime_apis! { fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { runtime_impl::validation_code_by_hash::(hash) } + + fn on_chain_votes() -> Option> { + runtime_impl::on_chain_votes::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index 356ecefc5d5c..65a43599440d 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "westend-runtime" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" diff --git a/runtime/westend/src/voter_bags.rs b/runtime/westend/src/bag_thresholds.rs similarity index 100% rename from runtime/westend/src/voter_bags.rs rename to runtime/westend/src/bag_thresholds.rs diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 41c9d98e4076..1ab95475b1b3 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -20,14 +20,13 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] -use frame_support::traits::OnRuntimeUpgrade; use pallet_transaction_payment::CurrencyAdapter; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, }; use runtime_common::{ auctions, crowdloan, impls::ToAuthor, paras_registrar, paras_sudo_wrapper, slots, xcm_sender, @@ -47,8 +46,9 @@ use runtime_parachains::{ use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, - ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, + AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, ChildParachainAsNative, + ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, IsConcrete, LocationInverter, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, WeightInfoBounds, @@ -102,7 +102,7 @@ use constants::{currency::*, fee::*, time::*}; mod weights; // Voter bag threshold definitions. 
-mod voter_bags; +mod bag_thresholds; #[cfg(test)] mod tests; @@ -116,7 +116,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 9110, + spec_version: 9130, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -262,11 +262,15 @@ impl pallet_balances::Config for Runtime { parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; + /// This value increases the priority of `Operational` transactions by adding + /// a "virtual tip" that's equal to the `OperationalFeeMultiplier * final_fee`. + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; } @@ -308,10 +312,6 @@ impl_opaque_keys! { } } -parameter_types! { - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); -} - impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; @@ -321,7 +321,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = weights::pallet_session::WeightInfo; } @@ -343,8 +342,8 @@ parameter_types! { pub SignedRewardBase: Balance = 1 * UNITS; pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(5u32, 10_000); - // miner configs - pub OffchainRepeat: BlockNumber = 5; + // 1 hour session, 15 minutes unsigned phase, 4 offchain executions. 
+ pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 4; /// Whilst `UseNominatorsAndUpdateBagsList` or `UseNominatorsMap` is in use, this can still be a /// very large value. Once the `BagsList` is in full motion, staking might open its door to many @@ -395,7 +394,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { } parameter_types! { - pub const BagThresholds: &'static [u64] = &voter_bags::THRESHOLDS; + pub const BagThresholds: &'static [u64] = &bag_thresholds::THRESHOLDS; } impl pallet_bags_list::Config for Runtime { @@ -425,6 +424,7 @@ parameter_types! { pub const SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); } impl frame_election_provider_support::onchain::Config for Runtime { @@ -450,11 +450,11 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = runtime_common::elections::GenesisElectionOf; - // Use the nominators map to iter voters, but also perform the bags-list migration and keep - // it up-to-date. + // Use the nominators map to iter voters, but also keep bags-list up-to-date. type SortedListProvider = runtime_common::elections::UseNominatorsAndUpdateBagsList; type WeightInfo = weights::pallet_staking::WeightInfo; } @@ -751,7 +751,8 @@ impl InstanceFilter for ProxyType { Call::Registrar(paras_registrar::Call::reserve{..}) | Call::Crowdloan(..) | Call::Slots(..) | - Call::Auctions(..) // Specifically omitting the entire XCM Pallet + Call::Auctions(..) 
| // Specifically omitting the entire XCM Pallet + Call::BagsList(..) ), ProxyType::Staking => { matches!(c, Call::Staking(..) | Call::Session(..) | Call::Utility(..)) @@ -879,6 +880,7 @@ impl slots::Config for Runtime { type Currency = Balances; type Registrar = Registrar; type LeasePeriod = LeasePeriod; + type LeaseOffset = (); type WeightInfo = weights::runtime_common_slots::WeightInfo; } @@ -956,7 +958,7 @@ type LocalOriginConverter = ( /// individual routers. pub type XcmRouter = ( // Only one router so far - use DMP to communicate with child parachains. - xcm_sender::ChildParachainRouter, + xcm_sender::ChildParachainRouter, ); parameter_types! { @@ -975,6 +977,10 @@ pub type Barrier = ( AllowTopLevelPaidExecutionFrom, // Messages coming from system parachains need not pay for execution. AllowUnpaidExecutionFrom>, + // Expected responses are OK. + AllowKnownQueryResponses, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, ); pub struct XcmConfig; @@ -1100,7 +1106,7 @@ construct_runtime! { Crowdloan: crowdloan::{Pallet, Call, Storage, Event} = 64, // Pallet for sending XCM. - XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 99, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, } } @@ -1133,30 +1139,11 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPallets, - (StakingBagsListMigrationV8,), + (), >; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; -// Migration to generate pallet staking's `SortedListProvider` from pre-existing nominators. 
-pub struct StakingBagsListMigrationV8; - -impl OnRuntimeUpgrade for StakingBagsListMigrationV8 { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - pallet_staking::migrations::v8::migrate::() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - pallet_staking::migrations::v8::pre_migrate::() - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - pallet_staking::migrations::v8::post_migrate::() - } -} - #[cfg(not(feature = "disable-runtime-api"))] sp_api::impl_runtime_apis! { impl sp_api::Core for Runtime { @@ -1234,6 +1221,16 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) } + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + parachains_runtime_api_impl::assumed_validation_data::( + para_id, + expected_persisted_validation_data_hash, + ) + } + fn check_validation_outputs( para_id: ParaId, outputs: primitives::v1::CandidateCommitments, @@ -1282,6 +1279,10 @@ sp_api::impl_runtime_apis! { fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { parachains_runtime_api_impl::validation_code_by_hash::(hash) } + + fn on_chain_votes() -> Option> { + parachains_runtime_api_impl::on_chain_votes::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/westend/src/tests.rs b/runtime/westend/src/tests.rs index f7d4e749c252..841089f93ead 100644 --- a/runtime/westend/src/tests.rs +++ b/runtime/westend/src/tests.rs @@ -49,3 +49,21 @@ fn call_size() { If the limit is too strong, maybe consider increase the limit to 300.", ); } + +#[test] +fn sanity_check_teleport_assets_weight() { + // This test sanity checks that at least 50 teleports can exist in a block. 
+ // Usually when XCM runs into an issue, it will return a weight of `Weight::MAX`, + // so this test will certainly ensure that this problem does not occur. + use frame_support::dispatch::GetDispatchInfo; + let weight = pallet_xcm::Call::::teleport_assets { + dest: Box::new(xcm::VersionedMultiLocation::V1(MultiLocation::here())), + beneficiary: Box::new(xcm::VersionedMultiLocation::V1(MultiLocation::here())), + assets: Box::new((Concrete(MultiLocation::here()), Fungible(200_000)).into()), + fee_asset_item: 0, + } + .get_dispatch_info() + .weight; + + assert!(weight * 50 < BlockWeights::get().max_block); +} diff --git a/runtime/westend/src/weights/xcm/mod.rs b/runtime/westend/src/weights/xcm/mod.rs index 1b0d7c1ef485..70c5731a7853 100644 --- a/runtime/westend/src/weights/xcm/mod.rs +++ b/runtime/westend/src/weights/xcm/mod.rs @@ -31,7 +31,9 @@ trait WeighMultiAssets { fn weigh_multi_assets(&self, balances_weight: Weight) -> Weight; } -// TODO wild case +// Westend only knows about one asset, the balances pallet. +const MAX_ASSETS: u32 = 1; + impl WeighMultiAssets for MultiAssetFilter { fn weigh_multi_assets(&self, balances_weight: Weight) -> Weight { match self { @@ -44,7 +46,7 @@ impl WeighMultiAssets for MultiAssetFilter { AssetTypes::Unknown => Weight::MAX, }) .fold(0, |acc, x| acc.saturating_add(x)), - _ => Weight::MAX, + Self::Wild(_) => (MAX_ASSETS as Weight).saturating_mul(balances_weight), } } } diff --git a/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 869c1bca408f..08b75be93653 100644 --- a/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,7 +17,7 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2021-09-17, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-10-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 // Executed Command: @@ -48,48 +48,59 @@ pub struct WeightInfo(PhantomData); impl WeightInfo { // Storage: System Account (r:1 w:1) pub(crate) fn withdraw_asset() -> Weight { - (39_691_000 as Weight) + (43_806_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:2 w:2) pub(crate) fn transfer_asset() -> Weight { - (62_616_000 as Weight) + (68_076_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:2 w:2) + // Storage: XcmPallet SupportedVersion (r:1 w:0) + // Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) + // Storage: XcmPallet SafeXcmVersion (r:1 w:0) // Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) // Storage: Dmp DownwardMessageQueues (r:1 w:1) pub(crate) fn transfer_reserve_asset() -> Weight { - (86_642_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + (99_639_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } - // Storage: Benchmark Override (r:0 w:0) + // Storage: System Account (r:1 w:1) pub(crate) fn receive_teleported_asset() -> Weight { - (2_000_000_000_000 as Weight) + (38_670_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) pub(crate) fn deposit_asset() -> Weight { - (49_745_000 as Weight) + (51_793_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 
as Weight)) } // Storage: System Account (r:1 w:1) + // Storage: XcmPallet SupportedVersion (r:1 w:0) + // Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) + // Storage: XcmPallet SafeXcmVersion (r:1 w:0) // Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) // Storage: Dmp DownwardMessageQueues (r:1 w:1) pub(crate) fn deposit_reserve_asset() -> Weight { - (75_318_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + (85_782_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: System Account (r:1 w:1) + // Storage: XcmPallet SupportedVersion (r:1 w:0) + // Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) + // Storage: XcmPallet SafeXcmVersion (r:1 w:0) // Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) // Storage: Dmp DownwardMessageQueues (r:1 w:1) pub(crate) fn initiate_teleport() -> Weight { - (75_467_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + (84_873_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } } diff --git a/scripts/dockerfiles/polkadot/build.sh b/scripts/dockerfiles/polkadot/build.sh index ee4d29991a86..cf100bedbfaa 100755 --- a/scripts/dockerfiles/polkadot/build.sh +++ b/scripts/dockerfiles/polkadot/build.sh @@ -8,19 +8,17 @@ PROJECT_ROOT=`git rev-parse --show-toplevel` cd $PROJECT_ROOT # Find the current version from Cargo.toml -VERSION=`grep "^version" ./Cargo.toml | egrep -o "([0-9\.]+)"` +VERSION=`grep "^version" ./cli/Cargo.toml | egrep -o "([0-9\.]+)"` GITUSER=parity GITREPO=polkadot # Build the image echo "Building ${GITUSER}/${GITREPO}:latest docker image, hang on!" 
-time docker build -f ./scripts/docker/polkadot/polkadot_builder.Dockerfile --build-arg RUSTC_WRAPPER= --build-arg PROFILE=release -t ${GITUSER}/${GITREPO}:latest . +time docker build -f ./scripts/dockerfiles/polkadot/polkadot_builder.Dockerfile -t ${GITUSER}/${GITREPO}:latest . +docker tag ${GITUSER}/${GITREPO}:latest ${GITUSER}/${GITREPO}:v${VERSION} # Show the list of available images for this repo echo "Image is ready" docker images | grep ${GITREPO} -echo -e "\nIf you just built version ${VERSION}, you may want to update your tag:" -echo " $ docker tag ${GITUSER}/${GITREPO}:$VERSION ${GITUSER}/${GITREPO}:${VERSION}" - popd diff --git a/scripts/dockerfiles/polkadot/docker-compose-local.yml b/scripts/dockerfiles/polkadot/docker-compose-local.yml index 2c93799259ef..1ff3a1ccaac2 100644 --- a/scripts/dockerfiles/polkadot/docker-compose-local.yml +++ b/scripts/dockerfiles/polkadot/docker-compose-local.yml @@ -1,35 +1,39 @@ version: '3' services: node_alice: - build: - context: . - dockerfile: polkadot_builder.Dockerfile ports: - "30333:30333" - "9933:9933" - "9944:9944" - image: chevdor/polkadot:latest + - "9615:9615" + image: parity/polkadot:latest volumes: - "polkadot-data-alice:/data" - command: polkadot --chain=polkadot-local --alice -d /data --node-key 0000000000000000000000000000000000000000000000000000000000000001 + command: | + --chain=polkadot-local + --alice + -d /data + --node-key 0000000000000000000000000000000000000000000000000000000000000001 networks: testing_net: ipv4_address: 172.28.1.1 node_bob: - build: - context: . 
- dockerfile: polkadot_builder.Dockerfile ports: - - "30344:30344" - - "9935:9935" - - "9945:9945" - image: chevdor/polkadot:latest + - "30344:30333" + - "9935:9933" + - "9945:9944" + - "29615:9615" + image: parity/polkadot:latest volumes: - "polkadot-data-bob:/data" links: - "node_alice:alice" - command: polkadot --chain=polkadot-local --bob -d /data --port 30344 --rpc-port 9935 --ws-port 9945 --bootnodes '/ip4/172.28.1.1/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR' + command: | + --chain=polkadot-local + --bob + -d /data + --bootnodes '/ip4/172.28.1.1/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR' networks: testing_net: ipv4_address: 172.28.1.2 diff --git a/scripts/dockerfiles/polkadot/docker-compose.yml b/scripts/dockerfiles/polkadot/docker-compose.yml index 54fe158735d8..978191af88c1 100644 --- a/scripts/dockerfiles/polkadot/docker-compose.yml +++ b/scripts/dockerfiles/polkadot/docker-compose.yml @@ -1,16 +1,23 @@ version: '3' services: polkadot: - build: - context: . - dockerfile: polkadot_builder.Dockerfile ports: - "127.0.0.1:30333:30333/tcp" - "127.0.0.1:9933:9933/tcp" - image: chevdor/polkadot:latest + image: parity/polkadot:latest volumes: - "polkadot-data:/data" - command: polkadot + command: | + --unsafe-rpc-external + --unsafe-ws-external + --rpc-cors all + --prometheus-external + + ports: + - "30333:30333" + - "9933:9933" + - "9944:9944" + - "9615:9615" volumes: polkadot-data: diff --git a/scripts/dockerfiles/polkadot/polkadot_builder.Dockerfile b/scripts/dockerfiles/polkadot/polkadot_builder.Dockerfile index 2fc5787e6cb0..6b096244a506 100644 --- a/scripts/dockerfiles/polkadot/polkadot_builder.Dockerfile +++ b/scripts/dockerfiles/polkadot/polkadot_builder.Dockerfile @@ -1,27 +1,35 @@ +# This is the build stage for Polkadot. Here we create the binary in a temporary image. FROM docker.io/paritytech/ci-linux:production as builder -LABEL io.parity.image.description="This is the build stage for Polkadot. 
Here we create the binary." WORKDIR /polkadot - COPY . /polkadot -RUN cargo build --release --locked - -# ===== SECOND STAGE ====== +RUN cargo build --locked --release +# This is the 2nd stage: a very small image where we copy the Polkadot binary." FROM docker.io/library/ubuntu:20.04 -LABEL io.parity.image.description="Polkadot: a platform for web3. This is a self-buit multistage image." + +LABEL description="Multistage Docker image for Polkadot: a platform for web3" \ + io.parity.image.type="builder" \ + io.parity.image.authors="chevdor@gmail.com, devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.description="Polkadot: a platform for web3" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/dockerfiles/polkadot/polkadot_builder.Dockerfile" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" COPY --from=builder /polkadot/target/release/polkadot /usr/local/bin RUN useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ - mkdir -p /polkadot/.local/share && \ - mkdir /data && \ + mkdir -p /data /polkadot/.local/share && \ chown -R polkadot:polkadot /data && \ ln -s /data /polkadot/.local/share/polkadot && \ - rm -rf /usr/bin /usr/sbin +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/polkadot --version USER polkadot + EXPOSE 30333 9933 9944 9615 VOLUME ["/data"] diff --git a/scripts/dockerfiles/polkadot_injected_release.Dockerfile b/scripts/dockerfiles/polkadot_injected_release.Dockerfile index c4c8aea182ed..4b0ca7ef1329 100644 --- a/scripts/dockerfiles/polkadot_injected_release.Dockerfile +++ b/scripts/dockerfiles/polkadot_injected_release.Dockerfile @@ -11,7 +11,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="parity/polkadot" \ io.parity.image.description="Polkadot: a platform for web3. 
This is the official Parity image with an injected binary." \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/docker/polkadot_injected_release.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/dockerfiles/polkadot_injected_release.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/polkadot/" diff --git a/scripts/github/extrinsic-ordering-filter.sh b/scripts/github/extrinsic-ordering-filter.sh new file mode 100755 index 000000000000..4fd3337f64a6 --- /dev/null +++ b/scripts/github/extrinsic-ordering-filter.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# This script is used in a Github Workflow. It helps filtering out what is interesting +# when comparing metadata and spot what would require a tx version bump. + +# shellcheck disable=SC2002,SC2086 + +FILE=$1 + +# Higlight indexes that were deleted +function find_deletions() { + echo "\n## Deletions\n" + RES=$(cat "$FILE" | grep -n '\[\-\]' | tr -s " ") + if [ "$RES" ]; then + echo "$RES" | awk '{ printf "%s\\n", $0 }' + else + echo "n/a" + fi +} + +# Highlight indexes that have been deleted +function find_index_changes() { + echo "\n## Index changes\n" + RES=$(cat "$FILE" | grep -E -n -i 'idx:\s*([0-9]+)\s*(->)\s*([0-9]+)' | tr -s " ") + if [ "$RES" ]; then + echo "$RES" | awk '{ printf "%s\\n", $0 }' + else + echo "n/a" + fi +} + +# Highlight values that decreased +function find_decreases() { + echo "\n## Decreases\n" + OUT=$(cat "$FILE" | grep -E -i -o '([0-9]+)\s*(->)\s*([0-9]+)' | awk '$1 > $3 { printf "%s;", $0 }') + IFS=$';' LIST=("$OUT") + unset RES + for line in "${LIST[@]}"; do + RES="$RES\n$(cat "$FILE" | grep -E -i -n \"$line\" | tr -s " ")" + done + + if [ "$RES" ]; then + echo "$RES" | awk '{ printf "%s\\n", $0 }' | sort -u -g | uniq + else + echo "n/a" + fi +} + +echo "\n------------------------------ SUMMARY 
-------------------------------" +echo "\n⚠️ This filter is here to help spotting changes that should be reviewed carefully." +echo "\n⚠️ It catches only index changes, deletions and value decreases". + +find_deletions "$FILE" +find_index_changes "$FILE" +find_decreases "$FILE" +echo "\n----------------------------------------------------------------------\n" diff --git a/scripts/github/generate_release_text.rb b/scripts/github/generate_release_text.rb index a35154bd1a66..c1fe06f45ebd 100644 --- a/scripts/github/generate_release_text.rb +++ b/scripts/github/generate_release_text.rb @@ -9,8 +9,34 @@ require 'toml' require_relative './lib.rb' +# A logger only active when NOT running in CI +def logger(s) + puts "▶ DEBUG: %s" % [s] if ENV['CI'] != 'true' +end + +# Check if all the required ENV are set +# This is especially convenient when testing locally +def check_env() + if ENV['CI'] != 'true' then + logger("Running locally") + vars = ['GITHUB_REF', 'GITHUB_TOKEN', 'GITHUB_WORKSPACE', 'GITHUB_REPOSITORY', 'RUSTC_STABLE', 'RUSTC_NIGHTLY'] + vars.each { |x| + env = (ENV[x] || "") + if env.length > 0 then + logger("- %s:\tset: %s, len: %d" % [x, env.length > 0 || false, env.length]) + else + logger("- %s:\tset: %s, len: %d" % [x, env.length > 0 || false, env.length]) + end + } + end +end + +check_env() + current_ref = ENV['GITHUB_REF'] token = ENV['GITHUB_TOKEN'] + +logger("Connecting to Github") github_client = Octokit::Client.new( access_token: token ) @@ -19,13 +45,15 @@ # Generate an ERB renderer based on the template .erb file renderer = ERB.new( - File.read(ENV['GITHUB_WORKSPACE'] + '/polkadot/scripts/github/polkadot_release.erb'), + File.read(File.join(polkadot_path, 'scripts/github/polkadot_release.erb')), trim_mode: '<>' ) # get ref of last polkadot release last_ref = 'refs/tags/' + github_client.latest_release(ENV['GITHUB_REPOSITORY']).tag_name +logger("Last ref: " + last_ref) +logger("Generate changelog for Polkadot") polkadot_cl = Changelog.new( 
'paritytech/polkadot', last_ref, current_ref, token: token ) @@ -47,6 +75,7 @@ def get_substrate_commit(client, ref) substrate_prev_sha = get_substrate_commit(github_client, last_ref) substrate_cur_sha = get_substrate_commit(github_client, current_ref) +logger("Generate changelog for Substrate") substrate_cl = Changelog.new( 'paritytech/substrate', substrate_prev_sha, substrate_cur_sha, token: token, diff --git a/scripts/github/polkadot_release.erb b/scripts/github/polkadot_release.erb index 2078fa3bb96f..3fcf07dddc5c 100644 --- a/scripts/github/polkadot_release.erb +++ b/scripts/github/polkadot_release.erb @@ -11,11 +11,11 @@ This release was tested against the following versions of `rustc`. Other version - <%= rustc_stable %> - <%= rustc_nightly %> -WASM runtimes built with [srtool](https://github.com/paritytech/srtool) using `<%= polkadot_json['rustc'] %>`. +WASM runtimes built with [<%= polkadot_json['info']['generator']['name'] %> v<%= polkadot_json['info']['generator']['version'] %>](https://github.com/paritytech/srtool) using `<%= polkadot_json['rustc'] %>`. Proposal hashes: -* `polkadot_runtime-v<%= polkadot_runtime %>.compact.wasm - <%= polkadot_json['prop'] %>` -* `kusama_runtime-v<%= kusama_runtime %>.compact.wasm - <%= kusama_json['prop'] %>` +* `polkadot_runtime-v<%= polkadot_runtime %>.compact.compressed.wasm`: `<%= polkadot_json['runtimes']['compressed']['prop'] %>` +* `kusama_runtime-v<%= kusama_runtime %>.compact.compressed.wasm`: `<%= kusama_json['runtimes']['compressed']['prop'] %>` <% unless misc_changes.empty? 
%> ## Changes diff --git a/scripts/gitlab/check_line_width.sh b/scripts/gitlab/check_line_width.sh index c31cab446579..b147488d92db 100755 --- a/scripts/gitlab/check_line_width.sh +++ b/scripts/gitlab/check_line_width.sh @@ -8,7 +8,7 @@ BASE_BRANCH="origin/master" LINE_WIDTH="121" GOOD_LINE_WIDTH="101" -git diff --name-only "${BASE_BRANCH}...${CI_COMMIT_SHA}" -- \*.rs | ( while read -r file +git diff --name-only "${BASE_BRANCH}...${CI_COMMIT_SHA}" -- \*.rs :^bridges | ( while read -r file do if [ ! -f "${file}" ]; then diff --git a/scripts/gitlab/lingua.dic b/scripts/gitlab/lingua.dic index a32366d63ee6..3d65a40c8187 100644 --- a/scripts/gitlab/lingua.dic +++ b/scripts/gitlab/lingua.dic @@ -27,6 +27,7 @@ blockchain/MS borked broadcast/UDSMG BTC/S +canonicalization canonicalize/D CentOS CLI/MS @@ -66,6 +67,7 @@ encodable enqueue/D enqueue/DMSG entrypoint/MS +enum ERC-20 ETH/S ethereum/MS @@ -180,6 +182,7 @@ phragmen picosecond/SM PoA/MS polkadot/MS +Polkadot/MS PoS/MS PoV/MS PoW/MS @@ -214,6 +217,7 @@ rpc RPC/MS runtime/MS rustc/MS +sybil SAFT scalable scalability @@ -250,6 +254,8 @@ teleporter/SM teleporters testnet/MS timestamp/MS +timeframe +tradeoff transitionary trie/MS trustless/Y @@ -259,6 +265,7 @@ typesystem ubuntu/M UDP UI +unconcluded unfinalize/B unfinalized union/MSG @@ -288,6 +295,7 @@ VMP/SM VPS VRF/SM w3f/MS +wakeup wakeups warming/S wasm/M diff --git a/statement-table/Cargo.toml b/statement-table/Cargo.toml index b6ae92a22c5b..c5d490ac1fd9 100644 --- a/statement-table/Cargo.toml +++ b/statement-table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-statement-table" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 9120010986a4..db40c88d75c1 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -172,12 +172,12 @@ pub type MisbehaviorFor = Misbehavior< ::Signature, >; -// kinds of votes for 
validity +// Kinds of votes for validity on a particular candidate. #[derive(Clone, PartialEq, Eq)] enum ValidityVote { - // implicit validity vote by issuing + // Implicit validity vote. Issued(Signature), - // direct validity vote + // Direct validity vote. Valid(Signature), } diff --git a/utils/remote-ext-tests/bags-list/Cargo.toml b/utils/remote-ext-tests/bags-list/Cargo.toml index 6f97eabcf823..1421a78b1249 100644 --- a/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/utils/remote-ext-tests/bags-list/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "remote-ext-tests-bags-list" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -21,4 +21,4 @@ frame-support = { git = "https://github.com/paritytech/substrate", branch = "mas remote-externalities = { git = "https://github.com/paritytech/substrate", branch = "master" } tokio = { version = "1", features = ["macros"] } log = { version = "0.4.14" } -structopt = {version = "0.3.23" } +structopt = {version = "0.3.25" } diff --git a/utils/remote-ext-tests/bags-list/src/main.rs b/utils/remote-ext-tests/bags-list/src/main.rs index 410cb1e80511..c7ef5cc62791 100644 --- a/utils/remote-ext-tests/bags-list/src/main.rs +++ b/utils/remote-ext-tests/bags-list/src/main.rs @@ -23,6 +23,7 @@ mod voter_bags; #[derive(StructOpt)] enum Runtime { Kusama, + Polkadot, } impl std::str::FromStr for Runtime { @@ -30,6 +31,7 @@ impl std::str::FromStr for Runtime { fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "kusama" => Ok(Runtime::Kusama), + "polkadot" => Ok(Runtime::Polkadot), _ => Err("wrong Runtime: can be 'polkadot' or 'kusama'."), } } @@ -37,9 +39,9 @@ impl std::str::FromStr for Runtime { #[derive(StructOpt)] struct Cli { - #[structopt(long, default_value = "wss://rpc.kusama.io")] + #[structopt(long, default_value = "wss://rpc.polkadot.io")] uri: String, - #[structopt(long, short, default_value = "kusama")] + #[structopt(long, short, default_value = "polkadot")] runtime: 
Runtime, } @@ -55,5 +57,13 @@ async fn main() { ) .await; }, + Runtime::Polkadot => { + use polkadot_runtime::{constants::currency::UNITS, Block, Runtime}; + voter_bags::test_voter_bags_migration::( + UNITS as u64, + options.uri.clone(), + ) + .await; + }, } } diff --git a/utils/remote-ext-tests/bags-list/src/voter_bags.rs b/utils/remote-ext-tests/bags-list/src/voter_bags.rs index 4b7d7d6ec88d..0ed91c14b059 100644 --- a/utils/remote-ext-tests/bags-list/src/voter_bags.rs +++ b/utils/remote-ext-tests/bags-list/src/voter_bags.rs @@ -54,6 +54,15 @@ pub(crate) async fn test_voter_bags_migration< // set the ss58 prefix so addresses printed below are human friendly. sp_core::crypto::set_default_ss58_version(Runtime::SS58Prefix::get().try_into().unwrap()); + // clear anything that may have existed before. + if ::SortedListProvider::count() != 0 { + log::warn!( + target: LOG_TARGET, + "some data already seem to exist in the bags-list pallet.." + ); + } + ::SortedListProvider::clear(None); + // get the nominator & validator count prior to migrating; these should be invariant. 
let pre_migrate_nominator_count = >::iter().count() as u32; log::info!(target: LOG_TARGET, "Nominator count: {}", pre_migrate_nominator_count); @@ -99,7 +108,7 @@ pub(crate) async fn test_voter_bags_migration< (*vote_weight_thresh).try_into().map_err(|_| "should not fail").unwrap(); if vote_weight_as_balance <= min_nominator_bond { for id in bag.std_iter().map(|node| node.std_id().clone()) { - log::trace!( + log::error!( target: LOG_TARGET, "{} Account found below min bond: {:?}.", pretty_thresh, diff --git a/utils/staking-miner/Cargo.toml b/utils/staking-miner/Cargo.toml index 296240a705d3..042f7da98594 100644 --- a/utils/staking-miner/Cargo.toml +++ b/utils/staking-miner/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "staking-miner" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] edition = "2018" @@ -9,12 +9,12 @@ codec = { package = "parity-scale-codec", version = "2.0.0" } tokio = { version = "1.12", features = ["macros"] } log = "0.4.11" env_logger = "0.9.0" -structopt = "0.3.23" -jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = ["tokio1"] } +structopt = "0.3.25" +jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = ["tokio1"] } serde_json = "1.0" serde = "1.0.130" paste = "1.0.5" -thiserror = "1.0.26" +thiserror = "1.0.30" remote-externalities = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/utils/staking-miner/src/dry_run.rs b/utils/staking-miner/src/dry_run.rs index ffebdb3fadf2..9501adb96fab 100644 --- a/utils/staking-miner/src/dry_run.rs +++ b/utils/staking-miner/src/dry_run.rs @@ -117,11 +117,11 @@ macro_rules! dry_run_cmd_for { ($runtime:ident) => { paste::paste! 
{ let mut ext = crate::create_election_ext::( shared.uri.clone(), config.at, - vec!["Staking".to_string(), "System".to_string(), "Balances".to_string()] + vec!["Staking".to_string(), "System".to_string()], ).await?; force_create_snapshot::(&mut ext)?; - let (raw_solution, witness) = crate::mine_with::(&config.solver, &mut ext)?; + let (raw_solution, witness) = crate::mine_with::(&config.solver, &mut ext, false)?; let nonce = crate::get_account_info::(client, &signer.account, config.at) .await? diff --git a/utils/staking-miner/src/emergency_solution.rs b/utils/staking-miner/src/emergency_solution.rs index d27c23e38601..a3847825f5d8 100644 --- a/utils/staking-miner/src/emergency_solution.rs +++ b/utils/staking-miner/src/emergency_solution.rs @@ -16,7 +16,7 @@ //! The emergency-solution command. -use crate::{prelude::*, Error, SharedConfig}; +use crate::{prelude::*, EmergencySolutionConfig, Error, SharedConfig}; use codec::Encode; use frame_election_provider_support::SequentialPhragmen; use std::io::Write; @@ -25,25 +25,35 @@ macro_rules! emergency_solution_cmd_for { ($runtime:ident) => { paste::paste! { /// Execute the emergency-solution command. pub(crate) async fn []( shared: SharedConfig, + config: EmergencySolutionConfig, ) -> Result<(), Error<$crate::[<$runtime _runtime_exports>]::Runtime>> { use $crate::[<$runtime _runtime_exports>]::*; let mut ext = crate::create_election_ext::(shared.uri.clone(), None, vec![]).await?; ext.execute_with(|| { assert!(EPM::Pallet::::current_phase().is_emergency()); + // NOTE: this internally calls feasibility_check, but we just re-do it here as an easy way // to get a `ReadySolution`. 
let (raw_solution, _) = >::mine_solution::>()?; log::info!(target: LOG_TARGET, "mined solution with {:?}", &raw_solution.score); - let ready_solution = EPM::Pallet::::feasibility_check(raw_solution, EPM::ElectionCompute::Signed)?; - let encoded_ready = ready_solution.encode(); + let mut ready_solution = EPM::Pallet::::feasibility_check(raw_solution, EPM::ElectionCompute::Signed)?; + + // maybe truncate. + if let Some(take) = config.take { + log::info!(target: LOG_TARGET, "truncating {} winners to {}", ready_solution.supports.len(), take); + ready_solution.supports.sort_unstable_by_key(|(_, s)| s.total); + ready_solution.supports.truncate(take); + } + + // write to file and stdout. let encoded_support = ready_solution.supports.encode(); - let mut solution_file = std::fs::File::create("solution.bin")?; let mut supports_file = std::fs::File::create("solution.supports.bin")?; - solution_file.write_all(&encoded_ready)?; supports_file.write_all(&encoded_support)?; - log::info!(target: LOG_TARGET, "ReadySolution: size {:?} / score = {:?}", encoded_ready.len(), ready_solution.score); + + log::info!(target: LOG_TARGET, "ReadySolution: size {:?} / score = {:?}", ready_solution.encoded_size(), ready_solution.score); log::trace!(target: LOG_TARGET, "Supports: {}", sp_core::hexdisplay::HexDisplay::from(&encoded_support)); + Ok(()) }) } diff --git a/utils/staking-miner/src/main.rs b/utils/staking-miner/src/main.rs index b3f504ff2d07..5692d991e23e 100644 --- a/utils/staking-miner/src/main.rs +++ b/utils/staking-miner/src/main.rs @@ -276,7 +276,7 @@ enum Command { /// Just compute a solution now, and don't submit it. DryRun(DryRunConfig), /// Provide a solution that can be submitted to the chain as an emergency response. - EmergencySolution, + EmergencySolution(EmergencySolutionConfig), } #[derive(Debug, Clone, StructOpt)] @@ -291,39 +291,6 @@ enum Solvers { }, } -/// Mine a solution with the given `solver`. 
-fn mine_with( - solver: &Solvers, - ext: &mut Ext, -) -> Result<(EPM::RawSolution>, u32), Error> -where - T: EPM::Config, - T::Solver: NposSolver, -{ - use frame_election_provider_support::{PhragMMS, SequentialPhragmen}; - - match solver { - Solvers::SeqPhragmen { iterations } => { - BalanceIterations::set(*iterations); - mine_unchecked::< - T, - SequentialPhragmen< - ::AccountId, - sp_runtime::Perbill, - Balancing, - >, - >(ext, false) - }, - Solvers::PhragMMS { iterations } => { - BalanceIterations::set(*iterations); - mine_unchecked::< - T, - PhragMMS<::AccountId, sp_runtime::Perbill, Balancing>, - >(ext, false) - }, - } -} - frame_support::parameter_types! { /// Number of balancing iterations for a solution algorithm. Set based on the [`Solvers`] CLI /// config. @@ -341,16 +308,32 @@ struct MonitorConfig { #[structopt(long, default_value = "head", possible_values = &["head", "finalized"])] listen: String, + /// The solver algorithm to use. #[structopt(subcommand)] solver: Solvers, } +#[derive(Debug, Clone, StructOpt)] +struct EmergencySolutionConfig { + /// The block hash at which scraping happens. If none is provided, the latest head is used. + #[structopt(long)] + at: Option, + + /// The solver algorithm to use. + #[structopt(subcommand)] + solver: Solvers, + + /// The number of top backed winners to take. All are taken, if not provided. + take: Option, +} + #[derive(Debug, Clone, StructOpt)] struct DryRunConfig { /// The block hash at which scraping happens. If none is provided, the latest head is used. #[structopt(long)] at: Option, + /// The solver algorithm to use. #[structopt(subcommand)] solver: Solvers, } @@ -407,9 +390,9 @@ async fn create_election_ext( .map_err(|why| Error::RemoteExternalities(why)) } -/// Compute the election at the given block number. It expects to NOT be `Phase::Off`. In other -/// words, the snapshot must exists on the given externalities. -fn mine_unchecked( +/// Compute the election. It expects to NOT be `Phase::Off`. 
In other words, the snapshot must +/// exists on the given externalities. +fn mine_solution( ext: &mut Ext, do_feasibility: bool, ) -> Result<(EPM::RawSolution>, u32), Error> @@ -434,6 +417,40 @@ where }) } +/// Mine a solution with the given `solver`. +fn mine_with( + solver: &Solvers, + ext: &mut Ext, + do_feasibility: bool, +) -> Result<(EPM::RawSolution>, u32), Error> +where + T: EPM::Config, + T::Solver: NposSolver, +{ + use frame_election_provider_support::{PhragMMS, SequentialPhragmen}; + + match solver { + Solvers::SeqPhragmen { iterations } => { + BalanceIterations::set(*iterations); + mine_solution::< + T, + SequentialPhragmen< + ::AccountId, + sp_runtime::Perbill, + Balancing, + >, + >(ext, do_feasibility) + }, + Solvers::PhragMMS { iterations } => { + BalanceIterations::set(*iterations); + mine_solution::< + T, + PhragMMS<::AccountId, sp_runtime::Perbill, Balancing>, + >(ext, do_feasibility) + }, + } +} + #[allow(unused)] fn mine_dpos(ext: &mut Ext) -> Result<(), Error> { ext.execute_with(|| { @@ -474,7 +491,6 @@ fn mine_dpos(ext: &mut Ext) -> Result<(), Error> { pub(crate) async fn check_versions( client: &WsClient, - print: bool, ) -> Result<(), Error> { let linked_version = T::Version::get(); let on_chain_version = rpc_helpers::rpc::( @@ -485,10 +501,9 @@ pub(crate) async fn check_versions( .await .expect("runtime version RPC should always work; qed"); - if print { - log::info!(target: LOG_TARGET, "linked version {:?}", linked_version); - log::info!(target: LOG_TARGET, "on-chain version {:?}", on_chain_version); - } + log::debug!(target: LOG_TARGET, "linked version {:?}", linked_version); + log::debug!(target: LOG_TARGET, "on-chain version {:?}", on_chain_version); + if linked_version != on_chain_version { log::error!( target: LOG_TARGET, @@ -534,7 +549,7 @@ async fn main() { match chain.to_lowercase().as_str() { "polkadot" | "development" => { sp_core::crypto::set_default_ss58_version( - sp_core::crypto::Ss58AddressFormat::PolkadotAccount, + 
sp_core::crypto::Ss58AddressFormatRegistry::PolkadotAccount.into(), ); sub_tokens::dynamic::set_name("DOT"); sub_tokens::dynamic::set_decimal_points(10_000_000_000); @@ -546,7 +561,7 @@ async fn main() { }, "kusama" | "kusama-dev" => { sp_core::crypto::set_default_ss58_version( - sp_core::crypto::Ss58AddressFormat::KusamaAccount, + sp_core::crypto::Ss58AddressFormatRegistry::KusamaAccount.into(), ); sub_tokens::dynamic::set_name("KSM"); sub_tokens::dynamic::set_decimal_points(1_000_000_000_000); @@ -558,7 +573,7 @@ async fn main() { }, "westend" => { sp_core::crypto::set_default_ss58_version( - sp_core::crypto::Ss58AddressFormat::PolkadotAccount, + sp_core::crypto::Ss58AddressFormatRegistry::PolkadotAccount.into(), ); sub_tokens::dynamic::set_name("WND"); sub_tokens::dynamic::set_decimal_points(1_000_000_000_000); @@ -576,7 +591,7 @@ async fn main() { log::info!(target: LOG_TARGET, "connected to chain {:?}", chain); any_runtime_unit! { - check_versions::(&client, true).await + check_versions::(&client).await }; let signer_account = any_runtime! { @@ -595,7 +610,7 @@ async fn main() { .map_err(|e| { log::error!(target: LOG_TARGET, "DryRun error: {:?}", e); }), - Command::EmergencySolution => emergency_solution_cmd(shared.clone()).await + Command::EmergencySolution(c) => emergency_solution_cmd(shared.clone(), c).await .map_err(|e| { log::error!(target: LOG_TARGET, "EmergencySolution error: {:?}", e); }), diff --git a/utils/staking-miner/src/monitor.rs b/utils/staking-miner/src/monitor.rs index 396700335dd7..6bd90666f7ca 100644 --- a/utils/staking-miner/src/monitor.rs +++ b/utils/staking-miner/src/monitor.rs @@ -21,7 +21,7 @@ use crate::{ }; use codec::Encode; use jsonrpsee_ws_client::{ - types::{traits::SubscriptionClient, v2::params::JsonRpcParams, Subscription}, + types::{traits::SubscriptionClient, Subscription}, WsClient, }; use sc_transaction_pool_api::TransactionStatus; @@ -71,106 +71,113 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! 
{ signer: Signer, ) -> Result<(), Error<$crate::[<$runtime _runtime_exports>]::Runtime>> { use $crate::[<$runtime _runtime_exports>]::*; + let (sub, unsub) = if config.listen == "head" { ("chain_subscribeNewHeads", "chain_unsubscribeNewHeads") } else { ("chain_subscribeFinalizedHeads", "chain_unsubscribeFinalizedHeads") }; - log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", sub, unsub); - let mut subscription: Subscription
= client - .subscribe(&sub, JsonRpcParams::NoParams, &unsub) - .await - .unwrap(); - - while let Some(now) = subscription.next().await? { - let hash = now.hash(); - log::debug!(target: LOG_TARGET, "new event at #{:?} ({:?})", now.number, hash); - - // if the runtime version has changed, terminate - crate::check_versions::(client, false).await?; - - // we prefer doing this check before fetching anything into a remote-ext. - if ensure_signed_phase::(client, hash).await.is_err() { - log::debug!(target: LOG_TARGET, "phase closed, not interested in this block at all."); - continue; - }; - - // NOTE: we don't check the score of any of the submitted solutions. If we submit a weak - // one, as long as we are valid, we will end up getting our deposit back, so not a big - // deal for now. Note that to avoid an unfeasible solution, we should make sure that we - // only start the process on a finalized snapshot. If the signed phase is long enough, - // this will not be a solution. - - // grab an externalities without staking, just the election snapshot. - let mut ext = crate::create_election_ext::(shared.uri.clone(), Some(hash), vec![]).await?; - - if ensure_no_previous_solution::(&mut ext, &signer.account).await.is_err() { - log::debug!(target: LOG_TARGET, "We already have a solution in this phase, skipping."); - continue; - } - - let (raw_solution, witness) = crate::mine_with::(&config.solver, &mut ext)?; - - log::info!(target: LOG_TARGET, "mined solution with {:?}", &raw_solution.score); - - let nonce = crate::get_account_info::(client, &signer.account, Some(hash)) - .await? 
- .map(|i| i.nonce) - .expect(crate::signer::SIGNER_ACCOUNT_WILL_EXIST); - let tip = 0 as Balance; - let period = ::BlockHashCount::get() / 2; - let current_block = now.number.saturating_sub(1); - let era = sp_runtime::generic::Era::mortal(period.into(), current_block.into()); - log::trace!(target: LOG_TARGET, "transaction mortality: {:?} -> {:?}", era.birth(current_block.into()), era.death(current_block.into())); - let extrinsic = ext.execute_with(|| create_uxt(raw_solution, witness, signer.clone(), nonce, tip, era)); - let bytes = sp_core::Bytes(extrinsic.encode()); - - let mut tx_subscription: Subscription< - TransactionStatus<::Hash, ::Hash> - > = match client - .subscribe(&"author_submitAndWatchExtrinsic", params! { bytes }, "author_unwatchExtrinsic") + loop { + log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", sub, unsub); + let mut subscription: Subscription
= client + .subscribe(&sub, params! {}, &unsub) .await - { - Ok(sub) => sub, - Err(why) => { - // This usually happens when we've been busy with mining for a few blocks, and now we're receiving the - // subscriptions of blocks in which we were busy. In these blocks, we still don't have a solution, so we - // re-compute a new solution and submit it with an outdated `Nonce`, which yields most often `Stale` - // error. NOTE: to improve this overall, and to be able to introduce an array of other fancy features, - // we should make this multi-threaded and do the computation outside of this callback. - log::warn!(target: LOG_TARGET, "failing to submit a transaction {:?}. continuing...", why); - continue + .unwrap(); + + while let Some(now) = subscription.next().await? { + let hash = now.hash(); + log::trace!(target: LOG_TARGET, "new event at #{:?} ({:?})", now.number, hash); + + // if the runtime version has changed, terminate. + crate::check_versions::(client).await?; + + // we prefer doing this check before fetching anything into a remote-ext. + if ensure_signed_phase::(client, hash).await.is_err() { + log::debug!(target: LOG_TARGET, "phase closed, not interested in this block at all."); + continue; + }; + + // grab an externalities without staking, just the election snapshot. + let mut ext = crate::create_election_ext::( + shared.uri.clone(), + Some(hash), + vec![], + ).await?; + + if ensure_no_previous_solution::(&mut ext, &signer.account).await.is_err() { + log::debug!(target: LOG_TARGET, "We already have a solution in this phase, skipping."); + continue; } - }; - - let _success = while let Some(status_update) = tx_subscription.next().await? 
{ - log::trace!(target: LOG_TARGET, "status update {:?}", status_update); - match status_update { - TransactionStatus::Ready | TransactionStatus::Broadcast(_) | TransactionStatus::Future => continue, - TransactionStatus::InBlock(hash) => { - log::info!(target: LOG_TARGET, "included at {:?}", hash); - let key = frame_support::storage::storage_prefix(b"System", b"Events"); - let events = get_storage::::Hash>>, - >(client, params!{ key, hash }).await?.unwrap_or_default(); - log::info!(target: LOG_TARGET, "events at inclusion {:?}", events); - } - TransactionStatus::Retracted(hash) => { - log::info!(target: LOG_TARGET, "Retracted at {:?}", hash); - } - TransactionStatus::Finalized(hash) => { - log::info!(target: LOG_TARGET, "Finalized at {:?}", hash); - break + + // mine a solution, and run feasibility check on it as well. + let (raw_solution, witness) = crate::mine_with::(&config.solver, &mut ext, true)?; + log::info!(target: LOG_TARGET, "mined solution with {:?}", &raw_solution.score); + + let nonce = crate::get_account_info::(client, &signer.account, Some(hash)) + .await? + .map(|i| i.nonce) + .expect(crate::signer::SIGNER_ACCOUNT_WILL_EXIST); + let tip = 0 as Balance; + let period = ::BlockHashCount::get() / 2; + let current_block = now.number.saturating_sub(1); + let era = sp_runtime::generic::Era::mortal(period.into(), current_block.into()); + log::trace!( + target: LOG_TARGET, "transaction mortality: {:?} -> {:?}", + era.birth(current_block.into()), + era.death(current_block.into()), + ); + let extrinsic = ext.execute_with(|| create_uxt(raw_solution, witness, signer.clone(), nonce, tip, era)); + let bytes = sp_core::Bytes(extrinsic.encode()); + + let mut tx_subscription: Subscription< + TransactionStatus<::Hash, ::Hash> + > = match client + .subscribe(&"author_submitAndWatchExtrinsic", params! 
{ bytes }, "author_unwatchExtrinsic") + .await + { + Ok(sub) => sub, + Err(why) => { + // This usually happens when we've been busy with mining for a few blocks, and + // now we're receiving the subscriptions of blocks in which we were busy. In + // these blocks, we still don't have a solution, so we re-compute a new solution + // and submit it with an outdated `Nonce`, which yields most often `Stale` + // error. NOTE: to improve this overall, and to be able to introduce an array of + // other fancy features, we should make this multi-threaded and do the + // computation outside of this callback. + log::warn!(target: LOG_TARGET, "failing to submit a transaction {:?}. continuing...", why); + continue } - _ => { - log::warn!(target: LOG_TARGET, "Stopping listen due to other status {:?}", status_update); - break + }; + + while let Some(status_update) = tx_subscription.next().await? { + log::trace!(target: LOG_TARGET, "status update {:?}", status_update); + match status_update { + TransactionStatus::Ready | TransactionStatus::Broadcast(_) | TransactionStatus::Future => continue, + TransactionStatus::InBlock(hash) => { + log::info!(target: LOG_TARGET, "included at {:?}", hash); + let key = frame_support::storage::storage_prefix(b"System", b"Events"); + let events = get_storage::::Hash>>, + >(client, params!{ key, hash }).await?.unwrap_or_default(); + log::info!(target: LOG_TARGET, "events at inclusion {:?}", events); + } + TransactionStatus::Retracted(hash) => { + log::info!(target: LOG_TARGET, "Retracted at {:?}", hash); + } + TransactionStatus::Finalized(hash) => { + log::info!(target: LOG_TARGET, "Finalized at {:?}", hash); + break + } + _ => { + log::warn!(target: LOG_TARGET, "Stopping listen due to other status {:?}", status_update); + break + } } - } - }; - } + }; + } - Ok(()) + log::warn!(target: LOG_TARGET, "subscription to {} terminated. 
Retrying..", sub) + } } }}} diff --git a/utils/voter-bags/Cargo.toml b/utils/voter-bags/Cargo.toml index 0c48442e6cf3..2ebe24722265 100644 --- a/utils/voter-bags/Cargo.toml +++ b/utils/voter-bags/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] generate-bags = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -structopt = "0.3.21" +structopt = "0.3.25" westend-runtime = { path = "../../runtime/westend" } kusama-runtime = { path = "../../runtime/kusama" } diff --git a/xcm/Cargo.toml b/xcm/Cargo.toml index 6c4c319170d0..f6a309b70c08 100644 --- a/xcm/Cargo.toml +++ b/xcm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "The basic XCM datastructures." edition = "2018" diff --git a/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index b93184e0455d..9e2083704254 100644 --- a/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -16,11 +16,12 @@ use super::*; use crate::{account_and_location, new_executor, worst_case_holding, AssetTransactorOf, XcmCallOf}; -use frame_benchmarking::{ - benchmarks_instance_pallet, impl_benchmark_test_suite, BenchmarkError, BenchmarkResult, +use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError, BenchmarkResult}; +use frame_support::{ + pallet_prelude::Get, + traits::fungible::{Inspect, Mutate}, }; -use frame_support::{pallet_prelude::Get, traits::fungible::Inspect}; -use sp_runtime::traits::Zero; +use sp_runtime::traits::{Bounded, Zero}; use sp_std::{convert::TryInto, prelude::*, vec}; use xcm::latest::prelude::*; use xcm_executor::traits::{Convert, TransactAsset}; @@ -108,11 +109,19 @@ benchmarks_instance_pallet! 
{ receive_teleported_asset { // If there is no trusted teleporter, then we skip this benchmark. - let (trusted_teleporter, teleportable_asset) = T::TrustedTeleporter::get().ok_or( - BenchmarkError::Override( - BenchmarkResult::from_weight(T::BlockWeights::get().max_block) - ) - )?; + let (trusted_teleporter, teleportable_asset) = T::TrustedTeleporter::get() + .ok_or(BenchmarkError::Skip)?; + + if let Some(checked_account) = T::CheckedAccount::get() { + T::TransactAsset::mint_into( + &checked_account, + < + T::TransactAsset + as + Inspect + >::Balance::max_value() / 2u32.into(), + )?; + } let assets: MultiAssets = vec![ teleportable_asset ].into(); @@ -210,10 +219,10 @@ benchmarks_instance_pallet! { assert!(!T::TransactAsset::balance(&checked_account).is_zero()); } } -} -impl_benchmark_test_suite!( - Pallet, - crate::fungible::mock::new_test_ext(), - crate::fungible::mock::Test -); + impl_benchmark_test_suite!( + Pallet, + crate::fungible::mock::new_test_ext(), + crate::fungible::mock::Test + ); +} diff --git a/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index 8666dc6caa4d..fb5f3059561b 100644 --- a/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -131,7 +131,7 @@ impl xcm_executor::Config for XcmConfig { type AssetTransactor = AssetTransactor; type OriginConverter = (); type IsReserve = (); - type IsTeleporter = (); + type IsTeleporter = TrustedTeleporters; type LocationInverter = xcm_builder::LocationInverter; type Barrier = AllowUnpaidExecutionFrom; type Weigher = xcm_builder::FixedWeightBounds; @@ -153,12 +153,17 @@ impl crate::Config for Test { } } +pub type TrustedTeleporters = (xcm_builder::Case,); + parameter_types! 
{ pub const CheckedAccount: Option = Some(100); + pub const ChildTeleporter: MultiLocation = Parachain(1000).into(); pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - X1(OnlyChild).into(), + ChildTeleporter::get(), MultiAsset { id: Concrete(Here.into()), fun: Fungible(100) }, )); + pub const TeleConcreteFung: (MultiAssetFilter, MultiLocation) = + (Wild(AllOf { fun: WildFungible, id: Concrete(Here.into()) }), ChildTeleporter::get()); } impl xcm_balances_benchmark::Config for Test { diff --git a/xcm/pallet-xcm/Cargo.toml b/xcm/pallet-xcm/Cargo.toml index a9ff97ddc088..1a487a169269 100644 --- a/xcm/pallet-xcm/Cargo.toml +++ b/xcm/pallet-xcm/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Parity Technologies "] edition = "2018" name = "pallet-xcm" -version = "0.9.11" +version = "0.9.12" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } diff --git a/xcm/pallet-xcm/src/lib.rs b/xcm/pallet-xcm/src/lib.rs index 0252e22cf450..393d015d6592 100644 --- a/xcm/pallet-xcm/src/lib.rs +++ b/xcm/pallet-xcm/src/lib.rs @@ -179,7 +179,7 @@ pub mod pallet { /// \[ origin location, id, expected location \] InvalidResponder(MultiLocation, QueryId, Option), /// Expected query response has been received but the expected origin location placed in - /// storate by this runtime previously cannot be decoded. The query remains registered. + /// storage by this runtime previously cannot be decoded. The query remains registered. /// /// This is unexpected (since a location placed in storage in a previously executing /// runtime should be readable prior to query timeout) and dangerous since the possibly @@ -473,7 +473,9 @@ pub mod pallet { /// Teleport some assets from the local chain to some destination chain. /// - /// Fee payment on the destination side is made from the first asset listed in the `assets` vector. 
+ /// Fee payment on the destination side is made from the first asset listed in the `assets` vector and + /// fee-weight is calculated locally and thus remote weights are assumed to be equal to + /// local weights. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send @@ -506,49 +508,15 @@ pub mod pallet { assets: Box, fee_asset_item: u32, ) -> DispatchResult { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let beneficiary = - MultiLocation::try_from(*beneficiary).map_err(|()| Error::::BadVersion)?; - let assets = MultiAssets::try_from(*assets).map_err(|()| Error::::BadVersion)?; - - ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); - let value = (origin_location, assets.drain()); - ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - let inv_dest = T::LocationInverter::invert_location(&dest) - .map_err(|()| Error::::DestinationNotInvertible)?; - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? 
- .clone() - .reanchored(&inv_dest) - .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let assets = assets.into(); - let mut message = Xcm(vec![ - WithdrawAsset(assets), - InitiateTeleport { - assets: Wild(All), - dest, - xcm: Xcm(vec![ - BuyExecution { fees, weight_limit: Unlimited }, - DepositAsset { assets: Wild(All), max_assets, beneficiary }, - ]), - }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, weight, weight); - Self::deposit_event(Event::Attempted(outcome)); - Ok(()) + Self::do_teleport_assets(origin, dest, beneficiary, assets, fee_asset_item, None) } /// Transfer some assets from the local chain to the sovereign account of a destination chain and forward /// a notification XCM. /// - /// Fee payment on the destination side is made from the first asset listed in the `assets` vector. + /// Fee payment on the destination side is made from the first asset listed in the `assets` vector and + /// fee-weight is calculated locally and thus remote weights are assumed to be equal to + /// local weights. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. /// - `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send @@ -578,39 +546,14 @@ pub mod pallet { assets: Box, fee_asset_item: u32, ) -> DispatchResult { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; - let beneficiary = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; - let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - - ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); - let value = (origin_location, assets.drain()); - ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - let inv_dest = T::LocationInverter::invert_location(&dest) - .map_err(|()| Error::::DestinationNotInvertible)?; - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? - .clone() - .reanchored(&inv_dest) - .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let assets = assets.into(); - let mut message = Xcm(vec![TransferReserveAsset { - assets, + Self::do_reserve_transfer_assets( + origin, dest, - xcm: Xcm(vec![ - BuyExecution { fees, weight_limit: Unlimited }, - DepositAsset { assets: Wild(All), max_assets, beneficiary }, - ]), - }]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, weight, weight); - Self::deposit_event(Event::Attempted(outcome)); - Ok(()) + beneficiary, + assets, + fee_asset_item, + None, + ) } /// Execute an XCM message from a local, signed, origin. @@ -721,9 +664,218 @@ pub mod pallet { .into() }) } + + /// Transfer some assets from the local chain to the sovereign account of a destination chain and forward + /// a notification XCM. + /// + /// Fee payment on the destination side is made from the first asset listed in the `assets` vector. 
+ /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the + /// `dest` side. + /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay + /// fees. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::weight({ + match ((*assets.clone()).try_into(), (*dest.clone()).try_into()) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } + ]); + T::Weigher::weight(&mut message).map_or(Weight::max_value(), |w| 100_000_000 + w) + }, + _ => Weight::max_value(), + } + })] + pub fn limited_reserve_transfer_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + weight_limit: WeightLimit, + ) -> DispatchResult { + Self::do_reserve_transfer_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + Some(weight_limit), + ) + } + + /// Teleport some assets from the local chain to some destination chain. + /// + /// Fee payment on the destination side is made from the first asset listed in the `assets` vector. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send + /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. 
Will generally be + /// an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the + /// `dest` side. May not be empty. + /// - `dest_weight`: Equal to the total weight on `dest` of the XCM message + /// `Teleport { assets, effects: [ BuyExecution{..}, DepositAsset{..} ] }`. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::weight({ + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_dest: Result = (*dest.clone()).try_into(); + match (maybe_assets, maybe_dest) { + (Ok(assets), Ok(dest)) => { + use sp_std::vec; + let mut message = Xcm(vec![ + WithdrawAsset(assets), + InitiateTeleport { assets: Wild(All), dest, xcm: Xcm(vec![]) }, + ]); + T::Weigher::weight(&mut message).map_or(Weight::max_value(), |w| 100_000_000 + w) + }, + _ => Weight::max_value(), + } + })] + pub fn limited_teleport_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + weight_limit: WeightLimit, + ) -> DispatchResult { + Self::do_teleport_assets( + origin, + dest, + beneficiary, + assets, + fee_asset_item, + Some(weight_limit), + ) + } } impl Pallet { + fn do_reserve_transfer_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + maybe_weight_limit: Option, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: MultiLocation = + (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; + let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + + ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); + let value = (origin_location, assets.drain()); + ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + let (origin_location, assets) = value; + let inv_dest = 
T::LocationInverter::invert_location(&dest) + .map_err(|()| Error::::DestinationNotInvertible)?; + let fees = assets + .get(fee_asset_item as usize) + .ok_or(Error::::Empty)? + .clone() + .reanchored(&inv_dest) + .map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let assets: MultiAssets = assets.into(); + let weight_limit = match maybe_weight_limit { + Some(weight_limit) => weight_limit, + None => { + let beneficiary = beneficiary.clone(); + let fees = fees.clone(); + let mut remote_message = Xcm(vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { fees, weight_limit: Limited(0) }, + DepositAsset { assets: Wild(All), max_assets, beneficiary }, + ]); + // use local weight for remote message and hope for the best. + let remote_weight = T::Weigher::weight(&mut remote_message) + .map_err(|()| Error::::UnweighableMessage)?; + Limited(remote_weight) + }, + }; + let xcm = Xcm(vec![ + BuyExecution { fees, weight_limit }, + DepositAsset { assets: Wild(All), max_assets, beneficiary }, + ]); + let mut message = Xcm(vec![TransferReserveAsset { assets, dest, xcm }]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin_location, message, weight, weight); + Self::deposit_event(Event::Attempted(outcome)); + Ok(()) + } + + fn do_teleport_assets( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + fee_asset_item: u32, + maybe_weight_limit: Option, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: MultiLocation = + (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; + let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + + ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); + let value = (origin_location, 
assets.drain()); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + let (origin_location, assets) = value; + let inv_dest = T::LocationInverter::invert_location(&dest) + .map_err(|()| Error::::DestinationNotInvertible)?; + let fees = assets + .get(fee_asset_item as usize) + .ok_or(Error::::Empty)? + .clone() + .reanchored(&inv_dest) + .map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let assets: MultiAssets = assets.into(); + let weight_limit = match maybe_weight_limit { + Some(weight_limit) => weight_limit, + None => { + let beneficiary = beneficiary.clone(); + let fees = fees.clone(); + let mut remote_message = Xcm(vec![ + ReceiveTeleportedAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees, weight_limit: Limited(0) }, + DepositAsset { assets: Wild(All), max_assets, beneficiary }, + ]); + // use local weight for remote message and hope for the best. + let remote_weight = T::Weigher::weight(&mut remote_message) + .map_err(|()| Error::::UnweighableMessage)?; + Limited(remote_weight) + }, + }; + let xcm = Xcm(vec![ + BuyExecution { fees, weight_limit }, + DepositAsset { assets: Wild(All), max_assets, beneficiary }, + ]); + let mut message = + Xcm(vec![WithdrawAsset(assets), InitiateTeleport { assets: Wild(All), dest, xcm }]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin_location, message, weight, weight); + Self::deposit_event(Event::Attempted(outcome)); + Ok(()) + } + /// Will always make progress, and will do its best not to use much more than `weight_cutoff` /// in doing so. 
pub(crate) fn check_xcm_version_change( diff --git a/xcm/pallet-xcm/src/mock.rs b/xcm/pallet-xcm/src/mock.rs index 7c5a835b7851..8b6174c5b722 100644 --- a/xcm/pallet-xcm/src/mock.rs +++ b/xcm/pallet-xcm/src/mock.rs @@ -310,6 +310,11 @@ pub(crate) fn buy_execution(fees: impl Into) -> Instruction { BuyExecution { fees: fees.into(), weight_limit: Unlimited } } +pub(crate) fn buy_limited_execution(fees: impl Into, weight: u64) -> Instruction { + use xcm::latest::prelude::*; + BuyExecution { fees: fees.into(), weight_limit: Limited(weight) } +} + pub(crate) fn new_test_ext_with_balances( balances: Vec<(AccountId, Balance)>, ) -> sp_io::TestExternalities { diff --git a/xcm/pallet-xcm/src/tests.rs b/xcm/pallet-xcm/src/tests.rs index 5b9a3a177a36..660ac0ef14a8 100644 --- a/xcm/pallet-xcm/src/tests.rs +++ b/xcm/pallet-xcm/src/tests.rs @@ -218,14 +218,111 @@ fn teleport_assets_works() { new_test_ext_with_balances(balances).execute_with(|| { let weight = 2 * BaseXcmWeight::get(); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let dest: MultiLocation = AccountId32 { network: Any, id: BOB.into() }.into(); assert_ok!(XcmPallet::teleport_assets( Origin::signed(ALICE), Box::new(RelayLocation::get().into()), - Box::new(AccountId32 { network: Any, id: BOB.into() }.into().into()), + Box::new(dest.clone().into()), Box::new((Here, SEND_AMOUNT).into()), 0, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), 4000), + DepositAsset { assets: All.into(), max_assets: 1, beneficiary: dest }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v0_ok: xcm::v0::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + 
Event::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn limmited_teleport_assets_works() { + let balances = + vec![(ALICE, INITIAL_BALANCE), (ParaId::from(PARA_ID).into_account(), INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = 2 * BaseXcmWeight::get(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let dest: MultiLocation = AccountId32 { network: Any, id: BOB.into() }.into(); + assert_ok!(XcmPallet::limited_teleport_assets( + Origin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Limited(5000), + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), 5000), + DepositAsset { assets: All.into(), max_assets: 1, beneficiary: dest }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v0_ok: xcm::v0::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + Event::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_teleport_assets` with unlimited weight +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. 
+#[test] +fn unlimmited_teleport_assets_works() { + let balances = + vec![(ALICE, INITIAL_BALANCE), (ParaId::from(PARA_ID).into_account(), INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = 2 * BaseXcmWeight::get(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let dest: MultiLocation = AccountId32 { network: Any, id: BOB.into() }.into(); + assert_ok!(XcmPallet::limited_teleport_assets( + Origin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Unlimited, + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Here, SEND_AMOUNT)), + DepositAsset { assets: All.into(), max_assets: 1, beneficiary: dest }, + ]), + )] + ); assert_eq!( last_event(), Event::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) @@ -258,6 +355,100 @@ fn reserve_transfer_assets_works() { // Destination account (parachain account) has amount let para_acc: AccountId = ParaId::from(PARA_ID).into_account(); assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + Parachain(PARA_ID).into(), + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), 4000), + DepositAsset { assets: All.into(), max_assets: 1, beneficiary: dest }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v0_ok: xcm::v0::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + Event::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_reserve_transfer_assets` +/// +/// Asserts that the sender's balance is decreased 
and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +#[test] +fn limited_reserve_transfer_assets_works() { + let balances = + vec![(ALICE, INITIAL_BALANCE), (ParaId::from(PARA_ID).into_account(), INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = + Junction::AccountId32 { network: NetworkId::Any, id: ALICE.into() }.into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + Origin::signed(ALICE), + Box::new(Parachain(PARA_ID).into().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Limited(5000), + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(PARA_ID).into_account(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + Parachain(PARA_ID).into(), + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), 5000), + DepositAsset { assets: All.into(), max_assets: 1, beneficiary: dest }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v0_ok: xcm::v0::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + Event::XcmPallet(crate::Event::Attempted(Outcome::Complete(weight))) + ); + }); +} + +/// Test `limited_reserve_transfer_assets` with unlimited weight purchasing +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. 
+#[test] +fn unlimited_reserve_transfer_assets_works() { + let balances = + vec![(ALICE, INITIAL_BALANCE), (ParaId::from(PARA_ID).into_account(), INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + let dest: MultiLocation = + Junction::AccountId32 { network: NetworkId::Any, id: ALICE.into() }.into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + Origin::signed(ALICE), + Box::new(Parachain(PARA_ID).into().into()), + Box::new(dest.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + WeightLimit::Unlimited, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(PARA_ID).into_account(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); assert_eq!( sent_xcm(), vec![( @@ -670,7 +861,7 @@ fn subscriber_side_subscription_works() { }); } -/// We should autosubscribe when we don't know the remote's version. +/// We should auto-subscribe when we don't know the remote's version. #[test] fn auto_subscription_works() { new_test_ext_with_balances(vec![]).execute_with(|| { diff --git a/xcm/procedural/Cargo.toml b/xcm/procedural/Cargo.toml index fde65dcdd51a..0e5682d98bef 100644 --- a/xcm/procedural/Cargo.toml +++ b/xcm/procedural/Cargo.toml @@ -8,6 +8,6 @@ edition = "2018" proc-macro = true [dependencies] -proc-macro2 = "1.0.28" -quote = "1.0.9" -syn = "1.0.77" +proc-macro2 = "1.0.30" +quote = "1.0.10" +syn = "1.0.80" diff --git a/xcm/src/v1/README.md b/xcm/src/v1/README.md new file mode 100644 index 000000000000..2985125b11a4 --- /dev/null +++ b/xcm/src/v1/README.md @@ -0,0 +1,25 @@ +# XCM Version 1 +The comprehensive list of changes can be found in [this PR description](https://github.com/paritytech/polkadot/pull/2815#issue-608567900). 
+ +## Changes to be aware of +Most changes should automatically be resolved via the conversion traits (i.e. `TryFrom` and `From`). The list here is mostly for incompatible changes that result in an `Err(())` when attempting to convert XCM objects from v0. + +### Junction +- `v0::Junction::Parent` cannot be converted to v1, because the way we represent parents in v1 has changed - instead of being a property of the junction, v1 MultiLocations now have an extra field representing the number of parents that the MultiLocation contains. + +### MultiLocation +- The `try_from` conversion method will always canonicalize the v0 MultiLocation before attempting to do the proper conversion. Since canonicalization is not a fallible operation, we do not expect v0 MultiLocation to ever fail to be upgraded to v1. + +### MultiAsset +- Stronger typing to differentiate between a single class of `MultiAsset` and several classes of `MultiAssets` is introduced. As the name suggests, a `Vec` that is used on all APIs will instead be using a new type called `MultiAssets` (note the `s`). +- All `MultiAsset` variants whose name contains "All" in it, namely `v0::MultiAsset::All`, `v0::MultiAsset::AllFungible`, `v0::MultiAsset::AllNonFungible`, `v0::MultiAsset::AllAbstractFungible`, `v0::MultiAsset::AllAbstractNonFungible`, `v0::MultiAsset::AllConcreteFungible` and `v0::MultiAsset::AllConcreteNonFungible`, will fail to convert to v1 MultiAsset, since v1 does not contain these variants. +- Similarly, all `MultiAsset` variants whose name contains "All" in it can be converted into a `WildMultiAsset`. +- `v0::MultiAsset::None` is not represented at all in v1. + +### XCM +- No special attention necessary + +### Order +- `v1::Order::DepositAsset` and `v1::Order::DepositReserveAsset` both introduced a new `max_asset` field that limits the maximum classes of assets that can be deposited. During conversion from v0, the `max_asset` field defaults to 1. 
+- v1 Orders that contain `MultiAsset` as argument(s) will need to explicitly specify the amount and details of assets. This is to prevent accidental misuse of `All` to possibly transfer, spend or otherwise perform unintended operations on `All` assets. +- v1 Orders that do allow the notion of `All` to be used as wildcards, will instead use a new type called `MultiAssetFilter`. diff --git a/xcm/src/v1/mod.rs b/xcm/src/v1/mod.rs index 6f96dc739491..87c016432018 100644 --- a/xcm/src/v1/mod.rs +++ b/xcm/src/v1/mod.rs @@ -14,7 +14,51 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Version 1 of the Cross-Consensus Message format data structures. +//! # XCM Version 1 +//! Version 1 of the Cross-Consensus Message format data structures. The comprehensive list of +//! changes can be found in +//! [this PR description](https://github.com/paritytech/polkadot/pull/2815#issue-608567900). +//! +//! ## Changes to be aware of +//! Most changes should automatically be resolved via the conversion traits (i.e. `TryFrom` and +//! `From`). The list here is mostly for incompatible changes that result in an `Err(())` when +//! attempting to convert XCM objects from v0. +//! +//! ### Junction +//! - `v0::Junction::Parent` cannot be converted to v1, because the way we represent parents in v1 +//! has changed - instead of being a property of the junction, v1 `MultiLocation`s now have an +//! extra field representing the number of parents that the `MultiLocation` contains. +//! +//! ### `MultiLocation` +//! - The `try_from` conversion method will always canonicalize the v0 `MultiLocation` before +//! attempting to do the proper conversion. Since canonicalization is not a fallible operation, +//! we do not expect v0 `MultiLocation` to ever fail to be upgraded to v1. +//! +//! ### `MultiAsset` +//! - Stronger typing to differentiate between a single class of `MultiAsset` and several classes +//! of `MultiAssets` is introduced. 
As the name suggests, a `Vec` that is used on all +//! APIs will instead be using a new type called `MultiAssets` (note the `s`). +//! - All `MultiAsset` variants whose name contains "All" in it, namely `v0::MultiAsset::All`, +//! `v0::MultiAsset::AllFungible`, `v0::MultiAsset::AllNonFungible`, +//! `v0::MultiAsset::AllAbstractFungible`, `v0::MultiAsset::AllAbstractNonFungible`, +//! `v0::MultiAsset::AllConcreteFungible` and `v0::MultiAsset::AllConcreteNonFungible`, will fail +//! to convert to v1 `MultiAsset`, since v1 does not contain these variants. +//! - Similarly, all `MultiAsset` variants whose name contains "All" in it can be converted into a +//! `WildMultiAsset`. +//! - `v0::MultiAsset::None` is not represented at all in v1. +//! +//! ### XCM +//! - No special attention necessary +//! +//! ### Order +//! - `v1::Order::DepositAsset` and `v1::Order::DepositReserveAsset` both introduced a new +//! `max_asset` field that limits the maximum classes of assets that can be deposited. During +//! conversion from v0, the `max_asset` field defaults to 1. +//! - v1 Orders that contain `MultiAsset` as argument(s) will need to explicitly specify the amount +//! and details of assets. This is to prevent accidental misuse of `All` to possibly transfer, +//! spend or otherwise perform unintended operations on `All` assets. +//! - v1 Orders that do allow the notion of `All` to be used as wildcards, will instead use a new +//! type called `MultiAssetFilter`. use super::{ v0::{Response as OldResponse, Xcm as OldXcm}, diff --git a/xcm/xcm-builder/Cargo.toml b/xcm/xcm-builder/Cargo.toml index 08f37b515ca7..639f801b2555 100644 --- a/xcm/xcm-builder/Cargo.toml +++ b/xcm/xcm-builder/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Parity Technologies "] edition = "2018" name = "xcm-builder" description = "Tools & types for building with XCM and its executor." 
-version = "0.9.11" +version = "0.9.12" [dependencies] parity-scale-codec = { version = "2.3.1", default-features = false, features = ["derive"] } diff --git a/xcm/xcm-builder/src/weight.rs b/xcm/xcm-builder/src/weight.rs index 1e84a42e2ada..92d26ebef157 100644 --- a/xcm/xcm-builder/src/weight.rs +++ b/xcm/xcm-builder/src/weight.rs @@ -156,7 +156,7 @@ impl, R: TakeRevenue> WeightTrader fn refund_weight(&mut self, weight: Weight) -> Option { let (id, units_per_second) = T::get(); let weight = weight.min(self.0); - let amount = units_per_second * (weight as u128) / 1_000_000_000_000u128; + let amount = units_per_second * (weight as u128) / (WEIGHT_PER_SECOND as u128); self.0 -= weight; self.1 = self.1.saturating_sub(amount); if amount > 0 { diff --git a/xcm/xcm-builder/tests/mock/mod.rs b/xcm/xcm-builder/tests/mock/mod.rs index 1f0a5942045c..7bbbb8873747 100644 --- a/xcm/xcm-builder/tests/mock/mod.rs +++ b/xcm/xcm-builder/tests/mock/mod.rs @@ -108,7 +108,7 @@ impl pallet_balances::Config for Runtime { impl shared::Config for Runtime {} impl configuration::Config for Runtime { - type WeightInfo = configuration::weights::WeightInfo; + type WeightInfo = configuration::TestWeightInfo; } // aims to closely emulate the Kusama XcmConfig @@ -150,11 +150,11 @@ pub type Barrier = ( ); parameter_types! 
{ - pub const KusamaForStatemint: (MultiAssetFilter, MultiLocation) = + pub const KusamaForStatemine: (MultiAssetFilter, MultiLocation) = (MultiAssetFilter::Wild(WildMultiAsset::AllOf { id: Concrete(MultiLocation::here()), fun: WildFungible }), X1(Parachain(1000)).into()); pub const MaxInstructions: u32 = 100; } -pub type TrustedTeleporters = (xcm_builder::Case,); +pub type TrustedTeleporters = (xcm_builder::Case,); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { diff --git a/xcm/xcm-executor/Cargo.toml b/xcm/xcm-executor/Cargo.toml index ab2184c81d1d..e0f2568c58c6 100644 --- a/xcm/xcm-executor/Cargo.toml +++ b/xcm/xcm-executor/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Parity Technologies "] edition = "2018" name = "xcm-executor" description = "An abstract and configurable XCM message executor." -version = "0.9.11" +version = "0.9.12" [dependencies] impl-trait-for-tuples = "0.2.0" diff --git a/xcm/xcm-executor/integration-tests/Cargo.toml b/xcm/xcm-executor/integration-tests/Cargo.toml index 5602a1f178e7..d88417536ea4 100644 --- a/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/xcm/xcm-executor/integration-tests/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Parity Technologies "] edition = "2018" name = "xcm-executor-integration-tests" description = "Integration tests for the XCM Executor" -version = "0.9.11" +version = "0.9.12" [dependencies] frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/xcm/xcm-simulator/Cargo.toml b/xcm/xcm-simulator/Cargo.toml index abfa5560fc0c..a583851d1573 100644 --- a/xcm/xcm-simulator/Cargo.toml +++ b/xcm/xcm-simulator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm-simulator" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Test kit to simulate cross-chain message passing and XCM execution" edition = "2018" diff --git a/xcm/xcm-simulator/example/Cargo.toml b/xcm/xcm-simulator/example/Cargo.toml index 
17ce84caabcd..c33a8080348f 100644 --- a/xcm/xcm-simulator/example/Cargo.toml +++ b/xcm/xcm-simulator/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm-simulator-example" -version = "0.9.11" +version = "0.9.12" authors = ["Parity Technologies "] description = "Examples of xcm-simulator usage." edition = "2018" diff --git a/xcm/xcm-simulator/example/src/relay_chain.rs b/xcm/xcm-simulator/example/src/relay_chain.rs index cc50aec90d18..8dcb5f1f310b 100644 --- a/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/xcm/xcm-simulator/example/src/relay_chain.rs @@ -89,7 +89,7 @@ impl pallet_balances::Config for Runtime { impl shared::Config for Runtime {} impl configuration::Config for Runtime { - type WeightInfo = configuration::weights::WeightInfo; + type WeightInfo = configuration::TestWeightInfo; } parameter_types! { diff --git a/xcm/xcm-simulator/fuzzer/Cargo.toml b/xcm/xcm-simulator/fuzzer/Cargo.toml new file mode 100644 index 000000000000..9fada9b71d55 --- /dev/null +++ b/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "xcm-simulator-fuzzer" +version = "0.9.9" +authors = ["Parity Technologies "] +description = "Examples of xcm-simulator usage." 
+edition = "2018" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +honggfuzz = "0.5.54" +scale-info = { version = "1.0", features = ["derive"] } + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } + +xcm = { path = "../../" } +xcm-simulator = { path = "../" } +xcm-executor = { path = "../../xcm-executor" } +xcm-builder = { path = "../../xcm-builder" } +pallet-xcm = { path = "../../pallet-xcm" } +polkadot-core-primitives = { path = "../../../core-primitives" } +polkadot-runtime-parachains = { path = "../../../runtime/parachains" } +polkadot-parachain = { path = "../../../parachain" } + +[[bin]] +path = "src/fuzz.rs" +name = "xcm-fuzzer" diff --git a/xcm/xcm-simulator/fuzzer/src/fuzz.rs b/xcm/xcm-simulator/fuzzer/src/fuzz.rs new file mode 100644 index 000000000000..44516ab8a562 --- /dev/null +++ b/xcm/xcm-simulator/fuzzer/src/fuzz.rs @@ -0,0 +1,166 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +mod parachain; +mod relay_chain; + +use codec::DecodeLimit; +use polkadot_parachain::primitives::Id as ParaId; +use sp_runtime::traits::AccountIdConversion; +use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; + +use frame_support::assert_ok; +use xcm::{latest::prelude::*, MAX_XCM_DECODE_DEPTH}; + +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const INITIAL_BALANCE: u128 = 1_000_000_000; + +decl_test_parachain! { + pub struct ParaA { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MsgQueue, + DmpMessageHandler = parachain::MsgQueue, + new_ext = para_ext(1), + } +} + +decl_test_parachain! { + pub struct ParaB { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MsgQueue, + DmpMessageHandler = parachain::MsgQueue, + new_ext = para_ext(2), + } +} + +decl_test_relay_chain! { + pub struct Relay { + Runtime = relay_chain::Runtime, + XcmConfig = relay_chain::XcmConfig, + new_ext = relay_ext(), + } +} + +decl_test_network! 
{ + pub struct MockNet { + relay_chain = Relay, + parachains = vec![ + (1, ParaA), + (2, ParaB), + ], + } +} + +pub fn para_account_id(id: u32) -> relay_chain::AccountId { + ParaId::from(id).into_account() +} + +pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { + use parachain::{MsgQueue, Runtime, System}; + + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + pallet_balances::GenesisConfig:: { balances: vec![(ALICE, INITIAL_BALANCE)] } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + MsgQueue::set_para_id(para_id.into()); + }); + ext +} + +pub fn relay_ext() -> sp_io::TestExternalities { + use relay_chain::{Runtime, System}; + + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![(ALICE, INITIAL_BALANCE), (para_account_id(1), INITIAL_BALANCE)], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +pub type RelayChainPalletXcm = pallet_xcm::Pallet; +pub type ParachainPalletXcm = pallet_xcm::Pallet; + +fn run_one_input(data: &[u8]) { + MockNet::reset(); + if let Ok(m) = Xcm::decode_all_with_depth_limit(MAX_XCM_DECODE_DEPTH, data) { + #[cfg(not(fuzzing))] + { + println!("Executing message {:?}", m); + } + ParaA::execute_with(|| { + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, m)); + }); + Relay::execute_with(|| {}); + } +} + +fn main() { + #[cfg(fuzzing)] + { + loop { + fuzz!(|data: &[u8]| { + run_one_input(data); + }); + } + } + #[cfg(not(fuzzing))] + { + //This code path can be used to generate a line-code coverage report in html + //that depicts which lines are executed by at least one input in the current fuzzing queue. 
+ //To generate this code coverage report, run the following commands: + /* + ``` + export CARGO_INCREMENTAL=0 + export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" + export RUSTDOCFLAGS="-Cpanic=abort" + rustup override set nightly + SKIP_WASM_BUILD=1 cargo build + ./xcm/xcm-simulator/fuzzer/target/debug/xcm-fuzzer hfuzz_workspace/xcm-fuzzer/input + zip -0 ccov.zip `find ../../target/debug \( -name "*.gc*" -o -name "test-*.gc*" \) -print` + grcov ccov.zip -s / -t html --llvm --branch --ignore-not-existing -o ../../target/debug/coverage/ + ``` + */ + use std::{env, fs, fs::File, io::Read}; + let args: Vec<_> = env::args().collect(); + let md = fs::metadata(&args[1]).unwrap(); + let all_files = match md.is_dir() { + true => fs::read_dir(&args[1]) + .unwrap() + .map(|x| x.unwrap().path().to_str().unwrap().to_string()) + .collect::>(), + false => (&args[1..]).to_vec(), + }; + println!("All_files {:?}", all_files); + for argument in all_files { + println!("Now doing file {:?}", argument); + let mut buffer: Vec = Vec::new(); + let mut f = File::open(argument).unwrap(); + f.read_to_end(&mut buffer).unwrap(); + run_one_input(&buffer.as_slice()); + } + } +} diff --git a/xcm/xcm-simulator/fuzzer/src/parachain.rs b/xcm/xcm-simulator/fuzzer/src/parachain.rs new file mode 100644 index 000000000000..3911bf8e3578 --- /dev/null +++ b/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -0,0 +1,327 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +use codec::{Decode, Encode}; +use frame_support::{ + construct_runtime, parameter_types, + traits::{Everything, Nothing}, + weights::{constants::WEIGHT_PER_SECOND, Weight}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{Hash, IdentityLookup}, + AccountId32, +}; +use sp_std::{convert::TryFrom, prelude::*}; + +use pallet_xcm::XcmPassthrough; +use polkadot_core_primitives::BlockNumber as RelayBlockNumber; +use polkadot_parachain::primitives::{ + DmpMessageHandler, Id as ParaId, Sibling, XcmpMessageFormat, XcmpMessageHandler, +}; +use xcm::{latest::prelude::*, VersionedXcm}; +use xcm_builder::{ + AccountId32Aliases, AllowUnpaidExecutionFrom, CurrencyAdapter as XcmCurrencyAdapter, + EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, IsConcrete, LocationInverter, + NativeAsset, ParentIsDefault, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, +}; +use xcm_executor::{Config, XcmExecutor}; + +pub type AccountId = AccountId32; +pub type Balance = u128; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = MaxLocks; + type Balance = Balance; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = WEIGHT_PER_SECOND / 4; + pub const ReservedDmpWeight: Weight = WEIGHT_PER_SECOND / 4; +} + +parameter_types! { + pub const KsmLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Kusama; + pub Ancestry: MultiLocation = Parachain(MsgQueue::parachain_id().into()).into(); +} + +pub type LocationToAccountId = ( + ParentIsDefault, + SiblingParachainConvertsVia, + AccountId32Aliases, +); + +pub type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + SignedAccountId32AsNative, + XcmPassthrough, +); + +parameter_types! 
{ + pub const UnitWeightCost: Weight = 1; + pub KsmPerSecond: (AssetId, u128) = (Concrete(Parent.into()), 1); + pub const MaxInstructions: u32 = 100; +} + +pub type LocalAssetTransactor = + XcmCurrencyAdapter, LocationToAccountId, AccountId, ()>; + +pub type XcmRouter = super::ParachainXcmRouter; +pub type Barrier = AllowUnpaidExecutionFrom; + +pub struct XcmConfig; +impl Config for XcmConfig { + type Call = Call; + type XcmSender = XcmRouter; + type AssetTransactor = LocalAssetTransactor; + type OriginConverter = XcmOriginToCallOrigin; + type IsReserve = NativeAsset; + type IsTeleporter = (); + type LocationInverter = LocationInverter; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = (); + type AssetTrap = (); + type AssetClaims = (); + type SubscriptionService = (); +} + +#[frame_support::pallet] +pub mod mock_msg_queue { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::storage] + #[pallet::getter(fn parachain_id)] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn received_dmp)] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + // XCMP + /// Some XCM was executed OK. + Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. 
+ BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. + ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = MultiLocation::new(1, X1(Parachain(sender.into()))); + match T::XcmExecutor::execute_xcm(location, xcm, max_weight) { + Outcome::Error(e) => (Err(e.clone()), Event::Fail(Some(hash), e)), + Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. 
+ Outcome::Incomplete(w, e) => (Ok(w), Event::Fail(Some(hash), e)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = &data_ref[..]; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = VersionedXcm::::decode(&mut remaining_fragments) { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let id = sp_io::hashing::blake2_256(&data[..]); + let maybe_msg = + VersionedXcm::::decode(&mut &data[..]).map(Xcm::::try_from); + match maybe_msg { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(Err(())) => { + Self::deposit_event(Event::UnsupportedVersion(id)); + }, + Ok(Ok(x)) => { + let outcome = T::XcmExecutor::execute_xcm(Parent, x.clone(), limit); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + } + } + limit + } + } +} + +impl mock_msg_queue::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + type Event = Event; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type 
XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type LocationInverter = LocationInverter; + type Origin = Origin; + type Call = Call; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + MsgQueue: mock_msg_queue::{Pallet, Storage, Event}, + PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin}, + } +); diff --git a/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/xcm/xcm-simulator/fuzzer/src/relay_chain.rs new file mode 100644 index 000000000000..8dcb5f1f310b --- /dev/null +++ b/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -0,0 +1,191 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain runtime mock. 
+ +use frame_support::{ + construct_runtime, parameter_types, + traits::{Everything, Nothing}, + weights::Weight, +}; +use sp_core::H256; +use sp_runtime::{testing::Header, traits::IdentityLookup, AccountId32}; + +use polkadot_parachain::primitives::Id as ParaId; +use polkadot_runtime_parachains::{configuration, origin, shared, ump}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowUnpaidExecutionFrom, ChildParachainAsNative, + ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, + CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, FixedWeightBounds, IsConcrete, + LocationInverter, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, +}; +use xcm_executor::{Config, XcmExecutor}; + +pub type AccountId = AccountId32; +pub type Balance = u128; + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! 
{ + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = MaxLocks; + type Balance = Balance; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +impl shared::Config for Runtime {} + +impl configuration::Config for Runtime { + type WeightInfo = configuration::TestWeightInfo; +} + +parameter_types! { + pub const KsmLocation: MultiLocation = Here.into(); + pub const KusamaNetwork: NetworkId = NetworkId::Kusama; + pub const AnyNetwork: NetworkId = NetworkId::Any; + pub Ancestry: MultiLocation = Here.into(); + pub UnitWeightCost: Weight = 1_000; +} + +pub type SovereignAccountOf = + (ChildParachainConvertsVia, AccountId32Aliases); + +pub type LocalAssetTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +parameter_types! 
{ + pub const BaseXcmWeight: Weight = 1_000; + pub KsmPerSecond: (AssetId, u128) = (Concrete(KsmLocation::get()), 1); + pub const MaxInstructions: u32 = 100; +} + +pub type XcmRouter = super::RelayChainXcmRouter; +pub type Barrier = AllowUnpaidExecutionFrom; + +pub struct XcmConfig; +impl Config for XcmConfig { + type Call = Call; + type XcmSender = XcmRouter; + type AssetTransactor = LocalAssetTransactor; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type LocationInverter = LocationInverter; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = (); + type AssetTrap = (); + type AssetClaims = (); + type SubscriptionService = (); +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + type Event = Event; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = XcmRouter; + // Anyone can execute XCM messages locally... + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Nothing; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type LocationInverter = LocationInverter; + type Origin = Origin; + type Call = Call; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; +} + +parameter_types! 
{ + pub const FirstMessageFactorPercent: u64 = 100; +} + +impl ump::Config for Runtime { + type Event = Event; + type UmpSink = ump::XcmSink, Runtime>; + type FirstMessageFactorPercent = FirstMessageFactorPercent; + type ExecuteOverweightOrigin = frame_system::EnsureRoot; +} + +impl origin::Config for Runtime {} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParasOrigin: origin::{Pallet, Origin}, + ParasUmp: ump::{Pallet, Call, Storage, Event}, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + } +);