diff --git a/.cargo/config b/.cargo/config.toml
similarity index 86%
rename from .cargo/config
rename to .cargo/config.toml
index b6879bf01e5f0..47c98de4efc2a 100644
--- a/.cargo/config
+++ b/.cargo/config.toml
@@ -7,6 +7,7 @@ xclippy = [
     "clippy", "--all-targets", "--all-features", "--",
     "-Wclippy::all",
     "-Wclippy::disallowed_methods",
+    "-Aclippy::unnecessary_get_then_check",
 ]
 xlint = "run --package x --bin x -- lint"
 xtest = "run --package x --bin x -- external-crates-tests"
@@ -22,6 +23,9 @@ move-clippy = [
     "-Aclippy::upper_case_acronyms",
     "-Aclippy::type_complexity",
     "-Aclippy::new_without_default",
+    "-Aclippy::question_mark",
+    "-Aclippy::unnecessary_get_then_check",
+    "-Aclippy::needless_borrows_for_generic_args",
 ]

 mysql-clippy = [
@@ -38,6 +42,7 @@ mysql-clippy = [
     "-Aclippy::upper_case_acronyms",
     "-Aclippy::type_complexity",
     "-Aclippy::new_without_default",
+    "-Aclippy::unnecessary_get_then_check",
 ]

 [build]
diff --git a/.github/workflows/release-notes-generator.yml b/.github/workflows/release-notes-generator.yml
new file mode 100644
index 0000000000000..993efe023436f
--- /dev/null
+++ b/.github/workflows/release-notes-generator.yml
@@ -0,0 +1,71 @@
+name: Create Sui Release with Release Notes
+
+concurrency: ${{ github.workflow }}-${{ inputs.release_tag }}
+
+on:
+  workflow_dispatch:
+    inputs:
+      release_tag:
+        description: 'Sui Release Tag'
+        type: string
+        required: true
+      previous_branch:
+        description: 'Previous Release Branch (Ex: releases/sui-vX.XX.X-release)'
+        type: string
+        required: true
+      current_branch:
+        description: 'Current Release Branch (Ex: releases/sui-vX.XX.X-release)'
+        type: string
+        required: true
+
+env:
+  RELEASE_NOTES_FILE: "./release_notes.txt"
+
+jobs:
+  get-release-notes:
+    name: Get Release Notes for ${{ inputs.release_tag }} release
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Get commits for branches
+        shell: bash
+        working-directory: ./
+        run: |
+          echo "previous_commit=$(curl https://api.github.com/repos/MystenLabs/sui/commits/${{ inputs.previous_branch }} | jq .sha)" >> $GITHUB_ENV
+          echo "current_commit=$(curl https://api.github.com/repos/MystenLabs/sui/commits/${{ inputs.current_branch }} | jq .sha)" >> $GITHUB_ENV
+
+      - name: Checkout main
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1
+        with:
+          fetch-depth: 0
+          ref: main
+
+      - name: Setup Python
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # pin@v5.0.0
+        with:
+          python-version: 3.10.10
+
+      - name: Generate Release Notes
+        shell: bash
+        working-directory: ./
+        run: |
+          GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} python ./scripts/release_notes.py generate ${{ env.previous_commit }} ${{ env.current_commit }} | tee -a ${{ env.RELEASE_NOTES_FILE }}
+          echo "---" >> ${{ env.RELEASE_NOTES_FILE }}
+          echo "#### Full Log: https://github.com/MystenLabs/sui/commits/${{ inputs.release_tag }}" >> ${{ env.RELEASE_NOTES_FILE }}
+
+          if [[ ${{ inputs.release_tag }} == devnet* ]]; then
+            echo "pre_release=true" >> $GITHUB_ENV
+          else
+            echo "pre_release=false" >> $GITHUB_ENV
+          fi
+
+      - name: Create Release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
+        with:
+          tag_name: ${{ inputs.release_tag }}
+          release_name: ${{ inputs.release_tag }}
+          body_path: ${{ env.RELEASE_NOTES_FILE }}
+          draft: false
+          prerelease: ${{ env.pre_release }}
diff --git a/Cargo.lock b/Cargo.lock
index 988cc85bff310..fd91fa9a6cbee 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -38,6 +38,12 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
 [[package]]
 name = "aead"
 version = "0.5.2"
@@ -177,7 +183,7 @@ dependencies = [
  "rcgen",
  "ring 0.17.8",
  "rustls 0.23.12",
- "rustls-webpki 0.102.6",
+ "rustls-webpki 0.102.7",
  "serde",
  "serde_json",
  "socket2 0.5.7",
@@ -208,10 +214,10 @@ name = "anemo-build"
 version = "0.0.0"
 source = "git+https://github.com/mystenlabs/anemo.git?rev=dbb5a074c2d25660525ab5d36d65ff0cb8051949#dbb5a074c2d25660525ab5d36d65ff0cb8051949"
 dependencies = [
- "prettyplease 0.2.20",
+ "prettyplease 0.2.22",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
- "syn 2.0.74",
+ "quote 1.0.37",
+ "syn 2.0.76",
 ]

 [[package]]
@@ -254,15 +260,6 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"

-[[package]]
-name = "ansi_term"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
-dependencies = [
- "winapi",
-]
-
 [[package]]
 name = "anstream"
 version = "0.6.15"
@@ -423,7 +420,7 @@ version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348"
 dependencies = [
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -436,7 +433,7 @@ dependencies = [
  "num-bigint 0.4.6",
  "num-traits",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -509,7 +506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -549,9 +546,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"

 [[package]]
 name = "arrayvec"
-version = "0.7.4"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"

 [[package]]
 name = "arrow"
@@ -1024,7 +1021,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
  "synstructure",
 ]
@@ -1036,7 +1033,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -1062,7 +1059,7 @@ version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5"
 dependencies = [
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -1113,7 +1110,7 @@ checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7"
 dependencies = [
  "async-task 4.7.1",
  "concurrent-queue",
- "fastrand 2.1.0",
+ "fastrand 2.1.1",
  "futures-lite 2.3.0",
  "slab",
 ]
@@ -1202,9 +1199,9 @@ dependencies = [
"darling 0.20.10", "proc-macro-crate 1.1.3", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "strum 0.25.0", - "syn 2.0.74", + "syn 2.0.76", "thiserror", ] @@ -1265,7 +1262,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.7.3", - "rustix 0.38.34", + "rustix 0.38.35", "slab", "tracing", "windows-sys 0.59.0", @@ -1298,8 +1295,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -1347,8 +1344,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -1369,8 +1366,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -1426,12 +1423,13 @@ checksum = "62af46d040ba9df09edc6528dae9d8e49f5f3e82f55b7d2ec31a733c38dbc49d" [[package]] name = "atomicwrites" -version = "0.3.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb8f2cd6962fa53c0e2a9d3f97eaa7dbd1e3cbbeeb4745403515b42ae07b3ff6" +checksum = "fc7b2dbe9169059af0f821e811180fddc971fc210c776c133c7819ccd6e478db" dependencies = [ + "rustix 0.38.35", "tempfile", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -1441,8 +1439,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -1475,7 +1473,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "hex", "http 0.2.12", "hyper 0.14.30", @@ -1495,7 +1493,7 @@ checksum = "70a66ac8ef5fa9cf01c2d999f39d16812e90ec1467bd382cbbb74ba23ea86201" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "fastrand 2.1.0", + "fastrand 2.1.1", "tokio", "tracing", "zeroize", @@ -1535,7 +1533,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "fastrand 2.1.0", + "fastrand 2.1.1", "http 0.2.12", "percent-encoding", "tracing", @@ -1560,7 +1558,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "http 0.2.12", "regex", "tokio-stream", @@ -1586,7 +1584,7 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "fastrand 2.1.0", + "fastrand 2.1.1", "http 0.2.12", "regex", "tokio-stream", @@ -1738,7 +1736,7 @@ dependencies = [ "aws-smithy-http-tower", "aws-smithy-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.30", @@ -1832,7 +1830,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "http 0.2.12", "http-body 0.4.6", "once_cell", @@ -2069,7 +2067,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", ] @@ -2226,7 +2224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3deeecb812ca5300b7d3f66f730cc2ebd3511c3d36c691dd79c165d5b19a26e3" dependencies 
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -2238,7 +2236,7 @@ checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7"

 [[package]]
 name = "bin-version"
-version = "1.31.1"
+version = "1.32.0"
 dependencies = [
  "const-str",
  "git-version",
@@ -2265,13 +2263,13 @@ dependencies = [
  "lazy_static",
  "lazycell",
  "peeking_take_while",
- "prettyplease 0.2.20",
+ "prettyplease 0.2.22",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.74",
+ "syn 2.0.76",
 ]

 [[package]]
@@ -2404,8 +2402,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780"
 dependencies = [
  "arrayref",
- "arrayvec 0.7.4",
- "constant_time_eq 0.3.0",
+ "arrayvec 0.7.6",
+ "constant_time_eq 0.3.1",
 ]

 [[package]]
@@ -2415,21 +2413,21 @@ checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae"
 dependencies = [
  "arrayref",
- "arrayvec 0.7.4",
- "constant_time_eq 0.3.0",
+ "arrayvec 0.7.6",
+ "constant_time_eq 0.3.1",
 ]

 [[package]]
 name = "blake3"
-version = "1.5.3"
+version = "1.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9ec96fe9a81b5e365f9db71fe00edc4fe4ca2cc7dcb7861f0603012a7caa210"
+checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7"
 dependencies = [
  "arrayref",
- "arrayvec 0.7.4",
+ "arrayvec 0.7.6",
  "cc",
  "cfg-if",
- "constant_time_eq 0.3.0",
+ "constant_time_eq 0.3.1",
 ]

 [[package]]
@@ -2475,7 +2473,7 @@ dependencies = [
  "maplit",
  "paste",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -2541,9 +2539,9 @@ dependencies = [

 [[package]]
 name = "bnum"
-version = "0.10.0"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56953345e39537a3e18bdaeba4cb0c58a78c1f61f361dc0fa7c5c7340ae87c5f"
+checksum = "3e31ea183f6ee62ac8b8a8cf7feddd766317adfb13ff469de57ce033efd6a790"

 [[package]]
 name = "brotli"
@@ -2647,7 +2645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -2659,9 +2657,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce"

 [[package]]
 name = "bytemuck"
-version = "1.16.3"
+version = "1.17.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83"
+checksum = "773d90827bc3feecfb67fab12e24de0749aad83c74b9504ecde46237b5cd24e2"

 [[package]]
 name = "byteorder"
@@ -2746,7 +2744,7 @@ dependencies = [
  "cached_proc_macro_types",
  "darling 0.14.4",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -2758,9 +2756,9 @@ checksum = "ade8366b8bd5ba243f0a58f036cc0ca8a2f069cff1a2351ef1cac6b083e16fc0"

 [[package]]
 name = "camino"
-version = "1.1.7"
+version = "1.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239"
+checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3"
 dependencies = [
  "serde",
 ]
@@ -2785,7 +2783,7 @@ checksum = "434168fe6533055f0f4204039abe3ff6d7db338ef46872a5fa39e9d5ad5ab7a9"
 dependencies = [
  "cap-primitives",
  "cap-std",
- "rustix 0.38.34",
+ "rustix 0.38.35",
"smallvec", ] @@ -2801,7 +2799,7 @@ dependencies = [ "io-lifetimes 2.0.3", "ipnet", "maybe-owned", - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.52.0", "winx", ] @@ -2825,7 +2823,7 @@ dependencies = [ "cap-primitives", "io-extras", "io-lifetimes 2.0.3", - "rustix 0.38.34", + "rustix 0.38.35", ] [[package]] @@ -2838,7 +2836,7 @@ dependencies = [ "cap-primitives", "iana-time-zone", "once_cell", - "rustix 0.38.34", + "rustix 0.38.35", "winx", ] @@ -2902,12 +2900,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.10" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" +checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -2921,9 +2920,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.8" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +checksum = "345c78335be0624ed29012dc10c49102196c6882c12dde65d9f35b02da2aada8" dependencies = [ "smallvec", "target-lexicon", @@ -3033,9 +3032,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.15" +version = "4.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d8838454fda655dafd3accb2b6e2bea645b9e4078abe84a22ceb947235c5cc" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -3062,8 +3061,8 @@ checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -3205,17 +3204,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "colored-diff" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410208eb08c3f3ad44b95b51c4fc0d5993cbcc9dd39cfadb4214b9115a97dcb5" -dependencies = [ - "ansi_term", - "dissimilar", - "itertools 0.10.5", -] - [[package]] name = "combine" version = "4.6.7" @@ -3309,8 +3297,8 @@ dependencies = [ "tokio-rustls 0.26.0", "tokio-stream", "tokio-util 0.7.11", - "tonic 0.12.1", - "tonic-build 0.12.1", + "tonic 0.12.2", + "tonic-build 0.12.2", "tower", "tower-http", "tracing", @@ -3420,9 +3408,9 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "containers-api" @@ -3541,6 +3529,15 @@ dependencies = [ "siphasher", ] +[[package]] +name = "counter" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d458e66999348f56fd3ffcfbb7f7951542075ca8359687c703de6500c1ddccd" +dependencies = [ + "num-traits", +] + [[package]] name = "cpp_demangle" version = "0.3.5" @@ -3552,9 +3549,9 @@ dependencies = [ [[package]] name = "cpp_demangle" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" +checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" 
 dependencies = [
  "cfg-if",
 ]

@@ -3592,7 +3589,7 @@ version = "0.91.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "98b022ed2a5913a38839dfbafe6cf135342661293b08049843362df4301261dc"
 dependencies = [
- "arrayvec 0.7.4",
+ "arrayvec 0.7.6",
  "bumpalo",
  "cranelift-bforest 0.91.1",
  "cranelift-codegen-meta 0.91.1",
@@ -4028,8 +4025,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
- "syn 2.0.74",
+ "quote 1.0.37",
+ "syn 2.0.76",
 ]

@@ -4045,6 +4042,62 @@ dependencies = [
  "zeroize",
 ]

+[[package]]
+name = "cynic"
+version = "3.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "478c02b53607e3f21c374f024c2cfc2154e554905bba478e8e09409f10ce3726"
+dependencies = [
+ "cynic-proc-macros",
+ "ref-cast",
+ "reqwest 0.12.7",
+ "serde",
+ "serde_json",
+ "static_assertions",
+ "thiserror",
+]
+
+[[package]]
+name = "cynic-codegen"
+version = "3.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c0ec86f960a00ce087e96ff6f073f6ff28b6876d69ce8caa06c03fb4143981c"
+dependencies = [
+ "counter",
+ "cynic-parser",
+ "darling 0.20.10",
+ "once_cell",
+ "ouroboros 0.18.4",
+ "proc-macro2 1.0.86",
+ "quote 1.0.37",
+ "strsim 0.10.0",
+ "syn 2.0.76",
+ "thiserror",
+]
+
+[[package]]
+name = "cynic-parser"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "718f6cd8c54ae5249fd42b0c86639df0100b8a86eea2e5f1b915cde2e1481453"
+dependencies = [
+ "indexmap 2.4.0",
+ "lalrpop-util",
+ "logos",
+]
+
+[[package]]
+name = "cynic-proc-macros"
+version = "3.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25a69ecdf4aa110fed1c0c8de290bc8ccb2835388733cf2f418f0abdf6ff3899"
+dependencies = [
+ "cynic-codegen",
+ "darling 0.20.10",
+ "quote 1.0.37",
+ "syn 2.0.76",
+]
+
 [[package]]
 name = "darling"
 version = "0.13.4"
@@ -4084,7 +4137,7 @@ dependencies = [
  "fnv",
  "ident_case",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "strsim 0.10.0",
  "syn 1.0.109",
 ]
@@ -4098,7 +4151,7 @@ dependencies = [
  "fnv",
  "ident_case",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "strsim 0.10.0",
  "syn 1.0.109",
 ]
@@ -4112,9 +4165,9 @@ dependencies = [
  "fnv",
  "ident_case",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "strsim 0.11.1",
- "syn 2.0.74",
+ "syn 2.0.76",
 ]

 [[package]]
@@ -4124,7 +4177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
 dependencies = [
  "darling_core 0.13.4",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -4135,7 +4188,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
 dependencies = [
  "darling_core 0.14.4",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -4146,8 +4199,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
 dependencies = [
  "darling_core 0.20.10",
- "quote 1.0.36",
- "syn 2.0.74",
+ "quote 1.0.37",
+ "syn 2.0.76",
 ]

 [[package]]
@@ -4433,7 +4486,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -4444,7 +4497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -4455,8 +4508,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -4476,7 +4529,7 @@ checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" dependencies = [ "darling 0.14.4", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -4498,16 +4551,16 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "rustc_version", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] name = "determinator" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c644b91adb5bcc66d3533607b6d3ee5c1c2d858d2d95e41dd6aae673e29e0509" +checksum = "bf14b901cdfba3f731d01c4c184100e85f586a272d38874824175b845dbaeaf9" dependencies = [ "camino", "globset", @@ -4522,9 +4575,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf97ee7261bb708fa3402fa9c17a54b70e90e3cb98afb3dc8999d5512cb03f94" +checksum = "65e13bab2796f412722112327f3e575601a3e9cdcbe426f0d30dbf43f3f5dc71" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -4547,21 +4600,21 @@ checksum = "81c5131a2895ef64741dad1d483f358c2a229a3a2d1b256778cdc5e146db64d4" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] name = "diesel_derives" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ff2be1e7312c858b2ef974f5c7089833ae57b5311b334b30923af58e5718d8" +checksum = "e7f2c3de51e2ba6bf2a648285696137aaf0f5f487bcbea93972fe8a364e131a4" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -4581,7 +4634,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -4720,8 +4773,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -4801,8 +4854,8 @@ dependencies = [ "either", "heck 0.5.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -5018,8 +5071,8 @@ checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -5045,7 +5098,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = 
[ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -5057,8 +5110,8 @@ checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -5078,8 +5131,8 @@ checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -5279,14 +5332,14 @@ dependencies = [ "ethers-core", "ethers-etherscan", "eyre", - "prettyplease 0.2.20", + "prettyplease 0.2.22", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "regex", "reqwest 0.11.27", "serde", "serde_json", - "syn 2.0.74", + "syn 2.0.76", "toml 0.8.19", "walkdir", ] @@ -5302,9 +5355,9 @@ dependencies = [ "ethers-contract-abigen", "ethers-core", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "serde_json", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -5313,7 +5366,7 @@ version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bytes", "cargo_metadata 0.18.1", "chrono", @@ -5330,11 +5383,11 @@ dependencies = [ "serde", "serde_json", "strum 0.26.3", - "syn 2.0.74", + "syn 2.0.76", "tempfile", "thiserror", "tiny-keccak", - "unicode-xid 0.2.4", + "unicode-xid 0.2.5", ] [[package]] @@ -5466,7 +5519,7 @@ dependencies = [ "tokio", "tracing", "walkdir", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -5622,7 +5675,7 @@ name = "fastcrypto-derive" version = "0.1.3" source = "git+https://github.com/MystenLabs/fastcrypto?rev=5f2c63266a065996d53f98156f0412782b468597#5f2c63266a065996d53f98156f0412782b468597" dependencies = [ - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -5687,7 +5740,7 @@ dependencies = [ "num-bigint 0.4.6", "once_cell", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "schemars", "serde", "serde_json", @@ -5705,9 +5758,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fd-lock" @@ -5716,7 +5769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ "cfg-if", - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.48.0", ] @@ -5727,7 +5780,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e5768da2206272c81ef0b5e951a41862938a6070da63bcea197899942d3b947" dependencies = [ "cfg-if", - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.52.0", ] @@ -5775,7 +5828,7 @@ dependencies = [ "num-integer", "num-traits", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -5793,15 +5846,15 @@ checksum = "cca4fdab1b9b7e274e7de51202e37f9cfa542b28c77f8d09b817d77a726b4807" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "filetime" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550" +checksum = 
"35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if", "libc", @@ -5879,12 +5932,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.31" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -5934,7 +5987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "033b337d725b97690d86893f9de22b67b80dcc4e9ad815f348254c38119db8fb" dependencies = [ "io-lifetimes 2.0.3", - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.52.0", ] @@ -6044,7 +6097,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", @@ -6068,8 +6121,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -6146,7 +6199,7 @@ dependencies = [ "hyper 0.14.30", "hyper-rustls 0.25.0", "log", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "thiserror", @@ -6241,8 +6294,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -6322,19 +6375,20 @@ dependencies = [ [[package]] name = "guppy" -version = "0.15.2" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f822a2716041492e071691606474f5a7e4fa7c2acbfd7da7b29884fb448291c7" +checksum = "3bff2f6a9d515cf6453282af93363f93bdf570792a6f4f619756e46696d773fa" dependencies = [ + "ahash 0.8.11", "camino", - "cargo_metadata 0.15.4", + "cargo_metadata 0.18.1", "cfg-if", "debug-ignore", "fixedbitset 0.4.2", "guppy-summaries", "guppy-workspace-hack", - "indexmap 1.9.3", - "itertools 0.10.5", + "indexmap 2.4.0", + "itertools 0.13.0", "nested", "once_cell", "pathdiff", @@ -6391,9 +6445,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -6410,10 +6464,11 @@ dependencies = [ [[package]] name = "hakari" -version = "0.13.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af0223111b69beda15417ad6a960bffb093c916f0eaa559036c7036efa2d199" +checksum = "12bd2b14c094d2793daf279eb7624f4525e26f555fbc1647613756cf83f44755" dependencies = [ + "ahash 0.8.11", "atomicwrites", "bimap", "camino", @@ -6424,7 +6479,7 @@ dependencies = [ "guppy-workspace-hack", "include_dir", "indenter", - "itertools 0.10.5", + "itertools 0.12.1", "owo-colors 3.5.0", "pathdiff", "rayon", @@ -6746,7 +6801,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -6801,7 
@@ -6801,7 +6856,7 @@ dependencies = [
  "hyper 0.14.30",
  "log",
  "rustls 0.22.4",
- "rustls-native-certs 0.7.1",
+ "rustls-native-certs 0.7.3",
  "rustls-pki-types",
  "tokio",
  "tokio-rustls 0.25.0",
@@ -6818,7 +6873,7 @@ dependencies = [
  "hyper 1.4.1",
  "hyper-util",
  "rustls 0.23.12",
- "rustls-native-certs 0.7.1",
+ "rustls-native-certs 0.7.3",
  "rustls-pki-types",
  "tokio",
  "tokio-rustls 0.26.0",
@@ -7001,7 +7056,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -7022,7 +7077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
 ]

 [[package]]
@@ -7208,11 +7263,11 @@ dependencies = [

 [[package]]
 name = "is-terminal"
-version = "0.4.12"
+version = "0.4.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
+checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
 dependencies = [
- "hermit-abi 0.3.9",
+ "hermit-abi 0.4.0",
  "libc",
  "windows-sys 0.52.0",
 ]

@@ -7406,7 +7461,7 @@ version = "0.16.2"
 source = "git+https://github.com/wlmyng/jsonrpsee.git?rev=b1b300784795f6a64d0fcdf8f03081a9bc38bde8#b1b300784795f6a64d0fcdf8f03081a9bc38bde8"
 dependencies = [
  "anyhow",
- "arrayvec 0.7.4",
+ "arrayvec 0.7.6",
  "async-lock 2.8.0",
  "async-trait",
  "beef",
@@ -7453,7 +7508,7 @@ dependencies = [
  "heck 0.4.1",
  "proc-macro-crate 1.1.3",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -7613,7 +7668,7 @@ dependencies = [
  "string_cache",
  "term",
  "tiny-keccak",
- "unicode-xid 0.2.4",
+ "unicode-xid 0.2.5",
  "walkdir",
 ]

@@ -7713,9 +7768,9 @@ dependencies = [

 [[package]]
 name = "libc"
-version = "0.2.155"
+version = "0.2.158"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"

 [[package]]
 name = "libloading"
@@ -7784,9 +7839,9 @@ dependencies = [

 [[package]]
 name = "libz-sys"
-version = "1.1.19"
+version = "1.1.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647"
+checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472"
 dependencies = [
  "cc",
  "pkg-config",
@@ -7831,6 +7886,39 @@ dependencies = [
  "value-bag",
 ]

+[[package]]
+name = "logos"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff1ceb190eb9bdeecdd8f1ad6a71d6d632a50905948771718741b5461fb01e13"
+dependencies = [
+ "logos-derive",
+]
+
+[[package]]
+name = "logos-codegen"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90be66cb7bd40cb5cc2e9cfaf2d1133b04a3d93b72344267715010a466e0915a"
+dependencies = [
+ "beef",
+ "fnv",
+ "lazy_static",
+ "proc-macro2 1.0.86",
+ "quote 1.0.37",
+ "regex-syntax 0.8.4",
+ "syn 2.0.76",
+]
+
+[[package]]
+name = "logos-derive"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45154231e8e96586b39494029e58f12f8ffcb5ecf80333a603a13aa205ea8cbd"
+dependencies = [
+ "logos-codegen",
+]
+
 [[package]]
 name = "lru"
 version = "0.7.8"
"0.7.8" @@ -8040,12 +8128,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" -[[package]] -name = "markdown-gen" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8034621d7f1258317ca1dfb9205e3925d27ee4aa2a46620a09c567daf0310562" - [[package]] name = "match_opt" version = "0.1.2" @@ -8118,7 +8200,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.38.34", + "rustix 0.38.35", ] [[package]] @@ -8130,6 +8212,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memmap2" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.6.5" @@ -8184,8 +8275,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf09caffaac8068c346b6df2a7fc27a177fd20b39421a39ce0a211bde679a6c" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -8206,7 +8297,7 @@ checksum = "ffb161cc72176cb37aa47f1fc520d3ef02263d67d661f44f05d05a079e1237fd" dependencies = [ "migrations_internals", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", ] [[package]] @@ -8240,6 +8331,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + [[package]] name = "mio" version = "0.8.11" @@ -8287,7 +8387,7 @@ checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -8689,7 +8789,7 @@ dependencies = [ "move-ir-to-bytecode-syntax", "move-ir-types", "move-symbol-pool", - "ouroboros", + "ouroboros 0.17.2", ] [[package]] @@ -8777,8 +8877,8 @@ name = "move-proc-macros" version = "0.1.0" dependencies = [ "enum-compat-util", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -9084,7 +9184,7 @@ dependencies = [ [[package]] name = "msim" version = "0.1.0" -source = "git+https://github.com/MystenLabs/mysten-sim.git?rev=220f52a15804a768610ac0ae3b8da7de4a5c4d2b#220f52a15804a768610ac0ae3b8da7de4a5c4d2b" +source = "git+https://github.com/MystenLabs/mysten-sim.git?rev=b320996d8dfb99b273fe31c0222c659332283c99#b320996d8dfb99b273fe31c0222c659332283c99" dependencies = [ "ahash 0.7.8", "async-task 4.3.0", @@ -9113,11 +9213,11 @@ dependencies = [ [[package]] name = "msim-macros" version = "0.1.0" -source = "git+https://github.com/MystenLabs/mysten-sim.git?rev=220f52a15804a768610ac0ae3b8da7de4a5c4d2b#220f52a15804a768610ac0ae3b8da7de4a5c4d2b" +source = "git+https://github.com/MystenLabs/mysten-sim.git?rev=b320996d8dfb99b273fe31c0222c659332283c99#b320996d8dfb99b273fe31c0222c659332283c99" dependencies = [ "darling 0.14.4", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -9188,7 +9288,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", "synstructure", ] @@ -9207,9 +9307,9 @@ checksum = 
"defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "mysqlclient-sys" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2aa3a303b6e9878b34811838301b00a56878693c47f9ac0ba397f91adc7bf12" +checksum = "478e2040dbc35c73927b77a2be91a496de19deab376a6982ed61e89592434619" dependencies = [ "pkg-config", "vcpkg", @@ -9237,6 +9337,7 @@ dependencies = [ "prometheus", "prometheus-closure-metric", "scopeguard", + "simple-server-timing-header", "tap", "tokio", "tracing", @@ -9259,7 +9360,7 @@ dependencies = [ "snap", "tokio", "tokio-stream", - "tonic 0.12.1", + "tonic 0.12.2", "tonic-health", "tower", "tower-http", @@ -9276,6 +9377,7 @@ dependencies = [ "prometheus", "serde", "serde_json", + "simple-server-timing-header", "telemetry-subscribers", "tokio", "tower", @@ -9393,7 +9495,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "tonic 0.12.1", + "tonic 0.12.2", "tracing", "typed-store", ] @@ -9455,7 +9557,7 @@ dependencies = [ "pretty_assertions", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde-reflection", "serde_yaml 0.8.26", "sui-keys", @@ -9509,7 +9611,7 @@ dependencies = [ "prometheus", "proptest", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "sui-macros", "sui-protocol-config", "tap", @@ -9573,7 +9675,7 @@ dependencies = [ "telemetry-subscribers", "tempfile", "tokio", - "tonic 0.12.1", + "tonic 0.12.2", "tracing", "typed-store", ] @@ -9615,8 +9717,8 @@ dependencies = [ "sui-protocol-config", "thiserror", "tokio", - "tonic 0.12.1", - "tonic-build 0.12.1", + "tonic 0.12.2", + "tonic-build 0.12.2", "tracing", "typed-store", ] @@ -9649,14 +9751,14 @@ dependencies = [ "narwhal-types", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "sui-protocol-config", "tap", "telemetry-subscribers", "tempfile", "thiserror", "tokio", - "tonic 0.12.1", + "tonic 0.12.2", "tower", "tracing", "typed-store", @@ -9705,7 +9807,7 @@ dependencies = [ [[package]] name = "nexlint" version = "0.1.0" -source = "git+https://github.com/nextest-rs/nexlint.git?rev=94da5c787636dad779c340affa65219134d127f5#94da5c787636dad779c340affa65219134d127f5" +source = "git+https://github.com/nextest-rs/nexlint.git?rev=7ce56bd591242a57660ed05f14ca2483c37d895b#7ce56bd591242a57660ed05f14ca2483c37d895b" dependencies = [ "camino", "debug-ignore", @@ -9720,11 +9822,11 @@ dependencies = [ [[package]] name = "nexlint-lints" version = "0.1.0" -source = "git+https://github.com/nextest-rs/nexlint.git?rev=94da5c787636dad779c340affa65219134d127f5#94da5c787636dad779c340affa65219134d127f5" +source = "git+https://github.com/nextest-rs/nexlint.git?rev=7ce56bd591242a57660ed05f14ca2483c37d895b#7ce56bd591242a57660ed05f14ca2483c37d895b" dependencies = [ "anyhow", "camino", - "colored-diff", + "diffy", "globset", "guppy", "nexlint", @@ -9854,7 +9956,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16d0d3f2a488592e5368ebbe996e7f1d44aa13156efad201f5b4d84e150eaa93" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -9864,9 +9966,9 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc7c92f190c97f79b4a332f5e81dcf68c8420af2045c936c9be0bc9de6f63b5" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -9955,7 +10057,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -10063,8 +10165,8 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -10073,10 +10175,10 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -10145,7 +10247,7 @@ dependencies = [ "percent-encoding", "quick-xml", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "ring 0.17.8", "rustls-pemfile 2.1.3", "serde", @@ -10201,7 +10303,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "auto_impl", "bytes", "ethereum-types", @@ -10216,7 +10318,7 @@ checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -10373,7 +10475,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2ba07320d39dfea882faa70554b4bd342a5f273ed59ba7c1c6b4c840492c954" dependencies = [ "aliasable", - "ouroboros_macro", + "ouroboros_macro 0.17.2", + "static_assertions", +] + +[[package]] +name = "ouroboros" +version = "0.18.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "944fa20996a25aded6b4795c6d63f10014a7a83f8be9828a11860b08c5fc4a67" +dependencies = [ + "aliasable", + "ouroboros_macro 0.18.4", "static_assertions", ] @@ -10386,8 +10499,22 @@ dependencies = [ "heck 0.4.1", "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", +] + +[[package]] +name = "ouroboros_macro" +version = "0.18.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39b0deead1528fd0e5947a8546a9642a9777c25f6e1e26f34c97b204bbb465bd" +dependencies = [ + "heck 0.4.1", + "itertools 0.12.1", + "proc-macro2 1.0.86", + "proc-macro2-diagnostics", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -10452,7 +10579,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bitvec 0.20.4", "byte-slice-cast", "impl-trait-for-tuples", @@ -10466,7 +10593,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", @@ -10482,7 +10609,7 @@ checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -10492,9 +10619,9 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - 
"proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -10828,8 +10955,8 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -10912,8 +11039,8 @@ dependencies = [ "phf_generator", "phf_shared 0.11.2", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -10950,8 +11077,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -10973,7 +11100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-io", ] @@ -11080,7 +11207,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix 0.38.35", "tracing", "windows-sys 0.59.0", ] @@ -11122,9 +11249,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "pprof" -version = "0.11.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059" +checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" dependencies = [ "backtrace", "cfg-if", @@ -11212,7 +11339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ "diff", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -11227,12 +11354,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2 1.0.86", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -11296,11 +11423,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit 0.22.20", ] [[package]] @@ -11311,7 +11438,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", "version_check", ] @@ -11323,7 +11450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "version_check", ] @@ -11345,6 +11472,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.76", + 
"version_check", + "yansi 1.0.1", +] + [[package]] name = "prometheus" version = "0.13.4" @@ -11377,7 +11517,7 @@ checksum = "0fcebfa99f03ae51220778316b37d24981e36322c82c24848f48c5bd0f64cbdb" dependencies = [ "enum-as-inner", "mime", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "time", "url", @@ -11490,11 +11630,11 @@ dependencies = [ "multimap 0.10.0", "once_cell", "petgraph 0.6.5", - "prettyplease 0.2.20", + "prettyplease 0.2.22", "prost 0.13.1", "prost-types 0.13.1", "regex", - "syn 2.0.74", + "syn 2.0.76", "tempfile", ] @@ -11507,7 +11647,7 @@ dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -11520,8 +11660,8 @@ dependencies = [ "anyhow", "itertools 0.12.1", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -11533,8 +11673,8 @@ dependencies = [ "anyhow", "itertools 0.13.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -11598,7 +11738,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -11660,8 +11800,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.3" -source = "git+https://github.com/quinn-rs/quinn.git?rev=f0fa66f871b80b9d2d7075d76967c649aecc0b77#f0fa66f871b80b9d2d7075d76967c649aecc0b77" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd" dependencies = [ "bytes", "rand 0.8.5", @@ -11698,9 +11839,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2 1.0.86", ] @@ -11876,8 +12017,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a25d631e41bfb5fdcde1d4e2215f62f7f0afa3ff11e26563765bd6ea1d229aeb" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -11927,9 +12068,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", @@ -11952,8 +12093,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -12090,14 +12231,14 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots 0.25.4", - "winreg 0.50.0", + "winreg", ] [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "async-compression", "base64 0.22.1", @@ -12105,7 
@@ -12105,7 +12246,7 @@ dependencies = [
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2 0.4.5",
+ "h2 0.4.6",
  "http 1.1.0",
  "http-body 1.0.1",
  "http-body-util",
@@ -12121,7 +12262,7 @@ dependencies = [
  "pin-project-lite",
  "quinn",
  "rustls 0.23.12",
- "rustls-native-certs 0.7.1",
+ "rustls-native-certs 0.7.3",
  "rustls-pemfile 2.1.3",
  "rustls-pki-types",
  "serde",
@@ -12138,7 +12279,7 @@ dependencies = [
  "wasm-streams",
  "web-sys",
  "webpki-roots 0.26.3",
- "winreg 0.52.0",
+ "windows-registry",
 ]

 [[package]]
@@ -12150,7 +12291,7 @@ dependencies = [
  "anyhow",
  "async-trait",
  "http 1.1.0",
- "reqwest 0.12.5",
+ "reqwest 0.12.7",
  "serde",
  "thiserror",
  "tower-service",
@@ -12170,7 +12311,7 @@ dependencies = [
  "http 1.1.0",
  "hyper 1.4.1",
  "parking_lot 0.11.2",
- "reqwest 0.12.5",
+ "reqwest 0.12.7",
  "reqwest-middleware",
  "retry-policies",
  "tokio",
@@ -12251,9 +12392,9 @@ dependencies = [

 [[package]]
 name = "rkyv"
-version = "0.7.44"
+version = "0.7.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0"
+checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b"
 dependencies = [
  "bitvec 1.0.1",
  "bytecheck",
@@ -12270,12 +12411,12 @@ dependencies = [

 [[package]]
 name = "rkyv_derive"
-version = "0.7.44"
+version = "0.7.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65"
+checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -12297,7 +12438,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a"
 dependencies = [
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "syn 1.0.109",
 ]

@@ -12325,9 +12466,9 @@ dependencies = [

 [[package]]
 name = "roaring"
-version = "0.10.3"
+version = "0.10.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf"
+checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1"
 dependencies = [
  "bytemuck",
  "byteorder",
@@ -12416,7 +12557,7 @@ checksum = "7229b505ae0706e64f37ffc54a9c163e11022a6636d58fe1f3f52018257ff9f7"
 dependencies = [
  "cfg-if",
  "proc-macro2 1.0.86",
- "quote 1.0.36",
+ "quote 1.0.37",
  "rustc_version",
  "syn 1.0.109",
  "unicode-ident",
@@ -12602,11 +12743,11 @@ dependencies = [

 [[package]]
 name = "rust_decimal"
-version = "1.35.0"
+version = "1.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a"
+checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555"
 dependencies = [
- "arrayvec 0.7.4",
+ "arrayvec 0.7.6",
  "num-traits",
 ]

@@ -12636,9 +12777,9 @@ checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6"

 [[package]]
 name = "rustc_version"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
+checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
 dependencies = [
  "semver",
 ]

@@ -12668,9 +12809,9 @@ dependencies = [

 [[package]]
 name = "rustix"
-version = "0.38.34"
+version = "0.38.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
"70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" dependencies = [ "bitflags 2.6.0", "errno", @@ -12714,7 +12855,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.7", "subtle", "zeroize", ] @@ -12729,7 +12870,7 @@ dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.7", "subtle", "zeroize", ] @@ -12748,9 +12889,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.3", @@ -12796,9 +12937,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -12854,7 +12995,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "107c3d5d7f370ac09efa62a78375f94d94b8a33c61d8c278b96683fb4dbf2d8d" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -12900,9 +13041,9 @@ version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -12944,9 +13085,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "serde_derive_internals", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -13092,9 +13233,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.207" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] @@ -13142,13 +13283,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.207" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -13158,15 +13299,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "indexmap 2.4.0", "itoa", @@ -13192,8 +13333,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -13268,8 +13409,8 @@ checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -13280,8 +13421,8 @@ checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -13340,8 +13481,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -13537,6 +13678,12 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" +[[package]] +name = "simple-server-timing-header" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e78919e05c9b8e123d435a4ad104b488ad1585631830e413830985c214086e" + [[package]] name = "simple_asn1" version = "0.6.2" @@ -13660,7 +13807,7 @@ checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -13685,7 +13832,7 @@ dependencies = [ "log", "object_store 0.10.2", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "reqwest-middleware", "reqwest-retry", "serde", @@ -13759,7 +13906,7 @@ dependencies = [ "lalrpop-util", "phf", "thiserror", - "unicode-xid 0.2.4", + "unicode-xid 0.2.5", ] [[package]] @@ -13837,7 +13984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55fe75cb4a364c7f7ae06c7dbbc8d84bddd85d6cdf9975963c3935bc1991761e" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -13849,15 +13996,15 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "stacker" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce" +checksum = "799c883d55abdb5e98af1a7b3f23b9b6de8ecada0ecac058672d7635eb48ca7b" dependencies = [ "cc", "cfg-if", "libc", "psm", - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -13947,7 +14094,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "rustversion", "syn 1.0.109", ] @@ -13960,9 +14107,9 @@ checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "rustversion", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -13973,9 +14120,9 @@ checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" 
dependencies = [ "heck 0.5.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "rustversion", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -14011,7 +14158,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "sui" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anemo", "anyhow", @@ -14053,7 +14200,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "rusoto_core", "rusoto_kms", "rustyline", @@ -14082,7 +14229,7 @@ dependencies = [ "sui-package-management", "sui-protocol-config", "sui-replay", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-simulator", "sui-source-validation", "sui-swarm", @@ -14226,7 +14373,7 @@ dependencies = [ [[package]] name = "sui-analytics-indexer" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "arrow 52.2.0", @@ -14278,16 +14425,16 @@ dependencies = [ [[package]] name = "sui-analytics-indexer-derive" -version = "1.31.1" +version = "1.32.0" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "sui-archival" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "byteorder", @@ -14347,7 +14494,7 @@ dependencies = [ "narwhal-config", "prettytable-rs", "prometheus-parse", - "reqwest 0.12.5", + "reqwest 0.12.7", "russh", "russh-keys", "serde", @@ -14395,7 +14542,7 @@ dependencies = [ "sui-macros", "sui-network", "sui-protocol-config", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-simulator", "sui-storage", "sui-surfer", @@ -14413,7 +14560,7 @@ dependencies = [ [[package]] name = "sui-bridge" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "arc-swap", @@ -14437,7 +14584,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "serde_with 3.9.0", @@ -14447,7 +14594,7 @@ dependencies = [ "sui-json-rpc-api", "sui-json-rpc-types", "sui-keys", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-test-transaction-builder", "sui-types", "tap", @@ -14462,7 +14609,7 @@ dependencies = [ [[package]] name = "sui-bridge-cli" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "clap", @@ -14470,7 +14617,7 @@ dependencies = [ "fastcrypto", "futures", "move-core-types", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "serde_with 3.9.0", @@ -14479,7 +14626,7 @@ dependencies = [ "sui-config", "sui-json-rpc-types", "sui-keys", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", "telemetry-subscribers", "tokio", @@ -14508,8 +14655,9 @@ dependencies = [ "sui-bridge", "sui-config", "sui-data-ingestion-core", + "sui-indexer-builder", "sui-json-rpc-types", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-test-transaction-builder", "sui-types", "tap", @@ -14521,7 +14669,7 @@ dependencies = [ [[package]] name = "sui-cluster-test" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-trait", @@ -14534,7 +14682,7 @@ dependencies = [ "move-core-types", "prometheus", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde_json", "shared-crypto", "sui-config", @@ -14545,7 +14693,7 @@ dependencies = [ "sui-json", "sui-json-rpc-types", "sui-keys", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-swarm", "sui-swarm-config", "sui-test-transaction-builder", @@ -14576,7 +14724,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_with 3.9.0", "serde_yaml 0.8.26", @@ -14651,7 +14799,7 @@ dependencies = [ "prometheus", "rand 
0.8.5", "rayon", - "reqwest 0.12.5", + "reqwest 0.12.7", "roaring", "rstest", "scopeguard", @@ -14675,7 +14823,6 @@ dependencies = [ "sui-move-build", "sui-network", "sui-protocol-config", - "sui-rest-api", "sui-simulator", "sui-storage", "sui-swarm-config", @@ -14720,7 +14867,7 @@ dependencies = [ [[package]] name = "sui-data-ingestion" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-trait", @@ -14782,7 +14929,7 @@ dependencies = [ [[package]] name = "sui-e2e-tests" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "assert_cmd", @@ -14826,7 +14973,7 @@ dependencies = [ "sui-node", "sui-protocol-config", "sui-rest-api", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-simulator", "sui-storage", "sui-swarm", @@ -14899,7 +15046,7 @@ dependencies = [ [[package]] name = "sui-faucet" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-recursion", @@ -14918,7 +15065,7 @@ dependencies = [ "sui-config", "sui-json-rpc-types", "sui-keys", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", "tap", "telemetry-subscribers", @@ -14955,7 +15102,7 @@ dependencies = [ [[package]] name = "sui-framework-snapshot" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "bcs", @@ -15018,9 +15165,9 @@ dependencies = [ [[package]] name = "sui-graphql-config" -version = "1.31.1" +version = "1.32.0" dependencies = [ - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -15065,7 +15212,6 @@ dependencies = [ "insta", "itertools 0.10.5", "lru 0.10.1", - "markdown-gen", "move-binary-format", "move-bytecode-utils", "move-core-types", @@ -15077,7 +15223,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "serde_with 3.9.0", @@ -15096,7 +15242,7 @@ dependencies = [ "sui-package-resolver", "sui-protocol-config", "sui-rest-api", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-swarm-config", "sui-test-transaction-builder", "sui-types", @@ -15121,7 +15267,7 @@ dependencies = [ "async-graphql", "axum 0.7.5", "hyper 1.4.1", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde_json", "sui-graphql-rpc-headers", "thiserror", @@ -15136,7 +15282,7 @@ dependencies = [ [[package]] name = "sui-indexer" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-trait", @@ -15179,7 +15325,7 @@ dependencies = [ "sui-package-resolver", "sui-protocol-config", "sui-rest-api", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-test-transaction-builder", "sui-transaction-builder", "sui-types", @@ -15194,6 +15340,21 @@ dependencies = [ "url", ] +[[package]] +name = "sui-indexer-builder" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "mysten-metrics", + "prometheus", + "sui-data-ingestion-core", + "sui-types", + "telemetry-subscribers", + "tokio", + "tracing", +] + [[package]] name = "sui-json" version = "0.0.0" @@ -15239,6 +15400,7 @@ dependencies = [ "move-core-types", "move-package", "mysten-metrics", + "mysten-service", "once_cell", "prometheus", "serde", @@ -15300,7 +15462,7 @@ dependencies = [ "move-package", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "sui-config", "sui-core", "sui-json", @@ -15313,7 +15475,7 @@ dependencies = [ "sui-open-rpc", "sui-open-rpc-macros", "sui-protocol-config", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-simulator", "sui-swarm-config", "sui-test-transaction-builder", @@ -15374,7 +15536,7 @@ dependencies = [ [[package]] name = "sui-light-client" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", 
"async-trait", @@ -15391,7 +15553,7 @@ dependencies = [ "sui-json-rpc-types", "sui-package-resolver", "sui-rest-api", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", "tokio", ] @@ -15408,7 +15570,7 @@ dependencies = [ [[package]] name = "sui-metric-checker" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "backoff", @@ -15418,7 +15580,7 @@ dependencies = [ "humantime", "once_cell", "prometheus-http-query", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_yaml 0.9.34+deprecated", "strum_macros 0.24.3", @@ -15429,7 +15591,7 @@ dependencies = [ [[package]] name = "sui-move" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "assert_cmd", @@ -15471,7 +15633,7 @@ dependencies = [ [[package]] name = "sui-move-build" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "datatest-stable", @@ -15495,7 +15657,7 @@ dependencies = [ [[package]] name = "sui-move-lsp" -version = "1.31.1" +version = "1.32.0" dependencies = [ "bin-version", "clap", @@ -15617,15 +15779,15 @@ dependencies = [ "telemetry-subscribers", "tempfile", "tokio", - "tonic 0.12.1", - "tonic-build 0.12.1", + "tonic 0.12.2", + "tonic-build 0.12.2", "tower", "tracing", ] [[package]] name = "sui-node" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anemo", "anemo-tower", @@ -15644,10 +15806,12 @@ dependencies = [ "mysten-common", "mysten-metrics", "mysten-network", + "mysten-service", "narwhal-network", "narwhal-worker", + "parking_lot 0.12.3", "prometheus", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "snap", "sui-archival", @@ -15676,7 +15840,7 @@ dependencies = [ [[package]] name = "sui-open-rpc" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "bcs", @@ -15705,14 +15869,14 @@ dependencies = [ "derive-syn-parse", "itertools 0.10.5", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", "unescape", ] [[package]] name = "sui-oracle" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "bcs", @@ -15724,7 +15888,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "shared-crypto", @@ -15732,7 +15896,7 @@ dependencies = [ "sui-json-rpc-types", "sui-keys", "sui-move-build", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", "tap", "telemetry-subscribers", @@ -15740,17 +15904,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "sui-package-dump" +version = "1.32.0" +dependencies = [ + "anyhow", + "bcs", + "cynic", + "cynic-codegen", + "fastcrypto", + "move-core-types", + "reqwest 0.12.7", + "serde", + "serde_json", + "sui-types", + "tracing", +] + [[package]] name = "sui-package-management" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "move-core-types", "move-package", "move-symbol-pool", "sui-json-rpc-types", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", + "thiserror", "tracing", ] @@ -15784,9 +15966,9 @@ version = "0.7.0" dependencies = [ "msim-macros", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "sui-enum-compat-util", - "syn 2.0.74", + "syn 2.0.76", ] [[package]] @@ -15808,7 +15990,7 @@ name = "sui-protocol-config-macros" version = "0.1.0" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -15838,7 +16020,7 @@ dependencies = [ "prost-build 0.13.1", "protobuf", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "rustls 0.23.12", "rustls-pemfile 2.1.3", "serde", @@ -15891,7 +16073,7 @@ dependencies = [ "sui-json-rpc-api", "sui-json-rpc-types", 
"sui-protocol-config", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-storage", "sui-transaction-checks", "sui-types", @@ -15919,7 +16101,7 @@ dependencies = [ "openapiv3", "prometheus", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "schemars", "serde", "serde_json", @@ -15931,11 +16113,12 @@ dependencies = [ "tap", "thiserror", "tokio", + "url", ] [[package]] name = "sui-rosetta" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-trait", @@ -15951,7 +16134,7 @@ dependencies = [ "mysten-metrics", "once_cell", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "shared-crypto", @@ -15963,7 +16146,7 @@ dependencies = [ "sui-keys", "sui-move-build", "sui-node", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-swarm-config", "sui-types", "telemetry-subscribers", @@ -15977,7 +16160,7 @@ dependencies = [ [[package]] name = "sui-rpc-loadgen" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-trait", @@ -15995,19 +16178,19 @@ dependencies = [ "sui-json-rpc", "sui-json-rpc-types", "sui-keys", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", "telemetry-subscribers", "test-cluster", "tokio", - "tonic 0.12.1", + "tonic 0.12.2", "tracing", ] [[package]] name = "sui-sdk" version = "0.0.0" -source = "git+https://github.com/mystenlabs/sui-rust-sdk.git?rev=9a125ed5764fb5bcc1acb6074064bc8f9ea85b38#9a125ed5764fb5bcc1acb6074064bc8f9ea85b38" +source = "git+https://github.com/mystenlabs/sui-rust-sdk.git?rev=bd233b6879b917fb95e17f21927c198e7a60c924#bd233b6879b917fb95e17f21927c198e7a60c924" dependencies = [ "base64ct", "bcs", @@ -16026,7 +16209,7 @@ dependencies = [ [[package]] name = "sui-sdk" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-recursion", @@ -16042,7 +16225,7 @@ dependencies = [ "jsonrpsee", "move-core-types", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "serde_with 3.9.0", @@ -16062,7 +16245,7 @@ dependencies = [ [[package]] name = "sui-security-watchdog" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "arrow-array 52.2.0", @@ -16073,7 +16256,7 @@ dependencies = [ "lexical-util", "mysten-metrics", "prometheus", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "snowflake-api", @@ -16109,7 +16292,7 @@ dependencies = [ [[package]] name = "sui-single-node-benchmark" -version = "1.31.1" +version = "1.32.0" dependencies = [ "async-trait", "bcs", @@ -16120,6 +16303,7 @@ dependencies = [ "move-bytecode-utils", "move-core-types", "move-package", + "move-symbol-pool", "once_cell", "prometheus", "serde", @@ -16171,7 +16355,7 @@ dependencies = [ [[package]] name = "sui-source-validation" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "colored", @@ -16188,7 +16372,8 @@ dependencies = [ "rand 0.8.5", "sui-json-rpc-types", "sui-move-build", - "sui-sdk 1.31.1", + "sui-package-management", + "sui-sdk 1.32.0", "sui-test-transaction-builder", "sui-types", "tar", @@ -16218,13 +16403,13 @@ dependencies = [ "move-symbol-pool", "mysten-metrics", "prometheus", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "sui", "sui-json-rpc-types", "sui-move", "sui-move-build", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-source-validation", "telemetry-subscribers", "tempfile", @@ -16273,7 +16458,7 @@ dependencies = [ "percent-encoding", "pretty_assertions", "prometheus", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "sui-config", @@ -16295,7 +16480,7 @@ dependencies = [ [[package]] name = "sui-surfer" -version = "1.31.1" +version 
= "1.32.0" dependencies = [ "async-trait", "bcs", @@ -16378,7 +16563,7 @@ dependencies = [ name = "sui-telemetry" version = "0.1.0" dependencies = [ - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "sui-core", "tracing", @@ -16393,13 +16578,13 @@ dependencies = [ "shared-crypto", "sui-genesis-builder", "sui-move-build", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-types", ] [[package]] name = "sui-test-validator" -version = "1.31.1" +version = "1.32.0" [[package]] name = "sui-tls" @@ -16413,9 +16598,9 @@ dependencies = [ "pkcs8 0.9.0", "rand 0.8.5", "rcgen", - "reqwest 0.12.5", + "reqwest 0.12.7", "rustls 0.23.12", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.7", "tokio", "tokio-rustls 0.26.0", "tower-layer", @@ -16424,7 +16609,7 @@ dependencies = [ [[package]] name = "sui-tool" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anemo", "anemo-cli", @@ -16434,7 +16619,6 @@ dependencies = [ "clap", "colored", "comfy-table", - "diesel", "eyre", "fastcrypto", "futures", @@ -16455,11 +16639,11 @@ dependencies = [ "sui-archival", "sui-config", "sui-core", - "sui-indexer", "sui-network", + "sui-package-dump", "sui-protocol-config", "sui-replay", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-snapshot", "sui-storage", "sui-types", @@ -16621,7 +16805,7 @@ dependencies = [ "tap", "thiserror", "tokio", - "tonic 0.12.1", + "tonic 0.12.2", "tracing", "typed-store-error", "url", @@ -16711,7 +16895,7 @@ dependencies = [ [[package]] name = "suins-indexer" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "async-trait", @@ -16745,7 +16929,7 @@ dependencies = [ [[package]] name = "suiop-cli" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "axum 0.7.5", @@ -16762,7 +16946,7 @@ dependencies = [ "prettytable-rs", "rand 0.8.5", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "semver", "serde", "serde_json", @@ -16821,23 +17005,23 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "10.2.1" +version = "12.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737" +checksum = "b1944ea8afd197111bca0c0edea1e1f56abb3edd030e240c1035cc0e3ff51fec" dependencies = [ "debugid", - "memmap2", + "memmap2 0.9.4", "stable_deref_trait", "uuid 1.10.0", ] [[package]] name = "symbolic-demangle" -version = "10.2.1" +version = "12.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489" +checksum = "ddaccaf1bf8e73c4f64f78dbb30aadd6965c71faa4ff3fba33f8d7296cf94a87" dependencies = [ - "cpp_demangle 0.4.3", + "cpp_demangle 0.4.4", "rustc-demangle", "symbolic-common", ] @@ -16860,18 +17044,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.74" +version = "2.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" +checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "unicode-ident", ] @@ -16886,6 +17070,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -16894,9 +17081,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", - "unicode-xid 0.2.4", + "unicode-xid 0.2.5", ] [[package]] @@ -16946,7 +17133,7 @@ dependencies = [ "cap-std", "fd-lock 4.0.2", "io-lifetimes 2.0.3", - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.52.0", "winx", ] @@ -16971,7 +17158,7 @@ dependencies = [ "heck 0.4.1", "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -17016,14 +17203,15 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "target-spec" -version = "1.4.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf4306559bd50cb358e7af5692694d6f6fad95cf2c0bea2571dd419f5298e12" +checksum = "419ccf3482090c626619fa2574290aaa00b696f9ab73af08fbf48260565431bf" dependencies = [ "cfg-expr", "guppy-workspace-hack", "serde", "target-lexicon", + "unicode-ident", ] [[package]] @@ -17060,9 +17248,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", - "fastrand 2.1.0", + "fastrand 2.1.1", "once_cell", - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.59.0", ] @@ -17141,7 +17329,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.48.0", ] @@ -17175,7 +17363,7 @@ dependencies = [ "sui-macros", "sui-node", "sui-protocol-config", - "sui-sdk 1.31.1", + "sui-sdk 1.32.0", "sui-simulator", "sui-swarm", "sui-swarm-config", @@ -17205,7 +17393,7 @@ checksum = "48db3bbc562408b2111f3a0c96ec416ffa3ab66f8a6ab42579b608b9f74744e1" dependencies = [ "cargo_metadata 0.15.4", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "serde", "strum_macros 0.24.3", ] @@ -17221,9 +17409,9 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "subprocess", - "syn 2.0.74", + "syn 2.0.76", "test-fuzz-internal", "toolchain_find", ] @@ -17269,8 +17457,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -17391,9 +17579,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", @@ -17439,8 +17627,8 @@ version = "2.2.0" source = "git+https://github.com/mystenmark/tokio-madsim-fork.git?rev=e47aafebf98e9c1734a8848a1876d5946c44bdd1#e47aafebf98e9c1734a8848a1876d5946c44bdd1" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -17450,8 +17638,8 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -17667,17 +17855,6 @@ dependencies = [ "winnow 0.5.40", ] -[[package]] -name = "toml_edit" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.4.0", - "toml_datetime 0.6.8", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.22.20" @@ -17752,16 +17929,16 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" dependencies = [ "async-stream", "async-trait", "axum 0.7.5", "base64 0.22.1", "bytes", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", @@ -17789,34 +17966,34 @@ dependencies = [ "prettyplease 0.1.25", "proc-macro2 1.0.86", "prost-build 0.11.9", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "tonic-build" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964" +checksum = "fe4ee8877250136bd7e3d2331632810a4df4ea5e004656990d8d66d2f5ee8a67" dependencies = [ - "prettyplease 0.2.20", + "prettyplease 0.2.22", "proc-macro2 1.0.86", "prost-build 0.13.1", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] name = "tonic-health" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e10e6a96ee08b6ce443487d4368442d328d0e746f3681f81127f7dc41b4955" +checksum = "ec0a34e6f706bae26b2b490e1da5c3f6a6ff87cae442bcbc7c881bab9631b5a7" dependencies = [ "async-stream", "prost 0.13.1", "tokio", "tokio-stream", - "tonic 0.12.1", + "tonic 0.12.2", ] [[package]] @@ -17927,8 +18104,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -18040,7 +18217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -18165,10 +18342,10 @@ dependencies = [ "itertools 0.10.5", "msim", "once_cell", - "ouroboros", + "ouroboros 0.17.2", "proc-macro2 1.0.86", "prometheus", - "quote 1.0.36", + "quote 1.0.37", "rand 0.8.5", "rocksdb", "rstest", @@ -18192,7 +18369,7 @@ version = "0.3.0" dependencies = [ "itertools 0.10.5", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -18214,10 +18391,10 @@ dependencies = [ "memchr", "nom", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "regex", "regex-syntax 0.7.5", - "syn 2.0.74", + "syn 2.0.76", "zstd-sys", ] @@ -18245,8 +18422,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a615d6c2764852a2e88a4f16e9ce1ea49bb776b5872956309e170d63a042a34f" dependencies = [ - 
"quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -18335,9 +18512,9 @@ checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" [[package]] name = "universal-hash" @@ -18457,7 +18634,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae2faf80ac463422992abf4de234731279c058aaf33171ca70277c98406b124" dependencies = [ - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -18513,7 +18690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e369bee1b05d510a7b4ed645f5faa90619e05437111783ea5848f28d97d3c2e" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", ] [[package]] @@ -18579,7 +18756,7 @@ dependencies = [ "io-lifetimes 2.0.3", "log", "once_cell", - "rustix 0.38.34", + "rustix 0.38.35", "system-interface", "thiserror", "tracing", @@ -18615,8 +18792,8 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", "wasm-bindgen-shared", ] @@ -18639,7 +18816,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5020cfa87c7cecefef118055d44e3c1fc122c7ec25701d528ee458a0b45f38f" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -18661,7 +18838,7 @@ version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ - "quote 1.0.36", + "quote 1.0.37", "wasm-bindgen-macro-support", ] @@ -18672,8 +18849,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -18695,9 +18872,9 @@ dependencies = [ [[package]] name = "wasm-encoder" -version = "0.215.0" +version = "0.216.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb56df3e06b8e6b77e37d2969a50ba51281029a9aeb3855e76b7f49b6418847" +checksum = "04c23aebea22c8a75833ae08ed31ccc020835b12a41999e58c31464271b94a88" dependencies = [ "leb128", ] @@ -18770,7 +18947,7 @@ dependencies = [ "enumset", "lazy_static", "leb128", - "memmap2", + "memmap2 0.5.10", "more-asserts 0.2.2", "region", "smallvec", @@ -18808,7 +18985,7 @@ checksum = "97901fdbaae383dbb90ea162cc3a76a9fa58ac39aec7948b4c0b9bbef9307738" dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -18909,7 +19086,7 @@ dependencies = [ "once_cell", "paste", "rayon", - "rustix 0.38.34", + "rustix 0.38.35", "serde", "serde_derive", "serde_json", @@ -18950,7 +19127,7 @@ dependencies = [ "bincode", "directories-next", "log", - "rustix 0.38.34", + "rustix 0.38.35", "serde", "serde_derive", "sha2 0.10.8", @@ -18967,8 +19144,8 @@ checksum = "dc6aca484581f9651886dca45f9dea893e105713b58623d14b06c56d8fe3f3f1" dependencies = [ "anyhow", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", "wasmtime-component-util", "wasmtime-wit-bindgen", "wit-parser", @@ 
-19056,7 +19233,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "rustix 0.38.34", + "rustix 0.38.35", "wasmtime-asm-macros", "wasmtime-versioned-export-macros", "windows-sys 0.52.0", @@ -19070,7 +19247,7 @@ checksum = "4b0462a46b80d2352ee553b17d626b6468e9cec2220cc58ac31754fd7b58245e" dependencies = [ "object", "once_cell", - "rustix 0.38.34", + "rustix 0.38.35", "wasmtime-versioned-export-macros", ] @@ -19103,7 +19280,7 @@ dependencies = [ "memoffset 0.9.1", "paste", "psm", - "rustix 0.38.34", + "rustix 0.38.35", "sptr", "wasm-encoder 0.41.2", "wasmtime-asm-macros", @@ -19135,8 +19312,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5399c175ddba4a471b9da45105dea3493059d52b2d54860eadb0df04c813948d" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -19160,7 +19337,7 @@ dependencies = [ "io-lifetimes 2.0.3", "log", "once_cell", - "rustix 0.38.34", + "rustix 0.38.35", "system-interface", "thiserror", "tokio", @@ -19218,24 +19395,24 @@ dependencies = [ [[package]] name = "wast" -version = "215.0.0" +version = "216.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff1d00d893593249e60720be04a7c1f42f1c4dc3806a2869f4e66ab61eb54cb" +checksum = "f7eb1f2eecd913fdde0dc6c3439d0f24530a98ac6db6cb3d14d92a5328554a08" dependencies = [ "bumpalo", "leb128", "memchr", "unicode-width", - "wasm-encoder 0.215.0", + "wasm-encoder 0.216.0", ] [[package]] name = "wat" -version = "1.215.0" +version = "1.216.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670bf4d9c8cf76ae242d70ded47c546525b6dafaa6871f9bcb065344bf2b4e3d" +checksum = "ac0409090fb5154f95fb5ba3235675fd9e579e731524d63b6a2f653e1280c82a" dependencies = [ - "wast 215.0.0", + "wast 216.0.0", ] [[package]] @@ -19291,7 +19468,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.34", + "rustix 0.38.35", ] [[package]] @@ -19335,9 +19512,9 @@ dependencies = [ "anyhow", "heck 0.4.1", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "shellexpand 2.1.2", - "syn 2.0.74", + "syn 2.0.76", "witx", ] @@ -19348,8 +19525,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93e43fc332703d1ec3aa86a5ce8bb49e6b95b6c617b90e726d3e70a0f70f48a5" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", "wiggle-generate", ] @@ -19409,6 +19586,36 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.33.0" @@ -19628,16 +19835,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "winx" version = "0.36.3" @@ -19662,7 +19859,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "unicode-xid 0.2.4", + "unicode-xid 0.2.5", ] [[package]] @@ -19713,7 +19910,7 @@ dependencies = [ [[package]] name = "x" -version = "1.31.1" +version = "1.32.0" dependencies = [ "anyhow", "camino", @@ -19748,7 +19945,7 @@ checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" dependencies = [ "libc", "linux-raw-sys 0.4.14", - "rustix 0.38.34", + "rustix 0.38.35", ] [[package]] @@ -19778,6 +19975,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "yasna" version = "0.5.2" @@ -19833,8 +20036,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] @@ -19853,8 +20056,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.74", + "quote 1.0.37", + "syn 2.0.76", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index eec37bc5d4b3f..c2d8ab6c2fd85 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,205 +2,206 @@ resolver = "2" exclude = [ - "examples/tic-tac-toe/cli", - "external-crates/move/crates/bytecode-interpreter-crypto", - "external-crates/move/crates/bytecode-verifier-libfuzzer", - "external-crates/move/crates/bytecode-verifier-tests", - "external-crates/move/crates/bytecode-verifier-prop-tests", - "external-crates/move/crates/bytecode-verifier-transactional-tests", - "external-crates/move/crates/enum-compat-util", - "external-crates/move/crates/invalid-mutations", - "external-crates/move/crates/language-benchmarks", - "external-crates/move/crates/module-generation", - "external-crates/move/crates/move-abstract-interpreter", - "external-crates/move/crates/move-abstract-stack", - "external-crates/move/crates/move-analyzer", - "external-crates/move/crates/move-binary-format", - "external-crates/move/crates/move-borrow-graph", - "external-crates/move/crates/move-bytecode-source-map", - "external-crates/move/crates/move-bytecode-utils", - "external-crates/move/crates/move-bytecode-verifier", - "external-crates/move/crates/move-bytecode-verifier-meter", - "external-crates/move/crates/move-bytecode-viewer", - "external-crates/move/crates/move-cli", - "external-crates/move/crates/move-command-line-common", - "external-crates/move/crates/move-compiler", - "external-crates/move/crates/move-compiler-transactional-tests", - "external-crates/move/crates/move-core-types", - "external-crates/move/crates/move-coverage", - "external-crates/move/crates/move-disassembler", - "external-crates/move/crates/move-docgen", - "external-crates/move/crates/move-ir-compiler", - "external-crates/move/crates/move-ir-compiler-transactional-tests", - 
"external-crates/move/crates/move-ir-to-bytecode", - "external-crates/move/crates/move-ir-to-bytecode-syntax", - "external-crates/move/crates/move-ir-types", - "external-crates/move/crates/move-model", - "external-crates/move/crates/move-package", - "external-crates/move/crates/move-proc-macros", - "external-crates/move/crates/move-prover", - "external-crates/move/crates/move-prover-test-utils", - "external-crates/move/crates/move-stackless-bytecode", - "external-crates/move/crates/move-stdlib", - "external-crates/move/crates/move-stdlib-natives", - "external-crates/move/crates/move-symbol-pool", - "external-crates/move/crates/move-transactional-test-runner", - "external-crates/move/crates/move-unit-test", - "external-crates/move/crates/move-vm-config", - "external-crates/move/crates/move-vm-integration-tests", - "external-crates/move/crates/move-vm-profiler", - "external-crates/move/crates/move-vm-runtime", - "external-crates/move/crates/move-vm-test-utils", - "external-crates/move/crates/move-vm-transactional-tests", - "external-crates/move/crates/move-vm-types", - "external-crates/move/crates/serializer-tests", - "external-crates/move/crates/test-generation", - "external-crates/move/move-execution/v0/crates/move-bytecode-verifier", - "external-crates/move/move-execution/v0/crates/move-stdlib-natives", - "external-crates/move/move-execution/v0/crates/move-vm-runtime", - "external-crates/move/move-execution/v1/crates/move-bytecode-verifier", - "external-crates/move/move-execution/v1/crates/move-stdlib-natives", - "external-crates/move/move-execution/v1/crates/move-vm-runtime", - "external-crates/move/move-execution/v2/crates/move-abstract-interpreter", - "external-crates/move/move-execution/v2/crates/move-bytecode-verifier", - "external-crates/move/move-execution/v2/crates/move-stdlib-natives", - "external-crates/move/move-execution/v2/crates/move-vm-runtime", - "sdk/move-bytecode-template", + "examples/tic-tac-toe/cli", + "external-crates/move/crates/bytecode-interpreter-crypto", + "external-crates/move/crates/bytecode-verifier-libfuzzer", + "external-crates/move/crates/bytecode-verifier-tests", + "external-crates/move/crates/bytecode-verifier-prop-tests", + "external-crates/move/crates/bytecode-verifier-transactional-tests", + "external-crates/move/crates/enum-compat-util", + "external-crates/move/crates/invalid-mutations", + "external-crates/move/crates/language-benchmarks", + "external-crates/move/crates/module-generation", + "external-crates/move/crates/move-abstract-interpreter", + "external-crates/move/crates/move-abstract-stack", + "external-crates/move/crates/move-analyzer", + "external-crates/move/crates/move-binary-format", + "external-crates/move/crates/move-borrow-graph", + "external-crates/move/crates/move-bytecode-source-map", + "external-crates/move/crates/move-bytecode-utils", + "external-crates/move/crates/move-bytecode-verifier", + "external-crates/move/crates/move-bytecode-verifier-meter", + "external-crates/move/crates/move-bytecode-viewer", + "external-crates/move/crates/move-cli", + "external-crates/move/crates/move-command-line-common", + "external-crates/move/crates/move-compiler", + "external-crates/move/crates/move-compiler-transactional-tests", + "external-crates/move/crates/move-core-types", + "external-crates/move/crates/move-coverage", + "external-crates/move/crates/move-disassembler", + "external-crates/move/crates/move-docgen", + "external-crates/move/crates/move-ir-compiler", + "external-crates/move/crates/move-ir-compiler-transactional-tests", + 
"external-crates/move/crates/move-ir-to-bytecode", + "external-crates/move/crates/move-ir-to-bytecode-syntax", + "external-crates/move/crates/move-ir-types", + "external-crates/move/crates/move-model", + "external-crates/move/crates/move-package", + "external-crates/move/crates/move-proc-macros", + "external-crates/move/crates/move-prover", + "external-crates/move/crates/move-prover-test-utils", + "external-crates/move/crates/move-stackless-bytecode", + "external-crates/move/crates/move-stdlib", + "external-crates/move/crates/move-stdlib-natives", + "external-crates/move/crates/move-symbol-pool", + "external-crates/move/crates/move-transactional-test-runner", + "external-crates/move/crates/move-unit-test", + "external-crates/move/crates/move-vm-config", + "external-crates/move/crates/move-vm-integration-tests", + "external-crates/move/crates/move-vm-profiler", + "external-crates/move/crates/move-vm-runtime", + "external-crates/move/crates/move-vm-test-utils", + "external-crates/move/crates/move-vm-transactional-tests", + "external-crates/move/crates/move-vm-types", + "external-crates/move/crates/serializer-tests", + "external-crates/move/crates/test-generation", + "external-crates/move/move-execution/v0/crates/move-bytecode-verifier", + "external-crates/move/move-execution/v0/crates/move-stdlib-natives", + "external-crates/move/move-execution/v0/crates/move-vm-runtime", + "external-crates/move/move-execution/v1/crates/move-bytecode-verifier", + "external-crates/move/move-execution/v1/crates/move-stdlib-natives", + "external-crates/move/move-execution/v1/crates/move-vm-runtime", + "external-crates/move/move-execution/v2/crates/move-abstract-interpreter", + "external-crates/move/move-execution/v2/crates/move-bytecode-verifier", + "external-crates/move/move-execution/v2/crates/move-stdlib-natives", + "external-crates/move/move-execution/v2/crates/move-vm-runtime", + "sdk/move-bytecode-template", ] members = [ - "consensus/config", - "consensus/core", - "crates/anemo-benchmark", - "crates/bin-version", - "crates/mamoru-sui-sniffer", - "crates/mysten-common", - "crates/mysten-metrics", - "crates/mysten-network", - "crates/mysten-service", - "crates/mysten-util-mem", - "crates/mysten-util-mem-derive", - "crates/prometheus-closure-metric", - "crates/shared-crypto", - "crates/simulacrum", - "crates/sui", - "crates/sui-adapter-transactional-tests", - "crates/sui-analytics-indexer", - "crates/sui-analytics-indexer-derive", - "crates/sui-archival", - "crates/sui-authority-aggregation", - "crates/sui-aws-orchestrator", - "crates/sui-benchmark", - "crates/sui-bridge", - "crates/sui-bridge-cli", - "crates/sui-bridge-indexer", - "crates/sui-cluster-test", - "crates/sui-config", - "crates/sui-core", - "crates/sui-cost", - "crates/sui-data-ingestion", - "crates/sui-data-ingestion-core", - "crates/sui-e2e-tests", - "crates/sui-enum-compat-util", - "crates/sui-faucet", - "crates/sui-framework", - "crates/sui-framework-snapshot", - "crates/sui-framework-tests", - "crates/sui-genesis-builder", - "crates/sui-graphql-config", - "crates/sui-graphql-e2e-tests", - "crates/sui-graphql-rpc", - "crates/sui-graphql-rpc-client", - "crates/sui-graphql-rpc-headers", - "crates/sui-indexer", - "crates/sui-json", - "crates/sui-json-rpc", - "crates/sui-json-rpc-api", - "crates/sui-json-rpc-tests", - "crates/sui-json-rpc-types", - "crates/sui-keys", - "crates/sui-light-client", - "crates/sui-macros", - "crates/sui-metric-checker", - "crates/sui-move", - "crates/sui-move-build", - "crates/sui-move-lsp", - "crates/sui-network", 
- "crates/sui-node", - "crates/sui-open-rpc", - "crates/sui-open-rpc-macros", - "crates/sui-oracle", - "crates/sui-package-management", - "crates/sui-package-resolver", - "crates/sui-proc-macros", - "crates/sui-protocol-config", - "crates/sui-protocol-config-macros", - "crates/sui-proxy", - "crates/sui-replay", - "crates/sui-rest-api", - "crates/sui-rosetta", - "crates/sui-rpc-loadgen", - "crates/sui-sdk", - "crates/sui-security-watchdog", - "crates/sui-simulator", - "crates/sui-single-node-benchmark", - "crates/sui-snapshot", - "crates/sui-source-validation", - "crates/sui-source-validation-service", - "crates/sui-storage", - "crates/sui-surfer", - "crates/sui-swarm", - "crates/sui-swarm-config", - "crates/sui-telemetry", - "crates/sui-test-transaction-builder", - "crates/sui-test-validator", - "crates/sui-tls", - "crates/sui-tool", - "crates/sui-transaction-builder", - "crates/sui-transaction-checks", - "crates/sui-transactional-test-runner", - "crates/sui-types", - "crates/sui-upgrade-compatibility-transactional-tests", - "crates/sui-verifier-transactional-tests", - "crates/suins-indexer", - "crates/suiop-cli", - "crates/telemetry-subscribers", - "crates/test-cluster", - "crates/transaction-fuzzer", - "crates/typed-store", - "crates/typed-store-derive", - "crates/typed-store-error", - "crates/typed-store-workspace-hack", - "crates/x", - "narwhal/config", - "narwhal/crypto", - "narwhal/executor", - "narwhal/network", - "narwhal/node", - "narwhal/primary", - "narwhal/storage", - "narwhal/test-utils", - "narwhal/types", - "narwhal/worker", - "sui-execution", - "sui-execution/cut", - "sui-execution/latest/sui-adapter", - "sui-execution/latest/sui-move-natives", - "sui-execution/latest/sui-verifier", - "sui-execution/v0/sui-adapter", - "sui-execution/v0/sui-move-natives", - "sui-execution/v0/sui-verifier", - "sui-execution/v1/sui-adapter", - "sui-execution/v1/sui-move-natives", - "sui-execution/v1/sui-verifier", - "sui-execution/v2/sui-adapter", - "sui-execution/v2/sui-move-natives", - "sui-execution/v2/sui-verifier", + "consensus/config", + "consensus/core", + "crates/anemo-benchmark", + "crates/bin-version", + "crates/mysten-common", + "crates/mysten-metrics", + "crates/mysten-network", + "crates/mysten-service", + "crates/mysten-util-mem", + "crates/mysten-util-mem-derive", + "crates/prometheus-closure-metric", + "crates/shared-crypto", + "crates/simulacrum", + "crates/sui", + "crates/sui-adapter-transactional-tests", + "crates/sui-analytics-indexer", + "crates/sui-analytics-indexer-derive", + "crates/sui-archival", + "crates/sui-authority-aggregation", + "crates/sui-aws-orchestrator", + "crates/sui-benchmark", + "crates/sui-bridge", + "crates/sui-bridge-cli", + "crates/sui-bridge-indexer", + "crates/sui-cluster-test", + "crates/sui-config", + "crates/sui-core", + "crates/sui-cost", + "crates/sui-data-ingestion", + "crates/sui-data-ingestion-core", + "crates/sui-e2e-tests", + "crates/sui-enum-compat-util", + "crates/sui-faucet", + "crates/sui-framework", + "crates/sui-framework-snapshot", + "crates/sui-framework-tests", + "crates/sui-genesis-builder", + "crates/sui-graphql-config", + "crates/sui-graphql-e2e-tests", + "crates/sui-graphql-rpc", + "crates/sui-graphql-rpc-client", + "crates/sui-graphql-rpc-headers", + "crates/sui-indexer", + "crates/sui-indexer-builder", + "crates/sui-json", + "crates/sui-json-rpc", + "crates/sui-json-rpc-api", + "crates/sui-json-rpc-tests", + "crates/sui-json-rpc-types", + "crates/sui-keys", + "crates/sui-light-client", + "crates/sui-macros", + 
"crates/sui-metric-checker", + "crates/sui-move", + "crates/sui-move-build", + "crates/sui-move-lsp", + "crates/sui-network", + "crates/sui-node", + "crates/sui-open-rpc", + "crates/sui-open-rpc-macros", + "crates/sui-oracle", + "crates/sui-package-dump", + "crates/sui-package-management", + "crates/sui-package-resolver", + "crates/sui-proc-macros", + "crates/sui-protocol-config", + "crates/sui-protocol-config-macros", + "crates/sui-proxy", + "crates/sui-replay", + "crates/sui-rest-api", + "crates/sui-rosetta", + "crates/sui-rpc-loadgen", + "crates/sui-sdk", + "crates/sui-security-watchdog", + "crates/sui-simulator", + "crates/sui-single-node-benchmark", + "crates/sui-snapshot", + "crates/sui-source-validation", + "crates/sui-source-validation-service", + "crates/sui-storage", + "crates/sui-surfer", + "crates/sui-swarm", + "crates/sui-swarm-config", + "crates/sui-telemetry", + "crates/sui-test-transaction-builder", + "crates/sui-test-validator", + "crates/sui-tls", + "crates/sui-tool", + "crates/sui-transaction-builder", + "crates/sui-transaction-checks", + "crates/sui-transactional-test-runner", + "crates/sui-types", + "crates/sui-upgrade-compatibility-transactional-tests", + "crates/sui-verifier-transactional-tests", + "crates/suins-indexer", + "crates/suiop-cli", + "crates/telemetry-subscribers", + "crates/test-cluster", + "crates/transaction-fuzzer", + "crates/typed-store", + "crates/typed-store-derive", + "crates/typed-store-error", + "crates/typed-store-workspace-hack", + "crates/x", + "narwhal/config", + "narwhal/crypto", + "narwhal/executor", + "narwhal/network", + "narwhal/node", + "narwhal/primary", + "narwhal/storage", + "narwhal/test-utils", + "narwhal/types", + "narwhal/worker", + "sui-execution", + "sui-execution/cut", + "sui-execution/latest/sui-adapter", + "sui-execution/latest/sui-move-natives", + "sui-execution/latest/sui-verifier", + "sui-execution/v0/sui-adapter", + "sui-execution/v0/sui-move-natives", + "sui-execution/v0/sui-verifier", + "sui-execution/v1/sui-adapter", + "sui-execution/v1/sui-move-natives", + "sui-execution/v1/sui-verifier", + "sui-execution/v2/sui-adapter", + "sui-execution/v2/sui-move-natives", + "sui-execution/v2/sui-verifier", ] [workspace.package] # This version string will be inherited by sui-core, sui-faucet, sui-node, sui-tools, sui-sdk, sui-move-build, and sui crates. -version = "1.31.1" +version = "1.32.0" [profile.release] # debug = 1 means line charts only, which is minimum needed for good stack traces @@ -236,6 +237,9 @@ overflow-checks = true # opt-level 1 gives >5x speedup for simulator tests without slowing down build times very much. 
opt-level = 1 +[workspace.lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(msim)', 'cfg(fail_points)'] } + # Dependencies that should be kept in sync through the whole workspace [workspace.dependencies] anyhow = "1.0.71" @@ -256,26 +260,26 @@ aws-sdk-s3 = "0.29.0" aws-smithy-http = "0.56" aws-smithy-runtime-api = "0.56" axum = { version = "0.7", default-features = false, features = [ - "tokio", - "http1", - "http2", - "json", - "matched-path", - "original-uri", - "form", - "query", - "ws", + "tokio", + "http1", + "http2", + "json", + "matched-path", + "original-uri", + "form", + "query", + "ws", ] } axum-extra = { version = "0.9", features = ["typed-header"] } axum-server = { git = "https://github.com/bmwill/axum-server.git", rev = "f44323e271afdd1365fd0c8b0a4c0bbdf4956cb7", version = "0.6", default-features = false, features = [ - "tls-rustls", + "tls-rustls", ] } backoff = { version = "0.4.0", features = [ - "futures", - "futures-core", - "pin-project-lite", - "tokio", - "tokio_1", + "futures", + "futures-core", + "pin-project-lite", + "tokio", + "tokio_1", ] } base64 = "0.21.2" base64-url = "2" @@ -299,12 +303,14 @@ console-subscriber = "0.2" const-str = "0.5.3" count-min-sketch = "0.1.7" criterion = { version = "0.5.0", features = [ - "async", - "async_tokio", - "html_reports", + "async", + "async_tokio", + "html_reports", ] } crossterm = "0.25.0" csv = "1.2.1" +cynic = { version = "3.7.3", features = ["http-reqwest"] } +cynic-codegen = "= 3.7.3" dashmap = "5.5.3" # datatest-stable = "0.1.2" datatest-stable = { git = "https://github.com/nextest-rs/datatest-stable.git", rev = "72db7f6d1bbe36a5407e96b9488a581f763e106f" } @@ -313,11 +319,11 @@ derive-syn-parse = "0.1.5" derive_builder = "0.12.0" derive_more = "0.99.17" diesel = { version = "2.1.0", features = [ - "chrono", - "r2d2", - "serde_json", - "64-column-tables", - "i-implement-a-third-party-backend-and-opt-into-breaking-changes", + "chrono", + "r2d2", + "serde_json", + "64-column-tables", + "i-implement-a-third-party-backend-and-opt-into-breaking-changes", ] } diesel-derive-enum = { version = "2.0.1" } diesel_migrations = { version = "2.0.0" } @@ -342,7 +348,6 @@ hashbrown = "0.12" hdrhistogram = "7.5.1" hex = "0.4.3" hex-literal = "0.3.4" -highlight = "all" http = "1" http-body = "1" humantime = "2.1.0" @@ -360,30 +365,29 @@ ipnetwork = "0.20.0" itertools = "0.10.5" jemalloc-ctl = "^0.5" jsonrpsee = { git = "https://github.com/wlmyng/jsonrpsee.git", rev = "b1b300784795f6a64d0fcdf8f03081a9bc38bde8", features = [ - "server", - "macros", - "ws-client", - "http-client", - "jsonrpsee-core", + "server", + "macros", + "ws-client", + "http-client", + "jsonrpsee-core", ] } json_to_table = { git = "https://github.com/zhiburt/tabled/", rev = "e449317a1c02eb6b29e409ad6617e5d9eb7b3bd4" } leb128 = "0.2.5" lru = "0.10" -markdown-gen = "1.2.1" match_opt = "0.1.2" miette = { version = "7", features = ["fancy"] } mime = "0.3" mockall = "0.11.4" moka = { version = "0.12", default-features = false, features = [ - "sync", - "atomic64", + "sync", + "atomic64", ] } more-asserts = "0.3.1" -msim = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "220f52a15804a768610ac0ae3b8da7de4a5c4d2b", package = "msim" } -msim-macros = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "220f52a15804a768610ac0ae3b8da7de4a5c4d2b", package = "msim-macros" } +msim = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "b320996d8dfb99b273fe31c0222c659332283c99", package = "msim" } +msim-macros = { git = 
"https://github.com/MystenLabs/mysten-sim.git", rev = "b320996d8dfb99b273fe31c0222c659332283c99", package = "msim-macros" } multiaddr = "0.17.0" -nexlint = { git = "https://github.com/nextest-rs/nexlint.git", rev = "94da5c787636dad779c340affa65219134d127f5" } -nexlint-lints = { git = "https://github.com/nextest-rs/nexlint.git", rev = "94da5c787636dad779c340affa65219134d127f5" } +nexlint = { git = "https://github.com/nextest-rs/nexlint.git", rev = "7ce56bd591242a57660ed05f14ca2483c37d895b" } +nexlint-lints = { git = "https://github.com/nextest-rs/nexlint.git", rev = "7ce56bd591242a57660ed05f14ca2483c37d895b" } nonempty = "0.9.0" notify = "6.1.1" ntest = "0.9.0" @@ -396,13 +400,13 @@ ouroboros = "0.17" parking_lot = "0.12.1" parquet = "52" pkcs8 = { version = "0.9.0", features = ["std"] } -pprof = { version = "0.11.0", features = ["cpp", "frame-pointer"] } +pprof = { version = "0.13.0", features = ["cpp", "frame-pointer"] } pretty_assertions = "1.3.0" prettytable-rs = "0.10.0" proc-macro2 = "1.0.47" prometheus = "0.13.3" -prometheus-http-query = { version = "0.8", default_features = false, features = [ - "rustls-tls", +prometheus-http-query = { version = "0.8", default-features = false, features = [ + "rustls-tls", ] } prometheus-parse = { git = "https://github.com/asonnino/prometheus-parser.git", rev = "75334db" } proptest = "1.1.0" @@ -410,25 +414,25 @@ proptest-derive = "0.3.0" prost = "0.13" prost-build = "0.13" protobuf = { version = "2.28", features = ["with-bytes"] } -quinn-proto = "0.11" +quinn-proto = "0.11.6" quote = "1.0.23" rand = "0.8.5" rayon = "1.5.3" rcgen = "0.13" regex = "1.7.1" -reqwest = { version = "0.12", default_features = false, features = [ - "http2", - "json", - "rustls-tls", +reqwest = { version = "0.12", default-features = false, features = [ + "http2", + "json", + "rustls-tls", ] } -roaring = "=0.10.3" +roaring = "0.10.6" ron = "0.8.0" rstest = "0.16.0" -rusoto_core = { version = "0.48.0", default_features = false, features = [ - "rustls", +rusoto_core = { version = "0.48.0", default-features = false, features = [ + "rustls", ] } -rusoto_kms = { version = "0.48.0", default_features = false, features = [ - "rustls", +rusoto_kms = { version = "0.48.0", default-features = false, features = [ + "rustls", ] } russh = "0.38.0" russh-keys = "0.38.0" @@ -454,6 +458,7 @@ shell-words = "1.1.0" shellexpand = "3.1.0" signature = "1.6.0" similar = "2.4.0" +simple-server-timing-header = "0.1.1" slip10_ed25519 = "0.1.3" smallvec = "1.10.0" snap = "1.1.0" @@ -486,32 +491,32 @@ tonic = { version = "0.12", features = ["transport"] } tonic-build = { version = "0.12", features = ["prost", "transport"] } tonic-health = "0.12" tower = { version = "0.4.12", features = [ - "full", - "util", - "timeout", - "load-shed", - "limit", + "full", + "util", + "timeout", + "load-shed", + "limit", ] } tower-http = { version = "0.5", features = [ - "cors", - "full", - "trace", - "set-header", - "propagate-header", + "cors", + "full", + "trace", + "set-header", + "propagate-header", ] } tower-layer = "0.3.2" twox-hash = "1.6.3" tracing = "0.1.37" tracing-appender = "0.2.2" tracing-subscriber = { version = "0.3.15", default-features = false, features = [ - "std", - "smallvec", - "fmt", - "ansi", - "time", - "json", - "registry", - "env-filter", + "std", + "smallvec", + "fmt", + "ansi", + "time", + "json", + "registry", + "env-filter", ] } ttl_cache = "0.5.1" uint = "0.9.4" @@ -520,8 +525,8 @@ ureq = "2.9.1" url = "2.3.1" uuid = { version = "1.1.2", features = ["v4", "fast-rng"] } webpki = { version = 
"0.102", package = "rustls-webpki", features = [ - "alloc", - "std", + "alloc", + "std", ] } x509-parser = "0.14.0" zstd = "0.12.3" @@ -543,7 +548,7 @@ move-package = { path = "external-crates/move/crates/move-package" } move-unit-test = { path = "external-crates/move/crates/move-unit-test" } move-vm-config = { path = "external-crates/move/crates/move-vm-config" } move-vm-test-utils = { path = "external-crates/move/crates/move-vm-test-utils/", features = [ - "tiered-gas", + "tiered-gas", ] } move-vm-types = { path = "external-crates/move/crates/move-vm-types" } move-vm-profiler = { path = "external-crates/move/crates/move-vm-profiler" } @@ -562,7 +567,6 @@ mamoru-sui-types = { git = "https://github.com/Mamoru-Foundation/mamoru-core", r #mamoru-sniffer = { path = "../mamoru-core/mamoru-sniffer" } #mamoru-sui-types = { path = "../mamoru-core/blockchain-types/mamoru-sui-types" } - fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "5f2c63266a065996d53f98156f0412782b468597" } fastcrypto-tbls = { git = "https://github.com/MystenLabs/fastcrypto", rev = "5f2c63266a065996d53f98156f0412782b468597" } fastcrypto-zkp = { git = "https://github.com/MystenLabs/fastcrypto", rev = "5f2c63266a065996d53f98156f0412782b468597", package = "fastcrypto-zkp" } @@ -580,7 +584,7 @@ anemo-cli = { git = "https://github.com/mystenlabs/anemo.git", rev = "dbb5a074c2 anemo-tower = { git = "https://github.com/mystenlabs/anemo.git", rev = "dbb5a074c2d25660525ab5d36d65ff0cb8051949" } # core-types with json format for REST api -sui-sdk2 = { package = "sui-sdk", git = "https://github.com/mystenlabs/sui-rust-sdk.git", rev = "9a125ed5764fb5bcc1acb6074064bc8f9ea85b38", features = ["hash", "serde", "schemars"] } +sui-sdk2 = { package = "sui-sdk", git = "https://github.com/mystenlabs/sui-rust-sdk.git", rev = "bd233b6879b917fb95e17f21927c198e7a60c924", features = ["hash", "serde", "schemars"] } ### Workspace Members ### anemo-benchmark = { path = "crates/anemo-benchmark" } @@ -620,6 +624,7 @@ sui-graphql-rpc-client = { path = "crates/sui-graphql-rpc-client" } sui-graphql-rpc-headers = { path = "crates/sui-graphql-rpc-headers" } sui-genesis-builder = { path = "crates/sui-genesis-builder" } sui-indexer = { path = "crates/sui-indexer" } +sui-indexer-builder = { path = "crates/sui-indexer-builder" } sui-json = { path = "crates/sui-json" } sui-json-rpc = { path = "crates/sui-json-rpc" } sui-json-rpc-api = { path = "crates/sui-json-rpc-api" } @@ -634,6 +639,7 @@ sui-network = { path = "crates/sui-network" } sui-node = { path = "crates/sui-node" } sui-open-rpc = { path = "crates/sui-open-rpc" } sui-open-rpc-macros = { path = "crates/sui-open-rpc-macros" } +sui-package-dump = { path = "crates/sui-package-dump" } sui-package-management = { path = "crates/sui-package-management" } sui-package-resolver = { path = "crates/sui-package-resolver" } sui-proc-macros = { path = "crates/sui-proc-macros" } @@ -701,4 +707,3 @@ spinners = "4.1.0" include_dir = "0.7.3" [patch.crates-io] -quinn-proto = { git = "https://github.com/quinn-rs/quinn.git", rev = "f0fa66f871b80b9d2d7075d76967c649aecc0b77" } diff --git a/apps/wallet/src/ui/app/components/address-input/index.tsx b/apps/wallet/src/ui/app/components/address-input/index.tsx index b4aa1cc01ed0d..115ee0195919a 100644 --- a/apps/wallet/src/ui/app/components/address-input/index.tsx +++ b/apps/wallet/src/ui/app/components/address-input/index.tsx @@ -72,7 +72,7 @@ export function AddressInput({ refetchInterval: false, }); - const { isSubmitting, setFieldValue } = useFormikContext(); + 
const { isSubmitting, setFieldValue, isValidating } = useFormikContext(); const suiAddressValidation = useSuiAddressValidation(); const disabled = forcedDisabled !== undefined ? forcedDisabled : isSubmitting; @@ -92,7 +92,7 @@ export function AddressInput({ setFieldValue('to', ''); }, [setFieldValue]); - const hasWarningOrError = meta.touched && (meta.error || warningData); + const hasWarningOrError = meta.touched && (meta.error || warningData) && !isValidating; return ( <> @@ -132,7 +132,7 @@ export function AddressInput({ - {meta.touched ? ( + {meta.touched && !isValidating ? (
{warningData === RecipientWarningType.OBJECT ? ( diff --git a/apps/wallet/src/ui/app/components/address-input/validation.ts b/apps/wallet/src/ui/app/components/address-input/validation.ts index 4c4a133a79d28..26b28ef6b9b30 100644 --- a/apps/wallet/src/ui/app/components/address-input/validation.ts +++ b/apps/wallet/src/ui/app/components/address-input/validation.ts @@ -8,9 +8,11 @@ import { isValidSuiAddress, isValidSuiNSName } from '@mysten/sui/utils'; import { useMemo } from 'react'; import * as Yup from 'yup'; +const CACHE_EXPIRY_TIME = 60 * 1000; // 1 minute in milliseconds + export function createSuiAddressValidation(client: SuiClient, suiNSEnabled: boolean) { - const resolveCache = new Map(); + const resolveCache = new Map<string, { valid: boolean; expiry: number }>(); return Yup.string() .ensure() .trim() @@ -18,14 +20,23 @@ export function createSuiAddressValidation(client: SuiClient, suiNSEnabled: bool .test('is-sui-address', 'Invalid address. Please check again.', async (value) => { if (suiNSEnabled && isValidSuiNSName(value)) { + const currentTime = Date.now(); // Read the clock per validation run, not once at cache creation, so entries can actually expire. if (resolveCache.has(value)) { - return resolveCache.get(value)!; + const cachedEntry = resolveCache.get(value)!; + if (currentTime < cachedEntry.expiry) { + return cachedEntry.valid; + } else { + resolveCache.delete(value); // Remove expired entry + } } const address = await client.resolveNameServiceAddress({ name: value, }); - resolveCache.set(value, !!address); + resolveCache.set(value, { + valid: !!address, + expiry: currentTime + CACHE_EXPIRY_TIME, + }); return !!address; } diff --git a/apps/wallet/src/ui/app/pages/home/nfts/VisualAssets.tsx b/apps/wallet/src/ui/app/pages/home/nfts/VisualAssets.tsx index d1327c728ac06..ccc3cba329863 100644 --- a/apps/wallet/src/ui/app/pages/home/nfts/VisualAssets.tsx +++ b/apps/wallet/src/ui/app/pages/home/nfts/VisualAssets.tsx @@ -65,7 +65,7 @@ export default function VisualAssets({ items }: { items: SuiObjectData[] }) {
item?.objectType === object.type)} + hideLabel objectId={object.objectId} size="lg" animateHover diff --git a/apps/wallet/vitest.config.ts b/apps/wallet/vitest.config.ts index 9623017969816..22ecf350712f4 100644 --- a/apps/wallet/vitest.config.ts +++ b/apps/wallet/vitest.config.ts @@ -10,7 +10,6 @@ export default defineConfig({ exclude: [...configDefaults.exclude, 'tests/**'], // TODO: Create custom extension environment. environment: 'happy-dom', - minThreads: 1, setupFiles: ['./testSetup.ts'], restoreMocks: true, }, diff --git a/bridge/evm/.gitignore b/bridge/evm/.gitignore index b03db232ccd9e..eede9111c4f1a 100644 --- a/bridge/evm/.gitignore +++ b/bridge/evm/.gitignore @@ -10,5 +10,4 @@ out*/ lcov.info broadcast/**/31337 -lib/* - +dependencies diff --git a/bridge/evm/README.md b/bridge/evm/README.md index 97cbdb270a45c..94667326299ca 100644 --- a/bridge/evm/README.md +++ b/bridge/evm/README.md @@ -1,6 +1,6 @@ # 🏄‍♂️ Quick Start -This project leverages [Foundry](https://github.com/foundry-rs/foundry) to manage dependencies, contract compilation, testing, deployment, and on chain interactions via Solidity scripting. +This project leverages [Foundry](https://github.com/foundry-rs/foundry) to manage dependencies (via Soldeer), contract compilation, testing, deployment, and on-chain interactions via Solidity scripting. #### Environment configuration @@ -14,7 +14,7 @@ Duplicate and rename the `.env.example` file to `.env`. You'll need accounts and api To install the project dependencies, run: ```bash -forge install https://github.com/OpenZeppelin/openzeppelin-contracts-upgradeable@v5.0.1 https://github.com/foundry-rs/forge-std@v1.3.0 https://github.com/OpenZeppelin/openzeppelin-foundry-upgrades --no-git --no-commit +forge soldeer update ``` #### Compilation @@ -28,8 +28,7 @@ forge compile #### Testing ```bash -forge clean -forge test --ffi +forge test ``` #### Coverage @@ -45,15 +44,13 @@ forge coverage > The file should be named `.json` and should have the same fields and in the same order (alphabetical) as the `example.json`. ```bash -forge clean -forge script script/deploy_bridge.s.sol --rpc-url <> --broadcast --verify --ffi +forge script script/deploy_bridge.s.sol --rpc-url <> --broadcast --verify ``` **Local deployment** ```bash -forge clean -forge script script/deploy_bridge.s.sol --fork-url anvil --broadcast --ffi +forge script script/deploy_bridge.s.sol --fork-url anvil --broadcast ``` All deployments are saved in the `broadcast` directory.
diff --git a/bridge/evm/foundry.toml b/bridge/evm/foundry.toml index b2a3ebfec2a6d..ba31fcbba8e08 100644 --- a/bridge/evm/foundry.toml +++ b/bridge/evm/foundry.toml @@ -3,20 +3,31 @@ src = 'contracts' test = 'test' no_match_test = "testSkip" out = 'out' -libs = ['lib'] +libs = ['dependencies'] solc = "0.8.20" build_info = true extra_output = ["storageLayout"] fs_permissions = [{ access = "read", path = "/"}] gas_reports = ["SuiBridge"] +ffi = true + [fmt] line_length = 100 + [fuzz] runs = 1000 + [rpc_endpoints] mainnet = "${MAINNET_RPC_URL}" sepolia = "${SEPOLIA_RPC_URL}" anvil = "http://localhost:8545" + [etherscan] sepolia = { key = "${ETHERSCAN_API_KEY}" } -mainnet = { key = "${ETHERSCAN_API_KEY}" } \ No newline at end of file +mainnet = { key = "${ETHERSCAN_API_KEY}" } + +[dependencies] +forge-std = "1.9.2" +openzeppelin-foundry-upgrades = "0.3.1" +"@openzeppelin-contracts-upgradeable" = "5.0.1" +"@openzeppelin-contracts" = "5.0.1" \ No newline at end of file diff --git a/bridge/evm/remappings.txt b/bridge/evm/remappings.txt index 5279b569511f7..c680ee33d8dd9 100644 --- a/bridge/evm/remappings.txt +++ b/bridge/evm/remappings.txt @@ -1,5 +1,8 @@ -@openzeppelin/contracts/=lib/openzeppelin-contracts-upgradeable/lib/openzeppelin-contracts/contracts/ -@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/ -@openzeppelin/openzeppelin-foundry-upgrades/=lib/openzeppelin-foundry-upgrades/src/ -ds-test/=lib/forge-std/lib/ds-test/src/ -forge-std/=lib/openzeppelin-foundry-upgrades/lib/forge-std/src/ \ No newline at end of file +@forge-std=dependencies/forge-std-1.9.2/src +@openzeppelin/foundry-upgrades=dependencies/openzeppelin-foundry-upgrades-0.3.1/src +@openzeppelin/contracts=dependencies/@openzeppelin-contracts-5.0.1 +@openzeppelin/contracts-upgradeable=dependencies/@openzeppelin-contracts-upgradeable-5.0.1 +@forge-std-1.9.2=dependencies/forge-std-1.9.2 +@openzeppelin-foundry-upgrades-0.3.1=dependencies/openzeppelin-foundry-upgrades-0.3.1 +@openzeppelin-contracts-upgradeable-5.0.1=dependencies/@openzeppelin-contracts-upgradeable-5.0.1 +@openzeppelin-contracts-5.0.1=dependencies/@openzeppelin-contracts-5.0.1 \ No newline at end of file diff --git a/bridge/evm/script/deploy_bridge.s.sol b/bridge/evm/script/deploy_bridge.s.sol index 4df43f5384a52..dfb5c6609c15a 100644 --- a/bridge/evm/script/deploy_bridge.s.sol +++ b/bridge/evm/script/deploy_bridge.s.sol @@ -44,6 +44,8 @@ contract DeployBridge is Script { MockWBTC wBTC = new MockWBTC(); MockUSDC USDC = new MockUSDC(); MockUSDT USDT = new MockUSDT(); + MockKA KA = new MockKA(); + console.log("[Deployed] KA:", address(KA)); // update deployConfig with mock addresses deployConfig.supportedTokens = new address[](5); diff --git a/bridge/evm/soldeer.lock b/bridge/evm/soldeer.lock new file mode 100644 index 0000000000000..20bd2407a347b --- /dev/null +++ b/bridge/evm/soldeer.lock @@ -0,0 +1,24 @@ + +[[dependencies]] +name = "forge-std" +version = "1.9.2" +source = "https://soldeer-revisions.s3.amazonaws.com/forge-std/1_9_2_06-08-2024_17:31:25_forge-std-1.9.2.zip" +checksum = "20fd008c7c69b6c737cc0284469d1c76497107bc3e004d8381f6d8781cb27980" + +[[dependencies]] +name = "openzeppelin-foundry-upgrades" +version = "0.3.1" +source = "https://soldeer-revisions.s3.amazonaws.com/openzeppelin-foundry-upgrades/0_3_1_25-06-2024_18:12:33_openzeppelin-foundry-upgrades.zip" +checksum = "16a43c67b7c62e4a638b669b35f7b19c98a37278811fe910750b62b6e6fdffa7" + +[[dependencies]] +name = "@openzeppelin-contracts-upgradeable" +version = "5.0.1" 
+source = "https://soldeer-revisions.s3.amazonaws.com/@openzeppelin-contracts-upgradeable/5_0_1_22-01-2024_13:15:10_contracts-upgradeable.zip" +checksum = "cca37ad1d376a5c3954d1c2a8d2675339f182eee535caa7ba7ebf8d589a2c19a" + +[[dependencies]] +name = "@openzeppelin-contracts" +version = "5.0.1" +source = "https://soldeer-revisions.s3.amazonaws.com/@openzeppelin-contracts/5_0_1_22-01-2024_13:14:01_contracts.zip" +checksum = "c256cbf6f5f38d3b65c7528bbffb530d0bdb818a20c9d5b61235a829202d7df7" diff --git a/bridge/evm/test/mocks/MockTokens.sol b/bridge/evm/test/mocks/MockTokens.sol index 3a1bfbd059f39..05f463b36781e 100644 --- a/bridge/evm/test/mocks/MockTokens.sol +++ b/bridge/evm/test/mocks/MockTokens.sol @@ -57,6 +57,24 @@ contract MockUSDT is ERC20 { function testSkip() public {} } +contract MockKA is ERC20 { + constructor() ERC20("Ka Coin", "KA") {} + + function mint(address to, uint256 amount) public virtual { + _mint(to, amount); + } + + function burn(address form, uint256 amount) public virtual { + _burn(form, amount); + } + + function decimals() public view virtual override returns (uint8) { + return 9; + } + + function testSkip() public {} +} + contract WETH { string public name = "Wrapped Ether"; string public symbol = "WETH"; diff --git a/chocolatey/sui.nuspec b/chocolatey/sui.nuspec index 6d649bbe60e09..843f5bf90f3c9 100644 --- a/chocolatey/sui.nuspec +++ b/chocolatey/sui.nuspec @@ -5,7 +5,7 @@ enclosed in quotation marks, you should use an editor that supports UTF-8, not t sui $version$ - sui + Sui Foundation Main Sui Binary sui https://sui.io/ @@ -15,12 +15,12 @@ enclosed in quotation marks, you should use an editor that supports UTF-8, not t https://github.com/MystenLabs/sui/issues sui https://community.chocolatey.org/packages/sui.portable - Run a local sui binary + Sui delivers the benefits of Web3 with the ease of Web2 Sui is the first internet-scale programmable blockchain platform - https://github.com/MystenLabs/sui/releases/tag/mainnet-v$version$ + See https://github.com/MystenLabs/sui/releases/tag/testnet-v$version$ - + diff --git a/consensus/config/Cargo.toml b/consensus/config/Cargo.toml index aeb3978f12301..48697e2f7e91b 100644 --- a/consensus/config/Cargo.toml +++ b/consensus/config/Cargo.toml @@ -6,6 +6,9 @@ authors = ["Mysten Labs "] edition = "2021" publish = false +[lints] +workspace = true + [dependencies] fastcrypto.workspace = true mysten-network.workspace = true diff --git a/consensus/config/src/parameters.rs b/consensus/config/src/parameters.rs index 4b3a85906c19a..6df7da3cb21e0 100644 --- a/consensus/config/src/parameters.rs +++ b/consensus/config/src/parameters.rs @@ -57,8 +57,8 @@ pub struct Parameters { #[serde(default = "Parameters::default_commit_sync_batch_size")] pub commit_sync_batch_size: u32, - // Maximum number of commit batches being fetched, before throttling - // of outgoing commit fetches starts. + // This affects the maximum number of commit batches being fetched, and those fetched but not + // processed as consensus output, before throttling of outgoing commit fetches starts. #[serde(default = "Parameters::default_commit_sync_batches_ahead")] pub commit_sync_batches_ahead: usize, @@ -73,8 +73,8 @@ pub struct Parameters { /// Time to wait during node start up until the node has synced the last proposed block via the /// network peers. When set to `0` the sync mechanism is disabled. This property is meant to be /// used for amnesia recovery. 
- #[serde(default = "Parameters::default_sync_last_proposed_block_timeout")] - pub sync_last_proposed_block_timeout: Duration, + #[serde(default = "Parameters::default_sync_last_known_own_block_timeout")] + pub sync_last_known_own_block_timeout: Duration, } impl Parameters { @@ -129,15 +129,19 @@ impl Parameters { } pub(crate) fn default_commit_sync_batches_ahead() -> usize { - 200 + // This is set to be a multiple of default commit_sync_parallel_fetches to allow fetching ahead, + // while keeping the total number of inflight fetches and unprocessed fetched commits limited. + 80 } - pub(crate) fn default_sync_last_proposed_block_timeout() -> Duration { - Duration::ZERO - } - - pub fn is_sync_last_proposed_block_enabled(&self) -> bool { - !self.sync_last_proposed_block_timeout.is_zero() + pub(crate) fn default_sync_last_known_own_block_timeout() -> Duration { + if cfg!(msim) { + Duration::from_millis(500) + } else { + // Here we prioritise liveness over completely de-risking block equivocation. Given a healthy + // network, 5 seconds should be good enough for the majority of cases. + Duration::from_secs(5) + } } } @@ -150,8 +154,8 @@ impl Default for Parameters { max_forward_time_drift: Parameters::default_max_forward_time_drift(), dag_state_cached_rounds: Parameters::default_dag_state_cached_rounds(), max_blocks_per_fetch: Parameters::default_max_blocks_per_fetch(), - sync_last_proposed_block_timeout: Parameters::default_sync_last_proposed_block_timeout( - ), + sync_last_known_own_block_timeout: + Parameters::default_sync_last_known_own_block_timeout(), commit_sync_parallel_fetches: Parameters::default_commit_sync_parallel_fetches(), commit_sync_batch_size: Parameters::default_commit_sync_batch_size(), commit_sync_batches_ahead: Parameters::default_commit_sync_batches_ahead(), diff --git a/consensus/config/tests/snapshots/parameters_test__parameters.snap b/consensus/config/tests/snapshots/parameters_test__parameters.snap index b764571ffe0a2..60ed151ba6303 100644 --- a/consensus/config/tests/snapshots/parameters_test__parameters.snap +++ b/consensus/config/tests/snapshots/parameters_test__parameters.snap @@ -15,7 +15,7 @@ max_blocks_per_fetch: 1000 dag_state_cached_rounds: 500 commit_sync_parallel_fetches: 20 commit_sync_batch_size: 100 -commit_sync_batches_ahead: 200 +commit_sync_batches_ahead: 80 anemo: excessive_message_size: 8388608 tonic: @@ -25,6 +25,6 @@ tonic: connection_buffer_size: 33554432 excessive_message_size: 16777216 message_size_limit: 67108864 -sync_last_proposed_block_timeout: - secs: 0 +sync_last_known_own_block_timeout: + secs: 5 nanos: 0 diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 067901f002a16..2cb277834c9b2 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -6,6 +6,9 @@ authors = ["Mysten Labs <build@mystenlabs.com>"] edition = "2021" publish = false +[lints] +workspace = true + [dependencies] anemo.workspace = true anemo-tower.workspace = true diff --git a/consensus/core/src/authority_node.rs b/consensus/core/src/authority_node.rs index 06e57b10c0846..f896dc1a0bd89 100644 --- a/consensus/core/src/authority_node.rs +++ b/consensus/core/src/authority_node.rs @@ -7,7 +7,7 @@ use consensus_config::{AuthorityIndex, Committee, NetworkKeyPair, Parameters, Pr use parking_lot::RwLock; use prometheus::Registry; use sui_protocol_config::{ConsensusNetwork, ProtocolConfig}; -use tracing::info; +use tracing::{info, warn}; use crate::{ authority_service::AuthorityService, @@ -15,7 +15,8 @@ use crate::{ block_verifier::SignedBlockVerifier,
broadcaster::Broadcaster, commit_observer::CommitObserver, - commit_syncer::{CommitSyncer, CommitVoteMonitor}, + commit_syncer::{CommitSyncer, CommitSyncerHandle}, + commit_vote_monitor::CommitVoteMonitor, context::{Clock, Context}, core::{Core, CoreSignals}, core_thread::{ChannelCoreThreadDispatcher, CoreThreadHandle}, @@ -54,6 +55,11 @@ impl ConsensusAuthority { transaction_verifier: Arc, commit_consumer: CommitConsumer, registry: Registry, + // A counter that keeps track of how many times the authority node has been booted while the binary + // or the component that is calling the `ConsensusAuthority` has been running. It's mostly useful to + // make decisions on whether amnesia recovery should run or not. When `boot_counter` is 0, then `ConsensusAuthority` + // will initiate the process of amnesia recovery if that's enabled in the parameters. + boot_counter: u64, ) -> Self { match network_type { ConsensusNetwork::Anemo => { @@ -67,6 +73,7 @@ impl ConsensusAuthority { transaction_verifier, commit_consumer, registry, + boot_counter, ) .await; Self::WithAnemo(authority) @@ -82,6 +89,7 @@ impl ConsensusAuthority { transaction_verifier, commit_consumer, registry, + boot_counter, ) .await; Self::WithTonic(authority) @@ -110,6 +118,14 @@ impl ConsensusAuthority { Self::WithTonic(authority) => &authority.context, } } + + #[allow(unused)] + fn sync_last_known_own_block_enabled(&self) -> bool { + match self { + Self::WithAnemo(authority) => authority.sync_last_known_own_block, + Self::WithTonic(authority) => authority.sync_last_known_own_block, + } + } } pub(crate) struct AuthorityNode @@ -120,7 +136,7 @@ where start_time: Instant, transaction_client: Arc, synchronizer: Arc, - commit_syncer: CommitSyncer, + commit_syncer_handle: CommitSyncerHandle, leader_timeout_handle: LeaderTimeoutTaskHandle, core_thread_handle: CoreThreadHandle, // Only one of broadcaster and subscriber gets created, depending on @@ -128,6 +144,7 @@ where broadcaster: Option, subscriber: Option>>, network_manager: N, + sync_last_known_own_block: bool, } impl AuthorityNode @@ -146,10 +163,11 @@ where transaction_verifier: Arc, commit_consumer: CommitConsumer, registry: Registry, + boot_counter: u64, ) -> Self { info!( - "Starting consensus authority {}\n{:#?}\n{:#?}\n{:?}", - own_index, committee, parameters, protocol_config.version + "Starting consensus authority {}\n{:#?}\n{:#?}\n{:?}\nBoot counter: {}", + own_index, committee, parameters, protocol_config.version, boot_counter ); assert!(committee.is_valid_index(own_index)); let context = Arc::new(Context::new( @@ -163,7 +181,7 @@ where let start_time = Instant::now(); let (tx_client, tx_receiver) = TransactionClient::new(context.clone()); - let tx_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let tx_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (core_signals, signals_receivers) = CoreSignals::new(context.clone()); @@ -185,6 +203,13 @@ where let store_path = context.parameters.db_path.as_path().to_str().unwrap(); let store = Arc::new(RocksDBStore::new(store_path)); let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store.clone()))); + let sync_last_known_own_block = boot_counter == 0 + && dag_state.read().highest_accepted_round() == 0 + && !context + .parameters + .sync_last_known_own_block_timeout + .is_zero(); + info!("Sync last known own block: {sync_last_known_own_block}"); let block_verifier = Arc::new(SignedBlockVerifier::new( context.clone(), @@ -209,6 +234,7 @@ where )) }; + let 
commit_consumer_monitor = commit_consumer.monitor(); let commit_observer = CommitObserver::new( context.clone(), commit_consumer, @@ -229,6 +255,7 @@ where core_signals, protocol_keypair, dag_state.clone(), + sync_last_known_own_block, ); let (core_dispatcher, core_thread_handle) = @@ -238,6 +265,7 @@ where LeaderTimeoutTask::start(core_dispatcher.clone(), &signals_receivers, context.clone()); let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); + let synchronizer = Synchronizer::start( network_client.clone(), context.clone(), @@ -245,16 +273,19 @@ where commit_vote_monitor.clone(), block_verifier.clone(), dag_state.clone(), + sync_last_known_own_block, ); - let commit_syncer = CommitSyncer::new( + let commit_syncer_handle = CommitSyncer::new( context.clone(), core_dispatcher.clone(), commit_vote_monitor.clone(), + commit_consumer_monitor, network_client.clone(), block_verifier.clone(), dag_state.clone(), - ); + ) + .start(); let network_service = Arc::new(AuthorityService::new( context.clone(), @@ -296,12 +327,13 @@ where start_time, transaction_client: Arc::new(tx_client), synchronizer, - commit_syncer, + commit_syncer_handle, leader_timeout_handle, core_thread_handle, broadcaster, subscriber, network_manager, + sync_last_known_own_block, } } @@ -312,8 +344,16 @@ where ); // First shutdown components calling into Core. - self.synchronizer.stop().await.ok(); - self.commit_syncer.stop().await; + if let Err(e) = self.synchronizer.stop().await { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } + warn!( + "Failed to stop synchronizer when shutting down consensus: {:?}", + e + ); + }; + self.commit_syncer_handle.stop().await; self.leader_timeout_handle.stop().await; // Shutdown Core to stop block productions and broadcast. // When using streaming, all subscribers to broadcasted blocks stop after this. 
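The shutdown sequence above re-raises panics from joined tasks instead of swallowing them, while treating plain cancellation as a loggable, non-fatal event. A minimal, self-contained sketch of that tokio pattern (the function name and wiring here are illustrative, not part of the diff):

```rust
use tokio::task::JoinHandle;

// Await a task handle: re-raise a panic that happened inside the task on the
// current thread, but only log an ordinary cancellation error.
async fn join_and_propagate(handle: JoinHandle<()>) {
    if let Err(e) = handle.await {
        if e.is_panic() {
            // resume_unwind re-raises the captured panic payload.
            std::panic::resume_unwind(e.into_panic());
        }
        eprintln!("task cancelled during shutdown: {e:?}");
    }
}

#[tokio::main]
async fn main() {
    let task = tokio::spawn(async { /* background work */ });
    join_and_propagate(task).await; // completes quietly here
}
```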
@@ -343,7 +383,7 @@ where mod tests { #![allow(non_snake_case)] - use std::sync::Mutex; + use std::collections::BTreeMap; use std::{collections::BTreeSet, sync::Arc, time::Duration}; use consensus_config::{local_committee_and_keys, Parameters}; @@ -352,10 +392,11 @@ mod tests { use rstest::rstest; use sui_protocol_config::ProtocolConfig; use tempfile::TempDir; - use tokio::time::sleep; + use tokio::time::{sleep, timeout}; use typed_store::DBMetrics; use super::*; + use crate::block::GENESIS_ROUND; use crate::{block::BlockAPI as _, transaction::NoopTransactionVerifier, CommittedSubDag}; #[rstest] @@ -378,7 +419,7 @@ mod tests { let network_keypair = keypairs[own_index].0.clone(); let (sender, _receiver) = unbounded_channel("consensus_output"); - let commit_consumer = CommitConsumer::new(sender, 0, 0); + let commit_consumer = CommitConsumer::new(sender, 0); let authority = ConsensusAuthority::start( network_type, @@ -391,6 +432,7 @@ mod tests { Arc::new(txn_verifier), commit_consumer, registry, + 0, ) .await; @@ -410,11 +452,15 @@ mod tests { let db_registry = Registry::new(); DBMetrics::init(&db_registry); - let (committee, keypairs) = local_committee_and_keys(0, vec![1, 1, 1, 1]); - let temp_dirs = (0..4).map(|_| TempDir::new().unwrap()).collect::<Vec<_>>(); + const NUM_OF_AUTHORITIES: usize = 4; + let (committee, keypairs) = local_committee_and_keys(0, [1; NUM_OF_AUTHORITIES].to_vec()); + let temp_dirs = (0..NUM_OF_AUTHORITIES) + .map(|_| TempDir::new().unwrap()) + .collect::<Vec<_>>(); let mut output_receivers = Vec::with_capacity(committee.size()); let mut authorities = Vec::with_capacity(committee.size()); + let mut boot_counters = [0; NUM_OF_AUTHORITIES]; for (index, _authority_info) in committee.authorities() { let (authority, receiver) = make_authority( @@ -423,8 +469,10 @@ mod tests { committee.clone(), keypairs.clone(), network_type, + boot_counters[index], ) .await; + boot_counters[index] += 1; output_receivers.push(receiver); authorities.push(authority); } @@ -468,7 +516,7 @@ mod tests { // Stop authority 1. let index = committee.to_authority_index(1).unwrap(); authorities.remove(index.value()).stop().await; - sleep(Duration::from_secs(15)).await; + sleep(Duration::from_secs(10)).await; // Restart authority 1 and let it run. let (authority, receiver) = make_authority( @@ -477,11 +525,13 @@ mod tests { committee.clone(), keypairs.clone(), network_type, + boot_counters[index], ) .await; + boot_counters[index] += 1; output_receivers[index] = receiver; authorities.insert(index.value(), authority); - sleep(Duration::from_secs(15)).await; + sleep(Duration::from_secs(10)).await; // Stop all authorities and exit.
for authority in authorities { @@ -491,166 +541,120 @@ mod tests { #[rstest] #[tokio::test(flavor = "current_thread")] - async fn test_amnesia_success( + async fn test_amnesia_recovery_success( #[values(ConsensusNetwork::Anemo, ConsensusNetwork::Tonic)] network_type: ConsensusNetwork, ) { telemetry_subscribers::init_for_testing(); let db_registry = Registry::new(); DBMetrics::init(&db_registry); - let (committee, keypairs) = local_committee_and_keys(0, vec![1, 1, 1, 1]); + const NUM_OF_AUTHORITIES: usize = 4; + let (committee, keypairs) = local_committee_and_keys(0, [1; NUM_OF_AUTHORITIES].to_vec()); let mut output_receivers = vec![]; - let mut authorities = vec![]; + let mut authorities = BTreeMap::new(); + let mut temp_dirs = BTreeMap::new(); + let mut boot_counters = [0; NUM_OF_AUTHORITIES]; for (index, _authority_info) in committee.authorities() { + let dir = TempDir::new().unwrap(); let (authority, receiver) = make_authority( index, - &TempDir::new().unwrap(), + &dir, committee.clone(), keypairs.clone(), network_type, + boot_counters[index], ) .await; + assert!(authority.sync_last_known_own_block_enabled(), "Expected syncing of last known own block to be enabled, as all authorities boot for the first time with an empty DB."); + boot_counters[index] += 1; output_receivers.push(receiver); - authorities.push(authority); + authorities.insert(index, authority); + temp_dirs.insert(index, dir); } - const NUM_TRANSACTIONS: u8 = 15; - let mut submitted_transactions = BTreeSet::<Vec<u8>>::new(); - for i in 0..NUM_TRANSACTIONS { - let txn = vec![i; 16]; - submitted_transactions.insert(txn.clone()); - authorities[i as usize % authorities.len()] - .transaction_client() - .submit(vec![txn]) + // Take the receiver of authority 1 and wait until we see at least one committed block authored + // by that authority. That way we can be sure that at least one of its blocks has been proposed + // and successfully received by a quorum of nodes. + let index_1 = committee.to_authority_index(1).unwrap(); + 'outer: while let Some(result) = + timeout(Duration::from_secs(10), output_receivers[index_1].recv()) .await - .unwrap(); - } - - for receiver in &mut output_receivers { - let mut expected_transactions = submitted_transactions.clone(); - loop { - let committed_subdag = - tokio::time::timeout(Duration::from_secs(1), receiver.recv()) - .await - .unwrap() - .unwrap(); - for b in committed_subdag.blocks { - for txn in b.transactions().iter().map(|t| t.data().to_vec()) { - assert!( - expected_transactions.remove(&txn), - "Transaction not submitted or already seen: {:?}", - txn - ); - } - } - assert_eq!(committed_subdag.reputation_scores_desc, vec![]); - if expected_transactions.is_empty() { - break; + .expect("Timed out while waiting for at least one committed block from authority 1") + { + for block in result.blocks { + if block.round() > GENESIS_ROUND && block.author() == index_1 { + break 'outer; } } } - // Stop authority 1. - let index = committee.to_authority_index(1).unwrap(); - authorities.remove(index.value()).stop().await; + // Stop authorities 1 & 2. + // * Authority 1 will have its DB wiped out, practically forcing amnesia recovery. + // * Authority 2 is stopped to simulate less than f+1 availability, which makes authority 1 + // keep retrying during amnesia recovery until it finally gets back f+1 responses once + // authority 2 is up and running again.
+ authorities.remove(&index_1).unwrap().stop().await; + let index_2 = committee.to_authority_index(2).unwrap(); + authorities.remove(&index_2).unwrap().stop().await; sleep(Duration::from_secs(5)).await; - // now create a new directory to simulate amnesia. The node will start having participated previously - // to consensus but now will attempt to synchronize the last own block and recover from there. + // Authority 1: create a new directory to simulate amnesia. The node has previously participated in + // consensus, but will now attempt to synchronize its last own block and recover from there. It won't + // be able to do that successfully while authority 2 is still down. + let dir = TempDir::new().unwrap(); + // Reset the boot counter for this node to simulate a binary restart. + boot_counters[index_1] = 0; let (authority, mut receiver) = make_authority( - index, - &TempDir::new().unwrap(), + index_1, + &dir, + committee.clone(), + keypairs.clone(), + network_type, + boot_counters[index_1], + ) + .await; + assert!( + authority.sync_last_known_own_block_enabled(), + "Authority should have the sync of last own block enabled" + ); + boot_counters[index_1] += 1; + authorities.insert(index_1, authority); + temp_dirs.insert(index_1, dir); + sleep(Duration::from_secs(5)).await; + + // Now spin up authority 2 using its earlier directory, so no amnesia recovery should be forced here. + // Authority 1 should be able to recover from amnesia successfully. + let (authority, _receiver) = make_authority( + index_2, + &temp_dirs[&index_2], committee.clone(), keypairs, network_type, + boot_counters[index_2], ) .await; - authorities.insert(index.value(), authority); + assert!( + !authority.sync_last_known_own_block_enabled(), + "Authority should not have attempted to sync the last own block" + ); + boot_counters[index_2] += 1; + authorities.insert(index_2, authority); sleep(Duration::from_secs(5)).await; // We wait until we see at least one committed block authored by this authority 'outer: while let Some(result) = receiver.recv().await { for block in result.blocks { - if block.author() == index { + if block.round() > GENESIS_ROUND && block.author() == index_1 { break 'outer; } } } // Stop all authorities and exit.
- for authority in authorities { - authority.stop().await; - } - } - - #[rstest] - #[tokio::test] - async fn test_amnesia_failure( - #[values(ConsensusNetwork::Anemo, ConsensusNetwork::Tonic)] network_type: ConsensusNetwork, - ) { - telemetry_subscribers::init_for_testing(); - - let occurred_panic = Arc::new(Mutex::new(None)); - let occurred_panic_cloned = occurred_panic.clone(); - - let default_panic_handler = std::panic::take_hook(); - std::panic::set_hook(Box::new(move |panic| { - let mut l = occurred_panic_cloned.lock().unwrap(); - *l = Some(panic.to_string()); - default_panic_handler(panic); - })); - - let db_registry = Registry::new(); - DBMetrics::init(&db_registry); - - let (committee, keypairs) = local_committee_and_keys(0, vec![1, 1, 1, 1]); - let mut output_receivers = vec![]; - let mut authorities = vec![]; - - for (index, _authority_info) in committee.authorities() { - let (authority, receiver) = make_authority( - index, - &TempDir::new().unwrap(), - committee.clone(), - keypairs.clone(), - network_type, - ) - .await; - output_receivers.push(receiver); - authorities.push(authority); - } - - // Let the network run for a few seconds - sleep(Duration::from_secs(5)).await; - - // Stop all authorities - while let Some(authority) = authorities.pop() { + for (_, authority) in authorities { authority.stop().await; } - - sleep(Duration::from_secs(2)).await; - - let index = AuthorityIndex::new_for_test(0); - let (_authority, _receiver) = make_authority( - index, - &TempDir::new().unwrap(), - committee, - keypairs, - network_type, - ) - .await; - sleep(Duration::from_secs(5)).await; - - // Now reset the panic hook - let _default_panic_handler = std::panic::take_hook(); - - // We expect this test to panic as all the other peers are down and the node that tries to - // recover its last produced block fails. 
- let panic_info = occurred_panic.lock().unwrap().take().unwrap(); - assert!(panic_info.contains( - "No peer has returned any acceptable result, can not safely update min round" - )); } // TODO: create a fixture @@ -660,6 +664,7 @@ mod tests { committee: Committee, keypairs: Vec<(NetworkKeyPair, ProtocolKeyPair)>, network_type: ConsensusNetwork, + boot_counter: u64, ) -> (ConsensusAuthority, UnboundedReceiver<CommittedSubDag>) { let registry = Registry::new(); @@ -669,7 +674,7 @@ mod tests { dag_state_cached_rounds: 5, commit_sync_parallel_fetches: 3, commit_sync_batch_size: 3, - sync_last_proposed_block_timeout: Duration::from_millis(2_000), + sync_last_known_own_block_timeout: Duration::from_millis(2_000), ..Default::default() }; let txn_verifier = NoopTransactionVerifier {}; @@ -678,7 +683,7 @@ mod tests { let network_keypair = keypairs[index].0.clone(); let (sender, receiver) = unbounded_channel("consensus_output"); - let commit_consumer = CommitConsumer::new(sender, 0, 0); + let commit_consumer = CommitConsumer::new(sender, 0); let authority = ConsensusAuthority::start( network_type, @@ -691,8 +696,10 @@ mod tests { Arc::new(txn_verifier), commit_consumer, registry, + boot_counter, ) .await; + (authority, receiver) } } diff --git a/consensus/core/src/authority_service.rs b/consensus/core/src/authority_service.rs index feac2741570cc..94fa311e01a6b 100644 --- a/consensus/core/src/authority_service.rs +++ b/consensus/core/src/authority_service.rs @@ -17,7 +17,7 @@ use crate::{ block::{BlockAPI as _, BlockRef, SignedBlock, VerifiedBlock, GENESIS_ROUND}, block_verifier::BlockVerifier, commit::{CommitAPI as _, CommitRange, TrustedCommit}, - commit_syncer::CommitVoteMonitor, + commit_vote_monitor::CommitVoteMonitor, context::Context, core_thread::CoreThreadDispatcher, dag_state::DagState, @@ -133,7 +133,7 @@ impl NetworkService for AuthorityService { .metrics .node_metrics .rejected_future_blocks - .with_label_values(&[&peer_hostname]) + .with_label_values(&[peer_hostname]) .inc(); debug!( "Block {:?} timestamp ({} > {}) is too far in the future, rejected.", @@ -157,7 +157,7 @@ impl NetworkService for AuthorityService { .metrics .node_metrics .block_timestamp_drift_wait_ms - .with_label_values(&[peer_hostname, &"handle_send_block"]) + .with_label_values(&[peer_hostname, "handle_send_block"]) .inc_by(forward_time_drift.as_millis() as u64); debug!( "Block {:?} timestamp ({} > {}) is in the future, waiting for {}ms", @@ -171,7 +171,7 @@ impl NetworkService for AuthorityService { // Observe the block for the commit votes. When local commit is lagging too much, // commit sync loop will trigger fetching. - self.commit_vote_monitor.observe(&verified_block); + self.commit_vote_monitor.observe_block(&verified_block); // Reject blocks when local commit index is lagging too far from quorum commit index.
// @@ -192,7 +192,7 @@ impl NetworkService for AuthorityService { .metrics .node_metrics .rejected_blocks - .with_label_values(&[&"commit_lagging"]) + .with_label_values(&["commit_lagging"]) .inc(); debug!( "Block {:?} is rejected because last commit index is lagging quorum commit index too much ({} < {})", @@ -213,7 +213,7 @@ impl NetworkService for AuthorityService { .metrics .node_metrics .verified_blocks - .with_label_values(&[&peer_hostname]) + .with_label_values(&[peer_hostname]) .inc(); let missing_ancestors = self @@ -567,7 +567,7 @@ mod tests { authority_service::AuthorityService, block::BlockAPI, block::{BlockRef, SignedBlock, TestBlock, VerifiedBlock}, - commit_syncer::CommitVoteMonitor, + commit_vote_monitor::CommitVoteMonitor, context::Context, core_thread::{CoreError, CoreThreadDispatcher}, dag_state::DagState, @@ -689,24 +689,25 @@ mod tests { let (context, _keys) = Context::new_for_test(4); let context = Arc::new(context); let block_verifier = Arc::new(crate::block_verifier::NoopBlockVerifier {}); + let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let core_dispatcher = Arc::new(FakeCoreThreadDispatcher::new()); let (_tx_block_broadcast, rx_block_broadcast) = broadcast::channel(100); let network_client = Arc::new(FakeNetworkClient::default()); let store = Arc::new(MemStore::new()); let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store.clone()))); - let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let synchronizer = Synchronizer::start( network_client, context.clone(), core_dispatcher.clone(), - commit_vote_monitor, + commit_vote_monitor.clone(), block_verifier.clone(), dag_state.clone(), + false, ); let authority_service = Arc::new(AuthorityService::new( context.clone(), block_verifier, - Arc::new(CommitVoteMonitor::new(context.clone())), + commit_vote_monitor, synchronizer, core_dispatcher.clone(), rx_block_broadcast, @@ -747,24 +748,25 @@ mod tests { let (context, _keys) = Context::new_for_test(4); let context = Arc::new(context); let block_verifier = Arc::new(crate::block_verifier::NoopBlockVerifier {}); + let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let core_dispatcher = Arc::new(FakeCoreThreadDispatcher::new()); let (_tx_block_broadcast, rx_block_broadcast) = broadcast::channel(100); let network_client = Arc::new(FakeNetworkClient::default()); let store = Arc::new(MemStore::new()); let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store.clone()))); - let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let synchronizer = Synchronizer::start( network_client, context.clone(), core_dispatcher.clone(), - commit_vote_monitor, + commit_vote_monitor.clone(), block_verifier.clone(), dag_state.clone(), + true, ); let authority_service = Arc::new(AuthorityService::new( context.clone(), block_verifier, - Arc::new(CommitVoteMonitor::new(context.clone())), + commit_vote_monitor, synchronizer, core_dispatcher.clone(), rx_block_broadcast, diff --git a/consensus/core/src/block_verifier.rs b/consensus/core/src/block_verifier.rs index 7c04605ddb0f5..2e876e9130f0b 100644 --- a/consensus/core/src/block_verifier.rs +++ b/consensus/core/src/block_verifier.rs @@ -142,8 +142,43 @@ impl BlockVerifier for SignedBlockVerifier { }); } - // TODO: check transaction size, total size and count. 
let batch: Vec<_> = block.transactions().iter().map(|t| t.data()).collect(); + + let max_transaction_size_limit = + self.context + .protocol_config + .consensus_max_transaction_size_bytes() as usize; + for t in &batch { + if t.len() > max_transaction_size_limit && max_transaction_size_limit > 0 { + return Err(ConsensusError::TransactionTooLarge { + size: t.len(), + limit: max_transaction_size_limit, + }); + } + } + + let max_num_transactions_limit = + self.context.protocol_config.max_num_transactions_in_block() as usize; + if batch.len() > max_num_transactions_limit && max_num_transactions_limit > 0 { + return Err(ConsensusError::TooManyTransactions { + count: batch.len(), + limit: max_num_transactions_limit, + }); + } + + let total_transactions_size_limit = + self.context + .protocol_config + .consensus_max_transactions_in_block_bytes() as usize; + let total_transactions_size = batch.iter().map(|t| t.len()).sum::<usize>(); + if total_transactions_size > total_transactions_size_limit + && total_transactions_size_limit > 0 + { + return Err(ConsensusError::TooManyTransactionBytes { + // Report the offending total size in bytes, not the transaction count. + size: total_transactions_size, + limit: total_transactions_size_limit, + }); + } + self.transaction_verifier .verify_batch(&self.context.protocol_config, &batch) .map_err(|e| ConsensusError::InvalidTransaction(format!("{e:?}"))) @@ -171,6 +206,7 @@ impl BlockVerifier for SignedBlockVerifier { } } +#[allow(unused)] pub(crate) struct NoopBlockVerifier; impl BlockVerifier for NoopBlockVerifier { @@ -445,6 +481,49 @@ mod test { Err(ConsensusError::InvalidTransaction(_)) )); } + + // Block with transaction too large. + { + let block = test_block + .clone() + .set_transactions(vec![Transaction::new(vec![4; 257 * 1024])]) + .build(); + let signed_block = SignedBlock::new(block, authority_2_protocol_keypair).unwrap(); + assert!(matches!( + verifier.verify(&signed_block), + Err(ConsensusError::TransactionTooLarge { size: _, limit: _ }) + )); + } + + // Block with too many transactions. + { + let block = test_block + .clone() + .set_transactions((0..1000).map(|_| Transaction::new(vec![4; 8])).collect()) + .build(); + let signed_block = SignedBlock::new(block, authority_2_protocol_keypair).unwrap(); + assert!(matches!( + verifier.verify(&signed_block), + Err(ConsensusError::TooManyTransactions { count: _, limit: _ }) + )); + } + + // Block with too many transaction bytes. + { + let block = test_block + .clone() + .set_transactions( + (0..100) + .map(|_| Transaction::new(vec![4; 8 * 1024])) + .collect(), + ) + .build(); + let signed_block = SignedBlock::new(block, authority_2_protocol_keypair).unwrap(); + assert!(matches!( + verifier.verify(&signed_block), + Err(ConsensusError::TooManyTransactionBytes { size: _, limit: _ }) + )); + } } #[tokio::test] diff --git a/consensus/core/src/commit.rs b/consensus/core/src/commit.rs index 810fd9eb43b9d..f50d5ba469b89 100644 --- a/consensus/core/src/commit.rs +++ b/consensus/core/src/commit.rs @@ -13,7 +13,6 @@ use bytes::Bytes; use consensus_config::{AuthorityIndex, DefaultHashFunction, DIGEST_LENGTH}; use enum_dispatch::enum_dispatch; use fastcrypto::hash::{Digest, HashFunction as _}; -use mysten_metrics::monitored_mpsc::UnboundedSender; use serde::{Deserialize, Serialize}; use crate::{ @@ -398,32 +397,6 @@ pub fn load_committed_subdag_from_store( ) } -pub struct CommitConsumer { - // A channel to send the committed sub dags through - pub sender: UnboundedSender<CommittedSubDag>, - // Leader round of the last commit that the consumer has processed. - pub last_processed_commit_round: Round, - // Index of the last commit that the consumer has processed.
This is useful for - // crash/recovery so mysticeti can replay the commits from the next index. - // First commit in the replayed sequence will have index last_processed_commit_index + 1. - // Set 0 to replay from the start (as generated commit sequence starts at index = 1). - pub last_processed_commit_index: CommitIndex, -} - -impl CommitConsumer { - pub fn new( - sender: UnboundedSender<CommittedSubDag>, - last_processed_commit_round: Round, - last_processed_commit_index: CommitIndex, - ) -> Self { - Self { - sender, - last_processed_commit_round, - last_processed_commit_index, - } - } -} - #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub(crate) enum Decision { Direct, diff --git a/consensus/core/src/commit_consumer.rs b/consensus/core/src/commit_consumer.rs new file mode 100644 index 0000000000000..47a96d80f0e2a --- /dev/null +++ b/consensus/core/src/commit_consumer.rs @@ -0,0 +1,74 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::{atomic::AtomicU32, Arc}; + +use mysten_metrics::monitored_mpsc::UnboundedSender; + +use crate::{CommitIndex, CommittedSubDag}; + +pub struct CommitConsumer { + // A channel to send the committed sub dags through + pub(crate) sender: UnboundedSender<CommittedSubDag>, + // Index of the last commit that the consumer has processed. This is useful for + // crash/recovery so mysticeti can replay the commits from the next index. + // First commit in the replayed sequence will have index last_processed_commit_index + 1. + // Set 0 to replay from the start (as generated commit sequence starts at index = 1). + pub(crate) last_processed_commit_index: CommitIndex, + // Allows the commit consumer to report its progress. + monitor: Arc<CommitConsumerMonitor>, +} + +impl CommitConsumer { + pub fn new( + sender: UnboundedSender<CommittedSubDag>, + last_processed_commit_index: CommitIndex, + ) -> Self { + let monitor = Arc::new(CommitConsumerMonitor::new(last_processed_commit_index)); + Self { + sender, + last_processed_commit_index, + monitor, + } + } + + pub fn monitor(&self) -> Arc<CommitConsumerMonitor> { + self.monitor.clone() + } +} + +pub struct CommitConsumerMonitor { + highest_handled_commit: AtomicU32, +} + +impl CommitConsumerMonitor { + pub(crate) fn new(last_handled_commit: CommitIndex) -> Self { + Self { + highest_handled_commit: AtomicU32::new(last_handled_commit), + } + } + + pub(crate) fn highest_handled_commit(&self) -> CommitIndex { + self.highest_handled_commit + .load(std::sync::atomic::Ordering::Acquire) + } + + pub fn set_highest_handled_commit(&self, highest_handled_commit: CommitIndex) { + self.highest_handled_commit + .store(highest_handled_commit, std::sync::atomic::Ordering::Release); + } +} + +#[cfg(test)] +mod test { + use crate::CommitConsumerMonitor; + + #[test] + fn test_commit_consumer_monitor() { + let monitor = CommitConsumerMonitor::new(10); + assert_eq!(monitor.highest_handled_commit(), 10); + + monitor.set_highest_handled_commit(100); + assert_eq!(monitor.highest_handled_commit(), 100); + } +} diff --git a/consensus/core/src/commit_observer.rs b/consensus/core/src/commit_observer.rs index 34cf4871234f0..a25113de387fb 100644 --- a/consensus/core/src/commit_observer.rs +++ b/consensus/core/src/commit_observer.rs @@ -23,14 +23,16 @@ use crate::{ /// Role of CommitObserver /// - Called by core when try_commit() returns newly committed leaders.
/// - The newly committed leaders are sent to commit observer and then commit observer -/// gets subdags for each leader via the commit interpreter (linearizer) +/// gets subdags for each leader via the commit interpreter (linearizer) /// - The committed subdags are sent as consensus output via an unbounded tokio channel. +/// /// No backpressure mechanism is needed as backpressure is handled as input into /// consensus. +/// /// - Commit metadata including index is persisted in store, before the CommittedSubDag -/// is sent to the consumer. +/// is sent to the consumer. /// - When CommitObserver is initialized, a last processed commit index can be used -/// to ensure any missing commits are re-sent. +/// to ensure any missing commits are re-sent. pub(crate) struct CommitObserver { context: Arc<Context>, /// Component to deterministically collect subdags for committed leaders. @@ -210,11 +212,7 @@ mod tests { use super::*; use crate::{ - block::{BlockRef, Round}, - commit::DEFAULT_WAVE_LENGTH, - context::Context, - dag_state::DagState, - storage::mem_store::MemStore, + block::BlockRef, context::Context, dag_state::DagState, storage::mem_store::MemStore, test_dag_builder::DagBuilder, }; @@ -228,7 +226,6 @@ mod tests { context.clone(), mem_store.clone(), ))); - let last_processed_commit_round = 0; let last_processed_commit_index = 0; let (sender, mut receiver) = unbounded_channel("consensus_output"); @@ -239,11 +236,7 @@ mod tests { let mut observer = CommitObserver::new( context.clone(), - CommitConsumer::new( - sender, - last_processed_commit_round, - last_processed_commit_index, - ), + CommitConsumer::new(sender, last_processed_commit_index), dag_state.clone(), mem_store.clone(), leader_schedule, @@ -332,7 +325,6 @@ mod tests { context.clone(), mem_store.clone(), ))); - let last_processed_commit_round = 0; let last_processed_commit_index = 0; let (sender, mut receiver) = unbounded_channel("consensus_output"); @@ -343,11 +335,7 @@ mod tests { let mut observer = CommitObserver::new( context.clone(), - CommitConsumer::new( - sender.clone(), - last_processed_commit_round, - last_processed_commit_index, - ), + CommitConsumer::new(sender.clone(), last_processed_commit_index), dag_state.clone(), mem_store.clone(), leader_schedule.clone(), @@ -370,8 +358,6 @@ mod tests { // Commit first batch of leaders (2) and "receive" the subdags as the // consumer of the consensus output channel.
let expected_last_processed_index: usize = 2; - let expected_last_processed_round = - expected_last_processed_index as u32 * DEFAULT_WAVE_LENGTH; let mut commits = observer .handle_commit( leaders @@ -443,11 +429,7 @@ mod tests { // last processed index from the consumer over consensus output channel let _observer = CommitObserver::new( context.clone(), - CommitConsumer::new( - sender, - expected_last_processed_round as Round, - expected_last_processed_index as CommitIndex, - ), + CommitConsumer::new(sender, expected_last_processed_index as CommitIndex), dag_state.clone(), mem_store.clone(), leader_schedule, @@ -480,7 +462,6 @@ mod tests { context.clone(), mem_store.clone(), ))); - let last_processed_commit_round = 0; let last_processed_commit_index = 0; let (sender, mut receiver) = unbounded_channel("consensus_output"); @@ -491,11 +472,7 @@ mod tests { let mut observer = CommitObserver::new( context.clone(), - CommitConsumer::new( - sender.clone(), - last_processed_commit_round, - last_processed_commit_index, - ), + CommitConsumer::new(sender.clone(), last_processed_commit_index), dag_state.clone(), mem_store.clone(), leader_schedule.clone(), @@ -518,8 +495,6 @@ mod tests { // Commit all of the leaders and "receive" the subdags as the consumer of // the consensus output channel. let expected_last_processed_index: usize = 10; - let expected_last_processed_round = - expected_last_processed_index as u32 * DEFAULT_WAVE_LENGTH; let commits = observer.handle_commit(leaders.clone()).unwrap(); // Check commits sent over consensus output channel is accurate @@ -548,11 +523,7 @@ mod tests { // last processed index from the consumer over consensus output channel let _observer = CommitObserver::new( context.clone(), - CommitConsumer::new( - sender, - expected_last_processed_round as Round, - expected_last_processed_index as CommitIndex, - ), + CommitConsumer::new(sender, expected_last_processed_index as CommitIndex), dag_state.clone(), mem_store.clone(), leader_schedule, diff --git a/consensus/core/src/commit_syncer.rs b/consensus/core/src/commit_syncer.rs index 6737561786168..f4ecf4fa2f951 100644 --- a/consensus/core/src/commit_syncer.rs +++ b/consensus/core/src/commit_syncer.rs @@ -47,23 +47,60 @@ use tracing::{debug, info, warn}; use crate::{ block::{BlockAPI, BlockRef, SignedBlock, VerifiedBlock}, block_verifier::BlockVerifier, - commit::{ - Commit, CommitAPI as _, CommitDigest, CommitRange, CommitRef, TrustedCommit, - GENESIS_COMMIT_INDEX, - }, + commit::{Commit, CommitAPI as _, CommitDigest, CommitRange, CommitRef, TrustedCommit}, + commit_vote_monitor::CommitVoteMonitor, context::Context, core_thread::CoreThreadDispatcher, dag_state::DagState, error::{ConsensusError, ConsensusResult}, network::NetworkClient, stake_aggregator::{QuorumThreshold, StakeAggregator}, - CommitIndex, + CommitConsumerMonitor, CommitIndex, }; -pub(crate) struct CommitSyncer { +// Handle to stop the CommitSyncer loop. +pub(crate) struct CommitSyncerHandle { schedule_task: JoinHandle<()>, tx_shutdown: oneshot::Sender<()>, - _phantom: std::marker::PhantomData, +} + +impl CommitSyncerHandle { + pub(crate) async fn stop(self) { + let _ = self.tx_shutdown.send(()); + // Do not abort schedule task, which waits for fetches to shut down. + if let Err(e) = self.schedule_task.await { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } + } + } +} + +pub(crate) struct CommitSyncer { + // States shared by scheduler and fetch tasks. + + // Shared components wrapper. 
+ inner: Arc>, + // State of peers shared by fetch tasks, to determine the next peer to fetch against. + peer_state: Arc>, + + // States only used by the scheduler. + + // Inflight requests to fetch commits from different authorities. + inflight_fetches: JoinSet<(u32, Vec, Vec)>, + // Additional ranges of commits to fetch. + pending_fetches: BTreeSet, + // Fetched commits and blocks by commit range. + fetched_ranges: BTreeMap>, + // Highest commit index among inflight and pending fetches. + // Used to determine the start of new ranges to be fetched. + highest_scheduled_index: Option, + // Highest index among fetched commits, after commits and blocks are verified. + // Used for metrics. + highest_fetched_commit_index: CommitIndex, + // The commit index that is the max of highest local commit index and commit index inflight to Core. + // Used to determine if fetched blocks can be sent to Core without gaps. + synced_commit_index: CommitIndex, } impl CommitSyncer { @@ -71,219 +108,269 @@ impl CommitSyncer { context: Arc, core_thread_dispatcher: Arc, commit_vote_monitor: Arc, + commit_consumer_monitor: Arc, network_client: Arc, block_verifier: Arc, dag_state: Arc>, ) -> Self { - let fetch_state = Arc::new(Mutex::new(FetchState::new(&context))); + let peer_state = Arc::new(Mutex::new(PeerState::new(&context))); let inner = Arc::new(Inner { context, core_thread_dispatcher, commit_vote_monitor, + commit_consumer_monitor, network_client, block_verifier, dag_state, }); - let (tx_shutdown, rx_shutdown) = oneshot::channel(); - let schedule_task = - spawn_logged_monitored_task!(Self::schedule_loop(inner, fetch_state, rx_shutdown)); + let synced_commit_index = inner.dag_state.read().last_commit_index(); CommitSyncer { - schedule_task, - tx_shutdown, - _phantom: Default::default(), + inner, + peer_state, + inflight_fetches: JoinSet::new(), + pending_fetches: BTreeSet::new(), + fetched_ranges: BTreeMap::new(), + highest_scheduled_index: None, + highest_fetched_commit_index: 0, + synced_commit_index, } } - pub(crate) async fn stop(self) { - let _ = self.tx_shutdown.send(()); - // Do not abort schedule task, which waits for fetches to shut down. - let _ = self.schedule_task.await; + pub(crate) fn start(self) -> CommitSyncerHandle { + let (tx_shutdown, rx_shutdown) = oneshot::channel(); + let schedule_task = spawn_logged_monitored_task!(self.schedule_loop(rx_shutdown,)); + CommitSyncerHandle { + schedule_task, + tx_shutdown, + } } - async fn schedule_loop( - inner: Arc>, - fetch_state: Arc>, - mut rx_shutdown: oneshot::Receiver<()>, - ) { + async fn schedule_loop(mut self, mut rx_shutdown: oneshot::Receiver<()>) { let mut interval = tokio::time::interval(Duration::from_secs(2)); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); - // Inflight requests to fetch commits from different authorities. - let mut inflight_fetches = JoinSet::new(); - // Additional ranges (inclusive start and end) of commits to fetch. - let mut pending_fetches = BTreeSet::::new(); - // Fetched commits and blocks by commit indices. - let mut fetched_blocks = BTreeMap::>::new(); - // Highest end index among inflight and pending fetches. - // Used to determine if and which new ranges to fetch. - let mut highest_scheduled_index = Option::::None; - // The commit index that is the max of local last commit index and highest commit index of blocks sent to Core. - // Used to determine if fetched blocks can be sent to Core without gaps. 
- let mut synced_commit_index = inner.dag_state.read().last_commit_index(); - let mut highest_fetched_commit_index = 0; loop { tokio::select! { // Periodically, schedule new fetches if the node is falling behind. _ = interval.tick() => { - let quorum_commit_index = inner.commit_vote_monitor.quorum_commit_index(); - let local_commit_index = inner.dag_state.read().last_commit_index(); - let metrics = &inner.context.metrics.node_metrics; - metrics.commit_sync_quorum_index.set(quorum_commit_index as i64); - metrics.commit_sync_local_index.set(local_commit_index as i64); - // Update synced_commit_index periodically to make sure it is not smaller than - // local commit index. - synced_commit_index = synced_commit_index.max(local_commit_index); - info!( - "Checking to schedule fetches: synced_commit_index={}, highest_scheduled_index={}, quorum_commit_index={}", - synced_commit_index, highest_scheduled_index.unwrap_or(0), quorum_commit_index, - ); - // TODO: pause commit sync when execution of commits is lagging behind, maybe through Core. - // TODO: cleanup inflight fetches that are no longer needed. - let fetch_after_index = synced_commit_index.max(highest_scheduled_index.unwrap_or(0)); - // When the node is falling behind, schedule pending fetches which will be executed on later. - 'pending: for prev_end in (fetch_after_index..=quorum_commit_index).step_by(inner.context.parameters.commit_sync_batch_size as usize) { - // Create range with inclusive start and end. - let range_start = prev_end + 1; - let range_end = prev_end + inner.context.parameters.commit_sync_batch_size; - // When the condition below is true, [range_start, range_end] contains less number of commits - // than the target batch size. Not creating the smaller batch is intentional, to avoid the - // cost of processing more and smaller batches. - // Block broadcast, subscription and synchronization will help the node catchup. - if range_end > quorum_commit_index { - break 'pending; - } - pending_fetches.insert((range_start..=range_end).into()); - // quorum_commit_index should be non-decreasing, so highest_scheduled_index should not - // decrease either. - highest_scheduled_index = Some(range_end); - } + self.try_schedule_once(); } - - // Processed fetched blocks. - Some(result) = inflight_fetches.join_next(), if !inflight_fetches.is_empty() => { + // Handles results from fetch tasks. + Some(result) = self.inflight_fetches.join_next(), if !self.inflight_fetches.is_empty() => { if let Err(e) = result { - warn!("Fetch cancelled or panicked, CommitSyncer shutting down: {}", e); + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } + warn!("Fetch cancelled. CommitSyncer shutting down: {}", e); // If any fetch is cancelled or panicked, try to shutdown and exit the loop. 
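                        // (By this point a panic inside a fetch task has already been
                        // re-raised via resume_unwind above, so reaching the shutdown
                        // below means the task was cancelled.)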
- inflight_fetches.shutdown().await; + self.inflight_fetches.shutdown().await; return; } - let (target_end, commits, blocks): (CommitIndex, Vec, Vec) = result.unwrap(); - assert!(!commits.is_empty()); - let metrics = &inner.context.metrics.node_metrics; - metrics.commit_sync_fetched_commits.inc_by(commits.len() as u64); - metrics.commit_sync_fetched_blocks.inc_by(blocks.len() as u64); - metrics.commit_sync_total_fetched_blocks_size.inc_by( - blocks.iter().map(|b| b.serialized().len() as u64).sum::() - ); - - let (commit_start, commit_end) = (commits.first().unwrap().index(), commits.last().unwrap().index()); - - highest_fetched_commit_index = highest_fetched_commit_index.max(commit_end); - metrics.commit_sync_highest_fetched_index.set(highest_fetched_commit_index.into()); - - // Allow returning partial results, and try fetching the rest separately. - if commit_end < target_end { - pending_fetches.insert((commit_end + 1..=target_end).into()); - } - // Make sure synced_commit_index is up to date. - synced_commit_index = synced_commit_index.max(inner.dag_state.read().last_commit_index()); - // Only add new blocks if at least some of them are not already synced. - if synced_commit_index < commit_end { - fetched_blocks.insert((commit_start..=commit_end).into(), blocks); - } - // Try to process as many fetched blocks as possible. - 'fetched: while let Some((fetched_commit_range, _blocks)) = fetched_blocks.first_key_value() { - // Only pop fetched_blocks if there is no gap with blocks already synced. - // Note: start, end and synced_commit_index are all inclusive. - let (fetched_commit_range, blocks) = if fetched_commit_range.start() <= synced_commit_index + 1 { - fetched_blocks.pop_first().unwrap() - } else { - // Found gap between earliest fetched block and latest synced block, - // so not sending additional blocks to Core. - metrics.commit_sync_gap_on_processing.inc(); - break 'fetched; - }; - // Avoid sending to Core a whole batch of already synced blocks. - if fetched_commit_range.end() <= synced_commit_index { - continue 'fetched; - } - debug!( - "Fetched certified blocks: {}", - blocks - .iter() - .map(|b| b.reference().to_string()) - .join(","), - ); - // If core thread cannot handle the incoming blocks, it is ok to block here. - // Also it is possible to have missing ancestors because an equivocating validator - // may produce blocks that are not included in commits but are ancestors to other blocks. - // Synchronizer is needed to fill in the missing ancestors in this case. - match inner.core_thread_dispatcher.add_blocks(blocks).await { - Ok(missing) => { - if !missing.is_empty() { - warn!("Fetched blocks have missing ancestors: {:?}", missing); - } - } - Err(e) => { - info!("Failed to add blocks, shutting down: {}", e); - return; - } - }; - // Once commits and blocks are sent to Core, ratchet up synced_commit_index - synced_commit_index = synced_commit_index.max(fetched_commit_range.end()); - } + let (target_end, commits, blocks) = result.unwrap(); + self.handle_fetch_result(target_end, commits, blocks).await; } - _ = &mut rx_shutdown => { // Shutdown requested. info!("CommitSyncer shutting down ..."); - inflight_fetches.shutdown().await; + self.inflight_fetches.shutdown().await; return; } } - // Cap parallel fetches based on configured limit and committee size, to avoid overloading the network. 
-                // Also when there are too many fetched blocks that cannot be sent to Core before an earlier fetch
-                // has not finished, reduce parallelism so the earlier fetch can retry on a better host and succeed.
-                let target_parallel_fetches = inner
-                    .context
-                    .parameters
-                    .commit_sync_parallel_fetches
-                    .min(inner.context.committee.size() * 2 / 3)
-                    .min(
-                        inner
-                            .context
-                            .parameters
-                            .commit_sync_batches_ahead
-                            .saturating_sub(fetched_blocks.len()),
-                    )
-                    .max(1);
-                // Start new fetches if there are pending batches and available slots.
-                loop {
-                    if inflight_fetches.len() >= target_parallel_fetches {
-                        break;
-                    }
-                    let Some(commit_range) = pending_fetches.pop_first() else {
+            self.try_start_fetches();
+        }
+    }
+
+    fn try_schedule_once(&mut self) {
+        let quorum_commit_index = self.inner.commit_vote_monitor.quorum_commit_index();
+        let local_commit_index = self.inner.dag_state.read().last_commit_index();
+        let metrics = &self.inner.context.metrics.node_metrics;
+        metrics
+            .commit_sync_quorum_index
+            .set(quorum_commit_index as i64);
+        metrics
+            .commit_sync_local_index
+            .set(local_commit_index as i64);
+        let highest_handled_index = self.inner.commit_consumer_monitor.highest_handled_commit();
+        let highest_scheduled_index = self.highest_scheduled_index.unwrap_or(0);
+        // Update synced_commit_index periodically to make sure it is no smaller than
+        // the local commit index.
+        self.synced_commit_index = self.synced_commit_index.max(local_commit_index);
+        let unhandled_commits_threshold = self.unhandled_commits_threshold();
+        info!(
+            "Checking to schedule fetches: synced_commit_index={}, highest_handled_index={}, highest_scheduled_index={}, quorum_commit_index={}, unhandled_commits_threshold={}",
+            self.synced_commit_index, highest_handled_index, highest_scheduled_index, quorum_commit_index, unhandled_commits_threshold,
+        );
+
+        // TODO: clean up inflight fetches that are no longer needed.
+        let fetch_after_index = self
+            .synced_commit_index
+            .max(self.highest_scheduled_index.unwrap_or(0));
+        // When the node is falling behind, schedule pending fetches to be executed later on.
+        for prev_end in (fetch_after_index..=quorum_commit_index)
+            .step_by(self.inner.context.parameters.commit_sync_batch_size as usize)
+        {
+            // Create range with inclusive start and end.
+            let range_start = prev_end + 1;
+            let range_end = prev_end + self.inner.context.parameters.commit_sync_batch_size;
+            // A commit range is not fetched when [range_start, range_end] contains fewer commits
+            // than the target batch size. This is to avoid the cost of processing more and smaller batches.
+            // Block broadcast, subscription and synchronization will help the node catch up.
+            if quorum_commit_index < range_end {
+                break;
+            }
+            // Pause scheduling new fetches when handling of commits is lagging.
+            if highest_handled_index + unhandled_commits_threshold < range_end {
+                warn!("Skip scheduling new commit fetches: consensus handler is lagging. highest_handled_index={}, highest_scheduled_index={}", highest_handled_index, highest_scheduled_index);
+                break;
+            }
+            self.pending_fetches
+                .insert((range_start..=range_end).into());
+            // quorum_commit_index should be non-decreasing, so highest_scheduled_index should not
+            // decrease either.
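+            // Worked example with hypothetical numbers: if commit_sync_batch_size = 5,
+            // fetch_after_index = 0 and quorum_commit_index = 12, and the consumer is
+            // keeping up, the loop schedules [1..=5] and [6..=10], then stops at the
+            // partial tail [11..=15] because 15 > 12.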
+ self.highest_scheduled_index = Some(range_end); + } + } + + async fn handle_fetch_result( + &mut self, + target_end: CommitIndex, + commits: Vec, + blocks: Vec, + ) { + assert!(!commits.is_empty()); + let metrics = &self.inner.context.metrics.node_metrics; + metrics + .commit_sync_fetched_commits + .inc_by(commits.len() as u64); + metrics + .commit_sync_fetched_blocks + .inc_by(blocks.len() as u64); + metrics.commit_sync_total_fetched_blocks_size.inc_by( + blocks + .iter() + .map(|b| b.serialized().len() as u64) + .sum::(), + ); + + let (commit_start, commit_end) = ( + commits.first().unwrap().index(), + commits.last().unwrap().index(), + ); + self.highest_fetched_commit_index = self.highest_fetched_commit_index.max(commit_end); + metrics + .commit_sync_highest_fetched_index + .set(self.highest_fetched_commit_index as i64); + + // Allow returning partial results, and try fetching the rest separately. + if commit_end < target_end { + self.pending_fetches + .insert((commit_end + 1..=target_end).into()); + } + // Make sure synced_commit_index is up to date. + self.synced_commit_index = self + .synced_commit_index + .max(self.inner.dag_state.read().last_commit_index()); + // Only add new blocks if at least some of them are not already synced. + if self.synced_commit_index < commit_end { + self.fetched_ranges + .insert((commit_start..=commit_end).into(), blocks); + } + // Try to process as many fetched blocks as possible. + while let Some((fetched_commit_range, _blocks)) = self.fetched_ranges.first_key_value() { + // Only pop fetched_ranges if there is no gap with blocks already synced. + // Note: start, end and synced_commit_index are all inclusive. + let (fetched_commit_range, blocks) = + if fetched_commit_range.start() <= self.synced_commit_index + 1 { + self.fetched_ranges.pop_first().unwrap() + } else { + // Found gap between earliest fetched block and latest synced block, + // so not sending additional blocks to Core. + metrics.commit_sync_gap_on_processing.inc(); break; }; - inflight_fetches.spawn(Self::fetch_loop( - inner.clone(), - fetch_state.clone(), - commit_range, - )); + // Avoid sending to Core a whole batch of already synced blocks. + if fetched_commit_range.end() <= self.synced_commit_index { + continue; } - let metrics = &inner.context.metrics.node_metrics; - metrics - .commit_sync_inflight_fetches - .set(inflight_fetches.len() as i64); - metrics - .commit_sync_pending_fetches - .set(pending_fetches.len() as i64); - metrics - .commit_sync_highest_synced_index - .set(synced_commit_index as i64); + debug!( + "Fetched certified blocks: {}", + blocks.iter().map(|b| b.reference().to_string()).join(","), + ); + // If core thread cannot handle the incoming blocks, it is ok to block here. + // Also it is possible to have missing ancestors because an equivocating validator + // may produce blocks that are not included in commits but are ancestors to other blocks. + // Synchronizer is needed to fill in the missing ancestors in this case. 
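+            // (A hypothetical trace of the gap check above: with synced_commit_index = 10,
+            // a fetched range [11..=15] is popped since 11 <= 10 + 1, while [18..=20]
+            // would stay queued until [16..=17] arrives, as sending it immediately
+            // would leave a gap of unsynced commits.)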
+ match self.inner.core_thread_dispatcher.add_blocks(blocks).await { + Ok(missing) => { + if !missing.is_empty() { + warn!("Fetched blocks have missing ancestors: {:?}", missing); + } + } + Err(e) => { + info!("Failed to add blocks, shutting down: {}", e); + return; + } + }; + // Once commits and blocks are sent to Core, ratchet up synced_commit_index + self.synced_commit_index = self.synced_commit_index.max(fetched_commit_range.end()); + } + + metrics + .commit_sync_inflight_fetches + .set(self.inflight_fetches.len() as i64); + metrics + .commit_sync_pending_fetches + .set(self.pending_fetches.len() as i64); + metrics + .commit_sync_highest_synced_index + .set(self.synced_commit_index as i64); + } + + fn try_start_fetches(&mut self) { + // Cap parallel fetches based on configured limit and committee size, to avoid overloading the network. + // Also when there are too many fetched blocks that cannot be sent to Core before an earlier fetch + // has not finished, reduce parallelism so the earlier fetch can retry on a better host and succeed. + let target_parallel_fetches = self + .inner + .context + .parameters + .commit_sync_parallel_fetches + .min(self.inner.context.committee.size() * 2 / 3) + .min( + self.inner + .context + .parameters + .commit_sync_batches_ahead + .saturating_sub(self.fetched_ranges.len()), + ) + .max(1); + // Start new fetches if there are pending batches and available slots. + loop { + if self.inflight_fetches.len() >= target_parallel_fetches { + break; + } + let Some(commit_range) = self.pending_fetches.pop_first() else { + break; + }; + self.inflight_fetches.spawn(Self::fetch_loop( + self.inner.clone(), + self.peer_state.clone(), + commit_range, + )); } + + let metrics = &self.inner.context.metrics.node_metrics; + metrics + .commit_sync_inflight_fetches + .set(self.inflight_fetches.len() as i64); + metrics + .commit_sync_pending_fetches + .set(self.pending_fetches.len() as i64); + metrics + .commit_sync_highest_synced_index + .set(self.synced_commit_index as i64); } // Retries fetching commits and blocks from available authorities, until a request succeeds @@ -291,7 +378,7 @@ impl CommitSyncer { // Returns the fetched commits and blocks referenced by the commits. async fn fetch_loop( inner: Arc>, - fetch_state: Arc>, + peer_state: Arc>, commit_range: CommitRange, ) -> (CommitIndex, Vec, Vec) { let _timer = inner @@ -302,7 +389,7 @@ impl CommitSyncer { .start_timer(); info!("Starting to fetch commits in {commit_range:?} ...",); loop { - match Self::fetch_once(inner.clone(), fetch_state.clone(), commit_range.clone()).await { + match Self::fetch_once(inner.clone(), peer_state.clone(), commit_range.clone()).await { Ok((commits, blocks)) => { info!("Finished fetching commits in {commit_range:?}",); return (commit_range.end(), commits, blocks); @@ -327,7 +414,7 @@ impl CommitSyncer { // and sent to Core for processing. async fn fetch_once( inner: Arc>, - fetch_state: Arc>, + peer_state: Arc>, commit_range: CommitRange, ) -> ConsensusResult<(Vec, Vec)> { const FETCH_COMMITS_TIMEOUT: Duration = Duration::from_secs(30); @@ -346,7 +433,7 @@ impl CommitSyncer { // 1. Find an available authority to fetch commits and blocks from, and wait // if it is not yet ready. 
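        // (available_authorities is a BTreeSet of (Instant, retries, AuthorityIndex)
        // tuples, so pop_first() yields the peer whose next-available time is the
        // earliest; see PeerState further below.)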
let Some((available_time, retries, target_authority)) = - fetch_state.lock().available_authorities.pop_first() + peer_state.lock().available_authorities.pop_first() else { sleep(MAX_RETRY_INTERVAL).await; return Err(ConsensusError::NoAvailableAuthorityToFetchCommits); @@ -367,17 +454,17 @@ impl CommitSyncer { .await { Ok(result) => { - let mut fetch_state = fetch_state.lock(); + let mut peer_state = peer_state.lock(); let now = Instant::now(); - fetch_state + peer_state .available_authorities .insert((now, 0, target_authority)); result } Err(e) => { - let mut fetch_state = fetch_state.lock(); + let mut peer_state = peer_state.lock(); let now = Instant::now(); - fetch_state.available_authorities.insert(( + peer_state.available_authorities.insert(( now + FETCH_RETRY_BASE_INTERVAL * retries.min(FETCH_RETRY_INTERVAL_LIMIT), retries.saturating_add(1), target_authority, @@ -480,7 +567,7 @@ impl CommitSyncer { .metrics .node_metrics .block_timestamp_drift_wait_ms - .with_label_values(&[peer_hostname, &"commit_syncer"]) + .with_label_values(&[peer_hostname, "commit_syncer"]) .inc_by(forward_drift); let forward_drift = Duration::from_millis(forward_drift); if forward_drift >= inner.context.parameters.max_forward_time_drift { @@ -495,56 +582,35 @@ impl CommitSyncer { Ok((commits, fetched_blocks)) } -} -/// Monitors commit votes from received and verified blocks, -/// and keeps track of the highest commit voted by each authority and certified by a quorum. -pub(crate) struct CommitVoteMonitor { - context: Arc, - // Highest commit index voted by each authority. - highest_voted_commits: Mutex>, -} + fn unhandled_commits_threshold(&self) -> CommitIndex { + self.inner.context.parameters.commit_sync_batch_size + * (self.inner.context.parameters.commit_sync_batches_ahead as u32) + } -impl CommitVoteMonitor { - pub(crate) fn new(context: Arc) -> Self { - let highest_voted_commits = Mutex::new(vec![0; context.committee.size()]); - Self { - context, - highest_voted_commits, - } + #[cfg(test)] + fn pending_fetches(&self) -> BTreeSet { + self.pending_fetches.clone() } - // Records the highest commit index voted in each block. - pub(crate) fn observe(&self, block: &VerifiedBlock) { - let mut highest_voted_commits = self.highest_voted_commits.lock(); - for vote in block.commit_votes() { - if vote.index > highest_voted_commits[block.author()] { - highest_voted_commits[block.author()] = vote.index; - } - } + #[cfg(test)] + fn fetched_ranges(&self) -> BTreeMap> { + self.fetched_ranges.clone() } - // Finds the highest commit index certified by a quorum. - // When an authority votes for commit index S, it is also voting for all commit indices 1 <= i < S. - // So the quorum commit index is the smallest index S such that the sum of stakes of authorities - // voting for commit indices >= S passes the quorum threshold. - pub(crate) fn quorum_commit_index(&self) -> CommitIndex { - let highest_voted_commits = self.highest_voted_commits.lock(); - let mut highest_voted_commits = highest_voted_commits - .iter() - .zip(self.context.committee.authorities()) - .map(|(commit_index, (_, a))| (*commit_index, a.stake)) - .collect::>(); - // Sort by commit index then stake, in descending order. 
- highest_voted_commits.sort_by(|a, b| a.cmp(b).reverse()); - let mut total_stake = 0; - for (commit_index, stake) in highest_voted_commits { - total_stake += stake; - if total_stake >= self.context.committee.quorum_threshold() { - return commit_index; - } - } - GENESIS_COMMIT_INDEX + #[cfg(test)] + fn highest_scheduled_index(&self) -> Option { + self.highest_scheduled_index + } + + #[cfg(test)] + fn highest_fetched_commit_index(&self) -> CommitIndex { + self.highest_fetched_commit_index + } + + #[cfg(test)] + fn synced_commit_index(&self) -> CommitIndex { + self.synced_commit_index } } @@ -552,6 +618,7 @@ struct Inner { context: Arc, core_thread_dispatcher: Arc, commit_vote_monitor: Arc, + commit_consumer_monitor: Arc, network_client: Arc, block_verifier: Arc, dag_state: Arc>, @@ -636,7 +703,7 @@ impl Inner { } } -struct FetchState { +struct PeerState { // The value is a tuple of // - the next available time for the authority to fetch from, // - count of current consecutive failures fetching from the authority, reset on success, @@ -646,7 +713,7 @@ struct FetchState { available_authorities: BTreeSet<(Instant, u32, AuthorityIndex)>, } -impl FetchState { +impl PeerState { fn new(context: &Context) -> Self { // Randomize the initial order of authorities. let mut shuffled_authority_indices: Vec<_> = context @@ -670,58 +737,174 @@ impl FetchState { } } -// TODO: add more unit and integration tests. #[cfg(test)] -mod test { - use std::sync::Arc; +mod tests { + use std::{sync::Arc, time::Duration}; + + use bytes::Bytes; + use consensus_config::{AuthorityIndex, Parameters}; + use parking_lot::RwLock; - use super::CommitVoteMonitor; use crate::{ - block::{TestBlock, VerifiedBlock}, - commit::{CommitDigest, CommitRef}, + block::{BlockRef, TestBlock, VerifiedBlock}, + block_verifier::NoopBlockVerifier, + commit::CommitRange, + commit_syncer::CommitSyncer, + commit_vote_monitor::CommitVoteMonitor, context::Context, + core_thread::MockCoreThreadDispatcher, + dag_state::DagState, + error::ConsensusResult, + network::{BlockStream, NetworkClient}, + storage::mem_store::MemStore, + CommitConsumerMonitor, CommitDigest, CommitRef, Round, }; - #[tokio::test] - async fn test_commit_vote_monitor() { - let context = Arc::new(Context::new_for_test(4).0); - let monitor = CommitVoteMonitor::new(context.clone()); - - // Observe commit votes for indices 5, 6, 7, 8 from blocks. - let blocks = (0..4) - .map(|i| { - VerifiedBlock::new_for_test( - TestBlock::new(10, i) - .set_commit_votes(vec![CommitRef::new(5 + i, CommitDigest::MIN)]) - .build(), - ) - }) - .collect::>(); - for b in blocks { - monitor.observe(&b); + #[derive(Default)] + struct FakeNetworkClient {} + + #[async_trait::async_trait] + impl NetworkClient for FakeNetworkClient { + const SUPPORT_STREAMING: bool = true; + + async fn send_block( + &self, + _peer: AuthorityIndex, + _serialized_block: &VerifiedBlock, + _timeout: Duration, + ) -> ConsensusResult<()> { + unimplemented!("Unimplemented") } - // CommitIndex 6 is the highest index supported by a quorum. - assert_eq!(monitor.quorum_commit_index(), 6); - - // Observe new blocks with new votes from authority 0 and 1. 
- let blocks = (0..2) - .map(|i| { - VerifiedBlock::new_for_test( - TestBlock::new(11, i) - .set_commit_votes(vec![ - CommitRef::new(6 + i, CommitDigest::MIN), - CommitRef::new(7 + i, CommitDigest::MIN), - ]) - .build(), - ) - }) - .collect::>(); - for b in blocks { - monitor.observe(&b); + async fn subscribe_blocks( + &self, + _peer: AuthorityIndex, + _last_received: Round, + _timeout: Duration, + ) -> ConsensusResult { + unimplemented!("Unimplemented") } - // Highest commit index per authority should be 7, 8, 7, 8 now. - assert_eq!(monitor.quorum_commit_index(), 7); + async fn fetch_blocks( + &self, + _peer: AuthorityIndex, + _block_refs: Vec, + _highest_accepted_rounds: Vec, + _timeout: Duration, + ) -> ConsensusResult> { + unimplemented!("Unimplemented") + } + + async fn fetch_commits( + &self, + _peer: AuthorityIndex, + _commit_range: CommitRange, + _timeout: Duration, + ) -> ConsensusResult<(Vec, Vec)> { + unimplemented!("Unimplemented") + } + + async fn fetch_latest_blocks( + &self, + _peer: AuthorityIndex, + _authorities: Vec, + _timeout: Duration, + ) -> ConsensusResult> { + unimplemented!("Unimplemented") + } + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn commit_syncer_start_and_pause_scheduling() { + // SETUP + let (context, _) = Context::new_for_test(4); + // Use smaller batches and fetch limits for testing. + let context = Context { + own_index: AuthorityIndex::new_for_test(3), + parameters: Parameters { + commit_sync_batch_size: 5, + commit_sync_batches_ahead: 5, + commit_sync_parallel_fetches: 5, + max_blocks_per_fetch: 5, + ..context.parameters + }, + ..context + }; + let context = Arc::new(context); + let block_verifier = Arc::new(NoopBlockVerifier {}); + let core_thread_dispatcher = Arc::new(MockCoreThreadDispatcher::default()); + let network_client = Arc::new(FakeNetworkClient::default()); + let store = Arc::new(MemStore::new()); + let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store))); + let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); + let commit_consumer_monitor = Arc::new(CommitConsumerMonitor::new(0)); + let mut commit_syncer = CommitSyncer::new( + context, + core_thread_dispatcher, + commit_vote_monitor.clone(), + commit_consumer_monitor.clone(), + network_client, + block_verifier, + dag_state, + ); + + // Check initial state. + assert!(commit_syncer.pending_fetches().is_empty()); + assert!(commit_syncer.fetched_ranges().is_empty()); + assert!(commit_syncer.highest_scheduled_index().is_none()); + assert_eq!(commit_syncer.highest_fetched_commit_index(), 0); + assert_eq!(commit_syncer.synced_commit_index(), 0); + + // Observe round 15 blocks voting for commit 10 from authorities 0 to 2 in CommitVoteMonitor + for i in 0..3 { + let test_block = TestBlock::new(15, i) + .set_commit_votes(vec![CommitRef::new(10, CommitDigest::MIN)]) + .build(); + let block = VerifiedBlock::new_for_test(test_block); + commit_vote_monitor.observe_block(&block); + } + + // Fetches should be scheduled after seeing progress of other validators. + commit_syncer.try_schedule_once(); + + // Verify state. 
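+        // (With commit_sync_batch_size = 5 and quorum_commit_index = 10, exactly two
+        // full ranges [1..=5] and [6..=10] are schedulable, hence two pending fetches
+        // and highest_scheduled_index == Some(10) below.)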
+ assert_eq!(commit_syncer.pending_fetches().len(), 2); + assert!(commit_syncer.fetched_ranges().is_empty()); + assert_eq!(commit_syncer.highest_scheduled_index(), Some(10)); + assert_eq!(commit_syncer.highest_fetched_commit_index(), 0); + assert_eq!(commit_syncer.synced_commit_index(), 0); + + // Observe round 40 blocks voting for commit 35 from authorities 0 to 2 in CommitVoteMonitor + for i in 0..3 { + let test_block = TestBlock::new(40, i) + .set_commit_votes(vec![CommitRef::new(35, CommitDigest::MIN)]) + .build(); + let block = VerifiedBlock::new_for_test(test_block); + commit_vote_monitor.observe_block(&block); + } + + // Fetches should be scheduled until the unhandled commits threshold. + commit_syncer.try_schedule_once(); + + // Verify commit syncer is paused after scheduling 15 commits to index 25. + assert_eq!(commit_syncer.unhandled_commits_threshold(), 25); + assert_eq!(commit_syncer.highest_scheduled_index(), Some(25)); + let pending_fetches = commit_syncer.pending_fetches(); + assert_eq!(pending_fetches.len(), 5); + + // Indicate commit index 25 is consumed, and try to schedule again. + commit_consumer_monitor.set_highest_handled_commit(25); + commit_syncer.try_schedule_once(); + + // Verify commit syncer schedules fetches up to index 35. + assert_eq!(commit_syncer.highest_scheduled_index(), Some(35)); + let pending_fetches = commit_syncer.pending_fetches(); + assert_eq!(pending_fetches.len(), 7); + + // Verify contiguous ranges are scheduled. + for (range, start) in pending_fetches.iter().zip((1..35).step_by(5)) { + assert_eq!(range.start(), start); + assert_eq!(range.end(), start + 4); + } } } diff --git a/consensus/core/src/commit_vote_monitor.rs b/consensus/core/src/commit_vote_monitor.rs new file mode 100644 index 0000000000000..654260661da4f --- /dev/null +++ b/consensus/core/src/commit_vote_monitor.rs @@ -0,0 +1,118 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use parking_lot::Mutex; + +use crate::{ + block::{BlockAPI as _, VerifiedBlock}, + commit::GENESIS_COMMIT_INDEX, + context::Context, + CommitIndex, +}; + +/// Monitors the progress of consensus commits across the network. +pub(crate) struct CommitVoteMonitor { + context: Arc, + // Highest commit index voted by each authority. + highest_voted_commits: Mutex>, +} + +impl CommitVoteMonitor { + pub(crate) fn new(context: Arc) -> Self { + let highest_voted_commits = Mutex::new(vec![0; context.committee.size()]); + Self { + context, + highest_voted_commits, + } + } + + /// Keeps track of the highest commit voted by each authority. + pub(crate) fn observe_block(&self, block: &VerifiedBlock) { + let mut highest_voted_commits = self.highest_voted_commits.lock(); + for vote in block.commit_votes() { + if vote.index > highest_voted_commits[block.author()] { + highest_voted_commits[block.author()] = vote.index; + } + } + } + + // Finds the highest commit index certified by a quorum. + // When an authority votes for commit index S, it is also voting for all commit indices 1 <= i < S. + // So the quorum commit index is the smallest index S such that the sum of stakes of authorities + // voting for commit indices >= S passes the quorum threshold. 
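+    // Example: four authorities of stake 1 each (quorum threshold 3) voting for
+    // commits [5, 6, 7, 8]: sorted in descending order, the cumulative stake
+    // reaches 3 at commit index 6, so 6 is returned.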
+ pub(crate) fn quorum_commit_index(&self) -> CommitIndex { + let highest_voted_commits = self.highest_voted_commits.lock(); + let mut highest_voted_commits = highest_voted_commits + .iter() + .zip(self.context.committee.authorities()) + .map(|(commit_index, (_, a))| (*commit_index, a.stake)) + .collect::>(); + // Sort by commit index then stake, in descending order. + highest_voted_commits.sort_by(|a, b| a.cmp(b).reverse()); + let mut total_stake = 0; + for (commit_index, stake) in highest_voted_commits { + total_stake += stake; + if total_stake >= self.context.committee.quorum_threshold() { + return commit_index; + } + } + GENESIS_COMMIT_INDEX + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use super::CommitVoteMonitor; + use crate::{ + block::{TestBlock, VerifiedBlock}, + commit::{CommitDigest, CommitRef}, + context::Context, + }; + + #[tokio::test] + async fn test_commit_vote_monitor() { + let context = Arc::new(Context::new_for_test(4).0); + let monitor = CommitVoteMonitor::new(context.clone()); + + // Observe commit votes for indices 5, 6, 7, 8 from blocks. + let blocks = (0..4) + .map(|i| { + VerifiedBlock::new_for_test( + TestBlock::new(10, i) + .set_commit_votes(vec![CommitRef::new(5 + i, CommitDigest::MIN)]) + .build(), + ) + }) + .collect::>(); + for b in blocks { + monitor.observe_block(&b); + } + + // CommitIndex 6 is the highest index supported by a quorum. + assert_eq!(monitor.quorum_commit_index(), 6); + + // Observe new blocks with new votes from authority 0 and 1. + let blocks = (0..2) + .map(|i| { + VerifiedBlock::new_for_test( + TestBlock::new(11, i) + .set_commit_votes(vec![ + CommitRef::new(6 + i, CommitDigest::MIN), + CommitRef::new(7 + i, CommitDigest::MIN), + ]) + .build(), + ) + }) + .collect::>(); + for b in blocks { + monitor.observe_block(&b); + } + + // Highest commit index per authority should be 7, 8, 7, 8 now. + assert_eq!(monitor.quorum_commit_index(), 7); + } +} diff --git a/consensus/core/src/context.rs b/consensus/core/src/context.rs index 64467cf72ca73..440a63a9b1e75 100644 --- a/consensus/core/src/context.rs +++ b/consensus/core/src/context.rs @@ -96,34 +96,50 @@ impl Context { } } -/// A clock that allows to derive the current UNIX system timestamp while guaranteeing that -/// timestamp will be monotonically incremented having tolerance to ntp and system clock changes and corrections. +/// A clock that allows to derive the current UNIX system timestamp while guaranteeing that timestamp +/// will be monotonically incremented, tolerating ntp and system clock changes and corrections. /// Explicitly avoid to make `[Clock]` cloneable to ensure that a single instance is shared behind an `[Arc]` /// wherever is needed in order to make sure that consecutive calls to receive the system timestamp /// will remain monotonically increasing. pub(crate) struct Clock { - unix_epoch_instant: Instant, + initial_instant: Instant, + initial_system_time: SystemTime, } impl Clock { pub fn new() -> Self { - let now = Instant::now(); - let duration_since_unix_epoch = - match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) { - Ok(d) => d, - Err(e) => panic!("SystemTime before UNIX EPOCH! {e}"), - }; - let unix_epoch_instant = now.checked_sub(duration_since_unix_epoch).unwrap(); - - Self { unix_epoch_instant } + Self { + initial_instant: Instant::now(), + initial_system_time: SystemTime::now(), + } } // Returns the current time expressed as UNIX timestamp in milliseconds. - // Calculated with Rust Instant to ensure monotonicity. 
+ // Calculated with Tokio Instant to ensure monotonicity, + // and to allow testing with tokio clock. pub(crate) fn timestamp_utc_ms(&self) -> BlockTimestampMs { - Instant::now() - .checked_duration_since(self.unix_epoch_instant) - .unwrap() + let now: Instant = Instant::now(); + let monotonic_system_time = self + .initial_system_time + .checked_add( + now.checked_duration_since(self.initial_instant) + .unwrap_or_else(|| { + panic!( + "current instant ({:?}) < initial instant ({:?})", + now, self.initial_instant + ) + }), + ) + .expect("Computing system time should not overflow"); + monotonic_system_time + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_else(|_| { + panic!( + "system time ({:?}) < UNIX_EPOCH ({:?})", + monotonic_system_time, + SystemTime::UNIX_EPOCH, + ) + }) .as_millis() as BlockTimestampMs } } diff --git a/consensus/core/src/core.rs b/consensus/core/src/core.rs index c9593548d20a6..f9d5f2041b0aa 100644 --- a/consensus/core/src/core.rs +++ b/consensus/core/src/core.rs @@ -100,6 +100,7 @@ impl Core { signals: CoreSignals, block_signer: ProtocolKeyPair, dag_state: Arc>, + sync_last_known_own_block: bool, ) -> Self { let last_decided_leader = dag_state.read().last_commit_leader(); let number_of_leaders = context @@ -133,7 +134,7 @@ impl Core { last_included_ancestors[ancestor.author] = Some(*ancestor); } - let min_propose_round = if context.parameters.is_sync_last_proposed_block_enabled() { + let min_propose_round = if sync_last_known_own_block { None } else { // if the sync is disabled then we practically don't want to impose any restriction. @@ -305,16 +306,9 @@ impl Core { /// `> last_known_proposed_round`. At the moment is allowed to call the method only once leading to a panic /// if attempt to do multiple times. pub(crate) fn set_last_known_proposed_round(&mut self, round: Round) { - assert!( - self.context - .parameters - .is_sync_last_proposed_block_enabled(), - "Should not attempt to set the last known proposed round if that has been already set" - ); - assert!( - self.last_known_proposed_round.is_none(), - "Attempted to set the last known proposed round more than once" - ); + if self.last_known_proposed_round.is_some() { + panic!("Should not attempt to set the last known proposed round if that has been already set"); + } self.last_known_proposed_round = Some(round); info!("Set last known proposed round to {round}"); } @@ -868,7 +862,7 @@ impl CoreTextFixture { .with_num_commits_per_schedule(10), ); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (signals, signal_receivers) = CoreSignals::new(context.clone()); // Need at least one subscriber to the block broadcast channel. 
let block_receiver = signal_receivers.block_broadcast_receiver(); @@ -876,7 +870,7 @@ impl CoreTextFixture { let (commit_sender, commit_receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(commit_sender.clone(), 0, 0), + CommitConsumer::new(commit_sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -894,6 +888,7 @@ impl CoreTextFixture { signals, block_signer, dag_state, + false, ); Self { @@ -935,7 +930,7 @@ mod test { let context = Arc::new(context); let store = Arc::new(MemStore::new()); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); // Create test blocks for all the authorities for 4 rounds and populate them in store let mut last_round_blocks = genesis_blocks(context.clone()); @@ -974,7 +969,7 @@ mod test { let (sender, _receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -999,6 +994,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state.clone(), + false, ); // New round should be 5 @@ -1045,7 +1041,7 @@ mod test { let context = Arc::new(context); let store = Arc::new(MemStore::new()); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); // Create test blocks for all authorities except our's (index = 0). let mut last_round_blocks = genesis_blocks(context.clone()); @@ -1091,7 +1087,7 @@ mod test { let (sender, _receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -1116,6 +1112,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state.clone(), + false, ); // New round should be 4 @@ -1175,7 +1172,7 @@ mod test { Arc::new(NoopBlockVerifier), ); let (transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (signals, signal_receivers) = CoreSignals::new(context.clone()); // Need at least one subscriber to the block broadcast channel. 
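            // (Typical broadcast-channel semantics: sends fail when no receiver
            // exists, which is presumably why every test here holds a receiver
            // open even when its output is unused.)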
let mut block_receiver = signal_receivers.block_broadcast_receiver(); @@ -1187,7 +1184,7 @@ mod test { let (sender, _receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -1203,6 +1200,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state.clone(), + false, ); // Send some transactions @@ -1288,7 +1286,7 @@ mod test { )); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (signals, signal_receivers) = CoreSignals::new(context.clone()); // Need at least one subscriber to the block broadcast channel. let _block_receiver = signal_receivers.block_broadcast_receiver(); @@ -1296,7 +1294,7 @@ mod test { let (sender, _receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -1312,6 +1310,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state.clone(), + false, ); let mut expected_ancestors = BTreeSet::new(); @@ -1358,7 +1357,7 @@ mod test { telemetry_subscribers::init_for_testing(); let (context, mut key_pairs) = Context::new_for_test(4); let context = Arc::new(context.with_parameters(Parameters { - sync_last_proposed_block_timeout: Duration::from_millis(2_000), + sync_last_known_own_block_timeout: Duration::from_millis(2_000), ..Default::default() })); @@ -1376,7 +1375,7 @@ mod test { )); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (signals, signal_receivers) = CoreSignals::new(context.clone()); // Need at least one subscriber to the block broadcast channel. let _block_receiver = signal_receivers.block_broadcast_receiver(); @@ -1384,7 +1383,7 @@ mod test { let (sender, _receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -1400,6 +1399,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state.clone(), + true, ); // No new block should have been produced @@ -1563,7 +1563,7 @@ mod test { )); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (signals, signal_receivers) = CoreSignals::new(context.clone()); // Need at least one subscriber to the block broadcast channel. 
let _block_receiver = signal_receivers.block_broadcast_receiver(); @@ -1571,7 +1571,7 @@ mod test { let (sender, _receiver) = unbounded_channel("consensus_output"); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store.clone(), leader_schedule.clone(), @@ -1587,6 +1587,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state.clone(), + false, ); // No proposal during recovery. diff --git a/consensus/core/src/core_thread.rs b/consensus/core/src/core_thread.rs index fca3406dd8d64..8c39d6059746f 100644 --- a/consensus/core/src/core_thread.rs +++ b/consensus/core/src/core_thread.rs @@ -8,6 +8,7 @@ use mysten_metrics::{ monitored_mpsc::{channel, Receiver, Sender, WeakSender}, monitored_scope, spawn_logged_monitored_task, }; +use parking_lot::Mutex; use thiserror::Error; use tokio::sync::{oneshot, watch}; use tracing::warn; @@ -231,6 +232,67 @@ impl CoreThreadDispatcher for ChannelCoreThreadDispatcher { } } +// TODO: complete the Mock for thread dispatcher to be used from several tests +#[derive(Default)] +pub(crate) struct MockCoreThreadDispatcher { + add_blocks: Mutex>, + missing_blocks: Mutex>, + last_known_proposed_round: Mutex>, +} + +impl MockCoreThreadDispatcher { + #[cfg(test)] + pub(crate) async fn get_add_blocks(&self) -> Vec { + let mut add_blocks = self.add_blocks.lock(); + add_blocks.drain(0..).collect() + } + + #[cfg(test)] + pub(crate) async fn stub_missing_blocks(&self, block_refs: BTreeSet) { + let mut missing_blocks = self.missing_blocks.lock(); + missing_blocks.extend(block_refs); + } + + #[cfg(test)] + pub(crate) async fn get_last_own_proposed_round(&self) -> Vec { + let last_known_proposed_round = self.last_known_proposed_round.lock(); + last_known_proposed_round.clone() + } +} + +#[async_trait] +impl CoreThreadDispatcher for MockCoreThreadDispatcher { + async fn add_blocks( + &self, + blocks: Vec, + ) -> Result, CoreError> { + let mut add_blocks = self.add_blocks.lock(); + add_blocks.extend(blocks); + Ok(BTreeSet::new()) + } + + async fn new_block(&self, _round: Round, _force: bool) -> Result<(), CoreError> { + Ok(()) + } + + async fn get_missing_blocks(&self) -> Result, CoreError> { + let mut missing_blocks = self.missing_blocks.lock(); + let result = missing_blocks.clone(); + missing_blocks.clear(); + Ok(result) + } + + fn set_consumer_availability(&self, _available: bool) -> Result<(), CoreError> { + todo!() + } + + fn set_last_known_proposed_round(&self, round: Round) -> Result<(), CoreError> { + let mut last_known_proposed_round = self.last_known_proposed_round.lock(); + last_known_proposed_round.push(round); + Ok(()) + } +} + #[cfg(test)] mod test { use mysten_metrics::monitored_mpsc::unbounded_channel; @@ -263,7 +325,7 @@ mod test { Arc::new(NoopBlockVerifier), ); let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone()); - let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone()); let (signals, signal_receivers) = CoreSignals::new(context.clone()); let _block_receiver = signal_receivers.block_broadcast_receiver(); let (sender, _receiver) = unbounded_channel("consensus_output"); @@ -273,7 +335,7 @@ mod test { )); let commit_observer = CommitObserver::new( context.clone(), - CommitConsumer::new(sender.clone(), 0, 0), + CommitConsumer::new(sender.clone(), 0), dag_state.clone(), store, 
leader_schedule.clone(), @@ -292,6 +354,7 @@ mod test { signals, key_pairs.remove(context.own_index.value()).1, dag_state, + false, ); let (core_dispatcher, handle) = ChannelCoreThreadDispatcher::start(core, context); diff --git a/consensus/core/src/dag_state.rs b/consensus/core/src/dag_state.rs index 5ebac04a585a1..aa7f3ca718f28 100644 --- a/consensus/core/src/dag_state.rs +++ b/consensus/core/src/dag_state.rs @@ -307,7 +307,7 @@ impl DagState { .metrics .node_metrics .dag_state_store_read_count - .with_label_values(&[&"get_blocks"]) + .with_label_values(&["get_blocks"]) .inc(); for ((index, _), result) in missing.into_iter().zip(store_results.into_iter()) { @@ -556,7 +556,7 @@ impl DagState { .metrics .node_metrics .dag_state_store_read_count - .with_label_values(&[&"contains_blocks"]) + .with_label_values(&["contains_blocks"]) .inc(); for ((index, _), result) in missing.into_iter().zip(store_results.into_iter()) { diff --git a/consensus/core/src/error.rs b/consensus/core/src/error.rs index d9fdb99e37ed0..9dadc6c84da06 100644 --- a/consensus/core/src/error.rs +++ b/consensus/core/src/error.rs @@ -24,6 +24,15 @@ pub(crate) enum ConsensusError { #[error("Error serializing: {0}")] SerializationFailure(bcs::Error), + #[error("Block contains a transaction that is too large: {size} > {limit}")] + TransactionTooLarge { size: usize, limit: usize }, + + #[error("Block contains too many transactions: {count} > {limit}")] + TooManyTransactions { count: usize, limit: usize }, + + #[error("Block contains too many transaction bytes: {size} > {limit}")] + TooManyTransactionBytes { size: usize, limit: usize }, + #[error("Unexpected block authority {0} from peer {1}")] UnexpectedAuthority(AuthorityIndex, AuthorityIndex), diff --git a/consensus/core/src/leader_scoring.rs b/consensus/core/src/leader_scoring.rs index eaafbcc439e00..380003debd37d 100644 --- a/consensus/core/src/leader_scoring.rs +++ b/consensus/core/src/leader_scoring.rs @@ -41,7 +41,7 @@ pub(crate) struct ReputationScoreCalculator<'a> { impl<'a> ReputationScoreCalculator<'a> { pub(crate) fn new( context: Arc, - unscored_subdags: &Vec, + unscored_subdags: &[CommittedSubDag], scoring_strategy: &'a dyn ScoringStrategy, ) -> Self { let num_authorities = context.committee.size(); diff --git a/consensus/core/src/leader_scoring_strategy.rs b/consensus/core/src/leader_scoring_strategy.rs index b635f5f474cd0..640e3667cd71c 100644 --- a/consensus/core/src/leader_scoring_strategy.rs +++ b/consensus/core/src/leader_scoring_strategy.rs @@ -10,6 +10,7 @@ use crate::{ stake_aggregator::{QuorumThreshold, StakeAggregator}, }; +#[allow(unused)] pub(crate) trait ScoringStrategy: Send + Sync { fn calculate_scores_for_leader(&self, subdag: &UnscoredSubdag, leader_slot: Slot) -> Vec; diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index ba428b1eb7bc4..729fe63c18153 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -31,6 +31,8 @@ mod threshold_clock; mod transaction; mod universal_committer; +mod commit_consumer; +mod commit_vote_monitor; #[cfg(test)] mod test_dag; #[cfg(test)] @@ -40,8 +42,9 @@ mod test_dag_parser; pub use authority_node::ConsensusAuthority; pub use block::{BlockAPI, Round}; -pub use commit::{CommitConsumer, CommitDigest, CommitIndex, CommitRef, CommittedSubDag}; -pub use transaction::{TransactionClient, TransactionVerifier, ValidationError}; +pub use commit::{CommitDigest, CommitIndex, CommitRef, CommittedSubDag}; +pub use commit_consumer::{CommitConsumer, CommitConsumerMonitor}; +pub use 
transaction::{ClientError, TransactionClient, TransactionVerifier, ValidationError}; #[cfg(test)] #[path = "tests/randomized_tests.rs"] diff --git a/consensus/core/src/metrics.rs b/consensus/core/src/metrics.rs index 897607c00452b..8d9f60b179b6b 100644 --- a/consensus/core/src/metrics.rs +++ b/consensus/core/src/metrics.rs @@ -134,6 +134,7 @@ pub(crate) struct NodeMetrics { pub(crate) last_committed_leader_round: IntGauge, pub(crate) last_commit_index: IntGauge, pub(crate) last_known_own_block_round: IntGauge, + pub(crate) sync_last_known_own_block_retries: IntCounter, pub(crate) commit_round_advancement_interval: Histogram, pub(crate) last_decided_leader_round: IntGauge, pub(crate) leader_timeout_total: IntCounterVec, @@ -334,6 +335,11 @@ impl NodeMetrics { "The highest round of our own block as this has been synced from peers during an amnesia recovery", registry, ).unwrap(), + sync_last_known_own_block_retries: register_int_counter_with_registry!( + "sync_last_known_own_block_retries", + "Number of times this node tried to fetch the last own block from peers", + registry, + ).unwrap(), // TODO: add a short status label. invalid_blocks: register_int_counter_vec_with_registry!( "invalid_blocks", diff --git a/consensus/core/src/network/network_tests.rs b/consensus/core/src/network/network_tests.rs index d8113befb6b3a..cef1ff8530c54 100644 --- a/consensus/core/src/network/network_tests.rs +++ b/consensus/core/src/network/network_tests.rs @@ -31,7 +31,11 @@ trait ManagerBuilder { struct AnemoManagerBuilder {} impl ManagerBuilder for AnemoManagerBuilder { - fn build(&self, context: Arc, network_keypair: NetworkKeyPair) -> AnemoManager { + fn build( + &self, + context: Arc, + network_keypair: NetworkKeyPair, + ) -> impl NetworkManager> { AnemoManager::new(context, network_keypair) } } @@ -39,7 +43,11 @@ impl ManagerBuilder for AnemoManagerBuilder { struct TonicManagerBuilder {} impl ManagerBuilder for TonicManagerBuilder { - fn build(&self, context: Arc, network_keypair: NetworkKeyPair) -> TonicManager { + fn build( + &self, + context: Arc, + network_keypair: NetworkKeyPair, + ) -> impl NetworkManager> { TonicManager::new(context, network_keypair) } } diff --git a/consensus/core/src/stake_aggregator.rs b/consensus/core/src/stake_aggregator.rs index 7e907dee1fcb8..11501319e64e9 100644 --- a/consensus/core/src/stake_aggregator.rs +++ b/consensus/core/src/stake_aggregator.rs @@ -11,6 +11,7 @@ pub(crate) trait CommitteeThreshold { pub(crate) struct QuorumThreshold; +#[allow(unused)] pub(crate) struct ValidityThreshold; impl CommitteeThreshold for QuorumThreshold { diff --git a/consensus/core/src/storage/mem_store.rs b/consensus/core/src/storage/mem_store.rs index 2a88fc5bd71ad..ad23cd3652cee 100644 --- a/consensus/core/src/storage/mem_store.rs +++ b/consensus/core/src/storage/mem_store.rs @@ -20,10 +20,12 @@ use crate::{ }; /// In-memory storage for testing. +#[allow(unused)] pub(crate) struct MemStore { inner: RwLock, } +#[allow(unused)] struct Inner { blocks: BTreeMap<(Round, AuthorityIndex, BlockDigest), VerifiedBlock>, digests_by_authorities: BTreeSet<(AuthorityIndex, Round, BlockDigest)>, diff --git a/consensus/core/src/storage/mod.rs b/consensus/core/src/storage/mod.rs index 5c524be30318e..38258c87e8420 100644 --- a/consensus/core/src/storage/mod.rs +++ b/consensus/core/src/storage/mod.rs @@ -17,6 +17,7 @@ use crate::{ }; /// A common interface for consensus storage. 
+#[allow(unused)] pub(crate) trait Store: Send + Sync { /// Writes blocks, consensus commits and other data to store atomically. fn write(&self, write_batch: WriteBatch) -> ConsensusResult<()>; diff --git a/consensus/core/src/subscriber.rs b/consensus/core/src/subscriber.rs index bce04247c7a1d..7b350debf313c 100644 --- a/consensus/core/src/subscriber.rs +++ b/consensus/core/src/subscriber.rs @@ -152,7 +152,7 @@ impl Subscriber { .metrics .node_metrics .subscriber_connection_attempts - .with_label_values(&[&peer_hostname, "success"]) + .with_label_values(&[peer_hostname, "success"]) .inc(); blocks } @@ -162,7 +162,7 @@ impl Subscriber { .metrics .node_metrics .subscriber_connection_attempts - .with_label_values(&[&peer_hostname, "failure"]) + .with_label_values(&[peer_hostname, "failure"]) .inc(); continue 'subscription; } @@ -184,7 +184,7 @@ impl Subscriber { .metrics .node_metrics .subscribed_blocks - .with_label_values(&[&peer_hostname]) + .with_label_values(&[peer_hostname]) .inc(); let result = authority_service .handle_send_block(peer, block.clone()) diff --git a/consensus/core/src/synchronizer.rs b/consensus/core/src/synchronizer.rs index 504cd63249b6e..09193b7d06742 100644 --- a/consensus/core/src/synchronizer.rs +++ b/consensus/core/src/synchronizer.rs @@ -27,13 +27,12 @@ use tokio::{ }; use tracing::{debug, error, info, trace, warn}; -use crate::authority_service::COMMIT_LAG_MULTIPLIER; -use crate::commit_syncer::CommitVoteMonitor; +use crate::{authority_service::COMMIT_LAG_MULTIPLIER, core_thread::CoreThreadDispatcher}; use crate::{ block::{BlockRef, SignedBlock, VerifiedBlock}, block_verifier::BlockVerifier, + commit_vote_monitor::CommitVoteMonitor, context::Context, - core_thread::CoreThreadDispatcher, dag_state::DagState, error::{ConsensusError, ConsensusResult}, network::NetworkClient, @@ -245,6 +244,7 @@ impl Synchronizer, block_verifier: Arc, dag_state: Arc>, + sync_last_known_own_block: bool, ) -> Arc { let (commands_sender, commands_receiver) = channel("consensus_synchronizer_commands", 1_000); @@ -259,29 +259,24 @@ impl Synchronizer Synchronizer Synchronizer Synchronizer, block_verifier: Arc, + commit_vote_monitor: Arc, context: Arc, core_dispatcher: Arc, dag_state: Arc>, @@ -453,6 +449,7 @@ impl Synchronizer Synchronizer, block_verifier: Arc, + commit_vote_monitor: Arc, context: Arc, commands_sender: Sender, sync_method: &str, @@ -530,17 +528,22 @@ impl Synchronizer Synchronizer Synchronizer, authority_index: AuthorityIndex| -> ConsensusResult> { let mut result = Vec::new(); for serialized_block in blocks { @@ -749,50 +739,80 @@ impl Synchronizer { - let Some((result, authority_index)) = result else { - break; - }; - match result { - Ok(result) => { - match process_blocks(result, authority_index) { - Ok(blocks) => { - let max_round = blocks.into_iter().map(|b|b.round()).max().unwrap_or(0); - highest_round = highest_round.max(max_round); - - total_stake += context.committee.stake(authority_index); - }, - Err(err) => { - warn!("Invalid result returned from {authority_index} while fetching last own block: {err}"); + // Get the highest of all the results. Retry until at least `f+1` results have been gathered. 
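+        // (Validity-threshold example: with four equal-stake authorities, f = 1, so
+        // gathering stake >= f + 1 = 2 guarantees at least one honest peer
+        // contributed to the highest round computed below.)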
+ let mut total_stake; + let mut highest_round; + let mut retries = 0; + let mut retry_delay_step = Duration::from_millis(500); + 'main:loop { + total_stake = 0; + highest_round = 0; + + // Ask all the other peers about our last block + let mut results = FuturesUnordered::new(); + + for (authority_index, _authority) in context.committee.authorities() { + if authority_index != context.own_index { + results.push(fetch_own_block(authority_index, Duration::from_millis(0))); + } + } + + // Gather the results but wait to timeout as well + let timer = sleep_until(Instant::now() + context.parameters.sync_last_known_own_block_timeout); + tokio::pin!(timer); + + 'inner: loop { + tokio::select! { + result = results.next() => { + let Some((result, authority_index)) = result else { + break 'inner; + }; + match result { + Ok(result) => { + match process_blocks(result, authority_index) { + Ok(blocks) => { + let max_round = blocks.into_iter().map(|b|b.round()).max().unwrap_or(0); + highest_round = highest_round.max(max_round); + + total_stake += context.committee.stake(authority_index); + }, + Err(err) => { + warn!("Invalid result returned from {authority_index} while fetching last own block: {err}"); + } } + }, + Err(err) => { + warn!("Error {err} while fetching our own block from peer {authority_index}. Will retry."); + results.push(fetch_own_block(authority_index, FETCH_OWN_BLOCK_RETRY_DELAY)); } - }, - Err(err) => { - warn!("Error {err} while fetching our own block from peer {authority_index}. Will retry."); - results.push(fetch_own_block(authority_index, FETCH_OWN_BLOCK_RETRY_DELAY)); } + }, + () = &mut timer => { + info!("Timeout while trying to sync our own last block from peers"); + break 'inner; } - }, - () = &mut timer => { - info!("Timeout while trying to sync our own last block from peers"); - break; } } - } - // Update the Core with the highest detected round - if total_stake == 0 { - panic!("No peer has returned any acceptable result, can not safely update min round"); + // Request at least f+1 stake to have replied back. + if context.committee.reached_validity(total_stake) { + info!("{} out of {} total stake returned acceptable results for our own last block with highest round {}, with {retries} retries.", total_stake, context.committee.total_stake(), highest_round); + break 'main; + } else { + retries += 1; + context.metrics.node_metrics.sync_last_known_own_block_retries.inc(); + warn!("Not enough stake: {} out of {} total stake returned acceptable results for our own last block with highest round {}. 
Will now retry {retries}.", total_stake, context.committee.total_stake(), highest_round); + + sleep(retry_delay_step).await; + + retry_delay_step = Duration::from_secs_f64(retry_delay_step.as_secs_f64() * 1.5); + retry_delay_step = retry_delay_step.min(MAX_RETRY_DELAY_STEP); + } } + // Update the Core with the highest detected round context.metrics.node_metrics.last_known_own_block_round.set(highest_round as i64); - info!("{} out of {} total stake returned acceptable results for our own last block with highest round {}", total_stake, context.committee.total_stake(), highest_round); if let Err(err) = core_dispatcher.set_last_known_proposed_round(highest_round) { warn!("Error received while calling dispatcher, probably dispatcher is shutting down, will now exit: {err:?}"); } @@ -826,6 +846,7 @@ impl Synchronizer Synchronizer>, - missing_blocks: Mutex>, - last_known_proposed_round: parking_lot::Mutex>, - } - - impl MockCoreThreadDispatcher { - async fn get_add_blocks(&self) -> Vec { - let mut lock = self.add_blocks.lock().await; - lock.drain(0..).collect() - } - - async fn stub_missing_blocks(&self, block_refs: BTreeSet) { - let mut lock = self.missing_blocks.lock().await; - lock.extend(block_refs); - } - - async fn get_last_own_proposed_round(&self) -> Vec { - let lock = self.last_known_proposed_round.lock(); - lock.clone() - } - } - - #[async_trait] - impl CoreThreadDispatcher for MockCoreThreadDispatcher { - async fn add_blocks( - &self, - blocks: Vec, - ) -> Result, CoreError> { - let mut lock = self.add_blocks.lock().await; - lock.extend(blocks); - Ok(BTreeSet::new()) - } - - async fn new_block(&self, _round: Round, _force: bool) -> Result<(), CoreError> { - Ok(()) - } - - async fn get_missing_blocks(&self) -> Result, CoreError> { - let mut lock = self.missing_blocks.lock().await; - let result = lock.clone(); - lock.clear(); - Ok(result) - } - - fn set_consumer_availability(&self, _available: bool) -> Result<(), CoreError> { - todo!() - } - - fn set_last_known_proposed_round(&self, round: Round) -> Result<(), CoreError> { - let mut lock = self.last_known_proposed_round.lock(); - lock.push(round); - Ok(()) - } - } - type FetchRequestKey = (Vec, AuthorityIndex); type FetchRequestResponse = (Vec, Option); type FetchLatestBlockKey = (AuthorityIndex, Vec); @@ -1082,7 +1045,7 @@ mod tests { struct MockNetworkClient { fetch_blocks_requests: Mutex>, fetch_latest_blocks_requests: - Mutex>, + Mutex>>, } impl MockNetworkClient { @@ -1108,7 +1071,14 @@ mod tests { latency: Option, ) { let mut lock = self.fetch_latest_blocks_requests.lock().await; - lock.insert((peer, authorities), (blocks, latency)); + lock.entry((peer, authorities)) + .or_default() + .push((blocks, latency)); + } + + async fn fetch_latest_blocks_pending_calls(&self) -> usize { + let lock = self.fetch_latest_blocks_requests.lock().await; + lock.len() } } @@ -1177,22 +1147,27 @@ mod tests { _timeout: Duration, ) -> ConsensusResult> { let mut lock = self.fetch_latest_blocks_requests.lock().await; - let response = lock - .remove(&(peer, authorities)) + let mut responses = lock + .remove(&(peer, authorities.clone())) .expect("Unexpected fetch blocks request made"); + let response = responses.remove(0); let serialised = response .0 .into_iter() .map(|block| block.serialized().clone()) .collect::>(); - if let Some(latency) = response.1 { - sleep(latency).await; + if !responses.is_empty() { + lock.insert((peer, authorities), responses); } drop(lock); + if let Some(latency) = response.1 { + sleep(latency).await; + } + Ok(serialised) } } @@ 
-1272,10 +1247,10 @@ mod tests { let context = Arc::new(context); let block_verifier = Arc::new(NoopBlockVerifier {}); let core_dispatcher = Arc::new(MockCoreThreadDispatcher::default()); + let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let network_client = Arc::new(MockNetworkClient::default()); let store = Arc::new(MemStore::new()); let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store))); - let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let handle = Synchronizer::start( network_client.clone(), @@ -1284,6 +1259,7 @@ mod tests { commit_vote_monitor, block_verifier, dag_state, + false, ); // Create some test blocks @@ -1318,11 +1294,11 @@ mod tests { let (context, _) = Context::new_for_test(4); let context = Arc::new(context); let block_verifier = Arc::new(NoopBlockVerifier {}); + let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let core_dispatcher = Arc::new(MockCoreThreadDispatcher::default()); let network_client = Arc::new(MockNetworkClient::default()); let store = Arc::new(MemStore::new()); let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store))); - let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let handle = Synchronizer::start( network_client.clone(), @@ -1331,6 +1307,7 @@ mod tests { commit_vote_monitor, block_verifier, dag_state, + false, ); // Create some test blocks @@ -1376,11 +1353,11 @@ mod tests { let (context, _) = Context::new_for_test(4); let context = Arc::new(context); let block_verifier = Arc::new(NoopBlockVerifier {}); + let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); let core_dispatcher = Arc::new(MockCoreThreadDispatcher::default()); let network_client = Arc::new(MockNetworkClient::default()); let store = Arc::new(MemStore::new()); let dag_state = Arc::new(RwLock::new(DagState::new(context.clone(), store))); - let commit_vote_monitor = Arc::new(CommitVoteMonitor::new(context.clone())); // Create some test blocks let expected_blocks = (0..10) @@ -1422,6 +1399,7 @@ mod tests { commit_vote_monitor, block_verifier, dag_state, + false, ); sleep(2 * FETCH_REQUEST_TIMEOUT).await; @@ -1497,7 +1475,7 @@ mod tests { // Pass them through the commit vote monitor - so now there will be a big commit lag to prevent // the scheduled synchronizer from running for block in blocks { - commit_vote_monitor.observe(&block); + commit_vote_monitor.observe_block(&block); } // WHEN start the synchronizer and wait for a couple of seconds where normally the synchronizer should have kicked in. 
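// For reference, every synchronizer fixture in these tests now passes a trailing boolean; an
// illustrative sketch of a call site with the new flag spelled out (argument order inferred
// from the surrounding hunks):
//
//     let handle = Synchronizer::start(
//         network_client,
//         context,
//         core_dispatcher,
//         commit_vote_monitor,
//         block_verifier,
//         dag_state,
//         false, // sync_last_known_own_block: fetch our last proposed block from peers first
//     );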
@@ -1508,6 +1486,7 @@ mod tests { commit_vote_monitor.clone(), block_verifier, dag_state.clone(), + false, ); sleep(4 * FETCH_REQUEST_TIMEOUT).await; @@ -1546,7 +1525,7 @@ mod tests { // GIVEN let (context, _) = Context::new_for_test(4); let context = Arc::new(context.with_parameters(Parameters { - sync_last_proposed_block_timeout: Duration::from_millis(2_000), + sync_last_known_own_block_timeout: Duration::from_millis(2_000), ..Default::default() })); let block_verifier = Arc::new(NoopBlockVerifier {}); @@ -1564,9 +1543,18 @@ mod tests { // Now set different latest blocks for the peers // For peer 1 we give the block of round 10 (highest) + let block_1 = expected_blocks.pop().unwrap(); network_client .stub_fetch_latest_blocks( - vec![expected_blocks.pop().unwrap()], + vec![block_1.clone()], + AuthorityIndex::new_for_test(1), + vec![our_index], + None, + ) + .await; + network_client + .stub_fetch_latest_blocks( + vec![block_1], AuthorityIndex::new_for_test(1), vec![our_index], None, @@ -1574,9 +1562,18 @@ mod tests { .await; // For peer 2 we give the block of round 9 + let block_2 = expected_blocks.pop().unwrap(); network_client .stub_fetch_latest_blocks( - vec![expected_blocks.pop().unwrap()], + vec![block_2.clone()], + AuthorityIndex::new_for_test(2), + vec![our_index], + Some(Duration::from_secs(10)), + ) + .await; + network_client + .stub_fetch_latest_blocks( + vec![block_2], AuthorityIndex::new_for_test(2), vec![our_index], None, @@ -1584,6 +1581,14 @@ mod tests { .await; // For peer 3 we don't give any block - and it should return an empty vector + network_client + .stub_fetch_latest_blocks( + vec![], + AuthorityIndex::new_for_test(3), + vec![our_index], + Some(Duration::from_secs(10)), + ) + .await; network_client .stub_fetch_latest_blocks( vec![], @@ -1601,10 +1606,11 @@ mod tests { commit_vote_monitor, block_verifier, dag_state, + true, ); // Wait at least for the timeout time - sleep(context.parameters.sync_last_proposed_block_timeout * 2).await; + sleep(context.parameters.sync_last_known_own_block_timeout * 2).await; // Assert that core has been called to set the min propose round assert_eq!( @@ -1612,6 +1618,19 @@ mod tests { vec![10] ); + // Ensure that all the requests have been called + assert_eq!(network_client.fetch_latest_blocks_pending_calls().await, 0); + + // And we got one retry + assert_eq!( + context + .metrics + .node_metrics + .sync_last_known_own_block_retries + .get(), + 1 + ); + // Ensure that no panic occurred if let Err(err) = handle.stop().await { if err.is_panic() { diff --git a/consensus/core/src/test_dag_parser.rs b/consensus/core/src/test_dag_parser.rs index 721766f7d4f1e..8ceda4b8e6d13 100644 --- a/consensus/core/src/test_dag_parser.rs +++ b/consensus/core/src/test_dag_parser.rs @@ -425,7 +425,7 @@ mod tests { let (_, (round, connections)) = result.unwrap(); let skipped_slot = Slot::new_for_test(0, 0); // A0 - let mut expected_references = vec![ + let mut expected_references = [ dag_builder.last_ancestors.clone(), dag_builder.last_ancestors.clone(), dag_builder diff --git a/consensus/core/src/transaction.rs b/consensus/core/src/transaction.rs index 8ae6eb1c4caba..590e922e5eca0 100644 --- a/consensus/core/src/transaction.rs +++ b/consensus/core/src/transaction.rs @@ -18,10 +18,6 @@ use crate::{ /// The maximum number of transactions pending to the queue to be pulled for block proposal const MAX_PENDING_TRANSACTIONS: usize = 2_000; -/// Assume 20_000 TPS * 5% max stake per validator / (minimum) 4 blocks per round = 250 transactions per block maximum -/// 
Using a higher limit that is 250 * 2 = 500, to account for bursty traffic and system transactions. -const MAX_CONSUMED_TRANSACTIONS_PER_REQUEST: u64 = 500; - /// The guard acts as an acknowledgment mechanism for the inclusion of the transactions to a block. /// When its last transaction is included to a block then `included_in_block_ack` will be signalled. /// If the guard is dropped without getting acknowledged that means the transactions have not been @@ -45,18 +41,15 @@ pub(crate) struct TransactionConsumer { } impl TransactionConsumer { - pub(crate) fn new( - tx_receiver: Receiver, - context: Arc, - max_consumed_transactions_per_request: Option, - ) -> Self { + pub(crate) fn new(tx_receiver: Receiver, context: Arc) -> Self { Self { tx_receiver, max_consumed_bytes_per_request: context .protocol_config .consensus_max_transactions_in_block_bytes(), - max_consumed_transactions_per_request: max_consumed_transactions_per_request - .unwrap_or(MAX_CONSUMED_TRANSACTIONS_PER_REQUEST), + max_consumed_transactions_per_request: context + .protocol_config + .max_num_transactions_in_block(), pending_transactions: None, } } @@ -74,7 +67,6 @@ impl TransactionConsumer { // Handle one batch of incoming transactions from TransactionGuard. // Returns the remaining txs as a new TransactionGuard, if the batch breaks any limit. let mut handle_txs = |t: TransactionsGuard| -> Option { - // Here we assume that a transaction can always fit in `max_fetched_bytes_per_request` let remaining_txs: Vec<_> = t .transactions .into_iter() @@ -237,6 +229,7 @@ pub enum ValidationError { } /// `NoopTransactionVerifier` accepts all transactions. +#[allow(unused)] pub(crate) struct NoopTransactionVerifier; impl TransactionVerifier for NoopTransactionVerifier { @@ -273,7 +266,7 @@ mod tests { let context = Arc::new(Context::new_for_test(4).0); let (client, tx_receiver) = TransactionClient::new(context.clone()); - let mut consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let mut consumer = TransactionConsumer::new(tx_receiver, context.clone()); // submit asynchronously the transactions and keep the waiters let mut included_in_block_waiters = FuturesUnordered::new(); @@ -325,7 +318,7 @@ mod tests { let context = Arc::new(Context::new_for_test(4).0); let (client, tx_receiver) = TransactionClient::new(context.clone()); - let mut consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let mut consumer = TransactionConsumer::new(tx_receiver, context.clone()); // submit some transactions for i in 0..10 { @@ -393,7 +386,7 @@ mod tests { let context = Arc::new(Context::new_for_test(4).0); let (client, tx_receiver) = TransactionClient::new(context.clone()); - let mut consumer = TransactionConsumer::new(tx_receiver, context.clone(), None); + let mut consumer = TransactionConsumer::new(tx_receiver, context.clone()); let mut all_receivers = Vec::new(); // submit a few transactions individually. 
for i in 0..10 { diff --git a/crates/mysten-metrics/Cargo.toml b/crates/mysten-metrics/Cargo.toml index 2a26c989a9451..26045107d68c7 100644 --- a/crates/mysten-metrics/Cargo.toml +++ b/crates/mysten-metrics/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] axum.workspace = true tracing.workspace = true @@ -20,3 +23,4 @@ parking_lot.workspace = true futures.workspace = true async-trait.workspace = true prometheus-closure-metric.workspace = true +simple-server-timing-header.workspace = true diff --git a/crates/mysten-metrics/src/lib.rs b/crates/mysten-metrics/src/lib.rs index ae45b80f98e4f..a55e36632af7f 100644 --- a/crates/mysten-metrics/src/lib.rs +++ b/crates/mysten-metrics/src/lib.rs @@ -3,6 +3,8 @@ use axum::{extract::Extension, http::StatusCode, routing::get, Router}; use dashmap::DashMap; +use parking_lot::Mutex; +use simple_server_timing_header::Timer; use std::future::Future; use std::net::SocketAddr; use std::pin::Pin; @@ -132,6 +134,60 @@ pub fn get_metrics() -> Option<&'static Metrics> { METRICS.get() } +tokio::task_local! { + static SERVER_TIMING: Arc>; +} + +/// Create a new task-local ServerTiming context and run the provided future within it. +/// Should be used at the top-most level of a request handler. Can be added to an axum router +/// as a layer by using mysten_service::server_timing_middleware. +pub async fn with_new_server_timing(fut: impl Future + Send + 'static) -> T { + let timer = Arc::new(Mutex::new(Timer::new())); + + let mut ret = None; + SERVER_TIMING + .scope(timer, async { + ret = Some(fut.await); + }) + .await; + + ret.unwrap() +} + +/// Run the provided future within the given task-local ServerTiming context. +/// Only intended for use by macros within this module. +pub async fn with_server_timing( + timer: Arc>, + fut: impl Future + Send + 'static, +) -> T { + let mut ret = None; + SERVER_TIMING + .scope(timer, async { + ret = Some(fut.await); + }) + .await; + + ret.unwrap() +} + +/// Get the currently active ServerTiming context. Only intended for use by macros within this module. +pub fn get_server_timing() -> Option>> { + SERVER_TIMING.try_with(|timer| timer.clone()).ok() +} + +/// Add a new entry to the ServerTiming header. +/// If the caller is not currently in a ServerTiming context (created with `with_new_server_timing`), +/// an error is logged. +pub fn add_server_timing(name: &str) { + let res = SERVER_TIMING.try_with(|timer| { + timer.lock().add(name); + }); + + if res.is_err() { + tracing::error!("Server timing context not found"); + } +} + #[macro_export] macro_rules! monitored_future { ($fut: expr) => {{ @@ -181,25 +237,42 @@ macro_rules! monitored_future { }}; } +#[macro_export] +macro_rules! forward_server_timing_and_spawn { + ($fut: expr) => { + if let Some(timing) = $crate::get_server_timing() { + tokio::task::spawn(async move { $crate::with_server_timing(timing, $fut).await }) + } else { + tokio::task::spawn($fut) + } + }; +} + #[macro_export] macro_rules! spawn_monitored_task { ($fut: expr) => { - tokio::task::spawn($crate::monitored_future!(tasks, $fut, "", INFO, false)) + $crate::forward_server_timing_and_spawn!($crate::monitored_future!( + tasks, $fut, "", INFO, false + )) }; } + #[macro_export] macro_rules! 
spawn_logged_monitored_task { ($fut: expr) => { - tokio::task::spawn($crate::monitored_future!(tasks, $fut, "", INFO, true)) + $crate::forward_server_timing_and_spawn!($crate::monitored_future!( + tasks, $fut, "", INFO, true + )) }; ($fut: expr, $name: expr) => { - tokio::task::spawn($crate::monitored_future!(tasks, $fut, $name, INFO, true)) + $crate::forward_server_timing_and_spawn!($crate::monitored_future!( + tasks, $fut, $name, INFO, true + )) }; ($fut: expr, $name: expr, $logging_level: ident) => { - tokio::task::spawn($crate::monitored_future!( + $crate::forward_server_timing_and_spawn!($crate::monitored_future!( tasks, $fut, $name, diff --git a/crates/mysten-service/Cargo.toml b/crates/mysten-service/Cargo.toml index 0432f5f80d5b5..08e3a72a48395 100644 --- a/crates/mysten-service/Cargo.toml +++ b/crates/mysten-service/Cargo.toml @@ -12,6 +12,7 @@ axum.workspace = true tokio.workspace = true serde.workspace = true prometheus.workspace = true +simple-server-timing-header.workspace = true mysten-metrics.workspace = true telemetry-subscribers.workspace = true tracing.workspace = true diff --git a/crates/mysten-service/src/lib.rs b/crates/mysten-service/src/lib.rs index 5af492b3ac48a..fc06357e5eee0 100644 --- a/crates/mysten-service/src/lib.rs +++ b/crates/mysten-service/src/lib.rs @@ -4,6 +4,7 @@ mod health; pub mod logging; pub mod metrics; +pub mod server_timing; mod service; pub use service::get_mysten_service; diff --git a/crates/mysten-service/src/server_timing.rs b/crates/mysten-service/src/server_timing.rs new file mode 100644 index 0000000000000..9cdff1d13c571 --- /dev/null +++ b/crates/mysten-service/src/server_timing.rs @@ -0,0 +1,28 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use axum::extract::Request; +use axum::middleware::Next; +use axum::response::Response; +use mysten_metrics::{add_server_timing, get_server_timing, with_new_server_timing}; +use simple_server_timing_header::Timer; + +pub async fn server_timing_middleware(request: Request, next: Next) -> Response { + with_new_server_timing(async move { + let mut response = next.run(request).await; + add_server_timing("finish_request"); + + if let Ok(header_value) = get_server_timing() + .expect("server timing not set") + .lock() + .header_value() + .try_into() + { + response + .headers_mut() + .insert(Timer::header_key(), header_value); + } + response + }) + .await +} diff --git a/crates/mysten-util-mem/src/malloc_size.rs b/crates/mysten-util-mem/src/malloc_size.rs index 2668bb13e7152..9b6e9e1d61cdf 100644 --- a/crates/mysten-util-mem/src/malloc_size.rs +++ b/crates/mysten-util-mem/src/malloc_size.rs @@ -39,8 +39,6 @@ //! - If an `Rc` or `Arc` is known to be a "primary" reference and can always //! be measured, it should be measured via the `MallocUnconditionalSizeOf` //! trait. -//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen -//! before, it should be measured via the `MallocConditionalSizeOf` trait. //! - Using universal function call syntax is a good idea when measuring boxed //! fields in structs, because it makes it clear that the Box is being //! measured as well as the thing it points to. E.g. @@ -207,22 +205,6 @@ pub trait MallocUnconditionalShallowSizeOf { fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } -/// Like `MallocSizeOf`, but only measures if the value hasn't already been -/// measured. For use with types like `Rc` and `Arc` when appropriate (e.g. -/// when there is no "primary" reference). 
-pub trait MallocConditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself, and only if that heap usage - /// hasn't already been measured. - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// `MallocConditionalSizeOf` combined with `MallocShallowSizeOf`. -pub trait MallocConditionalShallowSizeOf { - /// `conditional_size_of` combined with `shallow_size_of`. - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - impl<'a, T: ?Sized> MallocSizeOf for &'a T { fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { // Zero makes sense for a non-owning reference. @@ -510,11 +492,6 @@ where // impl !MallocSizeOf for Arc { } // impl !MallocShallowSizeOf for Arc { } -#[cfg(feature = "std")] -fn arc_ptr(s: &Arc) -> *const T { - &(**s) as *const T -} - #[cfg(feature = "std")] impl MallocUnconditionalSizeOf for Arc { fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { @@ -522,28 +499,6 @@ impl MallocUnconditionalSizeOf for Arc { } } -#[cfg(feature = "std")] -impl MallocConditionalShallowSizeOf for Arc { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_shallow_size_of(ops) - } - } -} - -#[cfg(feature = "std")] -impl MallocConditionalSizeOf for Arc { - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_size_of(ops) - } - } -} - /// If a mutex is stored directly as a member of a data type that is being measured, /// it is the unique owner of its contents and deserves to be measured. /// @@ -724,26 +679,6 @@ where } } -#[cfg(feature = "lru")] -impl MallocSizeOf for lru::LruCache -where - K: MallocSizeOf + rstd::cmp::Eq + rstd::hash::Hash, - V: MallocSizeOf, - S: rstd::hash::BuildHasher, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = 0; - if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { - n += self.len() * (k + v) - } else { - n = self - .iter() - .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) - } - n - } -} - malloc_size_of_is_0!( [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 9], [u8; 10], [u8; 11], [u8; 12], [u8; 13], [u8; 14], [u8; 15], [u8; 16], [u8; 17], [u8; 18], [u8; 19], diff --git a/crates/mysten-util-mem/tests/derive.rs b/crates/mysten-util-mem/tests/derive.rs index 0beeb6fb11c47..ed4b0196b1978 100644 --- a/crates/mysten-util-mem/tests/derive.rs +++ b/crates/mysten-util-mem/tests/derive.rs @@ -55,27 +55,6 @@ fn derive_ignore() { assert!(t.malloc_size_of() < 3000); } -#[test] -#[cfg(all(feature = "lru", feature = "hashbrown"))] -fn derive_morecomplex() { - #[derive(MallocSizeOf)] - struct Trivia { - hm: hashbrown::HashMap>, - cache: lru::LruCache>, - } - - let mut t = Trivia { - hm: hashbrown::HashMap::new(), - cache: lru::LruCache::unbounded(), - }; - - t.hm.insert(1, vec![0u8; 2048]); - t.cache.put(1, vec![0u8; 2048]); - t.cache.put(2, vec![0u8; 4096]); - - assert!(t.malloc_size_of() > 8000); -} - #[test] fn derive_tuple() { #[derive(MallocSizeOf)] diff --git a/crates/sui-adapter-transactional-tests/tests/deny_list_v2/coin_deny_multiple_coin_types.exp b/crates/sui-adapter-transactional-tests/tests/deny_list_v2/coin_deny_multiple_coin_types.exp new file mode 100644 index 0000000000000..be2fe6b845377 --- /dev/null +++ 
b/crates/sui-adapter-transactional-tests/tests/deny_list_v2/coin_deny_multiple_coin_types.exp @@ -0,0 +1,53 @@ +processed 6 tasks + +init: +A: object(0,0) + +task 1, lines 11-60: +//# publish --sender A +created: object(1,0), object(1,1), object(1,2), object(1,3), object(1,4), object(1,5), object(1,6), object(1,7), object(1,8), object(1,9), object(1,10) +mutated: object(0,0) +unchanged_shared: 0x0000000000000000000000000000000000000000000000000000000000000403 +gas summary: computation_cost: 1000000, storage_cost: 34260800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, lines 61-63: +//# view-object 1,1 +Owner: Account Address ( A ) +Version: 2 +Contents: sui::coin::Coin { + id: sui::object::UID { + id: sui::object::ID { + bytes: fake(1,1), + }, + }, + balance: sui::balance::Balance { + value: 10000u64, + }, +} + +task 3, lines 64-66: +//# view-object 1,2 +Owner: Account Address ( A ) +Version: 2 +Contents: sui::coin::Coin { + id: sui::object::UID { + id: sui::object::ID { + bytes: fake(1,2), + }, + }, + balance: sui::balance::Balance { + value: 10000u64, + }, +} + +task 4, line 67: +//# run sui::coin::deny_list_v2_add --args object(0x403) object(1,6) @A --type-args test::regulated_coin2::REGULATED_COIN2 --sender A +events: Event { package_id: sui, transaction_module: Identifier("coin"), sender: A, type_: StructTag { address: sui, module: Identifier("deny_list"), name: Identifier("PerTypeConfigCreated"), type_params: [] }, contents: [0, 0, 0, 0, 0, 0, 0, 0, 98, 101, 100, 100, 50, 98, 100, 54, 51, 99, 50, 51, 102, 99, 52, 54, 52, 55, 102, 51, 101, 99, 101, 57, 48, 55, 57, 57, 49, 51, 102, 53, 99, 49, 98, 52, 98, 55, 97, 101, 101, 52, 56, 51, 50, 51, 55, 50, 97, 102, 52, 48, 99, 100, 48, 51, 49, 57, 50, 101, 97, 48, 52, 51, 97, 58, 58, 114, 101, 103, 117, 108, 97, 116, 101, 100, 95, 99, 111, 105, 110, 50, 58, 58, 82, 69, 71, 85, 76, 65, 84, 69, 68, 95, 67, 79, 73, 78, 50, 157, 35, 217, 228, 182, 26, 20, 142, 175, 244, 33, 216, 213, 187, 161, 23, 168, 25, 82, 72, 79, 110, 47, 210, 76, 90, 37, 190, 166, 158, 5, 16] } +created: object(4,0), object(4,1), object(4,2) +mutated: 0x0000000000000000000000000000000000000000000000000000000000000403, object(0,0), object(1,6) +gas summary: computation_cost: 1000000, storage_cost: 12220800, storage_rebate: 2761308, non_refundable_storage_fee: 27892 + +task 5, lines 69-70: +//# programmable --sender A --inputs object(1,1) object(1,2) @A +//> TransferObjects([Input(0), Input(1)], Input(2)) +Error: Error checking transaction input objects: AddressDeniedForCoin { address: @A, coin_type: "object(1,0)::regulated_coin2::REGULATED_COIN2" } diff --git a/crates/sui-adapter-transactional-tests/tests/deny_list_v2/coin_deny_multiple_coin_types.move b/crates/sui-adapter-transactional-tests/tests/deny_list_v2/coin_deny_multiple_coin_types.move new file mode 100644 index 0000000000000..1ed5f26f09769 --- /dev/null +++ b/crates/sui-adapter-transactional-tests/tests/deny_list_v2/coin_deny_multiple_coin_types.move @@ -0,0 +1,70 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// This test verifies when sending two objects of different coin types in the same transaction, +// if one is denied but not the other, the transaction check should still fail. +// More importantly, if the second type is denied but not the first, the fact that +// the first type doesn't even have a denylist entry should not matter. 
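+// Concretely, the matching .exp file shows the final transfer in this test failing input checks
+// with AddressDeniedForCoin for test::regulated_coin2::REGULATED_COIN2, while
+// test::regulated_coin1::REGULATED_COIN1 never even receives a denylist entry.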
+ +//# init --accounts A --addresses test=0x0 + +//# publish --sender A +module test::regulated_coin1 { + use sui::coin; + + public struct REGULATED_COIN1 has drop {} + + fun init(otw: REGULATED_COIN1, ctx: &mut TxContext) { + let (mut treasury_cap, deny_cap, metadata) = coin::create_regulated_currency_v2( + otw, + 9, + b"RC", + b"REGULATED_COIN", + b"A new regulated coin", + option::none(), + false, + ctx + ); + let coin = coin::mint(&mut treasury_cap, 10000, ctx); + transfer::public_transfer(coin, tx_context::sender(ctx)); + transfer::public_transfer(deny_cap, tx_context::sender(ctx)); + transfer::public_freeze_object(treasury_cap); + transfer::public_freeze_object(metadata); + } +} + +module test::regulated_coin2 { + use sui::coin; + + public struct REGULATED_COIN2 has drop {} + + fun init(otw: REGULATED_COIN2, ctx: &mut TxContext) { + let (mut treasury_cap, deny_cap, metadata) = coin::create_regulated_currency_v2( + otw, + 9, + b"RC", + b"REGULATED_COIN", + b"A new regulated coin", + option::none(), + false, + ctx + ); + let coin = coin::mint(&mut treasury_cap, 10000, ctx); + transfer::public_transfer(coin, tx_context::sender(ctx)); + transfer::public_transfer(deny_cap, tx_context::sender(ctx)); + transfer::public_freeze_object(treasury_cap); + transfer::public_freeze_object(metadata); + } +} + +// Coin1 +//# view-object 1,1 + +// Coin2 +//# view-object 1,2 + +// Deny account A for coin2. +//# run sui::coin::deny_list_v2_add --args object(0x403) object(1,6) @A --type-args test::regulated_coin2::REGULATED_COIN2 --sender A + +//# programmable --sender A --inputs object(1,1) object(1,2) @A +//> TransferObjects([Input(0), Input(1)], Input(2)) diff --git a/crates/sui-adapter-transactional-tests/tests/entry_points/large_enum.exp b/crates/sui-adapter-transactional-tests/tests/entry_points/large_enum.exp new file mode 100644 index 0000000000000..4c3435ef46f66 --- /dev/null +++ b/crates/sui-adapter-transactional-tests/tests/entry_points/large_enum.exp @@ -0,0 +1,22 @@ +processed 4 tasks + +init: +A: object(0,0) + +task 1, lines 8-35: +//# publish +created: object(1,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 6452400, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, lines 37-38: +//# programmable --sender A +//> test::m::x1() +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 988000, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 3, lines 40-41: +//# programmable --sender A +//> test::m::x3() +Error: Transaction Effects Status: Move Bytecode Verification Error. Please run the Bytecode Verifier for more information. +Execution Error: ExecutionError: ExecutionError { inner: ExecutionErrorInner { kind: VMVerificationOrDeserializationError, source: Some(VMError { major_status: TOO_MANY_TYPE_NODES, sub_status: None, message: None, exec_state: None, location: Undefined, indices: [], offsets: [] }), command: Some(0) } } diff --git a/crates/sui-adapter-transactional-tests/tests/entry_points/large_enum.move b/crates/sui-adapter-transactional-tests/tests/entry_points/large_enum.move new file mode 100644 index 0000000000000..da3a8468d81b6 --- /dev/null +++ b/crates/sui-adapter-transactional-tests/tests/entry_points/large_enum.move @@ -0,0 +1,41 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +// tests error after serializing a large enum return value + +//# init --addresses test=0x0 --accounts A + +//# publish + +module test::m { + +public enum X1 has drop { + Big1(u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8), +} + +public enum X2 has drop { + V1(X1, X1, X1), + V2(X1, X1, X1), + V3(X1, X1, X1), +} + +public enum X3 has drop { + X2(X2, X2, X2), + U64(u64), +} + +entry fun x1(): X1 { + X1::Big1(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) +} + +entry fun x3(): X3 { + X3::U64(0) +} + +} + +//# programmable --sender A +//> test::m::x1() + +//# programmable --sender A +//> test::m::x3() diff --git a/crates/sui-analytics-indexer/src/package_store.rs b/crates/sui-analytics-indexer/src/package_store.rs index 2878b9852c4d8..94199a6353009 100644 --- a/crates/sui-analytics-indexer/src/package_store.rs +++ b/crates/sui-analytics-indexer/src/package_store.rs @@ -74,10 +74,9 @@ pub struct LocalDBPackageStore { impl LocalDBPackageStore { pub fn new(path: &Path, rest_url: &str) -> Self { - let rest_api_url = format!("{}/rest", rest_url); Self { package_store_tables: PackageStoreTables::new(path), - fallback_client: Client::new(rest_api_url), + fallback_client: Client::new(rest_url), } } diff --git a/crates/sui-archival/src/lib.rs b/crates/sui-archival/src/lib.rs index 1dbcd632c254d..bdc5b6b3ab18a 100644 --- a/crates/sui-archival/src/lib.rs +++ b/crates/sui-archival/src/lib.rs @@ -61,6 +61,7 @@ use tracing::{error, info}; /// - epoch_1/ /// - 101000.chk /// - ... +/// /// Blob File Disk Format ///┌──────────────────────────────┐ ///│ magic <4 byte> │ diff --git a/crates/sui-aws-orchestrator/src/settings.rs b/crates/sui-aws-orchestrator/src/settings.rs index 8eaca609aa3a8..be4b17d3ca10e 100644 --- a/crates/sui-aws-orchestrator/src/settings.rs +++ b/crates/sui-aws-orchestrator/src/settings.rs @@ -121,7 +121,6 @@ impl Settings { .path_segments() .expect("Url should already be checked when loading settings") .collect::>()[1] - .to_string() .split('.') .next() .unwrap() diff --git a/crates/sui-benchmark/Cargo.toml b/crates/sui-benchmark/Cargo.toml index 54a9362e4675b..daad4d2158c51 100644 --- a/crates/sui-benchmark/Cargo.toml +++ b/crates/sui-benchmark/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] async-trait.workspace = true anyhow = { workspace = true, features = ["backtrace"] } diff --git a/crates/sui-benchmark/src/embedded_reconfig_observer.rs b/crates/sui-benchmark/src/embedded_reconfig_observer.rs index 9976491327bfd..42df82d81ce3b 100644 --- a/crates/sui-benchmark/src/embedded_reconfig_observer.rs +++ b/crates/sui-benchmark/src/embedded_reconfig_observer.rs @@ -23,6 +23,7 @@ use tracing::{error, info, trace}; /// and we happen to have a big committee rotation, it may /// fail to get quorum on the latest committee info from /// demissioned validators and then stop working. +/// /// Background: this is a temporary solution for stress before /// we see fullnode reconfiguration stabilizes. 
#[derive(Clone, Default)] diff --git a/crates/sui-benchmark/tests/simtest.rs b/crates/sui-benchmark/tests/simtest.rs index f3a29e01ba079..b7d2e189da3a2 100644 --- a/crates/sui-benchmark/tests/simtest.rs +++ b/crates/sui-benchmark/tests/simtest.rs @@ -550,7 +550,7 @@ mod test { .build() .await, ); - test_simulated_load(test_cluster, 10).await; + test_simulated_load(test_cluster, 30).await; let checkpoint_files = std::fs::read_dir(path) .map(|entries| { diff --git a/crates/sui-bridge-cli/src/lib.rs b/crates/sui-bridge-cli/src/lib.rs index 3cafe745d4c9b..eacd3ca431f2c 100644 --- a/crates/sui-bridge-cli/src/lib.rs +++ b/crates/sui-bridge-cli/src/lib.rs @@ -313,7 +313,7 @@ pub fn make_action(chain_id: BridgeChainId, cmd: &GovernanceClientCommands) -> B } } -fn encode_call_data(function_selector: &str, params: &Vec) -> Vec { +fn encode_call_data(function_selector: &str, params: &[String]) -> Vec { let left = function_selector .find('(') .expect("Invalid function selector, no left parentheses"); diff --git a/crates/sui-bridge-indexer/Cargo.toml b/crates/sui-bridge-indexer/Cargo.toml index 594a8eceba036..7ddb11e1bc5ed 100644 --- a/crates/sui-bridge-indexer/Cargo.toml +++ b/crates/sui-bridge-indexer/Cargo.toml @@ -31,6 +31,7 @@ telemetry-subscribers.workspace = true tracing.workspace = true backoff.workspace = true sui-config.workspace = true +sui-indexer-builder.workspace = true [dev-dependencies] sui-types = { workspace = true, features = ["test-utils"] } diff --git a/crates/sui-bridge-indexer/src/eth_bridge_indexer.rs b/crates/sui-bridge-indexer/src/eth_bridge_indexer.rs index 3a5b33d185376..f3f951952c6a2 100644 --- a/crates/sui-bridge-indexer/src/eth_bridge_indexer.rs +++ b/crates/sui-bridge-indexer/src/eth_bridge_indexer.rs @@ -18,16 +18,14 @@ use sui_bridge::retry_with_max_elapsed_time; use tokio::task::JoinHandle; use tracing::info; -use mysten_metrics::metered_channel::Receiver; -use mysten_metrics::{metered_channel, spawn_monitored_task}; +use mysten_metrics::spawn_monitored_task; use sui_bridge::abi::{EthBridgeEvent, EthSuiBridgeEvents}; +use crate::metrics::BridgeIndexerMetrics; use sui_bridge::metrics::BridgeMetrics; use sui_bridge::types::{EthEvent, RawEthLog}; +use sui_indexer_builder::indexer_builder::{DataMapper, DataSender, Datasource}; -use crate::indexer_builder::{DataMapper, Datasource}; -use crate::metrics::BridgeIndexerMetrics; -use crate::sui_bridge_indexer::PgBridgePersistent; use crate::{ BridgeDataSource, ProcessedTxnData, TokenTransfer, TokenTransferData, TokenTransferStatus, }; @@ -55,32 +53,18 @@ impl EthSubscriptionDatasource { } } #[async_trait] -impl Datasource for EthSubscriptionDatasource { +impl Datasource for EthSubscriptionDatasource { async fn start_data_retrieval( &self, - task_name: String, starting_checkpoint: u64, target_checkpoint: u64, - ) -> Result< - ( - JoinHandle>, - Receiver<(u64, Vec)>, - ), - Error, - > { + data_sender: DataSender, + ) -> Result>, Error> { let filter = Filter::new() .address(self.bridge_address) .from_block(starting_checkpoint) .to_block(target_checkpoint); - let (data_sender, data_receiver) = metered_channel::channel( - 1000, - &mysten_metrics::get_metrics() - .unwrap() - .channel_inflight - .with_label_values(&[&task_name]), - ); - let eth_ws_url = self.eth_ws_url.clone(); let indexer_metrics: BridgeIndexerMetrics = self.indexer_metrics.clone(); @@ -142,7 +126,7 @@ impl Datasource for EthSubscri Ok::<_, Error>(()) }); - Ok((handle, data_receiver)) + Ok(handle) } } @@ -170,19 +154,13 @@ impl EthSyncDatasource { } } #[async_trait] 
-impl Datasource for EthSyncDatasource { +impl Datasource for EthSyncDatasource { async fn start_data_retrieval( &self, - task_name: String, starting_checkpoint: u64, target_checkpoint: u64, - ) -> Result< - ( - JoinHandle>, - Receiver<(u64, Vec)>, - ), - Error, - > { + data_sender: DataSender, + ) -> Result>, Error> { let client: Arc> = Arc::new( EthClient::::new( &self.eth_http_url, @@ -197,14 +175,6 @@ impl Datasource for EthSyncDat .interval(std::time::Duration::from_millis(2000)), ); - let (data_sender, data_receiver) = metered_channel::channel( - 1000, - &mysten_metrics::get_metrics() - .unwrap() - .channel_inflight - .with_label_values(&[&task_name]), - ); - let bridge_address = self.bridge_address; let indexer_metrics: BridgeIndexerMetrics = self.indexer_metrics.clone(); let client = Arc::clone(&client); @@ -265,7 +235,7 @@ impl Datasource for EthSyncDat Ok::<_, Error>(()) }); - Ok((handle, data_receiver)) + Ok(handle) } } diff --git a/crates/sui-bridge-indexer/src/lib.rs b/crates/sui-bridge-indexer/src/lib.rs index 3a4320e9b316c..afd4b461303f8 100644 --- a/crates/sui-bridge-indexer/src/lib.rs +++ b/crates/sui-bridge-indexer/src/lib.rs @@ -13,13 +13,10 @@ pub mod metrics; pub mod models; pub mod postgres_manager; pub mod schema; -pub mod sui_checkpoint_ingestion; pub mod sui_transaction_handler; pub mod sui_transaction_queries; pub mod types; -pub mod indexer_builder; - pub mod eth_bridge_indexer; pub mod sui_bridge_indexer; diff --git a/crates/sui-bridge-indexer/src/main.rs b/crates/sui-bridge-indexer/src/main.rs index 889383e7f0239..25b845ae4abaf 100644 --- a/crates/sui-bridge-indexer/src/main.rs +++ b/crates/sui-bridge-indexer/src/main.rs @@ -12,7 +12,6 @@ use ethers::providers::Middleware; use ethers::providers::Provider; use sui_bridge_indexer::eth_bridge_indexer::EthSubscriptionDatasource; use sui_bridge_indexer::eth_bridge_indexer::EthSyncDatasource; -use sui_bridge_indexer::indexer_builder::BackfillStrategy; use tokio::task::JoinHandle; use tracing::info; @@ -22,7 +21,6 @@ use mysten_metrics::start_prometheus_server; use sui_bridge::metrics::BridgeMetrics; use sui_bridge_indexer::config::IndexerConfig; use sui_bridge_indexer::eth_bridge_indexer::EthDataMapper; -use sui_bridge_indexer::indexer_builder::{IndexerBuilder, SuiCheckpointDatasource}; use sui_bridge_indexer::metrics::BridgeIndexerMetrics; use sui_bridge_indexer::postgres_manager::{get_connection_pool, read_sui_progress_store}; use sui_bridge_indexer::sui_bridge_indexer::{PgBridgePersistent, SuiBridgeDataMapper}; @@ -30,6 +28,8 @@ use sui_bridge_indexer::sui_transaction_handler::handle_sui_transactions_loop; use sui_bridge_indexer::sui_transaction_queries::start_sui_tx_polling_task; use sui_config::Config; use sui_data_ingestion_core::DataIngestionMetrics; +use sui_indexer_builder::indexer_builder::{BackfillStrategy, IndexerBuilder}; +use sui_indexer_builder::sui_datasource::SuiCheckpointDatasource; use sui_sdk::SuiClientBuilder; #[derive(Parser, Clone, Debug)] diff --git a/crates/sui-bridge-indexer/src/models.rs b/crates/sui-bridge-indexer/src/models.rs index e005dca186d6f..90435cd60c154 100644 --- a/crates/sui-bridge-indexer/src/models.rs +++ b/crates/sui-bridge-indexer/src/models.rs @@ -1,11 +1,14 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use diesel::data_types::PgTimestamp; +use diesel::{Identifiable, Insertable, Queryable, Selectable}; + +use sui_indexer_builder::Task; + use crate::schema::{ progress_store, sui_error_transactions, sui_progress_store, token_transfer, token_transfer_data, }; -use diesel::data_types::PgTimestamp; -use diesel::{Identifiable, Insertable, Queryable, Selectable}; #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] #[diesel(table_name = progress_store, primary_key(task_name))] @@ -16,6 +19,18 @@ pub struct ProgressStore { pub timestamp: Option, } +impl From for Task { + fn from(value: ProgressStore) -> Self { + Self { + task_name: value.task_name, + checkpoint: value.checkpoint as u64, + target_checkpoint: value.target_checkpoint as u64, + // Ok to unwrap, timestamp is defaulted to now() in database + timestamp: value.timestamp.expect("Timestamp not set").0 as u64, + } + } +} + #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] #[diesel(table_name = sui_progress_store, primary_key(txn_digest))] pub struct SuiProgressStore { diff --git a/crates/sui-bridge-indexer/src/sui_bridge_indexer.rs b/crates/sui-bridge-indexer/src/sui_bridge_indexer.rs index 41f219beb18f1..d0e5c46f8375c 100644 --- a/crates/sui-bridge-indexer/src/sui_bridge_indexer.rs +++ b/crates/sui-bridge-indexer/src/sui_bridge_indexer.rs @@ -11,18 +11,19 @@ use tracing::info; use sui_bridge::events::{ MoveTokenDepositedEvent, MoveTokenTransferApproved, MoveTokenTransferClaimed, }; +use sui_indexer_builder::indexer_builder::{DataMapper, IndexerProgressStore, Persistent}; +use sui_indexer_builder::sui_datasource::CheckpointTxnData; +use sui_indexer_builder::Task; use sui_types::effects::TransactionEffectsAPI; use sui_types::event::Event; use sui_types::execution_status::ExecutionStatus; use sui_types::full_checkpoint_content::CheckpointTransaction; use sui_types::{BRIDGE_ADDRESS, SUI_BRIDGE_OBJECT_ID}; -use crate::indexer_builder::{CheckpointTxnData, DataMapper, IndexerProgressStore, Persistent}; use crate::metrics::BridgeIndexerMetrics; use crate::postgres_manager::PgPool; use crate::schema::progress_store::{columns, dsl}; use crate::schema::{sui_error_transactions, token_transfer, token_transfer_data}; -use crate::sui_checkpoint_ingestion::Task; use crate::{ models, schema, BridgeDataSource, ProcessedTxnData, SuiTxnError, TokenTransfer, TokenTransferData, TokenTransferStatus, @@ -41,8 +42,9 @@ impl PgBridgePersistent { } // TODO: this is shared between SUI and ETH, move to different file. 
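// For orientation: `Persistent` now comes from sui-indexer-builder and its write path is async.
// A minimal sketch of the trait's assumed shape, inferred from the impl below (the authoritative
// definition lives in sui_indexer_builder::indexer_builder):
//
//     #[async_trait]
//     pub trait Persistent<T>: Send + Sync {
//         async fn write(&self, data: Vec<T>) -> anyhow::Result<()>;
//     }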
+#[async_trait] impl Persistent for PgBridgePersistent { - fn write(&self, data: Vec) -> Result<(), Error> { + async fn write(&self, data: Vec) -> Result<(), Error> { if data.is_empty() { return Ok(()); } @@ -115,7 +117,7 @@ impl IndexerProgressStore for PgBridgePersistent { Ok(()) } - fn tasks(&self, prefix: &str) -> Result, anyhow::Error> { + async fn tasks(&self, prefix: &str) -> Result, anyhow::Error> { let mut conn = self.pool.get()?; // get all unfinished tasks let cp: Vec = dsl::progress_store @@ -127,18 +129,18 @@ impl IndexerProgressStore for PgBridgePersistent { Ok(cp.into_iter().map(|d| d.into()).collect()) } - fn register_task( + async fn register_task( &mut self, task_name: String, checkpoint: u64, - target_checkpoint: i64, + target_checkpoint: u64, ) -> Result<(), anyhow::Error> { let mut conn = self.pool.get()?; diesel::insert_into(schema::progress_store::table) .values(models::ProgressStore { task_name, checkpoint: checkpoint as i64, - target_checkpoint, + target_checkpoint: target_checkpoint as i64, // Timestamp is defaulted to current time in DB if None timestamp: None, }) @@ -146,7 +148,7 @@ impl IndexerProgressStore for PgBridgePersistent { Ok(()) } - fn update_task(&mut self, task: Task) -> Result<(), anyhow::Error> { + async fn update_task(&mut self, task: Task) -> Result<(), anyhow::Error> { let mut conn = self.pool.get()?; diesel::update(dsl::progress_store.filter(columns::task_name.eq(task.task_name))) .set(( diff --git a/crates/sui-bridge-indexer/src/sui_checkpoint_ingestion.rs b/crates/sui-bridge-indexer/src/sui_checkpoint_ingestion.rs deleted file mode 100644 index a0cf1a6b14201..0000000000000 --- a/crates/sui-bridge-indexer/src/sui_checkpoint_ingestion.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use crate::models; - -#[derive(Clone)] -pub struct Task { - pub task_name: String, - pub checkpoint: u64, - pub target_checkpoint: u64, - pub timestamp: u64, -} - -impl From for Task { - fn from(value: models::ProgressStore) -> Self { - Self { - task_name: value.task_name, - checkpoint: value.checkpoint as u64, - target_checkpoint: value.target_checkpoint as u64, - // Ok to unwrap, timestamp is defaulted to now() in database - timestamp: value.timestamp.expect("Timestamp not set").0 as u64, - } - } -} - -pub trait Tasks { - fn live_task(&self) -> Option; -} - -impl Tasks for Vec { - fn live_task(&self) -> Option { - self.iter().fold(None, |result, other_task| match &result { - Some(task) if task.checkpoint < other_task.checkpoint => Some(other_task.clone()), - None => Some(other_task.clone()), - _ => result, - }) - } -} diff --git a/crates/sui-bridge/abi/erc20.json b/crates/sui-bridge/abi/erc20.json new file mode 100644 index 0000000000000..5829d4b2816e5 --- /dev/null +++ b/crates/sui-bridge/abi/erc20.json @@ -0,0 +1,358 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "allowance", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "needed", + "type": "uint256" + } + ], + "name": "ERC20InsufficientAllowance", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "needed", + "type": "uint256" + } + ], + "name": "ERC20InsufficientBalance", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "approver", + "type": "address" + } + ], + "name": "ERC20InvalidApprover", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "receiver", + "type": "address" + } + ], + "name": "ERC20InvalidReceiver", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "ERC20InvalidSender", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "ERC20InvalidSpender", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + 
"inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "form", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "burn", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "mint", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "testSkip", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/crates/sui-bridge/src/abi.rs b/crates/sui-bridge/src/abi.rs index e60d119877936..d1b844402be70 100644 --- a/crates/sui-bridge/src/abi.rs +++ b/crates/sui-bridge/src/abi.rs @@ -88,6 +88,12 @@ gen_eth_events!( gen_eth_events!(EthBridgeVault, "abi/bridge_vault.json"); +abigen!( + EthERC20, + "abi/erc20.json", + event_derives(serde::Deserialize, serde::Serialize) +); + impl EthBridgeEvent { pub fn try_into_bridge_action( self, diff --git a/crates/sui-bridge/src/e2e_tests/basic.rs b/crates/sui-bridge/src/e2e_tests/basic.rs index e875ef9efd72d..fd730708ae559 100644 --- a/crates/sui-bridge/src/e2e_tests/basic.rs +++ b/crates/sui-bridge/src/e2e_tests/basic.rs @@ -1,16 +1,19 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::abi::{eth_sui_bridge, EthBridgeEvent, EthSuiBridge}; +use crate::abi::{eth_sui_bridge, EthBridgeEvent, EthERC20, EthSuiBridge}; use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator; use crate::e2e_tests::test_utils::BridgeTestCluster; -use crate::e2e_tests::test_utils::{get_signatures, BridgeTestClusterBuilder}; +use crate::e2e_tests::test_utils::{ + get_signatures, send_eth_tx_and_get_tx_receipt, BridgeTestClusterBuilder, +}; +use crate::eth_transaction_builder::build_eth_transaction; use crate::events::{ SuiBridgeEvent, SuiToEthTokenBridgeV1, TokenTransferApproved, TokenTransferClaimed, }; use crate::sui_client::SuiBridgeClient; use crate::sui_transaction_builder::build_add_tokens_on_sui_transaction; -use crate::types::{BridgeAction, BridgeActionStatus, SuiToEthBridgeAction}; +use crate::types::{AddTokensOnEvmAction, BridgeAction, BridgeActionStatus, SuiToEthBridgeAction}; use crate::utils::publish_and_register_coins_return_add_coins_on_sui_action; use crate::utils::EthSigner; use eth_sui_bridge::EthSuiBridgeEvents; @@ -58,7 +61,7 @@ async fn test_bridge_from_eth_to_sui_to_eth() { let amount = 42; let sui_amount = amount * 100_000_000; - initiate_bridge_eth_to_sui(&bridge_test_cluster, amount, sui_amount, TOKEN_ID_ETH, 0) + initiate_bridge_eth_to_sui(&bridge_test_cluster, amount, 0) .await .unwrap(); let events = bridge_test_cluster @@ -141,8 +144,9 @@ async fn test_bridge_from_eth_to_sui_to_eth() { bridge_test_cluster.contracts().sui_bridge, eth_signer.clone().into(), ); - let tx = eth_sui_bridge.transfer_bridged_tokens_with_signatures(signatures, message); - let _eth_claim_tx_receipt = tx.send().await.unwrap().await.unwrap().unwrap(); + let call = eth_sui_bridge.transfer_bridged_tokens_with_signatures(signatures, message); + let eth_claim_tx_receipt = send_eth_tx_and_get_tx_receipt(call).await; + assert_eq!(eth_claim_tx_receipt.status.unwrap().as_u64(), 1); info!("Sui to Eth bridge transfer claimed"); // Assert eth_address_1 has received ETH assert_eq!( @@ -151,8 +155,10 @@ async fn test_bridge_from_eth_to_sui_to_eth() { ); } +// Test adding new coins on both Sui and Eth. +// Also test bridge node handling of `NewTokenEvent`. #[tokio::test(flavor = "multi_thread", worker_threads = 8)] -async fn test_add_new_coins_on_sui() { +async fn test_add_new_coins_on_sui_and_eth() { telemetry_subscribers::init_for_testing(); let mut bridge_test_cluster = BridgeTestClusterBuilder::new() .with_eth_env(true) @@ -163,12 +169,13 @@ async fn test_add_new_coins_on_sui() { let bridge_arg = bridge_test_cluster.get_mut_bridge_arg().await.unwrap(); - // Register tokens - let token_id = 42; + // Register tokens on Sui + let token_id = 5; + let token_sui_decimal = 9; // this needs to match ka.move let token_price = 10000; let sender = bridge_test_cluster.sui_user_address(); info!("Published new token"); - let action = publish_and_register_coins_return_add_coins_on_sui_action( + let sui_action = publish_and_register_coins_return_add_coins_on_sui_action( bridge_test_cluster.wallet(), bridge_arg, vec![Path::new("../../bridge/move/tokens/mock/ka").into()], @@ -177,13 +184,23 @@ async fn test_add_new_coins_on_sui() { 1, // seq num ) .await; + let new_token_erc_address = bridge_test_cluster.contracts().ka; + let eth_action = BridgeAction::AddTokensOnEvmAction(AddTokensOnEvmAction { + nonce: 0, + chain_id: BridgeChainId::EthCustom, + native: true, + token_ids: vec![token_id], + token_addresses: vec![new_token_erc_address], + token_sui_decimals: 
vec![token_sui_decimal], + token_prices: vec![token_price], + }); info!("Starting bridge cluster"); bridge_test_cluster.set_approved_governance_actions_for_next_start(vec![ - vec![action.clone()], - vec![action.clone()], - vec![], + vec![sui_action.clone(), eth_action.clone()], + vec![sui_action.clone()], + vec![eth_action.clone()], ]); bridge_test_cluster.start_bridge_cluster().await; bridge_test_cluster @@ -199,10 +216,14 @@ async fn test_add_new_coins_on_sui() { .expect("Failed to get bridge committee"), ); let agg = BridgeAuthorityAggregator::new(bridge_committee); - let certified_action = agg - .request_committee_signatures(action) + let certified_sui_action = agg + .request_committee_signatures(sui_action) + .await + .expect("Failed to request committee signatures for AddTokensOnSuiAction"); + let certified_eth_action = agg + .request_committee_signatures(eth_action.clone()) .await - .expect("Failed to request committee signatures"); + .expect("Failed to request committee signatures for AddTokensOnEvmAction"); let tx = build_add_tokens_on_sui_transaction( sender, @@ -212,18 +233,26 @@ async fn test_add_new_coins_on_sui() { .await .unwrap() .unwrap(), - certified_action, + certified_sui_action, bridge_arg, 1000, ) .unwrap(); let response = bridge_test_cluster.sign_and_execute_transaction(&tx).await; - assert_eq!( - response.effects.unwrap().status(), - &SuiExecutionStatus::Success - ); - info!("Approved new token"); + let effects = response.effects.unwrap(); + assert_eq!(effects.status(), &SuiExecutionStatus::Success); + assert!(response.events.unwrap().data.iter().any(|e| { + let sui_bridge_event = SuiBridgeEvent::try_from_sui_event(e).unwrap().unwrap(); + match sui_bridge_event { + SuiBridgeEvent::NewTokenEvent(e) => { + assert_eq!(e.token_id, token_id); + true + } + _ => false, + } + })); + info!("Approved new token on Sui"); // Assert new token is correctly added let treasury_summary = bridge_test_cluster @@ -251,6 +280,34 @@ async fn test_add_new_coins_on_sui() { native_token: false, } ); + + // Add new token on EVM + let config_address = bridge_test_cluster.contracts().bridge_config; + let eth_signer = bridge_test_cluster.get_eth_signer().await; + let eth_call = build_eth_transaction(config_address, eth_signer, certified_eth_action) + .await + .unwrap(); + let eth_receipt = send_eth_tx_and_get_tx_receipt(eth_call).await; + assert_eq!(eth_receipt.status.unwrap().as_u64(), 1); + + // Verify new tokens are added on EVM + let (address, dp, price) = bridge_test_cluster + .eth_env() + .get_supported_token(token_id) + .await; + assert_eq!(address, new_token_erc_address); + assert_eq!(dp, 9); + assert_eq!(price, token_price); + + initiate_bridge_erc20_to_sui( + &bridge_test_cluster, + 100, + new_token_erc_address, + token_id, + 0, + ) + .await + .unwrap(); } pub(crate) async fn deposit_native_eth_to_sol_contract( @@ -313,14 +370,100 @@ async fn deposit_eth_to_sui_package( wallet_context.execute_transaction_may_fail(tx).await } +pub async fn initiate_bridge_erc20_to_sui( + bridge_test_cluster: &BridgeTestCluster, + amount_u64: u64, + token_address: EthAddress, + token_id: u8, + nonce: u64, +) -> Result<(), anyhow::Error> { + let (eth_signer, eth_address) = bridge_test_cluster + .get_eth_signer_and_address() + .await + .unwrap(); + + // First, mint ERC20 tokens to the signer + let contract = EthERC20::new(token_address, eth_signer.clone().into()); + let decimal = contract.decimals().await? 
as usize; + let amount = U256::from(amount_u64) * U256::exp10(decimal); + let sui_amount = amount.as_u64(); + let mint_call = contract.mint(eth_address, amount); + let mint_tx_receipt = send_eth_tx_and_get_tx_receipt(mint_call).await; + assert_eq!(mint_tx_receipt.status.unwrap().as_u64(), 1); + + // Second, set allowance + let allowance_call = contract.approve(bridge_test_cluster.contracts().sui_bridge, amount); + let allowance_tx_receipt = send_eth_tx_and_get_tx_receipt(allowance_call).await; + assert_eq!(allowance_tx_receipt.status.unwrap().as_u64(), 1); + + // Third, deposit to bridge + let sui_recipient_address = bridge_test_cluster.sui_user_address(); + let sui_chain_id = bridge_test_cluster.sui_chain_id(); + let eth_chain_id = bridge_test_cluster.eth_chain_id(); + + info!( + "Depositing ERC20 (token id:{}, token_address: {}) to Solidity contract", + token_id, token_address + ); + let contract = EthSuiBridge::new( + bridge_test_cluster.contracts().sui_bridge, + eth_signer.clone().into(), + ); + let deposit_call = contract.bridge_erc20( + token_id, + amount, + sui_recipient_address.to_vec().into(), + sui_chain_id as u8, + ); + let tx_receipt = send_eth_tx_and_get_tx_receipt(deposit_call).await; + let eth_bridge_event = tx_receipt + .logs + .iter() + .find_map(EthBridgeEvent::try_from_log) + .unwrap(); + let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( + eth_bridge_event, + )) = eth_bridge_event + else { + unreachable!(); + }; + // assert eth log matches + assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); + assert_eq!(eth_bridge_event.nonce, nonce); + assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); + assert_eq!(eth_bridge_event.token_id, token_id); + assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); + assert_eq!(eth_bridge_event.sender_address, eth_address); + assert_eq!( + eth_bridge_event.recipient_address, + sui_recipient_address.to_vec() + ); + info!( + "Deposited ERC20 (token id:{}, token_address: {}) to Solidity contract", + token_id, token_address + ); + + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + eth_chain_id, + nonce, + BridgeActionStatus::Claimed, + ) + .await + .tap_ok(|_| { + info!( + nonce, + token_id, amount_u64, "Eth to Sui bridge transfer claimed" + ); + }) +} + pub async fn initiate_bridge_eth_to_sui( bridge_test_cluster: &BridgeTestCluster, amount: u64, - sui_amount: u64, - token_id: u8, nonce: u64, ) -> Result<(), anyhow::Error> { - info!("Depositing Eth to Solidity contract"); + info!("Depositing native Ether to Solidity contract, nonce: {nonce}, amount: {amount}"); let (eth_signer, eth_address) = bridge_test_cluster .get_eth_signer_and_address() .await @@ -329,6 +472,9 @@ pub async fn initiate_bridge_eth_to_sui( let sui_address = bridge_test_cluster.sui_user_address(); let sui_chain_id = bridge_test_cluster.sui_chain_id(); let eth_chain_id = bridge_test_cluster.eth_chain_id(); + let token_id = TOKEN_ID_ETH; + + let sui_amount = (U256::from(amount) * U256::exp10(8)).as_u64(); // DP for Ether on Sui let eth_tx = deposit_native_eth_to_sol_contract( &eth_signer, @@ -338,8 +484,7 @@ amount, ) .await; - let pending_tx = eth_tx.send().await.unwrap(); - let tx_receipt = pending_tx.await.unwrap().unwrap(); + let tx_receipt = send_eth_tx_and_get_tx_receipt(eth_tx).await; let eth_bridge_event = tx_receipt .logs .iter() @@ -474,9 +619,16 @@ async fn wait_for_transfer_action_status( status, chain_id as u8 ); loop { + let timer 
= std::time::Instant::now(); let res = sui_bridge_client .get_token_transfer_action_onchain_status_until_success(chain_id as u8, nonce) .await; + info!( + "get_token_transfer_action_onchain_status_until_success took {:?}, status: {:?}", + timer.elapsed(), + res + ); + if res == status { info!( "detected on chain status {:?}. chain: {:?}, nonce: {nonce}", diff --git a/crates/sui-bridge/src/e2e_tests/complex.rs b/crates/sui-bridge/src/e2e_tests/complex.rs index bc20619d511af..d822074146ae6 100644 --- a/crates/sui-bridge/src/e2e_tests/complex.rs +++ b/crates/sui-bridge/src/e2e_tests/complex.rs @@ -54,7 +54,7 @@ async fn test_sui_bridge_paused() { assert!(!bridge_client.get_bridge_summary().await.unwrap().is_frozen); // try bridge from eth and verify it works on sui - initiate_bridge_eth_to_sui(&bridge_test_cluster, 10, 10 * 100_000_000, TOKEN_ID_ETH, 0) + initiate_bridge_eth_to_sui(&bridge_test_cluster, 10, 0) .await .unwrap(); // verify Eth was transferred to Sui address @@ -107,9 +107,7 @@ async fn test_sui_bridge_paused() { assert!(bridge_client.get_bridge_summary().await.unwrap().is_frozen); // Transfer from eth to sui should fail on Sui - let eth_to_sui_bridge_action = - initiate_bridge_eth_to_sui(&bridge_test_cluster, 10, 10 * 100_000_000, TOKEN_ID_ETH, 1) - .await; + let eth_to_sui_bridge_action = initiate_bridge_eth_to_sui(&bridge_test_cluster, 10, 1).await; assert!(eth_to_sui_bridge_action.is_err()); // message should not be recorded on Sui when the bridge is paused let res = bridge_test_cluster diff --git a/crates/sui-bridge/src/e2e_tests/test_utils.rs b/crates/sui-bridge/src/e2e_tests/test_utils.rs index f75969f6a0b34..67c3e61b3e295 100644 --- a/crates/sui-bridge/src/e2e_tests/test_utils.rs +++ b/crates/sui-bridge/src/e2e_tests/test_utils.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::abi::EthBridgeCommittee; +use crate::abi::EthBridgeConfig; use crate::crypto::BridgeAuthorityKeyPair; use crate::crypto::BridgeAuthorityPublicKeyBytes; use crate::events::*; @@ -24,6 +25,7 @@ use std::path::Path; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; +use std::sync::Arc; use sui_json_rpc_types::SuiEvent; use sui_json_rpc_types::SuiTransactionBlockResponse; use sui_json_rpc_types::SuiTransactionBlockResponseOptions; @@ -67,6 +69,7 @@ const BTC_NAME: &str = "BTC"; const ETH_NAME: &str = "ETH"; const USDC_NAME: &str = "USDC"; const USDT_NAME: &str = "USDT"; +const KA_NAME: &str = "KA"; pub const TEST_PK: &str = "0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356"; @@ -238,6 +241,11 @@ impl BridgeTestCluster { Ok((eth_signer, eth_address)) } + pub async fn get_eth_signer(&self) -> EthSigner { + let (eth_signer, _) = self.get_eth_signer_and_private_key().await.unwrap(); + eth_signer + } + pub fn bridge_client(&self) -> &SuiBridgeClient { &self.bridge_client } @@ -258,6 +266,10 @@ impl BridgeTestCluster { self.eth_chain_id } + pub(crate) fn eth_env(&self) -> &EthBridgeEnvironment { + &self.eth_environment + } + pub fn contracts(&self) -> &DeployedSolContracts { self.eth_environment.contracts() } @@ -437,6 +449,7 @@ pub struct DeployedSolContracts { pub eth: EthAddress, pub usdc: EthAddress, pub usdt: EthAddress, + pub ka: EthAddress, } impl DeployedSolContracts { @@ -590,15 +603,16 @@ pub(crate) async fn deploy_sol_contract( } let contracts = DeployedSolContracts { - sui_bridge: *deployed_contracts.get(SUI_BRIDGE_NAME).unwrap(), - bridge_committee: *deployed_contracts.get(BRIDGE_COMMITTEE_NAME).unwrap(), - bridge_config: 
*deployed_contracts.get(BRIDGE_CONFIG_NAME).unwrap(), - bridge_limiter: *deployed_contracts.get(BRIDGE_LIMITER_NAME).unwrap(), - bridge_vault: *deployed_contracts.get(BRIDGE_VAULT_NAME).unwrap(), - btc: *deployed_contracts.get(BTC_NAME).unwrap(), - eth: *deployed_contracts.get(ETH_NAME).unwrap(), - usdc: *deployed_contracts.get(USDC_NAME).unwrap(), - usdt: *deployed_contracts.get(USDT_NAME).unwrap(), + sui_bridge: deployed_contracts.remove(SUI_BRIDGE_NAME).unwrap(), + bridge_committee: deployed_contracts.remove(BRIDGE_COMMITTEE_NAME).unwrap(), + bridge_config: deployed_contracts.remove(BRIDGE_CONFIG_NAME).unwrap(), + bridge_limiter: deployed_contracts.remove(BRIDGE_LIMITER_NAME).unwrap(), + bridge_vault: deployed_contracts.remove(BRIDGE_VAULT_NAME).unwrap(), + btc: deployed_contracts.remove(BTC_NAME).unwrap(), + eth: deployed_contracts.remove(ETH_NAME).unwrap(), + usdc: deployed_contracts.remove(USDC_NAME).unwrap(), + usdt: deployed_contracts.remove(USDT_NAME).unwrap(), + ka: deployed_contracts.remove(KA_NAME).unwrap(), }; let eth_bridge_committee = EthBridgeCommittee::new(contracts.bridge_committee, eth_signer.clone().into()); @@ -628,7 +642,7 @@ pub(crate) async fn deploy_sol_contract( } #[derive(Debug)] -pub(crate) struct EthBridgeEnvironment { +pub struct EthBridgeEnvironment { pub rpc_url: String, process: Child, contracts: Option<DeployedSolContracts>, } @@ -665,6 +679,25 @@ impl EthBridgeEnvironment { pub(crate) fn contracts(&self) -> &DeployedSolContracts { self.contracts.as_ref().unwrap() } + + pub(crate) fn get_bridge_config( + &self, + ) -> EthBridgeConfig<ethers::prelude::Provider<ethers::providers::Http>> { + let provider = Arc::new( + ethers::prelude::Provider::<ethers::providers::Http>::try_from(&self.rpc_url) + .unwrap() + .interval(std::time::Duration::from_millis(2000)), + ); + EthBridgeConfig::new(self.contracts().bridge_config, provider.clone()) + } + + pub(crate) async fn get_supported_token(&self, token_id: u8) -> (EthAddress, u8, u64) { + let config = self.get_bridge_config(); + let token_address = config.token_address_of(token_id).call().await.unwrap(); + let token_sui_decimal = config.token_sui_decimal_of(token_id).call().await.unwrap(); + let token_price = config.token_price_of(token_id).call().await.unwrap(); + (token_address, token_sui_decimal, token_price) + } } impl Drop for EthBridgeEnvironment { @@ -766,6 +799,17 @@ pub(crate) async fn get_signatures( .collect() } +pub(crate) async fn send_eth_tx_and_get_tx_receipt<B, M, D>( + call: FunctionCall<B, M, D>, +) -> TransactionReceipt +where + M: Middleware, + B: std::borrow::Borrow<M>, + D: ethers::abi::Detokenize, +{ + call.send().await.unwrap().await.unwrap().unwrap() +} + /// A simple struct to create a temporary directory that /// will be removed when it goes out of scope. 
struct TempDir { diff --git a/crates/sui-bridge/src/node.rs b/crates/sui-bridge/src/node.rs index 031152ce2a776..97a7596cbcb74 100644 --- a/crates/sui-bridge/src/node.rs +++ b/crates/sui-bridge/src/node.rs @@ -24,7 +24,10 @@ use std::{ time::Duration, }; use sui_types::{ - bridge::{BRIDGE_COMMITTEE_MODULE_NAME, BRIDGE_MODULE_NAME}, + bridge::{ + BRIDGE_COMMITTEE_MODULE_NAME, BRIDGE_LIMITER_MODULE_NAME, BRIDGE_MODULE_NAME, + BRIDGE_TREASURY_MODULE_NAME, + }, event::EventID, Identifier, }; @@ -166,6 +169,8 @@ fn get_sui_modules_to_watch( let sui_bridge_modules = vec![ BRIDGE_MODULE_NAME.to_owned(), BRIDGE_COMMITTEE_MODULE_NAME.to_owned(), + BRIDGE_TREASURY_MODULE_NAME.to_owned(), + BRIDGE_LIMITER_MODULE_NAME.to_owned(), ]; if let Some(cursor) = sui_bridge_module_last_processed_event_id_override { info!("Overriding cursor for sui bridge modules to {:?}", cursor); @@ -315,13 +320,17 @@ mod tests { let store = BridgeOrchestratorTables::new(temp_dir.path()); let bridge_module = BRIDGE_MODULE_NAME.to_owned(); let committee_module = BRIDGE_COMMITTEE_MODULE_NAME.to_owned(); + let treasury_module = BRIDGE_TREASURY_MODULE_NAME.to_owned(); + let limiter_module = BRIDGE_LIMITER_MODULE_NAME.to_owned(); // No override, no stored watermark, use None let sui_modules_to_watch = get_sui_modules_to_watch(&store, None); assert_eq!( sui_modules_to_watch, vec![ (bridge_module.clone(), None), - (committee_module.clone(), None) + (committee_module.clone(), None), + (treasury_module.clone(), None), + (limiter_module.clone(), None) ] .into_iter() .collect::<HashMap<_, _>>() @@ -337,7 +346,9 @@ mod tests { sui_modules_to_watch, vec![ (bridge_module.clone(), Some(override_cursor)), - (committee_module.clone(), Some(override_cursor)) + (committee_module.clone(), Some(override_cursor)), + (treasury_module.clone(), Some(override_cursor)), + (limiter_module.clone(), Some(override_cursor)) ] .into_iter() .collect::<HashMap<_, _>>() @@ -357,7 +368,9 @@ mod tests { sui_modules_to_watch, vec![ (bridge_module.clone(), Some(stored_cursor)), - (committee_module.clone(), None) + (committee_module.clone(), None), + (treasury_module.clone(), None), + (limiter_module.clone(), None) ] .into_iter() .collect::<HashMap<_, _>>() @@ -376,7 +389,9 @@ mod tests { sui_modules_to_watch, vec![ (bridge_module.clone(), Some(override_cursor)), - (committee_module.clone(), Some(override_cursor)) + (committee_module.clone(), Some(override_cursor)), + (treasury_module.clone(), Some(override_cursor)), + (limiter_module.clone(), Some(override_cursor)) ] .into_iter() .collect::<HashMap<_, _>>() diff --git a/crates/sui-bridge/src/types.rs b/crates/sui-bridge/src/types.rs index e722a79c65eea..4fb157254a5a4 100644 --- a/crates/sui-bridge/src/types.rs +++ b/crates/sui-bridge/src/types.rs @@ -354,7 +354,7 @@ impl BridgeAction { // Digest of BridgeAction (with Keccak256 hasher) pub fn digest(&self) -> BridgeActionDigest { let mut hasher = Keccak256::default(); - hasher.update(&self.to_bytes()); + hasher.update(self.to_bytes()); BridgeActionDigest::new(hasher.finalize().into()) } diff --git a/crates/sui-cluster-test/src/test_case/coin_index_test.rs b/crates/sui-cluster-test/src/test_case/coin_index_test.rs index e3977af2602dc..460180d2b8b22 100644 --- a/crates/sui-cluster-test/src/test_case/coin_index_test.rs +++ b/crates/sui-cluster-test/src/test_case/coin_index_test.rs @@ -631,7 +631,7 @@ async fn publish_managed_coin_package( let compiled_package = compile_managed_coin_package(); let all_module_bytes = compiled_package.get_package_base64(/* with_unpublished_deps */ false); - let dependencies = 
compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let params = rpc_params![ ctx.get_wallet_address(), diff --git a/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs b/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs index a53c685c46c9b..eef1107892b0f 100644 --- a/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs +++ b/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs @@ -24,7 +24,7 @@ impl TestCaseImpl for FullNodeBuildPublishTransactionTest { let compiled_package = compile_basics_package(); let all_module_bytes = compiled_package.get_package_base64(/* with_unpublished_deps */ false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let params = rpc_params![ ctx.get_wallet_address(), diff --git a/crates/sui-config/Cargo.toml b/crates/sui-config/Cargo.toml index 87f5ab16a33ad..9704c09230224 100644 --- a/crates/sui-config/Cargo.toml +++ b/crates/sui-config/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anemo.workspace = true anyhow = { workspace = true, features = ["backtrace"] } diff --git a/crates/sui-config/src/node.rs b/crates/sui-config/src/node.rs index 1b608c36bc2f4..9637ffe4e2be8 100644 --- a/crates/sui-config/src/node.rs +++ b/crates/sui-config/src/node.rs @@ -19,7 +19,6 @@ use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; -use std::usize; use sui_keys::keypair_file::{read_authority_keypair_from_file, read_keypair_from_file}; use sui_types::base_types::{ObjectID, SuiAddress}; use sui_types::committee::EpochId; @@ -228,6 +227,8 @@ fn default_jwk_fetch_interval_seconds() -> u64 { pub fn default_zklogin_oauth_providers() -> BTreeMap<Chain, BTreeSet<String>> { let mut map = BTreeMap::new(); + + // providers that are available on devnet only. let experimental_providers = BTreeSet::from([ "Google".to_string(), "Facebook".to_string(), @@ -239,15 +240,19 @@ pub fn default_zklogin_oauth_providers() -> BTreeMap<Chain, BTreeSet<String>> { "Microsoft".to_string(), "KarrierOne".to_string(), "Credenza3".to_string(), - "AwsTenant-region:us-east-1-tenant_id:us-east-1_LPSLCkC3A".to_string(), - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8".to_string(), + "AwsTenant-region:us-east-1-tenant_id:us-east-1_LPSLCkC3A".to_string(), // test tenant in mysten aws + "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8".to_string(), // ambrus, external partner ]); + + // providers that are available for mainnet and testnet. let providers = BTreeSet::from([ "Google".to_string(), "Facebook".to_string(), "Twitch".to_string(), "Apple".to_string(), "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8".to_string(), + "KarrierOne".to_string(), + "Credenza3".to_string(), ]); map.insert(Chain::Mainnet, providers.clone()); map.insert(Chain::Testnet, providers); diff --git a/crates/sui-config/src/p2p.rs b/crates/sui-config/src/p2p.rs index 4be9e93ddde90..7b7b4ba59198d 100644 --- a/crates/sui-config/src/p2p.rs +++ b/crates/sui-config/src/p2p.rs @@ -267,6 +267,7 @@ impl StateSyncConfig { /// * If the node marks itself as Private, only nodes that have it in /// their `allowlisted_peers` or `seed_peers` will try to connect to it. /// * If not set, defaults to Public. 
+/// /// AccessType is useful when a network of nodes wants to stay private. To achieve this, /// mark every node in this network as `Private` and allowlist/seed them to each other. #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] diff --git a/crates/sui-core/Cargo.toml b/crates/sui-core/Cargo.toml index 408818130972b..55bec0f16f9b5 100644 --- a/crates/sui-core/Cargo.toml +++ b/crates/sui-core/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anyhow = { workspace = true, features = ["backtrace"] } arc-swap.workspace = true @@ -86,7 +89,6 @@ sui-framework.workspace = true sui-swarm-config.workspace = true sui-genesis-builder.workspace = true sui-json-rpc-types.workspace = true -sui-rest-api.workspace = true sui-macros.workspace = true sui-move-build.workspace = true sui-network.workspace = true diff --git a/crates/sui-core/src/authority.rs b/crates/sui-core/src/authority.rs index c8b50cf07c330..b101ca281321a 100644 --- a/crates/sui-core/src/authority.rs +++ b/crates/sui-core/src/authority.rs @@ -871,15 +871,12 @@ impl AuthorityState { self.get_backing_package_store().as_ref(), )?; - let (input_objects, receiving_objects) = self - .input_loader - .read_objects_for_signing( - Some(tx_digest), - &input_object_kinds, - &receiving_objects_refs, - epoch_store.epoch(), - ) - .await?; + let (input_objects, receiving_objects) = self.input_loader.read_objects_for_signing( + Some(tx_digest), + &input_object_kinds, + &receiving_objects_refs, + epoch_store.epoch(), + )?; let (_gas_status, checked_input_objects) = sui_transaction_checks::check_transaction_input( epoch_store.protocol_config(), @@ -1168,9 +1165,7 @@ impl AuthorityState { debug!("execute_certificate_internal"); let tx_digest = certificate.digest(); - let input_objects = self - .read_objects_for_execution(certificate, epoch_store) - .await?; + let input_objects = self.read_objects_for_execution(certificate, epoch_store)?; if expected_effects_digest.is_none() { // We could be re-executing a previously executed but uncommitted transaction, perhaps after @@ -1197,7 +1192,7 @@ impl AuthorityState { .tap_err(|e| info!(?tx_digest, "process_certificate failed: {e}")) } - pub async fn read_objects_for_execution( + pub fn read_objects_for_execution( &self, certificate: &VerifiedExecutableTransaction, epoch_store: &Arc<AuthorityPerEpochStore>, @@ -1208,14 +1203,12 @@ impl AuthorityState { .execution_load_input_objects_latency .start_timer(); let input_objects = &certificate.data().transaction_data().input_objects()?; - self.input_loader - .read_objects_for_execution( - epoch_store.as_ref(), - &certificate.key(), - input_objects, - epoch_store.epoch(), - ) - .await + self.input_loader.read_objects_for_execution( + epoch_store.as_ref(), + &certificate.key(), + input_objects, + epoch_store.epoch(), + ) } /// Test only wrapper for `try_execute_immediately()` above, useful for checking errors if the @@ -1779,16 +1772,13 @@ impl AuthorityState { self.get_backing_package_store().as_ref(), )?; - let (input_objects, receiving_objects) = self - .input_loader - .read_objects_for_signing( - // We don't want to cache this transaction since it's a dry run. - None, - &input_object_kinds, - &receiving_object_refs, - epoch_store.epoch(), - ) - .await?; + let (input_objects, receiving_objects) = self.input_loader.read_objects_for_signing( + // We don't want to cache this transaction since it's a dry run. 
+ None, + &input_object_kinds, + &receiving_object_refs, + epoch_store.epoch(), + )?; // make a gas object if one was not provided let mut gas_object_refs = transaction.gas().to_vec(); @@ -1998,16 +1988,13 @@ impl AuthorityState { self.get_backing_package_store().as_ref(), )?; - let (mut input_objects, receiving_objects) = self - .input_loader - .read_objects_for_signing( - // We don't want to cache this transaction since it's a dev inspect. - None, - &input_object_kinds, - &receiving_object_refs, - epoch_store.epoch(), - ) - .await?; + let (mut input_objects, receiving_objects) = self.input_loader.read_objects_for_signing( + // We don't want to cache this transaction since it's a dev inspect. + None, + &input_object_kinds, + &receiving_object_refs, + epoch_store.epoch(), + )?; // Create and use a dummy gas object if there is no gas object provided. let dummy_gas_object = Object::new_gas_with_balance_and_owner_for_testing( @@ -3725,12 +3712,12 @@ impl AuthorityState { .rev() .skip_while(|d| cursor.is_some() && Some(*d) != cursor) .skip(usize::from(cursor.is_some())); - return Ok(iter.take(limit.unwrap_or(usize::max_value())).collect()); + return Ok(iter.take(limit.unwrap_or(usize::MAX)).collect()); } else { let iter = iter .skip_while(|d| cursor.is_some() && Some(*d) != cursor) .skip(usize::from(cursor.is_some())); - return Ok(iter.take(limit.unwrap_or(usize::max_value())).collect()); + return Ok(iter.take(limit.unwrap_or(usize::MAX)).collect()); } } self.get_indexes()? @@ -4914,9 +4901,7 @@ impl AuthorityState { ) .await?; - let input_objects = self - .read_objects_for_execution(&executable_tx, epoch_store) - .await?; + let input_objects = self.read_objects_for_execution(&executable_tx, epoch_store)?; let (temporary_store, effects, _execution_error_opt) = self.prepare_certificate(&execution_guard, &executable_tx, input_objects, epoch_store)?; @@ -5545,7 +5530,6 @@ impl NodeStateDump { Ok(path) } - #[cfg(not(release))] pub fn read_from_file(path: &PathBuf) -> Result<Self, anyhow::Error> { let file = File::open(path)?; serde_json::from_reader(file).map_err(|e| anyhow::anyhow!(e)) } diff --git a/crates/sui-core/src/authority/authority_per_epoch_store.rs b/crates/sui-core/src/authority/authority_per_epoch_store.rs index 367e77965d1d7..9e922112eff57 100644 --- a/crates/sui-core/src/authority/authority_per_epoch_store.rs +++ b/crates/sui-core/src/authority/authority_per_epoch_store.rs @@ -125,8 +125,8 @@ pub(crate) type EncG = bls12381::G2Element; // CertLockGuard and CertTxGuard are functionally identical right now, but we retain a distinction // anyway. If we need to support distributed object storage, having this distinction will be // useful, as we will most likely have to re-implement a retry / write-ahead-log at that point. -pub struct CertLockGuard(MutexGuard); -pub struct CertTxGuard(CertLockGuard); +pub struct CertLockGuard(#[allow(unused)] MutexGuard); +pub struct CertTxGuard(#[allow(unused)] CertLockGuard); impl CertTxGuard { pub fn release(self) {} @@ -370,15 +370,15 @@ pub struct AuthorityEpochTables { /// The tables below manage shared object locks / versions. There are three ways they can be /// updated: /// 1. (validators only): Upon receiving a certified transaction from consensus, the authority - /// assigns the next version to each shared object of the transaction. The next versions of - /// the shared objects are updated as well. + /// assigns the next version to each shared object of the transaction. The next versions of + /// the shared objects are updated as well. /// 2. 
(validators only): Upon receiving a new consensus commit, the authority assigns the - /// next version of the randomness state object to an expected future transaction to be - /// generated after the next random value is available. The next version of the randomness - /// state object is updated as well. + /// next version of the randomness state object to an expected future transaction to be + /// generated after the next random value is available. The next version of the randomness + /// state object is updated as well. /// 3. (fullnodes + validators): Upon receiving a certified effect from state sync, or - /// transaction orchestrator fast execution path, the node assigns the shared object - /// versions from the transaction effect. Next object versions are not updated. + /// transaction orchestrator fast execution path, the node assigns the shared object + /// versions from the transaction effect. Next object versions are not updated. /// /// REQUIRED: all authorities must assign the same shared object versions for each transaction. assigned_shared_object_versions: DBMap<TransactionDigest, Vec<(ObjectID, SequenceNumber)>>, diff --git a/crates/sui-core/src/authority/authority_store_pruner.rs b/crates/sui-core/src/authority/authority_store_pruner.rs index aca327f15f5a2..17f34f5755f89 100644 --- a/crates/sui-core/src/authority/authority_store_pruner.rs +++ b/crates/sui-core/src/authority/authority_store_pruner.rs @@ -1152,6 +1152,8 @@ mod pprof_tests { } #[tokio::test] + // un-ignore once https://github.com/tikv/pprof-rs/issues/250 is fixed + #[ignore] async fn ensure_no_tombstone_fragmentation_in_stack_frame_with_ignore_tombstones( ) -> Result<(), anyhow::Error> { // This test writes a bunch of objects to objects table, invokes pruning on it and @@ -1188,6 +1190,8 @@ mod pprof_tests { } #[tokio::test] + // un-ignore once https://github.com/tikv/pprof-rs/issues/250 is fixed + #[ignore] async fn ensure_no_tombstone_fragmentation_in_stack_frame_after_flush( ) -> Result<(), anyhow::Error> { // This test writes a bunch of objects to objects table, invokes pruning on it and diff --git a/crates/sui-core/src/authority/authority_store_tables.rs b/crates/sui-core/src/authority/authority_store_tables.rs index d61187a3050d3..db8394b2de2d5 100644 --- a/crates/sui-core/src/authority/authority_store_tables.rs +++ b/crates/sui-core/src/authority/authority_store_tables.rs @@ -71,9 +71,10 @@ pub struct AuthorityPerpetualTables { /// A map between the transaction digest of a certificate to the effects of its execution. /// We store effects into this table in two different cases: /// 1. When a transaction is synced through state_sync, we store the effects here. These effects - /// are known to be final in the network, but may not have been executed locally yet. + /// are known to be final in the network, but may not have been executed locally yet. /// 2. When the transaction is executed locally on this node, we store the effects here. This means that - /// it's possible to store the same effects twice (once for the synced transaction, and once for the executed). + /// it's possible to store the same effects twice (once for the synced transaction, and once for the executed). + /// /// It's also possible for the effects to be reverted if the transaction didn't make it into the epoch. 
#[default_options_override_fn = "effects_table_default_config"] pub(crate) effects: DBMap<TransactionDigest, TransactionEffects>, diff --git a/crates/sui-core/src/authority/shared_object_congestion_tracker.rs b/crates/sui-core/src/authority/shared_object_congestion_tracker.rs index 4732afe4b7399..041e523594074 100644 --- a/crates/sui-core/src/authority/shared_object_congestion_tracker.rs +++ b/crates/sui-core/src/authority/shared_object_congestion_tracker.rs @@ -78,9 +78,7 @@ impl SharedObjectCongestionTracker { previously_deferred_tx_digests: &HashMap<TransactionDigest, DeferralKey>, commit_round: Round, ) -> Option<(DeferralKey, Vec<ObjectID>)> { - let Some(tx_cost) = self.get_tx_cost(cert) else { - return None; - }; + let tx_cost = self.get_tx_cost(cert)?; let shared_input_objects: Vec<_> = cert.shared_input_objects().collect(); if shared_input_objects.is_empty() { diff --git a/crates/sui-core/src/authority/test_authority_builder.rs b/crates/sui-core/src/authority/test_authority_builder.rs index a1fc9c40020c8..6ba9dd32d4801 100644 --- a/crates/sui-core/src/authority/test_authority_builder.rs +++ b/crates/sui-core/src/authority/test_authority_builder.rs @@ -227,10 +227,7 @@ impl<'a> TestAuthorityBuilder<'a> { epoch_flags, ) .unwrap(); - let expensive_safety_checks = match self.expensive_safety_checks { - None => ExpensiveSafetyCheckConfig::default(), - Some(config) => config, - }; + let expensive_safety_checks = self.expensive_safety_checks.unwrap_or_default(); let cache_traits = build_execution_cache(&epoch_start_configuration, &registry, &authority_store); diff --git a/crates/sui-core/src/authority_aggregator.rs b/crates/sui-core/src/authority_aggregator.rs index 6bc31210d6c51..8365a2064fb3b 100644 --- a/crates/sui-core/src/authority_aggregator.rs +++ b/crates/sui-core/src/authority_aggregator.rs @@ -277,6 +277,7 @@ pub enum AggregatorProcessCertificateError { } pub fn group_errors(errors: Vec<(SuiError, Vec<AuthorityName>, StakeUnit)>) -> GroupedErrors { + #[allow(clippy::mutable_key_type)] let mut grouped_errors = HashMap::new(); for (error, names, stake) in errors { let entry = grouped_errors.entry(error).or_insert((0, vec![])); @@ -1501,11 +1502,11 @@ where // create a set of validators that we should sample to request input/output objects from let validators_to_sample = if request.include_input_objects || request.include_output_objects { - // Always at least ask 1 validator - let number_to_sample = std::cmp::max(1, self.committee.num_members() / 2); + // Number of validators to request input/output objects from + const NUMBER_TO_SAMPLE: usize = 5; self.committee - .choose_multiple_weighted_iter(number_to_sample) + .choose_multiple_weighted_iter(NUMBER_TO_SAMPLE) .cloned() .collect() } else { diff --git a/crates/sui-core/src/authority_server.rs b/crates/sui-core/src/authority_server.rs index b61975ef79d40..cb819f18a1a16 100644 --- a/crates/sui-core/src/authority_server.rs +++ b/crates/sui-core/src/authority_server.rs @@ -1083,7 +1083,7 @@ fn make_tonic_request_for_testing<T>(message: T) -> tonic::Request<T> { // TODO: refine error matching here fn normalize(err: SuiError) -> Weight { - match dbg!(err) { + match err { SuiError::UserInputError { .. } | SuiError::InvalidSignature { .. } | SuiError::SignerSignatureAbsent { .. 
} diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs index b1872a9f21723..fd0a9c90a34d8 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs @@ -94,7 +94,7 @@ pub struct CheckpointTimeoutConfig { // the function is still very cheap to call so this is okay. thread_local! { static SCHEDULING_TIMEOUT: once_cell::sync::OnceCell<CheckpointTimeoutConfig> = - once_cell::sync::OnceCell::new(); + const { once_cell::sync::OnceCell::new() }; } #[cfg(msim)] diff --git a/crates/sui-core/src/consensus_adapter.rs b/crates/sui-core/src/consensus_adapter.rs index 8b33d8031fac7..f67388061656c 100644 --- a/crates/sui-core/src/consensus_adapter.rs +++ b/crates/sui-core/src/consensus_adapter.rs @@ -1029,12 +1029,12 @@ impl<'a> InflightDropGuard<'a> { adapter .metrics .sequencing_certificate_inflight - .with_label_values(&[&tx_type]) + .with_label_values(&[tx_type]) .inc(); adapter .metrics .sequencing_certificate_attempt - .with_label_values(&[&tx_type]) + .with_label_values(&[tx_type]) .inc(); Self { adapter, diff --git a/crates/sui-core/src/consensus_handler.rs b/crates/sui-core/src/consensus_handler.rs index 05c0168b831fd..0897ed724e03f 100644 --- a/crates/sui-core/src/consensus_handler.rs +++ b/crates/sui-core/src/consensus_handler.rs @@ -10,6 +10,7 @@ use std::{ use arc_swap::ArcSwap; use async_trait::async_trait; +use consensus_core::CommitConsumerMonitor; use lru::LruCache; use mysten_metrics::{monitored_mpsc::UnboundedReceiver, monitored_scope, spawn_monitored_task}; use narwhal_config::Committee; @@ -504,12 +505,16 @@ impl MysticetiConsensusHandler { pub fn new( mut consensus_handler: ConsensusHandler<CheckpointService>, mut receiver: UnboundedReceiver<consensus_core::CommittedSubDag>, + commit_consumer_monitor: Arc<CommitConsumerMonitor>, ) -> Self { let handle = spawn_monitored_task!(async move { + // TODO: pause when execution is overloaded, so consensus can detect the backpressure. while let Some(consensus_output) = receiver.recv().await { + let commit_index = consensus_output.commit_ref.index; consensus_handler .handle_consensus_output_internal(consensus_output) .await; + commit_consumer_monitor.set_highest_handled_commit(commit_index); } }); Self { diff --git a/crates/sui-core/src/consensus_manager/mysticeti_manager.rs b/crates/sui-core/src/consensus_manager/mysticeti_manager.rs index 3aa2abc343e5e..6a0304f06b2d0 100644 --- a/crates/sui-core/src/consensus_manager/mysticeti_manager.rs +++ b/crates/sui-core/src/consensus_manager/mysticeti_manager.rs @@ -1,11 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::{path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc, time::Duration}; use arc_swap::ArcSwapOption; use async_trait::async_trait; use consensus_config::{Committee, NetworkKeyPair, Parameters, ProtocolKeyPair}; -use consensus_core::{CommitConsumer, CommitIndex, ConsensusAuthority, Round}; +use consensus_core::{CommitConsumer, CommitIndex, ConsensusAuthority}; use fastcrypto::ed25519; use mysten_metrics::{monitored_mpsc::unbounded_channel, RegistryID, RegistryService}; use narwhal_executor::ExecutionState; @@ -41,6 +41,7 @@ pub struct MysticetiManager { metrics: Arc<ConsensusManagerMetrics>, registry_service: RegistryService, authority: ArcSwapOption<(ConsensusAuthority, RegistryID)>, + boot_counter: Mutex<u64>, // Use a shared lazy mysticeti client so we can update the internal mysticeti // client that gets created for every new epoch. 
client: Arc<LazyMysticetiClient>, @@ -69,6 +70,7 @@ impl MysticetiManager { authority: ArcSwapOption::empty(), client, consensus_handler: Mutex::new(None), + boot_counter: Mutex::new(0), } } @@ -124,11 +126,17 @@ impl ConsensusManagerTrait for MysticetiManager { let consensus_config = config .consensus_config() .expect("consensus_config should exist"); - let parameters = Parameters { + + let mut parameters = Parameters { db_path: self.get_store_path(epoch), ..consensus_config.parameters.clone().unwrap_or_default() }; + // Disable the automated last known block sync for mainnet for now + if epoch_store.get_chain_identifier().chain() == sui_protocol_config::Chain::Mainnet { + parameters.sync_last_known_own_block_timeout = Duration::ZERO; + }; + let own_protocol_key = self.protocol_keypair.public(); let (own_index, _) = committee .authorities() @@ -143,12 +151,13 @@ impl ConsensusManagerTrait for MysticetiManager { let consumer = CommitConsumer::new( commit_sender, // TODO(mysticeti): remove dependency on narwhal executor - consensus_handler.last_executed_sub_dag_round() as Round, consensus_handler.last_executed_sub_dag_index() as CommitIndex, ); + let monitor = consumer.monitor(); // TODO(mysticeti): Investigate if we need to return potential errors from // AuthorityNode and add retries here? + let boot_counter = *self.boot_counter.lock().await; let authority = ConsensusAuthority::start( network_type, own_index, @@ -160,10 +169,15 @@ impl ConsensusManagerTrait for MysticetiManager { Arc::new(tx_validator.clone()), consumer, registry.clone(), + boot_counter, ) .await; let client = authority.transaction_client(); + // Now increment the boot counter + let mut boot_counter = self.boot_counter.lock().await; + *boot_counter += 1; + let registry_id = self.registry_service.add(registry.clone()); self.authority @@ -173,7 +187,7 @@ impl ConsensusManagerTrait for MysticetiManager { self.client.set(client); // spin up the new mysticeti consensus handler to listen for committed sub dags - let handler = MysticetiConsensusHandler::new(consensus_handler, commit_receiver); + let handler = MysticetiConsensusHandler::new(consensus_handler, commit_receiver, monitor); let mut consensus_handler = self.consensus_handler.lock().await; *consensus_handler = Some(handler); } diff --git a/crates/sui-core/src/mysticeti_adapter.rs b/crates/sui-core/src/mysticeti_adapter.rs index 7fe53780e0e29..48bbb39b77a71 100644 --- a/crates/sui-core/src/mysticeti_adapter.rs +++ b/crates/sui-core/src/mysticeti_adapter.rs @@ -4,14 +4,14 @@ use std::{sync::Arc, time::Duration}; use arc_swap::{ArcSwapOption, Guard}; -use consensus_core::TransactionClient; +use consensus_core::{ClientError, TransactionClient}; use sui_types::{ error::{SuiError, SuiResult}, messages_consensus::{ConsensusTransaction, ConsensusTransactionKind}, }; use tap::prelude::*; use tokio::time::{sleep, Instant}; -use tracing::warn; +use tracing::{error, info, warn}; use crate::{ authority::authority_per_epoch_store::AuthorityPerEpochStore, @@ -92,9 +92,21 @@ impl SubmitToConsensus for LazyMysticetiClient { .expect("Client should always be returned") .submit(transactions_bytes) .await - .tap_err(|r| { // Will be logged by caller as well. 
- warn!("Submit transactions failed with: {:?}", r); + let msg = format!("Transaction submission failed with: {:?}", err); + match err { + ClientError::ConsensusShuttingDown(_) => { + info!("{}", msg); + } + ClientError::OversizedTransaction(_, _) => { + if cfg!(debug_assertions) { + panic!("{}", msg); + } else { + error!("{}", msg); + } + } + }; }) .map_err(|err| SuiError::FailedToSubmitToConsensus(err.to_string()))?; diff --git a/crates/sui-core/src/rest_index.rs b/crates/sui-core/src/rest_index.rs index 67d9de2ed69ee..ade98f8ada4dd 100644 --- a/crates/sui-core/src/rest_index.rs +++ b/crates/sui-core/src/rest_index.rs @@ -15,13 +15,13 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::Instant; -use sui_rest_api::CheckpointData; use sui_types::base_types::MoveObjectType; use sui_types::base_types::ObjectID; use sui_types::base_types::SequenceNumber; use sui_types::base_types::SuiAddress; use sui_types::digests::TransactionDigest; use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldType}; +use sui_types::full_checkpoint_content::CheckpointData; use sui_types::layout_resolver::LayoutResolver; use sui_types::messages_checkpoint::CheckpointContents; use sui_types::object::Object; @@ -114,9 +114,9 @@ struct IndexStoreTables { /// /// A few uses for this singleton: /// - determining if the DB has been initialized (as some tables will still be empty post - /// initializatio) + /// initialization) /// - version of the DB. Every time a new table or schema is changed the version number needs to - /// be incremented. + /// be incremented. meta: DBMap<(), MetadataInfo>, /// An index of extra metadata for Transactions. @@ -406,7 +406,7 @@ impl IndexStoreTables { for tx in &checkpoint.transactions { // determine changes from removed objects - for removed_object in tx.removed_objects() { + for removed_object in tx.removed_objects_pre_version() { match removed_object.owner() { Owner::AddressOwner(address) => { let owner_key = OwnerIndexKey::new(*address, removed_object.id()); diff --git a/crates/sui-core/src/storage.rs b/crates/sui-core/src/storage.rs index d2f6606b9fb95..fe274bd1f0123 100644 --- a/crates/sui-core/src/storage.rs +++ b/crates/sui-core/src/storage.rs @@ -119,10 +119,16 @@ impl ReadStore for RocksDbStore { } fn get_lowest_available_checkpoint(&self) -> Result<CheckpointSequenceNumber> { - self.checkpoint_store + let highest_pruned_cp = self + .checkpoint_store .get_highest_pruned_checkpoint_seq_number() - .map(|seq| seq + 1) - .map_err(Into::into) + .map_err(Into::<sui_types::storage::error::Error>::into)?; + + if highest_pruned_cp == 0 { + Ok(0) + } else { + Ok(highest_pruned_cp + 1) + } } fn get_full_checkpoint_contents_by_sequence_number( @@ -233,7 +239,8 @@ impl ReadStore for RocksDbStore { fn get_latest_checkpoint(&self) -> sui_types::storage::error::Result<VerifiedCheckpoint> { self.checkpoint_store - .get_latest_certified_checkpoint() + .get_highest_executed_checkpoint() + .map_err(sui_types::storage::error::Error::custom)? 
.ok_or_else(|| { sui_types::storage::error::Error::missing("unable to get latest checkpoint") }) @@ -507,11 +514,17 @@ impl RestStateReader for RestReadStore { fn get_lowest_available_checkpoint_objects( &self, ) -> sui_types::storage::error::Result<CheckpointSequenceNumber> { - self.state + let highest_pruned_cp = self + .state .get_object_cache_reader() .get_highest_pruned_checkpoint() - .map(|seq| seq + 1) - .map_err(StorageError::custom) + .map_err(StorageError::custom)?; + + if highest_pruned_cp == 0 { + Ok(0) + } else { + Ok(highest_pruned_cp + 1) + } } fn get_chain_identifier( diff --git a/crates/sui-core/src/transaction_input_loader.rs b/crates/sui-core/src/transaction_input_loader.rs index a33fbd1e45f89..b9f1028598c88 100644 --- a/crates/sui-core/src/transaction_input_loader.rs +++ b/crates/sui-core/src/transaction_input_loader.rs @@ -34,7 +34,7 @@ impl TransactionInputLoader { /// a single hash map lookup when notify_read_objects_for_execution is called later. /// TODO: implement this caching #[instrument(level = "trace", skip_all)] - pub async fn read_objects_for_signing( + pub fn read_objects_for_signing( &self, _tx_digest_for_caching: Option<&TransactionDigest>, input_object_kinds: &[InputObjectKind], @@ -122,7 +122,7 @@ impl TransactionInputLoader { /// cached, but only with appropriate invalidation logic for when an object is received by a /// different tx first. #[instrument(level = "trace", skip_all)] - pub async fn read_objects_for_execution( + pub fn read_objects_for_execution( &self, shared_lock_store: &impl GetSharedLocks, tx_key: &TransactionKey, diff --git a/crates/sui-core/src/transaction_manager.rs b/crates/sui-core/src/transaction_manager.rs index 5accb919a8680..d7796b69edbc2 100644 --- a/crates/sui-core/src/transaction_manager.rs +++ b/crates/sui-core/src/transaction_manager.rs @@ -62,6 +62,7 @@ pub struct TransactionManager { #[derive(Clone, Debug)] pub struct PendingCertificateStats { // The time this certificate enters transaction manager. + #[allow(unused)] pub enqueue_time: Instant, // The time this certificate becomes ready for execution. pub ready_time: Option<Instant>, @@ -966,9 +967,7 @@ impl TransactionQueue { /// After removing the digest, first() will return the new oldest entry /// in the queue (which may be unchanged). fn remove(&mut self, digest: &TransactionDigest) -> Option<Instant> { - let Some(when) = self.digests.remove(digest) else { - return None; - }; + let when = self.digests.remove(digest)?; // This loop removes all previously inserted entries that no longer // correspond to live entries in self.digests. 
When the loop terminates, diff --git a/crates/sui-core/src/transaction_orchestrator.rs b/crates/sui-core/src/transaction_orchestrator.rs index 1db608f9e7b6c..ce69aeb474033 100644 --- a/crates/sui-core/src/transaction_orchestrator.rs +++ b/crates/sui-core/src/transaction_orchestrator.rs @@ -17,7 +17,7 @@ use futures::future::{select, Either, Future}; use futures::FutureExt; use mysten_common::sync::notify_read::NotifyRead; use mysten_metrics::histogram::{Histogram, HistogramVec}; -use mysten_metrics::{spawn_logged_monitored_task, spawn_monitored_task}; +use mysten_metrics::{add_server_timing, spawn_logged_monitored_task, spawn_monitored_task}; use mysten_metrics::{TX_TYPE_SHARED_OBJ_TX, TX_TYPE_SINGLE_WRITER_TX}; use prometheus::core::{AtomicI64, AtomicU64, GenericCounter, GenericGauge}; use prometheus::{ @@ -111,22 +111,15 @@ where ); let effects_receiver = quorum_driver_handler.subscribe_to_effects(); - let state_clone = validator_state.clone(); let metrics = Arc::new(TransactionOrchestratorMetrics::new(prometheus_registry)); - let metrics_clone = metrics.clone(); let pending_tx_log = Arc::new(WritePathPendingTransactionLog::new( parent_path.join("fullnode_pending_transactions"), )); let pending_tx_log_clone = pending_tx_log.clone(); let _local_executor_handle = { spawn_monitored_task!(async move { - Self::loop_execute_finalized_tx_locally( - state_clone, - effects_receiver, - pending_tx_log_clone, - metrics_clone, - ) - .await; + Self::loop_execute_finalized_tx_locally(effects_receiver, pending_tx_log_clone) + .await; }) }; Self::schedule_txes_in_log(pending_tx_log.clone(), quorum_driver_handler.clone()); @@ -172,7 +165,7 @@ where transaction, response.effects_cert.executed_epoch(), ); - Self::execute_finalized_tx_locally_with_timeout( + let executed_locally = Self::execute_finalized_tx_locally_with_timeout( &self.validator_state, &epoch_store, &executable_tx, @@ -180,7 +173,9 @@ where &self.metrics, ) .await - .is_ok() + .is_ok(); + add_server_timing("local_execution"); + executed_locally } else { false }; @@ -286,6 +281,7 @@ where self.metrics.wait_for_finality_timeout.inc(); return Err(QuorumDriverError::TimeoutBeforeFinality); }; + add_server_timing("wait_for_finality"); drop(_txn_finality_timer); drop(_wait_for_finality_gauge); @@ -428,57 +424,19 @@ where } async fn loop_execute_finalized_tx_locally( - validator_state: Arc<AuthorityState>, mut effects_receiver: Receiver<QuorumDriverEffectsQueueResult>, pending_transaction_log: Arc<WritePathPendingTransactionLog>, - metrics: Arc<TransactionOrchestratorMetrics>, ) { loop { match effects_receiver.recv().await { - Ok(Ok((transaction, QuorumDriverResponse { effects_cert, .. }))) => { + Ok(Ok((transaction, ..))) => { let tx_digest = transaction.digest(); if let Err(err) = pending_transaction_log.finish_transaction(tx_digest) { - panic!( - "Failed to finish transaction {tx_digest} in pending transaction log: {err}" + error!( + ?tx_digest, + "Failed to finish transaction in pending transaction log: {err}" ); } - - if transaction.contains_shared_object() { - // Do not locally execute transactions with shared objects, as this can - // cause forks until MVCC is merged. - continue; - } - - let epoch_store = validator_state.load_epoch_store_one_call_per_task(); - - // This is a redundant verification, but SignatureVerifier will cache the - // previous result. - let transaction = match epoch_store.verify_transaction(transaction) { - Ok(transaction) => transaction, - Err(err) => { - // This should be impossible, since we verified the transaction - // before sending it to quorum driver. 
- error!( - ?err, - "Transaction signature failed to verify after quorum driver execution." - ); - continue; - } - }; - - let executable_tx = VerifiedExecutableTransaction::new_from_quorum_execution( - transaction, - effects_cert.executed_epoch(), - ); - - let _ = Self::execute_finalized_tx_locally_with_timeout( - &validator_state, - &epoch_store, - &executable_tx, - &effects_cert, - &metrics, - ) - .await; } Ok(Err((tx_digest, _err))) => { if let Err(err) = pending_transaction_log.finish_transaction(&tx_digest) { @@ -756,18 +714,15 @@ impl TransactionOrchestratorMetrics { } #[async_trait::async_trait] -impl<A> sui_rest_api::TransactionExecutor for TransactiondOrchestrator<A> +impl<A> sui_types::transaction_executor::TransactionExecutor for TransactiondOrchestrator<A> where A: AuthorityAPI + Send + Sync + 'static + Clone, { async fn execute_transaction( &self, - request: sui_types::quorum_driver_types::ExecuteTransactionRequestV3, + request: ExecuteTransactionRequestV3, client_addr: Option<std::net::SocketAddr>, - ) -> Result< - sui_types::quorum_driver_types::ExecuteTransactionResponseV3, - sui_types::quorum_driver_types::QuorumDriverError, - > { + ) -> Result<ExecuteTransactionResponseV3, QuorumDriverError> { self.execute_transaction_v3(request, client_addr).await } } diff --git a/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs b/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs index 56260c18e8e40..ff43d0cac5a3e 100644 --- a/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs @@ -693,7 +693,7 @@ fn get_genesis_agg( .build_custom_clients(clients) } -fn get_agg_at_epoch( +fn get_agg_at_epoch( authorities: BTreeMap<AuthorityName, StakeUnit>, clients: BTreeMap<AuthorityName, A>, epoch: EpochId, diff --git a/crates/sui-core/src/unit_tests/authority_tests.rs b/crates/sui-core/src/unit_tests/authority_tests.rs index b0b48a4dfedb6..75e72190ec5f9 100644 --- a/crates/sui-core/src/unit_tests/authority_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_tests.rs @@ -6156,7 +6156,6 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { .unwrap(), authority.epoch_store_for_testing().epoch(), ) - .await .unwrap(); // The lamport version should be the lamport version of the owned objects. 
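The `.await` removals threaded through the authority.rs hunks and the unit-test hunk above all follow from `read_objects_for_signing` / `read_objects_for_execution` becoming plain synchronous methods on the input loader. A minimal sketch of the resulting call-shape change, using a hypothetical stand-in type rather than the real `TransactionInputLoader`:

// Hypothetical stand-in for the input loader; only the signature shape matters here.
struct InputLoader;

impl InputLoader {
    // Now a plain method: object reads are served directly from the store,
    // so there is no future for the caller to await.
    fn read_objects_for_execution(&self, input_ids: &[u64]) -> Result<Vec<u64>, String> {
        Ok(input_ids.to_vec())
    }
}

fn main() {
    let loader = InputLoader;
    // before: loader.read_objects_for_execution(&[1, 2]).await?
    // after (matching the `- .await` hunks above):
    let objects = loader.read_objects_for_execution(&[1, 2]).unwrap();
    assert_eq!(objects.len(), 2);
}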
diff --git a/crates/sui-core/src/unit_tests/execution_driver_tests.rs b/crates/sui-core/src/unit_tests/execution_driver_tests.rs index e9a5f381dceeb..a138886c4f6a7 100644 --- a/crates/sui-core/src/unit_tests/execution_driver_tests.rs +++ b/crates/sui-core/src/unit_tests/execution_driver_tests.rs @@ -46,7 +46,7 @@ use tokio::time::{sleep, timeout}; #[allow(dead_code)] async fn wait_for_certs( stream: &mut UnboundedReceiver<VerifiedCertificate>, - certs: &Vec<VerifiedCertificate>, + certs: &[VerifiedCertificate], ) { if certs.is_empty() { if timeout(Duration::from_secs(30), stream.recv()) diff --git a/crates/sui-core/src/unit_tests/move_integration_tests.rs b/crates/sui-core/src/unit_tests/move_integration_tests.rs index 1df56180e648a..7e43c4f7ac14b 100644 --- a/crates/sui-core/src/unit_tests/move_integration_tests.rs +++ b/crates/sui-core/src/unit_tests/move_integration_tests.rs @@ -2806,7 +2806,7 @@ pub fn build_package( let compiled_package = BuildConfig::new_for_testing().build(&path).unwrap(); let digest = compiled_package.get_package_digest(with_unpublished_deps); let modules = compiled_package.get_package_bytes(with_unpublished_deps); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); (digest.to_vec(), modules, dependencies) } @@ -2826,7 +2826,7 @@ pub async fn build_and_try_publish_test_package( let compiled_package = BuildConfig::new_for_testing().build(&path).unwrap(); let all_module_bytes = compiled_package.get_package_bytes(with_unpublished_deps); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let gas_object = authority.get_object(gas_object_id).await.unwrap(); let gas_object_ref = gas_object.unwrap().compute_object_reference(); diff --git a/crates/sui-core/src/unit_tests/mysticeti_manager_tests.rs b/crates/sui-core/src/unit_tests/mysticeti_manager_tests.rs index 394191e235bdd..f56e373ec8c54 100644 --- a/crates/sui-core/src/unit_tests/mysticeti_manager_tests.rs +++ b/crates/sui-core/src/unit_tests/mysticeti_manager_tests.rs @@ -29,32 +29,35 @@ async fn test_mysticeti_manager() { .committee_size(1.try_into().unwrap()) .build(); - for _i in 0..3 { - let config = &configs.validator_configs()[0]; + let config = &configs.validator_configs()[0]; - let consensus_config = config.consensus_config().unwrap(); - let registry_service = RegistryService::new(Registry::new()); - let secret = Arc::pin(config.protocol_key_pair().copy()); - let genesis = config.genesis().unwrap(); + let consensus_config = config.consensus_config().unwrap(); + let registry_service = RegistryService::new(Registry::new()); + let secret = Arc::pin(config.protocol_key_pair().copy()); + let genesis = config.genesis().unwrap(); - let state = TestAuthorityBuilder::new() - .with_genesis_and_keypair(genesis, &secret) - .build() - .await; + let state = TestAuthorityBuilder::new() + .with_genesis_and_keypair(genesis, &secret) + .build() + .await; - let metrics = Arc::new(ConsensusManagerMetrics::new(&Registry::new())); - let epoch_store = state.epoch_store_for_testing(); - let client = Arc::new(LazyMysticetiClient::default()); - - let manager = MysticetiManager::new( - config.worker_key_pair().copy(), - config.network_key_pair().copy(), - consensus_config.db_path().to_path_buf(), - registry_service, - metrics, - client, - ); + let metrics = Arc::new(ConsensusManagerMetrics::new(&Registry::new())); + let epoch_store = state.epoch_store_for_testing(); + let client 
= Arc::new(LazyMysticetiClient::default()); + + let manager = MysticetiManager::new( + config.worker_key_pair().copy(), + config.network_key_pair().copy(), + consensus_config.db_path().to_path_buf(), + registry_service, + metrics, + client, + ); + let boot_counter = *manager.boot_counter.lock().await; + assert_eq!(boot_counter, 0); + + for i in 1..=3 { let consensus_handler_initializer = ConsensusHandlerInitializer::new_for_testing( state.clone(), checkpoint_service_for_testing(state.clone()), @@ -86,5 +89,8 @@ async fn test_mysticeti_manager() { // THEN assert!(!manager.is_running().await); + + let boot_counter = *manager.boot_counter.lock().await; + assert_eq!(boot_counter, i); } } diff --git a/crates/sui-data-ingestion-core/src/progress_store/mod.rs b/crates/sui-data-ingestion-core/src/progress_store/mod.rs index aa844e7695310..c7358eb117580 100644 --- a/crates/sui-data-ingestion-core/src/progress_store/mod.rs +++ b/crates/sui-data-ingestion-core/src/progress_store/mod.rs @@ -38,11 +38,9 @@ impl<P: ProgressStore> ProgressStore for ProgressStoreWrapper<P>
{ task_name: String, checkpoint_number: CheckpointSequenceNumber, ) -> Result<()> { - if checkpoint_number > self.load(task_name.clone()).await? { - self.progress_store - .save(task_name.clone(), checkpoint_number) - .await?; - } + self.progress_store + .save(task_name.clone(), checkpoint_number) + .await?; self.pending_state.insert(task_name, checkpoint_number); Ok(()) } diff --git a/crates/sui-data-ingestion/src/progress_store.rs b/crates/sui-data-ingestion/src/progress_store.rs index c8524ccbd5331..b82d91c85b2ab 100644 --- a/crates/sui-data-ingestion/src/progress_store.rs +++ b/crates/sui-data-ingestion/src/progress_store.rs @@ -4,6 +4,7 @@ use anyhow::Result; use async_trait::async_trait; use aws_config::timeout::TimeoutConfig; +use aws_sdk_dynamodb::error::SdkError; use aws_sdk_dynamodb::types::AttributeValue; use aws_sdk_dynamodb::Client; use aws_sdk_s3::config::{Credentials, Region}; @@ -58,7 +59,7 @@ impl ProgressStore for DynamoDBProgressStore { .send() .await?; if let Some(output) = item.item() { - if let AttributeValue::S(checkpoint_number) = &output["state"] { + if let AttributeValue::N(checkpoint_number) = &output["nstate"] { return Ok(CheckpointSequenceNumber::from_str(checkpoint_number)?); } } @@ -71,14 +72,29 @@ impl ProgressStore for DynamoDBProgressStore { ) -> Result<()> { let backoff = backoff::ExponentialBackoff::default(); backoff::future::retry(backoff, || async { - self.client - .put_item() + let result = self + .client + .update_item() .table_name(self.table_name.clone()) - .item("task_name", AttributeValue::S(task_name.clone())) - .item("state", AttributeValue::S(checkpoint_number.to_string())) + .key("task_name", AttributeValue::S(task_name.clone())) + .update_expression("SET #nstate = :newState") + .condition_expression("#nstate < :newState") + .expression_attribute_names("#nstate", "nstate") + .expression_attribute_values( + ":newState", + AttributeValue::N(checkpoint_number.to_string()), + ) .send() - .await - .map_err(backoff::Error::transient) + .await; + match result { + Ok(_) => Ok(()), + Err(SdkError::ServiceError(err)) + if err.err().is_conditional_check_failed_exception() => + { + Ok(()) + } + Err(err) => Err(backoff::Error::transient(err)), + } }) .await?; Ok(()) diff --git a/crates/sui-e2e-tests/Cargo.toml b/crates/sui-e2e-tests/Cargo.toml index 4d7e294f06c75..970458f3a9e94 100644 --- a/crates/sui-e2e-tests/Cargo.toml +++ b/crates/sui-e2e-tests/Cargo.toml @@ -6,6 +6,9 @@ publish = false edition = "2021" version.workspace = true +[lints] +workspace = true + [dependencies] [dev-dependencies] @@ -67,4 +70,4 @@ passkey-client.workspace = true passkey-authenticator.workspace = true coset.workspace = true url.workspace = true -p256.workspace = true \ No newline at end of file +p256.workspace = true diff --git a/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs b/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs index 3d0b374dfefa8..454328bace837 100644 --- a/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs +++ b/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs @@ -57,6 +57,7 @@ trait StatePredicate { runner: &StressTestRunner, effects: &TransactionEffects, ); + #[allow(unused)] async fn post_epoch_post_condition( &mut self, runner: &StressTestRunner, @@ -351,7 +352,7 @@ async fn fuzz_dynamic_committee() { let num_operations = 10; // Add more actions here as we create them - let actions = vec![Box::new(add_stake::RequestAddStakeGen)]; + let actions = [Box::new(add_stake::RequestAddStakeGen)]; let mut runner = StressTestRunner::new().await; 
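The DynamoDB progress-store hunk above replaces a blind `put_item` with a conditional `update_item`: the `#nstate < :newState` condition makes the stored checkpoint watermark monotonic, and a failed conditional check from a stale writer is treated as success rather than retried. A minimal in-memory sketch of that save semantics (names hypothetical, not the aws-sdk API):

use std::collections::HashMap;

// Monotonic watermark save, mirroring condition_expression "#nstate < :newState".
fn save_watermark(store: &mut HashMap<String, u64>, task: &str, checkpoint: u64) {
    let entry = store.entry(task.to_string()).or_insert(0);
    if *entry < checkpoint {
        // Condition holds: the write wins.
        *entry = checkpoint;
    }
    // Otherwise the conditional check fails: a newer checkpoint is already
    // recorded, so the stale write is dropped and reported as Ok, matching
    // the is_conditional_check_failed_exception() => Ok(()) arm above.
}

fn main() {
    let mut store = HashMap::new();
    save_watermark(&mut store, "task", 7);
    save_watermark(&mut store, "task", 5); // stale write: ignored, not an error
    assert_eq!(store["task"], 7);
}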
diff --git a/crates/sui-e2e-tests/tests/full_node_tests.rs b/crates/sui-e2e-tests/tests/full_node_tests.rs index 87ee651e03879..90c018f08aa2f 100644 --- a/crates/sui-e2e-tests/tests/full_node_tests.rs +++ b/crates/sui-e2e-tests/tests/full_node_tests.rs @@ -7,6 +7,7 @@ use jsonrpsee::rpc_params; use move_core_types::annotated_value::MoveStructLayout; use move_core_types::ident_str; use rand::rngs::OsRng; +use std::path::PathBuf; use std::sync::Arc; use sui::client_commands::{OptsWithGas, SuiClientCommandResult, SuiClientCommands}; use sui_config::node::RunWithRange; @@ -1407,3 +1408,29 @@ async fn test_full_node_run_with_range_epoch() -> Result<(), anyhow::Error> { Ok(()) } + +// This test checks that the fullnode is able to resolve events emitted from a transaction +// that references the structs defined in the package published by the transaction itself, +// without local execution. +#[sim_test] +async fn publish_init_events_without_local_execution() { + let test_cluster = TestClusterBuilder::new().build().await; + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/move_test_code"); + let tx_data = test_cluster + .test_transaction_builder() + .await + .publish(path) + .build(); + let tx = test_cluster.sign_transaction(&tx_data); + let client = test_cluster.wallet.get_client().await.unwrap(); + let response = client + .quorum_driver_api() + .execute_transaction_block( + tx, + SuiTransactionBlockResponseOptions::new().with_events(), + Some(ExecuteTransactionRequestType::WaitForEffectsCert), + ) + .await + .unwrap(); + assert_eq!(response.events.unwrap().data.len(), 1); +} diff --git a/crates/sui-e2e-tests/tests/move_test_code/sources/init_with_events.move b/crates/sui-e2e-tests/tests/move_test_code/sources/init_with_events.move new file mode 100644 index 0000000000000..0230961c8e19e --- /dev/null +++ b/crates/sui-e2e-tests/tests/move_test_code/sources/init_with_events.move @@ -0,0 +1,10 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module move_test_code::init_with_event { + public struct Event has drop, copy {} + + fun init(_ctx: &mut TxContext) { + sui::event::emit(Event {}); + } +} \ No newline at end of file diff --git a/crates/sui-e2e-tests/tests/rest.rs b/crates/sui-e2e-tests/tests/rest.rs index 7795e04599008..ff408edee6549 100644 --- a/crates/sui-e2e-tests/tests/rest.rs +++ b/crates/sui-e2e-tests/tests/rest.rs @@ -15,9 +15,7 @@ use test_cluster::TestClusterBuilder; async fn execute_transaction_transfer() { let test_cluster = TestClusterBuilder::new().build().await; - let rest_url = format!("{}/v2", test_cluster.rpc_url()); - - let client = Client::new(rest_url); + let client = Client::new(test_cluster.rpc_url()); let address = SuiAddress::random_for_testing_only(); let amount = 9; diff --git a/crates/sui-e2e-tests/tests/traffic_control_tests.rs b/crates/sui-e2e-tests/tests/traffic_control_tests.rs index 1c4abc4ba8636..4ae24ddcc7ea6 100644 --- a/crates/sui-e2e-tests/tests/traffic_control_tests.rs +++ b/crates/sui-e2e-tests/tests/traffic_control_tests.rs @@ -5,6 +5,7 @@ //! they should nearly all be tokio::test rather than simtest. 
use core::panic; +use fastcrypto::encoding::Base64; use jsonrpsee::{ core::{client::ClientT, RpcResult}, rpc_params, @@ -316,6 +317,63 @@ async fn test_fullnode_traffic_control_spam_blocked() -> Result<(), anyhow::Erro panic!("Expected spam policy to trigger within {txn_count} requests"); } +#[tokio::test] +async fn test_fullnode_traffic_control_error_blocked() -> Result<(), anyhow::Error> { + let txn_count = 5; + let policy_config = PolicyConfig { + connection_blocklist_ttl_sec: 3, + error_policy_type: PolicyType::TestNConnIP(txn_count - 1), + dry_run: false, + ..Default::default() + }; + let test_cluster = TestClusterBuilder::new() + .with_fullnode_policy_config(Some(policy_config)) + .build() + .await; + + let jsonrpc_client = &test_cluster.fullnode_handle.rpc_client; + let context = test_cluster.wallet; + + let mut txns = batch_make_transfer_transactions(&context, txn_count as usize).await; + assert!( + txns.len() >= txn_count as usize, + "Expect at least {} txns. Do we generate enough gas objects during genesis?", + txn_count, + ); + + // it should take no more than 4 requests to be added to the blocklist + for _ in 0..txn_count { + let txn = txns.swap_remove(0); + let tx_digest = txn.digest(); + let (tx_bytes, _signatures) = txn.to_tx_bytes_and_signatures(); + // create invalid (empty) client signature + let signatures: Vec<Base64> = vec![]; + let params = rpc_params![ + tx_bytes, + signatures, + SuiTransactionBlockResponseOptions::new(), + ExecuteTransactionRequestType::WaitForLocalExecution + ]; + let response: RpcResult<SuiTransactionBlockResponse> = jsonrpc_client + .request("sui_executeTransactionBlock", params.clone()) + .await; + if let Err(err) = response { + if err.to_string().contains("Too many requests") { + return Ok(()); + } + } else { + let SuiTransactionBlockResponse { + digest, + confirmed_local_execution, + .. + } = response.unwrap(); + assert_eq!(&digest, tx_digest); + assert!(confirmed_local_execution.unwrap()); + } + } + panic!("Expected error policy to trigger within {txn_count} requests"); +} + #[tokio::test] async fn test_validator_traffic_control_error_delegated() -> Result<(), anyhow::Error> { let n = 5;
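The new error-policy test above drives the traffic controller end to end: every request that fails signature verification counts against the client's connection IP, and once `TestNConnIP(txn_count - 1)` errors accumulate, the node answers "Too many requests" until `connection_blocklist_ttl_sec` elapses. A stand-alone sketch of that bookkeeping, assuming a deliberately simplified policy (the real `PolicyConfig`/traffic controller track more state):

use std::collections::HashMap;
use std::net::IpAddr;
use std::time::{Duration, Instant};

// Count errors per client IP; block once `threshold` is reached,
// and forget the block after `ttl` (the blocklist TTL).
struct ErrorPolicy {
    threshold: u64,
    ttl: Duration,
    errors: HashMap<IpAddr, u64>,
    blocked_until: HashMap<IpAddr, Instant>,
}

impl ErrorPolicy {
    fn new(threshold: u64, ttl: Duration) -> Self {
        Self { threshold, ttl, errors: HashMap::new(), blocked_until: HashMap::new() }
    }

    // Returns true if the client is currently on the blocklist.
    fn is_blocked(&mut self, ip: IpAddr) -> bool {
        if let Some(&until) = self.blocked_until.get(&ip) {
            if Instant::now() < until {
                return true;
            }
            // TTL expired: drop the block and reset the error counter.
            self.blocked_until.remove(&ip);
            self.errors.remove(&ip);
        }
        false
    }

    // Record a failed request (e.g. an invalid signature).
    fn record_error(&mut self, ip: IpAddr) {
        let count = self.errors.entry(ip).or_insert(0);
        *count += 1;
        if *count >= self.threshold {
            self.blocked_until.insert(ip, Instant::now() + self.ttl);
        }
    }
}

fn main() {
    let mut policy = ErrorPolicy::new(4, Duration::from_secs(3));
    let ip: IpAddr = "127.0.0.1".parse().unwrap();
    for _ in 0..4 {
        assert!(!policy.is_blocked(ip));
        policy.record_error(ip);
    }
    assert!(policy.is_blocked(ip)); // fifth request is refused
}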
diff --git a/crates/sui-faucet/src/faucet/simple_faucet.rs index 0bdbb8641d764..00133d92dcd03 100644 --- a/crates/sui-faucet/src/faucet/simple_faucet.rs +++ b/crates/sui-faucet/src/faucet/simple_faucet.rs @@ -328,6 +328,7 @@ impl SimpleFaucet { /// Check if the gas coin is still valid. A valid gas coin /// 1. Exists presently /// 2. is a gas coin + /// /// If the coin is valid, return Ok(Some(GasCoin)) /// If the coin is invalid, return Ok(None) /// If the fullnode returns an unexpected error, return Err(e) @@ -1303,7 +1304,7 @@ mod tests { .await .unwrap(); - let amounts = &vec![coin_amount]; + let amounts = &[coin_amount]; // Create a vector containing five randomly generated addresses let target_addresses: Vec<SuiAddress> = (0..5) @@ -1384,7 +1385,7 @@ mod tests { .await .unwrap(); - let amounts = &vec![1; 1]; + let amounts = &[1; 1]; // Create a vector containing five randomly generated addresses let target_addresses: Vec<SuiAddress> = (0..5) .map(|_| SuiAddress::random_for_testing_only()) diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000001 b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000001 new file mode 100644 index 0000000000000..6aae601648984 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000001 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000002 b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000002 new file mode 100644 index 0000000000000..54d541c98c202 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000002 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000003 b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000003 new file mode 100644 index 0000000000000..f5e57b11051a4 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x0000000000000000000000000000000000000000000000000000000000000003 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/55/0x000000000000000000000000000000000000000000000000000000000000000b b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x000000000000000000000000000000000000000000000000000000000000000b new file mode 100644 index 0000000000000..7a2443d5f5413 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x000000000000000000000000000000000000000000000000000000000000000b differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/55/0x000000000000000000000000000000000000000000000000000000000000dee9 b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x000000000000000000000000000000000000000000000000000000000000dee9 new file mode 100644 index 0000000000000..d6568ff2626fa Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/55/0x000000000000000000000000000000000000000000000000000000000000dee9 differ diff --git a/crates/sui-framework-snapshot/manifest.json b/crates/sui-framework-snapshot/manifest.json index 0e67794cdd02f..52a6de4971010 100644 --- a/crates/sui-framework-snapshot/manifest.json +++ b/crates/sui-framework-snapshot/manifest.json @@ -436,5 +436,15 @@ "0x000000000000000000000000000000000000000000000000000000000000dee9", "0x000000000000000000000000000000000000000000000000000000000000000b" ] + }, + "55": { + "git_revision": "495a499c3ed2", + "package_ids": [ + "0x0000000000000000000000000000000000000000000000000000000000000001",
"0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x000000000000000000000000000000000000000000000000000000000000dee9", + "0x000000000000000000000000000000000000000000000000000000000000000b" + ] } -} +} \ No newline at end of file diff --git a/crates/sui-framework-tests/Cargo.toml b/crates/sui-framework-tests/Cargo.toml index ad593b3634716..2093974fa826f 100644 --- a/crates/sui-framework-tests/Cargo.toml +++ b/crates/sui-framework-tests/Cargo.toml @@ -7,6 +7,9 @@ description = "Runs Move tests for sui-framework" license = "Apache-2.0" publish = false +[lints] +workspace = true + [[test]] name = "move_tests" harness = false diff --git a/crates/sui-framework-tests/src/metered_verifier.rs b/crates/sui-framework-tests/src/metered_verifier.rs index 0d30fe4febfbe..3c8ca440704f5 100644 --- a/crates/sui-framework-tests/src/metered_verifier.rs +++ b/crates/sui-framework-tests/src/metered_verifier.rs @@ -35,7 +35,7 @@ fn test_metered_move_bytecode_verifier() { let protocol_config = ProtocolConfig::get_for_max_version_UNSAFE(); let mut verifier_config = protocol_config.verifier_config(/* for_signing */ true); - let mut meter_config = protocol_config.meter_config(); + let mut meter_config = protocol_config.meter_config_for_signing(); let registry = &Registry::new(); let bytecode_verifier_metrics = Arc::new(BytecodeVerifierMetrics::new(registry)); let mut meter = SuiVerifierMeter::new(meter_config.clone()); @@ -202,7 +202,7 @@ fn test_metered_move_bytecode_verifier() { let protocol_config = ProtocolConfig::get_for_max_version_UNSAFE(); let verifier_config = protocol_config.verifier_config(/* for_signing */ true); - let meter_config = protocol_config.meter_config(); + let meter_config = protocol_config.meter_config_for_signing(); // Check if the same meter is indeed used multiple invocations of the verifier let mut meter = SuiVerifierMeter::new(meter_config); @@ -229,7 +229,7 @@ fn test_meter_system_packages() { let protocol_config = ProtocolConfig::get_for_max_version_UNSAFE(); let verifier_config = protocol_config.verifier_config(/* for_signing */ true); - let meter_config = protocol_config.meter_config(); + let meter_config = protocol_config.meter_config_for_signing(); let registry = &Registry::new(); let bytecode_verifier_metrics = Arc::new(BytecodeVerifierMetrics::new(registry)); let mut meter = SuiVerifierMeter::new(meter_config); @@ -283,7 +283,7 @@ fn test_build_and_verify_programmability_examples() { let protocol_config = ProtocolConfig::get_for_max_version_UNSAFE(); let verifier_config = protocol_config.verifier_config(/* for_signing */ true); - let meter_config = protocol_config.meter_config(); + let meter_config = protocol_config.meter_config_for_signing(); let registry = &Registry::new(); let bytecode_verifier_metrics = Arc::new(BytecodeVerifierMetrics::new(registry)); let examples = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../examples"); diff --git a/crates/sui-framework/packages/bridge/sources/bridge.move b/crates/sui-framework/packages/bridge/sources/bridge.move index 86b0b5ba08df4..f0dbc8d10788b 100644 --- a/crates/sui-framework/packages/bridge/sources/bridge.move +++ b/crates/sui-framework/packages/bridge/sources/bridge.move @@ -843,4 +843,65 @@ module bridge::bridge { let inner = load_inner_mut(bridge); inner.execute_add_tokens_on_sui(payload); } + + #[test_only] + public fun get_seq_num_for(bridge: &mut Bridge, message_type: u8): u64 { + let inner = load_inner_mut(bridge); + let 
seq_num = if (inner.sequence_nums.contains(&message_type)) { + inner.sequence_nums[&message_type] + } else { + inner.sequence_nums.insert(message_type, 0); + 0 + }; + seq_num + } + + #[test_only] + public fun get_seq_num_inc_for(bridge: &mut Bridge, message_type: u8): u64 { + let inner = load_inner_mut(bridge); + inner.get_current_seq_num_and_increment(message_type) + } + + #[test_only] + public fun transfer_approve_key(event: TokenTransferApproved): BridgeMessageKey { + event.message_key + } + + #[test_only] + public fun transfer_claimed_key(event: TokenTransferClaimed): BridgeMessageKey { + event.message_key + } + + #[test_only] + public fun transfer_already_approved_key(event: TokenTransferAlreadyApproved): BridgeMessageKey { + event.message_key + } + + #[test_only] + public fun transfer_already_claimed_key(event: TokenTransferAlreadyClaimed): BridgeMessageKey { + event.message_key + } + + #[test_only] + public fun transfer_limit_exceed_key(event: TokenTransferLimitExceed): BridgeMessageKey { + event.message_key + } + + #[test_only] + public fun unwrap_deposited_event(event: TokenDepositedEvent): (u64, u8, vector<u8>, u8, vector<u8>, u8, u64) { + ( + event.seq_num, + event.source_chain, + event.sender_address, + event.target_chain, + event.target_address, + event.token_type, + event.amount, + ) + } + + #[test_only] + public fun unwrap_emergency_op_event(event: EmergencyOpEvent): bool { + event.frozen + } } diff --git a/crates/sui-framework/packages/bridge/sources/limiter.move b/crates/sui-framework/packages/bridge/sources/limiter.move index 391476f3921b8..55f01477f68a7 100644 --- a/crates/sui-framework/packages/bridge/sources/limiter.move +++ b/crates/sui-framework/packages/bridge/sources/limiter.move @@ -276,4 +276,11 @@ module bridge::limiter { public(package) fun hour_tail(record: &TransferRecord): u64 { record.hour_tail } + + #[test_only] + public(package) fun unpack_route_limit_event(event: UpdateRouteLimitEvent): + (u8, u8, u64) + { + (event.sending_chain, event.receiving_chain, event.new_limit) + } } diff --git a/crates/sui-framework/packages/bridge/sources/message.move b/crates/sui-framework/packages/bridge/sources/message.move index e46d30b0e1aa9..769e57f3577a9 100644 --- a/crates/sui-framework/packages/bridge/sources/message.move +++ b/crates/sui-framework/packages/bridge/sources/message.move @@ -663,4 +663,9 @@ module bridge::message { token_prices, } } + + #[test_only] + public(package) fun unpack_message(msg: BridgeMessageKey): (u8, u8, u64) { + (msg.source_chain, msg.message_type, msg.bridge_seq_num) + } } diff --git a/crates/sui-framework/packages/bridge/sources/treasury.move b/crates/sui-framework/packages/bridge/sources/treasury.move index 27898d3a6e3b0..245189fa583f6 100644 --- a/crates/sui-framework/packages/bridge/sources/treasury.move +++ b/crates/sui-framework/packages/bridge/sources/treasury.move @@ -280,4 +280,19 @@ module bridge::treasury { public fun treasuries(treasury: &BridgeTreasury): &ObjectBag { &treasury.treasuries } + + #[test_only] + public fun unwrap_update_event(event: UpdateTokenPriceEvent): (u8, u64) { + (event.token_id, event.new_price) + } + + #[test_only] + public fun unwrap_new_token_event(event: NewTokenEvent): (u8, TypeName, bool, u64, u64) { + (event.token_id, event.type_name, event.native_token, event.decimal_multiplier, event.notional_value) + } + + #[test_only] + public fun unwrap_registration_event(event: TokenRegistrationEvent): (TypeName, u8, bool) { + (event.type_name, event.decimal, event.native_token) + } }
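The four Move hunks above share one pattern: event structs keep their fields private, and `#[test_only]` helpers unwrap them into tuples so tests can assert on emitted events without widening the production API. A Rust analogue of the same trade-off, as a sketch (the struct and field names mirror the Move code but are otherwise illustrative):

// An event type whose fields stay private to the defining module.
pub struct UpdateTokenPriceEvent {
    token_id: u8,
    new_price: u64,
}

impl UpdateTokenPriceEvent {
    // Test-only unwrapper, analogous to Move's `#[test_only] unwrap_update_event`:
    // compiled only for tests, so production callers never gain field access.
    #[cfg(test)]
    pub fn unwrap_update_event(self) -> (u8, u64) {
        (self.token_id, self.new_price)
    }
}

#[cfg(test)]
mod tests {
    use super::UpdateTokenPriceEvent;

    #[test]
    fn unwraps_fields() {
        let event = UpdateTokenPriceEvent { token_id: 3, new_price: 42 };
        assert_eq!(event.unwrap_update_event(), (3, 42));
    }
}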
diff --git a/crates/sui-framework/packages/bridge/tests/bridge_env.move b/crates/sui-framework/packages/bridge/tests/bridge_env.move new file mode 100644 index 0000000000000..c2ef819b3c7cb --- /dev/null +++ b/crates/sui-framework/packages/bridge/tests/bridge_env.move @@ -0,0 +1,1429 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[test_only] +module bridge::bridge_env { + use bridge::bridge::{ + assert_not_paused, + assert_paused, + create_bridge_for_testing, + inner_token_transfer_records, + test_init_bridge_committee, + test_load_inner_mut, + Bridge, + EmergencyOpEvent, + TokenDepositedEvent, + TokenTransferAlreadyApproved, + TokenTransferAlreadyClaimed, + TokenTransferApproved, + TokenTransferClaimed, + TokenTransferLimitExceed + }; + use bridge::btc::{Self, BTC}; + use bridge::chain_ids; + use bridge::committee::BlocklistValidatorEvent; + use bridge::eth::{Self, ETH}; + use bridge::limiter::UpdateRouteLimitEvent; + use bridge::message::{ + Self, + BridgeMessage, + create_add_tokens_on_sui_message, + create_blocklist_message, + emergency_op_pause, + emergency_op_unpause + }; + use bridge::message_types; + use bridge::test_token::{Self, TEST_TOKEN}; + use bridge::treasury::{ + TokenRegistrationEvent, + NewTokenEvent, + UpdateTokenPriceEvent + }; + use bridge::usdc::{Self, USDC}; + use bridge::usdt::{Self, USDT}; + use std::ascii::String; + use std::type_name; + use sui::address; + use sui::clock::Clock; + use sui::coin::{Self, Coin, CoinMetadata, TreasuryCap}; + use sui::ecdsa_k1::{KeyPair, secp256k1_keypair_from_seed, secp256k1_sign}; + use sui::event; + use sui::package::UpgradeCap; + use sui::test_scenario::{Self, Scenario}; + use sui::test_utils::destroy; + use sui_system::governance_test_utils::{ + advance_epoch_with_reward_amounts, + create_sui_system_state_for_testing, + create_validator_for_testing + }; + use sui_system::sui_system::{ + validator_voting_powers_for_testing, + SuiSystemState + }; + + // + // Token IDs + // + const BTC_ID: u8 = 1; + const ETH_ID: u8 = 2; + const USDC_ID: u8 = 3; + const USDT_ID: u8 = 4; + const TEST_TOKEN_ID: u8 = 5; + + public fun btc_id(): u8 { + BTC_ID + } + + public fun eth_id(): u8 { + ETH_ID + } + + public fun usdc_id(): u8 { + USDC_ID + } + + public fun usdt_id(): u8 { + USDT_ID + } + + public fun test_token_id(): u8 { + TEST_TOKEN_ID + } + + // + // Claim status + // + const CLAIMED: u8 = 1; + const ALREADY_CLAIMED: u8 = 2; + const LIMIT_EXCEEDED: u8 = 3; + + public fun claimed(): u8 { + CLAIMED + } + + public fun already_claimed(): u8 { + ALREADY_CLAIMED + } + + public fun limit_exceeded(): u8 { + LIMIT_EXCEEDED + } + + // + // Approve status + // + const APPROVED: u8 = 1; + const ALREADY_APPROVED: u8 = 2; + + public fun approved(): u8 { + APPROVED + } + + public fun already_approved(): u8 { + ALREADY_APPROVED + } + + // + // Validators setup and info + // + + // Validator info + public struct ValidatorInfo has drop { + validator: address, + key_pair: KeyPair, + stake_amount: u64, + } + + public fun addr(validator: &ValidatorInfo): address { + validator.validator + } + + public fun public_key(validator: &ValidatorInfo): &vector<u8> { + validator.key_pair.public_key() + } + + public fun create_validator( + validator: address, + stake_amount: u64, + seed: &vector<u8>, + ): ValidatorInfo { + ValidatorInfo { + validator, + key_pair: secp256k1_keypair_from_seed(seed), + stake_amount, + } + } + + // Bridge environment + public struct BridgeEnv { + scenario: Scenario, + validators: vector<ValidatorInfo>, + chain_id: u8, + vault: Vault, + clock: Clock, + }
+ + // Holds coins for different bridged tokens + public struct Vault { + btc_coins: Coin<BTC>, + eth_coins: Coin<ETH>, + usdc_coins: Coin<USDC>, + usdt_coins: Coin<USDT>, + test_coins: Coin<TEST_TOKEN>, + } + + // HotPotato to access shared state + // TODO: if the bridge is the only shared state we could remove this + public struct BridgeWrapper { + bridge: Bridge, + } + + public fun bridge(env: &mut BridgeEnv, sender: address): BridgeWrapper { + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let bridge = scenario.take_shared<Bridge>(); + BridgeWrapper { bridge } + } + + public fun bridge_ref(wrapper: &BridgeWrapper): &Bridge { + &wrapper.bridge + } + + public fun bridge_ref_mut(wrapper: &mut BridgeWrapper): &mut Bridge { + &mut wrapper.bridge + } + + public fun return_bridge(bridge: BridgeWrapper) { + let BridgeWrapper { bridge } = bridge; + test_scenario::return_shared(bridge); + } + + // + // Public functions + // + + // + // Environment creation and destruction + // + + public fun create_env(chain_id: u8): BridgeEnv { + let mut scenario = test_scenario::begin(@0x0); + let ctx = scenario.ctx(); + let mut clock = sui::clock::create_for_testing(ctx); + clock.set_for_testing(1_000_000_000); + let btc_coins = coin::zero(ctx); + let eth_coins = coin::zero(ctx); + let usdc_coins = coin::zero(ctx); + let usdt_coins = coin::zero(ctx); + let test_coins = coin::zero(ctx); + let vault = Vault { + btc_coins, + eth_coins, + usdc_coins, + usdt_coins, + test_coins, + }; + BridgeEnv { + scenario, + chain_id, + vault, + validators: vector::empty(), + clock, + } + } + + public fun destroy_env(env: BridgeEnv) { + let BridgeEnv { + scenario, + chain_id: _, + vault, + validators: _, + clock, + } = env; + destroy_valut(vault); + clock.destroy_for_testing(); + scenario.end(); + } + + // + // Add a set of validators to the chain. + // Call only once in a test scenario. + public fun setup_validators( + env: &mut BridgeEnv, + validators_info: vector<ValidatorInfo>, + ) { + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let ctx = scenario.ctx(); + let validators = validators_info.map_ref!( + |validator| { + create_validator_for_testing( + validator.validator, + validator.stake_amount, + ctx, + ) + }, + ); + env.validators = validators_info; + create_sui_system_state_for_testing(validators, 0, 0, ctx); + advance_epoch_with_reward_amounts(0, 0, scenario); + } + + // + // Bridge creation and setup + // + + // Set up an environment with 3 validators, a bridge with + // a treasury and a committee with all 3 validators. + // The treasury will contain 4 tokens: ETH, BTC, USDT, USDC. + // Save the Bridge as a shared object. + public fun create_bridge_default(env: &mut BridgeEnv) { + let validators = vector[ + create_validator( + @0xAAAA, + 100, + &b"1234567890_1234567890_1234567890", + ), + create_validator( + @0xBBBB, + 100, + &b"234567890_1234567890_1234567890_", + ), + create_validator( + @0xCCCC, + 100, + &b"34567890_1234567890_1234567890_1", + ), + ]; + env.setup_validators(validators); + + let sender = @0x0; + env.create_bridge(sender); + env.register_committee(); + env.init_committee(sender); + env.setup_treasury(sender); + }
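+ + // Typical use of this environment in a test, as a sketch (the chain id + // constructor is illustrative; any `bridge::chain_ids` value works): + // + // let mut env = create_env(chain_ids::sui_testnet()); + // env.create_bridge_default(); + // // ... exercise the bridge through the helpers below ... + // env.destroy_env();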
+ + // Create a bridge and set up a treasury. + // The treasury will contain 4 tokens: ETH, BTC, USDT, USDC. + // Save the Bridge as a shared object. + // No operation on the validators. + public fun create_bridge(env: &mut BridgeEnv, sender: address) { + env.scenario.next_tx(sender); + let ctx = env.scenario.ctx(); + create_bridge_for_testing(object::new(ctx), env.chain_id, ctx); + } + + // Register 3 committee members (validators `@0xAAAA`, `@0xBBBB`, `@0xCCCC`) + public fun register_committee(env: &mut BridgeEnv) { + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared<Bridge>(); + let mut system_state = test_scenario::take_shared<SuiSystemState>( + scenario, + ); + + env + .validators + .do_ref!( + |validator| { + scenario.next_tx(validator.validator); + bridge.committee_registration( + &mut system_state, + *validator.key_pair.public_key(), + b"", + scenario.ctx(), + ); + }, + ); + + test_scenario::return_shared(bridge); + test_scenario::return_shared(system_state); + } + + // Init the bridge committee + public fun init_committee(env: &mut BridgeEnv, sender: address) { + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + let mut system_state = test_scenario::take_shared<SuiSystemState>( + scenario, + ); + let voting_powers = validator_voting_powers_for_testing( + &mut system_state, + ); + bridge.test_init_bridge_committee( + voting_powers, + 50, + scenario.ctx(), + ); + test_scenario::return_shared(bridge); + test_scenario::return_shared(system_state); + } + + // Set up a treasury with 4 tokens: ETH, BTC, USDT, USDC. + public fun setup_treasury(env: &mut BridgeEnv, sender: address) { + env.register_default_tokens(sender); + env.add_default_tokens(sender); + env.load_vault(sender); + } + + // Register 4 tokens with the Bridge: ETH, BTC, USDT, USDC. + fun register_default_tokens(env: &mut BridgeEnv, sender: address) { + env.scenario.next_tx(sender); + let mut bridge = env.scenario.take_shared<Bridge>(); + + // BTC + let (upgrade_cap, treasury_cap, metadata) = btc::create_bridge_token(env + .scenario + .ctx()); + bridge.register_foreign_token( + treasury_cap, + upgrade_cap, + &metadata, + ); + destroy(metadata); + // ETH + let (upgrade_cap, treasury_cap, metadata) = eth::create_bridge_token(env + .scenario + .ctx()); + bridge.register_foreign_token( + treasury_cap, + upgrade_cap, + &metadata, + ); + destroy(metadata); + // USDC + let ( + upgrade_cap, + treasury_cap, + metadata, + ) = usdc::create_bridge_token(env.scenario.ctx()); + bridge.register_foreign_token( + treasury_cap, + upgrade_cap, + &metadata, + ); + destroy(metadata); + // USDT + let ( + upgrade_cap, + treasury_cap, + metadata, + ) = usdt::create_bridge_token(env.scenario.ctx()); + bridge.register_foreign_token( + treasury_cap, + upgrade_cap, + &metadata, + ); + destroy(metadata); + + test_scenario::return_shared(bridge); + } + + // Add the 4 tokens previously registered: ETH, BTC, USDT, USDC.
+ fun add_default_tokens(env: &mut BridgeEnv, sender: address) { + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + + let add_token_message = create_add_tokens_on_sui_message( + env.chain_id, + bridge.get_seq_num_for(message_types::add_tokens_on_sui()), + false, + vector[BTC_ID, ETH_ID, USDC_ID, USDT_ID], + vector[ + type_name::get<BTC>().into_string(), + type_name::get<ETH>().into_string(), + type_name::get<USDC>().into_string(), + type_name::get<USDT>().into_string(), + ], + vector[1000, 100, 1, 1], + ); + let signatures = env.sign_message(add_token_message); + bridge.execute_system_message(add_token_message, signatures); + + test_scenario::return_shared(bridge); + } + + // + // Utility functions for custom behavior + // + + public fun token_type<Token>(env: &mut BridgeEnv): u8 { + env.scenario.next_tx(@0x0); + let bridge = env.scenario.take_shared<Bridge>(); + let inner = bridge.test_load_inner(); + let token_id = inner.inner_treasury().token_id<Token>(); + test_scenario::return_shared(bridge); + token_id + } + + const SUI_MESSAGE_PREFIX: vector<u8> = b"SUI_BRIDGE_MESSAGE"; + + fun sign_message( + env: &BridgeEnv, + message: BridgeMessage, + ): vector<vector<u8>> { + let mut message_bytes = SUI_MESSAGE_PREFIX; + message_bytes.append(message.serialize_message()); + env + .validators + .map_ref!( + |validator| { + secp256k1_sign( + validator.key_pair.private_key(), + &message_bytes, + 0, + true, + ) + }, + ) + } + + public fun sign_message_with( + env: &BridgeEnv, + message: BridgeMessage, + validator_idxs: vector<u64>, + ): vector<vector<u8>> { + let mut message_bytes = SUI_MESSAGE_PREFIX; + message_bytes.append(message.serialize_message()); + validator_idxs.map!( + |idx| { + secp256k1_sign( + env.validators[idx].key_pair.private_key(), + &message_bytes, + 0, + true, + ) + }, + ) + } + + public fun bridge_in_message<Token>( + env: &mut BridgeEnv, + source_chain: u8, + source_address: vector<u8>, + target_address: address, + amount: u64, + ): BridgeMessage { + let token_type = env.token_type<Token>(); + + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared<Bridge>(); + + let message = message::create_token_bridge_message( + source_chain, + bridge.get_seq_num_inc_for(message_types::token()), + source_address, + env.chain_id, + address::to_bytes(target_address), + token_type, + amount, + ); + test_scenario::return_shared(bridge); + message + } + + public fun bridge_out_message<Token>( + env: &mut BridgeEnv, + target_chain: u8, + target_address: vector<u8>, + source_address: address, + amount: u64, + transfer_id: u64, + ): BridgeMessage { + let token_type = env.token_type<Token>(); + + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let bridge = scenario.take_shared<Bridge>(); + + let message = message::create_token_bridge_message( + env.chain_id, + transfer_id, + address::to_bytes(source_address), + target_chain, + target_address, + token_type, + amount, + ); + test_scenario::return_shared(bridge); + message + } + + public fun bridge_token_signed_message<Token>( + env: &mut BridgeEnv, + source_chain: u8, + source_address: vector<u8>, + target_address: address, + amount: u64, + ): (BridgeMessage, vector<vector<u8>>) { + let token_type = env.token_type<Token>(); + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared<Bridge>(); + let seq_num = bridge.get_seq_num_inc_for(message_types::token()); + test_scenario::return_shared(bridge); + let message = message::create_token_bridge_message(
source_chain, + seq_num, + source_address, + env.chain_id, + address::to_bytes(target_address), + token_type, + amount, + ); + let signatures = env.sign_message(message); + (message, signatures) + } + + // Bridge the `amount` of the given `Token` from the `source_chain`. + public fun bridge_to_sui<Token>( + env: &mut BridgeEnv, + source_chain: u8, + source_address: vector<u8>, + target_address: address, + amount: u64, + ): u64 { + let token_type = env.token_type<Token>(); + + // setup + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared<Bridge>(); + + // sign message + let seq_num = bridge.get_seq_num_inc_for(message_types::token()); + let message = message::create_token_bridge_message( + source_chain, + seq_num, + source_address, + env.chain_id, + address::to_bytes(target_address), + token_type, + amount, + ); + let signatures = env.sign_message(message); + + // run approval + bridge.approve_token_transfer(message, signatures); + + // verify approval events + let approved_events = event::events_by_type<TokenTransferApproved>(); + let already_approved_events = event::events_by_type< + TokenTransferAlreadyApproved, + >(); + assert!( + approved_events.length() == 1 || + already_approved_events.length() == 1, + ); + let key = if (approved_events.length() == 1) { + approved_events[0].transfer_approve_key() + } else { + already_approved_events[0].transfer_already_approved_key() + }; + let (sc, mt, sn) = key.unpack_message(); + assert!(source_chain == sc); + assert!(mt == message_types::token()); + assert!(sn == seq_num); + + // tear down + test_scenario::return_shared(bridge); + seq_num + } + + // Approves a token transfer + public fun approve_token_transfer( + env: &mut BridgeEnv, + message: BridgeMessage, + signatures: vector<vector<u8>>, + ): u8 { + let msg_key = message.key(); + + // set up + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared<Bridge>(); + + // run approval + bridge.approve_token_transfer(message, signatures); + + // verify approval events + let approved = event::events_by_type<TokenTransferApproved>(); + let already_approved = event::events_by_type< + TokenTransferAlreadyApproved, + >(); + assert!(approved.length() == 1 || already_approved.length() == 1); + let (key, approve_status) = if (approved.length() == 1) { + (approved[0].transfer_approve_key(), APPROVED) + } else { + ( + already_approved[0].transfer_already_approved_key(), + ALREADY_APPROVED, + ) + }; + assert!(msg_key == key); + + // tear down + test_scenario::return_shared(bridge); + approve_status + } + + // Claim a token transfer and return the coin + public fun claim_token<Token>( + env: &mut BridgeEnv, + sender: address, + source_chain: u8, + bridge_seq_num: u64, + ): Coin<Token> { + // set up + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let clock = &env.clock; + let mut bridge = scenario.take_shared<Bridge>(); + let ctx = scenario.ctx(); + let total_supply_before = get_total_supply<Token>(&bridge); + + // run claim + let token = bridge.claim_token<Token>( + clock, + source_chain, + bridge_seq_num, + ctx, + ); + + // verify value change and claim events + let token_value = token.value(); + assert!( + total_supply_before + token_value == get_total_supply<Token>(&bridge), + ); + let claimed = event::events_by_type<TokenTransferClaimed>(); + let already_claimed = event::events_by_type< + TokenTransferAlreadyClaimed, + >(); + let limit_exceeded = event::events_by_type<TokenTransferLimitExceed>(); + assert!( + claimed.length() == 1 || already_claimed.length() == 1 || + limit_exceeded.length() == 1, + ); + let key = if (claimed.length() == 1) {
claimed[0].transfer_claimed_key() + } else if (already_claimed.length() == 1) { + already_claimed[0].transfer_already_claimed_key() + } else { + limit_exceeded[0].transfer_limit_exceed_key() + }; + let (sc, mt, sn) = key.unpack_message(); + assert!(source_chain == sc); + assert!(mt == message_types::token()); + assert!(sn == bridge_seq_num); + + // tear down + test_scenario::return_shared(bridge); + token + } + + // Claim a token and transfer to the receiver in the bridge message + public fun claim_and_transfer_token<Token>( + env: &mut BridgeEnv, + source_chain: u8, + bridge_seq_num: u64, + ): u8 { + // set up + let sender = @0xA1B2C3; // random sender + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let clock = &env.clock; + let mut bridge = scenario.take_shared<Bridge>(); + let ctx = scenario.ctx(); + let total_supply_before = get_total_supply<Token>(&bridge); + + // run claim and transfer + bridge.claim_and_transfer_token<Token>( + clock, + source_chain, + bridge_seq_num, + ctx, + ); + + // verify claim events + let claimed = event::events_by_type<TokenTransferClaimed>(); + let already_claimed = event::events_by_type< + TokenTransferAlreadyClaimed, + >(); + let limit_exceeded = event::events_by_type<TokenTransferLimitExceed>(); + assert!( + claimed.length() == 1 || already_claimed.length() == 1 || + limit_exceeded.length() == 1, + ); + let (key, claim_status) = if (claimed.length() == 1) { + (claimed[0].transfer_claimed_key(), CLAIMED) + } else if (already_claimed.length() == 1) { + (already_claimed[0].transfer_already_claimed_key(), ALREADY_CLAIMED) + } else { + (limit_exceeded[0].transfer_limit_exceed_key(), LIMIT_EXCEEDED) + }; + let (sc, mt, sn) = key.unpack_message(); + assert!(source_chain == sc); + assert!(mt == message_types::token()); + assert!(sn == bridge_seq_num); + + // verify effects + let effects = scenario.next_tx(@0xABCDEF); + let created = effects.created(); + if (!created.is_empty()) { + let token_id = effects.created()[0]; + let token = scenario.take_from_sender_by_id<Coin<Token>>(token_id); + let token_value = token.value(); + assert!( + total_supply_before + token_value == + get_total_supply<Token>(&bridge), + ); + scenario.return_to_sender(token); + }; + + // tear down + test_scenario::return_shared(bridge); + claim_status + } + + // Send a coin (token) to the target chain + public fun send_token<Token>( + env: &mut BridgeEnv, + sender: address, + target_chain_id: u8, + eth_address: vector<u8>, + coin: Coin<Token>, + ): u64 { + // set up + let chain_id = env.chain_id; + let scenario = env.scenario(); + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + let coin_value = coin.value(); + let total_supply_before = get_total_supply<Token>(&bridge); + let seq_num = bridge.get_seq_num_for(message_types::token()); + + // run send + bridge.send_token(target_chain_id, eth_address, coin, scenario.ctx()); + + // verify send events + assert!( + total_supply_before - coin_value == get_total_supply<Token>(&bridge), + ); + let deposited_events = event::events_by_type<TokenDepositedEvent>(); + assert!(deposited_events.length() == 1); + let ( + event_seq_num, + _event_source_chain, + _event_sender_address, + _event_target_chain, + _event_target_address, + _event_token_type, + event_amount, + ) = deposited_events[0].unwrap_deposited_event(); + assert!(event_seq_num == seq_num); + assert!(event_amount == coin_value); + assert_key(chain_id, &bridge); + + // tear down + test_scenario::return_shared(bridge); + seq_num + }
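+ + // A full transfer round trip through the helpers above, as an illustrative + // sketch (chain ids, addresses, and amounts are placeholders): + // + // let seq = env.bridge_to_sui<ETH>( + // chain_ids::eth_sepolia(), b"eth_sender", @0xABCD, 1000, + // ); + // let coin = env.claim_token<ETH>(@0xABCD, chain_ids::eth_sepolia(), seq); + // env.send_token<ETH>(@0xABCD, chain_ids::eth_sepolia(), b"eth_receiver", coin);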
+ + // Update the limit for a given route + public fun update_bridge_limit( + env: &mut BridgeEnv, + sender: address, + receiving_chain: u8, + sending_chain: u8, + limit: u64, + ): u64 { + // set up + let scenario = env.scenario(); + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + + // message signed + let msg = message::create_update_bridge_limit_message( + receiving_chain, + bridge.get_seq_num_for(message_types::update_bridge_limit()), + sending_chain, + limit, + ); + let signatures = env.sign_message(msg); + + // run limit update + bridge.execute_system_message(msg, signatures); + + // verify limit events + let limit_events = event::events_by_type<UpdateRouteLimitEvent>(); + assert!(limit_events.length() == 1); + let event = limit_events[0]; + let (sc, rc, new_limit) = event.unpack_route_limit_event(); + assert!(sc == sending_chain); + assert!(rc == receiving_chain); + assert!(new_limit == limit); + + // tear down + test_scenario::return_shared(bridge); + new_limit + } + + // Update a given asset price (notional value) + public fun update_asset_price( + env: &mut BridgeEnv, + sender: address, + token_id: u8, + value: u64, + ) { + // set up + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + + // message signed + let message = message::create_update_asset_price_message( + token_id, + env.chain_id, + bridge.get_seq_num_for(message_types::update_asset_price()), + value, + ); + let signatures = env.sign_message(message); + + // run price update + bridge.execute_system_message(message, signatures); + + // verify price events + let update_events = event::events_by_type<UpdateTokenPriceEvent>(); + assert!(update_events.length() == 1); + let (event_token_id, event_new_price) = update_events[ + 0 + ].unwrap_update_event(); + assert!(event_token_id == token_id); + assert!(event_new_price == value); + + // tear down + test_scenario::return_shared(bridge); + } + + // Register the `TEST_TOKEN` token + public fun register_test_token(env: &mut BridgeEnv) { + // set up + let scenario = &mut env.scenario; + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared<Bridge>(); + + // "create" the `Coin` + let ( + upgrade_cap, + treasury_cap, + metadata, + ) = test_token::create_bridge_token(scenario.ctx()); + // register the coin/token with the bridge + bridge.register_foreign_token( + treasury_cap, + upgrade_cap, + &metadata, + ); + + // verify registration events + let register_events = event::events_by_type<TokenRegistrationEvent>(); + assert!(register_events.length() == 1); + let (type_name, decimal, nat) = register_events[ + 0 + ].unwrap_registration_event(); + assert!(type_name == type_name::get<TEST_TOKEN>()); + assert!(decimal == 8); + assert!(nat == false); + + // tear down + destroy(metadata); + test_scenario::return_shared(bridge); + } + + // Add a list of tokens to the bridge.
+ public fun add_tokens( + env: &mut BridgeEnv, + sender: address, + native_token: bool, + token_ids: vector<u8>, + type_names: vector<String>, + token_prices: vector<u64>, + ) { + // set up + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + + // message signed + let message = create_add_tokens_on_sui_message( + env.chain_id, + bridge.get_seq_num_for(message_types::add_tokens_on_sui()), + native_token, + token_ids, + type_names, + token_prices, + ); + let signatures = env.sign_message(message); + + // run token addition + bridge.execute_system_message(message, signatures); + + // verify token addition events + let new_tokens_events = event::events_by_type<NewTokenEvent>(); + assert!(new_tokens_events.length() <= token_ids.length()); + + // tear down + test_scenario::return_shared(bridge); + } + + // Blocklist a list of bridge nodes + public fun execute_blocklist( + env: &mut BridgeEnv, + sender: address, + chain_id: u8, + blocklist_type: u8, + validator_ecdsa_addresses: vector<vector<u8>>, + ) { + // set up + let scenario = env.scenario(); + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + + // message signed + let blocklist = create_blocklist_message( + chain_id, + bridge.get_seq_num_for(message_types::committee_blocklist()), + blocklist_type, + validator_ecdsa_addresses, + ); + let signatures = env.sign_message(blocklist); + + // run blocklist + bridge.execute_system_message(blocklist, signatures); + + // verify blocklist events + let block_list_events = event::events_by_type< + BlocklistValidatorEvent, + >(); + assert!( + block_list_events.length() == validator_ecdsa_addresses.length(), + ); + + // tear down + test_scenario::return_shared(bridge); + } + + // Register new token + public fun register_foreign_token<Token>( + env: &mut BridgeEnv, + treasury_cap: TreasuryCap<Token>, + upgrade_cap: UpgradeCap, + metadata: CoinMetadata<Token>, + sender: address, + ) { + // set up + let scenario = env.scenario(); + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + + // run registration + bridge.register_foreign_token(treasury_cap, upgrade_cap, &metadata); + + // verify registration events + let register_events = event::events_by_type<TokenRegistrationEvent>(); + assert!(register_events.length() == 1); + + // verify changes in bridge + let type_name = type_name::get<Token>(); + let inner = bridge.test_load_inner(); + let treasury = inner.inner_treasury(); + let waiting_room = treasury.waiting_room(); + assert!(waiting_room.contains(type_name::into_string(type_name))); + let treasuries = treasury.treasuries(); + assert!(treasuries.contains(type_name)); + + // tear down + test_scenario::return_shared(bridge); + destroy(metadata); + } + + // Freeze the bridge + public fun freeze_bridge(env: &mut BridgeEnv, sender: address, error: u64) { + // set up + let scenario = env.scenario(); + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + let seq_num = bridge.get_seq_num_for(message_types::emergency_op()); + + // message signed + let msg = message::create_emergency_op_message( + env.chain_id, + seq_num, + emergency_op_pause(), + ); + let signatures = env.sign_message(msg); + + // run freeze + bridge.execute_system_message(msg, signatures); + + // verify freeze events + let register_events = event::events_by_type<EmergencyOpEvent>(); + assert!(register_events.length() == 1); + assert!(register_events[0].unwrap_emergency_op_event() == true); + + // verify freeze + let inner = bridge.test_load_inner_mut(); + inner.assert_paused(error); + + // tear down + test_scenario::return_shared(bridge); + } + + 
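+ // The freeze/unfreeze pair uses the same signed-system-message flow as every + // other command helper in this module: build the emergency-op message with + // the next `emergency_op` sequence number, gather committee signatures via + // `sign_message`, and submit through `execute_system_message`. A test would + // typically pair the two calls (the error codes are arbitrary u64s used only + // in the paused/not-paused assertions): + // + // env.freeze_bridge(@0x0, 0); + // // operations on the bridge now abort as paused + // env.unfreeze_bridge(@0x0, 1); + //
+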
// Unfreeze the bridge + public fun unfreeze_bridge( + env: &mut BridgeEnv, + sender: address, + error: u64, + ) { + // set up + let scenario = env.scenario(); + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + let seq_num = bridge.get_seq_num_for(message_types::emergency_op()); + + // message signed + let msg = message::create_emergency_op_message( + env.chain_id, + seq_num, + emergency_op_unpause(), + ); + let signatures = env.sign_message(msg); + + // run unfreeze + bridge.execute_system_message(msg, signatures); + + // verify unfreeze events + let register_events = event::events_by_type<EmergencyOpEvent>(); + assert!(register_events.length() == 1); + assert!(register_events[0].unwrap_emergency_op_event() == false); + + // verify unfreeze + let inner = bridge.test_load_inner_mut(); + inner.assert_not_paused(error); + + // tear down + test_scenario::return_shared(bridge); + } + + // + // Getters + // + + public fun ctx(env: &mut BridgeEnv): &mut TxContext { + env.scenario.ctx() + } + + public fun scenario(env: &mut BridgeEnv): &mut Scenario { + &mut env.scenario + } + + public fun chain_id(env: &mut BridgeEnv): u8 { + env.chain_id + } + + public fun validators(env: &BridgeEnv): &vector<ValidatorInfo> { + &env.validators + } + + public fun get_btc(env: &mut BridgeEnv, amount: u64): Coin<BTC> { + let scenario = &mut env.scenario; + let ctx = scenario.ctx(); + env.vault.btc_coins.split(amount, ctx) + } + + public fun get_eth(env: &mut BridgeEnv, amount: u64): Coin<ETH> { + let scenario = &mut env.scenario; + let ctx = scenario.ctx(); + env.vault.eth_coins.split(amount, ctx) + } + + public fun get_usdc(env: &mut BridgeEnv, amount: u64): Coin<USDC> { + let scenario = &mut env.scenario; + let ctx = scenario.ctx(); + env.vault.usdc_coins.split(amount, ctx) + } + + public fun get_usdt(env: &mut BridgeEnv, amount: u64): Coin<USDT> { + let scenario = &mut env.scenario; + let ctx = scenario.ctx(); + env.vault.usdt_coins.split(amount, ctx) + } + + public fun limits(env: &mut BridgeEnv, dest: u8): u64 { + let scenario = env.scenario(); + scenario.next_tx(@0x0); + let bridge = scenario.take_shared<Bridge>(); + let route = chain_ids::get_route(dest, env.chain_id); + let limits = bridge + .test_load_inner() + .inner_limiter() + .get_route_limit(&route); + test_scenario::return_shared(bridge); + limits + } + + fun assert_key(chain_id: u8, bridge: &Bridge) { + let inner = bridge.test_load_inner(); + let transfer_record = inner.inner_token_transfer_records(); + let seq_num = inner.sequence_nums()[&message_types::token()] - 1; + let key = message::create_key( + chain_id, + message_types::token(), + seq_num, + ); + assert!(transfer_record.contains(key)); + } + + // + // Internal functions + // + + // Destroy the vault + fun destroy_valut(vault: Vault) { + let Vault { + btc_coins, + eth_coins, + usdc_coins, + usdt_coins, + test_coins, + } = vault; + btc_coins.burn_for_testing(); + eth_coins.burn_for_testing(); + usdc_coins.burn_for_testing(); + usdt_coins.burn_for_testing(); + test_coins.burn_for_testing(); + } + + // Load the vault with some coins + fun load_vault(env: &mut BridgeEnv, sender: address) { + let scenario = &mut env.scenario; + scenario.next_tx(sender); + let mut bridge = scenario.take_shared<Bridge>(); + let vault = &mut env.vault; + vault.btc_coins.join(mint_some(&mut bridge, scenario.ctx())); + vault.eth_coins.join(mint_some(&mut bridge, scenario.ctx())); + vault.usdc_coins.join(mint_some(&mut bridge, scenario.ctx())); + vault.usdt_coins.join(mint_some(&mut bridge, scenario.ctx())); + test_scenario::return_shared(bridge); + } + + // Mint some coins
+ fun mint_some<Token>(bridge: &mut Bridge, ctx: &mut TxContext): Coin<Token> { + let treasury = bridge.test_load_inner_mut().inner_treasury_mut(); + let coin = treasury.mint(1_000_000, ctx); + coin + } + + fun get_total_supply<Token>(bridge: &Bridge): u64 { + let inner = bridge.test_load_inner(); + let treasury = inner.inner_treasury(); + let treasuries = treasury.treasuries(); + let tc: &TreasuryCap<Token> = &treasuries[type_name::get<Token>()]; + tc.total_supply() + } +} + +// +// Test Coins +// + +#[test_only] +module bridge::test_token { + use std::ascii; + use std::type_name; + use sui::address; + use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; + use sui::hex; + use sui::package::{UpgradeCap, test_publish}; + use sui::test_utils::create_one_time_witness; + + public struct TEST_TOKEN has drop {} + + public fun create_bridge_token( + ctx: &mut TxContext, + ): (UpgradeCap, TreasuryCap<TEST_TOKEN>, CoinMetadata<TEST_TOKEN>) { + let otw = create_one_time_witness<TEST_TOKEN>(); + let (treasury_cap, metadata) = create_currency( + otw, + 8, + b"tst", + b"test", + b"bridge test token", + option::none(), + ctx, + ); + + let type_name = type_name::get<TEST_TOKEN>(); + let address_bytes = hex::decode( + ascii::into_bytes(type_name::get_address(&type_name)), + ); + let coin_id = address::from_bytes(address_bytes).to_id(); + let upgrade_cap = test_publish(coin_id, ctx); + + (upgrade_cap, treasury_cap, metadata) + } +} + +#[test_only] +module bridge::btc { + use std::ascii; + use std::type_name; + use sui::address; + use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; + use sui::hex; + use sui::package::{UpgradeCap, test_publish}; + use sui::test_utils::create_one_time_witness; + + public struct BTC has drop {} + + public fun create_bridge_token( + ctx: &mut TxContext, + ): (UpgradeCap, TreasuryCap<BTC>, CoinMetadata<BTC>) { + let otw = create_one_time_witness<BTC>(); + let (treasury_cap, metadata) = create_currency( + otw, + 8, + b"btc", + b"bitcoin", + b"bridge bitcoin token", + option::none(), + ctx, + ); + + let type_name = type_name::get<BTC>(); + let address_bytes = hex::decode( + ascii::into_bytes(type_name::get_address(&type_name)), + ); + let coin_id = address::from_bytes(address_bytes).to_id(); + let upgrade_cap = test_publish(coin_id, ctx); + + (upgrade_cap, treasury_cap, metadata) + } +} + +#[test_only] +module bridge::eth { + use std::ascii; + use std::type_name; + use sui::address; + use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; + use sui::hex; + use sui::package::{UpgradeCap, test_publish}; + use sui::test_utils::create_one_time_witness; + + public struct ETH has drop {} + + public fun create_bridge_token( + ctx: &mut TxContext, + ): (UpgradeCap, TreasuryCap<ETH>, CoinMetadata<ETH>) { + let otw = create_one_time_witness<ETH>(); + let (treasury_cap, metadata) = create_currency( + otw, + 8, + b"eth", + b"eth", + b"bridge ethereum token", + option::none(), + ctx, + ); + + let type_name = type_name::get<ETH>(); + let address_bytes = hex::decode( + ascii::into_bytes(type_name::get_address(&type_name)), + ); + let coin_id = address::from_bytes(address_bytes).to_id(); + let upgrade_cap = test_publish(coin_id, ctx); + + (upgrade_cap, treasury_cap, metadata) + } +} + +#[test_only] +module bridge::usdc { + use std::ascii; + use std::type_name; + use sui::address; + use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; + use sui::hex; + use sui::package::{UpgradeCap, test_publish}; + use sui::test_utils::create_one_time_witness; + + public struct USDC has drop {} + + public fun create_bridge_token( + ctx: &mut TxContext, + ): (UpgradeCap, TreasuryCap<USDC>, 
CoinMetadata<USDC>) { + let otw = create_one_time_witness<USDC>(); + let (treasury_cap, metadata) = create_currency( + otw, + 6, + b"usdc", + b"usdc", + b"bridge usdc token", + option::none(), + ctx, + ); + + let type_name = type_name::get<USDC>(); + let address_bytes = hex::decode( + ascii::into_bytes(type_name::get_address(&type_name)), + ); + let coin_id = address::from_bytes(address_bytes).to_id(); + let upgrade_cap = test_publish(coin_id, ctx); + + (upgrade_cap, treasury_cap, metadata) + } +} + +#[test_only] +module bridge::usdt { + use std::ascii; + use std::type_name; + use sui::address; + use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; + use sui::hex; + use sui::package::{UpgradeCap, test_publish}; + use sui::test_utils::create_one_time_witness; + + public struct USDT has drop {} + + public fun create_bridge_token( + ctx: &mut TxContext, + ): (UpgradeCap, TreasuryCap<USDT>, CoinMetadata<USDT>) { + let otw = create_one_time_witness<USDT>(); + let (treasury_cap, metadata) = create_currency( + otw, + 6, + b"usdt", + b"usdt", + b"bridge usdt token", + option::none(), + ctx, + ); + + let type_name = type_name::get<USDT>(); + let address_bytes = hex::decode( + ascii::into_bytes(type_name::get_address(&type_name)), + ); + let coin_id = address::from_bytes(address_bytes).to_id(); + let upgrade_cap = test_publish(coin_id, ctx); + + (upgrade_cap, treasury_cap, metadata) + } +} diff --git a/crates/sui-framework/packages/bridge/tests/bridge_setup.move b/crates/sui-framework/packages/bridge/tests/bridge_setup.move deleted file mode 100644 index 4652bb058bbd9..0000000000000 --- a/crates/sui-framework/packages/bridge/tests/bridge_setup.move +++ /dev/null @@ -1,800 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module bridge::bridge_setup { - use bridge::bridge::{ - assert_not_paused, assert_paused, create_bridge_for_testing, inner_token_transfer_records, - test_execute_add_tokens_on_sui, test_execute_emergency_op, test_init_bridge_committee, - test_load_inner_mut, Bridge, - }; - use bridge::btc::{Self, BTC}; - use bridge::eth::{Self, ETH}; - use bridge::message::{ - Self, create_add_tokens_on_sui_message, create_blocklist_message, emergency_op_pause, - emergency_op_unpause, - }; - use bridge::message_types; - use bridge::test_token::TEST_TOKEN; - use bridge::usdc::{Self, USDC}; - use bridge::usdt::{Self, USDT}; - use std::{ascii::String, type_name}; - use sui::coin::{Self, Coin, CoinMetadata, TreasuryCap}; - use sui::hex; - use sui::package::UpgradeCap; - use sui::test_scenario::{Self, Scenario}; - use sui::test_utils::destroy; - use sui_system::{ - governance_test_utils::{ - advance_epoch_with_reward_amounts, - create_sui_system_state_for_testing, - create_validator_for_testing, - }, - sui_system::{ - validator_voting_powers_for_testing, - SuiSystemState, - }, - }; - - // - // Token IDs - // - const BTC_ID: u8 = 1; - const ETH_ID: u8 = 2; - const USDC_ID: u8 = 3; - const USDT_ID: u8 = 4; - const TEST_TOKEN_ID: u8 = 5; - - public fun btc_id(): u8 { - BTC_ID - } - - public fun eth_id(): u8 { - ETH_ID - } - - public fun usdc_id(): u8 { - USDC_ID - } - - public fun usdt_id(): u8 { - USDT_ID - } - - public fun test_token_id(): u8 { - TEST_TOKEN_ID - } - - // - // Validators setup and info - // - - const VALIDATOR1_PUBKEY: vector = b"029bef8d556d80e43ae7e0becb3a7e6838b95defe45896ed6075bb9035d06c9964"; - const VALIDATOR2_PUBKEY: vector = b"033e99a541db69bd32040dfe5037fbf5210dafa8151a71e21c5204b05d95ce0a62"; - const VALIDATOR3_PUBKEY: vector = 
b"033e99a541db69bd32040dfe5037fbf5210dafa8151a71e21c5204b05d95ce0a63"; - - // Bridge environemnt - public struct BridgeEnv { - scenario: Scenario, - chain_id: u8, - seq_num: u64, - vault: Vault, - } - - public struct Vault { - btc_coins: Coin, - eth_coins: Coin, - usdc_coins: Coin, - usdt_coins: Coin, - test_coins: Coin, - } - - // Info to set up a validator - public struct ValidatorInfo has copy, drop { - validator: address, - stake_amount: u64, - } - - // HotPotato to access the Bridge - public struct BridgeWrapper { - bridge: Bridge, - } - - // - // Public functions - // - - // - // Environment creation and destruction - // - - public fun create_env(chain_id: u8, start_addr: address): BridgeEnv { - let mut scenario = test_scenario::begin(start_addr); - let ctx = scenario.ctx(); - let btc_coins = coin::zero(ctx); - let eth_coins = coin::zero(ctx); - let usdc_coins = coin::zero(ctx); - let usdt_coins = coin::zero(ctx); - let test_coins = coin::zero(ctx); - let vault = Vault { - btc_coins, - eth_coins, - usdc_coins, - usdt_coins, - test_coins, - }; - BridgeEnv { - scenario, - chain_id, - seq_num: 0, - vault, - } - } - - public fun destroy_env(env: BridgeEnv) { - let BridgeEnv {scenario, chain_id: _, seq_num: _, vault} = env; - destroy_valut(vault); - scenario.end(); - } - - public fun create_validator_info(validator: address, stake_amount: u64): ValidatorInfo { - ValidatorInfo { - validator, - stake_amount, - } - } - - // - // Add a set of validators to the chain. - // Call only once in a test scenario. - public fun setup_validators( - env: &mut BridgeEnv, - validators_info: vector, - sender: address, - ) { - let scenario = &mut env.scenario; - scenario.next_tx(sender); - let ctx = scenario.ctx(); - let mut validators = vector::empty(); - let mut count = validators_info.length(); - while (count > 0) { - count = count - 1; - validators.push_back(create_validator_for_testing( - validators_info[count].validator, - validators_info[count].stake_amount, - ctx, - )); - }; - create_sui_system_state_for_testing(validators, 0, 0, ctx); - advance_epoch_with_reward_amounts(0, 0, scenario); - } - - // - // Bridge creation and setup - // - - // Set up an environment with 3 validators, a bridge with - // a treasury and a committee with all 3 validators. - // The treasury will contain 4 tokens: ETH, BTC, USDT, USDC. - // Save the Bridge as a shared object. - public fun create_bridge_default(env: &mut BridgeEnv) { - let validators = vector[ - ValidatorInfo { validator: @0xA, stake_amount: 100 }, - ValidatorInfo { validator: @0xB, stake_amount: 100 }, - ValidatorInfo { validator: @0xC, stake_amount: 100 }, - ]; - let sender = @0x0; - env.setup_validators(validators, sender); - env.create_bridge(sender); - env.register_committee(); - env.init_committee(sender); - } - - // Create a bridge and set up a treasury. - // The treasury will contain 4 tokens: ETH, BTC, USDT, USDC. - // Save the Bridge as a shared object. - // No operation on the validators. 
- public fun create_bridge(env: &mut BridgeEnv, sender: address) { - env.scenario.next_tx(sender); - let ctx = env.scenario.ctx(); - create_bridge_for_testing(object::new(ctx), env.chain_id, ctx); - env.setup_treasury(sender); - } - - // Register 3 committee members (validators `@0xA`, `@0xB`, `@0xC`) - public fun register_committee(env: &mut BridgeEnv) { - let scenario = &mut env.scenario; - scenario.next_tx(@0x0); - let mut bridge = scenario.take_shared(); - let mut system_state = test_scenario::take_shared(scenario); - - // register committee member `@0xA` - scenario.next_tx(@0xA); - bridge.committee_registration( - &mut system_state, - hex::decode(VALIDATOR1_PUBKEY), - b"", - scenario.ctx(), - ); - - // register committee member `@0xB` - scenario.next_tx(@0xB); - bridge.committee_registration( - &mut system_state, - hex::decode(VALIDATOR2_PUBKEY), - b"", - scenario.ctx(), - ); - - // register committee member `@0xC` - scenario.next_tx(@0xC); - bridge.committee_registration( - &mut system_state, - hex::decode(VALIDATOR3_PUBKEY), - b"", - scenario.ctx(), - ); - - test_scenario::return_shared(bridge); - test_scenario::return_shared(system_state); - } - - // Init the bridge committee - public fun init_committee(env: &mut BridgeEnv, sender: address) { - let scenario = &mut env.scenario; - scenario.next_tx(sender); - let mut bridge = scenario.take_shared(); - let mut system_state = test_scenario::take_shared(scenario); - let voting_powers = validator_voting_powers_for_testing(&mut system_state); - bridge.test_init_bridge_committee( - voting_powers, - 50, - scenario.ctx(), - ); - test_scenario::return_shared(bridge); - test_scenario::return_shared(system_state); - } - - // Set up a treasury with 4 tokens: ETH, BTC, USDT, USDC. - public fun setup_treasury(env: &mut BridgeEnv, sender: address) { - env.register_default_tokens(sender); - env.add_default_tokens(sender); - env.load_vault(sender); - } - - // Register 4 tokens with the Bridge: ETH, BTC, USDT, USDC. - public fun register_default_tokens(env: &mut BridgeEnv, sender: address) { - env.scenario.next_tx(sender); - let mut bridge = env.scenario.take_shared(); - - // BTC - let (upgrade_cap, treasury_cap, metadata) = - btc::create_bridge_token(env.scenario.ctx()); - bridge.register_foreign_token( - treasury_cap, - upgrade_cap, - &metadata, - ); - destroy(metadata); - // ETH - let (upgrade_cap, treasury_cap, metadata) = - eth::create_bridge_token(env.scenario.ctx()); - bridge.register_foreign_token( - treasury_cap, - upgrade_cap, - &metadata, - ); - destroy(metadata); - // USDC - let (upgrade_cap, treasury_cap, metadata) = - usdc::create_bridge_token(env.scenario.ctx()); - bridge.register_foreign_token( - treasury_cap, - upgrade_cap, - &metadata, - ); - destroy(metadata); - // USDT - let (upgrade_cap, treasury_cap, metadata) = - usdt::create_bridge_token(env.scenario.ctx()); - bridge.register_foreign_token( - treasury_cap, - upgrade_cap, - &metadata, - ); - destroy(metadata); - - test_scenario::return_shared(bridge); - } - - // Add the 4 tokens previously registered: ETH, BTC, USDT, USDC. 
- public fun add_default_tokens(env: &mut BridgeEnv, sender: address) { - let scenario = &mut env.scenario; - scenario.next_tx(sender); - let mut bridge = scenario.take_shared(); - - let add_token_message = create_add_tokens_on_sui_message( - env.chain_id, - env.seq_num(), - false, // native_token - vector[BTC_ID, ETH_ID, USDC_ID, USDT_ID], - vector[ - type_name::get().into_string(), - type_name::get().into_string(), - type_name::get().into_string(), - type_name::get().into_string(), - ], - vector[1000, 100, 1, 1], - ); - let payload = add_token_message.extract_add_tokens_on_sui(); - bridge.test_execute_add_tokens_on_sui(payload); - - test_scenario::return_shared(bridge); - } - - // Add the 4 tokens previously registered: ETH, BTC, USDT, USDC. - public fun add_tokens( - env: &mut BridgeEnv, - sender: address, - native_token: bool, - token_ids: vector, - type_names: vector, - token_prices: vector, - ) { - let scenario = &mut env.scenario; - scenario.next_tx(sender); - let mut bridge = scenario.take_shared(); - - let add_token_message = create_add_tokens_on_sui_message( - env.chain_id, - env.seq_num(), - native_token, - token_ids, - type_names, - token_prices, - ); - let payload = add_token_message.extract_add_tokens_on_sui(); - bridge.test_execute_add_tokens_on_sui(payload); - - test_scenario::return_shared(bridge); - } - - public fun update_asset_price( - env: &mut BridgeEnv, - sender: address, - token_id: u8, - value:u64, - ) { - let scenario = &mut env.scenario; - scenario.next_tx(sender); - let mut bridge = scenario.take_shared(); - let inner = bridge.test_load_inner_mut(); - - let msg = message::create_update_asset_price_message( - token_id, - env.chain_id, - env.seq_num(), - value, - ); - let payload = msg.extract_update_asset_price(); - inner.test_execute_update_asset_price(payload); - - test_scenario::return_shared(bridge); - } - - // - // Getters - // - - public fun validator_pubkeys(): vector> { - vector[ - VALIDATOR1_PUBKEY, - VALIDATOR2_PUBKEY, - VALIDATOR3_PUBKEY, - ] - } - - public fun ctx(env: &mut BridgeEnv): &mut TxContext { - env.scenario.ctx() - } - - public fun scenario(env: &mut BridgeEnv): &mut Scenario { - &mut env.scenario - } - - public fun bridge(env: &mut BridgeEnv, sender: address): BridgeWrapper { - let scenario = &mut env.scenario; - scenario.next_tx(sender); - let bridge = scenario.take_shared(); - BridgeWrapper { bridge } - } - - public fun bridge_ref(wrapper: &BridgeWrapper): &Bridge { - &wrapper.bridge - } - - public fun return_bridge(bridge: BridgeWrapper) { - let BridgeWrapper { bridge } = bridge; - test_scenario::return_shared(bridge); - } - - public fun get_btc(env: &mut BridgeEnv, amount: u64): Coin { - let scenario = &mut env.scenario; - let ctx = scenario.ctx(); - env.vault.btc_coins.split(amount, ctx) - } - - public fun get_eth(env: &mut BridgeEnv, amount: u64): Coin { - let scenario = &mut env.scenario; - let ctx = scenario.ctx(); - env.vault.eth_coins.split(amount, ctx) - } - - public fun get_usdc(env: &mut BridgeEnv, amount: u64): Coin { - let scenario = &mut env.scenario; - let ctx = scenario.ctx(); - env.vault.usdc_coins.split(amount, ctx) - } - - public fun get_usdt(env: &mut BridgeEnv, amount: u64): Coin { - let scenario = &mut env.scenario; - let ctx = scenario.ctx(); - env.vault.usdt_coins.split(amount, ctx) - } - - // - // Bridge commands - // - - // Register new tokens - public fun register_foreign_token( - env: &mut BridgeEnv, - treasury_cap: TreasuryCap, - upgrade_cap: UpgradeCap, - metadata: CoinMetadata, - sender: address, - ) { - let 
-
-    // Register new tokens
-    public fun register_foreign_token<T>(
-        env: &mut BridgeEnv,
-        treasury_cap: TreasuryCap<T>,
-        upgrade_cap: UpgradeCap,
-        metadata: CoinMetadata<T>,
-        sender: address,
-    ) {
-        let scenario = env.scenario();
-        scenario.next_tx(sender);
-        let mut bridge = scenario.take_shared<Bridge>();
-        bridge.register_foreign_token(treasury_cap, upgrade_cap, &metadata);
-
-        // assert changes in bridge
-        let type_name = type_name::get<T>();
-        let inner = bridge.test_load_inner();
-        let treasury = inner.inner_treasury();
-        let waiting_room = treasury.waiting_room();
-        assert!(waiting_room.contains(type_name::into_string(type_name)));
-        let treasuries = treasury.treasuries();
-        assert!(treasuries.contains(type_name));
-
-        test_scenario::return_shared(bridge);
-        destroy(metadata);
-    }
-
-    // Freeze the bridge
-    public fun freeze_bridge(env: &mut BridgeEnv, sender: address, error: u64) {
-        let scenario = env.scenario();
-        scenario.next_tx(sender);
-        let mut bridge = scenario.take_shared<Bridge>();
-        let inner = bridge.test_load_inner_mut();
-        let msg = message::create_emergency_op_message(env.chain_id, 0, emergency_op_pause());
-        let payload = msg.extract_emergency_op_payload();
-        inner.test_execute_emergency_op(payload);
-        inner.assert_paused(error);
-        test_scenario::return_shared(bridge);
-    }
-
-    // Unfreeze the bridge
-    public fun unfreeze_bridge(env: &mut BridgeEnv, sender: address, error: u64) {
-        let scenario = env.scenario();
-        scenario.next_tx(sender);
-        let mut bridge = scenario.take_shared<Bridge>();
-        let inner = bridge.test_load_inner_mut();
-        let msg = message::create_emergency_op_message(env.chain_id, 1, emergency_op_unpause());
-        let payload = msg.extract_emergency_op_payload();
-        inner.test_execute_emergency_op(payload);
-        inner.assert_not_paused(error);
-        test_scenario::return_shared(bridge);
-    }
-
-    public fun send_token<T>(
-        env: &mut BridgeEnv,
-        target_chain_id: u8,
-        eth_address: vector<u8>,
-        coin: Coin<T>,
-    ) {
-        let scenario = env.scenario();
-        scenario.next_tx(@0xAAAA);
-        let mut bridge = scenario.take_shared<Bridge>();
-        let coin_value = coin.value();
-        let total_supply_before = get_total_supply<T>(&bridge);
-
-        bridge.send_token(target_chain_id, eth_address, coin, scenario.ctx());
-
-        // the bridged-out amount is burned, so total supply drops by it
-        assert!(total_supply_before - coin_value == get_total_supply<T>(&bridge));
-
-        let inner = bridge.test_load_inner();
-        let transfer_record = inner.inner_token_transfer_records();
-        let seq_num = inner.sequence_nums()[&message_types::token()] - 1;
-        let key = message::create_key(env.chain_id, message_types::token(), seq_num);
-        assert!(transfer_record.contains(key));
-
-        test_scenario::return_shared(bridge);
-    }
-
-    public fun execute_blocklist(
-        env: &mut BridgeEnv,
-        sender: address,
-        chain_id: u8,
-        blocklist_type: u8,
-        validator_ecdsa_addresses: vector<vector<u8>>,
-        signatures: vector<vector<u8>>,
-    ) {
-        let scenario = env.scenario();
-        scenario.next_tx(sender);
-        let mut bridge = scenario.take_shared<Bridge>();
-        let blocklist = create_blocklist_message(
-            chain_id,
-            env.seq_num(),
-            blocklist_type,
-            validator_ecdsa_addresses,
-        );
-        bridge.execute_system_message(blocklist, signatures);
-        test_scenario::return_shared(bridge);
-    }
-
-    public fun update_bridge_limit(
-        env: &mut BridgeEnv,
-        sender: address,
-        receiving_chain: u8,
-        sending_chain: u8,
-        limit: u64,
-    ) {
-        let scenario = env.scenario();
-        scenario.next_tx(sender);
-        let mut bridge = scenario.take_shared<Bridge>();
-        let msg = message::create_update_bridge_limit_message(
-            receiving_chain,
-            env.seq_num(),
-            sending_chain,
-            limit,
-        );
-        let payload = msg.extract_update_bridge_limit();
-        bridge.test_load_inner_mut().test_execute_update_bridge_limit(payload);
-        test_scenario::return_shared(bridge);
-    }
-
-    //
-    // Internal functions
-    //
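-
-    // Sequence numbers are tracked per environment: `seq_num()` below returns
-    // the current value and increments it, so every message helper above
-    // consumes a fresh, strictly increasing sequence number.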
-
-    fun seq_num(env: &mut BridgeEnv): u64 {
-        let seq_num = env.seq_num;
-        env.seq_num = seq_num + 1;
-        seq_num
-    }
-
-    // Destroy the vault
-    fun destroy_vault(vault: Vault) {
-        let Vault {
-            btc_coins,
-            eth_coins,
-            usdc_coins,
-            usdt_coins,
-            test_coins,
-        } = vault;
-        btc_coins.burn_for_testing();
-        eth_coins.burn_for_testing();
-        usdc_coins.burn_for_testing();
-        usdt_coins.burn_for_testing();
-        test_coins.burn_for_testing();
-    }
-
-    // Load the vault with some coins
-    fun load_vault(env: &mut BridgeEnv, sender: address) {
-        let scenario = &mut env.scenario;
-        scenario.next_tx(sender);
-        let mut bridge = scenario.take_shared<Bridge>();
-        let vault = &mut env.vault;
-        vault.btc_coins.join(mint_some(&mut bridge, scenario.ctx()));
-        vault.eth_coins.join(mint_some(&mut bridge, scenario.ctx()));
-        vault.usdc_coins.join(mint_some(&mut bridge, scenario.ctx()));
-        vault.usdt_coins.join(mint_some(&mut bridge, scenario.ctx()));
-        test_scenario::return_shared(bridge);
-    }
-
-    // Mint some coins
-    fun mint_some<T>(bridge: &mut Bridge, ctx: &mut TxContext): Coin<T> {
-        let treasury = bridge.test_load_inner_mut().inner_treasury_mut();
-        let coin = treasury.mint(1_000_000, ctx);
-        coin
-    }
-
-    fun get_total_supply<T>(bridge: &Bridge): u64 {
-        let inner = bridge.test_load_inner();
-        let treasury = inner.inner_treasury();
-        let treasuries = treasury.treasuries();
-        let tc: &TreasuryCap<T> = &treasuries[type_name::get<T>()];
-        tc.total_supply()
-    }
-}
-
-//
-// Test Coins
-//
-
-#[test_only]
-module bridge::test_token {
-    use std::{ascii, type_name};
-    use sui::address;
-    use sui::coin::{CoinMetadata, TreasuryCap, create_currency};
-    use sui::hex;
-    use sui::package::{UpgradeCap, test_publish};
-    use sui::test_utils::create_one_time_witness;
-
-
-    public struct TEST_TOKEN has drop {}
-
-    public fun create_bridge_token(
-        ctx: &mut TxContext,
-    ): (UpgradeCap, TreasuryCap<TEST_TOKEN>, CoinMetadata<TEST_TOKEN>) {
-        let otw = create_one_time_witness<TEST_TOKEN>();
-        let (treasury_cap, metadata) = create_currency(
-            otw,
-            8,
-            b"tst",
-            b"test",
-            b"bridge test token",
-            option::none(),
-            ctx,
-        );
-
-        let type_name = type_name::get<TEST_TOKEN>();
-        let address_bytes = hex::decode(ascii::into_bytes(type_name::get_address(&type_name)));
-        let coin_id = address::from_bytes(address_bytes).to_id();
-        let upgrade_cap = test_publish(coin_id, ctx);
-
-        (upgrade_cap, treasury_cap, metadata)
-    }
-}
-
-#[test_only]
-module bridge::btc {
-    use std::{ascii, type_name};
-    use sui::address;
-    use sui::coin::{CoinMetadata, TreasuryCap, create_currency};
-    use sui::hex;
-    use sui::package::{UpgradeCap, test_publish};
-    use sui::test_utils::create_one_time_witness;
-
-
-    public struct BTC has drop {}
-
-    public fun create_bridge_token(
-        ctx: &mut TxContext,
-    ): (UpgradeCap, TreasuryCap<BTC>, CoinMetadata<BTC>) {
-        let otw = create_one_time_witness<BTC>();
-        let (treasury_cap, metadata) = create_currency(
-            otw,
-            8,
-            b"btc",
-            b"bitcoin",
-            b"bridge bitcoin token",
-            option::none(),
-            ctx,
-        );
-
-        let type_name = type_name::get<BTC>();
-        let address_bytes = hex::decode(ascii::into_bytes(type_name::get_address(&type_name)));
-        let coin_id = address::from_bytes(address_bytes).to_id();
-        let upgrade_cap = test_publish(coin_id, ctx);
-
-        (upgrade_cap, treasury_cap, metadata)
-    }
-}
-
-#[test_only]
-module bridge::eth {
-    use std::{ascii, type_name};
-    use sui::address;
-    use sui::coin::{CoinMetadata, TreasuryCap, create_currency};
-    use sui::hex;
-    use sui::package::{UpgradeCap, test_publish};
-    use sui::test_utils::create_one_time_witness;
-
-
-    public struct ETH has drop {}
-
-    public fun create_bridge_token(
-        ctx: &mut TxContext,
-    ): (UpgradeCap,
TreasuryCap, CoinMetadata) { - let otw = create_one_time_witness(); - let (treasury_cap, metadata) = create_currency( - otw, - 8, - b"eth", - b"eth", - b"bridge ethereum token", - option::none(), - ctx, - ); - - let type_name = type_name::get(); - let address_bytes = hex::decode(ascii::into_bytes(type_name::get_address(&type_name))); - let coin_id = address::from_bytes(address_bytes).to_id(); - let upgrade_cap = test_publish(coin_id, ctx); - - (upgrade_cap, treasury_cap, metadata) - } -} - -#[test_only] -module bridge::usdc { - use std::{ascii, type_name}; - use sui::address; - use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; - use sui::hex; - use sui::package::{UpgradeCap, test_publish}; - use sui::test_utils::create_one_time_witness; - - - public struct USDC has drop {} - - public fun create_bridge_token( - ctx: &mut TxContext, - ): (UpgradeCap, TreasuryCap, CoinMetadata) { - let otw = create_one_time_witness(); - let (treasury_cap, metadata) = create_currency( - otw, - 6, - b"usdc", - b"usdc", - b"bridge usdc token", - option::none(), - ctx, - ); - - let type_name = type_name::get(); - let address_bytes = hex::decode(ascii::into_bytes(type_name::get_address(&type_name))); - let coin_id = address::from_bytes(address_bytes).to_id(); - let upgrade_cap = test_publish(coin_id, ctx); - - (upgrade_cap, treasury_cap, metadata) - } -} - -#[test_only] -module bridge::usdt { - use std::{ascii, type_name}; - use sui::address; - use sui::coin::{CoinMetadata, TreasuryCap, create_currency}; - use sui::hex; - use sui::package::{UpgradeCap, test_publish}; - use sui::test_utils::create_one_time_witness; - - - public struct USDT has drop {} - - public fun create_bridge_token( - ctx: &mut TxContext, - ): (UpgradeCap, TreasuryCap, CoinMetadata) { - let otw = create_one_time_witness(); - let (treasury_cap, metadata) = create_currency( - otw, - 6, - b"usdt", - b"usdt", - b"bridge usdt token", - option::none(), - ctx, - ); - - let type_name = type_name::get(); - let address_bytes = hex::decode(ascii::into_bytes(type_name::get_address(&type_name))); - let coin_id = address::from_bytes(address_bytes).to_id(); - let upgrade_cap = test_publish(coin_id, ctx); - - (upgrade_cap, treasury_cap, metadata) - } -} diff --git a/crates/sui-framework/packages/bridge/tests/bridge_tests.move b/crates/sui-framework/packages/bridge/tests/bridge_tests.move index 3c416c33fb24f..d26b5c919a9a2 100644 --- a/crates/sui-framework/packages/bridge/tests/bridge_tests.move +++ b/crates/sui-framework/packages/bridge/tests/bridge_tests.move @@ -2,652 +2,761 @@ // SPDX-License-Identifier: Apache-2.0 #[test_only] -module bridge::bridge_tests { - use bridge::bridge::{ - inner_limiter, inner_paused, inner_treasury, inner_token_transfer_records_mut, - new_bridge_record_for_testing, new_for_testing, test_get_current_seq_num_and_increment, - test_execute_update_asset_price, test_get_token_transfer_action_signatures, - test_load_inner, test_load_inner_mut, test_get_token_transfer_action_status, - transfer_status_approved, transfer_status_claimed, transfer_status_not_found, - transfer_status_pending, Bridge, - }; - use bridge::bridge_setup::{ - btc_id, create_bridge, create_bridge_default, create_env, create_validator_info, - eth_id, freeze_bridge, init_committee, register_committee, unfreeze_bridge, test_token_id, - }; - use bridge::btc::BTC; - use bridge::chain_ids; - use bridge::eth::ETH; - use bridge::message::{Self, to_parsed_token_transfer_message}; - use bridge::message_types; - use bridge::test_token::{TEST_TOKEN, 
create_bridge_token as create_test_token}; - use bridge::usdc::USDC; - use std::type_name; - use sui::address; - use sui::balance; - use sui::coin::{Self, Coin}; - use sui::hex; - use sui::package::test_publish; - use sui::test_scenario::Self; - use sui::test_utils::destroy; - - // common error start code for unexpected errors in tests (assertions). - // If more than one assert in a test needs to use an unexpected error code, - // use this as the starting error and add 1 to subsequent errors - const UNEXPECTED_ERROR: u64 = 10293847; - // use on tests that fail to save cleanup - const TEST_DONE: u64 = 74839201; - - #[test] - fun test_bridge_create() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge(@0x0); - - let bridge = env.bridge(@0x0); - let inner = bridge.bridge_ref().test_load_inner(); - inner.assert_not_paused(UNEXPECTED_ERROR); - assert!(inner.inner_token_transfer_records().length() == 0); - bridge.return_bridge(); - - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::ENotSystemAddress)] - fun test_bridge_create_non_system_addr() { - let mut env = create_env(chain_ids::sui_mainnet(), @0x1); - env.create_bridge(@0x1); - - abort TEST_DONE - } - - #[test] - fun test_init_committee() { - let mut env = create_env(chain_ids::sui_custom(), @0x0); - env.create_bridge_default(); - env.destroy_env(); - } - - #[test] - fun test_init_committee_twice() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - env.init_committee(@0x0); // second time is a no-op - - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::ENotSystemAddress)] - fun test_init_committee_non_system_addr() { - let mut env = create_env(chain_ids::sui_mainnet(), @0x0); - // TODO: this is too brittle, fix it - env.setup_validators( - vector[ - create_validator_info(@0xA, 100), - create_validator_info(@0xB, 100), - create_validator_info(@0xC, 100), - ], - @0x0, - ); - env.create_bridge(@0x0); - env.register_committee(); - env.init_committee(@0xA); - - abort TEST_DONE - } - - #[test] - #[expected_failure(abort_code = bridge::committee::ECommitteeAlreadyInitiated)] - fun test_register_committee_after_init() { - let mut env = create_env(chain_ids::sui_custom(), @0x0); - env.create_bridge_default(); - env.register_committee(); - - abort TEST_DONE - } - - #[test] - fun test_register_foreign_token() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - let (upgrade_cap, treasury_cap, metadata) = create_test_token(env.scenario().ctx()); - env.register_foreign_token(treasury_cap, upgrade_cap, metadata, addr); - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::treasury::ETokenSupplyNonZero)] - fun test_register_foreign_token_non_zero_supply() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - let (upgrade_cap, mut treasury_cap, metadata) = create_test_token(env.scenario().ctx()); - let _coin = treasury_cap.mint(1, env.scenario().ctx()); - env.register_foreign_token(treasury_cap, upgrade_cap, metadata, addr); - - abort 0 - } - - #[test] - #[expected_failure(abort_code = bridge::treasury::EInvalidNotionalValue)] - fun test_add_token_price_zero_value() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - env.add_tokens( - addr, - false, - vector[test_token_id()], - 
vector[type_name::get().into_string()], - vector[0], - ); +module bridge::bridge_tests; +use bridge::bridge::{ + inner_limiter, + inner_paused, + inner_treasury, + inner_token_transfer_records_mut, + new_bridge_record_for_testing, + new_for_testing, + test_get_current_seq_num_and_increment, + test_execute_update_asset_price, + test_get_token_transfer_action_signatures, + test_load_inner, + test_load_inner_mut, + test_get_token_transfer_action_status, + transfer_status_approved, + transfer_status_claimed, + transfer_status_not_found, + transfer_status_pending, + Bridge +}; +use bridge::bridge_env::{ + btc_id, + create_bridge, + create_bridge_default, + create_env, + create_validator, + eth_id, + freeze_bridge, + init_committee, + register_committee, + unfreeze_bridge, + test_token_id +}; +use bridge::btc::BTC; +use bridge::chain_ids; +use bridge::eth::ETH; +use bridge::message::{Self, to_parsed_token_transfer_message}; +use bridge::message_types; +use bridge::test_token::{TEST_TOKEN, create_bridge_token as create_test_token}; +use bridge::usdc::USDC; +use std::type_name; +use sui::address; +use sui::balance; +use sui::coin::{Self, Coin}; +use sui::hex; +use sui::package::test_publish; +use sui::test_scenario; +use sui::test_utils::destroy; + +// common error start code for unexpected errors in tests (assertions). +// If more than one assert in a test needs to use an unexpected error code, +// use this as the starting error and add 1 to subsequent errors +const UNEXPECTED_ERROR: u64 = 10293847; +// use on tests that fail to save cleanup +const TEST_DONE: u64 = 74839201; + +#[test] +fun test_bridge_create() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge(@0x0); + + let bridge = env.bridge(@0x0); + let inner = bridge.bridge_ref().test_load_inner(); + inner.assert_not_paused(UNEXPECTED_ERROR); + assert!(inner.inner_token_transfer_records().length() == 0); + bridge.return_bridge(); + + env.destroy_env(); +} - abort 0 - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EMalformedMessageError)] - fun test_add_token_malformed_1() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - env.add_tokens( - addr, - false, - vector[test_token_id(), eth_id()], - vector[type_name::get().into_string()], - vector[10], - ); +#[test] +#[expected_failure(abort_code = bridge::bridge::ENotSystemAddress)] +fun test_bridge_create_non_system_addr() { + let mut env = create_env(chain_ids::sui_mainnet()); + env.create_bridge(@0x1); - abort 0 - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EMalformedMessageError)] - fun test_add_token_malformed_2() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - env.add_tokens( - addr, - false, - vector[test_token_id()], - vector[type_name::get().into_string(), type_name::get().into_string()], - vector[10], - ); + abort TEST_DONE +} - abort 0 - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EMalformedMessageError)] - fun test_add_token_malformed_3() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - env.add_tokens( - addr, - false, - vector[test_token_id()], - vector[type_name::get().into_string()], - vector[10, 20], - ); +#[test] +fun test_create_bridge_default() { + let mut env = create_env(chain_ids::sui_custom()); + env.create_bridge_default(); + env.destroy_env(); +} - abort 0 - } - - #[test] - fun 
test_add_native_token_nop() { - // adding a native token is simply a NO-OP at the moment - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - env.add_tokens( - addr, - true, - vector[test_token_id()], - vector[type_name::get().into_string()], - vector[100], - ); - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::treasury::EInvalidUpgradeCap)] - fun test_register_foreign_token_bad_upgrade_cap() { - let addr = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), addr); - env.create_bridge_default(); - let (_upgrade_cap, treasury_cap, metadata) = create_test_token(env.scenario().ctx()); - let upgrade_cap = test_publish(@0x42.to_id(), env.scenario().ctx()); - env.register_foreign_token(treasury_cap, upgrade_cap, metadata, addr); - - abort 0 - } - - #[test] - fun test_execute_send_token() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - let btc: Coin = env.get_btc(1); - let eth_address = x"0000000000000000000000000000000000000000"; - env.send_token(chain_ids::eth_sepolia(), eth_address, btc); - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::ETokenValueIsZero)] - fun test_execute_send_token_zero_value() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - let btc: Coin = env.get_btc(0); - let eth_address = x"0000000000000000000000000000000000000000"; - env.send_token(chain_ids::eth_sepolia(), eth_address, btc); - - abort TEST_DONE - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EInvalidEvmAddress)] - fun test_execute_send_token_invalid_evem_address() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - let btc: Coin = env.get_btc(1); - let eth_address = x"1234"; - env.send_token(chain_ids::eth_sepolia(), eth_address, btc); - - abort TEST_DONE - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EBridgeUnavailable)] - fun test_execute_send_token_frozen() { - let chain_id = chain_ids::sui_testnet(); - let mut env = create_env(chain_id, @0x0); - env.create_bridge_default(); - let eth: Coin = env.get_eth(1); - let eth_address = x"0000000000000000000000000000000000000000"; - env.freeze_bridge(@0x0, UNEXPECTED_ERROR); - env.send_token(chain_ids::eth_sepolia(), eth_address, eth); - - abort TEST_DONE - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EInvalidBridgeRoute)] - fun test_execute_send_token_invalid_route() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - let usdc: Coin = env.get_usdc(100); - let eth_address = x"0000000000000000000000000000000000000000"; - env.send_token(chain_ids::eth_mainnet(), eth_address, usdc); - - abort TEST_DONE - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EUnexpectedChainID)] - fun test_system_msg_incorrect_chain_id() { - let sender = @0x0; - let mut env = create_env(chain_ids::sui_testnet(), sender); - env.create_bridge_default(); - env.execute_blocklist(sender, chain_ids::sui_mainnet(), 0, vector[], vector[]); - - abort TEST_DONE - } - - #[test] - fun test_get_seq_num_and_increment() { - let mut scenario = test_scenario::begin(@0x0); - let ctx = scenario.ctx(); - let chain_id = chain_ids::sui_testnet(); - let mut bridge = new_for_testing(chain_id, ctx); - - let inner = bridge.test_load_inner_mut(); - assert!( - inner.test_get_current_seq_num_and_increment( - message_types::committee_blocklist(), - ) 
== 0, - ); - assert!( - inner.sequence_nums()[&message_types::committee_blocklist()] == 1, - ); - assert!( - inner.test_get_current_seq_num_and_increment( - message_types::committee_blocklist(), - ) == 1, - ); - // other message type nonce does not change - assert!( - !inner.sequence_nums().contains(&message_types::token()), - ); - assert!( - !inner.sequence_nums().contains(&message_types::emergency_op()), - ); - assert!( - !inner.sequence_nums().contains(&message_types::update_bridge_limit()), - ); - assert!( - !inner.sequence_nums().contains(&message_types::update_asset_price()), - ); - assert!( - inner.test_get_current_seq_num_and_increment(message_types::token()) == 0, - ); - assert!( - inner.test_get_current_seq_num_and_increment( - message_types::emergency_op(), - ) == 0, - ); - assert!( - inner.test_get_current_seq_num_and_increment( - message_types::update_bridge_limit(), - ) == 0, - ); - assert!( - inner.test_get_current_seq_num_and_increment( - message_types::update_asset_price(), - ) == 0, - ); +#[test] +fun test_init_committee_twice() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.init_committee(@0x0); // second time is a no-op + + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::ENotSystemAddress)] +fun test_init_committee_non_system_addr() { + let mut env = create_env(chain_ids::sui_mainnet()); + env.setup_validators(vector[ + create_validator(@0xA, 100, &b"12345678901234567890123456789012"), + ]); + env.create_bridge(@0x0); + env.register_committee(); + env.init_committee(@0xA); + + abort TEST_DONE +} + +#[test] +#[expected_failure(abort_code = bridge::committee::ECommitteeAlreadyInitiated)] +fun test_register_committee_after_init() { + let mut env = create_env(chain_ids::sui_custom()); + env.create_bridge_default(); + env.register_committee(); + + abort TEST_DONE +} + +#[test] +fun test_register_foreign_token() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let (upgrade_cap, treasury_cap, metadata) = create_test_token(env + .scenario() + .ctx()); + env.register_foreign_token( + treasury_cap, + upgrade_cap, + metadata, + addr, + ); + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::treasury::ETokenSupplyNonZero)] +fun test_register_foreign_token_non_zero_supply() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let (upgrade_cap, mut treasury_cap, metadata) = create_test_token(env + .scenario() + .ctx()); + let _coin = treasury_cap.mint(1, env.scenario().ctx()); + env.register_foreign_token( + treasury_cap, + upgrade_cap, + metadata, + addr, + ); + + abort 0 +} + +#[test] +#[expected_failure(abort_code = bridge::treasury::EInvalidNotionalValue)] +fun test_add_token_price_zero_value() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.add_tokens( + addr, + false, + vector[test_token_id()], + vector[type_name::get().into_string()], + vector[0], + ); + + abort 0 +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EMalformedMessageError)] +fun test_add_token_malformed_1() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.add_tokens( + addr, + false, + vector[test_token_id(), eth_id()], + vector[type_name::get().into_string()], + vector[10], + ); + + abort 0 +} + +#[test] +#[expected_failure(abort_code = 
bridge::bridge::EMalformedMessageError)] +fun test_add_token_malformed_2() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.add_tokens( + addr, + false, + vector[test_token_id()], + vector[ + type_name::get().into_string(), + type_name::get().into_string(), + ], + vector[10], + ); + + abort 0 +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EMalformedMessageError)] +fun test_add_token_malformed_3() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.add_tokens( + addr, + false, + vector[test_token_id()], + vector[type_name::get().into_string()], + vector[10, 20], + ); + + abort 0 +} + +#[test] +fun test_add_native_token_nop() { + // adding a native token is simply a NO-OP at the moment + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.add_tokens( + addr, + true, + vector[test_token_id()], + vector[type_name::get().into_string()], + vector[100], + ); + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::treasury::EInvalidUpgradeCap)] +fun test_register_foreign_token_bad_upgrade_cap() { + let addr = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let (_upgrade_cap, treasury_cap, metadata) = create_test_token(env + .scenario() + .ctx()); + let upgrade_cap = test_publish(@0x42.to_id(), env.scenario().ctx()); + env.register_foreign_token( + treasury_cap, + upgrade_cap, + metadata, + addr, + ); + + abort 0 +} + +#[test] +fun test_execute_send_token() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let btc: Coin = env.get_btc(1); + let eth_address = x"0000000000000000000000000000000000000000"; + env.send_token(@0xABCD, chain_ids::eth_sepolia(), eth_address, btc); + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::ETokenValueIsZero)] +fun test_execute_send_token_zero_value() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let btc: Coin = env.get_btc(0); + let eth_address = x"0000000000000000000000000000000000000000"; + env.send_token(@0x0, chain_ids::eth_sepolia(), eth_address, btc); + + abort TEST_DONE +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EInvalidEvmAddress)] +fun test_execute_send_token_invalid_evem_address() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let btc: Coin = env.get_btc(1); + let eth_address = x"1234"; + let val_addr = env.validators()[0].addr(); + env.send_token(val_addr, chain_ids::eth_sepolia(), eth_address, btc); + + abort TEST_DONE +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EBridgeUnavailable)] +fun test_execute_send_token_frozen() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + let eth: Coin = env.get_eth(1); + let eth_address = x"0000000000000000000000000000000000000000"; + env.freeze_bridge(@0x0, UNEXPECTED_ERROR); + env.send_token(@0xAAAA, chain_ids::eth_sepolia(), eth_address, eth); + + abort TEST_DONE +} - destroy(bridge); - scenario.end(); - } - - #[test] - fun test_update_limit() { - let chain_id = chain_ids::sui_mainnet(); - let mut env = create_env(chain_id, @0x0); - env.create_bridge_default(); - - let bridge = env.bridge(@0x0); - let inner = bridge.bridge_ref().test_load_inner(); - // Assert the starting limit is a different value - assert!( 
- inner.inner_limiter().get_route_limit( +#[test] +#[expected_failure(abort_code = bridge::bridge::EInvalidBridgeRoute)] +fun test_execute_send_token_invalid_route() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let usdc: Coin = env.get_usdc(100); + let eth_address = x"0000000000000000000000000000000000000000"; + env.send_token(@0xABCDEF, chain_ids::eth_mainnet(), eth_address, usdc); + + abort TEST_DONE +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EUnexpectedChainID)] +fun test_system_msg_incorrect_chain_id() { + let sender = @0x0; + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.execute_blocklist(sender, chain_ids::sui_mainnet(), 0, vector[]); + + abort TEST_DONE +} + +#[test] +fun test_get_seq_num_and_increment() { + let mut scenario = test_scenario::begin(@0x0); + let ctx = scenario.ctx(); + let chain_id = chain_ids::sui_testnet(); + let mut bridge = new_for_testing(chain_id, ctx); + + let inner = bridge.test_load_inner_mut(); + assert!( + inner.test_get_current_seq_num_and_increment( + message_types::committee_blocklist(), + ) == + 0, + ); + assert!( + inner.sequence_nums()[&message_types::committee_blocklist()] == 1, + ); + assert!( + inner.test_get_current_seq_num_and_increment( + message_types::committee_blocklist(), + ) == + 1, + ); + // other message type nonce does not change + assert!( + !inner.sequence_nums().contains(&message_types::token()), + ); + assert!( + !inner.sequence_nums().contains(&message_types::emergency_op()), + ); + assert!( + !inner.sequence_nums().contains(&message_types::update_bridge_limit()), + ); + assert!( + !inner.sequence_nums().contains(&message_types::update_asset_price()), + ); + assert!( + inner.test_get_current_seq_num_and_increment(message_types::token()) == + 0, + ); + assert!( + inner.test_get_current_seq_num_and_increment( + message_types::emergency_op(), + ) == + 0, + ); + assert!( + inner.test_get_current_seq_num_and_increment( + message_types::update_bridge_limit(), + ) == + 0, + ); + assert!( + inner.test_get_current_seq_num_and_increment( + message_types::update_asset_price(), + ) == + 0, + ); + + destroy(bridge); + scenario.end(); +} + +#[test] +fun test_update_limit() { + let chain_id = chain_ids::sui_mainnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + + let bridge = env.bridge(@0x0); + let inner = bridge.bridge_ref().test_load_inner(); + // Assert the starting limit is a different value + assert!( + inner + .inner_limiter() + .get_route_limit( &chain_ids::get_route( chain_ids::eth_mainnet(), chain_ids::sui_mainnet(), ), - ) != 1, - ); - bridge.return_bridge(); - - // update limit - env.update_bridge_limit( - @0x0, - chain_ids::sui_mainnet(), - chain_ids::eth_mainnet(), - 1, - ); - - let bridge = env.bridge(@0x0); - let inner = bridge.bridge_ref().test_load_inner(); - // Assert the starting limit is a different value - assert!( - inner.inner_limiter().get_route_limit( + ) != + 1, + ); + bridge.return_bridge(); + + // update limit + env.update_bridge_limit( + @0x0, + chain_ids::sui_mainnet(), + chain_ids::eth_mainnet(), + 1, + ); + + let bridge = env.bridge(@0x0); + let inner = bridge.bridge_ref().test_load_inner(); + // Assert the starting limit is a different value + assert!( + inner + .inner_limiter() + .get_route_limit( &chain_ids::get_route( chain_ids::eth_mainnet(), chain_ids::sui_mainnet(), ), - ) == 1, - ); - // other routes are not impacted - assert!( - inner.inner_limiter().get_route_limit( + ) == 
+ 1, + ); + // other routes are not impacted + assert!( + inner + .inner_limiter() + .get_route_limit( &chain_ids::get_route( chain_ids::eth_sepolia(), chain_ids::sui_testnet(), ), - ) != 1, - ); - bridge.return_bridge(); - - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EUnexpectedChainID)] - fun test_execute_update_bridge_limit_abort_with_unexpected_chain_id() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - - // This abort because the receiving_chain (sui_mainnet) is not the same as - // the bridge's chain_id (sui_devnet) - env.update_bridge_limit( - @0x0, - chain_ids::sui_mainnet(), - chain_ids::eth_mainnet(), - 1, - ); + ) != + 1, + ); + bridge.return_bridge(); - abort TEST_DONE - } - - #[test] - fun test_update_asset_price() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - let scenario = env.scenario(); - scenario.next_tx(@0x0); - let mut bridge = scenario.take_shared(); - let inner = bridge.test_load_inner_mut(); - - // Assert the starting limit is a different value - assert!( - inner.inner_treasury().notional_value() != 1_001_000_000, - ); - // now change it to 100_001_000 - let msg = message::create_update_asset_price_message( - inner.inner_treasury().token_id(), - chain_ids::sui_mainnet(), - 0, - 1_001_000_000, - ); - let payload = msg.extract_update_asset_price(); - inner.test_execute_update_asset_price(payload); - - // should be 1_001_000_000 now - assert!(inner.inner_treasury().notional_value() == 1_001_000_000); - // other assets are not impacted - assert!(inner.inner_treasury().notional_value() != 1_001_000_000); - - destroy(bridge); - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::treasury::EInvalidNotionalValue)] - fun test_invalid_price_update() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - env.update_asset_price(@0x0, btc_id(), 0); - - abort 0 - } - - #[test] - #[expected_failure(abort_code = bridge::treasury::EUnsupportedTokenType)] - fun test_unsupported_token_type() { - let mut env = create_env(chain_ids::sui_testnet(), @0x0); - env.create_bridge_default(); - env.update_asset_price(@0x0, 42, 100); - - abort 0 - } - - #[test] - fun test_execute_freeze_unfreeze() { - let chain_id = chain_ids::sui_testnet(); - let mut env = create_env(chain_id, @0x0); - env.create_bridge_default(); - env.freeze_bridge(@0x0, UNEXPECTED_ERROR + 1); - let bridge = env.bridge(@0x0); - assert!(bridge.bridge_ref().test_load_inner().inner_paused()); - bridge.return_bridge(); - env.unfreeze_bridge(@0x0, UNEXPECTED_ERROR + 2); - let bridge = env.bridge(@0x0); - assert!(!bridge.bridge_ref().test_load_inner().inner_paused()); - bridge.return_bridge(); - env.destroy_env(); - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EBridgeNotPaused)] - fun test_execute_unfreeze_err() { - let chain_id = chain_ids::sui_testnet(); - let mut env = create_env(chain_id, @0x0); - env.create_bridge_default(); - let bridge = env.bridge(@0x0); - assert!(!bridge.bridge_ref().test_load_inner().inner_paused()); - bridge.return_bridge(); - env.unfreeze_bridge(@0x0, UNEXPECTED_ERROR + 2); - - abort TEST_DONE - } - - #[test] - #[expected_failure(abort_code = bridge::bridge::EBridgeAlreadyPaused)] - fun test_execute_emergency_op_abort_when_already_frozen() { - let chain_id = chain_ids::sui_testnet(); - let mut env = create_env(chain_id, @0x0); - env.create_bridge_default(); - - // initially it's unfrozen 
- let bridge = env.bridge(@0x0); - assert!(!bridge.bridge_ref().test_load_inner().inner_paused()); - bridge.return_bridge(); - // freeze it - env.freeze_bridge(@0x0, UNEXPECTED_ERROR); - let bridge = env.bridge(@0x0); - assert!(bridge.bridge_ref().test_load_inner().inner_paused()); - bridge.return_bridge(); - // freeze it again, should abort - env.freeze_bridge(@0x0, UNEXPECTED_ERROR); - - abort TEST_DONE - } - - #[test] - fun test_get_token_transfer_action_data() { - let mut scenario = test_scenario::begin(@0x0); - let ctx = scenario.ctx(); - let chain_id = chain_ids::sui_testnet(); - let mut bridge = new_for_testing(chain_id, ctx); - let coin = coin::mint_for_testing(12345, ctx); - - // Test when pending - let message = message::create_token_bridge_message( - chain_ids::sui_testnet(), // source chain - 10, // seq_num - address::to_bytes(ctx.sender()), // sender address - chain_ids::eth_sepolia(), // target_chain - hex::decode(b"00000000000000000000000000000000000000c8"), // target_address - 1u8, // token_type - coin.balance().value(), - ); + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EUnexpectedChainID)] +fun test_execute_update_bridge_limit_abort_with_unexpected_chain_id() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + + // This abort because the receiving_chain (sui_mainnet) is not the same as + // the bridge's chain_id (sui_devnet) + env.update_bridge_limit( + @0x0, + chain_ids::sui_mainnet(), + chain_ids::eth_mainnet(), + 1, + ); + + abort TEST_DONE +} + +#[test] +fun test_update_asset_price() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + let scenario = env.scenario(); + scenario.next_tx(@0x0); + let mut bridge = scenario.take_shared(); + let inner = bridge.test_load_inner_mut(); + + // Assert the starting limit is a different value + assert!( + inner.inner_treasury().notional_value() != 1_001_000_000, + ); + // now change it to 100_001_000 + let msg = message::create_update_asset_price_message( + inner.inner_treasury().token_id(), + chain_ids::sui_mainnet(), + 0, + 1_001_000_000, + ); + let payload = msg.extract_update_asset_price(); + inner.test_execute_update_asset_price(payload); + + // should be 1_001_000_000 now + assert!(inner.inner_treasury().notional_value() == 1_001_000_000); + // other assets are not impacted + assert!(inner.inner_treasury().notional_value() != 1_001_000_000); + + destroy(bridge); + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::treasury::EInvalidNotionalValue)] +fun test_invalid_price_update() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.update_asset_price(@0x0, btc_id(), 0); + + abort 0 +} + +#[test] +#[expected_failure(abort_code = bridge::treasury::EUnsupportedTokenType)] +fun test_unsupported_token_type() { + let mut env = create_env(chain_ids::sui_testnet()); + env.create_bridge_default(); + env.update_asset_price(@0x0, 42, 100); - let key = message.key(); - bridge.test_load_inner_mut().inner_token_transfer_records_mut().push_back( + abort 0 +} + +#[test] +fun test_execute_freeze_unfreeze() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + env.freeze_bridge(@0x0, UNEXPECTED_ERROR + 1); + let bridge = env.bridge(@0x0); + assert!(bridge.bridge_ref().test_load_inner().inner_paused()); + bridge.return_bridge(); + env.unfreeze_bridge(@0x0, UNEXPECTED_ERROR + 2); + let bridge = env.bridge(@0x0); 
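+    // after the unpause emergency op, the shared Bridge should report
+    // unpaused again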
+ assert!(!bridge.bridge_ref().test_load_inner().inner_paused()); + bridge.return_bridge(); + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EBridgeNotPaused)] +fun test_execute_unfreeze_err() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + let bridge = env.bridge(@0x0); + assert!(!bridge.bridge_ref().test_load_inner().inner_paused()); + bridge.return_bridge(); + env.unfreeze_bridge(@0x0, UNEXPECTED_ERROR + 2); + + abort TEST_DONE +} + +#[test] +#[expected_failure(abort_code = bridge::bridge::EBridgeAlreadyPaused)] +fun test_execute_emergency_op_abort_when_already_frozen() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + + // initially it's unfrozen + let bridge = env.bridge(@0x0); + assert!(!bridge.bridge_ref().test_load_inner().inner_paused()); + bridge.return_bridge(); + // freeze it + env.freeze_bridge(@0x0, UNEXPECTED_ERROR); + let bridge = env.bridge(@0x0); + assert!(bridge.bridge_ref().test_load_inner().inner_paused()); + bridge.return_bridge(); + // freeze it again, should abort + env.freeze_bridge(@0x0, UNEXPECTED_ERROR); + + abort TEST_DONE +} + +#[test] +fun test_get_token_transfer_action_data() { + let mut scenario = test_scenario::begin(@0x0); + let ctx = scenario.ctx(); + let chain_id = chain_ids::sui_testnet(); + let mut bridge = new_for_testing(chain_id, ctx); + let coin = coin::mint_for_testing(12345, ctx); + + // Test when pending + let message = message::create_token_bridge_message( + chain_ids::sui_testnet(), // source chain + 10, // seq_num + address::to_bytes(ctx.sender()), // sender address + chain_ids::eth_sepolia(), // target_chain + hex::decode( + b"00000000000000000000000000000000000000c8", + ), // target_address + 1u8, // token_type + coin.balance().value(), + ); + + let key = message.key(); + bridge + .test_load_inner_mut() + .inner_token_transfer_records_mut() + .push_back( key, new_bridge_record_for_testing(message, option::none(), false), ); - assert!( - bridge.test_get_token_transfer_action_status(chain_id, 10) - == transfer_status_pending(), - ); - assert!(bridge.test_get_token_transfer_action_signatures(chain_id, 10) == option::none()); - - // Test when ready for claim - let message = message::create_token_bridge_message( - chain_ids::sui_testnet(), // source chain - 11, // seq_num - address::to_bytes(ctx.sender()), // sender address - chain_ids::eth_sepolia(), // target_chain - hex::decode(b"00000000000000000000000000000000000000c8"), // target_address - 1u8, // token_type - balance::value(coin::balance(&coin)) - ); - let key = message.key(); - bridge.test_load_inner_mut().inner_token_transfer_records_mut().push_back( + assert!( + bridge.test_get_token_transfer_action_status(chain_id, 10) == + transfer_status_pending(), + ); + assert!( + bridge.test_get_token_transfer_action_signatures(chain_id, 10) == + option::none(), + ); + + // Test when ready for claim + let message = message::create_token_bridge_message( + chain_ids::sui_testnet(), // source chain + 11, // seq_num + address::to_bytes(ctx.sender()), // sender address + chain_ids::eth_sepolia(), // target_chain + hex::decode( + b"00000000000000000000000000000000000000c8", + ), // target_address + 1u8, // token_type + balance::value(coin::balance(&coin)), + ); + let key = message.key(); + bridge + .test_load_inner_mut() + .inner_token_transfer_records_mut() + .push_back( key, - new_bridge_record_for_testing(message, 
option::some(vector[]), false), - ); - assert!( - bridge.test_get_token_transfer_action_status(chain_id, 11) - == transfer_status_approved(), - ); - assert!( - bridge.test_get_token_transfer_action_signatures(chain_id, 11) - == option::some(vector[]), - ); - assert!( - bridge.test_get_parsed_token_transfer_message(chain_id, 11) - == option::some( - to_parsed_token_transfer_message(&message) + new_bridge_record_for_testing( + message, + option::some(vector[]), + false, ), ); - - // Test when already claimed - let message = message::create_token_bridge_message( - chain_ids::sui_testnet(), // source chain - 12, // seq_num - address::to_bytes(ctx.sender()), // sender address - chain_ids::eth_sepolia(), // target_chain - hex::decode(b"00000000000000000000000000000000000000c8"), // target_address - 1u8, // token_type - balance::value(coin::balance(&coin)) - ); - let key = message.key(); - bridge.test_load_inner_mut().inner_token_transfer_records_mut().push_back( + assert!( + bridge.test_get_token_transfer_action_status(chain_id, 11) == + transfer_status_approved(), + ); + assert!( + bridge.test_get_token_transfer_action_signatures(chain_id, 11) == + option::some(vector[]), + ); + assert!( + bridge.test_get_parsed_token_transfer_message(chain_id, 11) == + option::some( + to_parsed_token_transfer_message(&message), + ), + ); + + // Test when already claimed + let message = message::create_token_bridge_message( + chain_ids::sui_testnet(), // source chain + 12, // seq_num + address::to_bytes(ctx.sender()), // sender address + chain_ids::eth_sepolia(), // target_chain + hex::decode( + b"00000000000000000000000000000000000000c8", + ), // target_address + 1u8, // token_type + balance::value(coin::balance(&coin)), + ); + let key = message.key(); + bridge + .test_load_inner_mut() + .inner_token_transfer_records_mut() + .push_back( key, - new_bridge_record_for_testing(message, option::some(vector[b"1234"]), true), - ); - assert!( - bridge.test_get_token_transfer_action_status(chain_id, 12) - == transfer_status_claimed(), - ); - assert!( - bridge.test_get_token_transfer_action_signatures(chain_id, 12) - == option::some(vector[b"1234"]), - ); - assert!( - bridge.test_get_parsed_token_transfer_message(chain_id, 12) - == option::some( - to_parsed_token_transfer_message(&message) + new_bridge_record_for_testing( + message, + option::some(vector[b"1234"]), + true, ), ); + assert!( + bridge.test_get_token_transfer_action_status(chain_id, 12) == + transfer_status_claimed(), + ); + assert!( + bridge.test_get_token_transfer_action_signatures(chain_id, 12) == + option::some(vector[b"1234"]), + ); + assert!( + bridge.test_get_parsed_token_transfer_message(chain_id, 12) == + option::some( + to_parsed_token_transfer_message(&message), + ), + ); + + // Test when message not found + assert!( + bridge.test_get_token_transfer_action_status(chain_id, 13) == + transfer_status_not_found(), + ); + assert!( + bridge.test_get_token_transfer_action_signatures(chain_id, 13) == + option::none(), + ); + assert!( + bridge.test_get_parsed_token_transfer_message(chain_id, 13) == + option::none(), + ); + + destroy(bridge); + coin.burn_for_testing(); + scenario.end(); +} - // Test when message not found - assert!( - bridge.test_get_token_transfer_action_status(chain_id, 13) - == transfer_status_not_found(), - ); - assert!( - bridge.test_get_token_transfer_action_signatures(chain_id, 13) - == option::none(), - ); - assert!( - bridge.test_get_parsed_token_transfer_message(chain_id, 13) - == option::none(), - ); +#[test] 
+#[expected_failure(abort_code = bridge::treasury::EUnsupportedTokenType)] +fun test_get_metadata_no_token() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + let bridge = env.bridge(@0x0); + let treasury = bridge.bridge_ref().test_load_inner().inner_treasury(); + treasury.notional_value(); + + abort 0 +} + +#[test] +fun change_url() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + let mut bridge = env.bridge(@0xAAAA); + bridge + .bridge_ref_mut() + .update_node_url(b"", env.scenario().ctx()); + bridge.return_bridge(); + env.destroy_env(); +} - destroy(bridge); - coin.burn_for_testing(); - scenario.end(); - } - - #[test] - #[expected_failure(abort_code = bridge::treasury::EUnsupportedTokenType)] - fun test_get_metadata_no_token() { - let chain_id = chain_ids::sui_testnet(); - let mut env = create_env(chain_id, @0x0); - env.create_bridge_default(); - let bridge = env.bridge(@0x0); - let treasury = bridge.bridge_ref().test_load_inner().inner_treasury(); - treasury.notional_value(); - - abort 0 - } +#[test] +#[ + expected_failure( + abort_code = bridge::committee::ESenderIsNotInBridgeCommittee, + ), +] +fun change_url_bad_sender() { + let chain_id = chain_ids::sui_testnet(); + let mut env = create_env(chain_id); + env.create_bridge_default(); + let mut bridge = env.bridge(@0x0); + bridge + .bridge_ref_mut() + .update_node_url(b"", env.scenario().ctx()); + abort 0 } diff --git a/crates/sui-framework/packages/bridge/tests/bridge_txns.move b/crates/sui-framework/packages/bridge/tests/bridge_txns.move new file mode 100644 index 0000000000000..8e2a082f57faa --- /dev/null +++ b/crates/sui-framework/packages/bridge/tests/bridge_txns.move @@ -0,0 +1,329 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +#[test_only] +module bridge::bridge_txns; +use bridge::bridge_env::{ + already_approved, + already_claimed, + approved, + claimed, + create_bridge_default, + create_env, + create_validator, + eth_id, + limit_exceeded, + register_test_token, + test_token_id +}; +use bridge::chain_ids; +use bridge::crypto::ecdsa_pub_key_to_eth_address; +use bridge::eth::ETH; +use bridge::test_token::TEST_TOKEN; +use std::type_name; + +#[test] +fun test_limits() { + let mut env = create_env(chain_ids::sui_custom()); + env.create_bridge_default(); + + let source_chain = chain_ids::eth_custom(); + let sui_address = @0xABCDEF; + let eth_address = x"0000000000000000000000000000000000001234"; + + // lower limits + let chain_id = env.chain_id(); + env.update_bridge_limit(@0x0, chain_id, source_chain, 3000); + let transfer_id1 = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + 4000000000, + ); + let transfer_id2 = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + 1000, + ); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id1) == + limit_exceeded(), + ); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id2) == + claimed(), + ); + // double claim is ok and it is a no-op + assert!( + env.claim_and_transfer_token(source_chain, transfer_id2) == + already_claimed(), + ); + + // up limits to allow claim + env.update_bridge_limit(@0x0, chain_id, source_chain, 4000); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id1) == + claimed(), + ); + + env.destroy_env(); +} + +#[test] +fun test_bridge_and_claim() { + let mut env = create_env(chain_ids::sui_custom()); + env.create_bridge_default(); + + let source_chain = chain_ids::eth_custom(); + let sui_address = @0xABCDEF; + let eth_address = x"0000000000000000000000000000000000001234"; + let amount = 1000; + + // + // move from eth and transfer to sui account + let transfer_id1 = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + amount, + ); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id1) == + claimed(), + ); + let transfer_id2 = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + amount, + ); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id2) == + claimed(), + ); + // double claim is ok and it is a no-op + assert!( + env.claim_and_transfer_token(source_chain, transfer_id2) == + already_claimed(), + ); + + // + // change order + let transfer_id1 = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + amount, + ); + let transfer_id2 = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + amount, + ); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id1) == + claimed(), + ); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id2) == + claimed(), + ); + + // + // move from eth and send it back + let transfer_id = env.bridge_to_sui( + source_chain, + eth_address, + sui_address, + amount, + ); + let token = env.claim_token(sui_address, source_chain, transfer_id); + env.send_token( + sui_address, + source_chain, + eth_address, + token, + ); + + // + // approve with subset of signatures + let message = env.bridge_in_message( + source_chain, + eth_address, + sui_address, + amount, + ); + let signatures = env.sign_message_with(message, vector[0, 2]); + let transfer_id = message.seq_num(); + assert!(env.approve_token_transfer(message, signatures) == approved()); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id) == + 
claimed(), + ); + + // + // multiple approve with subset of signatures + let message = env.bridge_in_message( + source_chain, + eth_address, + sui_address, + amount, + ); + let signatures = env.sign_message_with(message, vector[0, 2]); + let transfer_id = message.seq_num(); + assert!(env.approve_token_transfer(message, signatures) == approved()); + assert!( + env.approve_token_transfer(message, signatures) == already_approved(), + ); + assert!( + env.approve_token_transfer(message, signatures) == already_approved(), + ); + let token = env.claim_token(sui_address, source_chain, transfer_id); + let send_token_id = env.send_token( + sui_address, + source_chain, + eth_address, + token, + ); + let message = env.bridge_out_message( + source_chain, + eth_address, + sui_address, + amount, + send_token_id, + ); + let signatures = env.sign_message_with(message, vector[1, 2]); + assert!(env.approve_token_transfer(message, signatures) == approved()); + let signatures = env.sign_message_with(message, vector[0, 2]); + assert!( + env.approve_token_transfer(message, signatures) == already_approved(), + ); + + // + // multiple approve with different subset of signatures + let message = env.bridge_in_message( + source_chain, + eth_address, + sui_address, + amount, + ); + let transfer_id = message.seq_num(); + let signatures = env.sign_message_with(message, vector[0, 2]); + assert!(env.approve_token_transfer(message, signatures) == approved()); + let signatures = env.sign_message_with(message, vector[0, 1]); + assert!( + env.approve_token_transfer(message, signatures) == already_approved(), + ); + let signatures = env.sign_message_with(message, vector[1, 2]); + assert!( + env.approve_token_transfer(message, signatures) == already_approved(), + ); + let token = env.claim_token(sui_address, source_chain, transfer_id); + env.send_token( + sui_address, + source_chain, + eth_address, + token, + ); + + env.destroy_env(); +} + +#[test] +#[expected_failure(abort_code = bridge::committee::ESignatureBelowThreshold)] +fun test_blocklist() { + let mut env = create_env(chain_ids::sui_custom()); + let validators = vector[ + create_validator(@0xAAAA, 100, &b"1234567890_1234567890_1234567890"), + create_validator(@0xBBBB, 100, &b"234567890_1234567890_1234567890_"), + create_validator(@0xCCCC, 100, &b"34567890_1234567890_1234567890_1"), + create_validator(@0xDDDD, 100, &b"4567890_1234567890_1234567890_12"), + ]; + env.setup_validators(validators); + + let sender = @0x0; + env.create_bridge(sender); + env.register_committee(); + env.init_committee(sender); + env.setup_treasury(sender); + + let source_chain = chain_ids::eth_custom(); + let sui_address = @0xABCDEF; + let eth_address = x"0000000000000000000000000000000000001234"; + let amount = 1000; + + // bridging in and out works + let message = env.bridge_in_message( + source_chain, + eth_address, + sui_address, + amount, + ); + let signatures = env.sign_message_with(message, vector[0, 2]); + let transfer_id = message.seq_num(); + assert!(env.approve_token_transfer(message, signatures) == approved()); + assert!( + env.claim_and_transfer_token(source_chain, transfer_id) == + claimed(), + ); + + // block bridge node 0 + let chain_id = env.chain_id(); + let node_key = ecdsa_pub_key_to_eth_address(env + .validators()[0] + .public_key()); + env.execute_blocklist(@0x0, chain_id, 0, vector[node_key]); + + // signing with 2 valid bridge nodes works + let message = env.bridge_in_message( + source_chain, + eth_address, + sui_address, + amount, + ); + let signatures = 
env.sign_message_with(message, vector[1, 2]); + assert!(env.approve_token_transfer(message, signatures) == approved()); + assert!( + env.approve_token_transfer(message, signatures) == already_approved(), + ); + + // signing with blocked node fails + let message = env.bridge_in_message( + source_chain, + eth_address, + sui_address, + amount, + ); + let signatures = env.sign_message_with(message, vector[0, 2]); + env.approve_token_transfer(message, signatures); + + env.destroy_env(); +} + +#[test] +fun test_system_messages() { + let addr = @0xABCDEF0123; // random address + let mut env = create_env(chain_ids::sui_custom()); + env.create_bridge_default(); + + env.update_asset_price(addr, eth_id(), 735); + + env.register_test_token(); + env.add_tokens( + addr, + false, + vector[test_token_id()], + vector[type_name::get().into_string()], + vector[333], + ); + + let chain_id = env.chain_id(); + let node_key = ecdsa_pub_key_to_eth_address(env + .validators()[0] + .public_key()); + env.execute_blocklist(@0x0, chain_id, 0, vector[node_key]); + + env.destroy_env(); +} diff --git a/crates/sui-framework/packages/move-stdlib/sources/option.move b/crates/sui-framework/packages/move-stdlib/sources/option.move index b0f1862c91ba5..00d5b9a20686f 100644 --- a/crates/sui-framework/packages/move-stdlib/sources/option.move +++ b/crates/sui-framework/packages/move-stdlib/sources/option.move @@ -153,17 +153,14 @@ module std::option { /// Destroy `Option` and call the closure `f` on the value inside if it holds one. public macro fun do<$T>($o: Option<$T>, $f: |$T|) { let o = $o; - if (o.is_some()) { - $f(o.destroy_some()); - } + if (o.is_some()) $f(o.destroy_some()) + else o.destroy_none() } /// Execute a closure on the value inside `t` if it holds one. public macro fun do_ref<$T>($o: &Option<$T>, $f: |&$T|) { let o = $o; - if (o.is_some()) { - $f(o.borrow()); - } + if (o.is_some()) $f(o.borrow()); } /// Execute a closure on the mutable reference to the value inside `t` if it holds one. @@ -176,16 +173,24 @@ module std::option { /// Equivalent to Rust's `a.or(b)`. public macro fun or<$T>($o: Option<$T>, $default: Option<$T>): Option<$T> { let o = $o; - if (o.is_some()) o - else $default + if (o.is_some()) { + o + } else { + o.destroy_none(); + $default + } } /// If the value is `Some`, call the closure `f` on it. Otherwise, return `None`. /// Equivalent to Rust's `t.and_then(f)`. public macro fun and<$T, $U>($o: Option<$T>, $f: |$T| -> Option<$U>): Option<$U> { let o = $o; - if (o.is_some()) $f(o.extract()) - else none() + if (o.is_some()) { + $f(o.destroy_some()) + } else { + o.destroy_none(); + none() + } } /// If the value is `Some`, call the closure `f` on it. Otherwise, return `None`. @@ -199,9 +204,13 @@ module std::option { /// Map an `Option` to `Option` by applying a function to a contained value. /// Equivalent to Rust's `t.map(f)`. public macro fun map<$T, $U>($o: Option<$T>, $f: |$T| -> $U): Option<$U> { - let mut o = $o; - if (o.is_some()) some($f(o.extract())) - else none() + let o = $o; + if (o.is_some()) { + some($f(o.destroy_some())) + } else { + o.destroy_none(); + none() + } } /// Map an `Option` value to `Option` by applying a function to a contained value by reference. @@ -234,7 +243,11 @@ module std::option { /// deprecated in favor of this function. 
public macro fun destroy_or<$T>($o: Option<$T>, $default: $T): $T { let o = $o; - if (o.is_some()) o.destroy_some() - else $default + if (o.is_some()) { + o.destroy_some() + } else { + o.destroy_none(); + $default + } } } diff --git a/crates/sui-framework/packages/move-stdlib/tests/option_tests.move b/crates/sui-framework/packages/move-stdlib/tests/option_tests.move index a8cb8875d5495..18c02dcee2218 100644 --- a/crates/sui-framework/packages/move-stdlib/tests/option_tests.move +++ b/crates/sui-framework/packages/move-stdlib/tests/option_tests.move @@ -172,6 +172,8 @@ module std::option_tests { // === Macros === + public struct NoDrop {} + #[test] fun do_destroy() { let mut counter = 0; @@ -179,6 +181,12 @@ module std::option_tests { option::some(10).do!(|x| counter = counter + x); assert!(counter == 15); + + let some = option::some(NoDrop {}); + let none = option::none(); + + some.do!(|el| { let NoDrop {} = el; }); + none.do!(|el| { let NoDrop {} = el; }); } #[test] @@ -199,6 +207,49 @@ module std::option_tests { assert!(option::none().map_ref!(|x| vector[*x]) == option::none()); } + #[test] + fun map_no_drop() { + let none = option::none().map!(|el| { + let NoDrop {} = el; + 100u64 + }); + let some = option::some(NoDrop {}).map!(|el| { + let NoDrop {} = el; + 100u64 + }); + + assert!(none == option::none()); + assert!(some == option::some(100)); + } + + #[test] + fun or_no_drop() { + let none = option::none().or!(option::some(NoDrop {})); + let some = option::some(NoDrop {}).or!(option::some(NoDrop {})); + + assert!(none.is_some()); + assert!(some.is_some()); + + let NoDrop {} = none.destroy_some(); + let NoDrop {} = some.destroy_some(); + } + + #[test] + fun and_no_drop() { + let none = option::none().and!(|e| { + let NoDrop {} = e; + option::some(100) + }); + + let some = option::some(NoDrop {}).and!(|e| { + let NoDrop {} = e; + option::some(100) + }); + + assert!(some == option::some(100)); + assert!(none == option::none()); + } + #[test] fun filter() { assert!(option::some(5).filter!(|x| *x == 5) == option::some(5)); @@ -217,4 +268,13 @@ module std::option_tests { assert!(option::none().destroy_or!(10) == 10); assert!(option::some(5).destroy_or!(10) == 5); } + + #[test] + fun destroy_or_no_drop() { + let none = option::none().destroy_or!(NoDrop {}); + let some = option::some(NoDrop {}).destroy_or!(NoDrop {}); + + let NoDrop {} = some; + let NoDrop {} = none; + } } diff --git a/crates/sui-framework/packages_compiled/sui-framework b/crates/sui-framework/packages_compiled/sui-framework index 7e808acfe68d4..fa0f19b2b4ab5 100644 Binary files a/crates/sui-framework/packages_compiled/sui-framework and b/crates/sui-framework/packages_compiled/sui-framework differ diff --git a/crates/sui-framework/src/lib.rs b/crates/sui-framework/src/lib.rs index dfc8679cd2fc6..f51f746585acb 100644 --- a/crates/sui-framework/src/lib.rs +++ b/crates/sui-framework/src/lib.rs @@ -256,9 +256,7 @@ pub async fn compare_system_package( let mut new_normalized = new_pkg.normalize(binary_config).ok()?; for (name, cur_module) in cur_normalized { - let Some(new_module) = new_normalized.remove(&name) else { - return None; - }; + let new_module = new_normalized.remove(&name)?; if let Err(e) = compatibility.check(&cur_module, &new_module) { error!("Compatibility check failed, for new version of {id}::{name}: {e:?}"); diff --git a/crates/sui-genesis-builder/Cargo.toml b/crates/sui-genesis-builder/Cargo.toml index 32e62bf08712b..c439f28b4caf7 100644 --- a/crates/sui-genesis-builder/Cargo.toml +++ 
b/crates/sui-genesis-builder/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anyhow.workspace = true bcs.workspace = true diff --git a/crates/sui-genesis-builder/src/lib.rs b/crates/sui-genesis-builder/src/lib.rs index fbab297181074..b0925b19951f3 100644 --- a/crates/sui-genesis-builder/src/lib.rs +++ b/crates/sui-genesis-builder/src/lib.rs @@ -693,11 +693,11 @@ fn create_genesis_context( ) -> TxContext { let mut hasher = DefaultHash::default(); hasher.update(b"sui-genesis"); - hasher.update(&bcs::to_bytes(genesis_chain_parameters).unwrap()); - hasher.update(&bcs::to_bytes(genesis_validators).unwrap()); - hasher.update(&bcs::to_bytes(token_distribution_schedule).unwrap()); + hasher.update(bcs::to_bytes(genesis_chain_parameters).unwrap()); + hasher.update(bcs::to_bytes(genesis_validators).unwrap()); + hasher.update(bcs::to_bytes(token_distribution_schedule).unwrap()); for system_package in system_packages { - hasher.update(&bcs::to_bytes(system_package.bytes()).unwrap()); + hasher.update(bcs::to_bytes(system_package.bytes()).unwrap()); } let hash = hasher.finalize(); diff --git a/crates/sui-graphql-e2e-tests/Cargo.toml b/crates/sui-graphql-e2e-tests/Cargo.toml index fb8642a2a404e..1bc925321f16e 100644 --- a/crates/sui-graphql-e2e-tests/Cargo.toml +++ b/crates/sui-graphql-e2e-tests/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dev-dependencies] datatest-stable.workspace = true sui-graphql-rpc.workspace = true diff --git a/crates/sui-graphql-e2e-tests/tests/consistency/balances.exp b/crates/sui-graphql-e2e-tests/tests/consistency/balances.exp index 0420427fbe84c..e4529876bfe9b 100644 --- a/crates/sui-graphql-e2e-tests/tests/consistency/balances.exp +++ b/crates/sui-graphql-e2e-tests/tests/consistency/balances.exp @@ -59,7 +59,7 @@ task 15, line 76: Checkpoint created: 7 task 16, lines 78-99: -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -95,7 +95,7 @@ Response: { } task 17, lines 101-122: -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -131,7 +131,7 @@ Response: { } task 18, lines 124-145: -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -175,7 +175,7 @@ task 20, line 149: Checkpoint created: 8 task 21, lines 151-172: -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -211,7 +211,7 @@ Response: { } task 22, lines 174-195: -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -247,7 +247,7 @@ Response: { } task 23, lines 197-218: -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -291,7 +291,7 @@ task 25, line 222: Checkpoint created: 9 task 26, lines 224-245: -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -326,7 +326,7 @@ Response: { } task 27, lines 247-268: -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} Response: { "data": { 
"transactionBlocks": { @@ -362,7 +362,7 @@ Response: { } task 28, lines 270-291: -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -406,7 +406,7 @@ task 30, line 296: Checkpoint created: 10 task 31, lines 298-319: -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -441,7 +441,7 @@ Response: { } task 32, lines 321-342: -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} Response: { "data": { "transactionBlocks": { @@ -476,7 +476,7 @@ Response: { } task 33, lines 344-365: -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} Response: { "data": { "transactionBlocks": { diff --git a/crates/sui-graphql-e2e-tests/tests/consistency/balances.move b/crates/sui-graphql-e2e-tests/tests/consistency/balances.move index 70059773d3946..48bb049a865ab 100644 --- a/crates/sui-graphql-e2e-tests/tests/consistency/balances.move +++ b/crates/sui-graphql-e2e-tests/tests/consistency/balances.move @@ -75,7 +75,7 @@ module P0::fake { //# create-checkpoint -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 2. Fake coin balance should be 700. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -98,7 +98,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 3. Fake coin balance should be 500. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -121,7 +121,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 4. Fake coin balance should be 400. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -148,7 +148,7 @@ module P0::fake { //# create-checkpoint -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 2. Fake coin balance should be 700. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -171,7 +171,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 3. Fake coin balance should be 500. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -194,7 +194,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 4. Fake coin balance should be 400. 
{ transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -221,7 +221,7 @@ module P0::fake { //# create-checkpoint -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} # Outside available range { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -244,7 +244,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 3. Fake coin balance should be 500. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -267,7 +267,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} # Emulating viewing transaction blocks at checkpoint 4. Fake coin balance should be 400. { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -295,7 +295,7 @@ module P0::fake { //# create-checkpoint -//# run-graphql --cursors {"c":2,"t":1,"tc":1} +//# run-graphql --cursors {"c":2,"t":1,"i":false} # Outside available range { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -318,7 +318,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":3,"t":1,"tc":1} +//# run-graphql --cursors {"c":3,"t":1,"i":false} # Outside available range { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { @@ -341,7 +341,7 @@ module P0::fake { } } -//# run-graphql --cursors {"c":4,"t":1,"tc":1} +//# run-graphql --cursors {"c":4,"t":1,"i":false} # Outside available range { transactionBlocks(first: 1, after: "@{cursor_0}", filter: {signAddress: "@{A}"}) { diff --git a/crates/sui-graphql-e2e-tests/tests/consistency/checkpoints/transaction_blocks.exp b/crates/sui-graphql-e2e-tests/tests/consistency/checkpoints/transaction_blocks.exp index a02e97976ee4a..add91acf3898d 100644 --- a/crates/sui-graphql-e2e-tests/tests/consistency/checkpoints/transaction_blocks.exp +++ b/crates/sui-graphql-e2e-tests/tests/consistency/checkpoints/transaction_blocks.exp @@ -92,7 +92,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjozLCJ0IjoyLCJ0YyI6MX0", + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs", "sender": { @@ -107,7 +107,7 @@ Response: { } }, { - "cursor": "eyJjIjozLCJ0IjozLCJ0YyI6MX0", + "cursor": "eyJjIjozLCJ0IjozLCJpIjpmYWxzZX0", "node": { "digest": "H1WU8uXMGaENQs54EpoHGpV1iMYdH8P5scd1d16s9ECB", "sender": { @@ -122,7 +122,7 @@ Response: { } }, { - "cursor": "eyJjIjozLCJ0Ijo0LCJ0YyI6MX0", + "cursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", "node": { "digest": "4vJbSYKwEJb5sYU2jiayqsZNRnBywD8y6sd3RQoMppF9", "sender": { @@ -137,7 +137,7 @@ Response: { } }, { - "cursor": "eyJjIjozLCJ0Ijo1LCJ0YyI6MX0", + "cursor": "eyJjIjozLCJ0Ijo1LCJpIjpmYWxzZX0", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB", "sender": { @@ -159,7 +159,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjozLCJ0Ijo2LCJ0YyI6Mn0", + "cursor": "eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0", "node": { "digest": "JLAF7P6DumC8rgzT1Ygp2QgTwpHE2FUqQbVXL6cGEEQ", "sender": { @@ -174,7 +174,7 @@ Response: { } }, { - "cursor": "eyJjIjozLCJ0Ijo3LCJ0YyI6Mn0", + "cursor": "eyJjIjozLCJ0Ijo3LCJpIjpmYWxzZX0", "node": { "digest": "BVMVdn7DDpTbCjtYwWFekcFA9sNeMgDh1wTNWRrngZxh", "sender": { @@ -189,7 +189,7 @@ Response: { } }, { - "cursor": "eyJjIjozLCJ0Ijo4LCJ0YyI6Mn0", + 
"cursor": "eyJjIjozLCJ0Ijo4LCJpIjpmYWxzZX0", "node": { "digest": "4J5tno4AoU4NPS2NgEseAZK7cpLDh6KJduVtbtwzmHk5", "sender": { @@ -211,7 +211,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjozLCJ0Ijo5LCJ0YyI6M30", + "cursor": "eyJjIjozLCJ0Ijo5LCJpIjpmYWxzZX0", "node": { "digest": "5BCS9sencxEJRJHBBPeGhx3rWutYoGSuLFCmnMAaYcDm", "sender": { @@ -226,7 +226,7 @@ Response: { } }, { - "cursor": "eyJjIjozLCJ0IjoxMCwidGMiOjN9", + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", "node": { "digest": "HQYJnLLcGf4DwgTkpqF4zHbQsLHwc1s4WbQ3Xr5BBaxh", "sender": { diff --git a/crates/sui-graphql-e2e-tests/tests/consistency/dynamic_fields/dynamic_fields.exp b/crates/sui-graphql-e2e-tests/tests/consistency/dynamic_fields/dynamic_fields.exp index afbfbb96ca3a1..51b7b407d25ad 100644 --- a/crates/sui-graphql-e2e-tests/tests/consistency/dynamic_fields/dynamic_fields.exp +++ b/crates/sui-graphql-e2e-tests/tests/consistency/dynamic_fields/dynamic_fields.exp @@ -1166,7 +1166,17 @@ task 34, lines 497-528: Response: { "data": { "parent_version_4": { - "dfAtParentVersion4_outside_range": null + "dfAtParentVersion4_outside_range": { + "name": { + "bcs": "A2RmMQ==", + "type": { + "repr": "0x0000000000000000000000000000000000000000000000000000000000000001::string::String" + } + }, + "value": { + "json": "df1" + } + } }, "parent_version_6": { "dfAtParentVersion6": null diff --git a/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.exp b/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.exp index 721c1e0dafaf0..7b25ef908d6bf 100644 --- a/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.exp +++ b/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.exp @@ -39,25 +39,25 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjozLCJ0IjowLCJ0YyI6MH0", + "cursor": "eyJjIjozLCJ0IjowLCJpIjpmYWxzZX0", "node": { "digest": "J7mHXcoa7LXwyjzZUWsk8zvYZjek359TM4d2hQK4LGHo" } }, { - "cursor": "eyJjIjozLCJ0IjoxLCJ0YyI6MX0", + "cursor": "eyJjIjozLCJ0IjoxLCJpIjpmYWxzZX0", "node": { "digest": "J1pYPDrTgsKgzB8XWtW8jLJ8RPsbJcC1SQ4Mv2T1hAWt" } }, { - "cursor": "eyJjIjozLCJ0IjoyLCJ0YyI6Mn0", + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } }, { - "cursor": "eyJjIjozLCJ0IjozLCJ0YyI6M30", + "cursor": "eyJjIjozLCJ0IjozLCJpIjpmYWxzZX0", "node": { "digest": "Bym7b7ELP77KxVHtgj6F4FB7H6n5LYQuBQYmdvvFxEmM" } @@ -141,7 +141,7 @@ task 21, line 91: Epoch advanced: 3 task 22, lines 93-157: -//# run-graphql --cursors {"t":3,"tc":3,"c":4} {"t":7,"tc":7,"c":8} {"t":11,"tc":11,"c":12} +//# run-graphql --cursors {"t":3,"i":false,"c":4} {"t":7,"i":false,"c":8} {"t":11,"i":false,"c":12} Response: { "data": { "checkpoint": { @@ -152,25 +152,25 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoxMiwidCI6MCwidGMiOjB9", + "cursor": "eyJjIjoxMiwidCI6MCwiaSI6ZmFsc2V9", "node": { "digest": "J7mHXcoa7LXwyjzZUWsk8zvYZjek359TM4d2hQK4LGHo" } }, { - "cursor": "eyJjIjoxMiwidCI6MSwidGMiOjF9", + "cursor": "eyJjIjoxMiwidCI6MSwiaSI6ZmFsc2V9", "node": { "digest": "J1pYPDrTgsKgzB8XWtW8jLJ8RPsbJcC1SQ4Mv2T1hAWt" } }, { - "cursor": "eyJjIjoxMiwidCI6MiwidGMiOjJ9", + "cursor": "eyJjIjoxMiwidCI6MiwiaSI6ZmFsc2V9", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } }, { - "cursor": "eyJjIjoxMiwidCI6MywidGMiOjN9", + "cursor": "eyJjIjoxMiwidCI6MywiaSI6ZmFsc2V9", "node": { "digest": "Bym7b7ELP77KxVHtgj6F4FB7H6n5LYQuBQYmdvvFxEmM" } @@ -181,19 +181,19 @@ Response: { 
"txs_epoch_0": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjowLCJ0YyI6MH0", + "cursor": "eyJjIjo0LCJ0IjowLCJpIjpmYWxzZX0", "node": { "digest": "J7mHXcoa7LXwyjzZUWsk8zvYZjek359TM4d2hQK4LGHo" } }, { - "cursor": "eyJjIjo0LCJ0IjoxLCJ0YyI6MX0", + "cursor": "eyJjIjo0LCJ0IjoxLCJpIjpmYWxzZX0", "node": { "digest": "J1pYPDrTgsKgzB8XWtW8jLJ8RPsbJcC1SQ4Mv2T1hAWt" } }, { - "cursor": "eyJjIjo0LCJ0IjoyLCJ0YyI6Mn0", + "cursor": "eyJjIjo0LCJ0IjoyLCJpIjpmYWxzZX0", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } @@ -205,25 +205,25 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoxMiwidCI6NCwidGMiOjR9", + "cursor": "eyJjIjoxMiwidCI6NCwiaSI6ZmFsc2V9", "node": { "digest": "H1WU8uXMGaENQs54EpoHGpV1iMYdH8P5scd1d16s9ECB" } }, { - "cursor": "eyJjIjoxMiwidCI6NSwidGMiOjV9", + "cursor": "eyJjIjoxMiwidCI6NSwiaSI6ZmFsc2V9", "node": { "digest": "4vJbSYKwEJb5sYU2jiayqsZNRnBywD8y6sd3RQoMppF9" } }, { - "cursor": "eyJjIjoxMiwidCI6NiwidGMiOjZ9", + "cursor": "eyJjIjoxMiwidCI6NiwiaSI6ZmFsc2V9", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB" } }, { - "cursor": "eyJjIjoxMiwidCI6NywidGMiOjd9", + "cursor": "eyJjIjoxMiwidCI6NywiaSI6ZmFsc2V9", "node": { "digest": "D251V1BnvyRKNFZmiFxaf7gSZLGdLo8fYbbVDb5vJWfd" } @@ -234,43 +234,43 @@ Response: { "txs_epoch_1": { "edges": [ { - "cursor": "eyJjIjo4LCJ0IjowLCJ0YyI6MH0", + "cursor": "eyJjIjo4LCJ0IjowLCJpIjpmYWxzZX0", "node": { "digest": "J7mHXcoa7LXwyjzZUWsk8zvYZjek359TM4d2hQK4LGHo" } }, { - "cursor": "eyJjIjo4LCJ0IjoxLCJ0YyI6MX0", + "cursor": "eyJjIjo4LCJ0IjoxLCJpIjpmYWxzZX0", "node": { "digest": "J1pYPDrTgsKgzB8XWtW8jLJ8RPsbJcC1SQ4Mv2T1hAWt" } }, { - "cursor": "eyJjIjo4LCJ0IjoyLCJ0YyI6Mn0", + "cursor": "eyJjIjo4LCJ0IjoyLCJpIjpmYWxzZX0", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } }, { - "cursor": "eyJjIjo4LCJ0IjozLCJ0YyI6M30", + "cursor": "eyJjIjo4LCJ0IjozLCJpIjpmYWxzZX0", "node": { "digest": "Bym7b7ELP77KxVHtgj6F4FB7H6n5LYQuBQYmdvvFxEmM" } }, { - "cursor": "eyJjIjo4LCJ0Ijo0LCJ0YyI6NH0", + "cursor": "eyJjIjo4LCJ0Ijo0LCJpIjpmYWxzZX0", "node": { "digest": "H1WU8uXMGaENQs54EpoHGpV1iMYdH8P5scd1d16s9ECB" } }, { - "cursor": "eyJjIjo4LCJ0Ijo1LCJ0YyI6NX0", + "cursor": "eyJjIjo4LCJ0Ijo1LCJpIjpmYWxzZX0", "node": { "digest": "4vJbSYKwEJb5sYU2jiayqsZNRnBywD8y6sd3RQoMppF9" } }, { - "cursor": "eyJjIjo4LCJ0Ijo2LCJ0YyI6Nn0", + "cursor": "eyJjIjo4LCJ0Ijo2LCJpIjpmYWxzZX0", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB" } @@ -282,25 +282,25 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoxMiwidCI6OCwidGMiOjh9", + "cursor": "eyJjIjoxMiwidCI6OCwiaSI6ZmFsc2V9", "node": { "digest": "JLAF7P6DumC8rgzT1Ygp2QgTwpHE2FUqQbVXL6cGEEQ" } }, { - "cursor": "eyJjIjoxMiwidCI6OSwidGMiOjl9", + "cursor": "eyJjIjoxMiwidCI6OSwiaSI6ZmFsc2V9", "node": { "digest": "BVMVdn7DDpTbCjtYwWFekcFA9sNeMgDh1wTNWRrngZxh" } }, { - "cursor": "eyJjIjoxMiwidCI6MTAsInRjIjoxMH0", + "cursor": "eyJjIjoxMiwidCI6MTAsImkiOmZhbHNlfQ", "node": { "digest": "4J5tno4AoU4NPS2NgEseAZK7cpLDh6KJduVtbtwzmHk5" } }, { - "cursor": "eyJjIjoxMiwidCI6MTEsInRjIjoxMX0", + "cursor": "eyJjIjoxMiwidCI6MTEsImkiOmZhbHNlfQ", "node": { "digest": "GngPX2ztACkKE96VUfoujZ3vA11MMDhPSwwgKhK7hVa" } @@ -311,67 +311,67 @@ Response: { "txs_epoch_2": { "edges": [ { - "cursor": "eyJjIjoxMiwidCI6MCwidGMiOjB9", + "cursor": "eyJjIjoxMiwidCI6MCwiaSI6ZmFsc2V9", "node": { "digest": "J7mHXcoa7LXwyjzZUWsk8zvYZjek359TM4d2hQK4LGHo" } }, { - "cursor": "eyJjIjoxMiwidCI6MSwidGMiOjF9", + "cursor": "eyJjIjoxMiwidCI6MSwiaSI6ZmFsc2V9", "node": { "digest": 
"J1pYPDrTgsKgzB8XWtW8jLJ8RPsbJcC1SQ4Mv2T1hAWt" } }, { - "cursor": "eyJjIjoxMiwidCI6MiwidGMiOjJ9", + "cursor": "eyJjIjoxMiwidCI6MiwiaSI6ZmFsc2V9", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } }, { - "cursor": "eyJjIjoxMiwidCI6MywidGMiOjN9", + "cursor": "eyJjIjoxMiwidCI6MywiaSI6ZmFsc2V9", "node": { "digest": "Bym7b7ELP77KxVHtgj6F4FB7H6n5LYQuBQYmdvvFxEmM" } }, { - "cursor": "eyJjIjoxMiwidCI6NCwidGMiOjR9", + "cursor": "eyJjIjoxMiwidCI6NCwiaSI6ZmFsc2V9", "node": { "digest": "H1WU8uXMGaENQs54EpoHGpV1iMYdH8P5scd1d16s9ECB" } }, { - "cursor": "eyJjIjoxMiwidCI6NSwidGMiOjV9", + "cursor": "eyJjIjoxMiwidCI6NSwiaSI6ZmFsc2V9", "node": { "digest": "4vJbSYKwEJb5sYU2jiayqsZNRnBywD8y6sd3RQoMppF9" } }, { - "cursor": "eyJjIjoxMiwidCI6NiwidGMiOjZ9", + "cursor": "eyJjIjoxMiwidCI6NiwiaSI6ZmFsc2V9", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB" } }, { - "cursor": "eyJjIjoxMiwidCI6NywidGMiOjd9", + "cursor": "eyJjIjoxMiwidCI6NywiaSI6ZmFsc2V9", "node": { "digest": "D251V1BnvyRKNFZmiFxaf7gSZLGdLo8fYbbVDb5vJWfd" } }, { - "cursor": "eyJjIjoxMiwidCI6OCwidGMiOjh9", + "cursor": "eyJjIjoxMiwidCI6OCwiaSI6ZmFsc2V9", "node": { "digest": "JLAF7P6DumC8rgzT1Ygp2QgTwpHE2FUqQbVXL6cGEEQ" } }, { - "cursor": "eyJjIjoxMiwidCI6OSwidGMiOjl9", + "cursor": "eyJjIjoxMiwidCI6OSwiaSI6ZmFsc2V9", "node": { "digest": "BVMVdn7DDpTbCjtYwWFekcFA9sNeMgDh1wTNWRrngZxh" } }, { - "cursor": "eyJjIjoxMiwidCI6MTAsInRjIjoxMH0", + "cursor": "eyJjIjoxMiwidCI6MTAsImkiOmZhbHNlfQ", "node": { "digest": "4J5tno4AoU4NPS2NgEseAZK7cpLDh6KJduVtbtwzmHk5" } @@ -382,7 +382,7 @@ Response: { } task 23, lines 159-199: -//# run-graphql --cursors {"t":0,"tc":0,"c":7} {"t":4,"tc":4,"c":11} {"t":8,"tc":8,"c":12} +//# run-graphql --cursors {"t":0,"i":false,"c":7} {"t":4,"i":false,"c":11} {"t":8,"i":false,"c":12} Response: { "data": { "checkpoint": { @@ -393,19 +393,19 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo3LCJ0IjoxLCJ0YyI6MX0", + "cursor": "eyJjIjo3LCJ0IjoxLCJpIjpmYWxzZX0", "node": { "digest": "J1pYPDrTgsKgzB8XWtW8jLJ8RPsbJcC1SQ4Mv2T1hAWt" } }, { - "cursor": "eyJjIjo3LCJ0IjoyLCJ0YyI6Mn0", + "cursor": "eyJjIjo3LCJ0IjoyLCJpIjpmYWxzZX0", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } }, { - "cursor": "eyJjIjo3LCJ0IjozLCJ0YyI6M30", + "cursor": "eyJjIjo3LCJ0IjozLCJpIjpmYWxzZX0", "node": { "digest": "Bym7b7ELP77KxVHtgj6F4FB7H6n5LYQuBQYmdvvFxEmM" } @@ -418,19 +418,19 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoxMSwidCI6NSwidGMiOjV9", + "cursor": "eyJjIjoxMSwidCI6NSwiaSI6ZmFsc2V9", "node": { "digest": "4vJbSYKwEJb5sYU2jiayqsZNRnBywD8y6sd3RQoMppF9" } }, { - "cursor": "eyJjIjoxMSwidCI6NiwidGMiOjZ9", + "cursor": "eyJjIjoxMSwidCI6NiwiaSI6ZmFsc2V9", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB" } }, { - "cursor": "eyJjIjoxMSwidCI6NywidGMiOjd9", + "cursor": "eyJjIjoxMSwidCI6NywiaSI6ZmFsc2V9", "node": { "digest": "D251V1BnvyRKNFZmiFxaf7gSZLGdLo8fYbbVDb5vJWfd" } @@ -443,19 +443,19 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoxMiwidCI6OSwidGMiOjl9", + "cursor": "eyJjIjoxMiwidCI6OSwiaSI6ZmFsc2V9", "node": { "digest": "BVMVdn7DDpTbCjtYwWFekcFA9sNeMgDh1wTNWRrngZxh" } }, { - "cursor": "eyJjIjoxMiwidCI6MTAsInRjIjoxMH0", + "cursor": "eyJjIjoxMiwidCI6MTAsImkiOmZhbHNlfQ", "node": { "digest": "4J5tno4AoU4NPS2NgEseAZK7cpLDh6KJduVtbtwzmHk5" } }, { - "cursor": "eyJjIjoxMiwidCI6MTEsInRjIjoxMX0", + "cursor": "eyJjIjoxMiwidCI6MTEsImkiOmZhbHNlfQ", "node": { "digest": "GngPX2ztACkKE96VUfoujZ3vA11MMDhPSwwgKhK7hVa" } @@ -467,7 +467,7 @@ 
Response: { } task 24, lines 201-241: -//# run-graphql --cursors {"t":1,"tc":1,"c":2} {"t":5,"tc":5,"c":6} {"t":9,"tc":9,"c":10} +//# run-graphql --cursors {"t":1,"i":false,"c":2} {"t":5,"i":false,"c":6} {"t":9,"i":false,"c":10} Response: { "data": { "checkpoint": { @@ -478,7 +478,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoyLCJ0IjoyLCJ0YyI6Mn0", + "cursor": "eyJjIjoyLCJ0IjoyLCJpIjpmYWxzZX0", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs" } @@ -491,7 +491,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo2LCJ0Ijo2LCJ0YyI6Nn0", + "cursor": "eyJjIjo2LCJ0Ijo2LCJpIjpmYWxzZX0", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB" } @@ -504,7 +504,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjoxMCwidCI6MTAsInRjIjoxMH0", + "cursor": "eyJjIjoxMCwidCI6MTAsImkiOmZhbHNlfQ", "node": { "digest": "4J5tno4AoU4NPS2NgEseAZK7cpLDh6KJduVtbtwzmHk5" } @@ -516,7 +516,7 @@ Response: { } task 25, lines 243-282: -//# run-graphql --cursors {"t":5,"tc":5,"c":6} +//# run-graphql --cursors {"t":5,"i":false,"c":6} Response: { "data": { "checkpoint": { @@ -525,7 +525,7 @@ Response: { "with_cursor": { "edges": [ { - "cursor": "eyJjIjo2LCJ0Ijo2LCJ0YyI6Nn0", + "cursor": "eyJjIjo2LCJ0Ijo2LCJpIjpmYWxzZX0", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB", "sender": { @@ -556,7 +556,7 @@ Response: { "without_cursor": { "edges": [ { - "cursor": "eyJjIjoxMiwidCI6MiwidGMiOjJ9", + "cursor": "eyJjIjoxMiwidCI6MiwiaSI6ZmFsc2V9", "node": { "digest": "Cwqr9jTgQjajoYaqcjzAaQGcQEyCg8XxoN7smGCLiBrs", "sender": { @@ -592,7 +592,7 @@ Response: { } }, { - "cursor": "eyJjIjoxMiwidCI6NCwidGMiOjR9", + "cursor": "eyJjIjoxMiwidCI6NCwiaSI6ZmFsc2V9", "node": { "digest": "H1WU8uXMGaENQs54EpoHGpV1iMYdH8P5scd1d16s9ECB", "sender": { @@ -628,7 +628,7 @@ Response: { } }, { - "cursor": "eyJjIjoxMiwidCI6NSwidGMiOjV9", + "cursor": "eyJjIjoxMiwidCI6NSwiaSI6ZmFsc2V9", "node": { "digest": "4vJbSYKwEJb5sYU2jiayqsZNRnBywD8y6sd3RQoMppF9", "sender": { @@ -664,7 +664,7 @@ Response: { } }, { - "cursor": "eyJjIjoxMiwidCI6NiwidGMiOjZ9", + "cursor": "eyJjIjoxMiwidCI6NiwiaSI6ZmFsc2V9", "node": { "digest": "4W23PZz7dHVxoZ2VMCWU9j38Jxy7tLkqcFBcJUB3aCSB", "sender": { @@ -700,7 +700,7 @@ Response: { } }, { - "cursor": "eyJjIjoxMiwidCI6OCwidGMiOjh9", + "cursor": "eyJjIjoxMiwidCI6OCwiaSI6ZmFsc2V9", "node": { "digest": "JLAF7P6DumC8rgzT1Ygp2QgTwpHE2FUqQbVXL6cGEEQ", "sender": { @@ -736,7 +736,7 @@ Response: { } }, { - "cursor": "eyJjIjoxMiwidCI6OSwidGMiOjl9", + "cursor": "eyJjIjoxMiwidCI6OSwiaSI6ZmFsc2V9", "node": { "digest": "BVMVdn7DDpTbCjtYwWFekcFA9sNeMgDh1wTNWRrngZxh", "sender": { @@ -772,7 +772,7 @@ Response: { } }, { - "cursor": "eyJjIjoxMiwidCI6MTAsInRjIjoxMH0", + "cursor": "eyJjIjoxMiwidCI6MTAsImkiOmZhbHNlfQ", "node": { "digest": "4J5tno4AoU4NPS2NgEseAZK7cpLDh6KJduVtbtwzmHk5", "sender": { diff --git a/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.move b/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.move index 39c8368818df3..425849aef9e16 100644 --- a/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.move +++ b/crates/sui-graphql-e2e-tests/tests/consistency/epochs/transaction_blocks.move @@ -90,7 +90,7 @@ module Test::M1 { //# advance-epoch -//# run-graphql --cursors {"t":3,"tc":3,"c":4} {"t":7,"tc":7,"c":8} {"t":11,"tc":11,"c":12} +//# run-graphql --cursors {"t":3,"i":false,"c":4} {"t":7,"i":false,"c":8} {"t":11,"i":false,"c":12} # View transactions before the last 
transaction in each epoch, from the perspective of the first # checkpoint in the next epoch. { @@ -156,7 +156,7 @@ module Test::M1 { } } -//# run-graphql --cursors {"t":0,"tc":0,"c":7} {"t":4,"tc":4,"c":11} {"t":8,"tc":8,"c":12} +//# run-graphql --cursors {"t":0,"i":false,"c":7} {"t":4,"i":false,"c":11} {"t":8,"i":false,"c":12} # View transactions after the first transaction in each epoch, from the perspective of the last # checkpoint in the next epoch. { @@ -198,7 +198,7 @@ module Test::M1 { } } -//# run-graphql --cursors {"t":1,"tc":1,"c":2} {"t":5,"tc":5,"c":6} {"t":9,"tc":9,"c":10} +//# run-graphql --cursors {"t":1,"i":false,"c":2} {"t":5,"i":false,"c":6} {"t":9,"i":false,"c":10} # View transactions after the second transaction in each epoch, from the perspective of a checkpoint # around the middle of each epoch. { @@ -240,7 +240,7 @@ module Test::M1 { } } -//# run-graphql --cursors {"t":5,"tc":5,"c":6} +//# run-graphql --cursors {"t":5,"i":false,"c":6} # Verify that with a cursor, we are locked into a view as if we were at the checkpoint stored in # the cursor. Compare against `without_cursor`, which should show the latest state at the actual # latest checkpoint. There should only be 1 transaction block in the `with_cursor` query, but diff --git a/crates/sui-graphql-e2e-tests/tests/epoch/protocol_configs.exp b/crates/sui-graphql-e2e-tests/tests/epoch/protocol_configs.exp new file mode 100644 index 0000000000000..e237e6000cd9a --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/epoch/protocol_configs.exp @@ -0,0 +1,40 @@ +processed 4 tasks + +init: +C: object(0,0) + +task 1, line 6: +//# create-checkpoint +Checkpoint created: 1 + +task 2, lines 8-19: +//# run-graphql +Response: { + "data": { + "protocolConfig": { + "protocolVersion": 51, + "config": { + "value": "128" + }, + "featureFlag": { + "value": true + } + } + } +} + +task 3, lines 21-32: +//# run-graphql +Response: { + "data": { + "protocolConfig": { + "protocolVersion": 8, + "config": { + "value": null + }, + "featureFlag": { + "value": false + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/epoch/protocol_configs.move b/crates/sui-graphql-e2e-tests/tests/epoch/protocol_configs.move new file mode 100644 index 0000000000000..901326e4969a3 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/epoch/protocol_configs.move @@ -0,0 +1,32 @@ +// Copyright (c) Mysten Labs, Inc. 
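// The two queries below read `protocolConfig` at the chain's current version
// (51, set by the init task) and pinned to an older version (8). Per the
// expected output above, `max_move_identifier_len` is 128 at version 51 but
// null at version 8 (the attribute did not exist yet), and the
// `enable_coin_deny_list` feature flag is false at version 8. A hedged Rust
// sketch of the same lookup against the sui-protocol-config crate (accessor
// names assumed; the crate generates an `_as_option` getter for optional
// config attributes):
//
//     use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion};
//
//     fn main() {
//         let v8 = ProtocolConfig::get_for_version(ProtocolVersion::new(8), Chain::Unknown);
//         // Undefined at version 8, mirroring the null in the GraphQL response.
//         assert_eq!(v8.max_move_identifier_len_as_option(), None);
//     }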
+// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 51 --simulator --accounts C + +//# create-checkpoint + +//# run-graphql +{ + protocolConfig { + protocolVersion + config(key: "max_move_identifier_len") { + value + } + featureFlag(key: "enable_coin_deny_list") { + value + } + } +} + +//# run-graphql +{ + protocolConfig(protocolVersion: 8) { + protocolVersion + config(key: "max_move_identifier_len") { + value + } + featureFlag(key: "enable_coin_deny_list") { + value + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/packages/versioning.exp b/crates/sui-graphql-e2e-tests/tests/packages/versioning.exp new file mode 100644 index 0000000000000..7f0e8a7153b98 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/packages/versioning.exp @@ -0,0 +1,814 @@ +processed 17 tasks + +init: +A: object(0,0) + +task 1, lines 6-9: +//# publish --upgradeable --sender A +created: object(1,0), object(1,1) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 5076800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 11: +//# create-checkpoint +Checkpoint created: 1 + +task 3, lines 13-50: +//# run-graphql +Response: { + "data": { + "latestPackage": { + "version": 1, + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + }, + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + } + ] + } + }, + "firstPackage": { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1, + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + }, + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + } + ] + } + }, + "packages": { + "nodes": [ + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": 1 + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000002", + "version": 1 + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000003", + "version": 1 + }, + { + "address": "0x000000000000000000000000000000000000000000000000000000000000dee9", + "version": 1 + }, + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + } + ] + } + } +} + +task 4, lines 52-56: +//# upgrade --package P0 --upgrade-capability 1,1 --sender A +created: object(4,0) +mutated: object(0,0), object(1,1) +gas summary: computation_cost: 1000000, storage_cost: 5251600, storage_rebate: 2595780, non_refundable_storage_fee: 26220 + +task 5, line 58: +//# create-checkpoint +Checkpoint created: 2 + +task 6, lines 60-97: +//# run-graphql +Response: { + "data": { + "latestPackage": { + "version": 2, + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + }, + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + } + ] + } + }, + "firstPackage": { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1, + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + }, + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": 
"0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + } + ] + } + }, + "packages": { + "nodes": [ + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": 1 + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000002", + "version": 1 + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000003", + "version": 1 + }, + { + "address": "0x000000000000000000000000000000000000000000000000000000000000dee9", + "version": 1 + }, + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + } + ] + } + } +} + +task 7, lines 99-104: +//# upgrade --package P1 --upgrade-capability 1,1 --sender A +created: object(7,0) +mutated: object(0,0), object(1,1) +gas summary: computation_cost: 1000000, storage_cost: 5426400, storage_rebate: 2595780, non_refundable_storage_fee: 26220 + +task 8, line 106: +//# create-checkpoint +Checkpoint created: 3 + +task 9, lines 108-145: +//# run-graphql +Response: { + "data": { + "latestPackage": { + "version": 3, + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + }, + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + }, + { + "address": "0x0eae57b7a07b0548b1f6b0c309f0692828ff994e9159b541334b25582980631c", + "version": 3 + } + ] + } + }, + "firstPackage": { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1, + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + }, + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + }, + { + "address": "0x0eae57b7a07b0548b1f6b0c309f0692828ff994e9159b541334b25582980631c", + "version": 3 + } + ] + } + }, + "packages": { + "nodes": [ + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": 1 + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000002", + "version": 1 + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000003", + "version": 1 + }, + { + "address": "0x000000000000000000000000000000000000000000000000000000000000dee9", + "version": 1 + }, + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + }, + { + "address": "0x0eae57b7a07b0548b1f6b0c309f0692828ff994e9159b541334b25582980631c", + "version": 3 + } + ] + } + } +} + +task 10, lines 147-184: +//# run-graphql +Response: { + "data": { + "v1": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + }, + "latestPackage": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + }, + "v2": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + }, + "latestPackage": { + "module": { + "functions": { 
+ "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + }, + "v3": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + }, + "latestPackage": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + } + } +} + +task 11, lines 186-223: +//# run-graphql +Response: { + "data": { + "v1_from_p1": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + } + }, + "v1_from_p2": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + } + }, + "v2_from_p0": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + } + }, + "v2_from_p2": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + } + }, + "v3_from_p0": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + }, + "v3_from_p1": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + } +} + +task 12, lines 225-280: +//# run-graphql +Response: { + "data": { + "v1": { + "v1": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + } + }, + "v2": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + } + }, + "v3": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + }, + "v2": { + "v1": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + } + }, + "v2": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + } + }, + "v3": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + }, + "v3": { + "v1": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + } + ] + } + } + }, + "v2": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + } + ] + } + } + }, + "v3": { + "module": { + "functions": { + "nodes": [ + { + "name": "f" + }, + { + "name": "g" + }, + { + "name": "h" + } + ] + } + } + } + } + } +} + +task 13, lines 282-310: +//# run-graphql +Response: { + "data": { + "v0": null, + "v1": { + "v0": null, + "v4": null + }, + "v4": null + } +} + +task 14, lines 312-343: +//# run-graphql +Response: { + "data": { + "before": { + "nodes": [ + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": 1, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 0 + } + } + } + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000002", + "version": 1, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 0 + } + } + } + }, + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000003", + "version": 1, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 0 + } + } + } + }, + { + "address": "0x000000000000000000000000000000000000000000000000000000000000dee9", + "version": 1, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 0 + } + } + } + } + ] + }, + "after": { + "nodes": [ + { + "address": 
"0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "address": "0x0eae57b7a07b0548b1f6b0c309f0692828ff994e9159b541334b25582980631c", + "version": 3, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + }, + "between": { + "nodes": [ + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2, + "previousTransactionBlock": { + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 15, lines 345-380: +//# run-graphql +Response: { + "data": { + "packageVersions": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + }, + { + "address": "0x0eae57b7a07b0548b1f6b0c309f0692828ff994e9159b541334b25582980631c", + "version": 3 + } + ] + }, + "after": { + "nodes": [ + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + }, + { + "address": "0x0eae57b7a07b0548b1f6b0c309f0692828ff994e9159b541334b25582980631c", + "version": 3 + } + ] + }, + "before": { + "nodes": [ + { + "address": "0x175ae86f2df1eb652d57fbe9e44c7f2d67870d2b6776a4356f30930221b63b88", + "version": 1 + }, + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + } + ] + }, + "between": { + "nodes": [ + { + "address": "0x351bc614b36f0f522a64334e4c278d4bfe200234958870c084e0a005f041d681", + "version": 2 + } + ] + } + } +} + +task 16, lines 382-400: +//# run-graphql +Response: { + "data": { + "packageVersions": { + "nodes": [ + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": 1 + } + ] + }, + "package": { + "packageVersions": { + "nodes": [ + { + "address": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": 1 + } + ] + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/packages/versioning.move b/crates/sui-graphql-e2e-tests/tests/packages/versioning.move new file mode 100644 index 0000000000000..b0e0900bbcb59 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/packages/versioning.move @@ -0,0 +1,400 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 39 --addresses P0=0x0 P1=0x0 P2=0x0 --accounts A --simulator + +//# publish --upgradeable --sender A +module P0::m { + public fun f(): u64 { 42 } +} + +//# create-checkpoint + +//# run-graphql +{ + latestPackage(address: "@{P0}") { + version + module(name: "m") { + functions { nodes { name } } + } + + packageVersions { + nodes { + address + version + } + } + } + + firstPackage: package(address: "@{P0}", version: 1) { + address + version + module(name: "m") { + functions { nodes { name } } + } + + packageVersions { + nodes { + address + version + } + } + } + + packages(first: 10) { + nodes { + address + version + } + } +} + +//# upgrade --package P0 --upgrade-capability 1,1 --sender A +module P1::m { + public fun f(): u64 { 42 } + public fun g(): u64 { 43 } +} + +//# create-checkpoint + +//# run-graphql +{ + latestPackage(address: "@{P0}") { + version + module(name: "m") { + functions { nodes { name } } + } + + packageVersions { + nodes { + address + version + } + } + } + + firstPackage: package(address: "@{P1}", version: 1) { + address + version + module(name: "m") { + functions { nodes { name } } + } + + packageVersions { + nodes { + address + version + } + } + } + + packages(first: 10) { + nodes { + address + version + } + } +} + +//# upgrade --package P1 --upgrade-capability 1,1 --sender A +module P2::m { + public fun f(): u64 { 42 } + public fun g(): u64 { 43 } + public fun h(): u64 { 44 } +} + +//# create-checkpoint + +//# run-graphql +{ + latestPackage(address: "@{P0}") { + version + module(name: "m") { + functions { nodes { name } } + } + + packageVersions { + nodes { + address + version + } + } + } + + firstPackage: package(address: "@{P2}", version: 1) { + address + version + module(name: "m") { + functions { nodes { name } } + } + + packageVersions { + nodes { + address + version + } + } + } + + packages(first: 10) { + nodes { + address + version + } + } +} + +//# run-graphql +{ # Test fetching by ID + v1: package(address: "@{P0}") { + module(name: "m") { + functions { nodes { name } } + } + + latestPackage { + module(name: "m") { + functions { nodes { name } } + } + } + } + + v2: package(address: "@{P1}") { + module(name: "m") { + functions { nodes { name } } + } + + latestPackage { + module(name: "m") { + functions { nodes { name } } + } + } + } + + v3: package(address: "@{P2}") { + module(name: "m") { + functions { nodes { name } } + } + + latestPackage { + module(name: "m") { + functions { nodes { name } } + } + } + } +} + +//# run-graphql +{ # Test fetching by version + v1_from_p1: package(address: "@{P1}", version: 1) { + module(name: "m") { + functions { nodes { name } } + } + } + + v1_from_p2: package(address: "@{P2}", version: 1) { + module(name: "m") { + functions { nodes { name } } + } + } + + v2_from_p0: package(address: "@{P0}", version: 2) { + module(name: "m") { + functions { nodes { name } } + } + } + + v2_from_p2: package(address: "@{P2}", version: 2) { + module(name: "m") { + functions { nodes { name } } + } + } + + v3_from_p0: package(address: "@{P0}", version: 3) { + module(name: "m") { + functions { nodes { name } } + } + } + + v3_from_p1: package(address: "@{P1}", version: 3) { + module(name: "m") { + functions { nodes { name } } + } + } +} + +//# run-graphql +{ # Go from one version to another using packageAtVersion + v1: package(address: "@{P1}") { + v1: packageAtVersion(version: 1) { + module(name: "m") { + functions { nodes { name } } + } + } + v2: packageAtVersion(version: 2) { + 
module(name: "m") { + functions { nodes { name } } + } + } + v3: packageAtVersion(version: 3) { + module(name: "m") { + functions { nodes { name } } + } + } + } + + v2: package(address: "@{P2}") { + v1: packageAtVersion(version: 1) { + module(name: "m") { + functions { nodes { name } } + } + } + v2: packageAtVersion(version: 2) { + module(name: "m") { + functions { nodes { name } } + } + } + v3: packageAtVersion(version: 3) { + module(name: "m") { + functions { nodes { name } } + } + } + } + + v3: package(address: "@{P2}") { + v1: packageAtVersion(version: 1) { + module(name: "m") { + functions { nodes { name } } + } + } + v2: packageAtVersion(version: 2) { + module(name: "m") { + functions { nodes { name } } + } + } + v3: packageAtVersion(version: 3) { + module(name: "m") { + functions { nodes { name } } + } + } + } +} + +//# run-graphql +{ # Fetch out of range versions (should return null) + v0: package(address: "@{P0}", version: 0) { + module(name: "m") { + functions { nodes { name } } + } + } + + # This won't return null, but its inner queries will + v1: package(address: "@{P0}") { + v0: packageAtVersion(version: 0) { + module(name: "m") { + functions { nodes { name } } + } + } + + v4: packageAtVersion(version: 4) { + module(name: "m") { + functions { nodes { name } } + } + } + } + + v4: package(address: "@{P0}", version: 4) { + module(name: "m") { + functions { nodes { name } } + } + } +} + +//# run-graphql +{ # Querying packages with checkpoint bounds + before: packages(first: 10, filter: { beforeCheckpoint: 1 }) { + nodes { + address + version + previousTransactionBlock { + effects { checkpoint { sequenceNumber } } + } + } + } + + after: packages(first: 10, filter: { afterCheckpoint: 1 }) { + nodes { + address + version + previousTransactionBlock { + effects { checkpoint { sequenceNumber } } + } + } + } + + between: packages(first: 10, filter: { afterCheckpoint: 1, beforeCheckpoint: 3 }) { + nodes { + address + version + previousTransactionBlock { + effects { checkpoint { sequenceNumber } } + } + } + } +} + +//# run-graphql +{ # Query for versions of a user package + packageVersions(address: "@{P0}") { + nodes { + address + version + } + } + + after: packageVersions(address: "@{P0}", filter: { afterVersion: 1 }) { + nodes { + address + version + } + } + + before: packageVersions(address: "@{P0}", filter: { beforeVersion: 3 }) { + nodes { + address + version + } + } + + between: packageVersions( + address: "@{P0}", + filter: { + afterVersion: 1, + beforeVersion: 3, + }, + ) { + nodes { + address + version + } + } +} + +//# run-graphql +{ # Query for versions of a system package (there will be only one because we + # don't have a way to upgrade system packages in these tests.) 
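# The expected output for this task (above) confirms that the top-level
# `packageVersions` query and the nested `package(...) { packageVersions }`
# shape agree: both report a single version (1) for 0x1. For reference, the
# four system packages recurring in the `packages` listings sit at fixed
# addresses, which the sui-types crate exposes as constants (a sketch;
# constant names assumed):
#
#     use sui_types::{
#         DEEPBOOK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID,
#         SUI_FRAMEWORK_PACKAGE_ID, SUI_SYSTEM_PACKAGE_ID,
#     };
#
#     fn main() {
#         // Prints 0x...001, 0x...002, 0x...003, and 0x...dee9 in turn.
#         for id in [
#             MOVE_STDLIB_PACKAGE_ID,
#             SUI_FRAMEWORK_PACKAGE_ID,
#             SUI_SYSTEM_PACKAGE_ID,
#             DEEPBOOK_PACKAGE_ID,
#         ] {
#             println!("{id}");
#         }
#     }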
+ packageVersions(address: "0x1") { + nodes { + address + version + } + } + + package(address: "0x1") { + packageVersions { + nodes { + address + version + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/at_checkpoint.exp b/crates/sui-graphql-e2e-tests/tests/transactions/at_checkpoint.exp new file mode 100644 index 0000000000000..48209abf430e8 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/at_checkpoint.exp @@ -0,0 +1,210 @@ +processed 11 tasks + +init: +A: object(0,0) + +task 2, lines 10-12: +//# programmable --sender A --inputs 1 @A +//> 0: SplitCoins(Gas, [Input(0)]); +//> TransferObjects([Result(0)], Input(1)) +created: object(2,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 1976000, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 3, line 14: +//# create-checkpoint +Checkpoint created: 1 + +task 5, line 18: +//# create-checkpoint +Checkpoint created: 2 + +task 7, line 22: +//# create-checkpoint +Checkpoint created: 3 + +task 8, lines 24-36: +//# run-graphql +Response: { + "data": { + "c0": { + "nodes": [ + { + "digest": "FPhSSzT7tHmrPhs3H9GT1n4Dqj3eyCgaFLkQSc9FEDVV", + "kind": { + "__typename": "GenesisTransaction" + } + } + ] + }, + "c1": { + "nodes": [ + { + "digest": "43wY12GuxKzFAJAAW7oCcYfRGb3BSKXxgrVTtXwuELfn", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + }, + { + "digest": "Cn6D9eKgVx5EeZddUSpQeTFcVyHKjqmt6yeiroKgr9h6", + "kind": { + "__typename": "ProgrammableTransactionBlock" + } + } + ] + }, + "c2": { + "nodes": [ + { + "digest": "9eMYXfB8mzZhdQgJ6HJTTdcwyXZ3EHVXDpcvERnnBWvR", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + } + ] + }, + "c3": { + "nodes": [ + { + "digest": "E1TmDoToDfVSW7kMEFiYsNFL2UeCaL1wNbWLdFjxe5mx", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + } + ] + }, + "c4": { + "nodes": [] + } + } +} + +task 9, lines 38-50: +//# run-graphql +Response: { + "data": { + "c0": { + "transactionBlocks": { + "nodes": [ + { + "digest": "FPhSSzT7tHmrPhs3H9GT1n4Dqj3eyCgaFLkQSc9FEDVV", + "kind": { + "__typename": "GenesisTransaction" + } + } + ] + } + }, + "c1": { + "transactionBlocks": { + "nodes": [ + { + "digest": "43wY12GuxKzFAJAAW7oCcYfRGb3BSKXxgrVTtXwuELfn", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + }, + { + "digest": "Cn6D9eKgVx5EeZddUSpQeTFcVyHKjqmt6yeiroKgr9h6", + "kind": { + "__typename": "ProgrammableTransactionBlock" + } + } + ] + } + }, + "c2": { + "transactionBlocks": { + "nodes": [ + { + "digest": "9eMYXfB8mzZhdQgJ6HJTTdcwyXZ3EHVXDpcvERnnBWvR", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + } + ] + } + }, + "c3": { + "transactionBlocks": { + "nodes": [ + { + "digest": "E1TmDoToDfVSW7kMEFiYsNFL2UeCaL1wNbWLdFjxe5mx", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + } + ] + } + }, + "c4": null + } +} + +task 10, lines 52-63: +//# run-graphql +Response: { + "data": { + "checkpoints": { + "pageInfo": { + "hasNextPage": false + }, + "nodes": [ + { + "transactionBlocks": { + "nodes": [ + { + "digest": "FPhSSzT7tHmrPhs3H9GT1n4Dqj3eyCgaFLkQSc9FEDVV", + "kind": { + "__typename": "GenesisTransaction" + } + } + ] + } + }, + { + "transactionBlocks": { + "nodes": [ + { + "digest": "43wY12GuxKzFAJAAW7oCcYfRGb3BSKXxgrVTtXwuELfn", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + }, + { + "digest": "Cn6D9eKgVx5EeZddUSpQeTFcVyHKjqmt6yeiroKgr9h6", + "kind": { + "__typename": "ProgrammableTransactionBlock" + } + } + ] + } + }, 
+ { + "transactionBlocks": { + "nodes": [ + { + "digest": "9eMYXfB8mzZhdQgJ6HJTTdcwyXZ3EHVXDpcvERnnBWvR", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + } + ] + } + }, + { + "transactionBlocks": { + "nodes": [ + { + "digest": "E1TmDoToDfVSW7kMEFiYsNFL2UeCaL1wNbWLdFjxe5mx", + "kind": { + "__typename": "ConsensusCommitPrologueTransaction" + } + } + ] + } + } + ] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/at_checkpoint.move b/crates/sui-graphql-e2e-tests/tests/transactions/at_checkpoint.move new file mode 100644 index 0000000000000..5d5e27dbed2e8 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/at_checkpoint.move @@ -0,0 +1,63 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 51 --accounts A --simulator + +// Limiting transactions by the checkpoint they are in + +//# advance-clock --duration-ns 1 + +//# programmable --sender A --inputs 1 @A +//> 0: SplitCoins(Gas, [Input(0)]); +//> TransferObjects([Result(0)], Input(1)) + +//# create-checkpoint + +//# advance-clock --duration-ns 1 + +//# create-checkpoint + +//# advance-clock --duration-ns 1 + +//# create-checkpoint + +//# run-graphql +{ # Top-level query, with a filter + c0: transactionBlocks(filter: { atCheckpoint: 0 }) { nodes { ...Tx } } + c1: transactionBlocks(filter: { atCheckpoint: 1 }) { nodes { ...Tx } } + c2: transactionBlocks(filter: { atCheckpoint: 2 }) { nodes { ...Tx } } + c3: transactionBlocks(filter: { atCheckpoint: 3 }) { nodes { ...Tx } } + c4: transactionBlocks(filter: { atCheckpoint: 4 }) { nodes { ...Tx } } +} + +fragment Tx on TransactionBlock { + digest + kind { __typename } +} + +//# run-graphql +{ # Via a checkpoint query + c0: checkpoint(id: { sequenceNumber: 0 }) { transactionBlocks { nodes { ...Tx } } } + c1: checkpoint(id: { sequenceNumber: 1 }) { transactionBlocks { nodes { ...Tx } } } + c2: checkpoint(id: { sequenceNumber: 2 }) { transactionBlocks { nodes { ...Tx } } } + c3: checkpoint(id: { sequenceNumber: 3 }) { transactionBlocks { nodes { ...Tx } } } + c4: checkpoint(id: { sequenceNumber: 4 }) { transactionBlocks { nodes { ...Tx } } } +} + +fragment Tx on TransactionBlock { + digest + kind { __typename } +} + +//# run-graphql +{ # Via paginating checkpoints + checkpoints(first: 5) { + pageInfo { hasNextPage } + nodes { transactionBlocks { nodes { ...Tx } } } + } +} + +fragment Tx on TransactionBlock { + digest + kind { __typename } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/filters/kind.exp b/crates/sui-graphql-e2e-tests/tests/transactions/filters/kind.exp new file mode 100644 index 0000000000000..5f9783d3a76fe --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/filters/kind.exp @@ -0,0 +1,246 @@ +processed 16 tasks + +init: +A: object(0,0), B: object(0,1), C: object(0,2), D: object(0,3), E: object(0,4) + +task 1, lines 6-19: +//# publish +created: object(1,0) +mutated: object(0,5) +gas summary: computation_cost: 1000000, storage_cost: 5175600, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 21: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 23: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 25: +//# run Test::M1::create --args 1 @A --sender B +created: object(4,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 
2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 5, line 27: +//# run Test::M1::create --args 2 @A --sender C +created: object(5,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 6, line 29: +//# run Test::M1::create --args 3 @A --sender D +created: object(6,0) +mutated: object(0,3) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 7, line 31: +//# run Test::M1::create --args 4 @A --sender E +created: object(7,0) +mutated: object(0,4) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 8, line 33: +//# create-checkpoint +Checkpoint created: 2 + +task 9, lines 35-53: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0Ijo2LCJpIjpmYWxzZX0", + "startCursor": "eyJjIjoyLCJ0IjoyLCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "78YAzuJPHbHXsqqj2GjiBibpAiWim7sha6MseSCN2Y6g", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "9zVSnZuHjSQZKcbrwmpkUfsRTA4J9VKqSiqzNCmycPex", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "BmipCooPMB1CHeRuFHS15q4VQ14pSLoYvuuNEfNsvZBc", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "W3YEPxp4z4LzuwoGq4kmCBiy12xv4cNuEvwusrsxDem", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "HCNwnSLqsQYEju3KoQbxpa3SD5mVG6FLcggkd2ZYxHvB", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + ] + } + } +} + +task 10, lines 55-73: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0IjoyLCJpIjpmYWxzZX0", + "startCursor": "eyJjIjoyLCJ0IjoyLCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "78YAzuJPHbHXsqqj2GjiBibpAiWim7sha6MseSCN2Y6g", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + ] + } + } +} + +task 11, lines 75-93: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0IjozLCJpIjpmYWxzZX0", + "startCursor": "eyJjIjoyLCJ0IjozLCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "9zVSnZuHjSQZKcbrwmpkUfsRTA4J9VKqSiqzNCmycPex", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + ] + } + } +} + +task 12, lines 95-113: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0Ijo0LCJpIjpmYWxzZX0", + "startCursor": "eyJjIjoyLCJ0Ijo0LCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "BmipCooPMB1CHeRuFHS15q4VQ14pSLoYvuuNEfNsvZBc", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + ] + } + } +} + +task 13, lines 115-133: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0Ijo1LCJpIjpmYWxzZX0", + "startCursor": "eyJjIjoyLCJ0Ijo1LCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "W3YEPxp4z4LzuwoGq4kmCBiy12xv4cNuEvwusrsxDem", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + ] + } + } +} + +task 14, lines 135-153: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + 
"pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0Ijo2LCJpIjpmYWxzZX0", + "startCursor": "eyJjIjoyLCJ0Ijo2LCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "HCNwnSLqsQYEju3KoQbxpa3SD5mVG6FLcggkd2ZYxHvB", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + ] + } + } +} + +task 15, lines 155-173: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": null, + "startCursor": null + }, + "nodes": [] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/filters/kind.move b/crates/sui-graphql-e2e-tests/tests/transactions/filters/kind.move new file mode 100644 index 0000000000000..485cb837f2023 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/filters/kind.move @@ -0,0 +1,173 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B C D E --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @A --sender A + +//# run Test::M1::create --args 1 @A --sender B + +//# run Test::M1::create --args 2 @A --sender C + +//# run Test::M1::create --args 3 @A --sender D + +//# run Test::M1::create --args 4 @A --sender E + +//# create-checkpoint + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: PROGRAMMABLE_TX atCheckpoint: 2}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: PROGRAMMABLE_TX atCheckpoint: 2 signAddress: "@{A}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: PROGRAMMABLE_TX atCheckpoint: 2 signAddress: "@{B}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: PROGRAMMABLE_TX atCheckpoint: 2 signAddress: "@{C}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: PROGRAMMABLE_TX atCheckpoint: 2 signAddress: "@{D}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: PROGRAMMABLE_TX atCheckpoint: 2 signAddress: "@{E}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 50 filter: {kind: SYSTEM_TX atCheckpoint: 2}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} diff --git 
a/crates/sui-graphql-e2e-tests/tests/transactions/filters/transaction_ids.exp b/crates/sui-graphql-e2e-tests/tests/transactions/filters/transaction_ids.exp new file mode 100644 index 0000000000000..68ccdaff1ab7f --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/filters/transaction_ids.exp @@ -0,0 +1,95 @@ +processed 9 tasks + +init: +A: object(0,0) + +task 1, lines 6-19: +//# publish +created: object(1,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 5175600, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 21: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 23: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 25: +//# create-checkpoint +Checkpoint created: 2 + +task 5, lines 27-45: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": null, + "startCursor": null + }, + "nodes": [] + } + } +} + +task 6, lines 47-65: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": null, + "startCursor": null + }, + "nodes": [] + } + } +} + +task 7, lines 67-85: +//# run-graphql +Response: { + "data": null, + "errors": [ + { + "message": "A scan limit must be specified for the given filter combination", + "locations": [ + { + "line": 2, + "column": 3 + } + ], + "path": [ + "transactionBlocks" + ], + "extensions": { + "code": "BAD_USER_INPUT" + } + } + ] +} + +task 8, lines 87-105: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjoyLCJ0IjoyLCJpIjp0cnVlfQ", + "startCursor": "eyJjIjoyLCJ0IjowLCJpIjp0cnVlfQ" + }, + "nodes": [] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/filters/transaction_ids.move b/crates/sui-graphql-e2e-tests/tests/transactions/filters/transaction_ids.move new file mode 100644 index 0000000000000..ba15bcdc5ce9b --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/filters/transaction_ids.move @@ -0,0 +1,105 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @A --sender A + +//# create-checkpoint + +//# run-graphql +{ + transactionBlocks(filter: {transactionIds: []}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(filter: {signAddress: "@{A}" transactionIds: []}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(filter: {recvAddress: "@{A}" transactionIds: []}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(scanLimit: 10 filter: {recvAddress: "@{A}" transactionIds: []}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/programmable.exp b/crates/sui-graphql-e2e-tests/tests/transactions/programmable.exp index d1a45fd72cb9b..96578345e23ca 100644 --- a/crates/sui-graphql-e2e-tests/tests/transactions/programmable.exp +++ b/crates/sui-graphql-e2e-tests/tests/transactions/programmable.exp @@ -1008,7 +1008,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjowLCJ0YyI6MH0", + "cursor": "eyJjIjo0LCJ0IjowLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "GenesisTransaction" @@ -1016,7 +1016,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0IjoxLCJ0YyI6MX0", + "cursor": "eyJjIjo0LCJ0IjoxLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1024,7 +1024,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0IjoyLCJ0YyI6Mn0", + "cursor": "eyJjIjo0LCJ0IjoyLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1032,7 +1032,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0IjozLCJ0YyI6M30", + "cursor": "eyJjIjo0LCJ0IjozLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1040,7 +1040,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0Ijo0LCJ0YyI6M30", + "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1048,7 +1048,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0Ijo1LCJ0YyI6NH0", + "cursor": "eyJjIjo0LCJ0Ijo1LCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1067,7 +1067,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjowLCJ0YyI6MH0", + "cursor": "eyJjIjo0LCJ0IjowLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "GenesisTransaction" @@ -1086,7 +1086,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjoxLCJ0YyI6MX0", + "cursor": "eyJjIjo0LCJ0IjoxLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1094,7 +1094,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0IjoyLCJ0YyI6Mn0", + 
"cursor": "eyJjIjo0LCJ0IjoyLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1102,7 +1102,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0IjozLCJ0YyI6M30", + "cursor": "eyJjIjo0LCJ0IjozLCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1110,7 +1110,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0Ijo0LCJ0YyI6M30", + "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1118,7 +1118,7 @@ Response: { } }, { - "cursor": "eyJjIjo0LCJ0Ijo1LCJ0YyI6NH0", + "cursor": "eyJjIjo0LCJ0Ijo1LCJpIjpmYWxzZX0", "node": { "kind": { "__typename": "ProgrammableTransactionBlock" @@ -1149,10 +1149,10 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjoyLCJ0YyI6Mn0" + "cursor": "eyJjIjo0LCJ0IjoyLCJpIjpmYWxzZX0" }, { - "cursor": "eyJjIjo0LCJ0IjozLCJ0YyI6M30" + "cursor": "eyJjIjo0LCJ0IjozLCJpIjpmYWxzZX0" } ] } @@ -1166,10 +1166,10 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjozLCJ0YyI6M30" + "cursor": "eyJjIjo0LCJ0IjozLCJpIjpmYWxzZX0" }, { - "cursor": "eyJjIjo0LCJ0Ijo0LCJ0YyI6M30" + "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0" } ] } @@ -1183,7 +1183,7 @@ Response: { "transactionBlocks": { "edges": [ { - "cursor": "eyJjIjo0LCJ0IjozLCJ0YyI6M30" + "cursor": "eyJjIjo0LCJ0IjozLCJpIjpmYWxzZX0" } ] } diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/alternating.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/alternating.exp new file mode 100644 index 0000000000000..3ae2d4b5218c5 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/alternating.exp @@ -0,0 +1,300 @@ +processed 22 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 8-29: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 31: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 33: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 35: +//# run Test::M1::create --args 1 @B --sender B +created: object(4,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 5, line 37: +//# run Test::M1::create --args 2 @A --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 39: +//# run Test::M1::create --args 3 @B --sender B +created: object(6,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 41: +//# run Test::M1::create --args 4 @A --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 43: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 45: +//# run Test::M1::create --args 100 @B --sender B +created: object(9,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 47: +//# run Test::M1::create --args 101 @A 
--sender A +created: object(10,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 49: +//# run Test::M1::create --args 102 @B --sender B +created: object(11,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 51: +//# run Test::M1::create --args 103 @A --sender A +created: object(12,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 53: +//# run Test::M1::create --args 104 @B --sender B +created: object(13,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 55: +//# create-checkpoint +Checkpoint created: 3 + +task 15, lines 57-78: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0IjozLCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 16, lines 80-104: +//# run-graphql --cursors {"c":3,"t":3,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", + "endCursor": "eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "Hgu3LePqrpyR8Vq3Ve4L2KmvErcdcz92u7YiiotkKJ1N", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "2EwyAHiMofhbM5z5ty7XT1QXs4sNZfHmZLX513Ag8sD3", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 17, lines 106-129: +//# run-graphql --cursors {"c":3,"t":3,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0Ijo0LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0Ijo1LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "Hgu3LePqrpyR8Vq3Ve4L2KmvErcdcz92u7YiiotkKJ1N", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 18, lines 131-155: +//# run-graphql --cursors {"c":3,"t":6,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0Ijo3LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0Ijo4LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0Ijo4LCJpIjpmYWxzZX0", + "node": { + "digest": "4LUhoFJMmZfG71RHiRkwa9KHovrDv3S3mqUM1vu9JWKJ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 19, lines 157-184: +//# run-graphql --cursors {"c":3,"t":5,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0Ijo2LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0Ijo4LCJpIjpmYWxzZX0" + }, + "edges": [ + { + "cursor": 
"eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "2EwyAHiMofhbM5z5ty7XT1QXs4sNZfHmZLX513Ag8sD3", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo4LCJpIjpmYWxzZX0", + "node": { + "digest": "4LUhoFJMmZfG71RHiRkwa9KHovrDv3S3mqUM1vu9JWKJ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 20, lines 186-209: +//# run-graphql --cursors {"c":3,"t":8,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "endCursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + "digest": "AnqDERsdbEiE26CACJa6KtJTLsggisgu7yxhMJ6mU1JZ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 21, lines 211-235: +//# run-graphql --cursors {"c":3,"t":8,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0Ijo5LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + "digest": "AnqDERsdbEiE26CACJa6KtJTLsggisgu7yxhMJ6mU1JZ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/alternating.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/alternating.move new file mode 100644 index 0000000000000..a5f339d31d94e --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/alternating.move @@ -0,0 +1,235 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Testing behavior of alternating between a scan-limited and normal query
+
+//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator
+
+//# publish
+module Test::M1 {
+    public struct Object has key, store {
+        id: UID,
+        value: u64,
+    }
+
+    public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) {
+        transfer::public_transfer(
+            Object { id: object::new(ctx), value },
+            recipient
+        )
+    }
+
+    public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) {
+        let tmp = lhs.value;
+        lhs.value = rhs.value;
+        rhs.value = tmp;
+        transfer::public_transfer(lhs, recipient);
+        transfer::public_transfer(rhs, recipient);
+    }
+}
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 0 @A --sender A
+
+//# run Test::M1::create --args 1 @B --sender B
+
+//# run Test::M1::create --args 2 @A --sender A
+
+//# run Test::M1::create --args 3 @B --sender B
+
+//# run Test::M1::create --args 4 @A --sender A
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 100 @B --sender B
+
+//# run Test::M1::create --args 101 @A --sender A
+
+//# run Test::M1::create --args 102 @B --sender B
+
+//# run Test::M1::create --args 103 @A --sender A
+
+//# run Test::M1::create --args 104 @B --sender B
+
+//# create-checkpoint
+
+//# run-graphql
+{
+  transactionBlocks(first: 2 scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":3,"t":3,"i":true}
+# This should return the next two matching transactions after 3,
+# so txs 4 and 6. The boundary cursors should wrap the response set,
+# and both should have isScanLimited set to false
+{
+  transactionBlocks(first: 2 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":3,"t":3,"i":true}
+# Meanwhile, because of the scanLimit of 2, the boundary cursors are
+# startCursor: 4, endCursor: 5, and both are scan limited
+{
+  transactionBlocks(first: 2 after: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":3,"t":6,"i":false}
+# From a previous query that was not scan limited, paginate with scan limit
+# startCursor: 7, endCursor: 8, both scan limited
+# response set consists of single tx 8
+{
+  transactionBlocks(first: 2 after: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":3,"t":5,"i":true}
+# From tx 5, select the next two transactions that match,
+# setting the scanLimit to cover all of the remaining txs.
+# Even though we've finished scanning, we should indicate there is a next page
+# so we don't skip any txs; consequently, the endCursor wraps the result set.
+# 
startCursor: 6, endCursor: 8, endCursor is not scan limited +{ + transactionBlocks(first: 2 after: "@{cursor_0}" scanLimit: 6 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":3,"t":8,"i":false} +# fetch the last tx without scan limit +# startCursor = endCursor = 10, wrapping the response set +{ + transactionBlocks(first: 2 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":3,"t":8,"i":false} +# fetch the last tx with scan limit +# unlike the not-scan-limited query, the start and end cursors +# are expanded out to the scanned window, instead of wrapping the response set +{ + transactionBlocks(first: 2 after: "@{cursor_0}" scanLimit: 6 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/both_cursors.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/both_cursors.exp new file mode 100644 index 0000000000000..2fee54d94c6f6 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/both_cursors.exp @@ -0,0 +1,177 @@ +processed 18 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 9-30: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 32: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 34: +//# run Test::M1::create --args 0 @B --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 36: +//# run Test::M1::create --args 1 @A --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 38: +//# run Test::M1::create --args 2 @B --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 40: +//# run Test::M1::create --args 3 @A --sender A +created: object(6,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 42: +//# run Test::M1::create --args 4 @B --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 44: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 46: +//# run Test::M1::create --args 100 @A --sender A +created: object(9,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 48: +//# run Test::M1::create 
--args 101 @A --sender A
+created: object(10,0)
+mutated: object(0,0)
+gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880
+
+task 11, line 50:
+//# run Test::M1::create --args 102 @A --sender A
+created: object(11,0)
+mutated: object(0,0)
+gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880
+
+task 12, line 52:
+//# run Test::M1::create --args 103 @B --sender A
+created: object(12,0)
+mutated: object(0,0)
+gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880
+
+task 13, line 54:
+//# run Test::M1::create --args 104 @B --sender A
+created: object(13,0)
+mutated: object(0,0)
+gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880
+
+task 14, line 56:
+//# create-checkpoint
+Checkpoint created: 3
+
+task 15, lines 58-81:
+//# run-graphql --cursors {"c":4,"t":2,"i":true} {"c":4,"t":7,"i":true}
+Response: {
+  "data": {
+    "transactionBlocks": {
+      "pageInfo": {
+        "hasPreviousPage": true,
+        "hasNextPage": true,
+        "startCursor": "eyJjIjo0LCJ0IjozLCJpIjp0cnVlfQ",
+        "endCursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0"
+      },
+      "edges": [
+        {
+          "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0",
+          "node": {
+            "digest": "6RKZYt946ztfY8ZVspCv8faXBzKxDcTUEHnrCyBSZ4Li",
+            "effects": {
+              "checkpoint": {
+                "sequenceNumber": 2
+              }
+            }
+          }
+        }
+      ]
+    }
+  }
+}
+
+task 16, lines 83-108:
+//# run-graphql --cursors {"c":4,"t":2,"i":true} {"c":4,"t":7,"i":true}
+Response: {
+  "data": {
+    "transactionBlocks": {
+      "pageInfo": {
+        "hasPreviousPage": true,
+        "hasNextPage": true,
+        "startCursor": "eyJjIjo0LCJ0IjozLCJpIjp0cnVlfQ",
+        "endCursor": "eyJjIjo0LCJ0Ijo2LCJpIjp0cnVlfQ"
+      },
+      "edges": [
+        {
+          "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0",
+          "node": {
+            "digest": "6RKZYt946ztfY8ZVspCv8faXBzKxDcTUEHnrCyBSZ4Li",
+            "effects": {
+              "checkpoint": {
+                "sequenceNumber": 2
+              }
+            }
+          }
+        },
+        {
+          "cursor": "eyJjIjo0LCJ0Ijo2LCJpIjpmYWxzZX0",
+          "node": {
+            "digest": "83AZLnLVtQeUdrXGg3igLkeo94j3wTLuwY4izobLLVBT",
+            "effects": {
+              "checkpoint": {
+                "sequenceNumber": 2
+              }
+            }
+          }
+        }
+      ]
+    }
+  }
+}
+
+task 17, lines 110-133:
+//# run-graphql --cursors {"c":4,"t":4,"i":true} {"c":4,"t":8,"i":true}
+Response: {
+  "data": {
+    "transactionBlocks": {
+      "pageInfo": {
+        "hasPreviousPage": true,
+        "hasNextPage": true,
+        "startCursor": "eyJjIjo0LCJ0Ijo1LCJpIjp0cnVlfQ",
+        "endCursor": "eyJjIjo0LCJ0Ijo1LCJpIjpmYWxzZX0"
+      },
+      "edges": [
+        {
+          "cursor": "eyJjIjo0LCJ0Ijo1LCJpIjpmYWxzZX0",
+          "node": {
+            "digest": "74pdWZw8nEhvtan9aoYHAeZxGouHsj9cwBr5GcgncNAz",
+            "effects": {
+              "checkpoint": {
+                "sequenceNumber": 2
+              }
+            }
+          }
+        }
+      ]
+    }
+  }
+}
diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/both_cursors.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/both_cursors.move
new file mode 100644
index 0000000000000..ba913319b65ad
--- /dev/null
+++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/both_cursors.move
@@ -0,0 +1,133 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+// Tests paginating forwards when both an `after` and a `before` cursor are provided,
+// combined with a scanLimit. Of the ten `create` transactions below, the 1st, 3rd,
+// 5th, 9th, and 10th send an object to B; the rest send to A.
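+//
+// Editorial note, inferred from the `--cursors` arguments and the cursors echoed
+// back in the .exp file rather than from any documented contract: these opaque
+// cursors appear to be base64-encoded JSON of the form
+// {"c": <checkpoint viewed at>, "t": <transaction sequence number>, "i": <is scan limited>}.
+// A minimal sketch for inspecting one, assuming a python3 interpreter is available
+// (padding restored, because the encoding strips it):
+//
+//   python3 -c 'import base64, json; \
+//       print(json.loads(base64.b64decode("eyJjIjo0LCJ0IjoyLCJpIjp0cnVlfQ==")))'
+//   # => {'c': 4, 't': 2, 'i': True}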
+
+//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator
+
+//# publish
+module Test::M1 {
+    public struct Object has key, store {
+        id: UID,
+        value: u64,
+    }
+
+    public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) {
+        transfer::public_transfer(
+            Object { id: object::new(ctx), value },
+            recipient
+        )
+    }
+
+    public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) {
+        let tmp = lhs.value;
+        lhs.value = rhs.value;
+        rhs.value = tmp;
+        transfer::public_transfer(lhs, recipient);
+        transfer::public_transfer(rhs, recipient);
+    }
+}
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 0 @B --sender A
+
+//# run Test::M1::create --args 1 @A --sender A
+
+//# run Test::M1::create --args 2 @B --sender A
+
+//# run Test::M1::create --args 3 @A --sender A
+
+//# run Test::M1::create --args 4 @B --sender A
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 100 @A --sender A
+
+//# run Test::M1::create --args 101 @A --sender A
+
+//# run Test::M1::create --args 102 @A --sender A
+
+//# run Test::M1::create --args 103 @B --sender A
+
+//# run Test::M1::create --args 104 @B --sender A
+
+//# create-checkpoint
+
+//# run-graphql --cursors {"c":4,"t":2,"i":true} {"c":4,"t":7,"i":true}
+# startCursor is at 3 + scanLimited, endCursor at 4 + not scanLimited
+# this is because, between (2, 7), txs 4 and 6 match, and the endCursor snaps to the last tx of the result
+{
+  transactionBlocks(first: 1 scanLimit: 4 after: "@{cursor_0}" before: "@{cursor_1}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":4,"t":2,"i":true} {"c":4,"t":7,"i":true}
+# startCursor is at 3 + scanLimited, endCursor at 6 + scanLimited
+# we return txs 4 and 6, paginate_results thinks we do not have a next page,
+# and scan-limit logic will override this as there are still more txs to scan
+# note that we're scanning txs [3, 6]
+{
+  transactionBlocks(first: 3 scanLimit: 4 after: "@{cursor_0}" before: "@{cursor_1}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":4,"t":4,"i":true} {"c":4,"t":8,"i":true}
+# txs 5 and 7 match, but due to the page size of `first: 1`, we only return tx 5
+# startCursor is 5 + scan limited; endCursor is also 5, but not scan limited,
+# because it snaps to the last tx of the result set
+{
+  transactionBlocks(first: 1 scanLimit: 3 after: "@{cursor_0}" before: "@{cursor_1}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/first.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/first.exp
new file mode 100644
index 0000000000000..b02eb2652a412
--- /dev/null
+++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/first.exp
@@ -0,0 +1,358 @@
+processed 25 tasks
+
+init:
+A: object(0,0), B: object(0,1)
+
+task 1, lines 9-30:
+//# publish
+created: object(1,0)
+mutated: object(0,2)
+gas summary: computation_cost: 1000000, 
storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 32: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 34: +//# run Test::M1::create --args 0 @B --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 36: +//# run Test::M1::create --args 1 @A --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 38: +//# run Test::M1::create --args 2 @B --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 40: +//# run Test::M1::create --args 3 @A --sender A +created: object(6,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 42: +//# run Test::M1::create --args 4 @B --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 44: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 46: +//# run Test::M1::create --args 100 @A --sender A +created: object(9,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 48: +//# run Test::M1::create --args 101 @A --sender A +created: object(10,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 50: +//# run Test::M1::create --args 102 @A --sender A +created: object(11,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 52: +//# run Test::M1::create --args 103 @B --sender A +created: object(12,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 54: +//# run Test::M1::create --args 104 @B --sender A +created: object(13,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 56: +//# create-checkpoint +Checkpoint created: 3 + +task 15, lines 58-82: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "6RKZYt946ztfY8ZVspCv8faXBzKxDcTUEHnrCyBSZ4Li", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "83AZLnLVtQeUdrXGg3igLkeo94j3wTLuwY4izobLLVBT", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + 
"digest": "AWWgnnumcijVEY2YUzs4MtqzFPeLKFAbHnnjXwUyn6Gj", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "node": { + "digest": "DVVVd1cLYDpV3KHhXpirV5NFpk3DKaCuKvXXFeG2owA7", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 16, lines 85-111: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0IjozLCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 17, lines 113-137: +//# run-graphql --cursors {"c":4,"t":3,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo0LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo0LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "6RKZYt946ztfY8ZVspCv8faXBzKxDcTUEHnrCyBSZ4Li", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 18, lines 139-165: +//# run-graphql --cursors {"c":4,"t":4,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo1LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo3LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "83AZLnLVtQeUdrXGg3igLkeo94j3wTLuwY4izobLLVBT", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 19, lines 167-193: +//# run-graphql --cursors {"c":4,"t":7,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo4LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo5LCJpIjp0cnVlfQ" + }, + "edges": [] + } + } +} + +task 20, lines 195-220: +//# run-graphql --cursors {"c":4,"t":9,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjo0LCJ0IjoxMCwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo0LCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + "digest": "AWWgnnumcijVEY2YUzs4MtqzFPeLKFAbHnnjXwUyn6Gj", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjo0LCJ0IjoxMSwiaSI6ZmFsc2V9", + "node": { + "digest": "DVVVd1cLYDpV3KHhXpirV5NFpk3DKaCuKvXXFeG2owA7", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 21, line 222: +//# run Test::M1::create --args 105 @A --sender A +created: object(21,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 22, line 224: +//# create-checkpoint +Checkpoint created: 4 + +task 23, lines 226-252: +//# run-graphql --cursors {"c":4,"t":11,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjo0LCJ0IjoxMiwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo0LCJ0IjoxMiwiaSI6dHJ1ZX0" + }, + "edges": [] + } + } +} + +task 24, 
lines 254-281:
+//# run-graphql --cursors {"c":4,"t":12,"i":true}
+Response: {
+  "data": {
+    "transactionBlocks": {
+      "pageInfo": {
+        "hasPreviousPage": true,
+        "hasNextPage": true,
+        "startCursor": "eyJjIjo0LCJ0IjoxMCwiaSI6dHJ1ZX0",
+        "endCursor": "eyJjIjo0LCJ0IjoxMSwiaSI6dHJ1ZX0"
+      },
+      "edges": [
+        {
+          "cursor": "eyJjIjo0LCJ0IjoxMCwiaSI6ZmFsc2V9",
+          "node": {
+            "digest": "AWWgnnumcijVEY2YUzs4MtqzFPeLKFAbHnnjXwUyn6Gj",
+            "effects": {
+              "checkpoint": {
+                "sequenceNumber": 3
+              }
+            }
+          }
+        },
+        {
+          "cursor": "eyJjIjo0LCJ0IjoxMSwiaSI6ZmFsc2V9",
+          "node": {
+            "digest": "DVVVd1cLYDpV3KHhXpirV5NFpk3DKaCuKvXXFeG2owA7",
+            "effects": {
+              "checkpoint": {
+                "sequenceNumber": 3
+              }
+            }
+          }
+        }
+      ]
+    }
+  }
+}
diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/first.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/first.move
new file mode 100644
index 0000000000000..a3a3705413cba
--- /dev/null
+++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/first.move
@@ -0,0 +1,281 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+// Tests paginating forwards where first and scanLimit are equal. The 1st, 3rd, 5th,
+// 9th, and 10th of the `create` transactions below match the filtering criteria
+// (they send an object to B).
+
+//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator
+
+//# publish
+module Test::M1 {
+    public struct Object has key, store {
+        id: UID,
+        value: u64,
+    }
+
+    public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) {
+        transfer::public_transfer(
+            Object { id: object::new(ctx), value },
+            recipient
+        )
+    }
+
+    public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) {
+        let tmp = lhs.value;
+        lhs.value = rhs.value;
+        rhs.value = tmp;
+        transfer::public_transfer(lhs, recipient);
+        transfer::public_transfer(rhs, recipient);
+    }
+}
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 0 @B --sender A
+
+//# run Test::M1::create --args 1 @A --sender A
+
+//# run Test::M1::create --args 2 @B --sender A
+
+//# run Test::M1::create --args 3 @A --sender A
+
+//# run Test::M1::create --args 4 @B --sender A
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 100 @A --sender A
+
+//# run Test::M1::create --args 101 @A --sender A
+
+//# run Test::M1::create --args 102 @A --sender A
+
+//# run Test::M1::create --args 103 @B --sender A
+
+//# run Test::M1::create --args 104 @B --sender A
+
+//# create-checkpoint
+
+//# run-graphql
+# Expect 5 results
+# [2, 3, 4, 5, 6, 7, 8, 9, 10, 11] <- tx_sequence_number
+# [B, A, B, A, B, A, A, A, B, B]
+{
+  transactionBlocks(first: 50 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+
+//# run-graphql
+# scans [B, A] -> [2, 3]
+# Because `scanLimit` is specified, both the start and end cursors should have the `is_scan_limited` flag set to true
+# startCursor is at 2, endCursor is at 3
+# The cursor for the node will have the `is_scan_limited` flag set to false, because we know for sure there is
+# a corresponding element for the cursor in the result set.
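+# As a concrete illustration (an observation from the expected output in the .exp
+# file, not a stated contract), the boundary and node cursors differ only in that
+# flag: the scan-limited startCursor below decodes, as base64 JSON, to
+# {"c":3,"t":2,"i":true}, while the node's own cursor decodes to
+# {"c":3,"t":2,"i":false}. For example, assuming python3:
+#
+#   python3 -c 'import base64, json; \
+#       print(json.loads(base64.b64decode("eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ==")))'
+#   # => {'c': 3, 't': 2, 'i': True}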
+{
+  transactionBlocks(first: 2 scanLimit: 2 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":4,"t":3,"i":true}
+# scans [B] -> [4]
+# Still paginating with `scanLimit`, both the start and end cursors should have the `is_scan_limited` flag set to true
+# because the scanLimit of 1 only scans tx 4, startCursor = endCursor = 4
+{
+  transactionBlocks(first: 1 scanLimit: 1 after: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":4,"t":4,"i":true}
+# scans [A, B, A] -> [5, 6, 7]
+# both the start and end cursors should have the `is_scan_limited` flag set to true
+# startCursor at 5, the sole element has cursor at 6, endCursor at 7
+# instead of wrapping around the result set, the boundary cursors are pushed out
+# to the first and last transaction scanned in this query
+{
+  transactionBlocks(first: 3 scanLimit: 3 after: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":4,"t":7,"i":true}
+# scans [A, A] -> [8, 9]
+# both the start and end cursors should have the `is_scan_limited` flag set to true
+# startCursor at 8, endCursor at 9, and the result set is empty
+# instead of returning None, we set the boundary cursors
+# to the first and last transaction scanned in this query
+{
+  transactionBlocks(first: 2 scanLimit: 2 after: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run-graphql --cursors {"c":4,"t":9,"i":true}
+# scans [B, B] -> [10, 11]
+# both the start and end cursors should have the `is_scan_limited` flag set to true
+# startCursor at 10, endCursor at 11
+# correctly detects we've reached the end of the upper bound
+{
+  transactionBlocks(first: 2 scanLimit: 2 after: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+      }
+    }
+  }
+}
+
+//# run Test::M1::create --args 105 @A --sender A
+
+//# create-checkpoint
+
+//# run-graphql --cursors {"c":4,"t":11,"i":true}
+# we've introduced a new final transaction that doesn't match the filter
+# both the start and end cursors should have the `is_scan_limited` flag set to true
+# startCursor = endCursor = 12, because there is only 1 more tx after the given cursor,
+# regardless of the specified scanLimit
+# correctly detects we've reached the end of the upper bound
+{
+  transactionBlocks(first: 2 scanLimit: 2 after: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 5}) {
+    pageInfo {
+      hasPreviousPage
+      hasNextPage
+      startCursor
+      endCursor
+    }
+    edges {
+      cursor
+      node {
+        digest
+        effects {
+          checkpoint {
+            sequenceNumber
+          }
+        }
+ } + } + } +} + +//# run-graphql --cursors {"c":4,"t":12,"i":true} +# try paginating backwards on the last `endCursor` +# should yield startCursor at 10, endCursor at 11 +# and the result set consists of txs 10 and 11 +# the scanLimit is exclusive of the cursor, hence we reach tx 10 inclusively +# there is a next page, which is the 12th tx, which should yield an empty set +# per the filtering criteria +{ + transactionBlocks(last: 2 scanLimit: 2 before: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 5}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/last.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/last.exp new file mode 100644 index 0000000000000..e2b301c1dc7e3 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/last.exp @@ -0,0 +1,309 @@ +processed 22 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 8-29: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 31: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 33: +//# run Test::M1::create --args 0 @B --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 35: +//# run Test::M1::create --args 1 @B --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 37: +//# run Test::M1::create --args 2 @A --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 39: +//# run Test::M1::create --args 3 @A --sender A +created: object(6,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 41: +//# run Test::M1::create --args 4 @A --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 43: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 45: +//# run Test::M1::create --args 100 @B --sender A +created: object(9,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 47: +//# run Test::M1::create --args 101 @A --sender A +created: object(10,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 49: +//# run Test::M1::create --args 102 @B --sender A +created: object(11,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 51: +//# run Test::M1::create --args 103 @A --sender A +created: object(12,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + 
+task 13, line 53: +//# run Test::M1::create --args 104 @B --sender A +created: object(13,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 55: +//# create-checkpoint +Checkpoint created: 3 + +task 15, lines 57-79: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "DiywoRFzC33smQhVf5K7AcM853XFgfgFxBGErLTEvVWi", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo3LCJpIjpmYWxzZX0", + "node": { + "digest": "F32vrNL7p5sa1iFeykvFQ17UYLeM1urXnSNbbGyoqDRx", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo5LCJpIjpmYWxzZX0", + "node": { + "digest": "3eHY9XENiqep4VvNBr3ws79TEMGhp5egGvFEymk679dc", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "node": { + "digest": "4p5avK1cStj1xtBnvCnHgEUVVr5qnfXQ76ujUZx4ZvP8", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 16, lines 82-106: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoxMCwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "node": { + "digest": "4p5avK1cStj1xtBnvCnHgEUVVr5qnfXQ76ujUZx4ZvP8", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 17, lines 108-132: +//# run-graphql --cursors {"c":4,"t":10,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo5LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo5LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo5LCJpIjpmYWxzZX0", + "node": { + "digest": "3eHY9XENiqep4VvNBr3ws79TEMGhp5egGvFEymk679dc", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 18, lines 134-158: +//# run-graphql --cursors {"c":4,"t":9,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo2LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo4LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo3LCJpIjpmYWxzZX0", + "node": { + "digest": "F32vrNL7p5sa1iFeykvFQ17UYLeM1urXnSNbbGyoqDRx", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 19, lines 160-184: +//# run-graphql --cursors {"c":4,"t":6,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo0LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo1LCJpIjp0cnVlfQ" + }, + "edges": [] + } + } +} + +task 20, lines 186-209: +//# run-graphql --cursors {"c":4,"t":4,"i":true} +Response: { + "data": { + "transactionBlocks": { + 
"pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0IjozLCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjo0LCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "DiywoRFzC33smQhVf5K7AcM853XFgfgFxBGErLTEvVWi", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 21, lines 212-235: +//# run-graphql --cursors {"c":4,"t":2,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/last.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/last.move new file mode 100644 index 0000000000000..6a84f9eabe90b --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/equal/last.move @@ -0,0 +1,235 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Mirrors scan_limit/equal/first.move, paginating backwards where first and scanLimit are equal. + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } + + public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) { + let tmp = lhs.value; + lhs.value = rhs.value; + rhs.value = tmp; + transfer::public_transfer(lhs, recipient); + transfer::public_transfer(rhs, recipient); + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @B --sender A + +//# run Test::M1::create --args 1 @B --sender A + +//# run Test::M1::create --args 2 @A --sender A + +//# run Test::M1::create --args 3 @A --sender A + +//# run Test::M1::create --args 4 @A --sender A + +//# create-checkpoint + +//# run Test::M1::create --args 100 @B --sender A + +//# run Test::M1::create --args 101 @A --sender A + +//# run Test::M1::create --args 102 @B --sender A + +//# run Test::M1::create --args 103 @A --sender A + +//# run Test::M1::create --args 104 @B --sender A + +//# create-checkpoint + +//# run-graphql +# Expect ten results +{ + transactionBlocks(last: 50 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + + +//# run-graphql +# boundary cursors are scan limited +# startCursor: 10, endCursor: 11 +# result is single element with cursor: 11 +{ + transactionBlocks(last: 2 scanLimit: 2 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":10,"i":true} +# boundary cursors are scan limited +# startCursor: 9, endCursor: 9 +# result is single element with cursor: 9 +{ + transactionBlocks(last: 1 scanLimit: 1 before: 
"@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":9,"i":true} +# boundary cursors are scan limited +# startCursor: 6, endCursor: 8 +# result is single element with cursor: 7 +{ + transactionBlocks(last: 3 scanLimit: 3 before: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":6,"i":true} +# boundary cursors are scan limited +# startCursor: 4, endCursor: 5 +# expect empty set +{ + transactionBlocks(last: 2 scanLimit: 2 before: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":4,"i":true} +# Returns the first two matching transactions, boundary cursors both have `is_scan_limited: true` +# startCursor: 2, endCursor: 3 +{ + transactionBlocks(last: 2 scanLimit: 2 before: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + + +//# run-graphql --cursors {"c":4,"t":2,"i":true} +# Since we know from the previous query that there is not a previous page at this cursor, +# Expect false for page flags and null for cursors +{ + transactionBlocks(last: 2 scanLimit: 2 before: "@{cursor_0}" filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/first.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/first.exp new file mode 100644 index 0000000000000..9f785684321d9 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/first.exp @@ -0,0 +1,378 @@ +processed 34 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 6-27: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 29: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 31: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 33: +//# run Test::M1::create --args 1 @A --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 35: +//# run Test::M1::create --args 2 @B --sender B +created: object(5,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, 
non_refundable_storage_fee: 0 + +task 6, line 37: +//# run Test::M1::create --args 3 @B --sender B +created: object(6,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 39: +//# run Test::M1::create --args 4 @B --sender B +created: object(7,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 41: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 43: +//# run Test::M1::create --args 100 @B --sender B +created: object(9,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 45: +//# run Test::M1::create --args 101 @B --sender B +created: object(10,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 47: +//# run Test::M1::create --args 102 @B --sender B +created: object(11,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 49: +//# run Test::M1::create --args 103 @B --sender B +created: object(12,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 51: +//# run Test::M1::create --args 104 @B --sender B +created: object(13,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 53: +//# create-checkpoint +Checkpoint created: 3 + +task 15, line 55: +//# run Test::M1::create --args 100 @B --sender B +created: object(15,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 16, line 57: +//# run Test::M1::create --args 101 @B --sender B +created: object(16,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 17, line 59: +//# run Test::M1::create --args 102 @B --sender B +created: object(17,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 18, line 61: +//# run Test::M1::create --args 103 @B --sender B +created: object(18,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 19, line 63: +//# run Test::M1::create --args 104 @B --sender B +created: object(19,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 20, line 65: +//# create-checkpoint +Checkpoint created: 4 + +task 21, line 67: +//# run Test::M1::create --args 200 @A --sender A +created: object(21,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 22, line 69: +//# run Test::M1::create --args 201 @B --sender A +created: object(22,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + 
+task 23, line 71: +//# run Test::M1::create --args 202 @B --sender B +created: object(23,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 24, line 73: +//# run Test::M1::create --args 203 @B --sender B +created: object(24,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 25, line 75: +//# run Test::M1::create --args 204 @A --sender A +created: object(25,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 26, line 77: +//# create-checkpoint +Checkpoint created: 5 + +task 27, lines 79-100: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjo1LCJ0IjoyLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjo1LCJ0IjoyMSwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjo1LCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "BB6bMUxrBJX9wANzzptKnJM3bMVFKnD4xtY8DGWthaH9", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjoxNywiaSI6ZmFsc2V9", + "node": { + "digest": "BhW3cLzaCgdRYc6jcmXB6DrQ6KaTfmYmb6vBNiu5xPhg", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjoxOCwiaSI6ZmFsc2V9", + "node": { + "digest": "FjpAF5bT173BfV6HLuBdBd2bYevsUVDeJNmYnmJYsTLb", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjoyMSwiaSI6ZmFsc2V9", + "node": { + "digest": "Eq611DTcfsBjH1P9Smrv7xFYQZXNtUMgbQCUr5jo3ycJ", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} + +task 28, lines 103-129: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjo1LCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo1LCJ0IjoyLCJpIjpmYWxzZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo1LCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 29, lines 131-155: +//# run-graphql --cursors {"c":7,"t":2,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjozLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo3LCJ0Ijo3LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "BB6bMUxrBJX9wANzzptKnJM3bMVFKnD4xtY8DGWthaH9", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 30, lines 157-180: +//# run-graphql --cursors {"c":7,"t":7,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0Ijo4LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo3LCJ0IjoxMiwiaSI6dHJ1ZX0" + }, + "edges": [] + } + } +} + +task 31, lines 182-205: +//# run-graphql --cursors {"c":7,"t":12,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, 
+ "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjoxMywiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo3LCJ0IjoxNywiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjoxNywiaSI6ZmFsc2V9", + "node": { + "digest": "BhW3cLzaCgdRYc6jcmXB6DrQ6KaTfmYmb6vBNiu5xPhg", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} + +task 32, lines 207-232: +//# run-graphql --cursors {"c":7,"t":17,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjoxOCwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo3LCJ0IjoxOCwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjoxOCwiaSI6ZmFsc2V9", + "node": { + "digest": "FjpAF5bT173BfV6HLuBdBd2bYevsUVDeJNmYnmJYsTLb", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} + +task 33, lines 234-258: +//# run-graphql --cursors {"c":7,"t":18,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjo3LCJ0IjoxOSwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo3LCJ0IjoyMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjoyMSwiaSI6ZmFsc2V9", + "node": { + "digest": "Eq611DTcfsBjH1P9Smrv7xFYQZXNtUMgbQCUr5jo3ycJ", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/first.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/first.move new file mode 100644 index 0000000000000..b9aea53452ff2 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/first.move @@ -0,0 +1,258 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0
+
+//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator
+
+//# publish
+module Test::M1 {
+ public struct Object has key, store {
+ id: UID,
+ value: u64,
+ }
+
+ public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) {
+ transfer::public_transfer(
+ Object { id: object::new(ctx), value },
+ recipient
+ )
+ }
+
+ public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) {
+ let tmp = lhs.value;
+ lhs.value = rhs.value;
+ rhs.value = tmp;
+ transfer::public_transfer(lhs, recipient);
+ transfer::public_transfer(rhs, recipient);
+ }
+}
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 0 @A --sender A
+
+//# run Test::M1::create --args 1 @A --sender A
+
+//# run Test::M1::create --args 2 @B --sender B
+
+//# run Test::M1::create --args 3 @B --sender B
+
+//# run Test::M1::create --args 4 @B --sender B
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 100 @B --sender B
+
+//# run Test::M1::create --args 101 @B --sender B
+
+//# run Test::M1::create --args 102 @B --sender B
+
+//# run Test::M1::create --args 103 @B --sender B
+
+//# run Test::M1::create --args 104 @B --sender B
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 100 @B --sender B
+
+//# run Test::M1::create --args 101 @B --sender B
+
+//# run Test::M1::create --args 102 @B --sender B
+
+//# run Test::M1::create --args 103 @B --sender B
+
+//# run Test::M1::create --args 104 @B --sender B
+
+//# create-checkpoint
+
+//# run Test::M1::create --args 200 @A --sender A
+
+//# run Test::M1::create --args 201 @B --sender A
+
+//# run Test::M1::create --args 202 @B --sender B
+
+//# run Test::M1::create --args 203 @B --sender B
+
+//# run Test::M1::create --args 204 @A --sender A
+
+//# create-checkpoint
+
+//# run-graphql
+{
+ transactionBlocks(filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+
+//# run-graphql
+# startCursor is 2 and scan-limited, endCursor is 2 and not scan-limited
+# Instead of setting the endCursor to the last transaction scanned, we set it
+# to the last transaction in the result set, so that we don't skip over any
+# matches that fall within the scanning range but beyond the scope of the `first` limit
+{
+ transactionBlocks(first: 1 scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+//# run-graphql --cursors {"c":7,"t":2,"i":false}
+# startCursor: 3, endCursor: 7, both are scan-limited
+# The endCursor is 7, not 3, because we've exhausted all the matches within the
+# scanning window, so the endCursor is pushed out to the last transaction scanned.
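+# Note: the `--cursors` argument above is the JSON form of the Base64 cursors
+# returned in responses: `c` is the checkpoint sequence number the cursor was
+# viewed at, `t` is the transaction sequence number, and `i` is the
+# `is_scan_limited` flag.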
+{ + transactionBlocks(first: 1 after: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":7,"t":7,"i":true} +# startCursor: 8, endCursor: 12, both are scan-limited +# expect an empty set +{ + transactionBlocks(first: 1 after: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":7,"t":12,"i":true} +# startCursor: 13, endCursor: 17, both are scan-limited +# single element returned, coincidentally also the last scanned transaction +{ + transactionBlocks(first: 1 after: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":7,"t":17,"i":true} +# startCursor: 18 scanLimited, endCursor: 18 not scanLimited +# this is because we have multiple matches within the scanning range +# but due to the `first` limit, we return a subset. +# we don't want to skip over other matches, so we don't push the endCursor out +{ + transactionBlocks(first: 1 after: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":7,"t":18,"i":false} +# startCursor: 19, endCursor: 21, both are scan-limited +# single element returned, coincidentally also the last scanned transaction +# note that the startCursor is 19, not 18 or 21, since we can use the scan-limited behavior +{ + transactionBlocks(first: 1 after: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/last.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/last.exp new file mode 100644 index 0000000000000..6f4367472900e --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/last.exp @@ -0,0 +1,355 @@ +processed 34 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 6-27: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 29: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 31: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 33: +//# run Test::M1::create --args 1 @A --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 
2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 35: +//# run Test::M1::create --args 2 @B --sender B +created: object(5,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 6, line 37: +//# run Test::M1::create --args 3 @B --sender B +created: object(6,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 39: +//# run Test::M1::create --args 4 @B --sender B +created: object(7,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 41: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 43: +//# run Test::M1::create --args 100 @B --sender B +created: object(9,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 45: +//# run Test::M1::create --args 101 @B --sender B +created: object(10,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 47: +//# run Test::M1::create --args 102 @B --sender B +created: object(11,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 49: +//# run Test::M1::create --args 103 @B --sender B +created: object(12,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 51: +//# run Test::M1::create --args 104 @B --sender B +created: object(13,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 53: +//# create-checkpoint +Checkpoint created: 3 + +task 15, line 55: +//# run Test::M1::create --args 100 @B --sender B +created: object(15,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 16, line 57: +//# run Test::M1::create --args 101 @B --sender B +created: object(16,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 17, line 59: +//# run Test::M1::create --args 102 @B --sender B +created: object(17,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 18, line 61: +//# run Test::M1::create --args 103 @B --sender B +created: object(18,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 19, line 63: +//# run Test::M1::create --args 104 @B --sender B +created: object(19,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 20, line 65: +//# create-checkpoint +Checkpoint created: 4 + +task 21, line 67: +//# run Test::M1::create --args 200 @A --sender A +created: object(21,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, 
non_refundable_storage_fee: 9880 + +task 22, line 69: +//# run Test::M1::create --args 201 @B --sender B +created: object(22,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 23, line 71: +//# run Test::M1::create --args 202 @B --sender B +created: object(23,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 24, line 73: +//# run Test::M1::create --args 203 @B --sender B +created: object(24,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 25, line 75: +//# run Test::M1::create --args 204 @A --sender A +created: object(25,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 26, line 77: +//# create-checkpoint +Checkpoint created: 5 + +task 27, lines 79-100: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjo1LCJ0IjoyLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjo1LCJ0IjoyMSwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjo1LCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "BB6bMUxrBJX9wANzzptKnJM3bMVFKnD4xtY8DGWthaH9", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjoxNywiaSI6ZmFsc2V9", + "node": { + "digest": "BhW3cLzaCgdRYc6jcmXB6DrQ6KaTfmYmb6vBNiu5xPhg", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + }, + { + "cursor": "eyJjIjo1LCJ0IjoyMSwiaSI6ZmFsc2V9", + "node": { + "digest": "G7DhbdbJeze8e5kmVLNY6aVKckM7yfrzadaABv71ssjx", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} + +task 28, lines 103-125: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjo1LCJ0IjoyMSwiaSI6ZmFsc2V9", + "endCursor": "eyJjIjo1LCJ0IjoyMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo1LCJ0IjoyMSwiaSI6ZmFsc2V9", + "node": { + "digest": "G7DhbdbJeze8e5kmVLNY6aVKckM7yfrzadaABv71ssjx", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} + +task 29, lines 127-152: +//# run-graphql --cursors {"c":7,"t":21,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjoxNiwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo3LCJ0IjoyMCwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjoxNywiaSI6ZmFsc2V9", + "node": { + "digest": "BhW3cLzaCgdRYc6jcmXB6DrQ6KaTfmYmb6vBNiu5xPhg", + "effects": { + "checkpoint": { + "sequenceNumber": 5 + } + } + } + } + ] + } + } +} + +task 30, lines 154-177: +//# run-graphql --cursors {"c":7,"t":16,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjoxMSwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo3LCJ0IjoxNSwiaSI6dHJ1ZX0" + }, + "edges": [] + } + } +} + +task 31, lines 179-201: +//# run-graphql --cursors {"c":7,"t":11,"i":true} 
+Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0Ijo2LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo3LCJ0IjoxMCwiaSI6dHJ1ZX0" + }, + "edges": [] + } + } +} + +task 32, lines 203-227: +//# run-graphql --cursors {"c":7,"t":6,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjozLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjo3LCJ0Ijo1LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "BB6bMUxrBJX9wANzzptKnJM3bMVFKnD4xtY8DGWthaH9", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 33, lines 229-251: +//# run-graphql --cursors {"c":7,"t":3,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjo3LCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo3LCJ0IjoyLCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo3LCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/last.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/last.move new file mode 100644 index 0000000000000..1b8103b356dd2 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/ge_page/last.move @@ -0,0 +1,251 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } + + public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) { + let tmp = lhs.value; + lhs.value = rhs.value; + rhs.value = tmp; + transfer::public_transfer(lhs, recipient); + transfer::public_transfer(rhs, recipient); + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @A --sender A + +//# run Test::M1::create --args 1 @A --sender A + +//# run Test::M1::create --args 2 @B --sender B + +//# run Test::M1::create --args 3 @B --sender B + +//# run Test::M1::create --args 4 @B --sender B + +//# create-checkpoint + +//# run Test::M1::create --args 100 @B --sender B + +//# run Test::M1::create --args 101 @B --sender B + +//# run Test::M1::create --args 102 @B --sender B + +//# run Test::M1::create --args 103 @B --sender B + +//# run Test::M1::create --args 104 @B --sender B + +//# create-checkpoint + +//# run Test::M1::create --args 100 @B --sender B + +//# run Test::M1::create --args 101 @B --sender B + +//# run Test::M1::create --args 102 @B --sender B + +//# run Test::M1::create --args 103 @B --sender B + +//# run Test::M1::create --args 104 @B --sender B + +//# create-checkpoint + +//# run Test::M1::create --args 200 @A --sender A + +//# run Test::M1::create --args 201 @B --sender B + +//# run Test::M1::create --args 202 @B --sender B + +//# run Test::M1::create --args 203 @B --sender B + +//# run Test::M1::create --args 204 @A --sender A + +//# create-checkpoint + +//# run-graphql +{ + transactionBlocks(filter: 
{recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+
+//# run-graphql
+# startCursor 21 not scan limited, endCursor 21 is scan limited
+{
+ transactionBlocks(last: 1 scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+//# run-graphql --cursors {"c":7,"t":21,"i":false}
+# startCursor 16, endCursor 20, both are scan-limited
+# This might be a bit confusing, but the `startCursor` is 16 and not 17 because
+# `scanLimit` is 5; if we set the `startCursor` to 17, then we would never yield tx 17
+# when paginating the other way, since the cursors are exclusive.
+{
+ transactionBlocks(last: 1 before: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+//# run-graphql --cursors {"c":7,"t":16,"i":true}
+# continue paginating backwards with the scan limit
+# startCursor 11, endCursor 15, both scan limited
+{
+ transactionBlocks(last: 1 before: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+//# run-graphql --cursors {"c":7,"t":11,"i":true}
+# startCursor is 6, endCursor is 10, both scan limited
+{
+ transactionBlocks(last: 1 before: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+//# run-graphql --cursors {"c":7,"t":6,"i":true}
+# startCursor is 3, not scan-limited; endCursor is 5, scan-limited
+# this is because we found a match at tx 3, but earlier in the scanned window
+# there is another match (tx 2) that a later page still needs to yield
+{
+ transactionBlocks(last: 1 before: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
+
+//# run-graphql --cursors {"c":7,"t":3,"i":false}
+# Reached the end
+{
+ transactionBlocks(last: 1 before: "@{cursor_0}" scanLimit: 5 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 6}) {
+ pageInfo {
+ hasPreviousPage
+ hasNextPage
+ startCursor
+ endCursor
+ }
+ edges {
+ cursor
+ node {
+ digest
+ effects {
+ checkpoint {
+ sequenceNumber
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/invalid_limits.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/invalid_limits.exp
new file mode 100644
index 0000000000000..2dc10793ef702
--- /dev/null
+++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/invalid_limits.exp
@@ -0,0 +1,112 @@
+processed 13 tasks
+
+init:
+A: object(0,0), B: object(0,1)
+
+task 
1, lines 8-29: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 31: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 33: +//# run Test::M1::create --args 0 @B --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 35: +//# run Test::M1::create --args 1 @A --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 37: +//# run Test::M1::create --args 2 @B --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 39: +//# run Test::M1::create --args 3 @A --sender A +created: object(6,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 41: +//# run Test::M1::create --args 4 @B --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 43: +//# create-checkpoint +Checkpoint created: 2 + +task 9, lines 45-66: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} + +task 10, lines 68-89: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} + +task 11, lines 91-112: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} + +task 12, lines 114-135: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/invalid_limits.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/invalid_limits.move new file mode 100644 index 0000000000000..18c025402d2f3 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/invalid_limits.move @@ -0,0 +1,135 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +// For any instance where limit is 0 or scan limit is 0, we should return an empty result + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } + + public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) { + let tmp = lhs.value; + lhs.value = rhs.value; + rhs.value = tmp; + transfer::public_transfer(lhs, recipient); + transfer::public_transfer(rhs, recipient); + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @B --sender A + +//# run Test::M1::create --args 1 @A --sender A + +//# run Test::M1::create --args 2 @B --sender A + +//# run Test::M1::create --args 3 @A --sender A + +//# run Test::M1::create --args 4 @B --sender A + +//# create-checkpoint + +//# run-graphql +{ + transactionBlocks(first: 0 scanLimit: 2 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 2 scanLimit: 0 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 0 scanLimit: 0 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql +{ + transactionBlocks(first: 0 filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/first.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/first.exp new file mode 100644 index 0000000000000..4cd2dabaa365b --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/first.exp @@ -0,0 +1,364 @@ +processed 22 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 10-31: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 33: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 35: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 37: +//# run Test::M1::create --args 1 @B --sender B +created: object(4,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 5, line 39: +//# run Test::M1::create --args 2 @A --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: 
computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 41: +//# run Test::M1::create --args 3 @B --sender B +created: object(6,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 43: +//# run Test::M1::create --args 4 @A --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 45: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 47: +//# run Test::M1::create --args 100 @B --sender B +created: object(9,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 49: +//# run Test::M1::create --args 101 @A --sender A +created: object(10,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 51: +//# run Test::M1::create --args 102 @B --sender B +created: object(11,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 53: +//# run Test::M1::create --args 103 @A --sender A +created: object(12,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 55: +//# run Test::M1::create --args 104 @B --sender B +created: object(13,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 57: +//# create-checkpoint +Checkpoint created: 3 + +task 15, lines 59-81: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "FveHd4cC5ykjtknvewFjmf6V1gHYhdkuEoUEBAV3y52h", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "Hgu3LePqrpyR8Vq3Ve4L2KmvErcdcz92u7YiiotkKJ1N", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo1LCJpIjpmYWxzZX0", + "node": { + "digest": "5pVYLMHazkfshyLvmQyow1UY4vn9V481KWDxUYpX65BZ", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "2EwyAHiMofhbM5z5ty7XT1QXs4sNZfHmZLX513Ag8sD3", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo3LCJpIjpmYWxzZX0", + "node": { + "digest": "FtVAPJKNoPumPcQuyzznmZB99yhYNqdsmC7wKPj17xR3", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo4LCJpIjpmYWxzZX0", + "node": { + "digest": "4LUhoFJMmZfG71RHiRkwa9KHovrDv3S3mqUM1vu9JWKJ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + 
"cursor": "eyJjIjozLCJ0Ijo5LCJpIjpmYWxzZX0", + "node": { + "digest": "FasAf1kHei9QkZuPLBSkCdXXMtv13RbCgiywktFzx58m", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + "digest": "AnqDERsdbEiE26CACJa6KtJTLsggisgu7yxhMJ6mU1JZ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "node": { + "digest": "GKKT2n7jc6YyJDcckP3kXhVN5FEZJEk1fN3aFzcgRyRr", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 16, lines 84-106: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0IjozLCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 17, lines 108-130: +//# run-graphql --cursors {"c":4,"t":3,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo0LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo1LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "Hgu3LePqrpyR8Vq3Ve4L2KmvErcdcz92u7YiiotkKJ1N", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 18, lines 132-158: +//# run-graphql --cursors {"c":4,"t":5,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo2LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo4LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "2EwyAHiMofhbM5z5ty7XT1QXs4sNZfHmZLX513Ag8sD3", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjo0LCJ0Ijo4LCJpIjpmYWxzZX0", + "node": { + "digest": "4LUhoFJMmZfG71RHiRkwa9KHovrDv3S3mqUM1vu9JWKJ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 19, lines 160-182: +//# run-graphql --cursors {"c":4,"t":8,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjo0LCJ0Ijo5LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + "digest": "AnqDERsdbEiE26CACJa6KtJTLsggisgu7yxhMJ6mU1JZ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 20, lines 184-207: +//# run-graphql --cursors {"c":4,"t":10,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjo0LCJ0IjoxMSwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjo0LCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [] + } + } +} + +task 21, lines 209-232: +//# run-graphql --cursors {"c":4,"t":11,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/first.move 
b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/first.move new file mode 100644 index 0000000000000..bd9b3d2fe2ace --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/first.move @@ -0,0 +1,232 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Without a scan limit, we would expect each query to yield a response containing two results. +// However, because we have a scanLimit of 2, we'll be limited to filtering only two candidate +// transactions per page, of which one will match the filtering criteria. + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } + + public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) { + let tmp = lhs.value; + lhs.value = rhs.value; + rhs.value = tmp; + transfer::public_transfer(lhs, recipient); + transfer::public_transfer(rhs, recipient); + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @A --sender A + +//# run Test::M1::create --args 1 @B --sender B + +//# run Test::M1::create --args 2 @A --sender A + +//# run Test::M1::create --args 3 @B --sender B + +//# run Test::M1::create --args 4 @A --sender A + +//# create-checkpoint + +//# run Test::M1::create --args 100 @B --sender B + +//# run Test::M1::create --args 101 @A --sender A + +//# run Test::M1::create --args 102 @B --sender B + +//# run Test::M1::create --args 103 @A --sender A + +//# run Test::M1::create --args 104 @B --sender B + +//# create-checkpoint + +//# run-graphql +# ten transactions total +{ + transactionBlocks(first: 50 filter: {afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + + +//# run-graphql +# startCursor 2, endCursor 3, both scan limited +{ + transactionBlocks(first: 3 scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":3,"i":true} +# startCursor: 4, endCursor 5, both scan limited +{ + transactionBlocks(first: 3 scanLimit: 2 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":5,"i":true} +# note the changes: first 3 -> 4, scanLimit 2 -> 3 +# startCursor: 6, endCursor: 8 both scanLimited +# because we've exhausted all matches in the scanned window, +# we set the endCursor to the final tx scanned, rather than snapping +# to the last matched tx +{ + transactionBlocks(first: 4 scanLimit: 3 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql 
--cursors {"c":4,"t":8,"i":true} +# startCursor: 9, endCursor: 11 both scanLimited +{ + transactionBlocks(first: 4 scanLimit: 3 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":10,"i":false} +# using the last element's cursor from the previous query +# will yield an empty set, fixed on the last scannable tx +{ + transactionBlocks(first: 4 scanLimit: 3 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":11,"i":true} +# trying to paginate on the `endCursor` even though hasNextPage is false +# cursors are null, both page flags are false +{ + transactionBlocks(first: 4 scanLimit: 3 after: "@{cursor_0}" filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/last.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/last.exp new file mode 100644 index 0000000000000..714548cfc5c09 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/last.exp @@ -0,0 +1,365 @@ +processed 22 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 10-31: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 33: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 35: +//# run Test::M1::create --args 0 @A --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 37: +//# run Test::M1::create --args 1 @B --sender B +created: object(4,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 5, line 39: +//# run Test::M1::create --args 2 @A --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 41: +//# run Test::M1::create --args 3 @B --sender B +created: object(6,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 43: +//# run Test::M1::create --args 4 @A --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 45: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 47: +//# run Test::M1::create --args 100 @B --sender B +created: object(9,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 49: +//# run Test::M1::create --args 101 @A 
--sender A +created: object(10,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 51: +//# run Test::M1::create --args 102 @B --sender B +created: object(11,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 53: +//# run Test::M1::create --args 103 @A --sender A +created: object(12,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 55: +//# run Test::M1::create --args 104 @B --sender B +created: object(13,0) +mutated: object(0,1) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 57: +//# create-checkpoint +Checkpoint created: 3 + +task 15, lines 59-81: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjozLCJpIjpmYWxzZX0", + "node": { + "digest": "FveHd4cC5ykjtknvewFjmf6V1gHYhdkuEoUEBAV3y52h", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "Hgu3LePqrpyR8Vq3Ve4L2KmvErcdcz92u7YiiotkKJ1N", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo1LCJpIjpmYWxzZX0", + "node": { + "digest": "5pVYLMHazkfshyLvmQyow1UY4vn9V481KWDxUYpX65BZ", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "2EwyAHiMofhbM5z5ty7XT1QXs4sNZfHmZLX513Ag8sD3", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo3LCJpIjpmYWxzZX0", + "node": { + "digest": "FtVAPJKNoPumPcQuyzznmZB99yhYNqdsmC7wKPj17xR3", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo4LCJpIjpmYWxzZX0", + "node": { + "digest": "4LUhoFJMmZfG71RHiRkwa9KHovrDv3S3mqUM1vu9JWKJ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0Ijo5LCJpIjpmYWxzZX0", + "node": { + "digest": "FasAf1kHei9QkZuPLBSkCdXXMtv13RbCgiywktFzx58m", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + "digest": "AnqDERsdbEiE26CACJa6KtJTLsggisgu7yxhMJ6mU1JZ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + }, + { + "cursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "node": { + "digest": "GKKT2n7jc6YyJDcckP3kXhVN5FEZJEk1fN3aFzcgRyRr", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 16, lines 83-105: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoxMCwiaSI6dHJ1ZX0", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoxMCwiaSI6ZmFsc2V9", + "node": { + 
"digest": "AnqDERsdbEiE26CACJa6KtJTLsggisgu7yxhMJ6mU1JZ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 17, lines 107-129: +//# run-graphql --cursors {"c":4,"t":10,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo4LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo5LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo4LCJpIjpmYWxzZX0", + "node": { + "digest": "4LUhoFJMmZfG71RHiRkwa9KHovrDv3S3mqUM1vu9JWKJ", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + } + ] + } + } +} + +task 18, lines 131-154: +//# run-graphql --cursors {"c":4,"t":8,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo2LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo3LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo2LCJpIjpmYWxzZX0", + "node": { + "digest": "2EwyAHiMofhbM5z5ty7XT1QXs4sNZfHmZLX513Ag8sD3", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 19, lines 156-178: +//# run-graphql --cursors {"c":4,"t":6,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": true, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0Ijo0LCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0Ijo1LCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0Ijo0LCJpIjpmYWxzZX0", + "node": { + "digest": "Hgu3LePqrpyR8Vq3Ve4L2KmvErcdcz92u7YiiotkKJ1N", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 20, lines 180-202: +//# run-graphql --cursors {"c":4,"t":4,"i":false} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": true, + "startCursor": "eyJjIjo0LCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjo0LCJ0IjozLCJpIjp0cnVlfQ" + }, + "edges": [ + { + "cursor": "eyJjIjo0LCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "CReUjLynvpq4dD4w6zekGxvSyBBQF2e3KG3K2Rs7oD8L", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} + +task 21, lines 205-227: +//# run-graphql --cursors {"c":4,"t":2,"i":true} +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": null, + "endCursor": null + }, + "edges": [] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/last.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/last.move new file mode 100644 index 0000000000000..9a1280bed8b3d --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/le_page/last.move @@ -0,0 +1,227 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Without a scan limit, we would expect each query to yield a response containing two results. +// However, because we have a scanLimit of 2, we'll be limited to filtering only two candidate +// transactions per page, of which one will match the filtering criteria. 
+ +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } + + public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) { + let tmp = lhs.value; + lhs.value = rhs.value; + rhs.value = tmp; + transfer::public_transfer(lhs, recipient); + transfer::public_transfer(rhs, recipient); + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @A --sender A + +//# run Test::M1::create --args 1 @B --sender B + +//# run Test::M1::create --args 2 @A --sender A + +//# run Test::M1::create --args 3 @B --sender B + +//# run Test::M1::create --args 4 @A --sender A + +//# create-checkpoint + +//# run Test::M1::create --args 100 @B --sender B + +//# run Test::M1::create --args 101 @A --sender A + +//# run Test::M1::create --args 102 @B --sender B + +//# run Test::M1::create --args 103 @A --sender A + +//# run Test::M1::create --args 104 @B --sender B + +//# create-checkpoint + +//# run-graphql +# ten transactions total +{ + transactionBlocks(last: 50 filter: {afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql +# startCursor: 10, endCursor: 11, both scan limited +{ + transactionBlocks(last: 3 scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":10,"i":true} +# startCursor: 8, endCursor: 9, both scan limited +{ + transactionBlocks(last: 3 before: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":8,"i":false} +# use result's cursor instead of boundary cursor +# startCursor: 6, endCursor: 7, both scan limited +{ + transactionBlocks(last: 3 before: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":6,"i":true} +# startCursor: 4, endCursor: 5, both scan limited +{ + transactionBlocks(last: 3 before: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} + +//# run-graphql --cursors {"c":4,"t":4,"i":false} +# reached the end with this query +{ + transactionBlocks(last: 3 before: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + 
checkpoint { + sequenceNumber + } + } + } + } + } +} + + +//# run-graphql --cursors {"c":4,"t":2,"i":true} +# cursors are null, and page flags are both false +{ + transactionBlocks(last: 3 before: "@{cursor_0}" scanLimit: 2 filter: {recvAddress: "@{A}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/require.exp b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/require.exp new file mode 100644 index 0000000000000..1e5207ad10ed9 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/require.exp @@ -0,0 +1,606 @@ +processed 25 tasks + +init: +A: object(0,0), B: object(0,1) + +task 1, lines 6-27: +//# publish +created: object(1,0) +mutated: object(0,2) +gas summary: computation_cost: 1000000, storage_cost: 5798800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 2, line 29: +//# create-checkpoint +Checkpoint created: 1 + +task 3, line 31: +//# run Test::M1::create --args 0 @B --sender A +created: object(3,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 0, non_refundable_storage_fee: 0 + +task 4, line 33: +//# run Test::M1::create --args 1 @B --sender A +created: object(4,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 5, line 35: +//# run Test::M1::create --args 2 @B --sender A +created: object(5,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 6, line 37: +//# run Test::M1::create --args 3 @B --sender A +created: object(6,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 7, line 39: +//# run Test::M1::create --args 4 @B --sender A +created: object(7,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 8, line 41: +//# create-checkpoint +Checkpoint created: 2 + +task 9, line 43: +//# run Test::M1::create --args 100 @B --sender A +created: object(9,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 10, line 45: +//# run Test::M1::create --args 101 @B --sender A +created: object(10,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 11, line 47: +//# run Test::M1::create --args 102 @B --sender A +created: object(11,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 12, line 49: +//# run Test::M1::create --args 103 @B --sender A +created: object(12,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 13, line 51: +//# run Test::M1::create --args 104 @B --sender A +created: object(13,0) +mutated: object(0,0) +gas summary: computation_cost: 1000000, storage_cost: 2302800, storage_rebate: 978120, non_refundable_storage_fee: 9880 + +task 14, line 53: 
+//# create-checkpoint +Checkpoint created: 3 + +task 15, lines 55-74: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "DiywoRFzC33smQhVf5K7AcM853XFgfgFxBGErLTEvVWi", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7MgEfj6QXfsjDFtvJSAE9FNL3RYt8Kdw21NnfuuVXkbt", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "B8mg7JvC64cnh656yuwHyXyFPULutxBECMFkCPQNStmZ", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7i7Evom2DUeS1PYxKwDnLfoZpv2r6kxdGoEWxXdFA9xV", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "5HoMDKMTYX3gibs8VroZUeSC3134MroLJN7hfAVZxdPM", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "BU8q4bm7XjaZiV8cPmchVp6SsmU8cmBWNUVmFTUNAHfb", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "HckXhLnDYV8hfMh1p8M8r7dpEpvzp5GpG9oHzv9dmv4R", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "Hvh3oJuoTeRYRU2wkTriwDsozpaay5c7dhRS7Ru3S2S5", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "ADhRGJc24AQCvUnQNgkfjnna3hsTFK51YTgq5J5VAKQr", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + ] + } + } +} + +task 16, lines 76-95: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6ZmFsc2V9", + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0" + }, + "nodes": [ + { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "DiywoRFzC33smQhVf5K7AcM853XFgfgFxBGErLTEvVWi", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7MgEfj6QXfsjDFtvJSAE9FNL3RYt8Kdw21NnfuuVXkbt", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "B8mg7JvC64cnh656yuwHyXyFPULutxBECMFkCPQNStmZ", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7i7Evom2DUeS1PYxKwDnLfoZpv2r6kxdGoEWxXdFA9xV", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "5HoMDKMTYX3gibs8VroZUeSC3134MroLJN7hfAVZxdPM", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "BU8q4bm7XjaZiV8cPmchVp6SsmU8cmBWNUVmFTUNAHfb", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "HckXhLnDYV8hfMh1p8M8r7dpEpvzp5GpG9oHzv9dmv4R", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "Hvh3oJuoTeRYRU2wkTriwDsozpaay5c7dhRS7Ru3S2S5", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "ADhRGJc24AQCvUnQNgkfjnna3hsTFK51YTgq5J5VAKQr", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + ] + } + } +} + +task 17, lines 97-116: +//# run-graphql +Response: { + "data": null, + "errors": [ + { + "message": "A scan limit must be specified for the given filter combination", + "locations": [ + { + "line": 3, + "column": 3 + } + ], + "path": [ + "transactionBlocks" + ], + 
"extensions": { + "code": "BAD_USER_INPUT" + } + } + ] +} + +task 18, lines 118-137: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0", + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ" + }, + "nodes": [ + { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "DiywoRFzC33smQhVf5K7AcM853XFgfgFxBGErLTEvVWi", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7MgEfj6QXfsjDFtvJSAE9FNL3RYt8Kdw21NnfuuVXkbt", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "B8mg7JvC64cnh656yuwHyXyFPULutxBECMFkCPQNStmZ", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7i7Evom2DUeS1PYxKwDnLfoZpv2r6kxdGoEWxXdFA9xV", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "5HoMDKMTYX3gibs8VroZUeSC3134MroLJN7hfAVZxdPM", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "BU8q4bm7XjaZiV8cPmchVp6SsmU8cmBWNUVmFTUNAHfb", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "HckXhLnDYV8hfMh1p8M8r7dpEpvzp5GpG9oHzv9dmv4R", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "Hvh3oJuoTeRYRU2wkTriwDsozpaay5c7dhRS7Ru3S2S5", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "ADhRGJc24AQCvUnQNgkfjnna3hsTFK51YTgq5J5VAKQr", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + } + ] + } + } +} + +task 19, lines 139-158: +//# run-graphql +Response: { + "data": null, + "errors": [ + { + "message": "A scan limit must be specified for the given filter combination", + "locations": [ + { + "line": 3, + "column": 3 + } + ], + "path": [ + "transactionBlocks" + ], + "extensions": { + "code": "BAD_USER_INPUT" + } + } + ] +} + +task 20, lines 160-179: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0", + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ" + }, + "nodes": [ + { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "DiywoRFzC33smQhVf5K7AcM853XFgfgFxBGErLTEvVWi", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7MgEfj6QXfsjDFtvJSAE9FNL3RYt8Kdw21NnfuuVXkbt", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "B8mg7JvC64cnh656yuwHyXyFPULutxBECMFkCPQNStmZ", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "7i7Evom2DUeS1PYxKwDnLfoZpv2r6kxdGoEWxXdFA9xV", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + }, + { + "digest": "5HoMDKMTYX3gibs8VroZUeSC3134MroLJN7hfAVZxdPM", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "BU8q4bm7XjaZiV8cPmchVp6SsmU8cmBWNUVmFTUNAHfb", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "HckXhLnDYV8hfMh1p8M8r7dpEpvzp5GpG9oHzv9dmv4R", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "Hvh3oJuoTeRYRU2wkTriwDsozpaay5c7dhRS7Ru3S2S5", + "effects": { + "checkpoint": { + "sequenceNumber": 3 + } + } + }, + { + "digest": "ADhRGJc24AQCvUnQNgkfjnna3hsTFK51YTgq5J5VAKQr", + "effects": { + 
"checkpoint": { + "sequenceNumber": 3 + } + } + } + ] + } + } +} + +task 21, lines 181-200: +//# run-graphql +Response: { + "data": null, + "errors": [ + { + "message": "A scan limit must be specified for the given filter combination", + "locations": [ + { + "line": 3, + "column": 3 + } + ], + "path": [ + "transactionBlocks" + ], + "extensions": { + "code": "BAD_USER_INPUT" + } + } + ] +} + +task 22, lines 202-221: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasNextPage": false, + "hasPreviousPage": false, + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0", + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ" + }, + "nodes": [] + } + } +} + +task 23, lines 224-243: +//# run-graphql +Response: { + "data": null, + "errors": [ + { + "message": "A scan limit must be specified for the given filter combination", + "locations": [ + { + "line": 3, + "column": 3 + } + ], + "path": [ + "transactionBlocks" + ], + "extensions": { + "code": "BAD_USER_INPUT" + } + } + ] +} + +task 24, lines 245-269: +//# run-graphql +Response: { + "data": { + "transactionBlocks": { + "pageInfo": { + "hasPreviousPage": false, + "hasNextPage": false, + "startCursor": "eyJjIjozLCJ0IjoyLCJpIjp0cnVlfQ", + "endCursor": "eyJjIjozLCJ0IjoxMSwiaSI6dHJ1ZX0" + }, + "edges": [ + { + "cursor": "eyJjIjozLCJ0IjoyLCJpIjpmYWxzZX0", + "node": { + "digest": "HzyC8gcn4m1ymKxYSpWMaNnmbrqm4hX7UBteJ4me3LFd", + "effects": { + "checkpoint": { + "sequenceNumber": 2 + } + } + } + } + ] + } + } +} diff --git a/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/require.move b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/require.move new file mode 100644 index 0000000000000..90b5570b7ded3 --- /dev/null +++ b/crates/sui-graphql-e2e-tests/tests/transactions/scan_limit/require.move @@ -0,0 +1,269 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +//# init --protocol-version 48 --addresses Test=0x0 --accounts A B --simulator + +//# publish +module Test::M1 { + public struct Object has key, store { + id: UID, + value: u64, + } + + public entry fun create(value: u64, recipient: address, ctx: &mut TxContext) { + transfer::public_transfer( + Object { id: object::new(ctx), value }, + recipient + ) + } + + public fun swap_value_and_send(mut lhs: Object, mut rhs: Object, recipient: address) { + let tmp = lhs.value; + lhs.value = rhs.value; + rhs.value = tmp; + transfer::public_transfer(lhs, recipient); + transfer::public_transfer(rhs, recipient); + } +} + +//# create-checkpoint + +//# run Test::M1::create --args 0 @B --sender A + +//# run Test::M1::create --args 1 @B --sender A + +//# run Test::M1::create --args 2 @B --sender A + +//# run Test::M1::create --args 3 @B --sender A + +//# run Test::M1::create --args 4 @B --sender A + +//# create-checkpoint + +//# run Test::M1::create --args 100 @B --sender A + +//# run Test::M1::create --args 101 @B --sender A + +//# run Test::M1::create --args 102 @B --sender A + +//# run Test::M1::create --args 103 @B --sender A + +//# run Test::M1::create --args 104 @B --sender A + +//# create-checkpoint + +//# run-graphql +# Expect ten results +{ + transactionBlocks(filter: {recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# Don't need scanLimit with sender +{ + transactionBlocks(filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# scanLimit required +{ + transactionBlocks(filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 function: "@{Test}::M1::create"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# valid +{ + transactionBlocks(scanLimit: 50 filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 function: "@{Test}::M1::create"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# scanLimit required +{ + transactionBlocks(filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 kind: PROGRAMMABLE_TX}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# valid +{ + transactionBlocks(scanLimit: 50 filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 kind: PROGRAMMABLE_TX}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# scanLimit required +{ + transactionBlocks(filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 inputObject: "@{obj_3_0}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } 
+ } + } + } +} + +//# run-graphql +# valid +{ + transactionBlocks(scanLimit: 50 filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 inputObject: "@{obj_3_0}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + + +//# run-graphql +# scanLimit required +{ + transactionBlocks(filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 changedObject: "@{obj_3_0}"}) { + pageInfo { + hasNextPage + hasPreviousPage + endCursor + startCursor + } + nodes { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } +} + +//# run-graphql +# Only one of the transactions will match this filter +# Because scanLimit is specified, the boundary cursors should be at 2 and 11, +# and both will indicate is_scan_limited +{ + transactionBlocks(scanLimit: 50 filter: {signAddress: "@{A}" recvAddress: "@{B}" afterCheckpoint: 1 beforeCheckpoint: 4 changedObject: "@{obj_3_0}"}) { + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + edges { + cursor + node { + digest + effects { + checkpoint { + sequenceNumber + } + } + } + } + } +} diff --git a/crates/sui-graphql-rpc/Cargo.toml b/crates/sui-graphql-rpc/Cargo.toml index 87c8a1571a9ef..12414fd4888c7 100644 --- a/crates/sui-graphql-rpc/Cargo.toml +++ b/crates/sui-graphql-rpc/Cargo.toml @@ -32,7 +32,6 @@ lru.workspace = true move-binary-format.workspace = true move-disassembler.workspace = true move-ir-types.workspace = true -markdown-gen.workspace = true mysten-metrics.workspace = true mysten-network.workspace = true move-core-types.workspace = true diff --git a/crates/sui-graphql-rpc/docs/examples.md b/crates/sui-graphql-rpc/docs/examples.md deleted file mode 100644 index b227665131112..0000000000000 --- a/crates/sui-graphql-rpc/docs/examples.md +++ /dev/null @@ -1,1700 +0,0 @@ -# Sui GraphQL Examples -### [Address](#0) -####   [Address](#0) -####   [Transaction Block Connection](#1) -### [Balance Connection](#1) -####   [Balance Connection](#65535) -### [Chain Id](#2) -####   [Chain Id](#131070) -### [Checkpoint](#3) -####   [At Digest](#196605) -####   [At Seq Num](#196606) -####   [First Two Tx Blocks For Checkpoint](#196607) -####   [Latest Checkpoint](#196608) -####   [Multiple Selections](#196609) -####   [With Timestamp Tx Block Live Objects](#196610) -####   [With Tx Sent Addr Filter](#196611) -### [Checkpoint Connection](#4) -####   [Ascending Fetch](#262140) -####   [First Ten After Checkpoint](#262141) -####   [Last Ten After Checkpoint](#262142) -### [Coin Connection](#5) -####   [Coin Connection](#327675) -### [Coin Metadata](#6) -####   [Coin Metadata](#393210) -### [Epoch](#7) -####   [Latest Epoch](#458745) -####   [Specific Epoch](#458746) -####   [With Checkpoint Connection](#458747) -####   [With Tx Block Connection](#458748) -####   [With Tx Block Connection Latest Epoch](#458749) -### [Event Connection](#8) -####   [Event Connection](#524280) -####   [Filter By Emitting Package Module And Event Type](#524281) -####   [Filter By Sender](#524282) -### [Name Service](#9) -####   [Name Service](#589815) -### [Object](#10) -####   [Object](#655350) -### [Object Connection](#11) -####   [Filter Object Ids](#720885) -####   [Filter On Generic Type](#720886) -####   [Filter On Type](#720887) -####   [Filter Owner](#720888) -####   [Object Connection](#720889) -### [Owner](#12) -####   [Dynamic Field](#786420) -####   [Dynamic Field 
Connection](#786421) -####   [Dynamic Object Field](#786422) -####   [Owner](#786423) -### [Protocol Configs](#13) -####   [Key Value](#851955) -####   [Key Value Feature Flag](#851956) -####   [Specific Config](#851957) -####   [Specific Feature Flag](#851958) -### [Service Config](#14) -####   [Service Config](#917490) -### [Stake Connection](#15) -####   [Stake Connection](#983025) -### [Sui System State Summary](#16) -####   [Sui System State Summary](#1048560) -### [Transaction Block](#17) -####   [Transaction Block](#1114095) -####   [Transaction Block Kind](#1114096) -### [Transaction Block Connection](#18) -####   [Before After Checkpoint](#1179630) -####   [Changed Object Filter](#1179631) -####   [Input Object Filter](#1179632) -####   [Input Object Sign Addr Filter](#1179633) -####   [Package Filter](#1179634) -####   [Package Module Filter](#1179635) -####   [Package Module Func Filter](#1179636) -####   [Recv Addr Filter](#1179637) -####   [Sign Addr Filter](#1179638) -####   [Tx Ids Filter](#1179639) -####   [Tx Kind Filter](#1179640) -####   [With Defaults Ascending](#1179641) -### [Transaction Block Effects](#19) -####   [Transaction Block Effects](#1245165) -## -## Address -### -### Address -#### Get the address' balance and its coins' id and type - ->

{
->  address(
->    address: "0x5094652429957619e6efa79a404a6714d1126e63f551f4b6c7fb76440f8118c9"
->  ) {
->    address
->    balance {
->      coinType {
->        repr
->      }
->      coinObjectCount
->      totalBalance
->    }
->    coins {
->      nodes {
->        contents {
->          type {
->            repr
->          }
->        }
->      }
->    }
->  }
->}
- -### -### Transaction Block Connection -#### See examples in Query::transactionBlocks as this is similar behavior -#### to the `transactionBlocks` in Query but supports additional -#### `AddressTransactionBlockRelationship` filter -#### Filtering on package where the signer of the TX is the current -#### address and displaying the transaction's sender and the gas price -#### and budget. - ->
# See examples in Query::transactionBlocks as this is similar behavior
-># to the `transactionBlocks` in Query but supports additional
-># `AddressTransactionBlockRelationship` filter
->
-># Filtering on package where the signer of the TX is the current
-># address and displaying the transaction's sender and the gas price
-># and budget.
->query transaction_block_with_relation_filter {
->  address(address: "0x2") {
->    transactionBlocks(relation: SIGN, filter: { function: "0x2" }) {
->      nodes {
->        sender {
->          address
->        }
->        gasInput {
->          gasPrice
->          gasBudget
->        }
->      }
->    }
->  }
->}
- -## -## Balance Connection -### -### Balance Connection -#### Query the balance for objects of type COIN and then for each coin -#### get the coin type, the number of objects, and the total balance - ->
{
->  address(
->    address: "0x5094652429957619e6efa79a404a6714d1126e63f551f4b6c7fb76440f8118c9"
->  ) {
->    balance(
->      type: "0xc060006111016b8a020ad5b33834984a437aaa7d3c74c18e09a95d48aceab08c::coin::COIN"
->    ) {
->      coinObjectCount
->      totalBalance
->    }
->    balances {
->      nodes {
->        coinType {
->          repr
->        }
->        coinObjectCount
->        totalBalance
->      }
->      pageInfo {
->        endCursor
->      }
->    }
->  }
->}
- -## -## Chain Id -### -### Chain Id -#### Returns the chain identifier for the chain that the server is tracking - ->
{
->  chainIdentifier
->}
- -## -## Checkpoint -### -### At Digest -#### Get the checkpoint's information at a particular digest - ->
{
->  checkpoint(id: { digest: "GaDeWEfbSQCQ8FBQHUHVdm4KjrnbgMqEZPuhStoq5njU" }) {
->    digest
->    sequenceNumber
->    validatorSignatures
->    previousCheckpointDigest
->    networkTotalTransactions
->    rollingGasSummary {
->      computationCost
->      storageCost
->      storageRebate
->      nonRefundableStorageFee
->    }
->    epoch {
->      epochId
->      referenceGasPrice
->      startTimestamp
->      endTimestamp
->    }
->  }
->}
- -### -### At Seq Num -#### Get the checkpoint's information at a particular sequence number - ->
{
->  checkpoint(id: { sequenceNumber: 10 }) {
->    digest
->    sequenceNumber
->    validatorSignatures
->    previousCheckpointDigest
->    networkTotalTransactions
->    rollingGasSummary {
->      computationCost
->      storageCost
->      storageRebate
->      nonRefundableStorageFee
->    }
->    epoch {
->      epochId
->      referenceGasPrice
->      startTimestamp
->      endTimestamp
->    }
->  }
->}
- -### -### First Two Tx Blocks For Checkpoint -#### Get data for the first two transaction blocks of checkpoint at sequence number 10 - ->
{
->  checkpoint(id: { sequenceNumber: 10 }) {
->    transactionBlocks(first: 2) {
->      edges {
->        node {
->          kind {
->            __typename
->          }
->          digest
->          sender {
->            address
->          }
->          expiration {
->            epochId
->          }
->        }
->      }
->      pageInfo {
->        startCursor
->        hasNextPage
->        hasPreviousPage
->        endCursor
->      }
->    }
->  }
->}
- -### -### Latest Checkpoint -#### Latest checkpoint's data - ->
{
->  checkpoint {
->    digest
->    sequenceNumber
->    validatorSignatures
->    previousCheckpointDigest
->    networkTotalTransactions
->    rollingGasSummary {
->      computationCost
->      storageCost
->      storageRebate
->      nonRefundableStorageFee
->    }
->    epoch {
->      epochId
->      referenceGasPrice
->      startTimestamp
->      endTimestamp
->    }
->  }
->}
- -### -### Multiple Selections -#### Get the checkpoint at sequence 9769 and show -#### its transactions - ->
{
->  checkpoint(id: { sequenceNumber: 9769 }) {
->    digest
->    sequenceNumber
->    timestamp
->    validatorSignatures
->    previousCheckpointDigest
->    networkTotalTransactions
->    rollingGasSummary {
->      computationCost
->      storageCost
->      storageRebate
->      nonRefundableStorageFee
->    }
->    epoch {
->      epochId
->      liveObjectSetDigest
->    }
->    transactionBlocks {
->      edges {
->        node {
->          digest
->          sender {
->            address
->          }
->          expiration {
->            epochId
->          }
->        }
->      }
->    }
->  }
->}
- -### -### With Timestamp Tx Block Live Objects -#### Latest checkpoint's timestamp, and transaction block data - ->
{
->  checkpoint {
->    digest
->    sequenceNumber
->    timestamp
->    transactionBlocks {
->      edges {
->        node {
->          digest
->          sender {
->            address
->          }
->          expiration {
->            epochId
->          }
->        }
->      }
->    }
->  }
->}
- -### -### With Tx Sent Addr Filter -#### Select the checkpoint at sequence number 14830285 and filter its transactions on signAddress - ->
{
->  checkpoint(id: { sequenceNumber: 14830285 }) {
->    digest
->    sequenceNumber
->    timestamp
->    transactionBlocks(
->      filter: {
->        signAddress: "0x0000000000000000000000000000000000000000000000000000000000000000"
->      }
->    ) {
->      edges {
->        node {
->          digest
->          sender {
->            address
->          }
->          expiration {
->            epochId
->          }
->        }
->      }
->    }
->  }
->}
- -## -## Checkpoint Connection -### -### Ascending Fetch -#### Use the checkpoint connection to fetch the default number of checkpoints, in ascending order - ->
{
->  checkpoints {
->    nodes {
->      digest
->      sequenceNumber
->      validatorSignatures
->      previousCheckpointDigest
->      networkTotalTransactions
->      rollingGasSummary {
->        computationCost
->        storageCost
->        storageRebate
->        nonRefundableStorageFee
->      }
->      epoch {
->        epochId
->        referenceGasPrice
->        startTimestamp
->        endTimestamp
->      }
->    }
->  }
->}
- -### -### First Ten After Checkpoint -#### Fetch the digest and sequence number of the first 10 checkpoints after the cursor, which in this example is set to be checkpoint 0. Note that the cursor is opaque. - ->
{
->  checkpoints(first: 10, after: "eyJjIjoyMjgwMDU4MCwicyI6MH0") {
->    nodes {
->      sequenceNumber
->      digest
->    }
->  }
->}
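The cursor in the example above is opaque by contract, but when debugging it helps to know that these cursors appear to be unpadded base64-encoded JSON. A minimal Python sketch, assuming the layout seen in these examples (in checkpoint cursors, `c` looks like the checkpoint the cursor was generated at and `s` the checkpoint sequence number; the exact fields are an implementation detail and may change):

import base64
import json

def decode_cursor(cursor: str) -> dict:
    # Cursors are unpadded base64; restore the padding before decoding.
    return json.loads(base64.b64decode(cursor + "=" * (-len(cursor) % 4)))

def encode_cursor(fields: dict) -> str:
    raw = json.dumps(fields, separators=(",", ":")).encode()
    return base64.b64encode(raw).decode().rstrip("=")

# The `after` cursor used in the query above:
print(decode_cursor("eyJjIjoyMjgwMDU4MCwicyI6MH0"))      # {'c': 22800580, 's': 0}
# Transaction cursors in the scan-limit tests elsewhere in this change also
# carry a transaction sequence number `t` and an `i` flag that appears to mark
# scan-limited boundary cursors:
print(decode_cursor("eyJjIjo0LCJ0IjoxMCwiaSI6dHJ1ZX0"))  # {'c': 4, 't': 10, 'i': True}
assert encode_cursor({"c": 22800580, "s": 0}) == "eyJjIjoyMjgwMDU4MCwicyI6MH0"

Clients should still treat cursors as opaque and only pass back values returned in `pageInfo` or `edges`; the decoding above is purely diagnostic.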
- -### -### Last Twenty Before Checkpoint -#### Fetch the digest and the sequence number of the last 20 checkpoints before the cursor - ->
{
->  checkpoints(last: 20, before: "eyJjIjoyMjgwMDY1MSwicyI6MjI4MDA2MzJ9") {
->    nodes {
->      sequenceNumber
->      digest
->    }
->  }
->}
- -## -## Coin Connection -### -### Coin Connection -#### Get last 3 coins owned by `0x0`. - ->
{
->  address(
->    address: "0x0000000000000000000000000000000000000000000000000000000000000000"
->  ) {
->    coins(last: 3) {
->      nodes {
->        coinBalance
->      }
->      pageInfo {
->        endCursor
->        hasNextPage
->      }
->    }
->  }
->}
- -## -## Coin Metadata -### -### Coin Metadata - ->
query CoinMetadata {
->  coinMetadata(coinType: "0x2::sui::SUI") {
->    decimals
->    name
->    symbol
->    description
->    iconUrl
->    supply
->    hasPublicTransfer
->  }
->}
- -## -## Epoch -### -### Latest Epoch -#### Latest epoch, since the `id` argument is omitted - ->
{
->  epoch {
->    protocolConfigs {
->      protocolVersion
->    }
->    epochId
->    referenceGasPrice
->    startTimestamp
->    endTimestamp
->  }
->}
- -### -### Specific Epoch -#### Selecting all fields for epoch 100 - ->
{
->  epoch(id: 100) {
->    protocolConfigs {
->      protocolVersion
->    }
->    epochId
->    referenceGasPrice
->    startTimestamp
->    endTimestamp
->    validatorSet {
->      totalStake
->      pendingActiveValidatorsSize
->      stakingPoolMappingsSize
->      inactivePoolsSize
->      validatorCandidatesSize
->      activeValidators {
->        nodes {
->          name
->          description
->          imageUrl
->          projectUrl
->          exchangeRates {
->            storageRebate
->            bcs
->            hasPublicTransfer
->          }
->          exchangeRatesSize
->          stakingPoolActivationEpoch
->          stakingPoolSuiBalance
->          rewardsPool
->          poolTokenBalance
->          pendingStake
->          pendingTotalSuiWithdraw
->          pendingPoolTokenWithdraw
->          votingPower
->          gasPrice
->          commissionRate
->          nextEpochStake
->          nextEpochGasPrice
->          nextEpochCommissionRate
->          atRisk
->        }
->      }
->    }
->  }
->}
- -### -### With Checkpoint Connection - ->
{
->  epoch {
->    checkpoints {
->      nodes {
->        transactionBlocks(first: 10) {
->          pageInfo {
->            hasNextPage
->            endCursor
->          }
->          edges {
->            cursor
->            node {
->              sender {
->                address
->              }
->              effects {
->                gasEffects {
->                  gasObject {
->                    address
->                  }
->                }
->              }
->              gasInput {
->                gasPrice
->                gasBudget
->              }
->            }
->          }
->        }
->      }
->    }
->  }
->}
- -### -### With Tx Block Connection -#### Fetch the first 20 transactions after tx 231220153 (encoded as a -#### cursor) in epoch 97. - ->
{
->  epoch(id: 97) {
->    transactionBlocks(first: 20, after:"eyJjIjoyNjkzMzc3OCwidCI6MjMxMjIwMTUzLCJ0YyI6ODAxMDg4NH0") {
->      pageInfo {
->        hasNextPage
->        endCursor
->      }
->      edges {
->        cursor
->        node {
->          digest
->          sender {
->            address
->          }
->          effects {
->            gasEffects {
->              gasObject {
->                address
->              }
->            }
->          }
->          gasInput {
->            gasPrice
->            gasBudget
->          }
->        }
->      }
->    }
->  }
->}
- -### -### With Tx Block Connection Latest Epoch - ->
{
->  epoch {
->    transactionBlocks(first: 20, after: "eyJjIjoyNjkzMzMyNCwidCI6MTEwMTYxMDQ4MywidGMiOjI2ODUxMjQ4fQ") {
->      pageInfo {
->        hasNextPage
->        endCursor
->      }
->      edges {
->        cursor
->        node {
->          sender {
->            address
->          }
->          effects {
->            gasEffects {
->              gasObject {
->                address
->              }
->            }
->          }
->          gasInput {
->            gasPrice
->            gasBudget
->          }
->        }
->      }
->    }
->  }
->}
- -## -## Event Connection -### -### Event Connection - ->
{
->  events(
->    filter: {
->      eventType: "0x3164fcf73eb6b41ff3d2129346141bd68469964c2d95a5b1533e8d16e6ea6e13::Market::ChangePriceEvent<0x2::sui::SUI>"
->    }
->  ) {
->    nodes {
->      sendingModule {
->        name
->        package { digest }
->      }
->      type {
->        repr
->      }
->      sender {
->        address
->      }
->      timestamp
->      json
->      bcs
->    }
->  }
->}
- -### -### Filter By Emitting Package Module And Event Type - ->
query ByEmittingPackageModuleAndEventType {
->  events(
->    first: 1
->    after: "eyJ0eCI6Njc2MywiZSI6MCwiYyI6MjI4MDA3NDJ9"
->    filter: {
->      emittingModule: "0x3::sui_system",
->      eventType: "0x3::validator::StakingRequestEvent"
->    }
->  ) {
->    pageInfo {
->      hasNextPage
->      endCursor
->    }
->    nodes {
->      sendingModule {
->        name
->      }
->      type {
->        repr
->      }
->      sender {
->        address
->      }
->      timestamp
->      json
->      bcs
->    }
->  }
->}
- -### -### Filter By Sender - ->
query ByTxSender {
->  events(
->    first: 1
->    filter: {
->      sender: "0xdff57c401e125a7e0e06606380560b459a179aacd08ed396d0162d57dbbdadfb"
->    }
->  ) {
->    pageInfo {
->      hasNextPage
->      endCursor
->    }
->    nodes {
->      sendingModule {
->        name
->      }
->      type {
->        repr
->      }
->      sender {
->        address
->      }
->      timestamp
->      json
->      bcs
->    }
->  }
->}
- -## -## Name Service -### -### Name Service - ->
{
->  resolveSuinsAddress(domain: "example.sui") {
->    address
->  }
->  address(
->    address: "0x0b86be5d779fac217b41d484b8040ad5145dc9ba0cba099d083c6cbda50d983e"
->  ) {
->    address
->    balance(type: "0x2::sui::SUI") {
->      coinType {
->        repr
->      }
->      coinObjectCount
->      totalBalance
->    }
->    defaultSuinsName
->  }
->}
- -## -## Object -### -### Object - ->
{
->  object(
->    address: "0x04e20ddf36af412a4096f9014f4a565af9e812db9a05cc40254846cf6ed0ad91"
->  ) {
->    address
->    version
->    digest
->    storageRebate
->    owner {
->      __typename
->      ... on Shared {
->        initialSharedVersion
->      }
->      __typename
->      ... on Parent {
->        parent {
->          address
->        }
->      }
->      __typename
->      ... on AddressOwner {
->        owner {
->          address
->        }
->      }
->    }
->    previousTransactionBlock {
->      digest
->    }
->  }
->}
- -## -## Object Connection -### -### Filter Object Ids -#### Filter on objectIds - ->
{
->  objects(filter: { objectIds: [
->    "0x4bba2c7b9574129c272bca8f58594eba933af8001257aa6e0821ad716030f149"
->  ]}) {
->    edges {
->      node {
->        storageRebate
->        owner {
->          __typename
->          ... on Shared {
->            initialSharedVersion
->          }
->          __typename
->          ... on Parent {
->            parent {
->              address
->            }
->          }
->          __typename
->          ... on AddressOwner {
->            owner {
->              address
->            }
->          }
->        }
->      }
->    }
->  }
->}
- -### -### Filter On Generic Type - ->
{
->  objects(filter: {type: "0x2::coin::Coin"}) {
->    edges {
->      node {
->        asMoveObject {
->          contents {
->            type { repr }
->          }
->        }
->      }
->    }
->  }
->}
- -### -### Filter On Type - ->
{
->  objects(filter: {type: "0x3::staking_pool::StakedSui"}) {
->    edges {
->      node {
->        asMoveObject {
->          contents {
->            type {
->              repr
->            }
->          }
->        }
->      }
->    }
->  }
->}
- -### -### Filter Owner -#### Filter on owner - ->
{
->  objects(filter: {
->    owner: "0x23b7b0e2badb01581ba9b3ab55587d8d9fdae087e0cfc79f2c72af36f5059439"
->  }) {
->    edges {
->      node {
->        storageRebate
->        owner {
->          __typename
->          ... on Shared {
->            initialSharedVersion
->          }
->          __typename
->          ... on Parent {
->            parent {
->              address
->            }
->          }
->          __typename
->          ... on AddressOwner {
->            owner {
->              address
->            }
->          }
->        }
->      }
->    }
->  }
->}
- -### -### Object Connection - ->
{
->  objects {
->    nodes {
->      version
->      digest
->      storageRebate
->      previousTransactionBlock {
->        digest
->        sender { defaultSuinsName }
->        gasInput {
->          gasPrice
->          gasBudget
->        }
->      }
->    }
->    pageInfo {
->      endCursor
->    }
->  }
->}
- -## -## Owner -### -### Dynamic Field - ->
fragment DynamicFieldValueSelection on DynamicFieldValue {
->  ... on MoveValue {
->    type {
->      repr
->    }
->    data
->    __typename
->  }
->  ... on MoveObject {
->    hasPublicTransfer
->    contents {
->      type {
->        repr
->      }
->      data
->    }
->    __typename
->  }
->}
->
->fragment DynamicFieldNameSelection on MoveValue {
->  type {
->    repr
->  }
->  data
->  bcs
->}
->
->fragment DynamicFieldSelect on DynamicField {
->  name {
->    ...DynamicFieldNameSelection
->  }
->  value {
->    ...DynamicFieldValueSelection
->  }
->}
->
->query DynamicField {
->  object(
->    address: "0xb57fba584a700a5bcb40991e1b2e6bf68b0f3896d767a0da92e69de73de226ac"
->  ) {
->    dynamicField(
->      name: {
->        type: "0x2::kiosk::Listing",
->        bcs: "NLArx1UJguOUYmXgNG8Pv8KbKXLjWtCi6i0Yeq1VhfwA",
->      }
->    ) {
->      ...DynamicFieldSelect
->    }
->  }
->}
- -### -### Dynamic Field Connection - ->
fragment DynamicFieldValueSelection on DynamicFieldValue {
->  ... on MoveValue {
->    type {
->      repr
->    }
->    data
->  }
->  ... on MoveObject {
->    hasPublicTransfer
->    contents {
->      type {
->        repr
->      }
->      data
->    }
->  }
->}
->
->fragment DynamicFieldNameSelection on MoveValue {
->  type {
->    repr
->  }
->  data
->  bcs
->}
->
->fragment DynamicFieldSelect on DynamicField {
->  name {
->    ...DynamicFieldNameSelection
->  }
->  value {
->    ...DynamicFieldValueSelection
->  }
->}
->
->query DynamicFields {
->  object(
->    address: "0xb57fba584a700a5bcb40991e1b2e6bf68b0f3896d767a0da92e69de73de226ac"
->  ) {
->    dynamicFields {
->      pageInfo {
->        hasNextPage
->        endCursor
->      }
->      edges {
->        cursor
->        node {
->          ...DynamicFieldSelect
->        }
->      }
->    }
->  }
->}
- -### -### Dynamic Object Field - ->
fragment DynamicFieldValueSelection on DynamicFieldValue {
->  ... on MoveValue {
->    type {
->      repr
->    }
->    data
->    __typename
->  }
->  ... on MoveObject {
->    hasPublicTransfer
->    contents {
->      type {
->        repr
->      }
->      data
->    }
->    __typename
->  }
->}
->
->fragment DynamicFieldNameSelection on MoveValue {
->  type {
->    repr
->  }
->  data
->  bcs
->}
->
->fragment DynamicFieldSelect on DynamicField {
->  name {
->    ...DynamicFieldNameSelection
->  }
->  value {
->    ...DynamicFieldValueSelection
->  }
->}
->
->query DynamicObjectField {
->  object(
->    address: "0xb57fba584a700a5bcb40991e1b2e6bf68b0f3896d767a0da92e69de73de226ac"
->  ) {
->    dynamicObjectField(
->      name: {type: "0x2::kiosk::Item", bcs: "NLArx1UJguOUYmXgNG8Pv8KbKXLjWtCi6i0Yeq1Vhfw="}
->    ) {
->      ...DynamicFieldSelect
->    }
->  }
->}
- -### -### Owner - ->
{
->  owner(
->    address: "0x931f293ce7f65fd5ebe9542653e1fd92fafa03dda563e13b83be35da8a2eecbe"
->  ) {
->    address
->  }
->}
- -## -## Protocol Configs -### -### Key Value -#### Select the key and value of the protocol configuration - ->
{
->  protocolConfig {
->    configs {
->      key
->      value
->    }
->  }
->}
- -### -### Key Value Feature Flag -#### Select the key and value of the feature flag - ->
{
->  protocolConfig {
->    featureFlags {
->      key
->      value
->    }
->  }
->}
- -### -### Specific Config -#### Select the key and value of the specific protocol configuration, in this case `max_move_identifier_len` - ->
{
->  protocolConfig {
->    config(key: "max_move_identifier_len") {
->      key
->      value
->    }
->  }
->}
- -### -### Specific Feature Flag - ->
{
->  protocolConfig {
->    protocolVersion
->    featureFlag(key: "advance_epoch_start_time_in_safe_mode") {
->      value
->    }
->  }
->}
- -## -## Service Config -### -### Service Config -#### Get the configuration of the running service - ->
{
->  serviceConfig {
->    isEnabled(feature: ANALYTICS)
->    enabledFeatures
->    maxQueryDepth
->    maxQueryNodes
->    maxDbQueryCost
->    defaultPageSize
->    maxPageSize
->    requestTimeoutMs
->    maxQueryPayloadSize
->  }
->}
- -## -## Stake Connection -### -### Stake Connection -#### Get all the staked objects for this address and all the active validators at the epoch when the stake became active - ->
{
->  address(
->    address: "0xc0a5b916d0e406ddde11a29558cd91b29c49e644eef597b7424a622955280e1e"
->  ) {
->    address
->    balance(type: "0x2::sui::SUI") {
->      coinType {
->        repr
->      }
->      totalBalance
->    }
->    stakedSuis {
->      nodes {
->        status
->        principal
->        estimatedReward
->        activatedEpoch {
->          epochId
->          referenceGasPrice
->          validatorSet {
->            activeValidators {
->              nodes {
->                name
->                description
->                exchangeRatesSize
->              }
->            }
->            totalStake
->          }
->        }
->        requestedEpoch {
->          epochId
->        }
->      }
->    }
->  }
->}
- -## -## Sui System State Summary -### -### Sui System State Summary -#### Get the latest sui system state data - ->
{
->  epoch {
->    storageFund {
->      totalObjectStorageRebates
->      nonRefundableBalance
->    }
->    safeMode {
->      enabled
->      gasSummary {
->         computationCost
->         storageCost
->         storageRebate
->         nonRefundableStorageFee
->      }
->    }
->    systemStateVersion
->    systemParameters {
->      durationMs
->      stakeSubsidyStartEpoch
->      minValidatorCount
->      maxValidatorCount
->      minValidatorJoiningStake
->      validatorLowStakeThreshold
->      validatorVeryLowStakeThreshold
->      validatorLowStakeGracePeriod
->    }
->    systemStakeSubsidy {
->      balance
->      distributionCounter
->      currentDistributionAmount
->      periodLength
->      decreaseRate
->    }
->  }
->}
- -## -## Transaction Block -### -### Transaction Block -#### Get the data for a TransactionBlock by its digest - ->
{
->  transactionBlock(digest: "HvTjk3ELg8gRofmB1GgrpLHBFeA53QKmUKGEuhuypezg") {
->    sender {
->      address
->    }
->    gasInput {
->      gasSponsor {
->        address
->      }
->      gasPayment {
->        nodes {
->          address
->        }
->      }
->      gasPrice
->      gasBudget
->    }
->    kind {
->      __typename
->    }
->    signatures
->    digest
->    expiration {
->      epochId
->    }
->    effects {
->      timestamp
->    }
->  }
->}
- -### -### Transaction Block Kind - ->
{
->  object(
->    address: "0xd6b9c261ab53d636760a104e4ab5f46c2a3e9cda58bd392488fc4efa6e43728c"
->  ) {
->    previousTransactionBlock {
->      sender {
->        address
->      }
->      kind {
->        __typename
->        ... on ConsensusCommitPrologueTransaction {
->          epoch {
->            epochId
->            referenceGasPrice
->          }
->          round
->          commitTimestamp
->          consensusCommitDigest
->        }
->        ... on ChangeEpochTransaction {
->          computationCharge
->          storageCharge
->          startTimestamp
->          storageRebate
->        }
->        ... on GenesisTransaction {
->          objects {
->            nodes { address }
->          }
->        }
->      }
->    }
->  }
->}
- -## -## Transaction Block Connection -### -### Before After Checkpoint -#### Filter on `beforeCheckpoint` and `afterCheckpoint`. If both are provided, `beforeCheckpoint` must be greater than `afterCheckpoint` - ->
{
->  transactionBlocks(
->    filter: { afterCheckpoint: 10, beforeCheckpoint: 20 }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Changed Object Filter -#### Filter on changedObject - ->
{
->  transactionBlocks(
->    filter: {
->      changedObject: "0x0000000000000000000000000000000000000000000000000000000000000006"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Input Object Filter -#### Filter on inputObject - ->
{
->  transactionBlocks(
->    filter: {
->      inputObject: "0x0000000000000000000000000000000000000000000000000000000000000006"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Input Object Sign Addr Filter -#### Filter on both inputObject and signAddress - ->
{
->  transactionBlocks(
->    filter: {
->      inputObject: "0x0000000000000000000000000000000000000000000000000000000000000006"
->      signAddress: "0x0000000000000000000000000000000000000000000000000000000000000000"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      effects {
->        gasEffects {
->          gasObject {
->            address
->          }
->        }
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Package Filter -#### Filtering on package - ->
{
->  transactionBlocks(filter: { function: "0x3" }) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Package Module Filter -#### Filtering on package and module - ->
{
->  transactionBlocks(
->    filter: {
->      function: "0x3::sui_system"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Package Module Func Filter -#### Filtering on package, module and function - ->
{
->  transactionBlocks(
->    filter: {
->      function: "0x3::sui_system::request_withdraw_stake"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Recv Addr Filter -#### Filter on recvAddress - ->
{
->  transactionBlocks(
->    filter: {
->      recvAddress: "0x0000000000000000000000000000000000000000000000000000000000000000"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Sign Addr Filter -#### Filter on signing address - ->
{
->  transactionBlocks(
->    filter: {
->      signAddress: "0x0000000000000000000000000000000000000000000000000000000000000000"
->    }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Tx Ids Filter -#### Filter on transactionIds - ->
{
->  transactionBlocks(
->    filter: { transactionIds: ["DtQ6v6iJW4wMLgadENPUCEUS5t8AP7qvdG5jX84T1akR"] }
->  ) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### Tx Kind Filter -#### Filter on TransactionKind (only SYSTEM_TX or PROGRAMMABLE_TX) - ->
{
->  transactionBlocks(filter: { kind: SYSTEM_TX }) {
->    nodes {
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->  }
->}
- -### -### With Defaults Ascending -#### Fetch the default number of transactions, in ascending order - ->
{
->  transactionBlocks {
->    nodes {
->      digest
->      effects {
->        gasEffects {
->          gasObject {
->            version
->            digest
->          }
->          gasSummary {
->            computationCost
->            storageCost
->            storageRebate
->            nonRefundableStorageFee
->          }
->        }
->        errors
->      }
->      sender {
->        address
->      }
->      gasInput {
->        gasPrice
->        gasBudget
->      }
->    }
->    pageInfo {
->      endCursor
->    }
->  }
->}
- -## -## Transaction Block Effects -### -### Transaction Block Effects - ->
{
->  object(
->    address: "0x0bba1e7d907dc2832edfc3bf4468b6deacd9a2df435a35b17e640e135d2d5ddc"
->  ) {
->    version
->    owner {
->      __typename
->      ... on Shared {
->        initialSharedVersion
->      }
->      __typename
->      ... on Parent {
->        parent {
->          address
->        }
->      }
->      __typename
->      ... on AddressOwner {
->        owner {
->          address
->        }
->      }
->    }
->    previousTransactionBlock {
->      effects {
->        status
->        checkpoint {
->          sequenceNumber
->        }
->        lamportVersion
->        gasEffects {
->          gasSummary {
->            computationCost
->            storageCost
->            storageRebate
->            nonRefundableStorageFee
->          }
->        }
->        balanceChanges {
->          nodes {
->            owner {
->              address
->              balance(type: "0x2::sui::SUI") {
->                totalBalance
->              }
->            }
->            amount
->            coinType {
->              repr
->              signature
->              layout
->            }
->          }
->        }
->        dependencies {
->          nodes {
->            sender {
->              address
->            }
->          }
->        }
->      }
->    }
->  }
->}
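
Beyond `scanLimit`, the schema changes below add package-versioning queries: `Query.package`, `Query.latestPackage`, `Query.packages` (filtered by `MovePackageCheckpointFilter`), and `packageVersions` (filtered by `MovePackageVersionFilter`). A hedged sketch of listing a package's versions (the address and bound are placeholders; `afterVersion` is an exclusive lower bound):

```graphql
{
  packageVersions(
    address: "0x0000000000000000000000000000000000000000000000000000000000000002"
    filter: { afterVersion: 1 }
  ) {
    nodes {
      address
      version
    }
  }
}
```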
- diff --git a/crates/sui-graphql-rpc/examples/epoch/with_tx_block_connection_latest_epoch.graphql b/crates/sui-graphql-rpc/examples/epoch/with_tx_block_connection_latest_epoch.graphql index 9fdc8ea5a7fa3..62bc23f9f5855 100644 --- a/crates/sui-graphql-rpc/examples/epoch/with_tx_block_connection_latest_epoch.graphql +++ b/crates/sui-graphql-rpc/examples/epoch/with_tx_block_connection_latest_epoch.graphql @@ -1,6 +1,6 @@ { epoch { - transactionBlocks(first: 20, after: "eyJjIjoyNjkzMzMyNCwidCI6MTEwMTYxMDQ4MywidGMiOjI2ODUxMjQ4fQ") { + transactionBlocks(first: 20, after: "eyJjIjoyNjkzMzMyNCwidCI6MTEwMTYxMDQ4MywiaSI6ZmFsc2V9") { pageInfo { hasNextPage endCursor diff --git a/crates/sui-graphql-rpc/schema/current_progress_schema.graphql b/crates/sui-graphql-rpc/schema.graphql similarity index 87% rename from crates/sui-graphql-rpc/schema/current_progress_schema.graphql rename to crates/sui-graphql-rpc/schema.graphql index 688f1490b5f64..defc55cc329d2 100644 --- a/crates/sui-graphql-rpc/schema/current_progress_schema.graphql +++ b/crates/sui-graphql-rpc/schema.graphql @@ -98,8 +98,27 @@ type Address implements IOwner { """ Similar behavior to the `transactionBlocks` in Query but supporting the additional `AddressTransactionBlockRelationship` filter, which defaults to `SIGN`. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, relation: AddressTransactionBlockRelationship, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, relation: AddressTransactionBlockRelationship, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! } type AddressConnection { @@ -409,8 +428,25 @@ type Checkpoint { epoch: Epoch """ Transactions in this checkpoint. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). 
If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range consists of all transactions in this checkpoint. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! } type CheckpointConnection { @@ -517,8 +553,27 @@ type Coin implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -676,8 +731,27 @@ type CoinMetadata implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). 
If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -1093,8 +1167,25 @@ type Epoch { checkpoints(first: Int, after: String, last: Int, before: String): CheckpointConnection! """ The epoch's corresponding transaction blocks. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range consists of all transactions in this epoch. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! } type Event { @@ -1429,7 +1520,7 @@ interface IObject { """ The transaction blocks that sent objects to this object. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -1972,8 +2063,27 @@ type MoveObject implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. 
It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -2161,13 +2271,48 @@ type MovePackage implements IObject & IOwner { The transaction blocks that sent objects to this package. Note that objects that have been sent to a package become inaccessible. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the package's content. """ bcs: Base64 """ + Fetch another version of this package (the package that shares this package's original ID, + but has the specified `version`). 
+ """ + packageAtVersion(version: Int!): MovePackage + """ + Fetch all versions of this package (packages that share this package's original ID), + optionally bounding the versions exclusively from below with `afterVersion`, or from above + with `beforeVersion`. + """ + packageVersions(first: Int, after: String, last: Int, before: String, filter: MovePackageVersionFilter): MovePackageConnection! + """ + Fetch the latest version of this package (the package with the highest `version` that shares + this packages's original ID) + """ + latestPackage: MovePackage! + """ A representation of the module called `name` in this package, including the structs and functions it defines. """ @@ -2191,6 +2336,22 @@ type MovePackage implements IObject & IOwner { moduleBcs: Base64 } +""" +Filter for paginating `MovePackage`s that were created within a range of checkpoints. +""" +input MovePackageCheckpointFilter { + """ + Fetch packages that were published strictly after this checkpoint. Omitting this fetches + packages published since genesis. + """ + afterCheckpoint: UInt53 + """ + Fetch packages that were published strictly before this checkpoint. Omitting this fetches + packages published up to the latest checkpoint (inclusive). + """ + beforeCheckpoint: UInt53 +} + type MovePackageConnection { """ Information to aid in pagination. @@ -2220,6 +2381,22 @@ type MovePackageEdge { cursor: String! } +""" +Filter for paginating versions of a given `MovePackage`. +""" +input MovePackageVersionFilter { + """ + Fetch versions of this package that are strictly newer than this version. Omitting this + fetches versions since the original version. + """ + afterVersion: UInt53 + """ + Fetch versions of this package that are strictly older than this version. Omitting this + fetches versions up to the latest version (inclusive). + """ + beforeVersion: UInt53 +} + """ Description of a struct type, defined in a Move module. """ @@ -2492,8 +2669,27 @@ type Object implements IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! 
+  receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection!
   """
   The Base64-encoded BCS serialization of the object's content.
   """
@@ -3033,13 +3229,33 @@ type Query {
   state at the latest checkpoint known to the GraphQL RPC. Similarly, `Owner.asObject` will
   return the object's version at the latest checkpoint.
   """
-  owner(address: SuiAddress!, rootVersion: Int): Owner
+  owner(address: SuiAddress!, rootVersion: UInt53): Owner
   """
   The object corresponding to the given address at the (optionally) given version.
   When no version is given, the latest version is returned.
   """
   object(address: SuiAddress!, version: UInt53): Object
   """
+  The package corresponding to the given address (at the optionally given version).
+
+  When no version is given, the package is loaded directly from the address given. Otherwise,
+  the address is translated before loading to point to the package whose original ID matches
+  the package at `address`, but whose version is `version`. For non-system packages, this
+  might result in a different address than `address` because different versions of a package,
+  introduced by upgrades, exist at distinct addresses.
+
+  Note that this interpretation of `version` is different from a historical object read (the
+  interpretation of `version` for the `object` query).
+  """
+  package(address: SuiAddress!, version: UInt53): MovePackage
+  """
+  The latest version of the package at `address`.
+
+  This corresponds to the package with the highest `version` that shares its original ID with
+  the package at `address`.
+  """
+  latestPackage(address: SuiAddress!): MovePackage
+  """
   Look-up an Account by its SuiAddress.
   """
   address(address: SuiAddress!): Address
@@ -3074,8 +3290,27 @@ type Query {
   checkpoints(first: Int, after: String, last: Int, before: String): CheckpointConnection!
   """
   The transaction blocks that exist in the network.
+
+  `scanLimit` restricts the number of candidate transactions scanned when gathering a page of
+  results. It is required for queries that apply more than two complex filters (on function,
+  kind, sender, recipient, input object, changed object, or ids), and can be at most
+  `serviceConfig.maxScanLimit`.
+
+  When the scan limit is reached the page will be returned even if it has fewer than `first`
+  results when paginating forward (`last` when paginating backwards). If there are more
+  transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to
+  `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last
+  transaction that was scanned as opposed to the last (or first) transaction in the page.
+
+  Requesting the next (or previous) page after this cursor will resume the search, scanning
+  the next `scanLimit` many transactions in the direction of pagination, and so on until all
+  transactions in the scanning range have been visited.
+
+  By default, the scanning range includes all transactions known to GraphQL, but it can be
+  restricted by the `after` and `before` cursors, and the `beforeCheckpoint`,
+  `afterCheckpoint` and `atCheckpoint` filters.
   """
-  transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection!
+  transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection!
   """
   The events that exist in the network.
""" @@ -3085,6 +3320,20 @@ type Query { """ objects(first: Int, after: String, last: Int, before: String, filter: ObjectFilter): ObjectConnection! """ + The Move packages that exist in the network, optionally filtered to be strictly before + `beforeCheckpoint` and/or strictly after `afterCheckpoint`. + + This query returns all versions of a given user package that appear between the specified + checkpoints, but only records the latest versions of system packages. + """ + packages(first: Int, after: String, last: Int, before: String, filter: MovePackageCheckpointFilter): MovePackageConnection! + """ + Fetch all versions of package at `address` (packages that share this package's original ID), + optionally bounding the versions exclusively from below with `afterVersion`, or from above + with `beforeVersion`. + """ + packageVersions(first: Int, after: String, last: Int, before: String, address: SuiAddress!, filter: MovePackageVersionFilter): MovePackageConnection! + """ Fetch the protocol config by protocol version (defaults to the latest protocol version known to the GraphQL service). """ @@ -3279,6 +3528,14 @@ type ServiceConfig { Maximum nesting allowed in struct fields when calculating the layout of a single Move Type. """ maxMoveValueDepth: Int! + """ + Maximum number of transaction ids that can be passed to a `TransactionBlockFilter`. + """ + maxTransactionIds: Int! + """ + Maximum number of candidates to scan when gathering a page of results. + """ + maxScanLimit: Int! } """ @@ -3496,8 +3753,27 @@ type StakedSui implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -3698,8 +3974,27 @@ type SuinsRegistration implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. 
It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ diff --git a/crates/sui-graphql-rpc/schema/draft_target_schema.graphql b/crates/sui-graphql-rpc/schema/draft_target_schema.graphql deleted file mode 100644 index 733f13eb57bf7..0000000000000 --- a/crates/sui-graphql-rpc/schema/draft_target_schema.graphql +++ /dev/null @@ -1,1588 +0,0 @@ -# Copyright (c) Mysten Labs, Inc. -# SPDX-License-Identifier: Apache-2.0 - -# GraphQL Schema Draft -# -------------------- -# -# This is a draft design of the schema used by the second iteration of -# the RPC service. Note that some elements may not be complete, and -# others may exist in this schema but may not appear in the production -# design initially, or ever. -# -# The source of truth for the actual schema is accessed by querying -# the GraphQL server for its `__schema`. - -schema { - query: Query - subscription: Subscription - mutation: Mutation -} - -type Query { - # First four bytes of the network's genesis checkpoint digest - # (uniquely identifies the network) - chainIdentifier: String! - - # Range of checkpoints that the RPC has data available for (for data - # that can be tied to a particular checkpoint). - availableRange: AvailableRange! - - # Configuration for this RPC service - serviceConfig: ServiceConfig! - - # Simulate running a transaction to inspect its effects without - # committing to them on-chain. - # - # `txBytes` either a `TransactionData` struct or a `TransactionKind` - # struct, BCS-encoded and then Base64-encoded. The expected - # type is controlled by the presence or absence of `txMeta`: If - # present, `txBytes` is assumed to be a `TransactionKind`, if - # absent, then `TransactionData`. - # - # `txMeta` the data that is missing from a `TransactionKind` to make - # a `TransactionData` (sender address and gas information). All - # its fields are nullable: `sender` defaults to `0x0`, if - # `gasObjects` is not present, or is an empty list, it is - # substituted with a mock Coin object, and `gasPrice` defaults to - # the reference gas price. 
- # - # `skipChecks` optional flag to disable the usual verification - # checks that prevent access to objects that are owned by - # addresses other than the sender, and calling non-public, - # non-entry functions. Defaults to false. - dryRunTransactionBlock( - txBytes: Base64!, - txMeta: TransactionMetadata, - skipChecks: Boolean, - ): DryRunResult - - owner(address: SuiAddress!): Owner - object(address: SuiAddress!, version: Int): Object - address(address: SuiAddress!): Address - type(type: String!): MoveType! - - # Fetch epoch information by ID (defaults to the latest epoch). - epoch(id: Int): Epoch - - # `protocolVersion` defaults to the latest protocol version. - protocolConfig(protocolVersion: Int): ProtocolConfigs - - # Fetch checkpoint information by sequence number or digest - # (defaults to the latest available checkpoint). - checkpoint(id: CheckpointId): Checkpoint - - # Fetch a transaction block by its transaction digest - transactionBlock(digest: String!): TransactionBlock - - coinMetadata(coinType: String!): CoinMetadata - - checkpoints( - first: Int, - after: String, - last: Int, - before: String, - ): CheckpointConnection! - - coins( - first: Int, - after: String, - last: Int, - before: String, - type: String, - ): CoinConnection! - - transactionBlocks( - first: Int, - after: String, - last: Int, - before: String, - filter: TransactionBlockFilter, - ): TransactionBlockConnection! - - events( - first: Int, - after: String, - last: Int, - before: String, - filter: EventFilter, - ): EventConnection! - - objects( - first: Int, - after: String, - last: Int, - before: String, - filter: ObjectFilter, - ): ObjectConnection! - - resolveSuinsAddress(name: String!): Address - - # NB. Will be moved into a private, explorer-specific extension. - networkMetrics: NetworkMetrics - moveCallMetrics: MoveCallMetrics - - allEpochAddressMetrics( - first: Int, - after: String, - last: Int, - before: String, - ): AddressMetricsConnection! -} - -# NB. Add after MVP has stabilised. -# -# Subscriptions use a "push-pull" system: Subscribers are notified -# when there is new data by being sent the cursor pointing after that -# new data. To actually fetch the data, a call must be made to the -# equivalent Connection API: -# -# e.g. When subscription `subscribe { events(filter: F) }` pushes -# cursor `E`. Then -# -# query { events(before: E, filter: F) } -# -# Will start paginating events up to the new data (multiple calls may -# be required if there are multiple pages of information between the -# start and the latest). If the client has already processed some -# prefix, up to cursor `P`, then they can resume with: -# -# query { events(after: P, before: E, filter: F) } -# -# The API for transactions is similar. -type Subscription { - events(filter: EventFilter): String! - transactions(filter: TransactionBlockFilter): String! -} - -type Mutation { - # Execute a transaction, committing its effects on chain. - # - # `txBytes` is a `TransactionData` struct that has been BCS-encoded - # and then Base64-encoded. - # `signatures` are a list of `flag || signature || pubkey` bytes, - # Base64-encoded. - # - # Waits until the transaction has been finalized on chain to return - # its transaction digest. If the transaction could not be - # finalized, returns the errors that prevented it, instead. - executeTransactionBlock( - txBytes: Base64!, - signatures: [Base64!]!, - ): ExecutionResult -} - -# String containing 32B hex-encoded address, with a leading "0x". 
-# Leading zeroes can be omitted on input but will always appear in -# outputs (SuiAddress in output is guaranteed to be 66 characters -# long). -scalar SuiAddress - -# String representation of an arbitrary width, possibly signed integer -scalar BigInt - -# String containing Base64-encoded binary data. -scalar Base64 - -# ISO-8601 Date and Time -scalar DateTime - -# Arbitrary JSON data -scalar JSON - -# Scalar representing the contents of a Move Value, corresponding to -# the following recursive type: -# -# type MoveData = -# { Number: BigInt } -# | { Bool: bool } -# | { Address: SuiAddress } -# | { UID: SuiAddress } -# | { ID: SuiAddress } -# | { String: string } -# | { Vector: [MoveData] } -# | { Option: MoveData? } -# | { Struct: [{ name: string, value: MoveData }] } -scalar MoveData - -# The signature of a concrete Move Type (a type with all its type -# parameters instantiated with concrete types, that contains no -# references), corresponding to the following recursive type: -# -# type MoveTypeSignature = -# "address" -# | "bool" -# | "u8" | "u16" | ... | "u256" -# | { vector: MoveTypeSignature } -# | { -# struct: { -# package: string, -# module: string, -# type: string, -# typeParameters: [MoveTypeSignature], -# } -# } -scalar MoveTypeSignature - -# The shape of a concrete Move Type (a type with all its type -# parameters instantiated with concrete types), corresponding to the -# following recursive type: -# -# type MoveTypeLayout = -# "address" -# | "bool" -# | "u8" | "u16" | ... | "u256" -# | { vector: MoveTypeLayout } -# | { -# struct: { -# type: string, -# fields: [{ name: string, layout: MoveTypeLayout }], -# } -# } -scalar MoveTypeLayout - -# The shape of an abstract Move Type (a type that can contain free -# type parameters, and can optionally be taken by reference), -# corresponding to the following recursive type: -# -# type OpenMoveTypeSignature = { -# ref: ("&" | "&mut")?, -# body: OpenMoveTypeSignatureBody, -# } -# -# type OpenMoveTypeSignatureBody = -# "address" -# | "bool" -# | "u8" | "u16" | ... | "u256" -# | { vector: OpenMoveTypeSignatureBody } -# | { -# struct: { -# package: string, -# module: string, -# type: string, -# typeParameters: [OpenMoveTypeSignatureBody]? -# } -# } -# | { typeParameter: number } -scalar OpenMoveTypeSignature - -# The extra data required to turn a `TransactionKind` into a -# `TransactionData` in a dry-run. -input TransactionMetadata { - sender: SuiAddress - gasPrice: Int - gasBudget: Int - gasObjects: [ObjectRef!] - gasSponsor: SuiAddress -} - -# A reference to a particular version of an object. -input ObjectRef { - address: SuiAddress! - version: Int! - digest: String! -} - -# Filter either by the digest, or the sequence number, or neither, to -# get the latest checkpoint. -input CheckpointId { - digest: String - sequenceNumber: Int -} - -input ObjectFilter { - # This field is used to specify the type of objects that should be - # include in the query results. - # - # Objects can be filtered by their type's package, package::module, - # or their fully qualified type name. - # - # Generic types can be queried by either the generic type name, e.g. - # `0x2::coin::Coin`, or by the full type name, such as - # `0x2::coin::Coin<0x2::sui::SUI>`. - type: String - - # Filter for live objects by their current owners. - owner: SuiAddress - - # Filter for live objects by their IDs. - objectIds: [SuiAddress!] - - # Filter for live or potentially historical objects by their ID and version. - objectKeys: [ObjectKey!] 
- - # Enhancement (post-MVP), compound filters. Compound filters are - # exclusive (must be the only filter set if they are used). - any: [ObjectFilter] - all: [ObjectFilter] - not: ObjectFilter -} - -input ObjectKey { - objectId: SuiAddress! - version: Int! -} - -input EventFilter { - sender: SuiAddress - transactionDigest: String - # Enhancement (post-MVP), requires compound filters to be useful. - afterCheckpoint: Int - beforeCheckpoint: Int - - # Events emitted by a particular module. An event is emitted by a - # particular module if some function in the module is called by a - # PTB and emits an event. - # - # Modules can be filtered by their package, or package::module. - emittingModule: String - - # This field is used to specify the type of event emitted. - # - # Events can be filtered by their type's package, package::module, - # or their fully qualified type name. - # - # Generic types can be queried by either the generic type name, e.g. - # `0x2::coin::Coin`, or by the full type name, such as - # `0x2::coin::Coin<0x2::sui::SUI>`. - eventType: String - - # Enhancement (post-MVP), requires compound filters to be useful. - startTime: DateTime - endTime: DateTime - - # Enhancement (post-MVP), compound filters. Compound filters are - # exclusive (must be the only filter set if they are used). - any: [EventFilter] - all: [EventFilter] - not: EventFilter -} - -input TransactionBlockFilter { - # Filter by the function called. Limited to an individual package, - # package::module, or package::module::function. - function: String - - kind: TransactionBlockKindInput - afterCheckpoint: Int - beforeCheckpoint: Int - - signAddress: SuiAddress - sentAddress: SuiAddress - recvAddress: SuiAddress - paidAddress: SuiAddress - - inputObject: SuiAddress - changedObject: SuiAddress - - transactionIDs: [String!] - - # Enhancement (post-MVP), consistency with EventFilter -- timestamp - # comes from checkpoint timestamp. - startTime: DateTime - endTime: DateTime - - # Enhancement (post-MVP), compound filters. Compound filters are - # exclusive (must be the only filter set if they are used). - any: [TransactionBlockFilter] - all: [TransactionBlockFilter] - not: TransactionBlockFilter -} - -input DynamicFieldFilter { - # Filter the type of dynamic field name. - # - # Names can be filtered by their type's package, package::module, or - # their fully qualified type name. - # - # Generic types can be queried by either the generic type name, e.g. - # `0x2::coin::Coin`, or by the full type name, such as - # `0x2::coin::Coin<0x2::sui::SUI>`. - nameType: String - - # Filter the type of dynamic field value. - # - # Values can be filtered by their type's package, package::module, - # or their fully qualified type name. - # - # Generic types can be queried by either the generic type name, e.g. - # `0x2::coin::Coin`, or by the full type name, such as - # `0x2::coin::Coin<0x2::sui::SUI>`. - valueType: String -} - -type AvailableRange { - first: Checkpoint - last: Checkpoint -} - -type ServiceConfig { - availableVersions: [String!] - enabledFeatures: [Feature!] - isEnabled(feature: Feature!): Boolean! - - maxQueryDepth: Int! - maxQueryNodes: Int! - maxOutputNodes: Int! - defaultPageSize: Int! - maxPageSize: Int! - requestTimeoutMs: Int! - maxQueryPayloadSize: Int! -} - -enum Feature { - ANALYTICS - COINS - DYNAMIC_FIELDS - NAME_SERVICE - SUBSCRIPTIONS - SYSTEM_STATE -} - -interface IOwner { - address: SuiAddress! 
- - objects( - first: Int, - after: String, - last: Int, - before: String, - # Enhancement (post-MVP) relies on compound filters. - filter: ObjectFilter, - ): MoveObjectConnection! - - balance(type: String!): Balance - balances( - first: Int, - after: String, - last: Int, - before: String, - ): BalanceConnection! - - # `type` defaults to `0x2::sui::SUI`. - coins( - first: Int, - after: String, - last: Int, - before: String, - type: String, - ): CoinConnection! - - stakedSuis( - first: Int, - after: String, - last: Int, - before: String, - ): StakedSuiConnection! - - dynamicField(dynamicFieldName: DynamicFieldName!): DynamicField - dynamicObjectField(dynamicFieldName: DynamicFieldName!): DynamicField - dynamicFields( - first: Int, - after: String, - last: Int, - before: String, - # Enhancement (post-MVP) to filter dynamic fields by type. - filter: DynamicFieldFilter, - ): DynamicFieldConnection! - - defaultSuinsName: String - suinsRegistrations( - first: Int, - after: String, - last: Int, - before: String, - ): SuinsRegistrationConnection! -} - -union ObjectOwner = Immutable | Shared | Parent | AddressOwner - -type Immutable { - # Dummy field - _: Boolean -} - -type Shared { - initialSharedVersion: Int! -} - -type Parent { - # Child objects are an implementation-detail of dynamic fields. Only - # another object can be a parent of a child object (not an address). - parent: Object -} - -type AddressOwner { - # The address that owns an object could be an Address, or an Object. - owner: Owner -} - -interface IObject { - version: Int! - digest: String! - owner: ObjectOwner - - previousTransactionBlock: TransactionBlock - storageRebate: BigInt - - display: [DisplayEntry!] - - # Transaction Blocks that sent objects to this object - receivedTransactionBlocks( - first: Int, - after: String, - last: Int, - before: String, - # Enhancement (post-MVP) relies on compound filters. - filter: TransactionBlockFilter, - ): TransactionBlockConnection! - - bcs: Base64 -} - -interface IMoveObject { - contents: MoveValue -} - -# Returned by Object.owner, where we can't disambiguate between -# Address and Object. -type Owner implements IOwner { - asAddress: Address - asObject: Object -} - -type Address implements IOwner { - transactionBlocks( - first: Int, - after: String, - last: Int, - before: String, - relation: AddressTransactionBlockRelationship, - # Enhancement (post-MVP) relies on compound filters. - filter: TransactionBlockFilter, - ): TransactionBlockConnection! -} - -enum AddressTransactionBlockRelationship { - SIGN # Transactions this address has signed - SENT # Transactions that transferred objects from this address - RECV # Transactions that received objects into this address - PAID # Transactions that were paid for by this address -} - -type Object implements IOwner & IObject { - asMoveObject: MoveObject - asMovePackage: MovePackage -} - -type DisplayEntry { - key: String! - value: String - error: String -} - -type Epoch { - epochId: Int! - protocolConfigs: ProtocolConfigs - referenceGasPrice: BigInt - validatorSet: ValidatorSet - - startTimestamp: DateTime! 
- endTimestamp: DateTime - - totalCheckpoints: BigInt - totalGasFees: BigInt - totalStakeRewards: BigInt - totalStakeSubsidies: BigInt - fundSize: BigInt - netInflow: BigInt - fundInflow: BigInt - fundOutflow: BigInt - - # SystemState fields - storageFund: StorageFund - safeMode: SafeMode - systemStateVersion: BigInt - systemParameters: SystemParameters - systemStakeSubsidy: StakeSubsidy - - checkpoints( - first: Int, - after: String, - last: Int, - before: String, - ): CheckpointConnection! - - transactionBlocks( - first: Int, - after: String, - last: Int, - before: String, - # Enhancement (post-MVP) relies on compound filters. - filter: TransactionBlockFilter, - ): TransactionBlockConnection! -} - -type ProtocolConfigs { - protocolVersion: Int! - featureFlags: [ProtocolConfigFeatureFlag!]! - configs: [ProtocolConfigAttr!]! - config(key: String!): ProtocolConfigAttr - featureFlag(key: String!): ProtocolConfigFeatureFlag -} - -type ProtocolConfigAttr { - key: String! - value: String! -} - -type ProtocolConfigFeatureFlag { - key: String! - value: Boolean! -} - -type SystemParameters { - durationMs: BigInt - stakeSubsidyStartEpoch: Int - - minValidatorCount: Int - maxValidatorCount: Int - - minValidatorJoiningStake: BigInt - validatorLowStakeThreshold: BigInt - validatorVeryLowStakeThreshold: BigInt - validatorLowStakeGracePeriod: Int -} - -type StakeSubsidy { - balance: BigInt - distributionCounter: Int - currentDistributionAmount: BigInt - periodLength: Int - decreaseRate: Int -} - -type ValidatorSet { - totalStake: BigInt - - activeValidators( - first: Int, - after: String, - last: Int, - before: String - ): ValidatorConnection! - - # Indices into `activeValidators` - pendingRemovals: [Int] - - pendingActiveValidators: MoveObject - pendingActiveValidatorsSize: Int - - stakePoolMappings: MoveObject - stakePoolMappingsSize: Int - - inactivePools: MoveObject - inactivePoolsSize: Int - - validatorCandidates: MoveObject - validatorCandidatesSize: Int -} - -type Validator { - address: Address! - - credentials: ValidatorCredentials - nextEpochCredentials: ValidatorCredentials - - name: String - description: String - imageUrl: String - projectUrl: String - - operationCap: MoveObject - stakingPool: MoveObject - - exchangeRates: MoveObject - exchangeRatesSize: Int - - stakingPoolActivationEpoch: Int - stakingPoolSuiBalance: BigInt - rewardsPool: BigInt - poolTokenBalance: BigInt - pendingStake: BigInt - pendingTotalSuiWithdraw: BigInt - pendingPoolTokenWithdraw: BigInt - - votingPower: Int - stakeUnits: Int - gasPrice: BigInt - commissionRate: Int - nextEpochStake: BigInt - nextEpochGasPrice: BigInt - nextEpochCommissionRate: Int - - # The number of epochs for which this validator has been below the - # low stake threshold. - atRisk: Int - - # The other validators this validator has reported - reportRecords: [SuiAddress!] - - apy: Int -} - -type ValidatorCredentials { - protocolPubKey: Base64 - networkPubKey: Base64 - workerPubKey: Base64 - proofOfPossession: Base64 - - netAddress: String - p2pAddreess: String - primaryAddress: String - workerAddress: String -} - -type StorageFund { - totalObjectStorageRebates: BigInt - nonRefundableBalance: BigInt -} - -type SafeMode { - enabled: Boolean - gasSummary: GasCostSummary -} - -type Checkpoint { - digest: String! - sequenceNumber: Int! - - timestamp: DateTime! 
- validatorSignatures: Base64 - - # Commitments - previousCheckpointDigest: String - liveObjectSetDigest: String - - networkTotalTransactions: Int - rollingGasSummary: GasCostSummary - - epoch: Epoch - - transactionBlocks( - first: Int, - after: String, - last: Int, - before: String, - # Enhancement (post-MVP) relies on compound filters. - filter: TransactionBlockFilter, - ): TransactionBlockConnection! - - # NB. Will be moved into a private, explorer-specific extension. - addressMetrics: AddressMetrics -} - -type TransactionBlock { - digest: String - - sender: Address - gasInput: GasInput - kind: TransactionBlockKind - signatures: [Base64!] - effects: TransactionBlockEffects - - expiration: Epoch - - bcs: Base64 -} - -enum TransactionBlockKindInput { - PROGRAMMABLE_TX - SYSTEM_TX -} - -union TransactionBlockKind = - ConsensusCommitPrologueTransaction - | GenesisTransaction - | ChangeEpochTransaction - | ProgrammableTransactionBlock - | AuthenticatorStateUpdateTransaction - | RandomnessStateUpdateTransaction - | EndOfEpochTransaction - -type ConsensusCommitPrologueTransaction { - epoch: Epoch! - round: Int! - commitTimestamp: DateTime! - consensusCommitDigest: String -} - -type GenesisTransaction { - objects( - first: Int, - after: String, - last: Int, - before: String, - ): ObjectConnection! -} - -type ChangeEpochTransaction { - epoch: Epoch - protocolVersion: Int! - startTimestamp: DateTime! - - storageCharge: BigInt! - computationCharge: BigInt! - storageRebate: BigInt! - nonRefundableStorageFee: BigInt! - - systemPackages( - first: Int, - after: String, - last: Int, - before: String, - ): MovePackageConnection! -} - -type ProgrammableTransactionBlock { - inputs( - first: Int, - after: String, - last: Int, - before: String, - ): TransactionInputConnection! - - transactions( - first: Int, - after: String, - last: Int, - before: String, - ): ProgrammableTransactionConnection! -} - -union TransactionInput = OwnedOrImmutable | SharedInput | Receiving | Pure - -type OwnedOrImmutable { - address: SuiAddress! - version: Int! - digest: String! - object: Object -} - -type SharedInput { - address: SuiAddress! - initialSharedVersion: Int! - mutable: Boolean! -} - -type Receiving { - address: SuiAddress! - version: Int! - digest: String! - object: Object -} - -type Pure { - bytes: Base64! -} - -union TransactionArgument = GasCoin | Input | Result - -type GasCoin { _: Boolean } -type Input { ix: Int! } -type Result { cmd: Int!, ix: Int } - -union ProgrammableTransaction = - MoveCallTransaction - | TransferObjectsTransaction - | SplitCoinTransaction - | MergeCoinsTransaction - | PublishTransaction - | UpgradeTransaction - | MakeMoveVecTransaction - -type MoveCallTransaction { - package: SuiAddress! - module: String! - functionName: String! - function: MoveFunction - typeArguments: [MoveType!]! - arguments: [TransactionArgument!]! -} - -type TransferObjectsTransaction { - objects: [TransactionArgument!]! - address: TransactionArgument! -} - -type SplitCoinsTransaction { - coin: TransactionArgument! - amounts: [TransactionArgument!]! -} - -type MergeCoinsTransaction { - coin: TransactionArgument! - coins: [TransactionArgument!]! -} - -type PublishTransaction { - modules: [Base64!]! - dependencies: [SuiAddress!]! -} - -type UpgradeTransaction { - modules: [Base64!]! - dependencies: [SuiAddress!]! - currentPackage: SuiAddress! - upgradeTicket: TransactionArgument! -} - -type MakeMoveVecTransaction { - type: MoveType - elements: [TransactionArgument!]! 
-} - -type TransactionBlockEffects { - transactionBlock: TransactionBlock! - status: ExecutionStatus - - errors: String - dependencies( - first: Int, - after: String, - last: Int, - before: String, - ): TransactionBlockConnection! - - lamportVersion: Int - gasEffects: GasEffects - - unchangedSharedObjects( - first: Int, - after: String, - last: Int, - before: String, - ): UnchangedSharedObjectConnection! - - objectChanges( - first: Int, - after: String, - last: Int, - before: String, - ): ObjectChangeConnection! - - balanceChanges( - first: Int, - after: String, - last: Int, - before: String, - ): BalanceChangeConnection! - - timestamp: DateTime - epoch: Epoch - checkpoint: Checkpoint - - events( - first: Int, - after: String, - last: Int, - before: String, - # Extension (post-MVP) relies on compound filters - filter: EventFilter, - ): EventConnection! - - bcs: Base64 -} - -enum ExecutionStatus { - SUCCESS - FAILURE -} - -type GasInput { - gasSponsor: Address - gasPayment( - first: Int, - after: String, - last: Int, - before: String, - ): ObjectConnection! - - gasPrice: BigInt - gasBudget: BigInt -} - -type GasEffects { - gasObject: Coin - gasSummary: GasCostSummary -} - -type GasCostSummary { - computationCost: BigInt - storageCost: BigInt - storageRebate: BigInt - nonRefundableStorageFee: BigInt -} - -union UnchangedSharedObject = SharedObjectRead | SharedObjectDelete - -type SharedObjectRead { - address: SuiAddress! - version: u64! - digest: String! - object: Object -} - -type SharedObjectDelete { - address: SuiAddress! - version: u64! - - # Whether this transaction intended to use this shared object - # mutably or not. - mutable: Boolean! -} - -type ObjectChange { - address: SuiAddress! - - inputState: Object - outputState: Object - - idCreated: Boolean - idDeleted: Boolean -} - -type BalanceChange { - owner: Owner - coinType: MoveType - amount: BigInt -} - -type Event { - # Module that the event was emitted by - sendingModule: MoveModule - - sender: Address - timestamp: DateTime - - type: MoveType! - bcs: Base64! - data: MoveData! - json: JSON! -} - -type Balance { - coinType: MoveType - coinObjectCount: Int - totalBalance: BigInt -} - -type Coin implements IOwner & IObject { - coinBalance: BigInt -} - -type StakedSui implements IOwner & IObject { - stakeStatus: StakeStatus! - requestEpoch: Epoch - activeEpoch: Epoch - principal: BigInt - - # Only available if status is `ACTIVE`. - estimatedReward: BigInt -} - -enum StakeStatus { - PENDING - ACTIVE - UNSTAKED -} - -type CoinMetadata implements IOwner & IObject { - decimals: Int - name: String - symbol: String - description: String - iconUrl: String - supply: BigInt -} - -input DynamicFieldName { - type: String! - bcs: Base64! -} - -type DynamicField { - name: MoveValue - value: DynamicFieldValue -} - -union DynamicFieldValue = MoveObject | MoveValue - -type MoveObject implements IOwner & IObject & IMoveObject { - asCoin: Coin - asStakedSui: StakedSui - asCoinMetadata: CoinMetadata - asSuinsRegistration: SuinsRegistration -} - -type MovePackage implements IOwner & IObject { - module(name: String!): MoveModule - modules( - first: Int, - after: String, - last: Int, - before: String, - ): MoveModuleConnection! - - linkage: [Linkage!] - typeOrigins: [TypeOrigin!] - - moduleBcs: Base64 -} - -type Linkage { - originalId: SuiAddress! - upgradedId: SuiAddress! - version: Int! -} - -type TypeOrigin { - module: String! - struct: String! - definingId: SuiAddress! 
-} - -enum MoveAbility { - COPY - DROP - STORE - KEY -} - -enum MoveVisibility { - PUBLIC - PRIVATE - FRIEND -} - -type MoveStructTypeParameter { - constraints: [MoveAbility!]! - isPhantom: Boolean! -} - -type MoveFunctionTypeParameter { - constraints: [MoveAbility!]! -} - -type MoveModule { - package: SuiAddress! - name: String! - - fileFormatVersion: Int! - - friends( - first: Int, - after: String, - last: Int, - before: String - ): MoveModuleConnection! - - struct(name: String!): MoveStruct - structs( - first: Int, - after: String, - last: Int, - before: String, - ): MoveStructConnection! - - function(name: String!): MoveFunction - functions( - first: Int, - after: String, - last: Int, - before: String, - ): MoveFunctionConnection! - - bytes: Base64 - disassembly: String -} - -type MoveStruct { - module: MoveModule! - name: String! - abilities: [MoveAbility!] - typeParameters: [MoveStructTypeParameter!] - fields: [MoveField!] -} - -type MoveField { - name: String! - type: OpenMoveType -} - -type MoveFunction { - module: MoveModule! - name: String! - - visibility: MoveVisibility - isEntry: Boolean - - typeParameters: [MoveFunctionTypeParameter!] - parameters: [OpenMoveType!] - return: [OpenMoveType!] -} - -type MoveValue { - type: MoveType! - data: MoveData! - json: JSON! - - bcs: Base64! -} - -# Represents concrete types (no type parameters, no references) -type MoveType { - # Flat representation of the type signature, as a displayable string. - repr: String! - # Structured representation of the type signature. - signature: MoveTypeSignature! - # Structured representation of the "shape" of values that match this type. - layout: MoveTypeLayout! - # The abilities this concrete type has. - abilities: [MoveAbility!]! -} - -# Represents types that could contain references or free type -# parameters. Such types can appear as function parameters, or fields -# in structs. -type OpenMoveType { - # Flat representation of the type signature, as a displayable string. - repr: String! - # Structured representation of the type signature. - signature: OpenMoveTypeSignature! -} - -# Metrics (omitted for brevity) -type NetworkMetrics -type MoveCallMetrics -type AddressMetrics - -# Execution - -# Either TransactionBlockEffects on success, or error on failure. -type ExecutionResult { - effects: TransactionBlockEffects - errors: [String!] -} - -type DryRunResult { - transaction: TransactionBlock - error: String - results: [DryRunEffect!] -} - -type DryRunEffect { - # Changes made to arguments that were mutably borrowed by this - # transaction - mutatedReferences: [DryRunMutation!] - - # Results of this transaction - returnValues: [DryRunReturn!] -} - -type DryRunMutation { - input: TransactionArgument - type: MoveType - bcs: Base64 -} - -type DryRunReturn { - type: MoveType - bcs: Base64 -} - -# Connections - -# Pagination -type PageInfo { - hasNextPage: Boolean! - hasPreviousPage: Boolean! - startCursor: String - endCursor: String -} - -# Checkpoints -type CheckpointConnection { - edges: [CheckpointEdge!]! - nodes: [Checkpoint!]! - pageInfo: PageInfo! -} - -type CheckpointEdge { - cursor: String - node: Checkpoint! -} - -# Balance -type BalanceConnection { - edges: [BalanceEdge!]! - nodes: [Balance!]! - pageInfo: PageInfo! -} - -type BalanceEdge { - cursor: String - node: Balance! -} - -# Coin -type CoinConnection { - edges: [CoinEdge!]! - nodes: [Coin!]! - pageInfo: PageInfo! -} - -type CoinEdge { - cursor: String - node: Coin! 
-} - -# DynamicField -type DynamicFieldConnection { - edges: [DynamicFieldEdge!]! - nodes: [DynamicField!]! - pageInfo: PageInfo! -} - -type DynamicFieldEdge { - cursor: String - node: DynamicField! -} - -# Object -type ObjectConnection { - edges: [ObjectEdge!]! - nodes: [Object!]! - pageInfo: PageInfo! -} - -type ObjectEdge { - cursor: String - node: Object! -} - -# MoveObject -type MoveObjectConnection { - edges: [MoveObjectEdge!]! - nodes: [MoveObject!]! - pageInfo: PageInfo! -} - -type MoveObjectEdge { - cursor: String - node: MoveObject! -} - -# MovePackage -type MovePackageConnection { - edges: [MovePackageEdge!]! - nodes: [MovePackage!]! - pageInfo: PageInfo! -} - -type MovePackageEdge { - cursor: String - node: MovePackage! -} - -# Event -type EventConnection { - edges: [EventEdge!]! - nodes: [Event!]! - pageInfo: PageInfo! -} - -type EventEdge { - cursor: String - node: Event! -} - -# MoveFunction -type MoveFunctionConnection { - edges: [MoveFunctionEdge!]! - nodes: [MoveFunction!]! - pageInfo: PageInfo! -} - -type MoveFunctionEdge { - cursor: String - node: MoveFunction! -} - -# MoveModuleConnection -type MoveModuleConnection { - edges: [MoveModuleEdge] - nodes: [MoveModule] - pageInfo: PageInfo! -} - -type MoveModuleEdge { - cursor: String - node: MoveModule -} - -# MoveStructConnection -type MoveStructConnection { - edges: [MoveStructEdge!]! - nodes: [MoveStruct!]! - pageInfo: PageInfo! -} - -type MoveStructEdge { - cursor: String - node: MoveStruct! -} - -# TransactionBlockConnection -type TransactionBlockConnection { - totalTransactionBlocks: Int - edges: [TransactionBlockEdge!]! - nodes: [TransactionBlock!]! - pageInfo: PageInfo! -} - -type TransactionBlockEdge { - cursor: String - node: TransactionBlock! -} - -# TransactionInputConnection -type TransactionInputConnection { - edges: [TransactionInputEdge!]! - nodes: [TransactionInput!]! - pageInfo: PageInfo! -} - -type TransactionInputEdge { - cursor: String - node: TransactionInput! -} - -# ProgrammableTransactionConnection -type ProgrammableTransactionConnection { - edges: [ProgrammableTransactionEdge!]! - nodes: [ProgrammableTransaction!]! - pageInfo: PageInfo! -} - -type ProgrammableTransactionEdge { - cursor: String - node: ProgrammableTransaction! -} - -# UnchangedSharedObjectConnection - -type UnchangedSharedObjectConnection { - edges: [UnchangedSharedObjectEdge!]! - nodes: [UnchangedSharedObject!]! - pageInfo: PageInfo! -} - -type UnchangedSharedObjectEdge { - cursor: String - node: UnchangedSharedObject! -} - -# ObjectChangeConnection -type ObjectChangeConnection { - edges: [ObjectChangeEdge!]! - nodes: [ObjectChange!]! - pageInfo: PageInfo! -} - -type ObjectChangeEdge { - cursor: String - node: ObjectChange -} - -# BalanceChangeConnection -type BalanceChangeConnection { - edges: [BalanceChangeEdge!]! - nodes: [BalanceChange!]! - pageInfo: PageInfo! -} - -type BalanceChangeEdge { - cursor: String - node: BalanceChange -} - -# MoveModuleConnection -type MoveModuleConnection { - edges: [MoveModuleEdge!]! - nodes: [MoveModule!]! - pageInfo: PageInfo! -} - -type MoveModuleEdge { - cursor: String - node: MoveModule! -} - -# SuinsRegistrationConnection -type SuinsRegistrationConnection { - edges: [SuinsRegistrationEdge!]! - nodes: [SuinsRegistration!]! - pageInfo: PageInfo! -} - -type SuinsRegistrationEdge { - cursor: String - node: SuinsRegistration -} - -type SuinsRegistration { - """ - Domain name of the SuinsRegistration object - """ - domain: String! 
- """ - Convert the SuinsRegistration object into a Move object - """ - asMoveObject: MoveObject! -} - -# AddressMetricsConnection -type AddressMetricsConnection { - edges: [AddressMetricEdge!]! - nodes: [AddressMetric!]! - pageInfo: PageInfo! -} - -type AddressMetricEdge { - cursor: String - node: AddressMetrics! -} - -# StakedSuiConnection -type StakedSuiConnection { - edges: [StakedSuiEdge!]! - nodes: [StakedSui!]! - pageInfo: PageInfo! -} - -type StakedSuiEdge { - cursor: String - node: StakedSui! -} - -# ValidatorConnection -type ValidatorConnection { - edges: [ValidatorEdge!]! - nodes: [Validator!]! - pageInfo: PageInfo! -} - -type ValidatorEdge { - cursor: String - node: Validator! -} diff --git a/crates/sui-graphql-rpc/src/commands.rs b/crates/sui-graphql-rpc/src/commands.rs index f605efd735946..4b0eca46c11cb 100644 --- a/crates/sui-graphql-rpc/src/commands.rs +++ b/crates/sui-graphql-rpc/src/commands.rs @@ -13,17 +13,13 @@ use std::path::PathBuf; version )] pub enum Command { - GenerateDocsExamples, - GenerateSchema { - /// Path to output GraphQL schema to, in SDL format. - #[clap(short, long)] - file: Option, - }, - GenerateExamples { - /// Path to output examples docs. - #[clap(short, long)] - file: Option, + /// Output a TOML config (suitable for passing into the --config parameter of the start-server + /// command) with all values set to their defaults. + GenerateConfig { + /// Optional path to an output file. Prints to `stdout` if not provided. + output: Option, }, + StartServer { /// The title to display at the top of the page #[clap(short, long)] diff --git a/crates/sui-graphql-rpc/src/config.rs b/crates/sui-graphql-rpc/src/config.rs index 71590683d308f..42f6feab0b084 100644 --- a/crates/sui-graphql-rpc/src/config.rs +++ b/crates/sui-graphql-rpc/src/config.rs @@ -92,6 +92,10 @@ pub struct Limits { pub max_type_nodes: u32, /// Maximum deph of a move value. pub max_move_value_depth: u32, + /// Maximum number of transaction ids that can be passed to a `TransactionBlockFilter`. + pub max_transaction_ids: u32, + /// Maximum number of candidates to scan when gathering a page of results. + pub max_scan_limit: u32, } #[GraphQLConfig] @@ -282,6 +286,16 @@ impl ServiceConfig { async fn max_move_value_depth(&self) -> u32 { self.limits.max_move_value_depth } + + /// Maximum number of transaction ids that can be passed to a `TransactionBlockFilter`. + async fn max_transaction_ids(&self) -> u32 { + self.limits.max_transaction_ids + } + + /// Maximum number of candidates to scan when gathering a page of results. + async fn max_scan_limit(&self) -> u32 { + self.limits.max_scan_limit + } } impl TxExecFullNodeConfig { @@ -452,6 +466,10 @@ impl Default for Limits { max_type_nodes: 256, // max_move_value_depth: 128, + // Filter-specific limits, such as the number of transaction ids that can be specified + // for the `TransactionBlockFilter`. 
+ max_transaction_ids: 1000,
+ max_scan_limit: 100_000_000,
 }
 }
}
@@ -514,6 +532,8 @@ mod tests {
 max-type-argument-width = 64
 max-type-nodes = 128
 max-move-value-depth = 256
+ max-transaction-ids = 11
+ max-scan-limit = 50
 "#,
 )
 .unwrap();
@@ -533,6 +553,8 @@ mod tests {
 max_type_argument_width: 64,
 max_type_nodes: 128,
 max_move_value_depth: 256,
+ max_transaction_ids: 11,
+ max_scan_limit: 50,
 },
 ..Default::default()
 };
@@ -596,6 +618,8 @@ mod tests {
 max-type-argument-width = 64
 max-type-nodes = 128
 max-move-value-depth = 256
+ max-transaction-ids = 42
+ max-scan-limit = 420
 
 [experiments]
 test-flag = true
@@ -618,6 +642,8 @@ mod tests {
 max_type_argument_width: 64,
 max_type_nodes: 128,
 max_move_value_depth: 256,
+ max_transaction_ids: 42,
+ max_scan_limit: 420,
 },
 disabled_features: BTreeSet::from([FunctionalGroup::Analytics]),
 experiments: Experiments { test_flag: true },
diff --git a/crates/sui-graphql-rpc/src/connection.rs b/crates/sui-graphql-rpc/src/connection.rs
new file mode 100644
index 0000000000000..4c48fc1727d0e
--- /dev/null
+++ b/crates/sui-graphql-rpc/src/connection.rs
@@ -0,0 +1,125 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::borrow::Cow;
+use std::marker::PhantomData;
+
+use async_graphql::connection::{
+ ConnectionNameType, CursorType, DefaultConnectionName, DefaultEdgeName, Edge, EdgeNameType,
+ EmptyFields, EnableNodesField, NodesFieldSwitcherSealed, PageInfo,
+};
+use async_graphql::{Object, ObjectType, OutputType, TypeName};
+
+/// Mirrors the `Connection` type from async-graphql, with the exception that if `start_cursor` and/
+/// or `end_cursor` is set on the struct, then when `page_info` is called, it will use those values
+/// before deferring to `edges`. The default implementation derives these cursors from the first and
+/// last element of `edges`, so if `edges` is empty, both are set to null. This is undesirable for
+/// queries that make use of `scan_limit`; when the scan limit is reached, a caller can continue to
+/// paginate forwards or backwards until all candidates in the scanning range have been visited,
+/// even if the current page yields no results.
+pub(crate) struct ScanConnection<
+ Cursor,
+ Node,
+ EdgeFields = EmptyFields,
+ Name = DefaultConnectionName,
+ EdgeName = DefaultEdgeName,
+ NodesField = EnableNodesField,
+> where
+ Cursor: CursorType + Send + Sync,
+ Node: OutputType,
+ EdgeFields: ObjectType,
+ Name: ConnectionNameType,
+ EdgeName: EdgeNameType,
+ NodesField: NodesFieldSwitcherSealed,
+{
+ _mark1: PhantomData<Name>,
+ _mark2: PhantomData<EdgeName>,
+ _mark3: PhantomData<NodesField>,
+ pub edges: Vec<Edge<Cursor, Node, EdgeFields>>,
+ pub has_previous_page: bool,
+ pub has_next_page: bool,
+ pub start_cursor: Option<String>,
+ pub end_cursor: Option<String>,
+}
+
+#[Object(name_type)]
+impl<Cursor, Node, EdgeFields, Name, EdgeName>
+ ScanConnection<Cursor, Node, EdgeFields, Name, EdgeName, EnableNodesField>
+where
+ Cursor: CursorType + Send + Sync,
+ Node: OutputType,
+ EdgeFields: ObjectType,
+ Name: ConnectionNameType,
+ EdgeName: EdgeNameType,
+{
+ /// Information to aid in pagination.
+ async fn page_info(&self) -> PageInfo {
+ // Unlike the default implementation, this Connection will use `start_cursor` and
+ // `end_cursor` if they are `Some`.
+ PageInfo {
+ has_previous_page: self.has_previous_page,
+ has_next_page: self.has_next_page,
+ start_cursor: self
+ .start_cursor
+ .clone()
+ .or_else(|| self.edges.first().map(|edge| edge.cursor.encode_cursor())),
+ end_cursor: self
+ .end_cursor
+ .clone()
+ .or_else(|| self.edges.last().map(|edge| edge.cursor.encode_cursor())),
+ }
+ }
+
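The `page_info` override above is the crux of scan-limited pagination: a page that matched nothing can still report where the scan stopped. A minimal, dependency-free sketch of that fallback, with plain structs standing in for the async-graphql connection types and a made-up `tx:` cursor format:

```rust
struct PageInfo {
    has_next_page: bool,
    start_cursor: Option<String>,
    end_cursor: Option<String>,
}

struct ScanPage {
    edges: Vec<(String, u64)>, // (cursor, node) pairs
    has_next_page: bool,
    // Set when a scan limit was hit; takes precedence over edge-derived cursors.
    end_cursor: Option<String>,
}

impl ScanPage {
    fn page_info(&self) -> PageInfo {
        PageInfo {
            has_next_page: self.has_next_page,
            start_cursor: self.edges.first().map(|(c, _)| c.clone()),
            // Prefer the scan-limit cursor: even an empty page can point at the
            // last *scanned* candidate so the client can resume from there.
            end_cursor: self
                .end_cursor
                .clone()
                .or_else(|| self.edges.last().map(|(c, _)| c.clone())),
        }
    }
}

fn main() {
    // The scan limit was reached before any match was found: no edges, but the
    // connection still reports where the scan stopped.
    let page = ScanPage {
        edges: vec![],
        has_next_page: true,
        end_cursor: Some("tx:5000".to_string()),
    };

    let info = page.page_info();
    assert_eq!(info.start_cursor, None);
    assert_eq!(info.end_cursor.as_deref(), Some("tx:5000"));
    assert!(info.has_next_page);
}
```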
+ /// A list of edges.
+ #[inline]
+ async fn edges(&self) -> &[Edge<Cursor, Node, EdgeFields>] {
+ &self.edges
+ }
+
+ /// A list of nodes.
+ async fn nodes(&self) -> Vec<&Node> {
+ self.edges.iter().map(|e| &e.node).collect()
+ }
+}
+
+impl<Cursor, Node, EdgeFields, Name, EdgeName, NodesField>
+ ScanConnection<Cursor, Node, EdgeFields, Name, EdgeName, NodesField>
+where
+ Cursor: CursorType + Send + Sync,
+ Node: OutputType,
+ EdgeFields: ObjectType,
+ Name: ConnectionNameType,
+ EdgeName: EdgeNameType,
+ NodesField: NodesFieldSwitcherSealed,
+{
+ /// Create a new connection.
+ #[inline]
+ pub fn new(has_previous_page: bool, has_next_page: bool) -> Self {
+ ScanConnection {
+ _mark1: PhantomData,
+ _mark2: PhantomData,
+ _mark3: PhantomData,
+ edges: Vec::new(),
+ has_previous_page,
+ has_next_page,
+ start_cursor: None,
+ end_cursor: None,
+ }
+ }
+}
+
+impl<Cursor, Node, EdgeFields, Name, EdgeName, NodesField> TypeName
+ for ScanConnection<Cursor, Node, EdgeFields, Name, EdgeName, NodesField>
+where
+ Cursor: CursorType + Send + Sync,
+ Node: OutputType,
+ EdgeFields: ObjectType,
+ Name: ConnectionNameType,
+ EdgeName: EdgeNameType,
+ NodesField: NodesFieldSwitcherSealed,
+{
+ #[inline]
+ fn type_name() -> Cow<'static, str> {
+ Name::type_name::<Node>().into()
+ }
+}
diff --git a/crates/sui-graphql-rpc/src/consistency.rs b/crates/sui-graphql-rpc/src/consistency.rs
index a4719e5855cdd..285e6ce8f36ba 100644
--- a/crates/sui-graphql-rpc/src/consistency.rs
+++ b/crates/sui-graphql-rpc/src/consistency.rs
@@ -7,7 +7,7 @@ use sui_indexer::models::objects::StoredHistoryObject;
 use crate::raw_query::RawQuery;
 use crate::types::available_range::AvailableRange;
-use crate::types::cursor::{JsonCursor, Page};
+use crate::types::cursor::{JsonCursor, Page, ScanLimited};
 use crate::types::object::Cursor;
 use crate::{filter, query};
 
@@ -59,6 +59,10 @@ impl Checkpointed for JsonCursor<ConsistentNamedCursor> {
 }
 }
 
+impl ScanLimited for JsonCursor<ConsistentIndexCursor> {}
+
+impl ScanLimited for JsonCursor<ConsistentNamedCursor> {}
+
 /// Constructs a `RawQuery` against the `objects_snapshot` and `objects_history` table to fetch
 /// objects that satisfy some filtering criteria `filter_fn` within the provided checkpoint `range`.
 /// The `objects_snapshot` table contains the latest versions of objects up to a checkpoint sequence
diff --git a/crates/sui-graphql-rpc/src/data/package_resolver.rs b/crates/sui-graphql-rpc/src/data/package_resolver.rs
index 467911753d266..f10067fd007b9 100644
--- a/crates/sui-graphql-rpc/src/data/package_resolver.rs
+++ b/crates/sui-graphql-rpc/src/data/package_resolver.rs
@@ -26,7 +26,7 @@ pub(crate) type PackageResolver = Arc<Resolver<PackageCache>>;
 /// to `fetch`
 pub struct DbPackageStore(DataLoader);
 
-/// DataLoader key for fetching the latest version of a `Package` by its ID.
+/// `DataLoader` key for fetching the latest version of a `Package` by its ID.
 #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
 struct PackageKey(AccountAddress);
 
diff --git a/crates/sui-graphql-rpc/src/data/pg.rs b/crates/sui-graphql-rpc/src/data/pg.rs
index 980bde05b1fda..cdf57f7d542b9 100644
--- a/crates/sui-graphql-rpc/src/data/pg.rs
+++ b/crates/sui-graphql-rpc/src/data/pg.rs
@@ -1,8 +1,6 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use std::time::Instant; - use super::QueryExecutor; use crate::{config::Limits, error::Error, metrics::Metrics}; use async_trait::async_trait; @@ -12,6 +10,8 @@ use diesel::{ query_dsl::LoadQuery, QueryResult, RunQueryDsl, }; +use std::fmt; +use std::time::Instant; use sui_indexer::indexer_reader::IndexerReader; use sui_indexer::{run_query_async, run_query_repeatable_async, spawn_read_only_blocking}; @@ -29,6 +29,8 @@ pub(crate) struct PgConnection<'c> { conn: &'c mut diesel::PgConnection, } +pub(crate) struct ByteaLiteral<'a>(pub &'a [u8]); + impl PgExecutor { pub(crate) fn new( inner: IndexerReader, @@ -118,6 +120,16 @@ impl<'c> super::DbConnection for PgConnection<'c> { } } +impl fmt::Display for ByteaLiteral<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "'\\x{}'::bytea", hex::encode(self.0)) + } +} + +pub(crate) fn bytea_literal(slice: &[u8]) -> ByteaLiteral<'_> { + ByteaLiteral(slice) +} + /// Support for calculating estimated query cost using EXPLAIN and then logging it. mod query_cost { use super::*; @@ -208,7 +220,7 @@ mod tests { ) .unwrap(); let mut conn = get_pool_connection(&pool).unwrap(); - reset_database(&mut conn, /* drop_all */ true).unwrap(); + reset_database(&mut conn).unwrap(); let objects: Vec = BuiltInFramework::iter_system_packages() .map(|pkg| IndexedObject::from_object(1, pkg.genesis_object(), None).into()) diff --git a/crates/sui-graphql-rpc/src/examples.rs b/crates/sui-graphql-rpc/src/examples.rs deleted file mode 100644 index 56f86ca428180..0000000000000 --- a/crates/sui-graphql-rpc/src/examples.rs +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -use anyhow::anyhow; -use markdown_gen::markdown::{AsMarkdown, Markdown}; -use std::io::{BufWriter, Read}; -use std::path::PathBuf; - -#[derive(Debug)] -pub struct ExampleQuery { - pub name: String, - pub contents: String, - pub path: PathBuf, -} - -#[derive(Debug)] -pub struct ExampleQueryGroup { - pub name: String, - pub queries: Vec, - pub _path: PathBuf, -} - -const QUERY_EXT: &str = "graphql"; - -fn regularize_string(s: &str) -> String { - // Replace underscore with space and make every word first letter uppercase - s.replace('_', " ") - .split_whitespace() - .map(|word| { - let mut chars = word.chars(); - match chars.next() { - None => String::new(), - Some(f) => f.to_uppercase().chain(chars).collect(), - } - }) - .collect::>() - .join(" ") -} - -pub fn load_examples() -> anyhow::Result> { - let mut buf: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - buf.push("examples"); - - let mut groups = vec![]; - for entry in std::fs::read_dir(buf).map_err(|e| anyhow::anyhow!(e))? { - let entry = entry.map_err(|e| anyhow::anyhow!(e))?; - let path = entry.path(); - let group_name = path - .file_stem() - .ok_or(anyhow::anyhow!("File stem cannot be read"))? - .to_str() - .ok_or(anyhow::anyhow!("File stem cannot be read"))? - .to_string(); - - let mut group = ExampleQueryGroup { - name: group_name.clone(), - queries: vec![], - _path: path.clone(), - }; - - for file in std::fs::read_dir(path).map_err(|e| anyhow::anyhow!(e))? { - assert!(file.is_ok()); - let file = file.map_err(|e| anyhow::anyhow!(e))?; - assert!(file.path().extension().is_some()); - let ext = file - .path() - .extension() - .ok_or(anyhow!("File extension cannot be read"))? - .to_str() - .ok_or(anyhow!("File extension cannot be read to string"))? 
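`ByteaLiteral` above exists so byte keys (object IDs, digests) can be spliced into raw SQL. A standalone sketch of the same formatting, assuming only the `hex` crate that `pg.rs` itself uses:

```rust
use std::fmt;

// Hex-encodes a byte slice and wraps it in Postgres's escape syntax so it can
// be interpolated into a raw SQL string (e.g. by the `filter!` macro).
struct ByteaLiteral<'a>(&'a [u8]);

impl fmt::Display for ByteaLiteral<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "'\\x{}'::bytea", hex::encode(self.0))
    }
}

fn main() {
    // Column name is illustrative only.
    let object_id = [0xde, 0xad, 0xbe, 0xef];
    let clause = format!("object_id = {}", ByteaLiteral(&object_id));
    assert_eq!(clause, r"object_id = '\xdeadbeef'::bytea");
}
```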
- .to_string(); - assert_eq!(ext, QUERY_EXT, "wrong file extension for example"); - - let file_path = file.path(); - let query_name = file_path - .file_stem() - .ok_or(anyhow!("File stem cannot be read"))? - .to_str() - .ok_or(anyhow!("File extension cannot be read to string"))? - .to_string(); - - let mut contents = String::new(); - let mut fp = std::fs::File::open(file_path.clone()).map_err(|e| anyhow!(e))?; - fp.read_to_string(&mut contents).map_err(|e| anyhow!(e))?; - group.queries.push(ExampleQuery { - name: query_name, - contents, - path: file_path, - }); - } - group.queries.sort_by(|x, y| x.name.cmp(&y.name)); - - groups.push(group); - } - - groups.sort_by(|x, y| x.name.cmp(&y.name)); - Ok(groups) -} - -/// This generates a markdown page with all the examples, to be used in the docs site -pub fn generate_examples_for_docs() -> anyhow::Result { - let groups = load_examples()?; - - let mut output = BufWriter::new(Vec::new()); - let mut md = Markdown::new(&mut output); - md.write( - r#"--- -title: Examples -description: Query examples for working with the Sui GraphQL RPC. ---- -"#, - )?; - md.write("This page showcases a number of queries to interact with the network. These examples can also be found in the [repository](https://github.com/MystenLabs/sui/tree/main/crates/sui-graphql-rpc/examples). You can use the [interactive online IDE](https://mainnet.sui.io/rpc/graphql) to run these examples.")?; - for group in groups.iter() { - let group_name = regularize_string(&group.name); - md.write(group_name.heading(2)) - .map_err(|e| anyhow::anyhow!(e))?; - for query in group.queries.iter() { - let name = regularize_string(&query.name); - md.write(name.heading(3)).map_err(|e| anyhow::anyhow!(e))?; - let query = query.contents.lines().collect::>().join("\n"); - let content = format!("```graphql\n{}\n```", query); - md.write(content.as_str()).map_err(|e| anyhow::anyhow!(e))?; - } - } - let bytes = output.into_inner().map_err(|e| anyhow::anyhow!(e))?; - Ok(String::from_utf8(bytes) - .map_err(|e| anyhow::anyhow!(e))? 
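For reference, a self-contained copy of the `regularize_string` helper being deleted here, with the full turbofish on the final `collect` spelled out; it turns snake_case file stems into the Title Case headings used in the generated markdown:

```rust
// Replace underscores with spaces and uppercase the first letter of each word.
fn regularize_string(s: &str) -> String {
    s.replace('_', " ")
        .split_whitespace()
        .map(|word| {
            let mut chars = word.chars();
            match chars.next() {
                None => String::new(),
                Some(f) => f.to_uppercase().chain(chars).collect(),
            }
        })
        .collect::<Vec<String>>()
        .join(" ")
}

fn main() {
    assert_eq!(
        regularize_string("transaction_block_effects"),
        "Transaction Block Effects"
    );
}
```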
- .replace('\\', "")) -} - -pub fn generate_markdown() -> anyhow::Result { - let groups = load_examples()?; - - let mut output = BufWriter::new(Vec::new()); - let mut md = Markdown::new(&mut output); - - md.write("Sui GraphQL Examples".heading(1)) - .map_err(|e| anyhow!(e))?; - - // TODO: reduce multiple loops - // Generate the table of contents - for (id, group) in groups.iter().enumerate() { - let group_name = regularize_string(&group.name); - let group_name_toc = format!("[{}](#{})", group_name, id); - md.write(group_name_toc.heading(3)) - .map_err(|e| anyhow!(e))?; - - for (inner, query) in group.queries.iter().enumerate() { - let inner_id = inner + 0xFFFF * id; - let inner_name = regularize_string(&query.name); - let inner_name_toc = format!("  [{}](#{})", inner_name, inner_id); - md.write(inner_name_toc.heading(4)) - .map_err(|e| anyhow!(e))?; - } - } - - for (id, group) in groups.iter().enumerate() { - let group_name = regularize_string(&group.name); - - let id_tag = format!("", id); - md.write(id_tag.heading(2)) - .map_err(|e| anyhow::anyhow!(e))?; - md.write(group_name.heading(2)) - .map_err(|e| anyhow::anyhow!(e))?; - for (inner, query) in group.queries.iter().enumerate() { - let inner_id = inner + 0xFFFF * id; - let name = regularize_string(&query.name); - - let id_tag = format!("", inner_id); - md.write(id_tag.heading(3)) - .map_err(|e| anyhow::anyhow!(e))?; - md.write(name.heading(3)).map_err(|e| anyhow::anyhow!(e))?; - - // Extract all lines that start with `#` and use them as headers - let mut headers = vec![]; - let mut query_start = 0; - for (idx, line) in query.contents.lines().enumerate() { - let line = line.trim(); - if line.starts_with('#') { - headers.push(line.trim_start_matches('#')); - } else if line.starts_with('{') { - query_start = idx; - break; - } - } - - // Remove headers from query - let query = query - .contents - .lines() - .skip(query_start) - .collect::>() - .join("\n"); - - let content = format!("
<pre>{}
</pre>
", query); - for header in headers { - md.write(header.heading(4)) - .map_err(|e| anyhow::anyhow!(e))?; - } - md.write(content.quote()).map_err(|e| anyhow::anyhow!(e))?; - } - } - let bytes = output.into_inner().map_err(|e| anyhow::anyhow!(e))?; - Ok(String::from_utf8(bytes) - .map_err(|e| anyhow::anyhow!(e))? - .replace('\\', "")) -} - -#[test] -fn test_generate_markdown() { - use similar::*; - use std::fs::File; - - let mut buf: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - buf.push("docs"); - buf.push("examples.md"); - let mut out_file: File = File::open(buf).expect("Could not open examples.md"); - - // Read the current content of `out_file` - let mut current_content = String::new(); - out_file - .read_to_string(&mut current_content) - .expect("Could not read examples.md"); - let new_content: String = generate_markdown().expect("Generating examples markdown failed"); - - if current_content != new_content { - let mut res = vec![]; - let diff = TextDiff::from_lines(¤t_content, &new_content); - for change in diff.iter_all_changes() { - let sign = match change.tag() { - ChangeTag::Delete => "---", - ChangeTag::Insert => "+++", - ChangeTag::Equal => " ", - }; - res.push(format!("{}{}", sign, change)); - } - panic!("Doc examples have changed. Please run `sui-graphql-rpc generate-examples` to update the docs. Diff: {}", res.join("")); - } -} diff --git a/crates/sui-graphql-rpc/src/lib.rs b/crates/sui-graphql-rpc/src/lib.rs index baea0d2ce2ce8..e299aa241eec9 100644 --- a/crates/sui-graphql-rpc/src/lib.rs +++ b/crates/sui-graphql-rpc/src/lib.rs @@ -4,11 +4,11 @@ pub use sui_graphql_rpc_client as client; pub mod commands; pub mod config; +pub(crate) mod connection; pub(crate) mod consistency; pub mod context_data; pub(crate) mod data; mod error; -pub mod examples; pub mod extensions; pub(crate) mod functional_group; mod metrics; diff --git a/crates/sui-graphql-rpc/src/main.rs b/crates/sui-graphql-rpc/src/main.rs index 6e552a09e92e8..cedc55b39e72a 100644 --- a/crates/sui-graphql-rpc/src/main.rs +++ b/crates/sui-graphql-rpc/src/main.rs @@ -9,7 +9,6 @@ use sui_graphql_rpc::commands::Command; use sui_graphql_rpc::config::{ ConnectionConfig, Ide, ServerConfig, ServiceConfig, TxExecFullNodeConfig, Version, }; -use sui_graphql_rpc::server::builder::export_schema; use sui_graphql_rpc::server::graphiql_server::start_graphiql_server; use tokio_util::sync::CancellationToken; use tokio_util::task::TaskTracker; @@ -38,39 +37,19 @@ static VERSION: Version = Version { async fn main() { let cmd: Command = Command::parse(); match cmd { - Command::GenerateDocsExamples => { - let mut buf: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - // we are looking to put examples content in - // sui/docs/content/references/sui-graphql/examples.mdx - let filename = "docs/content/references/sui-graphql/examples.mdx"; - buf.pop(); - buf.pop(); - buf.push(filename); - let content = sui_graphql_rpc::examples::generate_examples_for_docs() - .expect("Generating examples markdown file for docs failed"); - std::fs::write(buf, content).expect("Writing examples markdown failed"); - println!("Generated the docs example.mdx file and copied it to {filename}."); - } - Command::GenerateSchema { file } => { - let out = export_schema(); - if let Some(file) = file { - println!("Write schema to file: {:?}", file); - std::fs::write(file, &out).unwrap(); + Command::GenerateConfig { output } => { + let config = ServiceConfig::default(); + let toml = toml::to_string_pretty(&config).expect("Failed to serialize configuration"); + + if 
let Some(path) = output { + fs::write(&path, toml).unwrap_or_else(|e| { + panic!("Failed to write configuration to {}: {e}", path.display()) + }); } else { - println!("{}", &out); + println!("{}", toml); } } - Command::GenerateExamples { file } => { - let new_content: String = sui_graphql_rpc::examples::generate_markdown() - .expect("Generating examples markdown failed"); - let mut buf: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - buf.push("docs"); - buf.push("examples.md"); - let file = file.unwrap_or(buf); - std::fs::write(file.clone(), new_content).expect("Writing examples markdown failed"); - println!("Written examples to file: {:?}", file); - } Command::StartServer { ide_title, db_url, diff --git a/crates/sui-graphql-rpc/src/raw_query.rs b/crates/sui-graphql-rpc/src/raw_query.rs index e372d478fb22f..55fed2d96c2f3 100644 --- a/crates/sui-graphql-rpc/src/raw_query.rs +++ b/crates/sui-graphql-rpc/src/raw_query.rs @@ -179,6 +179,27 @@ macro_rules! or_filter { }}; } +/// Accepts two `RawQuery` instances and a third expression consisting of which columns to join on. +#[macro_export] +macro_rules! inner_join { + ($lhs:expr, $alias:expr => $rhs_query:expr, using: [$using:expr $(, $more_using:expr)*]) => {{ + use $crate::raw_query::RawQuery; + + let (lhs_sql, mut binds) = $lhs.finish(); + let (rhs_sql, rhs_binds) = $rhs_query.finish(); + + binds.extend(rhs_binds); + + let sql = format!( + "{lhs_sql} INNER JOIN ({rhs_sql}) AS {} USING ({})", + $alias, + stringify!($using $(, $more_using)*), + ); + + RawQuery::new(sql, binds) + }}; +} + /// Accepts a `SELECT FROM` format string and optional subqueries. If subqueries are provided, there /// should be curly braces `{}` in the format string to interpolate each subquery's sql string into. /// Concatenates subqueries to the `SELECT FROM` clause, and creates a new `RawQuery` from the @@ -193,7 +214,8 @@ macro_rules! query { }; // Expects a select clause and one or more subqueries. The select clause should contain curly - // braces for subqueries to be interpolated into. + // braces for subqueries to be interpolated into. Use when the subqueries can be aliased + // directly in the select statement. 
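To make the `inner_join!` expansion above concrete, here is a dependency-free sketch of the SQL it assembles; `RawQuery` is reduced to a SQL string plus binds, and the table and column names are illustrative, not taken from the indexer schema:

```rust
struct RawQuery {
    sql: String,
    binds: Vec<String>,
}

impl RawQuery {
    fn new(sql: impl Into<String>, binds: Vec<String>) -> Self {
        Self { sql: sql.into(), binds }
    }

    fn finish(self) -> (String, Vec<String>) {
        (self.sql, self.binds)
    }
}

// Mirrors `inner_join!($lhs, $alias => $rhs, using: [$($cols),*])`: the
// right-hand side becomes an aliased subquery, and binds from both sides are
// threaded through to the combined query.
fn inner_join(lhs: RawQuery, alias: &str, rhs: RawQuery, using: &[&str]) -> RawQuery {
    let (lhs_sql, mut binds) = lhs.finish();
    let (rhs_sql, rhs_binds) = rhs.finish();
    binds.extend(rhs_binds);

    let sql = format!(
        "{lhs_sql} INNER JOIN ({rhs_sql}) AS {alias} USING ({})",
        using.join(", "),
    );
    RawQuery::new(sql, binds)
}

fn main() {
    let lhs = RawQuery::new("SELECT * FROM objects_history", vec![]);
    let rhs = RawQuery::new("SELECT original_id, object_version FROM packages", vec![]);

    let joined = inner_join(lhs, "pkg", rhs, &["original_id", "object_version"]);
    assert!(joined
        .sql
        .ends_with("AS pkg USING (original_id, object_version)"));
}
```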
($select:expr $(,$subquery:expr)+) => {{ use $crate::raw_query::RawQuery; let mut binds = vec![]; diff --git a/crates/sui-graphql-rpc/src/server/graphiql_server.rs b/crates/sui-graphql-rpc/src/server/graphiql_server.rs index d5c2f329ecf7f..7a809c01d85be 100644 --- a/crates/sui-graphql-rpc/src/server/graphiql_server.rs +++ b/crates/sui-graphql-rpc/src/server/graphiql_server.rs @@ -31,7 +31,7 @@ pub async fn start_graphiql_server( version: &Version, cancellation_token: CancellationToken, ) -> Result<(), Error> { - info!("Starting server with config: {:?}", server_config); + info!("Starting server with config: {:#?}", server_config); info!("Server version: {}", version); start_graphiql_server_impl( ServerBuilder::from_config(server_config, version, cancellation_token).await?, diff --git a/crates/sui-graphql-rpc/src/server/version.rs b/crates/sui-graphql-rpc/src/server/version.rs index 0dcb46a5ba891..b73bc9b9d215d 100644 --- a/crates/sui-graphql-rpc/src/server/version.rs +++ b/crates/sui-graphql-rpc/src/server/version.rs @@ -17,6 +17,7 @@ use crate::{ pub(crate) static VERSION_HEADER: HeaderName = HeaderName::from_static("x-sui-rpc-version"); +#[allow(unused)] pub(crate) struct SuiRpcVersion(Vec, Vec>); const NAMED_VERSIONS: [&str; 3] = ["beta", "legacy", "stable"]; diff --git a/crates/sui-graphql-rpc/src/test_infra/cluster.rs b/crates/sui-graphql-rpc/src/test_infra/cluster.rs index 036984929df2b..1625192757f26 100644 --- a/crates/sui-graphql-rpc/src/test_infra/cluster.rs +++ b/crates/sui-graphql-rpc/src/test_infra/cluster.rs @@ -127,7 +127,7 @@ pub async fn serve_executor( let executor_server_handle = tokio::spawn(async move { sui_rest_api::RestService::new_without_version(executor) - .start_service(executor_server_url, Some("/rest".to_owned())) + .start_service(executor_server_url) .await; }); diff --git a/crates/sui-graphql-rpc/src/types/address.rs b/crates/sui-graphql-rpc/src/types/address.rs index 6ba4dd390c79d..abd07bc9701e9 100644 --- a/crates/sui-graphql-rpc/src/types/address.rs +++ b/crates/sui-graphql-rpc/src/types/address.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::connection::ScanConnection; + use super::{ balance::{self, Balance}, coin::Coin, @@ -135,6 +137,25 @@ impl Address { /// Similar behavior to the `transactionBlocks` in Query but supporting the additional /// `AddressTransactionBlockRelationship` filter, which defaults to `SIGN`. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. 
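The scan-limit contract documented above implies a simple client loop: keep following `endCursor` while `hasNextPage` is true, even when a page comes back empty. A sketch with stand-in types, where `fetch_page` represents a GraphQL request issued with `first` and `scanLimit` set:

```rust
struct PageInfo {
    has_next_page: bool,
    end_cursor: Option<String>,
}

struct Page {
    nodes: Vec<String>, // e.g. transaction digests
    page_info: PageInfo,
}

// Stand-in for a request like:
//   transactionBlocks(first: 50, scanLimit: 1000, after: $after, filter: { ... })
fn fetch_page(_after: Option<&str>) -> Page {
    Page {
        nodes: vec![],
        page_info: PageInfo { has_next_page: false, end_cursor: None },
    }
}

fn collect_all() -> Vec<String> {
    let mut all = Vec::new();
    let mut after: Option<String> = None;
    loop {
        let page = fetch_page(after.as_deref());
        // A scan-limited page may be empty while `hasNextPage` is still true;
        // keep following `endCursor` until the scanning range is exhausted.
        all.extend(page.nodes);
        if !page.page_info.has_next_page {
            break;
        }
        match page.page_info.end_cursor {
            Some(cursor) => after = Some(cursor),
            None => break, // defensive: nothing to resume from
        }
    }
    all
}

fn main() {
    assert!(collect_all().is_empty());
}
```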
+ /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. async fn transaction_blocks( &self, ctx: &Context<'_>, @@ -144,7 +165,8 @@ impl Address { before: Option, relation: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { use AddressTransactionBlockRelationship as R; let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; @@ -160,17 +182,12 @@ impl Address { ..Default::default() }, }) else { - return Ok(Connection::new(false, false)); + return Ok(ScanConnection::new(false, false)); }; - TransactionBlock::paginate( - ctx.data_unchecked(), - page, - filter, - self.checkpoint_viewed_at, - ) - .await - .extend() + TransactionBlock::paginate(ctx, page, filter, self.checkpoint_viewed_at, scan_limit) + .await + .extend() } } diff --git a/crates/sui-graphql-rpc/src/types/balance.rs b/crates/sui-graphql-rpc/src/types/balance.rs index 8e4d199df0be0..57eb83935d407 100644 --- a/crates/sui-graphql-rpc/src/types/balance.rs +++ b/crates/sui-graphql-rpc/src/types/balance.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use super::available_range::AvailableRange; -use super::cursor::{self, Page, RawPaginated, Target}; +use super::cursor::{self, Page, RawPaginated, ScanLimited, Target}; use super::uint53::UInt53; use super::{big_int::BigInt, move_type::MoveType, sui_address::SuiAddress}; use crate::consistency::Checkpointed; @@ -161,6 +161,8 @@ impl Checkpointed for Cursor { } } +impl ScanLimited for Cursor {} + impl TryFrom for Balance { type Error = Error; diff --git a/crates/sui-graphql-rpc/src/types/checkpoint.rs b/crates/sui-graphql-rpc/src/types/checkpoint.rs index cbebb9935f491..852c492967f21 100644 --- a/crates/sui-graphql-rpc/src/types/checkpoint.rs +++ b/crates/sui-graphql-rpc/src/types/checkpoint.rs @@ -5,7 +5,7 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use super::{ base64::Base64, - cursor::{self, Page, Paginated, Target}, + cursor::{self, Page, Paginated, ScanLimited, Target}, date_time::DateTime, digest::Digest, epoch::Epoch, @@ -13,7 +13,7 @@ use super::{ transaction_block::{self, TransactionBlock, TransactionBlockFilter}, uint53::UInt53, }; -use crate::consistency::Checkpointed; +use crate::{connection::ScanConnection, consistency::Checkpointed}; use crate::{ data::{self, Conn, DataLoader, Db, DbConnection, QueryExecutor}, error::Error, @@ -36,7 +36,7 @@ pub(crate) struct CheckpointId { pub sequence_number: Option, } -/// DataLoader key for fetching a `Checkpoint` by its sequence number, constrained by a consistency +/// `DataLoader` key for fetching a `Checkpoint` by its sequence number, constrained by a consistency /// cursor. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct SeqNumKey { @@ -47,7 +47,7 @@ struct SeqNumKey { pub checkpoint_viewed_at: u64, } -/// DataLoader key for fetching a `Checkpoint` by its digest, constrained by a consistency cursor. +/// `DataLoader` key for fetching a `Checkpoint` by its digest, constrained by a consistency cursor. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct DigestKey { pub digest: Digest, @@ -144,6 +144,23 @@ impl Checkpoint { } /// Transactions in this checkpoint. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. 
It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range consists of all transactions in this checkpoint. async fn transaction_blocks( &self, ctx: &Context<'_>, @@ -152,7 +169,8 @@ impl Checkpoint { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; let Some(filter) = filter @@ -162,17 +180,12 @@ impl Checkpoint { ..Default::default() }) else { - return Ok(Connection::new(false, false)); + return Ok(ScanConnection::new(false, false)); }; - TransactionBlock::paginate( - ctx.data_unchecked(), - page, - filter, - self.checkpoint_viewed_at, - ) - .await - .extend() + TransactionBlock::paginate(ctx, page, filter, self.checkpoint_viewed_at, scan_limit) + .await + .extend() } } @@ -373,6 +386,8 @@ impl Checkpointed for Cursor { } } +impl ScanLimited for Cursor {} + #[async_trait::async_trait] impl Loader for Db { type Value = Checkpoint; diff --git a/crates/sui-graphql-rpc/src/types/coin.rs b/crates/sui-graphql-rpc/src/types/coin.rs index 537c7ca2c44b0..d9654bbe87f10 100644 --- a/crates/sui-graphql-rpc/src/types/coin.rs +++ b/crates/sui-graphql-rpc/src/types/coin.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::connection::ScanConnection; use crate::consistency::{build_objects_query, View}; use crate::data::{Db, QueryExecutor}; use crate::error::Error; @@ -193,6 +194,25 @@ impl Coin { } /// The transaction blocks that sent objects to this object. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. 
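Keys like `SeqNumKey` and `DigestKey` above exist so that many point lookups can be coalesced into one batched fetch, per async-graphql's dataloader contract. A simplified, synchronous sketch of that idea; the types and the consistency filter are stand-ins, not the real `Db` logic:

```rust
use std::collections::HashMap;

#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
struct SeqNumKey {
    sequence_number: u64,
    // Consistency bound: the checkpoint this query is being viewed at.
    checkpoint_viewed_at: u64,
}

fn load_batch(keys: &[SeqNumKey]) -> HashMap<SeqNumKey, String> {
    // One query filtered on all requested sequence numbers instead of N point
    // queries. The visibility rule here is illustrative: a checkpoint is only
    // returned if it exists at or before the checkpoint being viewed.
    keys.iter()
        .filter(|k| k.sequence_number <= k.checkpoint_viewed_at)
        .map(|k| (*k, format!("checkpoint-{}", k.sequence_number)))
        .collect()
}

fn main() {
    let keys = [
        SeqNumKey { sequence_number: 3, checkpoint_viewed_at: 10 },
        SeqNumKey { sequence_number: 42, checkpoint_viewed_at: 10 },
    ];
    let loaded = load_batch(&keys);
    assert!(loaded.contains_key(&keys[0]));
    assert!(!loaded.contains_key(&keys[1])); // beyond the consistency bound
}
```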
+ /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -201,9 +221,10 @@ impl Coin { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(&self.super_.super_) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } diff --git a/crates/sui-graphql-rpc/src/types/coin_metadata.rs b/crates/sui-graphql-rpc/src/types/coin_metadata.rs index bad0545636b84..2052c8b42b03d 100644 --- a/crates/sui-graphql-rpc/src/types/coin_metadata.rs +++ b/crates/sui-graphql-rpc/src/types/coin_metadata.rs @@ -17,6 +17,7 @@ use super::suins_registration::{DomainFormat, SuinsRegistration}; use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; use super::type_filter::ExactTypeFilter; use super::uint53::UInt53; +use crate::connection::ScanConnection; use crate::data::Db; use crate::error::Error; use async_graphql::connection::Connection; @@ -182,6 +183,25 @@ impl CoinMetadata { } /// The transaction blocks that sent objects to this object. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -190,9 +210,10 @@ impl CoinMetadata { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(&self.super_.super_) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } diff --git a/crates/sui-graphql-rpc/src/types/cursor.rs b/crates/sui-graphql-rpc/src/types/cursor.rs index 65f7e21673bbb..868bfed21388b 100644 --- a/crates/sui-graphql-rpc/src/types/cursor.rs +++ b/crates/sui-graphql-rpc/src/types/cursor.rs @@ -50,7 +50,7 @@ pub(crate) struct Page { /// Whether the page is extracted from the beginning or the end of the range bounded by the cursors. 
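A small sketch of the front/back distinction captured by the `End` enum in the hunk below: `last` on its own anchors the page at the back of the range, anything else draws from the front (validation of conflicting parameters is omitted here):

```rust
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
enum End {
    Front,
    Back,
}

fn page_end(first: Option<u64>, last: Option<u64>) -> End {
    match (first, last) {
        // `last` alone draws from the back of the range...
        (None, Some(_)) => End::Back,
        // ...anything else (including neither limit) defaults to the front.
        _ => End::Front,
    }
}

fn main() {
    assert_eq!(page_end(Some(10), None), End::Front);
    assert_eq!(page_end(None, Some(10)), End::Back);
    assert_eq!(page_end(None, None), End::Front);
}
```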
#[derive(PartialEq, Eq, Debug, Clone, Copy)] -enum End { +pub(crate) enum End { Front, Back, } @@ -101,6 +101,21 @@ pub(crate) trait Target { fn cursor(&self, checkpoint_viewed_at: u64) -> C; } +/// Interface for dealing with cursors that may come from a scan-limit-ed query. +pub(crate) trait ScanLimited: Clone + PartialEq { + /// Whether the cursor was derived from a scan limit. Only applicable to the `startCursor` and + /// `endCursor` returned from a Connection's `PageInfo`, and indicates that the cursor may not + /// have a corresponding node in the result set. + fn is_scan_limited(&self) -> bool { + false + } + + /// Returns a version of the cursor that is not scan limited. + fn unlimited(&self) -> Self { + self.clone() + } +} + impl JsonCursor { pub(crate) fn new(cursor: C) -> Self { JsonCursor(OpaqueCursor(cursor)) @@ -184,6 +199,10 @@ impl Page { pub(crate) fn is_from_front(&self) -> bool { matches!(self.end, End::Front) } + + pub(crate) fn end(&self) -> End { + self.end + } } impl Page @@ -261,7 +280,7 @@ impl Page> { } } -impl Page { +impl Page { /// Treat the cursors of this page as upper- and lowerbound filters for a database `query`. /// Returns two booleans indicating whether there is a previous or next page in the range, /// followed by an iterator of values in the page, fetched from the database. @@ -361,7 +380,9 @@ impl Page { } /// Given the results of a database query, determine whether the result set has a previous and - /// next page and is consistent with the provided cursors. + /// next page and is consistent with the provided cursors. Slightly different logic applies + /// depending on whether the provided cursors stem from either tip of the response, or if they + /// were derived from a scan limit. /// /// Returns two booleans indicating whether there is a previous or next page in the range, /// followed by an iterator of values in the page, fetched from the database. The values @@ -373,7 +394,7 @@ impl Page { results: Vec, ) -> (bool, bool, impl Iterator) where - T: Send + 'static, + T: Target + Send + 'static, { // Detect whether the results imply the existence of a previous or next page. let (prev, next, prefix, suffix) = @@ -386,27 +407,32 @@ impl Page { } // Page drawn from the front, and the cursor for the first element does not match - // `after`. This implies the bound was invalid, so we return an empty result. - (Some(a), Some(f), _, _, End::Front) if f != *a => { + // `after`. If that cursor is not from a scan limit, then it must have appeared in + // the previous page, and should also be at the tip of the current page. This + // absence implies the bound was invalid, so we return an empty result. + (Some(a), Some(f), _, _, End::Front) if f != *a && !a.is_scan_limited() => { return (false, false, vec![].into_iter()); } // Similar to above case, but for back of results. - (_, _, Some(l), Some(b), End::Back) if l != *b => { + (_, _, Some(l), Some(b), End::Back) if l != *b && !b.is_scan_limited() => { return (false, false, vec![].into_iter()); } - // From here onwards, we know that the results are non-empty and if a cursor was - // supplied on the end the page is being drawn from, it was found in the results - // (implying a page follows in that direction). - (after, _, Some(l), before, End::Front) => { - let has_previous_page = after.is_some(); + // From here onwards, we know that the results are non-empty. 
In the forward + // pagination scenario, the presence of a previous page is determined by whether a + // cursor supplied on the end the page is being drawn from is found in the first + // position. The presence of a next page is determined by whether we have more + // results than the provided limit, and/ or if the end cursor element appears in the + // result set. + (after, Some(f), Some(l), before, End::Front) => { + let has_previous_page = after.is_some_and(|a| a.unlimited() == f); let prefix = has_previous_page as usize; // If results end with the before cursor, we will at least need to trim one element // from the suffix and we trim more off the end if there is more after applying the // limit. - let mut suffix = before.is_some_and(|b| *b == l) as usize; + let mut suffix = before.is_some_and(|b| b.unlimited() == l) as usize; suffix += results.len().saturating_sub(self.limit() + prefix + suffix); let has_next_page = suffix > 0; @@ -414,11 +440,13 @@ impl Page { } // Symmetric to the previous case, but drawing from the back. - (after, Some(f), _, before, End::Back) => { - let has_next_page = before.is_some(); + (after, Some(f), Some(l), before, End::Back) => { + // There is a next page if the last element of the results matches the `before`. + // This last element will get pruned from the result set. + let has_next_page = before.is_some_and(|b| b.unlimited() == l); let suffix = has_next_page as usize; - let mut prefix = after.is_some_and(|a| *a == f) as usize; + let mut prefix = after.is_some_and(|a| a.unlimited() == f) as usize; prefix += results.len().saturating_sub(self.limit() + prefix + suffix); let has_previous_page = prefix > 0; diff --git a/crates/sui-graphql-rpc/src/types/dynamic_field.rs b/crates/sui-graphql-rpc/src/types/dynamic_field.rs index 94e1c1cea0017..60e05ed610512 100644 --- a/crates/sui-graphql-rpc/src/types/dynamic_field.rs +++ b/crates/sui-graphql-rpc/src/types/dynamic_field.rs @@ -3,25 +3,21 @@ use async_graphql::connection::{Connection, CursorType, Edge}; use async_graphql::*; -use diesel::query_dsl::methods::FilterDsl; -use diesel::{ExpressionMethods, OptionalExtension}; use move_core_types::annotated_value::{self as A, MoveStruct}; -use sui_indexer::models::objects::{StoredHistoryObject, StoredObject}; -use sui_indexer::schema::objects; +use sui_indexer::models::objects::StoredHistoryObject; use sui_indexer::types::OwnerType; use sui_types::dynamic_field::{derive_dynamic_field_id, DynamicFieldInfo, DynamicFieldType}; use super::available_range::AvailableRange; use super::cursor::{Page, Target}; -use super::move_object::MoveObjectDowncastError; -use super::object::{self, deserialize_move_struct, Object, ObjectKind, ObjectLookup}; +use super::object::{self, deserialize_move_struct, Object, ObjectKind}; use super::type_filter::ExactTypeFilter; use super::{ base64::Base64, move_object::MoveObject, move_value::MoveValue, sui_address::SuiAddress, }; use crate::consistency::{build_objects_query, View}; use crate::data::package_resolver::PackageResolver; -use crate::data::{Db, DbConnection, QueryExecutor}; +use crate::data::{Db, QueryExecutor}; use crate::error::Error; use crate::filter; use crate::raw_query::RawQuery; @@ -174,9 +170,10 @@ impl DynamicField { let super_ = MoveObject::query( ctx, SuiAddress::from(field_id), - ObjectLookup::LatestAt { - parent_version, - checkpoint_viewed_at, + if let Some(parent_version) = parent_version { + Object::under_parent(parent_version, checkpoint_viewed_at) + } else { + Object::latest_at(checkpoint_viewed_at) }, ) .await?; @@ 
-184,59 +181,6 @@ impl DynamicField { super_.map(Self::try_from).transpose() } - /// Due to recent performance degradations, the existing `DynamicField::query` method is now - /// consistently timing out. This impacts features like `verify_zklogin_signature`, which - /// depends on resolving a dynamic field of 0x7 authenticator state. This method is a temporary - /// fix by fetching the data from the live `objects` table, and should only be used by - /// `verify_zklogin_signature`. Once we have fixed `objects_snapshot` table lag and backfilled - /// the `objects_version` table, this will no longer be needed. - pub(crate) async fn query_latest_dynamic_field( - db: &Db, - parent: SuiAddress, - name: DynamicFieldName, - kind: DynamicFieldType, - checkpoint_viewed_at: u64, - ) -> Result, Error> { - let type_ = match kind { - DynamicFieldType::DynamicField => name.type_.0, - DynamicFieldType::DynamicObject => { - DynamicFieldInfo::dynamic_object_field_wrapper(name.type_.0).into() - } - }; - - let field_id = derive_dynamic_field_id(parent, &type_, &name.bcs.0) - .map_err(|e| Error::Internal(format!("Failed to derive dynamic field id: {e}")))?; - - let object_id = SuiAddress::from(field_id); - - let Some(stored_obj): Option = db - .execute(move |conn| { - conn.first(move || { - objects::dsl::objects.filter(objects::dsl::object_id.eq(object_id.into_vec())) - }) - .optional() - }) - .await - .map_err(|e| Error::Internal(format!("Failed to fetch dynamic field: {e}")))? - else { - return Ok(None); - }; - - let history_object = StoredHistoryObject::from(stored_obj); - let gql_object = - Object::try_from_stored_history_object(history_object, checkpoint_viewed_at, None)?; - - let super_ = match MoveObject::try_from(&gql_object) { - Ok(object) => Some(object), - Err(MoveObjectDowncastError::WrappedOrDeleted) => None, - Err(MoveObjectDowncastError::NotAMoveObject) => { - return Err(Error::Internal(format!("{object_id} is not a Move object"))); - } - }; - - super_.map(Self::try_from).transpose() - } - /// Query the `db` for a `page` of dynamic fields attached to object with ID `parent`. The /// returned dynamic fields are bound by the `parent_version` if provided - each field will be /// the latest version at or before the provided version. If `parent_version` is not provided, diff --git a/crates/sui-graphql-rpc/src/types/epoch.rs b/crates/sui-graphql-rpc/src/types/epoch.rs index 6ca312ca3ba37..915493217d1f7 100644 --- a/crates/sui-graphql-rpc/src/types/epoch.rs +++ b/crates/sui-graphql-rpc/src/types/epoch.rs @@ -3,6 +3,7 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; +use crate::connection::ScanConnection; use crate::context_data::db_data_provider::{convert_to_validators, PgManager}; use crate::data::{DataLoader, Db, DbConnection, QueryExecutor}; use crate::error::Error; @@ -32,7 +33,7 @@ pub(crate) struct Epoch { pub checkpoint_viewed_at: u64, } -/// DataLoader key for fetching an `Epoch` by its ID, optionally constrained by a consistency +/// `DataLoader` key for fetching an `Epoch` by its ID, optionally constrained by a consistency /// cursor. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct EpochKey { @@ -229,6 +230,23 @@ impl Epoch { } /// The epoch's corresponding transaction blocks. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. 
It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range consists of all transactions in this epoch. async fn transaction_blocks( &self, ctx: &Context<'_>, @@ -237,13 +255,15 @@ impl Epoch { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; #[allow(clippy::unnecessary_lazy_evaluations)] // rust-lang/rust-clippy#9422 let Some(filter) = filter .unwrap_or_default() .intersect(TransactionBlockFilter { + // If `first_checkpoint_id` is 0, we include the 0th checkpoint by leaving it None after_checkpoint: (self.stored.first_checkpoint_id > 0) .then(|| UInt53::from(self.stored.first_checkpoint_id as u64 - 1)), before_checkpoint: self @@ -253,17 +273,12 @@ impl Epoch { ..Default::default() }) else { - return Ok(Connection::new(false, false)); + return Ok(ScanConnection::new(false, false)); }; - TransactionBlock::paginate( - ctx.data_unchecked(), - page, - filter, - self.checkpoint_viewed_at, - ) - .await - .extend() + TransactionBlock::paginate(ctx, page, filter, self.checkpoint_viewed_at, scan_limit) + .await + .extend() } } diff --git a/crates/sui-graphql-rpc/src/types/event.rs b/crates/sui-graphql-rpc/src/types/event.rs index 16284f01618cd..cb558c2fba6c3 100644 --- a/crates/sui-graphql-rpc/src/types/event.rs +++ b/crates/sui-graphql-rpc/src/types/event.rs @@ -3,7 +3,7 @@ use std::str::FromStr; -use super::cursor::{self, Page, Paginated, Target}; +use super::cursor::{self, Page, Paginated, ScanLimited, Target}; use super::digest::Digest; use super::type_filter::{ModuleFilter, TypeFilter}; use super::{ @@ -143,13 +143,12 @@ impl Event { /// checkpoint sequence numbers as the cursor to determine the correct page of results. The /// query can optionally be further `filter`-ed by the `EventFilter`. /// - /// The `checkpoint_viewed_at` parameter is represents the checkpoint sequence number at which - /// this page was queried for. Each entity returned in the connection will inherit this - /// checkpoint, so that when viewing that entity's state, it will be from the reference of this - /// checkpoint_viewed_at parameter. + /// The `checkpoint_viewed_at` parameter represents the checkpoint sequence number at which + /// this page was queried. Each entity returned in the connection inherits this checkpoint, so + /// that when viewing that entity's state, it's as if it's being viewed at this checkpoint. /// - /// If the `Page` is set, then this function will defer to the `checkpoint_viewed_at` in - /// the cursor if they are consistent. 
+ /// The cursors in `page` might also include checkpoint viewed at fields. If these are set, + /// they take precedence over the checkpoint that pagination is being conducted in. pub(crate) async fn paginate( db: &Db, page: Page, @@ -260,9 +259,6 @@ impl Event { checkpoint_sequence_number: stored_tx.checkpoint_sequence_number, #[cfg(feature = "postgres-feature")] senders: vec![Some(native_event.sender.to_vec())], - #[cfg(feature = "mysql-feature")] - #[cfg(not(feature = "postgres-feature"))] - senders: serde_json::to_value(vec![native_event.sender.to_vec()]).unwrap(), package: native_event.package_id.to_vec(), module: native_event.transaction_module.to_string(), event_type: native_event @@ -291,17 +287,6 @@ impl Event { { stored.senders.first() } - #[cfg(feature = "mysql-feature")] - #[cfg(not(feature = "postgres-feature"))] - { - stored - .senders - .as_array() - .ok_or_else(|| { - Error::Internal("Failed to parse event senders as array".to_string()) - })? - .first() - } }) else { return Err(Error::Internal("No senders found for event".to_string())); }; @@ -376,3 +361,5 @@ impl Checkpointed for Cursor { self.checkpoint_viewed_at } } + +impl ScanLimited for Cursor {} diff --git a/crates/sui-graphql-rpc/src/types/move_module.rs b/crates/sui-graphql-rpc/src/types/move_module.rs index e34ad6c46a8bc..f85d6fe558abc 100644 --- a/crates/sui-graphql-rpc/src/types/move_module.rs +++ b/crates/sui-graphql-rpc/src/types/move_module.rs @@ -15,7 +15,6 @@ use super::datatype::MoveDatatype; use super::move_enum::MoveEnum; use super::move_function::MoveFunction; use super::move_struct::MoveStruct; -use super::object::Object; use super::{base64::Base64, move_package::MovePackage, sui_address::SuiAddress}; #[derive(Clone)] @@ -40,7 +39,7 @@ impl MoveModule { MovePackage::query( ctx, self.storage_id, - Object::latest_at(self.checkpoint_viewed_at), + MovePackage::by_id_at(self.checkpoint_viewed_at), ) .await .extend()? @@ -91,7 +90,7 @@ impl MoveModule { let Some(package) = MovePackage::query( ctx, self.storage_id, - Object::latest_at(checkpoint_viewed_at), + MovePackage::by_id_at(checkpoint_viewed_at), ) .await .extend()? @@ -482,7 +481,7 @@ impl MoveModule { checkpoint_viewed_at: u64, ) -> Result, Error> { let Some(package) = - MovePackage::query(ctx, address, Object::latest_at(checkpoint_viewed_at)).await? + MovePackage::query(ctx, address, MovePackage::by_id_at(checkpoint_viewed_at)).await? else { return Ok(None); }; diff --git a/crates/sui-graphql-rpc/src/types/move_object.rs b/crates/sui-graphql-rpc/src/types/move_object.rs index d41b8dd639420..cabe71c92bf60 100644 --- a/crates/sui-graphql-rpc/src/types/move_object.rs +++ b/crates/sui-graphql-rpc/src/types/move_object.rs @@ -20,6 +20,7 @@ use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; use super::type_filter::ExactTypeFilter; use super::uint53::UInt53; use super::{coin::Coin, object::Object}; +use crate::connection::ScanConnection; use crate::data::Db; use crate::error::Error; use crate::types::stake::StakedSui; @@ -49,6 +50,7 @@ pub(crate) enum MoveObjectDowncastError { /// This interface is implemented by types that represent a Move object on-chain (A Move value whose /// type has `key`). +#[allow(clippy::duplicated_attributes)] #[derive(Interface)] #[graphql( name = "IMoveObject", @@ -261,6 +263,25 @@ impl MoveObject { } /// The transaction blocks that sent objects to this object. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. 
It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -269,9 +290,10 @@ impl MoveObject { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(&self.super_) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } diff --git a/crates/sui-graphql-rpc/src/types/move_package.rs b/crates/sui-graphql-rpc/src/types/move_package.rs index 1791b0bde32ca..a85bc75d8a661 100644 --- a/crates/sui-graphql-rpc/src/types/move_package.rs +++ b/crates/sui-graphql-rpc/src/types/move_package.rs @@ -1,16 +1,16 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::collections::{BTreeMap, BTreeSet, HashMap}; + use super::balance::{self, Balance}; use super::base64::Base64; use super::big_int::BigInt; use super::coin::Coin; -use super::cursor::{JsonCursor, Page}; +use super::cursor::{BcsCursor, JsonCursor, Page, RawPaginated, ScanLimited, Target}; use super::move_module::MoveModule; use super::move_object::MoveObject; -use super::object::{ - self, Object, ObjectFilter, ObjectImpl, ObjectLookup, ObjectOwner, ObjectStatus, -}; +use super::object::{self, Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}; use super::owner::OwnerImpl; use super::stake::StakedSui; use super::sui_address::SuiAddress; @@ -18,11 +18,23 @@ use super::suins_registration::{DomainFormat, SuinsRegistration}; use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; use super::type_filter::ExactTypeFilter; use super::uint53::UInt53; -use crate::consistency::ConsistentNamedCursor; +use crate::connection::ScanConnection; +use crate::consistency::{Checkpointed, ConsistentNamedCursor}; +use crate::data::{DataLoader, Db, DbConnection, QueryExecutor}; use crate::error::Error; +use crate::raw_query::RawQuery; +use crate::types::sui_address::addr; +use crate::{filter, query}; use async_graphql::connection::{Connection, CursorType, Edge}; +use async_graphql::dataloader::Loader; use async_graphql::*; +use diesel::prelude::QueryableByName; +use diesel::{BoolExpressionMethods, ExpressionMethods, JoinOnDsl, QueryDsl, Selectable}; +use serde::{Deserialize, Serialize}; +use sui_indexer::models::objects::StoredHistoryObject; +use sui_indexer::schema::packages; use sui_package_resolver::{error::Error as PackageCacheError, Package as ParsedMovePackage}; +use sui_types::is_system_package; use sui_types::{move_package::MovePackage as NativeMovePackage, object::Data}; #[derive(Clone)] @@ -35,6 +47,49 @@ pub(crate) struct MovePackage { pub native: NativeMovePackage, } +/// Filter for paginating `MovePackage`s that were created within a range of checkpoints. +#[derive(InputObject, Debug, Default, Clone)] +pub(crate) struct MovePackageCheckpointFilter { + /// Fetch packages that were published strictly after this checkpoint. Omitting this fetches + /// packages published since genesis. + pub after_checkpoint: Option, + + /// Fetch packages that were published strictly before this checkpoint. Omitting this fetches + /// packages published up to the latest checkpoint (inclusive). + pub before_checkpoint: Option, +} + +/// Filter for paginating versions of a given `MovePackage`. +#[derive(InputObject, Debug, Default, Clone)] +pub(crate) struct MovePackageVersionFilter { + /// Fetch versions of this package that are strictly newer than this version. Omitting this + /// fetches versions since the original version. + pub after_version: Option, + + /// Fetch versions of this package that are strictly older than this version. Omitting this + /// fetches versions up to the latest version (inclusive). + pub before_version: Option, +} + +/// Filter for a point query of a MovePackage, supporting querying different versions of a package +/// by their version. Note that different versions of the same user package exist at different IDs +/// to each other, so this is different from looking up the historical version of an object. +pub(crate) enum PackageLookup { + /// Get the package at the given address, if it was created before the given checkpoint. 
+ ById { checkpoint_viewed_at: u64 }, + + /// Get the package whose original ID matches the storage ID of the package at the given + /// address, but whose version is `version`. + Versioned { + version: u64, + checkpoint_viewed_at: u64, + }, + + /// Get the package whose original ID matches the storage ID of the package at the given + /// address, but that has the max version at the given checkpoint. + Latest { checkpoint_viewed_at: u64 }, +} + /// Information used by a package to link to a specific version of its dependency. #[derive(SimpleObject)] struct Linkage { @@ -62,9 +117,51 @@ struct TypeOrigin { defining_id: SuiAddress, } +/// A wrapper around the stored representation of a package, used to implement pagination-related +/// traits. +#[derive(Selectable, QueryableByName)] +#[diesel(table_name = packages)] +struct StoredHistoryPackage { + original_id: Vec, + #[diesel(embed)] + object: StoredHistoryObject, +} + pub(crate) struct MovePackageDowncastError; pub(crate) type CModule = JsonCursor; +pub(crate) type Cursor = BcsCursor; + +/// The inner struct for the `MovePackage` cursor. The package is identified by the checkpoint it +/// was created in, its original ID, and its version, and the `checkpoint_viewed_at` specifies the +/// checkpoint snapshot that the data came from. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] +pub(crate) struct PackageCursor { + pub checkpoint_sequence_number: u64, + pub original_id: Vec, + pub package_version: u64, + pub checkpoint_viewed_at: u64, +} + +/// `DataLoader` key for fetching the storage ID of the (user) package that shares an original (aka +/// runtime) ID with the package stored at `package_id`, and whose version is `version`. +/// +/// Note that this is different from looking up the historical version of an object -- the query +/// returns the ID of the package (each version of a user package is at a different ID) -- and it +/// does not work for system packages (whose versions do all reside under the same ID). +#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] +struct PackageVersionKey { + address: SuiAddress, + version: u64, +} + +/// `DataLoader` key for fetching the latest version of a user package: The package with the largest +/// version whose original ID matches the original ID of the package at `address`. +#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] +struct LatestKey { + address: SuiAddress, + checkpoint_viewed_at: u64, +} /// A MovePackage is a kind of Move object that represents code that has been published on chain. /// It exposes information about its modules, type definitions, functions, and dependencies. @@ -236,6 +333,25 @@ impl MovePackage { /// The transaction blocks that sent objects to this package. /// /// Note that objects that have been sent to a package become inaccessible. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). 
If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -244,9 +360,10 @@ impl MovePackage { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(&self.super_) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } @@ -255,6 +372,60 @@ impl MovePackage { ObjectImpl(&self.super_).bcs().await } + /// Fetch another version of this package (the package that shares this package's original ID, + /// but has the specified `version`). + async fn package_at_version( + &self, + ctx: &Context<'_>, + version: u64, + ) -> Result> { + MovePackage::query( + ctx, + self.super_.address, + MovePackage::by_version(version, self.checkpoint_viewed_at_impl()), + ) + .await + .extend() + } + + /// Fetch all versions of this package (packages that share this package's original ID), + /// optionally bounding the versions exclusively from below with `afterVersion`, or from above + /// with `beforeVersion`. + async fn package_versions( + &self, + ctx: &Context<'_>, + first: Option, + after: Option, + last: Option, + before: Option, + filter: Option, + ) -> Result> { + let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; + + MovePackage::paginate_by_version( + ctx.data_unchecked(), + page, + self.super_.address, + filter, + self.checkpoint_viewed_at_impl(), + ) + .await + .extend() + } + + /// Fetch the latest version of this package (the package with the highest `version` that shares + /// this package's original ID). + async fn latest_package(&self, ctx: &Context<'_>) -> Result { + Ok(MovePackage::query( + ctx, + self.super_.address, + MovePackage::latest_at(self.checkpoint_viewed_at_impl()), + ) + .await + .extend()? + .ok_or_else(|| Error::Internal("No latest version found".to_string()))?) + } + /// A representation of the module called `name` in this package, including the /// structs and functions it defines. async fn module(&self, name: String) -> Result> { @@ -416,11 +587,82 @@ impl MovePackage { } } + /// Look-up the package by its Storage ID, as of a given checkpoint. + pub(crate) fn by_id_at(checkpoint_viewed_at: u64) -> PackageLookup { + PackageLookup::ById { + checkpoint_viewed_at, + } + } + + /// Look-up a specific version of the package, identified by the storage ID of any version of + /// the package, and the desired version (the actual object loaded might be at a different + /// object ID).
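+    //
+    // NOTE(editor): an illustrative sketch, not part of the original change. Every
+    // upgrade of a user package is published at a fresh object ID, so a versioned
+    // look-up translates (address, version) to the storage ID of that version and
+    // then loads the object there. In miniature, with a hypothetical in-memory
+    // index standing in for the `packages` table:
+    //
+    //     use std::collections::HashMap;
+    //
+    //     /// Maps (original_id, version) to the storage ID of that version.
+    //     type VersionIndex = HashMap<(Vec<u8>, u64), Vec<u8>>;
+    //
+    //     fn translate(ix: &VersionIndex, original_id: &[u8], v: u64) -> Option<Vec<u8>> {
+    //         ix.get(&(original_id.to_vec(), v)).cloned()
+    //     }
+    //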
+ pub(crate) fn by_version(version: u64, checkpoint_viewed_at: u64) -> PackageLookup { + PackageLookup::Versioned { + version, + checkpoint_viewed_at, + } + } + + /// Look-up the package that shares the same original ID as the package at `address`, but has + /// the latest version, as of the given checkpoint. + pub(crate) fn latest_at(checkpoint_viewed_at: u64) -> PackageLookup { + PackageLookup::Latest { + checkpoint_viewed_at, + } + } + pub(crate) async fn query( ctx: &Context<'_>, address: SuiAddress, - key: ObjectLookup, + key: PackageLookup, ) -> Result, Error> { + let (address, key) = match key { + PackageLookup::ById { + checkpoint_viewed_at, + } => (address, Object::latest_at(checkpoint_viewed_at)), + + PackageLookup::Versioned { + version, + checkpoint_viewed_at, + } => { + if is_system_package(address) { + (address, Object::at_version(version, checkpoint_viewed_at)) + } else { + let DataLoader(loader) = &ctx.data_unchecked(); + let Some(translation) = loader + .load_one(PackageVersionKey { address, version }) + .await? + else { + return Ok(None); + }; + + (translation, Object::latest_at(checkpoint_viewed_at)) + } + } + + PackageLookup::Latest { + checkpoint_viewed_at, + } => { + if is_system_package(address) { + (address, Object::latest_at(checkpoint_viewed_at)) + } else { + let DataLoader(loader) = &ctx.data_unchecked(); + let Some(translation) = loader + .load_one(LatestKey { + address, + checkpoint_viewed_at, + }) + .await? + else { + return Ok(None); + }; + + (translation, Object::latest_at(checkpoint_viewed_at)) + } + } + }; + let Some(object) = Object::query(ctx, address, key).await? else { return Ok(None); }; @@ -429,6 +671,342 @@ impl MovePackage { Error::Internal(format!("{address} is not a package")) })?)) } + + /// Query the database for a `page` of Move packages. The Page uses the checkpoint sequence + /// number the package was created at, its original ID, and its version as the cursor. The query + /// can optionally be filtered by a bound on the checkpoints the packages were created in. + /// + /// The `checkpoint_viewed_at` parameter represents the checkpoint sequence number at which this + /// page was queried. Each entity returned in the connection will inherit this checkpoint, so + /// that when viewing that entity's state, it will be as if it is being viewed at this + /// checkpoint. + /// + /// The cursors in `page` may also include checkpoint viewed at fields. If these are set, they + /// take precedence over the checkpoint that pagination is being conducted in. + pub(crate) async fn paginate_by_checkpoint( + db: &Db, + page: Page, + filter: Option, + checkpoint_viewed_at: u64, + ) -> Result, Error> { + let cursor_viewed_at = page.validate_cursor_consistency()?; + let checkpoint_viewed_at = cursor_viewed_at.unwrap_or(checkpoint_viewed_at); + + let after_checkpoint: Option = filter + .as_ref() + .and_then(|f| f.after_checkpoint) + .map(|v| v.into()); + + // Clamp the "before checkpoint" bound by "checkpoint viewed at". 
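+        // NOTE(editor): a worked example of the clamp below, not part of the
+        // original change. The bound is exclusive, so `checkpoint_viewed_at + 1`
+        // admits packages created at the snapshot checkpoint itself. With
+        // `checkpoint_viewed_at = 100`:
+        //
+        //     before_checkpoint = Some(250) -> min(250, 101) = 101
+        //     before_checkpoint = None      -> min(u64::MAX, 101) = 101
+        //     before_checkpoint = Some(50)  -> min(50, 101) = 50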
+ let before_checkpoint = filter + .as_ref() + .and_then(|f| f.before_checkpoint) + .map(|v| v.into()) + .unwrap_or(u64::MAX) + .min(checkpoint_viewed_at + 1); + + let (prev, next, results) = db + .execute(move |conn| { + let mut q = query!( + r#" + SELECT + p.original_id, + o.* + FROM + packages p + INNER JOIN + objects_history o + ON + p.package_id = o.object_id + AND p.package_version = o.object_version + AND p.checkpoint_sequence_number = o.checkpoint_sequence_number + "# + ); + + q = filter!( + q, + format!("o.checkpoint_sequence_number < {before_checkpoint}") + ); + if let Some(after) = after_checkpoint { + q = filter!(q, format!("{after} < o.checkpoint_sequence_number")); + } + + page.paginate_raw_query::(conn, checkpoint_viewed_at, q) + }) + .await?; + + let mut conn = Connection::new(prev, next); + + // The "checkpoint viewed at" sets a consistent upper bound for the nested queries. + for stored in results { + let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); + let package = + MovePackage::try_from_stored_history_object(stored.object, checkpoint_viewed_at)?; + conn.edges.push(Edge::new(cursor, package)); + } + + Ok(conn) + } + + /// Query the database for a `page` of Move packages. The Page uses the checkpoint sequence + /// number the package was created at, its original ID, and its version as the cursor. The query + /// is filtered by the ID of a package and will only return packages from the same family + /// (sharing the same original ID as the package whose ID was given), and can optionally be + /// filtered by an upper and lower bound on package version. + /// + /// The `checkpoint_viewed_at` parameter represents the checkpoint sequence number at which this + /// page was queried. Each entity returned in the connection will inherit this checkpoint, so + /// that when viewing that entity's state, it will be as if it is being viewed at this + /// checkpoint. + /// + /// The cursors in `page` may also include checkpoint viewed at fields. If these are set, they + /// take precedence over the checkpoint that pagination is being conducted in. + pub(crate) async fn paginate_by_version( + db: &Db, + page: Page, + package: SuiAddress, + filter: Option, + checkpoint_viewed_at: u64, + ) -> Result, Error> { + let cursor_viewed_at = page.validate_cursor_consistency()?; + let checkpoint_viewed_at = cursor_viewed_at.unwrap_or(checkpoint_viewed_at); + let (prev, next, results) = db + .execute(move |conn| { + page.paginate_raw_query::( + conn, + checkpoint_viewed_at, + if is_system_package(package) { + system_package_version_query(package, filter) + } else { + user_package_version_query(package, filter) + }, + ) + }) + .await?; + + let mut conn = Connection::new(prev, next); + + // The "checkpoint viewed at" sets a consistent upper bound for the nested queries. + for stored in results { + let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); + let package = + MovePackage::try_from_stored_history_object(stored.object, checkpoint_viewed_at)?; + conn.edges.push(Edge::new(cursor, package)); + } + + Ok(conn) + } + + /// `checkpoint_viewed_at` points to the checkpoint snapshot that this `MovePackage` came from. + /// This is stored in the `MovePackage` so that related fields from the package are read from + /// the same checkpoint (consistently). 
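+    //
+    // NOTE(editor): an illustrative miniature of the consistency rule described
+    // above, not part of the original change. Each value remembers the snapshot
+    // it was read at, and nested reads reuse that snapshot instead of the
+    // service's current checkpoint:
+    //
+    //     struct Pkg { checkpoint_viewed_at: u64 }
+    //
+    //     fn nested_read_bound(pkg: &Pkg, current_checkpoint: u64) -> u64 {
+    //         let _ = current_checkpoint; // deliberately ignored
+    //         pkg.checkpoint_viewed_at
+    //     }
+    //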
+ pub(crate) fn try_from_stored_history_object( + history_object: StoredHistoryObject, + checkpoint_viewed_at: u64, + ) -> Result { + let object = Object::try_from_stored_history_object( + history_object, + checkpoint_viewed_at, + /* root_version */ None, + )?; + Self::try_from(&object).map_err(|_| Error::Internal("Not a package!".to_string())) + } +} + +impl Checkpointed for Cursor { + fn checkpoint_viewed_at(&self) -> u64 { + self.checkpoint_viewed_at + } +} + +impl RawPaginated for StoredHistoryPackage { + fn filter_ge(cursor: &Cursor, query: RawQuery) -> RawQuery { + filter!( + query, + format!( + "o.checkpoint_sequence_number > {cp} OR (\ + o.checkpoint_sequence_number = {cp} AND + original_id > '\\x{id}'::bytea OR (\ + original_id = '\\x{id}'::bytea AND \ + o.object_version >= {pv}\ + ))", + cp = cursor.checkpoint_sequence_number, + id = hex::encode(&cursor.original_id), + pv = cursor.package_version, + ) + ) + } + + fn filter_le(cursor: &Cursor, query: RawQuery) -> RawQuery { + filter!( + query, + format!( + "o.checkpoint_sequence_number < {cp} OR (\ + o.checkpoint_sequence_number = {cp} AND + original_id < '\\x{id}'::bytea OR (\ + original_id = '\\x{id}'::bytea AND \ + o.object_version <= {pv}\ + ))", + cp = cursor.checkpoint_sequence_number, + id = hex::encode(&cursor.original_id), + pv = cursor.package_version, + ) + ) + } + + fn order(asc: bool, query: RawQuery) -> RawQuery { + if asc { + query + .order_by("o.checkpoint_sequence_number ASC") + .order_by("original_id ASC") + .order_by("o.object_version ASC") + } else { + query + .order_by("o.checkpoint_sequence_number DESC") + .order_by("original_id DESC") + .order_by("o.object_version DESC") + } + } +} + +impl Target for StoredHistoryPackage { + fn cursor(&self, checkpoint_viewed_at: u64) -> Cursor { + Cursor::new(PackageCursor { + checkpoint_sequence_number: self.object.checkpoint_sequence_number as u64, + original_id: self.original_id.clone(), + package_version: self.object.object_version as u64, + checkpoint_viewed_at, + }) + } +} + +impl ScanLimited for BcsCursor {} + +#[async_trait::async_trait] +impl Loader for Db { + type Value = SuiAddress; + type Error = Error; + + async fn load( + &self, + keys: &[PackageVersionKey], + ) -> Result, Error> { + use packages::dsl; + let other = diesel::alias!(packages as other); + + let id_versions: BTreeSet<_> = keys + .iter() + .map(|k| (k.address.into_vec(), k.version as i64)) + .collect(); + + let stored_packages: Vec<(Vec, i64, Vec)> = self + .execute(move |conn| { + conn.results(|| { + let mut query = dsl::packages + .inner_join(other.on(dsl::original_id.eq(other.field(dsl::original_id)))) + .select(( + dsl::package_id, + other.field(dsl::package_version), + other.field(dsl::package_id), + )) + .into_boxed(); + + for (id, version) in id_versions.iter().cloned() { + query = query.or_filter( + dsl::package_id + .eq(id) + .and(other.field(dsl::package_version).eq(version)), + ); + } + + query + }) + }) + .await + .map_err(|e| Error::Internal(format!("Failed to load packages: {e}")))?; + + let mut result = HashMap::new(); + for (id, version, other_id) in stored_packages { + result.insert( + PackageVersionKey { + address: addr(&id)?, + version: version as u64, + }, + addr(&other_id)?, + ); + } + + Ok(result) + } +} + +#[async_trait::async_trait] +impl Loader for Db { + type Value = SuiAddress; + type Error = Error; + + async fn load(&self, keys: &[LatestKey]) -> Result, Error> { + use packages::dsl; + let other = diesel::alias!(packages as other); + + let mut ids_by_cursor: BTreeMap<_, 
BTreeSet<_>> = BTreeMap::new(); + for key in keys { + ids_by_cursor + .entry(key.checkpoint_viewed_at) + .or_default() + .insert(key.address.into_vec()); + } + + // Issue concurrent reads for each group of IDs + let futures = ids_by_cursor + .into_iter() + .map(|(checkpoint_viewed_at, ids)| { + self.execute(move |conn| { + let results: Vec<(Vec, Vec)> = conn.results(|| { + let o_original_id = other.field(dsl::original_id); + let o_package_id = other.field(dsl::package_id); + let o_cp_seq_num = other.field(dsl::checkpoint_sequence_number); + let o_version = other.field(dsl::package_version); + + let query = dsl::packages + .inner_join(other.on(dsl::original_id.eq(o_original_id))) + .select((dsl::package_id, o_package_id)) + .filter(dsl::package_id.eq_any(ids.iter().cloned())) + .filter(o_cp_seq_num.le(checkpoint_viewed_at as i64)) + .order_by((dsl::package_id, dsl::original_id, o_version.desc())) + .distinct_on((dsl::package_id, dsl::original_id)); + query + })?; + + Ok::<_, diesel::result::Error>( + results + .into_iter() + .map(|(p, latest)| (checkpoint_viewed_at, p, latest)) + .collect::>(), + ) + }) + }); + + // Wait for the reads to all finish, and gather them into the result map. + let groups = futures::future::join_all(futures).await; + + let mut results = HashMap::new(); + for group in groups { + for (checkpoint_viewed_at, address, latest) in + group.map_err(|e| Error::Internal(format!("Failed to fetch packages: {e}")))? + { + results.insert( + LatestKey { + address: addr(&address)?, + checkpoint_viewed_at, + }, + addr(&latest)?, + ); + } + } + + Ok(results) + } } impl TryFrom<&Object> for MovePackage { @@ -449,3 +1027,94 @@ impl TryFrom<&Object> for MovePackage { } } } + +/// Query for fetching all the versions of a system package (assumes that `package` has already been +/// verified as a system package). This is an `objects_history` query disguised as a package query. +fn system_package_version_query( + package: SuiAddress, + filter: Option, +) -> RawQuery { + // Query uses a left join to force the query planner to use `objects_version` in the outer loop. 
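+    // NOTE(editor): an assumption about the intent of the LEFT JOIN, not part of
+    // the original change. An INNER JOIN would leave the planner free to reorder
+    // the join and drive it from the much larger `objects_history` table; an
+    // outer join constrains reordering, keeping the small `objects_version`
+    // relation in the outer loop. Because the ON clause equates key columns, no
+    // NULL-extended rows survive in practice, so the results match an inner join.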
+ let mut q = query!( + r#" + SELECT + o.object_id AS original_id, + o.* + FROM + objects_version v + LEFT JOIN + objects_history o + ON + v.object_id = o.object_id + AND v.object_version = o.object_version + AND v.cp_sequence_number = o.checkpoint_sequence_number + "# + ); + + q = filter!( + q, + format!( + "v.object_id = '\\x{}'::bytea", + hex::encode(package.into_vec()) + ) + ); + + if let Some(after) = filter.as_ref().and_then(|f| f.after_version) { + let a: u64 = after.into(); + q = filter!(q, format!("v.object_version > {a}")); + } + + if let Some(before) = filter.as_ref().and_then(|f| f.before_version) { + let b: u64 = before.into(); + q = filter!(q, format!("v.object_version < {b}")); + } + + q +} + +/// Query for fetching all the versions of a non-system package (assumes that `package` has already +/// been verified not to be a system package). fn user_package_version_query( + package: SuiAddress, + filter: Option, +) -> RawQuery { + let mut q = query!( + r#" + SELECT + p.original_id, + o.* + FROM + packages q + INNER JOIN + packages p + ON + q.original_id = p.original_id + INNER JOIN + objects_history o + ON + p.package_id = o.object_id + AND p.package_version = o.object_version + AND p.checkpoint_sequence_number = o.checkpoint_sequence_number + "# + ); + + q = filter!( + q, + format!( + "q.package_id = '\\x{}'::bytea", + hex::encode(package.into_vec()) + ) + ); + + if let Some(after) = filter.as_ref().and_then(|f| f.after_version) { + let a: u64 = after.into(); + q = filter!(q, format!("p.package_version > {a}")); + } + + if let Some(before) = filter.as_ref().and_then(|f| f.before_version) { + let b: u64 = before.into(); + q = filter!(q, format!("p.package_version < {b}")); + } + + q +} diff --git a/crates/sui-graphql-rpc/src/types/object.rs b/crates/sui-graphql-rpc/src/types/object.rs index 7243666ef520c..eebe0b86447ed 100644 --- a/crates/sui-graphql-rpc/src/types/object.rs +++ b/crates/sui-graphql-rpc/src/types/object.rs @@ -9,7 +9,7 @@ use super::balance::{self, Balance}; use super::big_int::BigInt; use super::coin::Coin; use super::coin_metadata::CoinMetadata; -use super::cursor::{self, Page, RawPaginated, Target}; +use super::cursor::{self, Page, RawPaginated, ScanLimited, Target}; use super::digest::Digest; use super::display::{Display, DisplayEntry}; use super::dynamic_field::{DynamicField, DynamicFieldName}; @@ -17,12 +17,14 @@ use super::move_object::MoveObject; use super::move_package::MovePackage; use super::owner::OwnerImpl; use super::stake::StakedSui; +use super::sui_address::addr; use super::suins_registration::{DomainFormat, SuinsRegistration}; use super::transaction_block; use super::transaction_block::TransactionBlockFilter; use super::type_filter::{ExactTypeFilter, TypeFilter}; use super::uint53::UInt53; use super::{owner::Owner, sui_address::SuiAddress, transaction_block::TransactionBlock}; +use crate::connection::ScanConnection; use crate::consistency::{build_objects_query, Checkpointed, View}; use crate::data::package_resolver::PackageResolver; use crate::data::{DataLoader, Db, DbConnection, QueryExecutor}; @@ -34,12 +36,12 @@ use crate::{filter, or_filter}; use async_graphql::connection::{CursorType, Edge}; use async_graphql::dataloader::Loader; use async_graphql::{connection::Connection, *}; -use diesel::{BoolExpressionMethods, ExpressionMethods, QueryDsl}; +use diesel::{BoolExpressionMethods, ExpressionMethods, JoinOnDsl, QueryDsl, SelectableHelper}; use move_core_types::annotated_value::{MoveStruct, MoveTypeLayout}; use
move_core_types::language_storage::StructTag; use serde::{Deserialize, Serialize}; use sui_indexer::models::objects::{StoredDeletedHistoryObject, StoredHistoryObject}; -use sui_indexer::schema::objects_history; +use sui_indexer::schema::{objects_history, objects_version}; use sui_indexer::types::ObjectStatus as NativeObjectStatus; use sui_indexer::types::OwnerType; use sui_types::object::bounded_visitor::BoundedVisitor; @@ -181,12 +183,18 @@ pub(crate) struct AddressOwner { owner: Option, } +/// Filter for a point query of an Object. pub(crate) enum ObjectLookup { LatestAt { - /// The parent version to be used as an optional upper bound for the query. Look for the - /// latest version of a child object that is less than or equal to this upper bound. - parent_version: Option, - /// The checkpoint sequence number at which this was viewed at + /// The checkpoint sequence number this was viewed at. + checkpoint_viewed_at: u64, + }, + + UnderParent { + /// The parent version to be used as an upper bound for the query. Look for the latest + /// version of a child object whose version is less than or equal to this upper bound. + parent_version: u64, + /// The checkpoint sequence number this was viewed at. checkpoint_viewed_at: u64, }, @@ -213,6 +221,7 @@ pub(crate) struct HistoricalObjectCursor { /// Interface implemented by on-chain values that are addressable by an ID (also referred to as its /// address). This includes Move objects and packages. +#[allow(clippy::duplicated_attributes)] #[derive(Interface)] #[graphql( name = "IObject", @@ -254,7 +263,8 @@ pub(crate) struct HistoricalObjectCursor { arg(name = "last", ty = "Option"), arg(name = "before", ty = "Option"), arg(name = "filter", ty = "Option"), - ty = "Connection", + arg(name = "scan_limit", ty = "Option"), + ty = "ScanConnection", desc = "The transaction blocks that sent objects to this object." ), field( @@ -273,7 +283,7 @@ pub(crate) enum IObject { SuinsRegistration(SuinsRegistration), } -/// DataLoader key for fetching an `Object` at a specific version, constrained by a consistency +/// `DataLoader` key for fetching an `Object` at a specific version, constrained by a consistency /// cursor (if that version was created after the checkpoint the query is viewing at, then it will /// fail). #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] @@ -283,13 +293,21 @@ struct HistoricalKey { checkpoint_viewed_at: u64, } -/// DataLoader key for fetching the latest version of an `Object` as of a consistency cursor. The -/// query can optionally be bounded by a `parent_version` which imposes an additional requirement -/// that the object's version is bounded above by the parent version. +/// `DataLoader` key for fetching the latest version of an object whose parent object has version +/// `parent_version`, as of `checkpoint_viewed_at`. This look-up can fail to find a valid object if +/// the key is not self-consistent, for example if the `parent_version` is set to a higher version +/// than the object's actual parent as of `checkpoint_viewed_at`. +#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] +struct ParentVersionKey { + id: SuiAddress, + parent_version: u64, + checkpoint_viewed_at: u64, +} + +/// `DataLoader` key for fetching the latest version of an object as of a given checkpoint. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct LatestAtKey { id: SuiAddress, - parent_version: Option, checkpoint_viewed_at: u64, } @@ -437,6 +455,25 @@ impl Object { } /// The transaction blocks that sent objects to this object.
+ /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -445,9 +482,10 @@ impl Object { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(self) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } @@ -542,9 +580,7 @@ impl ObjectImpl<'_> { pub(crate) async fn owner(&self, ctx: &Context<'_>) -> Option { use NativeOwner as O; - let Some(native) = self.0.native_impl() else { - return None; - }; + let native = self.0.native_impl()?; match native.owner { O::AddressOwner(address) => { @@ -606,7 +642,8 @@ impl ObjectImpl<'_> { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; let Some(filter) = filter @@ -616,17 +653,12 @@ impl ObjectImpl<'_> { ..Default::default() }) else { - return Ok(Connection::new(false, false)); + return Ok(ScanConnection::new(false, false)); }; - TransactionBlock::paginate( - ctx.data_unchecked(), - page, - filter, - self.0.checkpoint_viewed_at, - ) - .await - .extend() + TransactionBlock::paginate(ctx, page, filter, self.0.checkpoint_viewed_at, scan_limit) + .await + .extend() } pub(crate) async fn bcs(&self) -> Result> { @@ -807,7 +839,6 @@ impl Object { /// Look-up the latest version of the object as of a given checkpoint. pub(crate) fn latest_at(checkpoint_viewed_at: u64) -> ObjectLookup { ObjectLookup::LatestAt { - parent_version: None, checkpoint_viewed_at, } } @@ -815,8 +846,8 @@ impl Object { /// Look-up the latest version of an object whose version is less than or equal to its parent's /// version, as of a given checkpoint. 
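    //
    // NOTE(editor): an illustrative sketch, not part of the original change.
    // The parent bound selects the newest child version the parent could have
    // referenced. In miniature:
    //
    //     /// Latest version in `versions` that is <= `parent_version`.
    //     fn visible(mut versions: Vec<u64>, parent_version: u64) -> Option<u64> {
    //         versions.sort_unstable();
    //         versions.into_iter().filter(|v| *v <= parent_version).last()
    //     }
    //
    //     assert_eq!(visible(vec![1, 4, 9], 5), Some(4));
    //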
pub(crate) fn under_parent(parent_version: u64, checkpoint_viewed_at: u64) -> ObjectLookup { - ObjectLookup::LatestAt { - parent_version: Some(parent_version), + ObjectLookup::UnderParent { + parent_version, checkpoint_viewed_at, } } @@ -849,18 +880,30 @@ impl Object { }) .await } - ObjectLookup::LatestAt { + + ObjectLookup::UnderParent { parent_version, checkpoint_viewed_at, } => { loader - .load_one(LatestAtKey { + .load_one(ParentVersionKey { id, parent_version, checkpoint_viewed_at, }) .await } + + ObjectLookup::LatestAt { + checkpoint_viewed_at, + } => { + loader + .load_one(LatestAtKey { + id, + checkpoint_viewed_at, + }) + .await + } } } @@ -1058,7 +1101,7 @@ impl ObjectFilter { hex::encode(id.into_vec()) ) .unwrap(); - prefix = ","; + prefix = ", "; } inner.push(')'); query = or_filter!(query, inner); @@ -1132,6 +1175,8 @@ impl Checkpointed for Cursor { } } +impl ScanLimited for Cursor {} + impl RawPaginated for StoredHistoryObject { fn filter_ge(cursor: &Cursor, query: RawQuery) -> RawQuery { filter!( @@ -1177,7 +1222,8 @@ impl Loader for Db { type Error = Error; async fn load(&self, keys: &[HistoricalKey]) -> Result, Error> { - use objects_history::dsl; + use objects_history::dsl as h; + use objects_version::dsl as v; let id_versions: BTreeSet<_> = keys .iter() @@ -1187,12 +1233,19 @@ impl Loader for Db { let objects: Vec = self .execute(move |conn| { conn.results(move || { - let mut query = dsl::objects_history.into_boxed(); + let mut query = h::objects_history + .inner_join( + v::objects_version.on(v::cp_sequence_number + .eq(h::checkpoint_sequence_number) + .and(v::object_id.eq(h::object_id)) + .and(v::object_version.eq(h::object_version))), + ) + .select(StoredHistoryObject::as_select()) + .into_boxed(); - // TODO: Speed up using an `obj_version` table. for (id, version) in id_versions.iter().cloned() { - query = query - .or_filter(dsl::object_id.eq(id).and(dsl::object_version.eq(version))); + query = + query.or_filter(v::object_id.eq(id).and(v::object_version.eq(version))); } query @@ -1234,17 +1287,20 @@ impl Loader for Db { } #[async_trait::async_trait] -impl Loader for Db { +impl Loader for Db { type Value = Object; type Error = Error; - async fn load(&self, keys: &[LatestAtKey]) -> Result, Error> { + async fn load( + &self, + keys: &[ParentVersionKey], + ) -> Result, Error> { // Group keys by checkpoint viewed at and parent version -- we'll issue a separate query for // each group. #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy)] struct GroupKey { checkpoint_viewed_at: u64, - parent_version: Option, + parent_version: u64, } let mut keys_by_cursor_and_parent_version: BTreeMap<_, BTreeSet<_>> = BTreeMap::new(); @@ -1257,50 +1313,40 @@ impl Loader for Db { keys_by_cursor_and_parent_version .entry(group_key) .or_default() - .insert(key.id); + .insert(key.id.into_vec()); } // Issue concurrent reads for each group of keys. let futures = keys_by_cursor_and_parent_version .into_iter() .map(|(group_key, ids)| { - self.execute_repeatable(move |conn| { - let Some(range) = AvailableRange::result(conn, group_key.checkpoint_viewed_at)? - else { - return Ok::, diesel::result::Error>( - vec![], - ); - }; - - let filter = ObjectFilter { - object_ids: Some(ids.iter().cloned().collect()), - ..Default::default() - }; - - // TODO: Implement queries that use a parent version bound using an - // `obj_version` table. 
- let apply_parent_bound = |q: RawQuery| { - if let Some(parent_version) = group_key.parent_version { - filter!(q, format!("object_version <= {parent_version}")) - } else { - q - } - }; - - Ok(conn - .results(move || { - build_objects_query( - View::Consistent, - range, - &Page::bounded(ids.len() as u64), - |q| apply_parent_bound(filter.apply(q)), - apply_parent_bound, + self.execute(move |conn| { + let stored: Vec = conn.results(move || { + use objects_history::dsl as h; + use objects_version::dsl as v; + + h::objects_history + .inner_join( + v::objects_version.on(v::cp_sequence_number + .eq(h::checkpoint_sequence_number) + .and(v::object_id.eq(h::object_id)) + .and(v::object_version.eq(h::object_version))), ) + .select(StoredHistoryObject::as_select()) + .filter(v::object_id.eq_any(ids.iter().cloned())) + .filter(v::object_version.le(group_key.parent_version as i64)) + .distinct_on(v::object_id) + .order_by(v::object_id) + .then_order_by(v::object_version.desc()) .into_boxed() - })? - .into_iter() - .map(|r| (group_key, r)) - .collect()) + })?; + + Ok::<_, diesel::result::Error>( + stored + .into_iter() + .map(|stored| (group_key, stored)) + .collect::>(), + ) }) }); @@ -1312,15 +1358,21 @@ impl Loader for Db { for (group_key, stored) in group.map_err(|e| Error::Internal(format!("Failed to fetch objects: {e}")))? { + // This particular object is invalid -- it didn't exist at the checkpoint we are + // viewing at. + if group_key.checkpoint_viewed_at < stored.checkpoint_sequence_number as u64 { + continue; + } + let object = Object::try_from_stored_history_object( stored, group_key.checkpoint_viewed_at, // If `LatestAtKey::parent_version` is set, it must have been correctly // propagated from the `Object::root_version` of some object. - group_key.parent_version, + Some(group_key.parent_version), )?; - let key = LatestAtKey { + let key = ParentVersionKey { id: object.address, checkpoint_viewed_at: group_key.checkpoint_viewed_at, parent_version: group_key.parent_version, @@ -1334,6 +1386,81 @@ impl Loader for Db { } } +#[async_trait::async_trait] +impl Loader for Db { + type Value = Object; + type Error = Error; + + async fn load(&self, keys: &[LatestAtKey]) -> Result, Error> { + // Group keys by checkpoint viewed at -- we'll issue a separate query for each group. + let mut keys_by_cursor_and_parent_version: BTreeMap<_, BTreeSet<_>> = BTreeMap::new(); + + for key in keys { + keys_by_cursor_and_parent_version + .entry(key.checkpoint_viewed_at) + .or_default() + .insert(key.id); + } + + // Issue concurrent reads for each group of keys. + let futures = + keys_by_cursor_and_parent_version + .into_iter() + .map(|(checkpoint_viewed_at, ids)| { + self.execute_repeatable(move |conn| { + let Some(range) = AvailableRange::result(conn, checkpoint_viewed_at)? + else { + return Ok::, diesel::result::Error>( + vec![], + ); + }; + + let filter = ObjectFilter { + object_ids: Some(ids.iter().cloned().collect()), + ..Default::default() + }; + + Ok(conn + .results(move || { + build_objects_query( + View::Consistent, + range, + &Page::bounded(ids.len() as u64), + |q| filter.apply(q), + |q| q, + ) + .into_boxed() + })? + .into_iter() + .map(|r| (checkpoint_viewed_at, r)) + .collect()) + }) + }); + + // Wait for the reads to all finish, and gather them into the result map. + let groups = futures::future::join_all(futures).await; + + let mut results = HashMap::new(); + for group in groups { + for (checkpoint_viewed_at, stored) in + group.map_err(|e| Error::Internal(format!("Failed to fetch objects: {e}")))? 
+ { + let object = + Object::try_from_stored_history_object(stored, checkpoint_viewed_at, None)?; + + let key = LatestAtKey { + id: object.address, + checkpoint_viewed_at, + }; + + results.insert(key, object); + } + } + + Ok(results) + } +} + impl From<&ObjectKind> for ObjectStatus { fn from(kind: &ObjectKind) -> Self { match kind { @@ -1353,15 +1480,6 @@ impl From<&Object> for OwnerImpl { } } -/// Parse a `SuiAddress` from its stored representation. Failure is an internal error: the -/// database should never contain a malformed address (containing the wrong number of bytes). -fn addr(bytes: impl AsRef<[u8]>) -> Result { - SuiAddress::from_bytes(bytes.as_ref()).map_err(|e| { - let bytes = bytes.as_ref().to_vec(); - Error::Internal(format!("Error deserializing address: {bytes:?}: {e}")) - }) -} - pub(crate) async fn deserialize_move_struct( move_object: &NativeMoveObject, resolver: &PackageResolver, diff --git a/crates/sui-graphql-rpc/src/types/owner.rs b/crates/sui-graphql-rpc/src/types/owner.rs index 79525ca9e9921..8d790fdc2d699 100644 --- a/crates/sui-graphql-rpc/src/types/owner.rs +++ b/crates/sui-graphql-rpc/src/types/owner.rs @@ -56,6 +56,7 @@ pub(crate) struct OwnerImpl { /// are identified by an address which can represent either the public key of an account or another /// object. The same address can only refer to an account or an object, never both, but it is not /// possible to know which up-front. +#[allow(clippy::duplicated_attributes)] #[derive(Interface)] #[graphql( name = "IOwner", @@ -251,9 +252,10 @@ impl Owner { Object::query( ctx, self.address, - object::ObjectLookup::LatestAt { - parent_version: self.root_version, - checkpoint_viewed_at: self.checkpoint_viewed_at, + if let Some(parent_version) = self.root_version { + Object::under_parent(parent_version, self.checkpoint_viewed_at) + } else { + Object::latest_at(self.checkpoint_viewed_at) }, ) .await diff --git a/crates/sui-graphql-rpc/src/types/protocol_config.rs b/crates/sui-graphql-rpc/src/types/protocol_config.rs index ce60c22892d39..f4f8fd9caf771 100644 --- a/crates/sui-graphql-rpc/src/types/protocol_config.rs +++ b/crates/sui-graphql-rpc/src/types/protocol_config.rs @@ -1,15 +1,15 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::collections::BTreeMap; + use async_graphql::*; use diesel::{ExpressionMethods, QueryDsl}; -use sui_indexer::schema::{chain_identifier, epochs}; -use sui_protocol_config::{ProtocolConfig as NativeProtocolConfig, ProtocolVersion}; +use sui_indexer::schema::{epochs, feature_flags, protocol_configs}; use crate::{ data::{Db, DbConnection, QueryExecutor}, error::Error, - types::chain_identifier::ChainIdentifier, }; use super::uint53::UInt53; @@ -30,7 +30,9 @@ pub(crate) struct ProtocolConfigFeatureFlag { #[derive(Clone, Debug)] pub(crate) struct ProtocolConfigs { - native: NativeProtocolConfig, + version: u64, + configs: BTreeMap>, + feature_flags: BTreeMap, } /// Constants that control how the chain operates. @@ -41,15 +43,15 @@ impl ProtocolConfigs { /// The protocol is not required to change on every epoch boundary, so the protocol version /// tracks which change to the protocol these configs are from. async fn protocol_version(&self) -> UInt53 { - self.native.version.as_u64().into() + self.version.into() } /// List all available feature flags and their values. Feature flags are a form of boolean /// configuration that are usually used to gate features while they are in development. Once a /// flag has been enabled, it is rare for it to be disabled. 
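    //
    // NOTE(editor): an illustrative sketch, not part of the original change. With
    // the compiled-in NativeProtocolConfig replaced by maps loaded from the
    // indexer's `protocol_configs` and `feature_flags` tables, this resolver is a
    // plain map traversal. With hypothetical data:
    //
    //     use std::collections::BTreeMap;
    //
    //     let flags = BTreeMap::from([("some_flag".to_string(), true)]);
    //     let listed: Vec<(String, bool)> = flags.into_iter().collect();
    //     assert_eq!(listed, vec![("some_flag".to_string(), true)]);
    //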
async fn feature_flags(&self) -> Vec { - self.native - .feature_map() + self.feature_flags + .clone() .into_iter() .map(|(key, value)| ProtocolConfigFeatureFlag { key, value }) .collect() @@ -58,31 +60,24 @@ impl ProtocolConfigs { /// List all available configurations and their values. These configurations can take any value /// (but they will all be represented in string form), and do not include feature flags. async fn configs(&self) -> Vec { - self.native - .attr_map() + self.configs + .clone() .into_iter() - .map(|(key, value)| ProtocolConfigAttr { - key, - value: value.map(|v| v.to_string()), - }) + .map(|(key, value)| ProtocolConfigAttr { key, value }) .collect() } /// Query for the value of the configuration with name `key`. async fn config(&self, key: String) -> Option { - self.native - .attr_map() - .get(&key) - .map(|value| ProtocolConfigAttr { - key, - value: value.as_ref().map(|v| v.to_string()), - }) + self.configs.get(&key).map(|value| ProtocolConfigAttr { + key, + value: value.as_ref().map(|v| v.to_string()), + }) } /// Query for the state of the feature flag with name `key`. async fn feature_flag(&self, key: String) -> Option { - self.native - .feature_map() + self.feature_flags .get(&key) .map(|value| ProtocolConfigFeatureFlag { key, value: *value }) } @@ -90,36 +85,61 @@ impl ProtocolConfigs { impl ProtocolConfigs { pub(crate) async fn query(db: &Db, protocol_version: Option) -> Result { - use chain_identifier::dsl as c; use epochs::dsl as e; + use feature_flags::dsl as f; + use protocol_configs::dsl as p; + + let version = if let Some(version) = protocol_version { + version + } else { + let latest_version: i64 = db + .execute(move |conn| { + conn.first(move || { + e::epochs + .select(e::protocol_version) + .order_by(e::epoch.desc()) + }) + }) + .await + .map_err(|e| { + Error::Internal(format!( + "Failed to fetch latest protocol version in db: {e}" + )) + })?; + latest_version as u64 + }; + + // TODO: This could be optimized by fetching all configs and flags in a single query. + let configs: BTreeMap> = db + .execute(move |conn| { + conn.results(move || { + p::protocol_configs + .select((p::config_name, p::config_value)) + .filter(p::protocol_version.eq(version as i64)) + }) + }) + .await + .map_err(|e| Error::Internal(format!("Failed to fetch protocol configs in db: {e}")))? + .into_iter() + .collect(); - let (latest_version, digest_bytes): (i64, Option>) = db + let feature_flags: BTreeMap = db .execute(move |conn| { - conn.first(move || { - e::epochs - .select(( - e::protocol_version, - c::chain_identifier - .select(c::checkpoint_digest) - .single_value(), - )) - .order_by(e::epoch.desc()) + conn.results(move || { + f::feature_flags + .select((f::flag_name, f::flag_value)) + .filter(f::protocol_version.eq(version as i64)) }) }) .await - .map_err(|e| Error::Internal(format!("Failed to fetch system details: {e}")))?; - - let native = NativeProtocolConfig::get_for_version_if_supported( - protocol_version.unwrap_or(latest_version as u64).into(), - ChainIdentifier::from_bytes(digest_bytes.unwrap_or_default())?.chain(), - ) - .ok_or_else(|| { - Error::ProtocolVersionUnsupported( - ProtocolVersion::MIN.as_u64(), - ProtocolVersion::MAX.as_u64(), - ) - })?; - - Ok(ProtocolConfigs { native }) + .map_err(|e| Error::Internal(format!("Failed to fetch feature flags in db: {e}")))? 
+ .into_iter() + .collect(); + + Ok(ProtocolConfigs { + version, + configs, + feature_flags, + }) } } diff --git a/crates/sui-graphql-rpc/src/types/query.rs b/crates/sui-graphql-rpc/src/types/query.rs index f10d28f06236f..f403fbf8657b5 100644 --- a/crates/sui-graphql-rpc/src/types/query.rs +++ b/crates/sui-graphql-rpc/src/types/query.rs @@ -12,6 +12,9 @@ use sui_sdk::SuiClient; use sui_types::transaction::{TransactionData, TransactionKind}; use sui_types::{gas_coin::GAS, transaction::TransactionDataAPI, TypeTag}; +use super::move_package::{ + self, MovePackage, MovePackageCheckpointFilter, MovePackageVersionFilter, +}; use super::suins_registration::NameService; use super::uint53::UInt53; use super::{ @@ -36,6 +39,7 @@ use super::{ transaction_metadata::TransactionMetadata, type_filter::ExactTypeFilter, }; +use crate::connection::ScanConnection; use crate::server::watermark_task::Watermark; use crate::types::base64::Base64 as GraphQLBase64; use crate::types::zklogin_verify_signature::verify_zklogin_signature; @@ -195,13 +199,13 @@ impl Query { &self, ctx: &Context<'_>, address: SuiAddress, - root_version: Option, + root_version: Option, ) -> Result> { let Watermark { checkpoint, .. } = *ctx.data()?; Ok(Some(Owner { address, checkpoint_viewed_at: checkpoint, - root_version, + root_version: root_version.map(|v| v.into()), })) } @@ -214,17 +218,52 @@ impl Query { version: Option, ) -> Result> { let Watermark { checkpoint, .. } = *ctx.data()?; + let key = match version { + Some(version) => Object::at_version(version.into(), checkpoint), + None => Object::latest_at(checkpoint), + }; + + Object::query(ctx, address, key).await.extend() + } + + /// The package corresponding to the given address (at the optionally given version). + /// + /// When no version is given, the package is loaded directly from the address given. Otherwise, + /// the address is translated before loading to point to the package whose original ID matches + /// the package at `address`, but whose version is `version`. For non-system packages, this + /// might result in a different address than `address` because different versions of a package, + /// introduced by upgrades, exist at distinct addresses. + /// + /// Note that this interpretation of `version` is different from a historical object read (the + /// interpretation of `version` for the `object` query). + async fn package( + &self, + ctx: &Context<'_>, + address: SuiAddress, + version: Option, + ) -> Result> { + let Watermark { checkpoint, .. } = *ctx.data()?; + let key = match version { + Some(version) => MovePackage::by_version(version.into(), checkpoint), + None => MovePackage::by_id_at(checkpoint), + }; - match version { - Some(version) => { - Object::query(ctx, address, Object::at_version(version.into(), checkpoint)) - .await - .extend() - } - None => Object::query(ctx, address, Object::latest_at(checkpoint)) - .await - .extend(), - } + MovePackage::query(ctx, address, key).await.extend() + } + + /// The latest version of the package at `address`. + /// + /// This corresponds to the package with the highest `version` that shares its original ID with + /// the package at `address`. + async fn latest_package( + &self, + ctx: &Context<'_>, + address: SuiAddress, + ) -> Result> { + let Watermark { checkpoint, .. } = *ctx.data()?; + MovePackage::query(ctx, address, MovePackage::latest_at(checkpoint)) + .await + .extend() } /// Look-up an Account by its SuiAddress. @@ -331,6 +370,25 @@ impl Query { } /// The transaction blocks that exist in the network. 
+ /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. async fn transaction_blocks( &self, ctx: &Context<'_>, @@ -339,15 +397,18 @@ impl Query { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { let Watermark { checkpoint, .. } = *ctx.data()?; let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; + TransactionBlock::paginate( - ctx.data_unchecked(), + ctx, page, filter.unwrap_or_default(), checkpoint, + scan_limit, ) .await .extend() @@ -399,6 +460,49 @@ impl Query { .extend() } + /// The Move packages that exist in the network, optionally filtered to be strictly before + /// `beforeCheckpoint` and/or strictly after `afterCheckpoint`. + /// + /// This query returns all versions of a given user package that appear between the specified + /// checkpoints, but only records the latest versions of system packages. + async fn packages( + &self, + ctx: &Context<'_>, + first: Option, + after: Option, + last: Option, + before: Option, + filter: Option, + ) -> Result> { + let Watermark { checkpoint, .. } = *ctx.data()?; + + let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; + MovePackage::paginate_by_checkpoint(ctx.data_unchecked(), page, filter, checkpoint) + .await + .extend() + } + + /// Fetch all versions of package at `address` (packages that share this package's original ID), + /// optionally bounding the versions exclusively from below with `afterVersion`, or from above + /// with `beforeVersion`. + async fn package_versions( + &self, + ctx: &Context<'_>, + first: Option, + after: Option, + last: Option, + before: Option, + address: SuiAddress, + filter: Option, + ) -> Result> { + let Watermark { checkpoint, .. } = *ctx.data()?; + + let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; + MovePackage::paginate_by_version(ctx.data_unchecked(), page, address, filter, checkpoint) + .await + .extend() + } + /// Fetch the protocol config by protocol version (defaults to the latest protocol /// version known to the GraphQL service). 
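+    //
+    // NOTE(editor): an illustrative sketch, not part of the original change. Per
+    // the `ProtocolConfigs::query` change earlier in this patch, an omitted
+    // version falls back to the latest epoch's protocol version, roughly:
+    //
+    //     let version: u64 = match requested_version {
+    //         Some(v) => v,
+    //         // SELECT protocol_version FROM epochs ORDER BY epoch DESC LIMIT 1
+    //         None => latest_epoch_protocol_version,
+    //     };
+    //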
async fn protocol_config( diff --git a/crates/sui-graphql-rpc/src/types/stake.rs b/crates/sui-graphql-rpc/src/types/stake.rs index 763741c67b747..75c0a07dabf5a 100644 --- a/crates/sui-graphql-rpc/src/types/stake.rs +++ b/crates/sui-graphql-rpc/src/types/stake.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::connection::ScanConnection; use crate::error::Error; use crate::{context_data::db_data_provider::PgManager, data::Db}; @@ -201,6 +202,25 @@ impl StakedSui { } /// The transaction blocks that sent objects to this object. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -209,9 +229,10 @@ impl StakedSui { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(&self.super_.super_) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } diff --git a/crates/sui-graphql-rpc/src/types/sui_address.rs b/crates/sui-graphql-rpc/src/types/sui_address.rs index 287bf0540e887..3a775e15064d4 100644 --- a/crates/sui-graphql-rpc/src/types/sui_address.rs +++ b/crates/sui-graphql-rpc/src/types/sui_address.rs @@ -3,18 +3,18 @@ use std::str::FromStr; +use crate::error::Error; use async_graphql::*; use move_core_types::account_address::AccountAddress; use serde::{Deserialize, Serialize}; use sui_types::base_types::{ObjectID, SuiAddress as NativeSuiAddress}; -use thiserror::Error; const SUI_ADDRESS_LENGTH: usize = 32; #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Copy)] pub(crate) struct SuiAddress([u8; SUI_ADDRESS_LENGTH]); -#[derive(Error, Debug, Eq, PartialEq)] +#[derive(thiserror::Error, Debug, Eq, PartialEq)] pub(crate) enum FromStrError { #[error("Invalid SuiAddress. 
Missing 0x prefix.")] NoPrefix, @@ -30,7 +30,7 @@ pub(crate) enum FromStrError { BadHex(char, usize), } -#[derive(Error, Debug, Eq, PartialEq)] +#[derive(thiserror::Error, Debug, Eq, PartialEq)] pub(crate) enum FromVecError { #[error("Expected SuiAddress with {} bytes, received {0}", SUI_ADDRESS_LENGTH)] WrongLength(usize), @@ -161,6 +161,15 @@ impl std::fmt::Display for SuiAddress { } } +/// Parse a `SuiAddress` from its stored representation. Failure is an internal error: the +/// database should never contain a malformed address (containing the wrong number of bytes). +pub(crate) fn addr(bytes: impl AsRef<[u8]>) -> Result { + SuiAddress::from_bytes(bytes.as_ref()).map_err(|e| { + let bytes = bytes.as_ref().to_vec(); + Error::Internal(format!("Error deserializing address: {bytes:?}: {e}")) + }) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/sui-graphql-rpc/src/types/suins_registration.rs b/crates/sui-graphql-rpc/src/types/suins_registration.rs index ed5337cb5d84f..e7391a258346e 100644 --- a/crates/sui-graphql-rpc/src/types/suins_registration.rs +++ b/crates/sui-graphql-rpc/src/types/suins_registration.rs @@ -25,6 +25,7 @@ use super::{ uint53::UInt53, }; use crate::{ + connection::ScanConnection, consistency::{build_objects_query, View}, data::{Db, DbConnection, QueryExecutor}, error::Error, @@ -238,6 +239,25 @@ impl SuinsRegistration { } /// The transaction blocks that sent objects to this object. + /// + /// `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + /// results. It is required for queries that apply more than two complex filters (on function, + /// kind, sender, recipient, input object, changed object, or ids), and can be at most + /// `serviceConfig.maxScanLimit`. + /// + /// When the scan limit is reached the page will be returned even if it has fewer than `first` + /// results when paginating forward (`last` when paginating backwards). If there are more + /// transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + /// `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + /// transaction that was scanned as opposed to the last (or first) transaction in the page. + /// + /// Requesting the next (or previous) page after this cursor will resume the search, scanning + /// the next `scanLimit` many transactions in the direction of pagination, and so on until all + /// transactions in the scanning range have been visited. + /// + /// By default, the scanning range includes all transactions known to GraphQL, but it can be + /// restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + /// `afterCheckpoint` and `atCheckpoint` filters. pub(crate) async fn received_transaction_blocks( &self, ctx: &Context<'_>, @@ -246,9 +266,10 @@ impl SuinsRegistration { last: Option, before: Option, filter: Option, - ) -> Result> { + scan_limit: Option, + ) -> Result> { ObjectImpl(&self.super_.super_) - .received_transaction_blocks(ctx, first, after, last, before, filter) + .received_transaction_blocks(ctx, first, after, last, before, filter, scan_limit) .await } diff --git a/crates/sui-graphql-rpc/src/types/transaction_block/cursor.rs b/crates/sui-graphql-rpc/src/types/transaction_block/cursor.rs new file mode 100644 index 0000000000000..56a82609bda1e --- /dev/null +++ b/crates/sui-graphql-rpc/src/types/transaction_block/cursor.rs @@ -0,0 +1,173 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + consistency::Checkpointed, + filter, + raw_query::RawQuery, + types::cursor::{self, Paginated, RawPaginated, ScanLimited, Target}, +}; +use diesel::{ + backend::Backend, + deserialize::{self, FromSql, QueryableByName}, + row::NamedRow, + ExpressionMethods, QueryDsl, +}; +use serde::{Deserialize, Serialize}; +use sui_indexer::{models::transactions::StoredTransaction, schema::transactions}; + +use super::Query; + +pub(crate) type Cursor = cursor::JsonCursor; + +/// The cursor returned for each `TransactionBlock` in a connection's page of results. The +/// `checkpoint_viewed_at` will set the consistent upper bound for subsequent queries made on this +/// cursor. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] +pub(crate) struct TransactionBlockCursor { + /// The checkpoint sequence number this was viewed at. + #[serde(rename = "c")] + pub checkpoint_viewed_at: u64, + #[serde(rename = "t")] + pub tx_sequence_number: u64, + /// Whether the cursor was derived from a `scan_limit`. Only applicable to the `startCursor` and + /// `endCursor` returned from a Connection's `PageInfo`, and indicates that the cursor may not + /// have a corresponding node in the result set. + #[serde(rename = "i")] + pub is_scan_limited: bool, +} + +/// Results from raw queries in Diesel can only be deserialized into structs that implement +/// `QueryableByName`. This struct is used to represent a row of `tx_sequence_number` returned from +/// subqueries against tx lookup tables. +#[derive(Clone, Debug)] +pub struct TxLookup { + pub tx_sequence_number: i64, +} + +impl Checkpointed for Cursor { + fn checkpoint_viewed_at(&self) -> u64 { + self.checkpoint_viewed_at + } +} + +impl ScanLimited for Cursor { + fn is_scan_limited(&self) -> bool { + self.is_scan_limited + } + + fn unlimited(&self) -> Self { + Cursor::new(TransactionBlockCursor { + is_scan_limited: false, + tx_sequence_number: self.tx_sequence_number, + checkpoint_viewed_at: self.checkpoint_viewed_at, + }) + } +} + +impl Paginated for StoredTransaction { + type Source = transactions::table; + + fn filter_ge(cursor: &Cursor, query: Query) -> Query { + query.filter(transactions::dsl::tx_sequence_number.ge(cursor.tx_sequence_number as i64)) + } + + fn filter_le(cursor: &Cursor, query: Query) -> Query { + query.filter(transactions::dsl::tx_sequence_number.le(cursor.tx_sequence_number as i64)) + } + + fn order(asc: bool, query: Query) -> Query { + use transactions::dsl; + if asc { + query.order_by(dsl::tx_sequence_number.asc()) + } else { + query.order_by(dsl::tx_sequence_number.desc()) + } + } +} + +impl Target for StoredTransaction { + fn cursor(&self, checkpoint_viewed_at: u64) -> Cursor { + Cursor::new(TransactionBlockCursor { + tx_sequence_number: self.tx_sequence_number as u64, + checkpoint_viewed_at, + is_scan_limited: false, + }) + } +} + +impl RawPaginated for StoredTransaction { + fn filter_ge(cursor: &Cursor, query: RawQuery) -> RawQuery { + filter!( + query, + format!("tx_sequence_number >= {}", cursor.tx_sequence_number) + ) + } + + fn filter_le(cursor: &Cursor, query: RawQuery) -> RawQuery { + filter!( + query, + format!("tx_sequence_number <= {}", cursor.tx_sequence_number) + ) + } + + fn order(asc: bool, query: RawQuery) -> RawQuery { + if asc { + query.order_by("tx_sequence_number ASC") + } else { + query.order_by("tx_sequence_number DESC") + } + } +} + +impl Target for TxLookup { + fn cursor(&self, checkpoint_viewed_at: u64) -> Cursor { + Cursor::new(TransactionBlockCursor {
+impl Paginated<Cursor> for StoredTransaction {
+    type Source = transactions::table;
+
+    fn filter_ge<ST, GB>(cursor: &Cursor, query: Query<ST, GB>) -> Query<ST, GB> {
+        query.filter(transactions::dsl::tx_sequence_number.ge(cursor.tx_sequence_number as i64))
+    }
+
+    fn filter_le<ST, GB>(cursor: &Cursor, query: Query<ST, GB>) -> Query<ST, GB> {
+        query.filter(transactions::dsl::tx_sequence_number.le(cursor.tx_sequence_number as i64))
+    }
+
+    fn order<ST, GB>(asc: bool, query: Query<ST, GB>) -> Query<ST, GB> {
+        use transactions::dsl;
+        if asc {
+            query.order_by(dsl::tx_sequence_number.asc())
+        } else {
+            query.order_by(dsl::tx_sequence_number.desc())
+        }
+    }
+}
+
+impl Target<Cursor> for StoredTransaction {
+    fn cursor(&self, checkpoint_viewed_at: u64) -> Cursor {
+        Cursor::new(TransactionBlockCursor {
+            tx_sequence_number: self.tx_sequence_number as u64,
+            checkpoint_viewed_at,
+            is_scan_limited: false,
+        })
+    }
+}
+
+impl RawPaginated<Cursor> for StoredTransaction {
+    fn filter_ge(cursor: &Cursor, query: RawQuery) -> RawQuery {
+        filter!(
+            query,
+            format!("tx_sequence_number >= {}", cursor.tx_sequence_number)
+        )
+    }
+
+    fn filter_le(cursor: &Cursor, query: RawQuery) -> RawQuery {
+        filter!(
+            query,
+            format!("tx_sequence_number <= {}", cursor.tx_sequence_number)
+        )
+    }
+
+    fn order(asc: bool, query: RawQuery) -> RawQuery {
+        if asc {
+            query.order_by("tx_sequence_number ASC")
+        } else {
+            query.order_by("tx_sequence_number DESC")
+        }
+    }
+}
+
+impl Target<Cursor> for TxLookup {
+    fn cursor(&self, checkpoint_viewed_at: u64) -> Cursor {
+        Cursor::new(TransactionBlockCursor {
+            tx_sequence_number: self.tx_sequence_number as u64,
+            checkpoint_viewed_at,
+            is_scan_limited: false,
+        })
+    }
+}
+
+impl RawPaginated<Cursor> for TxLookup {
+    fn filter_ge(cursor: &Cursor, query: RawQuery) -> RawQuery {
+        filter!(
+            query,
+            format!("tx_sequence_number >= {}", cursor.tx_sequence_number)
+        )
+    }
+
+    fn filter_le(cursor: &Cursor, query: RawQuery) -> RawQuery {
+        filter!(
+            query,
+            format!("tx_sequence_number <= {}", cursor.tx_sequence_number)
+        )
+    }
+
+    fn order(asc: bool, query: RawQuery) -> RawQuery {
+        if asc {
+            query.order_by("tx_sequence_number ASC")
+        } else {
+            query.order_by("tx_sequence_number DESC")
+        }
+    }
+}
+
+/// `sql_query` raw queries require `QueryableByName`. The default implementation looks for a table
+/// based on the struct name, and it also expects the struct's fields to reflect the table's
+/// columns. We can override this behavior by implementing `QueryableByName` for our struct. For
+/// `TxLookup`, the `tx_sequence_number` field is produced by subqueries over many different lookup
+/// tables rather than any single one, so we can't leverage the default implementation directly.
+impl<DB> QueryableByName<DB> for TxLookup
+where
+    DB: Backend,
+    i64: FromSql<diesel::sql_types::BigInt, DB>,
+{
+    fn build<'a>(row: &impl NamedRow<'a, DB>) -> deserialize::Result<Self> {
+        let tx_sequence_number =
+            NamedRow::get::<diesel::sql_types::BigInt, _>(row, "tx_sequence_number")?;
+
+        Ok(Self { tx_sequence_number })
+    }
+}
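For illustration, this is the kind of call the manual impl above unlocks. The SQL text and connection handling here are placeholders; only the `sql_query(..).load::<TxLookup>(..)` shape is the point:

```rust
use diesel::{sql_query, PgConnection, RunQueryDsl};

// Hypothetical helper: read `tx_sequence_number`s back from a hand-written query.
fn tx_sequence_numbers(conn: &mut PgConnection) -> diesel::QueryResult<Vec<i64>> {
    let rows: Vec<TxLookup> =
        sql_query("SELECT tx_sequence_number FROM tx_senders LIMIT 10").load(conn)?;

    Ok(rows.into_iter().map(|r| r.tx_sequence_number).collect())
}
```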
intersect { + ($field:ident, $body:expr) => { + intersect::field(self.$field, other.$field, $body) + }; + } + + Some(Self { + function: intersect!(function, FqNameFilter::intersect)?, + kind: intersect!(kind, intersect::by_eq)?, + + after_checkpoint: intersect!(after_checkpoint, intersect::by_max)?, + at_checkpoint: intersect!(at_checkpoint, intersect::by_eq)?, + before_checkpoint: intersect!(before_checkpoint, intersect::by_min)?, + + sign_address: intersect!(sign_address, intersect::by_eq)?, + recv_address: intersect!(recv_address, intersect::by_eq)?, + input_object: intersect!(input_object, intersect::by_eq)?, + changed_object: intersect!(changed_object, intersect::by_eq)?, + + transaction_ids: intersect!(transaction_ids, |a, b| { + let a = BTreeSet::from_iter(a.into_iter()); + let b = BTreeSet::from_iter(b.into_iter()); + Some(a.intersection(&b).cloned().collect()) + })?, + }) + } + + /// Most filter conditions require a scan limit if used in tandem with other filters. The + /// exception to this is sender and checkpoint, since sender is denormalized on all tables, and + /// the corresponding tx range can be determined for a checkpoint. + pub(crate) fn requires_scan_limit(&self) -> bool { + [ + self.function.is_some(), + self.kind.is_some(), + self.recv_address.is_some(), + self.input_object.is_some(), + self.changed_object.is_some(), + self.transaction_ids.is_some(), + ] + .into_iter() + .filter(|is_set| *is_set) + .count() + > 1 + } + + /// If we don't query a lookup table that has a denormalized sender column, we need to + /// explicitly sp + pub(crate) fn explicit_sender(&self) -> Option { + if self.function.is_none() + && self.kind.is_none() + && self.recv_address.is_none() + && self.input_object.is_none() + && self.changed_object.is_none() + { + self.sign_address + } else { + None + } + } + + /// A TransactionBlockFilter is considered not to have any filters if no filters are specified, + /// or if the only filters are on `checkpoint`. + pub(crate) fn has_filters(&self) -> bool { + self.function.is_some() + || self.kind.is_some() + || self.sign_address.is_some() + || self.recv_address.is_some() + || self.input_object.is_some() + || self.changed_object.is_some() + || self.transaction_ids.is_some() + } + + pub(crate) fn is_empty(&self) -> bool { + self.before_checkpoint == Some(UInt53::from(0)) + || matches!( + (self.after_checkpoint, self.before_checkpoint), + (Some(after), Some(before)) if after >= before + ) + || matches!( + (self.after_checkpoint, self.at_checkpoint), + (Some(after), Some(at)) if after >= at + ) + || matches!( + (self.at_checkpoint, self.before_checkpoint), + (Some(at), Some(before)) if at >= before + ) + // If SystemTx, sender if specified must be 0x0. Conversely, if sender is 0x0, kind must be SystemTx. + || matches!( + (self.kind, self.sign_address), + (Some(kind), Some(signer)) + if (kind == TransactionBlockKindInput::SystemTx) + != (signer == SuiAddress::from(NativeSuiAddress::ZERO)) + ) + } +} diff --git a/crates/sui-graphql-rpc/src/types/transaction_block.rs b/crates/sui-graphql-rpc/src/types/transaction_block/mod.rs similarity index 61% rename from crates/sui-graphql-rpc/src/types/transaction_block.rs rename to crates/sui-graphql-rpc/src/types/transaction_block/mod.rs index 5009d076145d8..1573fe97cfeab 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block/mod.rs @@ -1,22 +1,34 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::{BTreeMap, BTreeSet, HashMap}; - -use async_graphql::{ - connection::{Connection, CursorType, Edge}, - dataloader::Loader, - *, +use super::{ + address::Address, + base64::Base64, + cursor::{Page, Target}, + digest::Digest, + epoch::Epoch, + gas::GasInput, + sui_address::SuiAddress, + transaction_block_effects::{TransactionBlockEffects, TransactionBlockEffectsKind}, + transaction_block_kind::TransactionBlockKind, }; -use diesel::{BoolExpressionMethods, ExpressionMethods, JoinOnDsl, QueryDsl, SelectableHelper}; +use crate::{ + config::ServiceConfig, + connection::ScanConnection, + data::{self, DataLoader, Db, DbConnection, QueryExecutor}, + error::Error, + server::watermark_task::Watermark, +}; +use async_graphql::{connection::CursorType, dataloader::Loader, *}; +use connection::Edge; +use cursor::TxLookup; +use diesel::{ExpressionMethods, JoinOnDsl, QueryDsl, SelectableHelper}; use fastcrypto::encoding::{Base58, Encoding}; use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, HashMap}; use sui_indexer::{ models::transactions::StoredTransaction, - schema::{ - transactions, tx_calls, tx_changed_objects, tx_digests, tx_input_objects, tx_recipients, - tx_senders, - }, + schema::{transactions, tx_digests}, }; use sui_types::{ base_types::SuiAddress as NativeSuiAddress, @@ -29,27 +41,13 @@ use sui_types::{ }, }; -use crate::{ - consistency::Checkpointed, - data::{self, DataLoader, Db, DbConnection, QueryExecutor}, - error::Error, - server::watermark_task::Watermark, - types::intersect, -}; +mod cursor; +mod filter; +mod tx_lookups; -use super::{ - address::Address, - base64::Base64, - cursor::{self, Page, Paginated, Target}, - digest::Digest, - epoch::Epoch, - gas::GasInput, - sui_address::SuiAddress, - transaction_block_effects::{TransactionBlockEffects, TransactionBlockEffectsKind}, - transaction_block_kind::TransactionBlockKind, - type_filter::FqNameFilter, - uint53::UInt53, -}; +pub(crate) use cursor::Cursor; +pub(crate) use filter::TransactionBlockFilter; +pub(crate) use tx_lookups::{subqueries, TxBounds}; /// Wraps the actual transaction block data with the checkpoint sequence number at which the data /// was viewed, for consistent results on paginating through and resolving nested types. @@ -94,26 +92,6 @@ pub(crate) enum TransactionBlockKindInput { ProgrammableTx = 1, } -#[derive(InputObject, Debug, Default, Clone)] -pub(crate) struct TransactionBlockFilter { - pub function: Option, - - /// An input filter selecting for either system or programmable transactions. - pub kind: Option, - pub after_checkpoint: Option, - pub at_checkpoint: Option, - pub before_checkpoint: Option, - - pub sign_address: Option, - pub recv_address: Option, - - pub input_object: Option, - pub changed_object: Option, - - pub transaction_ids: Option>, -} - -pub(crate) type Cursor = cursor::JsonCursor; type Query = data::Query; /// The cursor returned for each `TransactionBlock` in a connection's page of results. The @@ -131,7 +109,7 @@ pub(crate) struct TransactionBlockCursor { pub tx_checkpoint_number: u64, } -/// DataLoader key for fetching a `TransactionBlock` by its digest, optionally constrained by a +/// `DataLoader` key for fetching a `TransactionBlock` by its digest, optionally constrained by a /// consistency cursor. 
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct DigestKey { @@ -298,104 +276,132 @@ impl TransactionBlock { /// /// If the `Page` is set, then this function will defer to the `checkpoint_viewed_at` in /// the cursor if they are consistent. + /// + /// Filters that involve a combination of `recvAddress`, `inputObject`, `changedObject`, and + /// `function` should provide a value for `scan_limit`. This modifies querying behavior by + /// limiting how many transactions to scan through before applying filters, and also affects + /// pagination behavior. pub(crate) async fn paginate( - db: &Db, + ctx: &Context<'_>, page: Page, filter: TransactionBlockFilter, checkpoint_viewed_at: u64, - ) -> Result, Error> { - use transactions as tx; + scan_limit: Option, + ) -> Result, Error> { + let limits = &ctx.data_unchecked::().limits; + + // If the caller has provided some arbitrary combination of `function`, `kind`, + // `recvAddress`, `inputObject`, or `changedObject`, we require setting a `scanLimit`. + if let Some(scan_limit) = scan_limit { + if scan_limit > limits.max_scan_limit as u64 { + return Err(Error::Client(format!( + "Scan limit exceeds max limit of '{}'", + limits.max_scan_limit + ))); + } + } else if filter.requires_scan_limit() { + return Err(Error::Client( + "A scan limit must be specified for the given filter combination".to_string(), + )); + } + + if let Some(tx_ids) = &filter.transaction_ids { + if tx_ids.len() > limits.max_transaction_ids as usize { + return Err(Error::Client(format!( + "Transaction IDs exceed max limit of '{}'", + limits.max_transaction_ids + ))); + } + } + + // If page size or scan limit is 0, we want to standardize behavior by returning an empty + // connection + if filter.is_empty() || page.limit() == 0 || scan_limit.is_some_and(|v| v == 0) { + return Ok(ScanConnection::new(false, false)); + } let cursor_viewed_at = page.validate_cursor_consistency()?; let checkpoint_viewed_at = cursor_viewed_at.unwrap_or(checkpoint_viewed_at); + let db: &Db = ctx.data_unchecked(); + let is_from_front = page.is_from_front(); - let (prev, next, results) = db - .execute(move |conn| { - page.paginate_query::( + use transactions::dsl as tx; + let (prev, next, transactions, tx_bounds): ( + bool, + bool, + Vec, + Option, + ) = db + .execute_repeatable(move |conn| { + let Some(tx_bounds) = TxBounds::query( conn, + filter.after_checkpoint.map(u64::from), + filter.at_checkpoint.map(u64::from), + filter.before_checkpoint.map(u64::from), checkpoint_viewed_at, - move || { - let mut query = tx::dsl::transactions.into_boxed(); - - if let Some(f) = &filter.function { - let sub_query = tx_calls::dsl::tx_calls - .select(tx_calls::dsl::tx_sequence_number) - .into_boxed(); - - query = query.filter(tx::dsl::tx_sequence_number.eq_any(f.apply( - sub_query, - tx_calls::dsl::package, - tx_calls::dsl::module, - tx_calls::dsl::func, - ))); - } - - if let Some(k) = &filter.kind { - query = query.filter(tx::dsl::transaction_kind.eq(*k as i16)) - } - - if let Some(c) = &filter.after_checkpoint { - query = - query.filter(tx::dsl::checkpoint_sequence_number.gt(i64::from(*c))); - } - - if let Some(c) = &filter.at_checkpoint { - query = - query.filter(tx::dsl::checkpoint_sequence_number.eq(i64::from(*c))); - } - - let before_checkpoint = filter - .before_checkpoint - .map_or(checkpoint_viewed_at + 1, |c| { - u64::from(c).min(checkpoint_viewed_at + 1) - }); - query = query.filter( - tx::dsl::checkpoint_sequence_number.lt(before_checkpoint as i64), - ); - - if let Some(a) = &filter.sign_address { - let 
sub_query = tx_senders::dsl::tx_senders - .select(tx_senders::dsl::tx_sequence_number) - .filter(tx_senders::dsl::sender.eq(a.into_vec())); - query = query.filter(tx::dsl::tx_sequence_number.eq_any(sub_query)); - } - - if let Some(a) = &filter.recv_address { - let sub_query = tx_recipients::dsl::tx_recipients - .select(tx_recipients::dsl::tx_sequence_number) - .filter(tx_recipients::dsl::recipient.eq(a.into_vec())); - query = query.filter(tx::dsl::tx_sequence_number.eq_any(sub_query)); - } - - if let Some(o) = &filter.input_object { - let sub_query = tx_input_objects::dsl::tx_input_objects - .select(tx_input_objects::dsl::tx_sequence_number) - .filter(tx_input_objects::dsl::object_id.eq(o.into_vec())); - query = query.filter(tx::dsl::tx_sequence_number.eq_any(sub_query)); - } - - if let Some(o) = &filter.changed_object { - let sub_query = tx_changed_objects::dsl::tx_changed_objects - .select(tx_changed_objects::dsl::tx_sequence_number) - .filter(tx_changed_objects::dsl::object_id.eq(o.into_vec())); - query = query.filter(tx::dsl::tx_sequence_number.eq_any(sub_query)); - } - - if let Some(txs) = &filter.transaction_ids { - let digests: Vec<_> = txs.iter().map(|d| d.to_vec()).collect(); - query = query.filter(tx::dsl::transaction_digest.eq_any(digests)); - } - - query - }, - ) + scan_limit, + &page, + )? + else { + return Ok::<_, diesel::result::Error>((false, false, Vec::new(), None)); + }; + + // If no filters are selected, or if the filter is composed of only checkpoint + // filters, we can directly query the main `transactions` table. Otherwise, we first + // fetch the set of `tx_sequence_number` from a join over relevant lookup tables, + // and then issue a query against the `transactions` table to fetch the remaining + // contents. + let (prev, next, transactions) = if !filter.has_filters() { + let (prev, next, iter) = page.paginate_query::( + conn, + checkpoint_viewed_at, + move || { + tx::transactions + .filter(tx::tx_sequence_number.ge(tx_bounds.scan_lo() as i64)) + .filter(tx::tx_sequence_number.lt(tx_bounds.scan_hi() as i64)) + .into_boxed() + }, + )?; + + (prev, next, iter.collect()) + } else { + let subquery = subqueries(&filter, tx_bounds).unwrap(); + let (prev, next, results) = + page.paginate_raw_query::(conn, checkpoint_viewed_at, subquery)?; + + let tx_sequence_numbers = results + .into_iter() + .map(|x| x.tx_sequence_number) + .collect::>(); + + let transactions = conn.results(move || { + tx::transactions + .filter(tx::tx_sequence_number.eq_any(tx_sequence_numbers.clone())) + })?; + + (prev, next, transactions) + }; + + Ok::<_, diesel::result::Error>((prev, next, transactions, Some(tx_bounds))) }) .await?; - let mut conn = Connection::new(prev, next); + let mut conn = ScanConnection::new(prev, next); + + let Some(tx_bounds) = tx_bounds else { + return Ok(conn); + }; - // The "checkpoint viewed at" sets a consistent upper bound for the nested queries. - for stored in results { + if scan_limit.is_some() { + apply_scan_limited_pagination( + &mut conn, + tx_bounds, + checkpoint_viewed_at, + is_from_front, + ); + } + + for stored in transactions { let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); let inner = TransactionBlockInner::try_from(stored)?; let transaction = TransactionBlock { @@ -409,87 +415,6 @@ impl TransactionBlock { } } -impl TransactionBlockFilter { - /// Try to create a filter whose results are the intersection of transaction blocks in `self`'s - /// results and transaction blocks in `other`'s results. 
This may not be possible if the - /// resulting filter is inconsistent in some way (e.g. a filter that requires one field to be - /// two different values simultaneously). - pub(crate) fn intersect(self, other: Self) -> Option { - macro_rules! intersect { - ($field:ident, $body:expr) => { - intersect::field(self.$field, other.$field, $body) - }; - } - - Some(Self { - function: intersect!(function, FqNameFilter::intersect)?, - kind: intersect!(kind, intersect::by_eq)?, - - after_checkpoint: intersect!(after_checkpoint, intersect::by_max)?, - at_checkpoint: intersect!(at_checkpoint, intersect::by_eq)?, - before_checkpoint: intersect!(before_checkpoint, intersect::by_min)?, - - sign_address: intersect!(sign_address, intersect::by_eq)?, - recv_address: intersect!(recv_address, intersect::by_eq)?, - input_object: intersect!(input_object, intersect::by_eq)?, - changed_object: intersect!(changed_object, intersect::by_eq)?, - - transaction_ids: intersect!(transaction_ids, |a, b| { - let a = BTreeSet::from_iter(a.into_iter()); - let b = BTreeSet::from_iter(b.into_iter()); - Some(a.intersection(&b).cloned().collect()) - })?, - }) - } -} - -impl Paginated for StoredTransaction { - type Source = transactions::table; - - fn filter_ge(cursor: &Cursor, query: Query) -> Query { - query - .filter(transactions::dsl::tx_sequence_number.ge(cursor.tx_sequence_number as i64)) - .filter( - transactions::dsl::checkpoint_sequence_number - .ge(cursor.tx_checkpoint_number as i64), - ) - } - - fn filter_le(cursor: &Cursor, query: Query) -> Query { - query - .filter(transactions::dsl::tx_sequence_number.le(cursor.tx_sequence_number as i64)) - .filter( - transactions::dsl::checkpoint_sequence_number - .le(cursor.tx_checkpoint_number as i64), - ) - } - - fn order(asc: bool, query: Query) -> Query { - use transactions::dsl; - if asc { - query.order_by(dsl::tx_sequence_number.asc()) - } else { - query.order_by(dsl::tx_sequence_number.desc()) - } - } -} - -impl Target for StoredTransaction { - fn cursor(&self, checkpoint_viewed_at: u64) -> Cursor { - Cursor::new(TransactionBlockCursor { - tx_sequence_number: self.tx_sequence_number as u64, - tx_checkpoint_number: self.checkpoint_sequence_number as u64, - checkpoint_viewed_at, - }) - } -} - -impl Checkpointed for Cursor { - fn checkpoint_viewed_at(&self) -> u64 { - self.checkpoint_viewed_at - } -} - #[async_trait::async_trait] impl Loader for Db { type Value = TransactionBlock; @@ -507,9 +432,7 @@ impl Loader for Db { let transactions: Vec = self .execute(move |conn| { conn.results(move || { - let join = ds::cp_sequence_number - .eq(tx::checkpoint_sequence_number) - .and(ds::tx_sequence_number.eq(tx::tx_sequence_number)); + let join = ds::tx_sequence_number.eq(tx::tx_sequence_number); tx::transactions .inner_join(ds::tx_digests.on(join)) @@ -601,3 +524,88 @@ impl TryFrom for TransactionBlock { }) } } + +fn apply_scan_limited_pagination( + conn: &mut ScanConnection, + tx_bounds: TxBounds, + checkpoint_viewed_at: u64, + is_from_front: bool, +) { + if is_from_front { + apply_forward_scan_limited_pagination(conn, tx_bounds, checkpoint_viewed_at); + } else { + apply_backward_scan_limited_pagination(conn, tx_bounds, checkpoint_viewed_at); + } +} + +/// When paginating forwards on a scan-limited query, the starting cursor and previous page flag +/// will be the first tx scanned in the current window, and whether this window is within the +/// scanning range. 
The ending cursor and next page flag wrap the last element of the result set if
+/// there are more matches in the scanned window that were truncated (which happens when the page
+/// size is smaller than the scan limit), but otherwise are expanded out to the last tx scanned.
+fn apply_forward_scan_limited_pagination(
+    conn: &mut ScanConnection<String, TransactionBlock>,
+    tx_bounds: TxBounds,
+    checkpoint_viewed_at: u64,
+) {
+    conn.has_previous_page = tx_bounds.scan_has_prev_page();
+    conn.start_cursor = Some(
+        Cursor::new(cursor::TransactionBlockCursor {
+            checkpoint_viewed_at,
+            tx_sequence_number: tx_bounds.scan_start_cursor(),
+            is_scan_limited: true,
+        })
+        .encode_cursor(),
+    );
+
+    // There may be more results within the scanned range that got truncated, which occurs when the
+    // page size is less than `scan_limit`, so only overwrite the end when the base pagination
+    // reports no next page.
+    if !conn.has_next_page {
+        conn.has_next_page = tx_bounds.scan_has_next_page();
+        conn.end_cursor = Some(
+            Cursor::new(cursor::TransactionBlockCursor {
+                checkpoint_viewed_at,
+                tx_sequence_number: tx_bounds.scan_end_cursor(),
+                is_scan_limited: true,
+            })
+            .encode_cursor(),
+        );
+    }
+}
+
+/// When paginating backwards on a scan-limited query, the ending cursor and next page flag will be
+/// the last tx scanned in the current window, and whether this window is within the scanning range.
+/// The starting cursor and previous page flag wrap the first element of the result set if there
+/// are more matches in the scanned window that were truncated (which happens when the page size is
+/// smaller than the scan limit), but otherwise are expanded out to the first tx scanned.
+fn apply_backward_scan_limited_pagination(
+    conn: &mut ScanConnection<String, TransactionBlock>,
+    tx_bounds: TxBounds,
+    checkpoint_viewed_at: u64,
+) {
+    conn.has_next_page = tx_bounds.scan_has_next_page();
+    conn.end_cursor = Some(
+        Cursor::new(cursor::TransactionBlockCursor {
+            checkpoint_viewed_at,
+            tx_sequence_number: tx_bounds.scan_end_cursor(),
+            is_scan_limited: true,
+        })
+        .encode_cursor(),
+    );
+
+    // There may be more results within the scanned range that got truncated, which occurs when the
+    // page size is less than `scan_limit`, so only overwrite the start when the base pagination
+    // reports no previous page.
+    if !conn.has_previous_page {
+        conn.has_previous_page = tx_bounds.scan_has_prev_page();
+        conn.start_cursor = Some(
+            Cursor::new(cursor::TransactionBlockCursor {
+                checkpoint_viewed_at,
+                tx_sequence_number: tx_bounds.scan_start_cursor(),
+                is_scan_limited: true,
+            })
+            .encode_cursor(),
+        );
+    }
+}
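To make the cursor hand-off concrete, here is a worked example of the forward case with made-up numbers, as a standalone mimic of the `TxBounds` window arithmetic defined in `tx_lookups.rs` below (not the patch's own code):

```rust
// Bounds [tx_lo, tx_hi) = [100, 1_000), scanLimit = 200.
fn main() {
    let (tx_lo, tx_hi) = (100u64, 1_000u64);
    let scan_limit = 200u64;

    // First page: no cursor, so the window starts at `tx_lo`: [100, 300).
    let (lo, hi) = (tx_lo, (tx_lo + scan_limit).min(tx_hi));
    assert_eq!((lo, hi), (100, 300));

    // `endCursor` is the last tx scanned; there is a next page while `hi < tx_hi`.
    let (end_cursor, has_next_page) = (hi - 1, hi < tx_hi);
    assert_eq!((end_cursor, has_next_page), (299, true));

    // Resuming with `after = 299`: the cursor row is scanned inclusively, then
    // the next `scan_limit` transactions, giving the window [300, 500).
    let lo2 = end_cursor.max(tx_lo); // db_lo treats the cursor inclusively
    let hi2 = (lo2 + 1 + scan_limit).min(tx_hi); // +1 accounts for the cursor row
    assert_eq!((lo2 + 1, hi2), (300, 500));
}
```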
diff --git a/crates/sui-graphql-rpc/src/types/transaction_block/tx_lookups.rs b/crates/sui-graphql-rpc/src/types/transaction_block/tx_lookups.rs
new file mode 100644
index 0000000000000..8077f4b4d5da4
--- /dev/null
+++ b/crates/sui-graphql-rpc/src/types/transaction_block/tx_lookups.rs
@@ -0,0 +1,431 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use super::{Cursor, TransactionBlockFilter};
+use crate::{
+    data::{pg::bytea_literal, Conn, DbConnection},
+    filter, inner_join, query,
+    raw_query::RawQuery,
+    types::{
+        cursor::{End, Page},
+        digest::Digest,
+        sui_address::SuiAddress,
+        transaction_block::TransactionBlockKindInput,
+        type_filter::{FqNameFilter, ModuleFilter},
+    },
+};
+use diesel::{ExpressionMethods, OptionalExtension, QueryDsl};
+use std::fmt::Write;
+use sui_indexer::schema::checkpoints;
+
+/// Bounds on transaction sequence number, imposed by filters, cursors, and the scan limit. The
+/// outermost bounds are determined by the checkpoint filters. These get translated into bounds in
+/// terms of transaction sequence numbers:
+///
+/// ```ignore
+///    tx_lo                                                             tx_hi
+///    [-----------------------------------------------------------------)
+/// ```
+///
+/// If cursors are provided, they further restrict the range of transactions to scan. Cursors are
+/// exclusive, but when issuing database queries, we treat them inclusively so that we can detect
+/// previous and next pages based on the existence of cursors in the results:
+///
+/// ```ignore
+///            cursor_lo                    cursor_hi_inclusive
+///            [------------------------------------------]
+/// ```
+///
+/// Finally, the scan limit restricts the number of transactions to scan. The scan limit can be
+/// applied to either the front (forward pagination) or the back (backward pagination):
+///
+/// ```ignore
+///    [-----scan-limit-----)---------------------|    end = Front
+///    |---------------------[-----scan-limit-----)    end = Back
+/// ```
+///
+/// This data structure can be used to compute the interval of transactions to look in for
+/// candidates to include in a page of results. It can also determine whether the scanning has been
+/// cut short on either side, implying that there is a previous or next page of values to scan.
+///
+/// NOTE: for consistency, assume that lowerbounds are inclusive and upperbounds are exclusive.
+/// Bounds that do not follow this convention are annotated explicitly (e.g. `lo_exclusive` or
+/// `hi_inclusive`).
+#[derive(Clone, Debug, Copy)]
+pub(crate) struct TxBounds {
+    /// The inclusive lower bound `tx_sequence_number` derived from checkpoint bounds. If
+    /// checkpoint bounds are not provided, this defaults to `0`.
+    tx_lo: u64,
+
+    /// The exclusive upper bound `tx_sequence_number` derived from checkpoint bounds. If
+    /// checkpoint bounds are not provided, this defaults to the total transaction count at the
+    /// checkpoint viewed.
+    tx_hi: u64,
+
+    /// The starting cursor (aka `after`).
+    cursor_lo_exclusive: Option<u64>,
+
+    /// The ending cursor (aka `before`).
+    cursor_hi: Option<u64>,
+
+    /// The number of transactions to treat as candidates, defaults to all the transactions in the
+    /// range defined by the bounds above.
+    scan_limit: Option<u64>,
+
+    /// Which end of the range candidates will be scanned from.
+    end: End,
+}
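Before looking at `TxBounds::query`, it helps to see how checkpoint filters become transaction bounds. A standalone sketch with made-up cumulative transaction counts (the real code reads these from `checkpoints.network_total_transactions`):

```rust
fn main() {
    // Hypothetical cumulative transaction counts at the end of each checkpoint:
    let network_total_transactions = |cp: u64| match cp {
        9 => 1_500u64,
        20 => 2_000,
        _ => unreachable!("only checkpoints 9 and 20 matter in this sketch"),
    };

    // Filter: afterCheckpoint = 9 and beforeCheckpoint = 21, so cp_lo = 10 and
    // the inclusive checkpoint upperbound is 20.
    let tx_lo = network_total_transactions(10 - 1); // txs of checkpoints 0..=9 come first
    let tx_hi = network_total_transactions(20); // exclusive: count at the end of cp 20

    // All transactions in checkpoints 10..=20 form the scanning range.
    assert_eq!((tx_lo, tx_hi), (1_500, 2_000));
}
```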
+impl TxBounds {
+    /// Determines the `tx_sequence_number` range from the checkpoint bounds for a transaction
+    /// block query. If no checkpoint range is specified, the default is between 0 and the
+    /// `checkpoint_viewed_at`. The corresponding `tx_sequence_number` range is fetched from the
+    /// db, and further adjusted by cursors and the scan limit. If there are any inconsistencies
+    /// or invalid combinations, e.g. the `after` cursor is greater than the upper bound, return
+    /// `None`.
+    pub(crate) fn query(
+        conn: &mut Conn,
+        cp_after: Option<u64>,
+        cp_at: Option<u64>,
+        cp_before: Option<u64>,
+        checkpoint_viewed_at: u64,
+        scan_limit: Option<u64>,
+        page: &Page<Cursor>,
+    ) -> Result<Option<Self>, diesel::result::Error> {
+        // Lowerbound in terms of checkpoint sequence number. We want to get the total transaction
+        // count of the checkpoint before this one, or 0 if there is no previous checkpoint.
+        let cp_lo = max_option([cp_after.map(|x| x.saturating_add(1)), cp_at]).unwrap_or(0);
+
+        let cp_before_inclusive = match cp_before {
+            // There are no results strictly before checkpoint 0.
+            Some(0) => return Ok(None),
+            Some(x) => Some(x - 1),
+            None => None,
+        };
+
+        // Upperbound in terms of checkpoint sequence number. We want to get the total transaction
+        // count at the end of this checkpoint. If no upperbound is given, use
+        // `checkpoint_viewed_at`.
+        //
+        // SAFETY: we can unwrap because the list contains `Some(checkpoint_viewed_at)`.
+        let cp_hi = min_option([cp_before_inclusive, cp_at, Some(checkpoint_viewed_at)]).unwrap();
+
+        use checkpoints::dsl;
+        let (tx_lo, tx_hi) = if let Some(cp_prev) = cp_lo.checked_sub(1) {
+            let res: Vec<i64> = conn.results(move || {
+                dsl::checkpoints
+                    .select(dsl::network_total_transactions)
+                    .filter(dsl::sequence_number.eq_any([cp_prev as i64, cp_hi as i64]))
+                    .order_by(dsl::network_total_transactions.asc())
+            })?;
+
+            // If there are not two distinct results, either the transaction bounds are empty (lo
+            // and hi are the same), or one or the other of the checkpoints doesn't exist, so we
+            // can return early.
+            let &[lo, hi] = res.as_slice() else {
+                return Ok(None);
+            };
+
+            (lo as u64, hi as u64)
+        } else {
+            let res: Option<i64> = conn
+                .first(move || {
+                    dsl::checkpoints
+                        .select(dsl::network_total_transactions)
+                        .filter(dsl::sequence_number.eq(cp_hi as i64))
+                })
+                .optional()?;
+
+            // If there is no result, the checkpoint doesn't exist, so we can return early.
+            let Some(hi) = res else {
+                return Ok(None);
+            };
+
+            (0, hi as u64)
+        };
+
+        // If the cursors point outside checkpoint bounds, we can return early.
+        if matches!(page.after(), Some(a) if tx_hi <= a.tx_sequence_number.saturating_add(1)) {
+            return Ok(None);
+        }
+
+        if matches!(page.before(), Some(b) if b.tx_sequence_number <= tx_lo) {
+            return Ok(None);
+        }
+
+        Ok(Some(Self {
+            tx_lo,
+            tx_hi,
+            cursor_lo_exclusive: page.after().map(|a| a.tx_sequence_number),
+            cursor_hi: page.before().map(|b| b.tx_sequence_number),
+            scan_limit,
+            end: page.end(),
+        }))
+    }
+
+    /// Inclusive lowerbound for the range of transactions to scan, accounting for the bounds from
+    /// filters and the cursor, but not scan limits. For the purposes of scanning records in the
+    /// DB, cursors are treated inclusively, even though they are exclusive bounds.
+    fn db_lo(&self) -> u64 {
+        max_option([self.cursor_lo_exclusive, Some(self.tx_lo)]).unwrap()
+    }
+
+    /// Exclusive upperbound for the range of transactions to scan, accounting for the bounds from
+    /// filters and the cursor, but not scan limits. For the purposes of scanning records in the
+    /// DB, cursors are treated inclusively, even though they are exclusive bounds.
+    fn db_hi(&self) -> u64 {
+        min_option([
+            self.cursor_hi.map(|h| h.saturating_add(1)),
+            Some(self.tx_hi),
+        ])
+        .unwrap()
+    }
+
+    /// Whether the cursor lowerbound restricts the transaction range.
+    fn has_cursor_prev_page(&self) -> bool {
+        self.cursor_lo_exclusive.is_some_and(|lo| self.tx_lo <= lo)
+    }
+
+    /// Whether the cursor upperbound restricts the transaction range.
+    fn has_cursor_next_page(&self) -> bool {
+        self.cursor_hi.is_some_and(|hi| hi < self.tx_hi)
+    }
+
+    /// Inclusive lowerbound of the range of transactions to scan.
+    pub(crate) fn scan_lo(&self) -> u64 {
+        match (self.end, self.scan_limit) {
+            (End::Front, _) | (_, None) => self.db_lo(),
+            (End::Back, Some(scan_limit)) => self
+                .db_hi()
+                // If there is a next page, additionally scan the cursor upperbound.
+                .saturating_sub(self.has_cursor_next_page() as u64)
+                .saturating_sub(scan_limit)
+                .max(self.db_lo()),
+        }
+    }
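`scan_lo` is easiest to see with numbers. A standalone mimic of the backward case, with hypothetical values:

```rust
// Scanning range [tx_lo, tx_hi) = [100, 1_000), scanLimit = 200, and a `before`
// cursor at 600 (illustrative values; not the patch's own code).
fn main() {
    let (tx_lo, tx_hi) = (100u64, 1_000u64);
    let (scan_limit, cursor_hi) = (200u64, 600u64);

    // `db_hi` treats the `before` cursor inclusively: min(600 + 1, 1_000) = 601.
    let db_hi = (cursor_hi + 1).min(tx_hi);
    // The cursor upperbound restricts the range, so one extra row is scanned for it.
    let has_cursor_next_page = (cursor_hi < tx_hi) as u64;

    let scan_lo = db_hi
        .saturating_sub(has_cursor_next_page)
        .saturating_sub(scan_limit)
        .max(tx_lo);
    assert_eq!(scan_lo, 400); // rows 400..=600 are scanned, 600 being the cursor

    // The last transaction scanned, ignoring the cursor row itself:
    let scan_end_cursor = db_hi - has_cursor_next_page - 1;
    assert_eq!(scan_end_cursor, 599);
}
```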
+    /// Exclusive upperbound of the range of transactions to scan.
+    pub(crate) fn scan_hi(&self) -> u64 {
+        match (self.end, self.scan_limit) {
+            (End::Back, _) | (_, None) => self.db_hi(),
+            (End::Front, Some(scan_limit)) => self
+                .db_lo()
+                // If there is a previous page, additionally scan the cursor lowerbound.
+                .saturating_add(self.has_cursor_prev_page() as u64)
+                .saturating_add(scan_limit)
+                .min(self.db_hi()),
+        }
+    }
+
+    /// The first transaction scanned, ignoring transactions pointed at by cursors.
+    pub(crate) fn scan_start_cursor(&self) -> u64 {
+        let skip_cursor_lo = self.end == End::Front && self.has_cursor_prev_page();
+        self.scan_lo().saturating_add(skip_cursor_lo as u64)
+    }
+
+    /// The last transaction scanned, ignoring transactions pointed at by cursors.
+    pub(crate) fn scan_end_cursor(&self) -> u64 {
+        let skip_cursor_hi = self.end == End::Back && self.has_cursor_next_page();
+        self.scan_hi().saturating_sub(skip_cursor_hi as u64 + 1)
+    }
+
+    /// Whether there are more transactions to scan before this page.
+    pub(crate) fn scan_has_prev_page(&self) -> bool {
+        self.tx_lo < self.scan_start_cursor()
+    }
+
+    /// Whether there are more transactions to scan after this page.
+    pub(crate) fn scan_has_next_page(&self) -> bool {
+        self.scan_end_cursor() + 1 < self.tx_hi
+    }
+}
+
+/// Determines the maximum value in an arbitrary number of `Option`s.
+fn max_option<T: Ord>(xs: impl IntoIterator<Item = Option<T>>) -> Option<T> {
+    xs.into_iter().flatten().max()
+}
+
+/// Determines the minimum value in an arbitrary number of `Option`s.
+fn min_option<T: Ord>(xs: impl IntoIterator<Item = Option<T>>) -> Option<T> {
+    xs.into_iter().flatten().min()
+}
+
+/// Constructs a `RawQuery` as a join over all relevant side tables, each filtered on its own
+/// condition, plus optionally a sender, plus optionally tx/cp bounds.
+pub(crate) fn subqueries(filter: &TransactionBlockFilter, tx_bounds: TxBounds) -> Option<RawQuery> {
+    let sender = filter.sign_address;
+
+    let mut subqueries = vec![];
+
+    if let Some(f) = &filter.function {
+        subqueries.push(match f {
+            FqNameFilter::ByModule(filter) => match filter {
+                ModuleFilter::ByPackage(p) => ("tx_calls_pkg", select_pkg(p, sender, tx_bounds)),
+                ModuleFilter::ByModule(p, m) => {
+                    ("tx_calls_mod", select_mod(p, m.clone(), sender, tx_bounds))
+                }
+            },
+            FqNameFilter::ByFqName(p, m, n) => (
+                "tx_calls_fun",
+                select_fun(p, m.clone(), n.clone(), sender, tx_bounds),
+            ),
+        });
+    }
+    if let Some(kind) = &filter.kind {
+        subqueries.push(("tx_kinds", select_kind(*kind, sender, tx_bounds)));
+    }
+    if let Some(recv) = &filter.recv_address {
+        subqueries.push(("tx_recipients", select_recipient(recv, sender, tx_bounds)));
+    }
+    if let Some(input) = &filter.input_object {
+        subqueries.push(("tx_input_objects", select_input(input, sender, tx_bounds)));
+    }
+    if let Some(changed) = &filter.changed_object {
+        subqueries.push((
+            "tx_changed_objects",
+            select_changed(changed, sender, tx_bounds),
+        ));
+    }
+    if let Some(sender) = &filter.explicit_sender() {
+        subqueries.push(("tx_senders", select_sender(sender, tx_bounds)));
+    }
+    if let Some(txs) = &filter.transaction_ids {
+        subqueries.push(("tx_digests", select_ids(txs, tx_bounds)));
+    }
+
+    let (_, mut subquery) = subqueries.pop()?;
+
+    if !subqueries.is_empty() {
+        subquery = query!("SELECT tx_sequence_number FROM ({}) AS initial", subquery);
+        while let Some((alias, subselect)) = subqueries.pop() {
+            subquery = inner_join!(subquery, alias => subselect, using: ["tx_sequence_number"]);
+        }
+    }
+
+    Some(subquery)
+}
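The result of `subqueries` for a filter on both a package and a recipient would look roughly like the following (illustrative SQL held in a Rust constant; the exact text depends on the `query!`, `filter!` and `inner_join!` macros, and the byte literals are placeholders):

```rust
// The last subquery pushed becomes the innermost SELECT ("initial"), and each
// remaining lookup table is attached with an INNER JOIN ... USING, under
// hypothetical scan bounds [100, 300).
const EXAMPLE_SQL: &str = r"
SELECT tx_sequence_number FROM (
    SELECT tx_sequence_number FROM tx_recipients
    WHERE 100 <= tx_sequence_number AND tx_sequence_number < 300
      AND recipient = '\x...'
) AS initial
INNER JOIN (
    SELECT tx_sequence_number FROM tx_calls_pkg
    WHERE 100 <= tx_sequence_number AND tx_sequence_number < 300
      AND package = '\x...'
) AS tx_calls_pkg USING (tx_sequence_number)
";
```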
+fn select_tx(sender: Option<SuiAddress>, bound: TxBounds, from: &str) -> RawQuery {
+    let mut query = filter!(
+        query!(format!("SELECT tx_sequence_number FROM {from}")),
+        format!(
+            "{} <= tx_sequence_number AND tx_sequence_number < {}",
+            bound.scan_lo(),
+            bound.scan_hi()
+        )
+    );
+
+    if let Some(sender) = sender {
+        query = filter!(
+            query,
+            format!("sender = {}", bytea_literal(sender.as_slice()))
+        );
+    }
+
+    query
+}
+
+fn select_pkg(pkg: &SuiAddress, sender: Option<SuiAddress>, bound: TxBounds) -> RawQuery {
+    filter!(
+        select_tx(sender, bound, "tx_calls_pkg"),
+        format!("package = {}", bytea_literal(pkg.as_slice()))
+    )
+}
+
+fn select_mod(
+    pkg: &SuiAddress,
+    mod_: String,
+    sender: Option<SuiAddress>,
+    bound: TxBounds,
+) -> RawQuery {
+    filter!(
+        select_tx(sender, bound, "tx_calls_mod"),
+        format!(
+            "package = {} AND module = {{}}",
+            bytea_literal(pkg.as_slice())
+        ),
+        mod_
+    )
+}
+
+fn select_fun(
+    pkg: &SuiAddress,
+    mod_: String,
+    fun: String,
+    sender: Option<SuiAddress>,
+    bound: TxBounds,
+) -> RawQuery {
+    filter!(
+        select_tx(sender, bound, "tx_calls_fun"),
+        format!(
+            "package = {} AND module = {{}} AND func = {{}}",
+            bytea_literal(pkg.as_slice()),
+        ),
+        mod_,
+        fun
+    )
+}
+
+/// Returns a `RawQuery` that selects transactions of a specific kind. If `SystemTx` is specified,
+/// we ignore the `sender`. If `ProgrammableTx` is specified, we filter against the `tx_kinds`
+/// table if no `sender` is provided; otherwise, we just query the `tx_senders` table. Other
+/// combinations, in particular when kind is `SystemTx` and the sender is specified and not 0x0,
+/// are inconsistent and will not produce any results. These inconsistent cases are expected to be
+/// checked for before this is called.
+fn select_kind(
+    kind: TransactionBlockKindInput,
+    sender: Option<SuiAddress>,
+    bound: TxBounds,
+) -> RawQuery {
+    match (kind, sender) {
+        // We can simplify the query to just the `tx_senders` table if `ProgrammableTx` and a
+        // sender are both specified.
+        (TransactionBlockKindInput::ProgrammableTx, Some(sender)) => select_sender(&sender, bound),
+        // Otherwise, we can always ignore the sender and just query the `tx_kinds` table.
+ _ => filter!( + select_tx(None, bound, "tx_kinds"), + format!("tx_kind = {}", kind as i16) + ), + } +} + +fn select_sender(sender: &SuiAddress, bound: TxBounds) -> RawQuery { + select_tx(Some(*sender), bound, "tx_senders") +} + +fn select_recipient(recv: &SuiAddress, sender: Option, bound: TxBounds) -> RawQuery { + filter!( + select_tx(sender, bound, "tx_recipients"), + format!("recipient = {}", bytea_literal(recv.as_slice())) + ) +} + +fn select_input(input: &SuiAddress, sender: Option, bound: TxBounds) -> RawQuery { + filter!( + select_tx(sender, bound, "tx_input_objects"), + format!("object_id = {}", bytea_literal(input.as_slice())) + ) +} + +fn select_changed(changed: &SuiAddress, sender: Option, bound: TxBounds) -> RawQuery { + filter!( + select_tx(sender, bound, "tx_changed_objects"), + format!("object_id = {}", bytea_literal(changed.as_slice())) + ) +} + +fn select_ids(ids: &Vec, bound: TxBounds) -> RawQuery { + let query = select_tx(None, bound, "tx_digests"); + if ids.is_empty() { + filter!(query, "1=0") + } else { + let mut inner = String::new(); + let mut prefix = "tx_digest IN ("; + for id in ids { + write!(&mut inner, "{prefix}{}", bytea_literal(id.as_slice())).unwrap(); + prefix = ", "; + } + inner.push(')'); + filter!(query, inner) + } +} diff --git a/crates/sui-graphql-rpc/src/types/type_filter.rs b/crates/sui-graphql-rpc/src/types/type_filter.rs index 31fa106d43577..f2028483989ff 100644 --- a/crates/sui-graphql-rpc/src/types/type_filter.rs +++ b/crates/sui-graphql-rpc/src/types/type_filter.rs @@ -268,32 +268,6 @@ impl TypeFilter { } impl FqNameFilter { - /// Modify `query` to apply this filter, treating `package` as the column containing the package - /// address, `module` as the module containing the module name, and `name` as the column - /// containing the module member name. - pub(crate) fn apply( - &self, - query: Query, - package: P, - module: M, - name: N, - ) -> Query - where - Query: QueryDsl, - P: Field, - M: Field, - N: Field, - QS: QuerySource, - { - match self { - FqNameFilter::ByModule(filter) => filter.apply(query, package, module), - FqNameFilter::ByFqName(p, m, n) => query - .filter(package.eq(p.into_vec())) - .filter(module.eq(m.clone())) - .filter(name.eq(n.clone())), - } - } - /// Try to create a filter whose results are the intersection of the results of the input /// filters (`self` and `other`). This may not be possible if the resulting filter is /// inconsistent (e.g. a filter that requires the module member's package to be at two different diff --git a/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs b/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs index 3f568010b662f..eb51fe116f263 100644 --- a/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs +++ b/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs @@ -78,14 +78,11 @@ pub(crate) async fn verify_zklogin_signature( )); }; - // fetch on-chain JWKs from dynamic field of system object. Due to recent performance - // degradations, the existing `DynamicField::query` method is now consistently timing out. As a - // workaround, we are using the `query_latest_dynamic_field` method, which fetches object data - // from the live `objects` table. This can be reverted once the `objects_snapshot` lag issue is - // fixed and we've backfilled the `objects_version` table. - let df = DynamicField::query_latest_dynamic_field( - ctx.data_unchecked(), + // fetch on-chain JWKs from dynamic field of system object. 
+ let df = DynamicField::query( + ctx, SUI_AUTHENTICATOR_STATE_ADDRESS.into(), + None, DynamicFieldName { type_: ExactTypeFilter(TypeTag::U64), bcs: Base64(bcs::to_bytes(&1u64).unwrap()), diff --git a/crates/sui-graphql-rpc/tests/examples_validation_tests.rs b/crates/sui-graphql-rpc/tests/examples_validation_tests.rs index fc2a95d21c90b..205c0e1407b5d 100644 --- a/crates/sui-graphql-rpc/tests/examples_validation_tests.rs +++ b/crates/sui-graphql-rpc/tests/examples_validation_tests.rs @@ -3,105 +3,147 @@ #[cfg(feature = "pg_integration")] mod tests { + use anyhow::{anyhow, Context, Result}; use rand::rngs::StdRng; use rand::SeedableRng; use serial_test::serial; use simulacrum::Simulacrum; use std::cmp::max; + use std::collections::BTreeMap; + use std::fs; use std::path::PathBuf; use std::sync::Arc; use sui_graphql_rpc::config::{ConnectionConfig, Limits}; - use sui_graphql_rpc::examples::{load_examples, ExampleQuery, ExampleQueryGroup}; use sui_graphql_rpc::test_infra::cluster::ExecutorCluster; use sui_graphql_rpc::test_infra::cluster::DEFAULT_INTERNAL_DATA_SOURCE_PORT; use tempfile::tempdir; - fn bad_examples() -> ExampleQueryGroup { - ExampleQueryGroup { - name: "bad_examples".to_string(), - queries: vec![ - ExampleQuery { - name: "multiple_queries".to_string(), + struct Example { + contents: String, + path: Option, + } + + fn good_examples() -> Result> { + let examples = PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("examples"); + + let mut dirs = vec![examples.clone()]; + let mut queries = BTreeMap::new(); + while let Some(dir) = dirs.pop() { + let entries = + fs::read_dir(&dir).with_context(|| format!("Looking in {}", dir.display()))?; + + for entry in entries { + let entry = entry.with_context(|| format!("Entry in {}", dir.display()))?; + let path = entry.path(); + let typ_ = entry + .file_type() + .with_context(|| format!("Metadata for {}", path.display()))?; + + if typ_.is_dir() { + dirs.push(entry.path()); + continue; + } + + if path.ends_with(".graphql") { + let contents = fs::read_to_string(&path) + .with_context(|| format!("Reading {}", path.display()))?; + + let rel_path = path + .strip_prefix(&examples) + .with_context(|| format!("Generating name from {}", path.display()))? 
+ .with_extension(""); + + let name = rel_path + .to_str() + .ok_or_else(|| anyhow!("Generating name from {}", path.display()))?; + + queries.insert( + name.to_string(), + Example { + contents, + path: Some(path), + }, + ); + } + } + } + + Ok(queries) + } + + fn bad_examples() -> BTreeMap { + BTreeMap::from_iter([ + ( + "multiple_queries".to_string(), + Example { contents: "{ chainIdentifier } { chainIdentifier }".to_string(), - path: PathBuf::from("multiple_queries.graphql"), + path: None, }, - ExampleQuery { - name: "malformed".to_string(), + ), + ( + "malformed".to_string(), + Example { contents: "query { }}".to_string(), - path: PathBuf::from("malformed.graphql"), + path: None, }, - ExampleQuery { - name: "invalid".to_string(), + ), + ( + "invalid".to_string(), + Example { contents: "djewfbfo".to_string(), - path: PathBuf::from("invalid.graphql"), + path: None, }, - ExampleQuery { - name: "empty".to_string(), + ), + ( + "empty".to_string(), + Example { contents: " ".to_string(), - path: PathBuf::from("empty.graphql"), + path: None, }, - ], - _path: PathBuf::from("bad_examples"), - } + ), + ]) } - async fn validate_example_query_group( + async fn test_query( cluster: &ExecutorCluster, - group: &ExampleQueryGroup, + name: &str, + query: &Example, max_nodes: &mut u64, max_output_nodes: &mut u64, max_depth: &mut u64, max_payload: &mut u64, ) -> Vec { - let mut errors = vec![]; - for query in &group.queries { - let resp = cluster - .graphql_client - .execute_to_graphql(query.contents.clone(), true, vec![], vec![]) - .await - .unwrap(); - resp.errors().iter().for_each(|err| { - errors.push(format!( - "Query failed: {}: {} at: {}\nError: {}", - group.name, - query.name, - query.path.display(), - err - )) - }); - if resp.errors().is_empty() { - let usage = resp - .usage() - .expect("Usage fetch should succeed") - .unwrap_or_else(|| panic!("Usage should be present for query: {}", query.name)); - - let nodes = *usage.get("inputNodes").unwrap_or_else(|| { - panic!("Node usage should be present for query: {}", query.name) - }); - let output_nodes = *usage.get("outputNodes").unwrap_or_else(|| { - panic!( - "Output node usage should be present for query: {}", - query.name - ) - }); - let depth = *usage.get("depth").unwrap_or_else(|| { - panic!("Depth usage should be present for query: {}", query.name) - }); - let payload = *usage.get("queryPayload").unwrap_or_else(|| { - panic!("Payload usage should be present for query: {}", query.name) - }); - *max_nodes = max(*max_nodes, nodes); - *max_output_nodes = max(*max_output_nodes, output_nodes); - *max_depth = max(*max_depth, depth); - *max_payload = max(*max_payload, payload); - } + let resp = cluster + .graphql_client + .execute_to_graphql(query.contents.clone(), true, vec![], vec![]) + .await + .unwrap(); + + let errors = resp.errors(); + if errors.is_empty() { + let usage = resp + .usage() + .expect("Usage not found") + .expect("Usage not found"); + *max_nodes = max(*max_nodes, usage["inputNodes"]); + *max_output_nodes = max(*max_output_nodes, usage["outputNodes"]); + *max_depth = max(*max_depth, usage["depth"]); + *max_payload = max(*max_payload, usage["queryPayload"]); + return vec![]; } + errors + .into_iter() + .map(|e| match &query.path { + Some(p) => format!("Query {name:?} at {} failed: {e}", p.display()), + None => format!("Query {name:?} failed: {e}"), + }) + .collect() } #[tokio::test] #[serial] - async fn test_single_all_examples_structure_valid() { + async fn good_examples_within_limits() { let rng = StdRng::from_seed([12; 32]); let 
data_ingestion_path = tempdir().unwrap().into_path(); let mut sim = Simulacrum::new_with_rng(rng); @@ -119,20 +161,20 @@ mod tests { ) .await; - let groups = load_examples().expect("Could not load examples"); - let mut errors = vec![]; - for group in groups { - let group_errors = validate_example_query_group( - &cluster, - &group, - &mut max_nodes, - &mut max_output_nodes, - &mut max_depth, - &mut max_payload, - ) - .await; - errors.extend(group_errors); + for (name, example) in good_examples().expect("Could not load examples") { + errors.extend( + test_query( + &cluster, + &name, + &example, + &mut max_nodes, + &mut max_output_nodes, + &mut max_depth, + &mut max_payload, + ) + .await, + ); } // Check that our examples can run with our usage limits @@ -167,7 +209,7 @@ mod tests { #[tokio::test] #[serial] - async fn test_bad_examples_fail() { + async fn bad_examples_fail() { let rng = StdRng::from_seed([12; 32]); let data_ingestion_path = tempdir().unwrap().into_path(); let mut sim = Simulacrum::new_with_rng(rng); @@ -185,21 +227,19 @@ mod tests { ) .await; - let bad_examples = bad_examples(); - let errors = validate_example_query_group( - &cluster, - &bad_examples, - &mut max_nodes, - &mut max_output_nodes, - &mut max_depth, - &mut max_payload, - ) - .await; + for (name, example) in bad_examples() { + let errors = test_query( + &cluster, + &name, + &example, + &mut max_nodes, + &mut max_output_nodes, + &mut max_depth, + &mut max_payload, + ) + .await; - assert_eq!( - errors.len(), - bad_examples.queries.len(), - "all examples should fail" - ); + assert!(!errors.is_empty(), "Query {name:?} should have failed"); + } } } diff --git a/crates/sui-graphql-rpc/tests/snapshot_tests.rs b/crates/sui-graphql-rpc/tests/snapshot_tests.rs index 30a66934b5deb..dcefef844375d 100644 --- a/crates/sui-graphql-rpc/tests/snapshot_tests.rs +++ b/crates/sui-graphql-rpc/tests/snapshot_tests.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use insta::assert_snapshot; -use std::fs::write; +use std::fs; use std::path::PathBuf; use sui_graphql_rpc::server::builder::export_schema; @@ -11,9 +11,8 @@ fn test_schema_sdl_export() { let sdl = export_schema(); // update the current schema file - let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - path.extend(["schema", "current_progress_schema.graphql"]); - write(path, &sdl).unwrap(); + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("schema.graphql"); + fs::write(path, &sdl).unwrap(); assert_snapshot!(sdl); } diff --git a/crates/sui-graphql-rpc/tests/snapshots/snapshot_tests__schema_sdl_export.snap b/crates/sui-graphql-rpc/tests/snapshots/snapshot_tests__schema_sdl_export.snap index fae839c9b487a..fd04f186f34b6 100644 --- a/crates/sui-graphql-rpc/tests/snapshots/snapshot_tests__schema_sdl_export.snap +++ b/crates/sui-graphql-rpc/tests/snapshots/snapshot_tests__schema_sdl_export.snap @@ -102,8 +102,27 @@ type Address implements IOwner { """ Similar behavior to the `transactionBlocks` in Query but supporting the additional `AddressTransactionBlockRelationship` filter, which defaults to `SIGN`. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. 
+ + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, relation: AddressTransactionBlockRelationship, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, relation: AddressTransactionBlockRelationship, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! } type AddressConnection { @@ -413,8 +432,25 @@ type Checkpoint { epoch: Epoch """ Transactions in this checkpoint. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range consists of all transactions in this checkpoint. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! } type CheckpointConnection { @@ -521,8 +557,27 @@ type Coin implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). 
If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -680,8 +735,27 @@ type CoinMetadata implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -1097,8 +1171,25 @@ type Epoch { checkpoints(first: Int, after: String, last: Int, before: String): CheckpointConnection! """ The epoch's corresponding transaction blocks. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). 
If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range consists of all transactions in this epoch. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! } type Event { @@ -1433,7 +1524,7 @@ interface IObject { """ The transaction blocks that sent objects to this object. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -1976,8 +2067,27 @@ type MoveObject implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -2165,13 +2275,48 @@ type MovePackage implements IObject & IOwner { The transaction blocks that sent objects to this package. Note that objects that have been sent to a package become inaccessible. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. 
It is required for queries that apply more than two complex filters (on function,
+  kind, sender, recipient, input object, changed object, or ids), and can be at most
+  `serviceConfig.maxScanLimit`.
+
+  When the scan limit is reached the page will be returned even if it has fewer than `first`
+  results when paginating forward (`last` when paginating backwards). If there are more
+  transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to
+  `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last
+  transaction that was scanned as opposed to the last (or first) transaction in the page.
+
+  Requesting the next (or previous) page after this cursor will resume the search, scanning
+  the next `scanLimit` many transactions in the direction of pagination, and so on until all
+  transactions in the scanning range have been visited.
+
+  By default, the scanning range includes all transactions known to GraphQL, but it can be
+  restricted by the `after` and `before` cursors, and the `beforeCheckpoint`,
+  `afterCheckpoint` and `atCheckpoint` filters.
   """
-  receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection!
+  receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection!
   """
   The Base64-encoded BCS serialization of the package's content.
   """
   bcs: Base64
   """
+  Fetch another version of this package (the package that shares this package's original ID,
+  but has the specified `version`).
+  """
+  packageAtVersion(version: Int!): MovePackage
+  """
+  Fetch all versions of this package (packages that share this package's original ID),
+  optionally bounding the versions exclusively from below with `afterVersion`, or from above
+  with `beforeVersion`.
+  """
+  packageVersions(first: Int, after: String, last: Int, before: String, filter: MovePackageVersionFilter): MovePackageConnection!
+  """
+  Fetch the latest version of this package (the package with the highest `version` that shares
+  this package's original ID).
+  """
+  latestPackage: MovePackage!
+  """
   A representation of the module called `name` in this package, including the structs and
   functions it defines.
   """
@@ -2195,6 +2340,22 @@ type MovePackage implements IObject & IOwner {
   moduleBcs: Base64
 }
 
+"""
+Filter for paginating `MovePackage`s that were created within a range of checkpoints.
+"""
+input MovePackageCheckpointFilter {
+  """
+  Fetch packages that were published strictly after this checkpoint. Omitting this fetches
+  packages published since genesis.
+  """
+  afterCheckpoint: UInt53
+  """
+  Fetch packages that were published strictly before this checkpoint. Omitting this fetches
+  packages published up to the latest checkpoint (inclusive).
+  """
+  beforeCheckpoint: UInt53
+}
+
 type MovePackageConnection {
   """
   Information to aid in pagination.
   """
@@ -2224,6 +2385,22 @@ type MovePackageEdge {
   cursor: String!
 }
 
+"""
+Filter for paginating versions of a given `MovePackage`.
+"""
+input MovePackageVersionFilter {
+  """
+  Fetch versions of this package that are strictly newer than this version. Omitting this
+  fetches versions since the original version.
+  """
+  afterVersion: UInt53
+  """
+  Fetch versions of this package that are strictly older than this version. Omitting this
+  fetches versions up to the latest version (inclusive).
+ """ + beforeVersion: UInt53 +} + """ Description of a struct type, defined in a Move module. """ @@ -2496,8 +2673,27 @@ type Object implements IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -3037,13 +3233,33 @@ type Query { state at the latest checkpoint known to the GraphQL RPC. Similarly, `Owner.asObject` will return the object's version at the latest checkpoint. """ - owner(address: SuiAddress!, rootVersion: Int): Owner + owner(address: SuiAddress!, rootVersion: UInt53): Owner """ The object corresponding to the given address at the (optionally) given version. When no version is given, the latest version is returned. """ object(address: SuiAddress!, version: UInt53): Object """ + The package corresponding to the given address (at the optionally given version). + + When no version is given, the package is loaded directly from the address given. Otherwise, + the address is translated before loading to point to the package whose original ID matches + the package at `address`, but whose version is `version`. For non-system packages, this + might result in a different address than `address` because different versions of a package, + introduced by upgrades, exist at distinct addresses. + + Note that this interpretation of `version` is different from a historical object read (the + interpretation of `version` for the `object` query). + """ + package(address: SuiAddress!, version: UInt53): MovePackage + """ + The latest version of the package at `address`. + + This corresponds to the package with the highest `version` that shares its original ID with + the package at `address`. + """ + latestPackage(address: SuiAddress!): MovePackage + """ Look-up an Account by its SuiAddress. """ address(address: SuiAddress!): Address @@ -3078,8 +3294,27 @@ type Query { checkpoints(first: Int, after: String, last: Int, before: String): CheckpointConnection! 
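Operationally, the scan-limited pagination documented on the `transactionBlocks` fields above and below implies a client-side resume loop: re-issue the same filtered query, feeding `pageInfo.endCursor` back in as `after`, until `hasNextPage` is false. A hedged sketch of such a loop follows; the endpoint, filter values, page sizes, and the `reqwest`/`serde_json` client choice are illustrative assumptions, not part of this schema change.

```rust
// Sketch: drain a scan-limited `transactionBlocks` query. When the scan limit
// is hit, `endCursor` points at the last *scanned* transaction (not the last
// result), so resuming from it continues the scan without skipping anything.
use serde_json::{json, Value};

const QUERY: &str = r#"query ($after: String) {
  transactionBlocks(first: 50, scanLimit: 1000, after: $after,
                    filter: { kind: PROGRAMMABLE_TX }) {
    pageInfo { hasNextPage endCursor }
    nodes { digest }
  }
}"#;

async fn scan_all(endpoint: &str) -> anyhow::Result<Vec<String>> {
    let client = reqwest::Client::new();
    let mut after: Option<String> = None;
    let mut digests = Vec::new();
    loop {
        let body = json!({ "query": QUERY, "variables": { "after": after } });
        let resp: Value = client.post(endpoint).json(&body).send().await?.json().await?;
        let conn = &resp["data"]["transactionBlocks"];
        for node in conn["nodes"].as_array().into_iter().flatten() {
            if let Some(digest) = node["digest"].as_str() {
                digests.push(digest.to_owned());
            }
        }
        if conn["pageInfo"]["hasNextPage"].as_bool() == Some(true) {
            // Resume the scan from wherever the previous request stopped.
            after = conn["pageInfo"]["endCursor"].as_str().map(str::to_owned);
        } else {
            return Ok(digests);
        }
    }
}
```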
""" The transaction blocks that exist in the network. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + transactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The events that exist in the network. """ @@ -3089,6 +3324,20 @@ type Query { """ objects(first: Int, after: String, last: Int, before: String, filter: ObjectFilter): ObjectConnection! """ + The Move packages that exist in the network, optionally filtered to be strictly before + `beforeCheckpoint` and/or strictly after `afterCheckpoint`. + + This query returns all versions of a given user package that appear between the specified + checkpoints, but only records the latest versions of system packages. + """ + packages(first: Int, after: String, last: Int, before: String, filter: MovePackageCheckpointFilter): MovePackageConnection! + """ + Fetch all versions of package at `address` (packages that share this package's original ID), + optionally bounding the versions exclusively from below with `afterVersion`, or from above + with `beforeVersion`. + """ + packageVersions(first: Int, after: String, last: Int, before: String, address: SuiAddress!, filter: MovePackageVersionFilter): MovePackageConnection! + """ Fetch the protocol config by protocol version (defaults to the latest protocol version known to the GraphQL service). """ @@ -3283,6 +3532,14 @@ type ServiceConfig { Maximum nesting allowed in struct fields when calculating the layout of a single Move Type. """ maxMoveValueDepth: Int! + """ + Maximum number of transaction ids that can be passed to a `TransactionBlockFilter`. + """ + maxTransactionIds: Int! + """ + Maximum number of candidates to scan when gathering a page of results. + """ + maxScanLimit: Int! } """ @@ -3500,8 +3757,27 @@ type StakedSui implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. 
It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. """ @@ -3702,8 +3978,27 @@ type SuinsRegistration implements IMoveObject & IObject & IOwner { storageRebate: BigInt """ The transaction blocks that sent objects to this object. + + `scanLimit` restricts the number of candidate transactions scanned when gathering a page of + results. It is required for queries that apply more than two complex filters (on function, + kind, sender, recipient, input object, changed object, or ids), and can be at most + `serviceConfig.maxScanLimit`. + + When the scan limit is reached the page will be returned even if it has fewer than `first` + results when paginating forward (`last` when paginating backwards). If there are more + transactions to scan, `pageInfo.hasNextPage` (or `pageInfo.hasPreviousPage`) will be set to + `true`, and `PageInfo.endCursor` (or `PageInfo.startCursor`) will be set to the last + transaction that was scanned as opposed to the last (or first) transaction in the page. + + Requesting the next (or previous) page after this cursor will resume the search, scanning + the next `scanLimit` many transactions in the direction of pagination, and so on until all + transactions in the scanning range have been visited. + + By default, the scanning range includes all transactions known to GraphQL, but it can be + restricted by the `after` and `before` cursors, and the `beforeCheckpoint`, + `afterCheckpoint` and `atCheckpoint` filters. """ - receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter): TransactionBlockConnection! + receivedTransactionBlocks(first: Int, after: String, last: Int, before: String, filter: TransactionBlockFilter, scanLimit: Int): TransactionBlockConnection! """ The Base64-encoded BCS serialization of the object's content. 
""" diff --git a/crates/sui-indexer-builder/Cargo.toml b/crates/sui-indexer-builder/Cargo.toml new file mode 100644 index 0000000000000..4ae5164d862ce --- /dev/null +++ b/crates/sui-indexer-builder/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "sui-indexer-builder" +version = "0.1.0" +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +anyhow.workspace = true +tokio = { workspace = true, features = ["full"] } +async-trait.workspace = true +mysten-metrics.workspace = true +sui-types.workspace = true +sui-data-ingestion-core.workspace = true +tracing.workspace = true +prometheus.workspace = true +telemetry-subscribers.workspace = true \ No newline at end of file diff --git a/crates/sui-indexer-builder/src/indexer_builder.rs b/crates/sui-indexer-builder/src/indexer_builder.rs new file mode 100644 index 0000000000000..dd7b317c76141 --- /dev/null +++ b/crates/sui-indexer-builder/src/indexer_builder.rs @@ -0,0 +1,323 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::cmp::{max, min}; +use std::sync::Arc; + +use anyhow::Error; +use async_trait::async_trait; +use tokio::task::JoinHandle; + +use mysten_metrics::{metered_channel, spawn_monitored_task}; + +use crate::{Task, Tasks}; + +type CheckpointData = (u64, Vec); +pub type DataSender = metered_channel::Sender>; + +pub struct IndexerBuilder { + name: String, + datasource: D, + data_mapper: M, + backfill_strategy: BackfillStrategy, + disable_live_task: bool, +} + +impl IndexerBuilder { + pub fn new(name: &str, datasource: D, data_mapper: M) -> IndexerBuilder { + IndexerBuilder { + name: name.into(), + datasource, + data_mapper, + backfill_strategy: BackfillStrategy::Simple, + disable_live_task: false, + } + } + pub fn build( + self, + start_from_checkpoint: u64, + genesis_checkpoint: u64, + persistent: P, + ) -> Indexer + where + P: Persistent, + { + Indexer { + name: self.name, + storage: persistent, + datasource: self.datasource.into(), + backfill_strategy: self.backfill_strategy, + disable_live_task: self.disable_live_task, + start_from_checkpoint, + data_mapper: self.data_mapper, + genesis_checkpoint, + } + } + + pub fn with_backfill_strategy(mut self, backfill: BackfillStrategy) -> Self { + self.backfill_strategy = backfill; + self + } + + pub fn disable_live_task(mut self) -> Self { + self.disable_live_task = true; + self + } +} + +pub struct Indexer { + name: String, + storage: P, + datasource: Arc, + data_mapper: M, + backfill_strategy: BackfillStrategy, + disable_live_task: bool, + start_from_checkpoint: u64, + genesis_checkpoint: u64, +} + +impl Indexer { + pub async fn start(mut self) -> Result<(), Error> + where + D: Datasource + 'static, + M: DataMapper + 'static, + P: Persistent + 'static, + T: Send, + { + // Update tasks first + self.update_tasks().await?; + // get updated tasks from storage and start workers + let updated_tasks = self.storage.tasks(&self.name).await?; + // Start latest checkpoint worker + // Tasks are ordered in checkpoint descending order, realtime update task always come first + // tasks won't be empty here, ok to unwrap. 
+        let live_task_future = match updated_tasks.live_task() {
+            Some(live_task) if !self.disable_live_task => {
+                let live_task_future = self.datasource.start_ingestion_task(
+                    live_task.task_name.clone(),
+                    live_task.checkpoint,
+                    live_task.target_checkpoint,
+                    self.storage.clone(),
+                    self.data_mapper.clone(),
+                );
+                Some(live_task_future)
+            }
+            _ => None,
+        };
+
+        let backfill_tasks = updated_tasks.backfill_tasks();
+        let storage_clone = self.storage.clone();
+        let data_mapper_clone = self.data_mapper.clone();
+        let datasource_clone = self.datasource.clone();
+
+        let handle = spawn_monitored_task!(async {
+            // Execute the backfill tasks one by one.
+            for backfill_task in backfill_tasks {
+                if backfill_task.checkpoint < backfill_task.target_checkpoint {
+                    datasource_clone
+                        .start_ingestion_task(
+                            backfill_task.task_name.clone(),
+                            backfill_task.checkpoint,
+                            backfill_task.target_checkpoint,
+                            storage_clone.clone(),
+                            data_mapper_clone.clone(),
+                        )
+                        .await
+                        .expect("Backfill task failed");
+                }
+            }
+        });
+
+        if let Some(live_task_future) = live_task_future {
+            live_task_future.await?;
+        }
+
+        tokio::try_join!(handle)?;
+
+        Ok(())
+    }
+
+    async fn update_tasks<R>(&mut self) -> Result<(), Error>
+    where
+        P: Persistent<R>,
+    {
+        let tasks = self.storage.tasks(&self.name).await?;
+        let backfill_tasks = tasks.backfill_tasks();
+        let latest_task = backfill_tasks.first();
+
+        // 1. Create and update the live task if needed.
+        if !self.disable_live_task {
+            let from_checkpoint = max(
+                self.start_from_checkpoint,
+                latest_task
+                    .map(|t| t.target_checkpoint + 1)
+                    .unwrap_or_default(),
+            );
+
+            match tasks.live_task() {
+                None => {
+                    self.storage
+                        .register_task(
+                            format!("{} - Live", self.name),
+                            from_checkpoint,
+                            i64::MAX as u64,
+                        )
+                        .await?;
+                }
+                Some(mut live_task) => {
+                    if self.start_from_checkpoint > live_task.checkpoint {
+                        live_task.checkpoint = self.start_from_checkpoint;
+                        self.storage.update_task(live_task).await?;
+                    }
+                }
+            }
+        }
+
+        // 2. Create backfill tasks based on the task config and the existing tasks in the db.
+        match latest_task {
+            None => {
+                // No task in the database yet: create backfill tasks from genesis up to
+                // (but not including) `start_from_checkpoint`.
+                if self.start_from_checkpoint != self.genesis_checkpoint {
+                    self.create_backfill_tasks(
+                        self.genesis_checkpoint,
+                        self.start_from_checkpoint - 1,
+                    )
+                    .await?
+                }
+            }
+            Some(latest_task) => {
+                if latest_task.target_checkpoint + 1 < self.start_from_checkpoint {
+                    self.create_backfill_tasks(
+                        latest_task.target_checkpoint + 1,
+                        self.start_from_checkpoint - 1,
+                    )
+                    .await?;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    // Create backfill tasks according to the backfill strategy.
+    async fn create_backfill_tasks<R>(&mut self, mut from_cp: u64, to_cp: u64) -> Result<(), Error>
+    where
+        P: Persistent<R>,
+    {
+        match self.backfill_strategy {
+            BackfillStrategy::Simple => {
+                self.storage
+                    .register_task(
+                        format!("{} - backfill - {from_cp}:{to_cp}", self.name),
+                        from_cp,
+                        to_cp,
+                    )
+                    .await
+            }
+            BackfillStrategy::Partitioned { task_size } => {
+                while from_cp < self.start_from_checkpoint {
+                    let target_cp = min(from_cp + task_size - 1, to_cp);
+                    self.storage
+                        .register_task(
+                            format!("{} - backfill - {from_cp}:{target_cp}", self.name),
+                            from_cp,
+                            target_cp,
+                        )
+                        .await?;
+                    from_cp = target_cp + 1;
+                }
+                Ok(())
+            }
+            BackfillStrategy::Disabled => Ok(()),
+        }
+    }
+}
+
+#[async_trait]
+pub trait Persistent<T>: IndexerProgressStore + Sync + Send + Clone {
+    async fn write(&self, data: Vec<T>) -> Result<(), Error>;
+}
+
+#[async_trait]
+pub trait IndexerProgressStore: Send {
+    async fn load_progress(&self, task_name: String) -> anyhow::Result<u64>;
+    async fn save_progress(
+        &mut self,
+        task_name: String,
+        checkpoint_number: u64,
+    ) -> anyhow::Result<()>;
+
+    async fn tasks(&self, task_prefix: &str) -> Result<Vec<Task>, Error>;
+
+    async fn register_task(
+        &mut self,
+        task_name: String,
+        checkpoint: u64,
+        target_checkpoint: u64,
+    ) -> Result<(), anyhow::Error>;
+
+    async fn update_task(&mut self, task: Task) -> Result<(), Error>;
+}
+
+#[async_trait]
+pub trait Datasource<T: Send>: Sync + Send {
+    async fn start_ingestion_task<M, P, R>(
+        &self,
+        task_name: String,
+        starting_checkpoint: u64,
+        target_checkpoint: u64,
+        mut storage: P,
+        data_mapper: M,
+    ) -> Result<(), Error>
+    where
+        M: DataMapper<T, R>,
+        P: Persistent<R>,
+    {
+        // todo: add metrics for number of tasks
+        let (data_sender, mut data_channel) = metered_channel::channel(
+            1000,
+            &mysten_metrics::get_metrics()
+                .unwrap()
+                .channel_inflight
+                .with_label_values(&[&task_name]),
+        );
+        let join_handle = self
+            .start_data_retrieval(starting_checkpoint, target_checkpoint, data_sender)
+            .await?;
+
+        while let Some((block_number, data)) = data_channel.recv().await {
+            if block_number > target_checkpoint {
+                break;
+            }
+            if !data.is_empty() {
+                let processed_data = data.into_iter().try_fold(vec![], |mut result, d| {
+                    result.append(&mut data_mapper.map(d)?);
+                    Ok::<Vec<R>, Error>(result)
+                })?;
+                // TODO: we might be able to write data and progress in a single transaction.
+                storage.write(processed_data).await?;
+            }
+            storage
+                .save_progress(task_name.clone(), block_number)
+                .await?;
+        }
+        join_handle.abort();
+        join_handle.await?
+    }
+
+    async fn start_data_retrieval(
+        &self,
+        starting_checkpoint: u64,
+        target_checkpoint: u64,
+        data_sender: DataSender<T>,
+    ) -> Result<JoinHandle<Result<(), Error>>, Error>;
+}
+
+pub enum BackfillStrategy {
+    Simple,
+    Partitioned { task_size: u64 },
+    Disabled,
+}
+
+pub trait DataMapper<T, R>: Sync + Send + Clone {
+    fn map(&self, data: T) -> Result<Vec<R>, anyhow::Error>;
+}
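Taken together, `IndexerBuilder` wires a `Datasource`, a `DataMapper`, and a `Persistent` store into one pipeline. A hedged usage sketch; the `MyDatasource`, `MyMapper`, and `MyStore` types are hypothetical implementations of the traits above, not part of this change:

```rust
// Sketch only: `MyDatasource`, `MyMapper`, and `MyStore` stand in for real
// implementations of `Datasource`, `DataMapper`, and `Persistent`.
async fn run_indexer(
    datasource: MyDatasource,
    mapper: MyMapper,
    store: MyStore,
) -> Result<(), anyhow::Error> {
    IndexerBuilder::new("my_indexer", datasource, mapper)
        // Carve any missing history into fixed-size backfill tasks.
        .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 1000 })
        // Live ingestion starts at checkpoint 1_000_000; the range from genesis
        // (the second argument, 0) up to 999_999 becomes backfill work.
        .build(1_000_000, 0, store)
        .start()
        .await
}
```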
diff --git a/crates/sui-indexer-builder/src/lib.rs b/crates/sui-indexer-builder/src/lib.rs
new file mode 100644
index 0000000000000..f4cb0d4d32eda
--- /dev/null
+++ b/crates/sui-indexer-builder/src/lib.rs
@@ -0,0 +1,35 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod indexer_builder;
+pub mod sui_datasource;
+
+#[derive(Clone, Debug)]
+pub struct Task {
+    pub task_name: String,
+    pub checkpoint: u64,
+    pub target_checkpoint: u64,
+    pub timestamp: u64,
+}
+
+pub trait Tasks {
+    fn live_task(&self) -> Option<Task>;
+
+    fn backfill_tasks(&self) -> Vec<Task>;
+}
+
+impl Tasks for Vec<Task> {
+    fn live_task(&self) -> Option<Task> {
+        // TODO: Change the schema to record live task properly.
+        self.iter()
+            .find(|t| t.target_checkpoint == i64::MAX as u64)
+            .cloned()
+    }
+
+    fn backfill_tasks(&self) -> Vec<Task> {
+        self.iter()
+            .filter(|t| t.target_checkpoint != i64::MAX as u64)
+            .cloned()
+            .collect()
+    }
+}
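Note that `live_task` keys off the `i64::MAX` sentinel in `target_checkpoint` rather than an explicit flag (hence the TODO above). A minimal illustration of the resulting split, using only the types shown:

```rust
// Minimal illustration of the sentinel-based live/backfill split above.
use sui_indexer_builder::{Task, Tasks};

fn sentinel_demo() {
    let tasks = vec![
        Task {
            task_name: "x - Live".into(),
            checkpoint: 100,
            target_checkpoint: i64::MAX as u64, // sentinel marks the live task
            timestamp: 0,
        },
        Task {
            task_name: "x - backfill - 0:99".into(),
            checkpoint: 99,
            target_checkpoint: 99,
            timestamp: 0,
        },
    ];
    assert_eq!(tasks.live_task().unwrap().task_name, "x - Live");
    assert_eq!(tasks.backfill_tasks().len(), 1);
}
```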
diff --git a/crates/sui-indexer-builder/src/sui_datasource.rs b/crates/sui-indexer-builder/src/sui_datasource.rs
new file mode 100644
index 0000000000000..388308bf10c4a
--- /dev/null
+++ b/crates/sui-indexer-builder/src/sui_datasource.rs
@@ -0,0 +1,146 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::indexer_builder::{DataSender, Datasource};
+use anyhow::Error;
+use async_trait::async_trait;
+use mysten_metrics::{metered_channel, spawn_monitored_task};
+use std::path::PathBuf;
+use sui_data_ingestion_core::{
+    DataIngestionMetrics, IndexerExecutor, ProgressStore, ReaderOptions, Worker, WorkerPool,
+};
+use sui_types::base_types::TransactionDigest;
+use sui_types::full_checkpoint_content::CheckpointData as SuiCheckpointData;
+use sui_types::full_checkpoint_content::CheckpointTransaction;
+use sui_types::messages_checkpoint::CheckpointSequenceNumber;
+use tokio::sync::oneshot;
+use tokio::sync::oneshot::Sender;
+use tokio::task::JoinHandle;
+use tracing::info;
+
+pub struct SuiCheckpointDatasource {
+    remote_store_url: String,
+    concurrency: usize,
+    checkpoint_path: PathBuf,
+    metrics: DataIngestionMetrics,
+}
+impl SuiCheckpointDatasource {
+    pub fn new(
+        remote_store_url: String,
+        concurrency: usize,
+        checkpoint_path: PathBuf,
+        metrics: DataIngestionMetrics,
+    ) -> Self {
+        SuiCheckpointDatasource {
+            remote_store_url,
+            concurrency,
+            checkpoint_path,
+            metrics,
+        }
+    }
+}
+
+#[async_trait]
+impl Datasource<CheckpointTxnData> for SuiCheckpointDatasource {
+    async fn start_data_retrieval(
+        &self,
+        starting_checkpoint: u64,
+        target_checkpoint: u64,
+        data_sender: DataSender<CheckpointTxnData>,
+    ) -> Result<JoinHandle<Result<(), Error>>, Error> {
+        let (exit_sender, exit_receiver) = oneshot::channel();
+        let progress_store = PerTaskInMemProgressStore {
+            current_checkpoint: starting_checkpoint,
+            exit_checkpoint: target_checkpoint,
+            exit_sender: Some(exit_sender),
+        };
+        let mut executor = IndexerExecutor::new(progress_store, 1, self.metrics.clone());
+        let worker = IndexerWorker::new(data_sender);
+        let worker_pool = WorkerPool::new(
+            worker,
+            TransactionDigest::random().to_string(),
+            self.concurrency,
+        );
+        executor.register(worker_pool).await?;
+        let checkpoint_path = self.checkpoint_path.clone();
+        let remote_store_url = self.remote_store_url.clone();
+        Ok(spawn_monitored_task!(async {
+            executor
+                .run(
+                    checkpoint_path,
+                    Some(remote_store_url),
+                    vec![], // optional remote store access options
+                    ReaderOptions::default(),
+                    exit_receiver,
+                )
+                .await?;
+            Ok(())
+        }))
+    }
+}
+
+struct PerTaskInMemProgressStore {
+    pub current_checkpoint: u64,
+    pub exit_checkpoint: u64,
+    pub exit_sender: Option<Sender<()>>,
+}
+
+#[async_trait]
+impl ProgressStore for PerTaskInMemProgressStore {
+    async fn load(
+        &mut self,
+        _task_name: String,
+    ) -> Result<CheckpointSequenceNumber, anyhow::Error> {
+        Ok(self.current_checkpoint)
+    }
+
+    async fn save(
+        &mut self,
+        _task_name: String,
+        checkpoint_number: CheckpointSequenceNumber,
+    ) -> anyhow::Result<()> {
+        if checkpoint_number >= self.exit_checkpoint {
+            if let Some(sender) = self.exit_sender.take() {
+                let _ = sender.send(());
+            }
+        }
+        self.current_checkpoint = checkpoint_number;
+        Ok(())
+    }
+}
+
+pub struct IndexerWorker<T> {
+    data_sender: metered_channel::Sender<(u64, Vec<T>)>,
+}
+
+impl<T> IndexerWorker<T> {
+    pub fn new(data_sender: metered_channel::Sender<(u64, Vec<T>)>) -> Self {
+        Self { data_sender }
+    }
+}
+
+pub type CheckpointTxnData = (CheckpointTransaction, u64, u64);
+
+#[async_trait]
+impl Worker for IndexerWorker<CheckpointTxnData> {
+    async fn process_checkpoint(&self, checkpoint: SuiCheckpointData) -> anyhow::Result<()> {
+        info!(
+            "Received checkpoint [{}] {}: {}",
+            checkpoint.checkpoint_summary.epoch,
+            checkpoint.checkpoint_summary.sequence_number,
+            checkpoint.transactions.len(),
+        );
+        let checkpoint_num = checkpoint.checkpoint_summary.sequence_number;
+        let timestamp_ms = checkpoint.checkpoint_summary.timestamp_ms;
+
+        let transactions = checkpoint
+            .transactions
+            .into_iter()
+            .map(|tx| (tx, checkpoint_num, timestamp_ms))
+            .collect();
+        Ok(self
+            .data_sender
+            .send((checkpoint_num, transactions))
+            .await?)
+    }
+}
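`SuiCheckpointDatasource` emits `(transaction, checkpoint, timestamp_ms)` tuples, so a `DataMapper` for it is just a function over `CheckpointTxnData`. A hedged sketch of one; the `TxDigestMapper` and `TxDigestRecord` names, and the choice of extracting only digests, are illustrative assumptions rather than part of this change:

```rust
// Illustrative mapper over the tuples emitted by `SuiCheckpointDatasource`.
// `TxDigestMapper`/`TxDigestRecord` are hypothetical names.
use sui_indexer_builder::indexer_builder::DataMapper;
use sui_indexer_builder::sui_datasource::CheckpointTxnData;

pub struct TxDigestRecord {
    pub digest: String,
    pub checkpoint: u64,
    pub timestamp_ms: u64,
}

#[derive(Clone)]
pub struct TxDigestMapper;

impl DataMapper<CheckpointTxnData, TxDigestRecord> for TxDigestMapper {
    fn map(
        &self,
        (tx, checkpoint, timestamp_ms): CheckpointTxnData,
    ) -> Result<Vec<TxDigestRecord>, anyhow::Error> {
        // One input tuple may fan out to zero or more records; here it is 1:1.
        Ok(vec![TxDigestRecord {
            digest: tx.transaction.digest().to_string(),
            checkpoint,
            timestamp_ms,
        }])
    }
}
```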
diff --git a/crates/sui-indexer-builder/tests/indexer_test_utils.rs b/crates/sui-indexer-builder/tests/indexer_test_utils.rs
new file mode 100644
index 0000000000000..17e8cb1ad35dc
--- /dev/null
+++ b/crates/sui-indexer-builder/tests/indexer_test_utils.rs
@@ -0,0 +1,149 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+use anyhow::{anyhow, Error};
+use async_trait::async_trait;
+use tokio::sync::Mutex;
+use tokio::task::JoinHandle;
+
+use mysten_metrics::spawn_monitored_task;
+
+use sui_indexer_builder::indexer_builder::{
+    DataMapper, DataSender, Datasource, IndexerProgressStore, Persistent,
+};
+use sui_indexer_builder::Task;
+
+pub struct TestDatasource<T> {
+    pub data: Vec<T>,
+}
+
+#[async_trait]
+impl<T> Datasource<T> for TestDatasource<T>
+where
+    T: Send + Sync + Clone + 'static,
+{
+    async fn start_data_retrieval(
+        &self,
+        starting_checkpoint: u64,
+        _target_checkpoint: u64,
+        data_sender: DataSender<T>,
+    ) -> Result<JoinHandle<Result<(), Error>>, Error> {
+        let data_clone = self.data.clone();
+
+        Ok(spawn_monitored_task!(async {
+            let mut cp = starting_checkpoint;
+            while cp < data_clone.len() as u64 {
+                data_sender
+                    .send((cp, vec![data_clone[cp as usize].clone()]))
+                    .await?;
+                cp += 1;
+            }
+            Ok(())
+        }))
+    }
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct InMemoryPersistent<T> {
+    pub progress_store: Arc<Mutex<HashMap<String, Task>>>,
+    pub data: Arc<Mutex<Vec<T>>>,
+}
+
+impl<T> InMemoryPersistent<T> {
+    pub fn new() -> Self {
+        InMemoryPersistent {
+            progress_store: Default::default(),
+            data: Arc::new(Mutex::new(vec![])),
+        }
+    }
+}
+
+#[async_trait]
+impl<T: Send + Sync> IndexerProgressStore for InMemoryPersistent<T> {
+    async fn load_progress(&self, task_name: String) -> anyhow::Result<u64> {
+        Ok(self
+            .progress_store
+            .lock()
+            .await
+            .get(&task_name)
+            .unwrap()
+            .checkpoint)
+    }
+
+    async fn save_progress(
+        &mut self,
+        task_name: String,
+        checkpoint_number: u64,
+    ) -> anyhow::Result<()> {
+        self.progress_store
+            .lock()
+            .await
+            .get_mut(&task_name)
+            .unwrap()
+            .checkpoint = checkpoint_number;
+        Ok(())
+    }
+
+    async fn tasks(&self, task_prefix: &str) -> Result<Vec<Task>, Error> {
+        let mut tasks = self
+            .progress_store
+            .lock()
+            .await
+            .values()
+            .filter(|task| task.task_name.starts_with(task_prefix))
+            .cloned()
+            .collect::<Vec<_>>();
+        tasks.sort_by(|t1, t2| t2.checkpoint.cmp(&t1.checkpoint));
+        Ok(tasks)
+    }
+
+    async fn register_task(
+        &mut self,
+        task_name: String,
+        checkpoint: u64,
+        target_checkpoint: u64,
+    ) -> Result<(), Error> {
+        let existing = self.progress_store.lock().await.insert(
+            task_name.clone(),
+            Task {
+                task_name: task_name.clone(),
+                checkpoint,
+                target_checkpoint,
+                timestamp: SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64,
+            },
+        );
+        if existing.is_some() {
+            return Err(anyhow!("Task {task_name} already exists"));
+        }
+        Ok(())
+    }
+
+    async fn update_task(&mut self, task: Task) -> Result<(), Error> {
+        self.progress_store
+            .lock()
+            .await
+            .insert(task.task_name.clone(), task);
+        Ok(())
+    }
+}
+
+#[async_trait]
+impl<T: Send + Sync + Clone> Persistent<T> for InMemoryPersistent<T> {
+    async fn write(&self, data: Vec<T>) -> Result<(), Error> {
+        self.data.lock().await.append(&mut data.clone());
+        Ok(())
+    }
+}
+
+#[derive(Clone)]
+pub struct NoopDataMapper;
+
+impl<T> DataMapper<T, T> for NoopDataMapper {
+    fn map(&self, data: T) -> Result<Vec<T>, Error> {
+        Ok(vec![data])
+    }
+}
diff --git a/crates/sui-indexer-builder/tests/indexer_tests.rs b/crates/sui-indexer-builder/tests/indexer_tests.rs
new file mode 100644
index 0000000000000..c4febf9069a76
--- /dev/null
+++ b/crates/sui-indexer-builder/tests/indexer_tests.rs
@@ -0,0 +1,191 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::indexer_test_utils::{InMemoryPersistent, NoopDataMapper, TestDatasource};
+use prometheus::Registry;
+use sui_indexer_builder::indexer_builder::{
+    BackfillStrategy, IndexerBuilder, IndexerProgressStore,
+};
+use sui_indexer_builder::Task;
+
+mod indexer_test_utils;
+
+#[tokio::test]
+async fn indexer_simple_backfill_task_test() {
+    telemetry_subscribers::init_for_testing();
+    let registry = Registry::new();
+    mysten_metrics::init_metrics(&registry);
+
+    let data = (0..=10u64).collect::<Vec<_>>();
+    let datasource = TestDatasource { data: data.clone() };
+    let persistent = InMemoryPersistent::new();
+    let indexer = IndexerBuilder::new("test_indexer", datasource, NoopDataMapper).build(
+        5,
+        0,
+        persistent.clone(),
+    );
+
+    indexer.start().await.unwrap();
+
+    // it should have 2 tasks created for the indexer - a live task and a backfill task
+    let tasks = persistent.tasks("test_indexer").await.unwrap();
+    assert_eq!(2, tasks.len());
+    // the tasks should be ordered by checkpoint number;
+    // the first one will be the live task and the second one will be the backfill
+    assert_eq!(10, tasks.first().unwrap().checkpoint);
+    assert_eq!(i64::MAX as u64, tasks.first().unwrap().target_checkpoint);
+    assert_eq!(4, tasks.last().unwrap().checkpoint);
+    assert_eq!(4, tasks.last().unwrap().target_checkpoint);
+
+    // the data recorded in storage should be the same as the datasource
+    let mut recorded_data = persistent.data.lock().await.clone();
+    recorded_data.sort();
+    assert_eq!(data, recorded_data);
+}
+
+#[tokio::test]
+async fn indexer_partitioned_backfill_task_test() {
+    telemetry_subscribers::init_for_testing();
+    let registry = Registry::new();
+    mysten_metrics::init_metrics(&registry);
+
+    let data = (0..=50u64).collect::<Vec<_>>();
+    let datasource = TestDatasource { data: data.clone() };
+    let persistent = InMemoryPersistent::new();
+    let indexer = IndexerBuilder::new("test_indexer", datasource, NoopDataMapper)
+        .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 10 })
+        .build(35, 0, persistent.clone());
+    indexer.start().await.unwrap();
+
+    // it should have 5 tasks created for the indexer - a live task and 4 backfill tasks
+    let tasks = persistent.tasks("test_indexer").await.unwrap();
+    assert_eq!(5, tasks.len());
+    // the tasks should be ordered by checkpoint number;
+    // the first one will be the live task and the rest will be the backfills
+    assert_eq!(50, tasks.first().unwrap().checkpoint);
+    assert_eq!(i64::MAX as u64, tasks.first().unwrap().target_checkpoint);
+    assert_eq!(34, tasks.get(1).unwrap().checkpoint);
+    assert_eq!(34, tasks.get(1).unwrap().target_checkpoint);
+    assert_eq!(29, tasks.get(2).unwrap().checkpoint);
+    assert_eq!(29, tasks.get(2).unwrap().target_checkpoint);
+    assert_eq!(19, tasks.get(3).unwrap().checkpoint);
+    assert_eq!(19, tasks.get(3).unwrap().target_checkpoint);
+    assert_eq!(9, tasks.get(4).unwrap().checkpoint);
+    assert_eq!(9, tasks.get(4).unwrap().target_checkpoint);
+    // the data recorded in storage should be the same as the datasource
+    let mut recorded_data = persistent.data.lock().await.clone();
+    recorded_data.sort();
+    assert_eq!(data, recorded_data);
+}
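The task layout asserted above falls straight out of `create_backfill_tasks` with `task_size: 10` over the backfill range `0..=34` (checkpoint 35 onward belongs to the live task): partitions `0:9`, `10:19`, `20:29`, `30:34`, whose checkpoints equal their targets once complete, which is why the descending sort sees 34/29/19/9. A standalone sketch of the same arithmetic; the `partition` helper is illustrative and not part of the crate:

```rust
// Illustrative re-derivation of the bounds produced by the
// `Partitioned { task_size }` strategy; not part of the crate itself.
fn partition(mut from: u64, to: u64, task_size: u64) -> Vec<(u64, u64)> {
    let mut spans = Vec::new();
    while from <= to {
        let target = (from + task_size - 1).min(to);
        spans.push((from, target));
        from = target + 1;
    }
    spans
}

#[test]
fn partition_matches_test_expectations() {
    assert_eq!(
        partition(0, 34, 10),
        vec![(0, 9), (10, 19), (20, 29), (30, 34)]
    );
}
```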
+#[tokio::test]
+async fn indexer_partitioned_task_with_data_already_in_db_test() {
+    telemetry_subscribers::init_for_testing();
+    let registry = Registry::new();
+    mysten_metrics::init_metrics(&registry);
+
+    let data = (0..=50u64).collect::<Vec<_>>();
+    let datasource = TestDatasource { data: data.clone() };
+    let persistent = InMemoryPersistent::new();
+    persistent.data.lock().await.append(&mut (0..=30).collect());
+    persistent.progress_store.lock().await.insert(
+        "test_indexer - backfill - 1".to_string(),
+        Task {
+            task_name: "test_indexer - backfill - 1".to_string(),
+            checkpoint: 30,
+            target_checkpoint: 30,
+            timestamp: 0,
+        },
+    );
+    let indexer = IndexerBuilder::new("test_indexer", datasource, NoopDataMapper)
+        .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 10 })
+        .build(25, 0, persistent.clone());
+    indexer.start().await.unwrap();
+
+    // it should have 2 tasks created for the indexer, one existing task and one live task
+    let tasks = persistent.tasks("test_indexer").await.unwrap();
+    assert_eq!(2, tasks.len());
+    // the first one will be the live task
+    assert_eq!(50, tasks.first().unwrap().checkpoint);
+    assert_eq!(i64::MAX as u64, tasks.first().unwrap().target_checkpoint);
+    // the data recorded in storage should be the same as the datasource
+    let mut recorded_data = persistent.data.lock().await.clone();
+    recorded_data.sort();
+    assert_eq!(data, recorded_data);
+}
+
+#[tokio::test]
+async fn indexer_partitioned_task_with_data_already_in_db_test2() {
+    telemetry_subscribers::init_for_testing();
+    let registry = Registry::new();
+    mysten_metrics::init_metrics(&registry);
+
+    let data = (0..=50u64).collect::<Vec<_>>();
+    let datasource = TestDatasource { data: data.clone() };
+    let persistent = InMemoryPersistent::new();
+    persistent.data.lock().await.append(&mut (0..=30).collect());
+    persistent.progress_store.lock().await.insert(
+        "test_indexer - backfill - 1".to_string(),
+        Task {
+            task_name: "test_indexer - backfill - 1".to_string(),
+            checkpoint: 30,
+            target_checkpoint: 30,
+            timestamp: 0,
+        },
+    );
+    let indexer = IndexerBuilder::new("test_indexer", datasource, NoopDataMapper)
+        .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 10 })
+        .build(35, 0, persistent.clone());
+    indexer.start().await.unwrap();
+
+    // it should have 3 tasks created for the indexer: the existing task, a backfill task
+    // from cp 31 to cp 34, and a live task
+    let tasks = persistent.tasks("test_indexer").await.unwrap();
+    assert_eq!(3, tasks.len());
+    // the tasks should be ordered by checkpoint number;
+    // the first one will be the live task and the rest will be the backfills
+    assert_eq!(50, tasks.first().unwrap().checkpoint);
+    assert_eq!(i64::MAX as u64, tasks.first().unwrap().target_checkpoint);
+    assert_eq!(34, tasks.get(1).unwrap().checkpoint);
+    assert_eq!(34, tasks.get(1).unwrap().target_checkpoint);
+    assert_eq!(30, tasks.get(2).unwrap().checkpoint);
+    assert_eq!(30, tasks.get(2).unwrap().target_checkpoint);
+    // the data recorded in storage should be the same as the datasource
+    let mut recorded_data = persistent.data.lock().await.clone();
+    recorded_data.sort();
+    assert_eq!(data, recorded_data);
+}
+#[tokio::test]
+async fn resume_test() {
+    telemetry_subscribers::init_for_testing();
+    let registry = Registry::new();
+    mysten_metrics::init_metrics(&registry);
+
+    let data = (0..=50u64).collect::<Vec<_>>();
+    let datasource = TestDatasource { data: data.clone() };
+    let persistent = InMemoryPersistent::new();
+    persistent.progress_store.lock().await.insert(
+        "test_indexer - backfill - 30".to_string(),
+        Task {
+            task_name: "test_indexer - backfill - 30".to_string(),
+            checkpoint: 10,
+            target_checkpoint: 30,
+            timestamp: 0,
+        },
+    );
+    let indexer = IndexerBuilder::new("test_indexer", datasource, NoopDataMapper)
+        .with_backfill_strategy(BackfillStrategy::Simple)
+        .build(30, 0, persistent.clone());
+    indexer.start().await.unwrap();
+
+    // it should have 2 tasks created for the indexer, one existing task and one live task
+    let tasks = persistent.tasks("test_indexer").await.unwrap();
+    assert_eq!(2, tasks.len());
+    // the first one will be the live task
+    assert_eq!(50, tasks.first().unwrap().checkpoint);
+    assert_eq!(i64::MAX as u64, tasks.first().unwrap().target_checkpoint);
+    // the recorded data should be the datasource data from the resumed checkpoint (10) onward
+    let mut recorded_data = persistent.data.lock().await.clone();
+    recorded_data.sort();
+    assert_eq!((10..=50u64).collect::<Vec<_>>(), recorded_data);
+}
diff --git a/crates/sui-indexer/README.md b/crates/sui-indexer/README.md
index cce84ca39feba..d81a7f2e89910 100644
--- a/crates/sui-indexer/README.md
+++ b/crates/sui-indexer/README.md
@@ -1,5 +1,7 @@
 Sui indexer is an off-fullnode service to serve data from Sui protocol, including both data directly generated from chain and derivative data.
+⚠ **Warning:** Sui indexer is still experimental and we expect occasional breaking changes that require backfills.
+ ## Architecture ![enhanced_FN](https://user-images.githubusercontent.com/106119108/221022505-a1d873c6-60e2-45f1-b2aa-e50192c4dfbb.png) diff --git a/crates/sui-indexer/migrations/mysql/2024-04-24-180249_packages/up.sql b/crates/sui-indexer/migrations/mysql/2024-04-24-180249_packages/up.sql index f3fe2539038fc..7ee89206f254f 100644 --- a/crates/sui-indexer/migrations/mysql/2024-04-24-180249_packages/up.sql +++ b/crates/sui-indexer/migrations/mysql/2024-04-24-180249_packages/up.sql @@ -1,7 +1,14 @@ CREATE TABLE packages ( - package_id blob NOT NULL, + package_id BLOB NOT NULL, + original_id BLOB NOT NULL, + package_version BIGINT NOT NULL, -- bcs serialized MovePackage - move_package MEDIUMBLOB NOT NULL, - CONSTRAINT packages_pk PRIMARY KEY (package_id(255)) + move_package MEDIUMBLOB NOT NULL, + checkpoint_sequence_number BIGINT NOT NULL, + CONSTRAINT packages_pk PRIMARY KEY (package_id(32), original_id(32), package_version), + CONSTRAINT packages_unique_package_id UNIQUE (package_id(32)) ); + +CREATE INDEX packages_cp_id_version ON packages (checkpoint_sequence_number, original_id(32), package_version); +CREATE INDEX packages_id_version_cp ON packages (original_id(32), package_version, checkpoint_sequence_number); diff --git a/crates/sui-indexer/migrations/mysql/2024-05-05-155158_obj_indices/down.sql b/crates/sui-indexer/migrations/mysql/2024-05-05-155158_obj_indices/down.sql new file mode 100644 index 0000000000000..7a3a7670f24c2 --- /dev/null +++ b/crates/sui-indexer/migrations/mysql/2024-05-05-155158_obj_indices/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS objects_version; diff --git a/crates/sui-indexer/migrations/mysql/2024-05-05-155158_obj_indices/up.sql b/crates/sui-indexer/migrations/mysql/2024-05-05-155158_obj_indices/up.sql new file mode 100644 index 0000000000000..e501b71a073c0 --- /dev/null +++ b/crates/sui-indexer/migrations/mysql/2024-05-05-155158_obj_indices/up.sql @@ -0,0 +1,9 @@ +-- The Postgres version of this table is partitioned by the first byte +-- of object_id, but this kind of partition is not easily supported in +-- MySQL, so this variant is unpartitioned for now. 
+CREATE TABLE objects_version (
+    object_id BLOB NOT NULL,
+    object_version BIGINT NOT NULL,
+    cp_sequence_number BIGINT NOT NULL,
+    PRIMARY KEY (object_id(32), object_version)
+)
diff --git a/crates/sui-indexer/migrations/pg/2023-08-19-044020_events/up.sql b/crates/sui-indexer/migrations/pg/2023-08-19-044020_events/up.sql
index a6c0d70566e7b..dfbfa3ea14495 100644
--- a/crates/sui-indexer/migrations/pg/2023-08-19-044020_events/up.sql
+++ b/crates/sui-indexer/migrations/pg/2023-08-19-044020_events/up.sql
@@ -1,3 +1,4 @@
+-- TODO: modify queries in indexer reader to take advantage of the new indices
 CREATE TABLE events
 (
     tx_sequence_number          BIGINT       NOT NULL,
@@ -23,8 +24,8 @@ CREATE TABLE events
     timestamp_ms                BIGINT       NOT NULL,
     -- bcs of the Event contents (Event.contents)
     bcs                         BYTEA        NOT NULL,
-    PRIMARY KEY(tx_sequence_number, event_sequence_number, checkpoint_sequence_number)
-) PARTITION BY RANGE (checkpoint_sequence_number);
+    PRIMARY KEY(tx_sequence_number, event_sequence_number)
+) PARTITION BY RANGE (tx_sequence_number);
 CREATE TABLE events_partition_0 PARTITION OF events FOR VALUES FROM (0) TO (MAXVALUE);
 CREATE INDEX events_package ON events (package, tx_sequence_number, event_sequence_number);
 CREATE INDEX events_package_module ON events (package, module, tx_sequence_number, event_sequence_number);
diff --git a/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql b/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql
index e5850457f922e..15e9dc9f1cb82 100644
--- a/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql
+++ b/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql
@@ -1,2 +1,3 @@
 -- This file should undo anything in `up.sql`
 DROP TABLE IF EXISTS transactions;
+DROP TABLE IF EXISTS transactions_partition_0;
diff --git a/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql b/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql
index ede66ad44798c..f5404e3610751 100644
--- a/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql
+++ b/crates/sui-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql
@@ -18,10 +18,6 @@ CREATE TABLE transactions (
     -- number of successful commands in this transaction, bound by the number of commands
     -- in a programmable transaction.
     success_command_count smallint NOT NULL,
-    PRIMARY KEY (tx_sequence_number, checkpoint_sequence_number)
-) PARTITION BY RANGE (checkpoint_sequence_number);
+    PRIMARY KEY (tx_sequence_number)
+) PARTITION BY RANGE (tx_sequence_number);
 CREATE TABLE transactions_partition_0 PARTITION OF transactions FOR VALUES FROM (0) TO (MAXVALUE);
-CREATE INDEX transactions_transaction_digest ON transactions (transaction_digest);
-CREATE INDEX transactions_checkpoint_sequence_number ON transactions (checkpoint_sequence_number);
--- only create index for system transactions (0). See types.rs
-CREATE INDEX transactions_transaction_kind ON transactions (transaction_kind) WHERE transaction_kind = 0;
diff --git a/crates/sui-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql b/crates/sui-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql
index 5f7281a2e1a1d..ddb63b020de70 100644
--- a/crates/sui-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql
+++ b/crates/sui-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql
@@ -1,15 +1,15 @@
 CREATE TABLE checkpoints
 (
-    sequence_number bigint PRIMARY KEY,
-    checkpoint_digest bytea NOT NULL,
-    epoch bigint NOT NULL,
+    sequence_number BIGINT PRIMARY KEY,
+    checkpoint_digest BYTEA NOT NULL,
+    epoch BIGINT NOT NULL,
     -- total transactions in the network at the end of this checkpoint (including itself)
-    network_total_transactions bigint NOT NULL,
-    previous_checkpoint_digest bytea,
+    network_total_transactions BIGINT NOT NULL,
+    previous_checkpoint_digest BYTEA,
     -- if this checkpoint is the last checkpoint of an epoch
     end_of_epoch boolean NOT NULL,
     -- array of TransactionDigest in bytes included in this checkpoint
-    tx_digests bytea[] NOT NULL,
+    tx_digests BYTEA[] NOT NULL,
     timestamp_ms BIGINT NOT NULL,
     total_gas_cost BIGINT NOT NULL,
     computation_cost BIGINT NOT NULL,
@@ -17,11 +17,13 @@ CREATE TABLE checkpoints
     storage_rebate BIGINT NOT NULL,
     non_refundable_storage_fee BIGINT NOT NULL,
     -- bcs serialized Vec<CheckpointCommitment> bytes
-    checkpoint_commitments bytea NOT NULL,
+    checkpoint_commitments BYTEA NOT NULL,
     -- bcs serialized AggregateAuthoritySignature bytes
-    validator_signature bytea NOT NULL,
+    validator_signature BYTEA NOT NULL,
     -- bcs serialized EndOfEpochData bytes, if the checkpoint marks end of an epoch
-    end_of_epoch_data bytea
+    end_of_epoch_data BYTEA,
+    min_tx_sequence_number BIGINT,
+    max_tx_sequence_number BIGINT
 );
 
 CREATE INDEX checkpoints_epoch ON checkpoints (epoch, sequence_number);
diff --git a/crates/sui-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql b/crates/sui-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql
index 4a0a17289ccec..5b540121cb849 100644
--- a/crates/sui-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql
+++ b/crates/sui-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql
@@ -26,3 +26,22 @@ CREATE TABLE epochs
     -- of the epoch
     epoch_commitments bytea
 );
+
+-- Table storing the protocol configs for each protocol version.
+-- Examples include gas schedule, transaction limits, etc.
+CREATE TABLE protocol_configs
+(
+    protocol_version BIGINT NOT NULL,
+    config_name TEXT NOT NULL,
+    config_value TEXT,
+    PRIMARY KEY(protocol_version, config_name)
+);
+
+-- Table storing the feature flags for each protocol version.
+CREATE TABLE feature_flags +( + protocol_version BIGINT NOT NULL, + flag_name TEXT NOT NULL, + flag_value BOOLEAN NOT NULL, + PRIMARY KEY(protocol_version, flag_name) +); diff --git a/crates/sui-indexer/migrations/pg/2023-08-19-060729_packages/up.sql b/crates/sui-indexer/migrations/pg/2023-08-19-060729_packages/up.sql index a95489af4dc41..f08a5549608eb 100644 --- a/crates/sui-indexer/migrations/pg/2023-08-19-060729_packages/up.sql +++ b/crates/sui-indexer/migrations/pg/2023-08-19-060729_packages/up.sql @@ -1,6 +1,14 @@ -CREATE TABLE packages +CREATE TABLE packages ( - package_id bytea PRIMARY KEY, + package_id bytea NOT NULL, + original_id bytea NOT NULL, + package_version bigint NOT NULL, -- bcs serialized MovePackage - move_package bytea NOT NULL + move_package bytea NOT NULL, + checkpoint_sequence_number bigint NOT NULL, + CONSTRAINT packages_pkey PRIMARY KEY (package_id, original_id, package_version), + CONSTRAINT packages_unique_package_id UNIQUE (package_id) ); + +CREATE INDEX packages_cp_id_version ON packages (checkpoint_sequence_number, original_id, package_version); +CREATE INDEX packages_id_version_cp ON packages (original_id, package_version, checkpoint_sequence_number); diff --git a/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql b/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql index 8e4f29f981c22..f5604c0db5357 100644 --- a/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql +++ b/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql @@ -2,5 +2,8 @@ DROP TABLE IF EXISTS tx_senders; DROP TABLE IF EXISTS tx_recipients; DROP TABLE IF EXISTS tx_input_objects; DROP TABLE IF EXISTS tx_changed_objects; -DROP TABLE IF EXISTS tx_calls; +DROP TABLE IF EXISTS tx_calls_pkg; +DROP TABLE IF EXISTS tx_calls_mod; +DROP TABLE IF EXISTS tx_calls_fun; DROP TABLE IF EXISTS tx_digests; +DROP TABLE IF EXISTS tx_kinds; diff --git a/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql b/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql index ed81a281f2b0a..0bcd824e31254 100644 --- a/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql +++ b/crates/sui-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql @@ -1,57 +1,70 @@ CREATE TABLE tx_senders ( - cp_sequence_number BIGINT NOT NULL, tx_sequence_number BIGINT NOT NULL, - -- SuiAddress in bytes. sender BYTEA NOT NULL, - PRIMARY KEY(sender, tx_sequence_number, cp_sequence_number) + PRIMARY KEY(sender, tx_sequence_number) ); -CREATE INDEX tx_senders_tx_sequence_number_index ON tx_senders (tx_sequence_number, cp_sequence_number); CREATE TABLE tx_recipients ( - cp_sequence_number BIGINT NOT NULL, tx_sequence_number BIGINT NOT NULL, - -- SuiAddress in bytes. recipient BYTEA NOT NULL, - PRIMARY KEY(recipient, tx_sequence_number, cp_sequence_number) + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) ); -CREATE INDEX tx_recipients_tx_sequence_number_index ON tx_recipients (tx_sequence_number, cp_sequence_number); +CREATE INDEX tx_recipients_sender ON tx_recipients (sender, recipient, tx_sequence_number); CREATE TABLE tx_input_objects ( - cp_sequence_number BIGINT NOT NULL, tx_sequence_number BIGINT NOT NULL, - -- Object ID in bytes. 
object_id BYTEA NOT NULL, - PRIMARY KEY(object_id, tx_sequence_number, cp_sequence_number) + sender BYTEA NOT NULL, + PRIMARY KEY(object_id, tx_sequence_number) ); CREATE INDEX tx_input_objects_tx_sequence_number_index ON tx_input_objects (tx_sequence_number); +CREATE INDEX tx_input_objects_sender ON tx_input_objects (sender, object_id, tx_sequence_number); CREATE TABLE tx_changed_objects ( - cp_sequence_number BIGINT NOT NULL, tx_sequence_number BIGINT NOT NULL, - -- Object Id in bytes. object_id BYTEA NOT NULL, - PRIMARY KEY(object_id, tx_sequence_number, cp_sequence_number) + sender BYTEA NOT NULL, + PRIMARY KEY(object_id, tx_sequence_number) ); CREATE INDEX tx_changed_objects_tx_sequence_number_index ON tx_changed_objects (tx_sequence_number); +CREATE INDEX tx_changed_objects_sender ON tx_changed_objects (sender, object_id, tx_sequence_number); + +CREATE TABLE tx_calls_pkg ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number) +); +CREATE INDEX tx_calls_pkg_sender ON tx_calls_pkg (sender, package, tx_sequence_number); + +CREATE TABLE tx_calls_mod ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + module TEXT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number) +); +CREATE INDEX tx_calls_mod_sender ON tx_calls_mod (sender, package, module, tx_sequence_number); -CREATE TABLE tx_calls ( - cp_sequence_number BIGINT NOT NULL, +CREATE TABLE tx_calls_fun ( tx_sequence_number BIGINT NOT NULL, package BYTEA NOT NULL, module TEXT NOT NULL, func TEXT NOT NULL, - -- 1. Using Primary Key as a unique index. - -- 2. Diesel does not like tables with no primary key. - PRIMARY KEY(package, tx_sequence_number, cp_sequence_number) + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, func, tx_sequence_number) ); -CREATE INDEX tx_calls_module ON tx_calls (package, module, tx_sequence_number, cp_sequence_number); -CREATE INDEX tx_calls_func ON tx_calls (package, module, func, tx_sequence_number, cp_sequence_number); -CREATE INDEX tx_calls_tx_sequence_number ON tx_calls (tx_sequence_number, cp_sequence_number); +CREATE INDEX tx_calls_fun_sender ON tx_calls_fun (sender, package, module, func, tx_sequence_number); --- un-partitioned table for tx_digest -> (cp_sequence_number, tx_sequence_number) lookup. 
CREATE TABLE tx_digests ( tx_digest BYTEA PRIMARY KEY, - cp_sequence_number BIGINT NOT NULL, tx_sequence_number BIGINT NOT NULL ); CREATE INDEX tx_digests_tx_sequence_number ON tx_digests (tx_sequence_number); + +CREATE TABLE tx_kinds ( + tx_sequence_number BIGINT NOT NULL, + tx_kind SMALLINT NOT NULL, + PRIMARY KEY(tx_kind, tx_sequence_number) +); diff --git a/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql b/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql index 1693f3892a5fa..bab0311186e1d 100644 --- a/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql +++ b/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql @@ -1 +1,2 @@ DROP PROCEDURE IF EXISTS advance_partition; +DROP PROCEDURE IF EXISTS drop_partition; diff --git a/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql b/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql index cb24af8e09934..8ca64b86a7081 100644 --- a/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql +++ b/crates/sui-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql @@ -1,10 +1,10 @@ -CREATE OR REPLACE PROCEDURE advance_partition(table_name TEXT, last_epoch BIGINT, new_epoch BIGINT, last_epoch_start_cp BIGINT, new_epoch_start_cp BIGINT) +CREATE OR REPLACE PROCEDURE advance_partition(table_name TEXT, last_epoch BIGINT, new_epoch BIGINT, last_epoch_start BIGINT, new_epoch_start BIGINT) LANGUAGE plpgsql AS $$ BEGIN EXECUTE format('ALTER TABLE %I DETACH PARTITION %I_partition_%s', table_name, table_name, last_epoch); - EXECUTE format('ALTER TABLE %I ATTACH PARTITION %I_partition_%s FOR VALUES FROM (%L) TO (%L)', table_name, table_name, last_epoch, last_epoch_start_cp, new_epoch_start_cp); - EXECUTE format('CREATE TABLE IF NOT EXISTS %I_partition_%s PARTITION OF %I FOR VALUES FROM (%L) TO (MAXVALUE)', table_name, new_epoch, table_name, new_epoch_start_cp); + EXECUTE format('ALTER TABLE %I ATTACH PARTITION %I_partition_%s FOR VALUES FROM (%L) TO (%L)', table_name, table_name, last_epoch, last_epoch_start, new_epoch_start); + EXECUTE format('CREATE TABLE IF NOT EXISTS %I_partition_%s PARTITION OF %I FOR VALUES FROM (%L) TO (MAXVALUE)', table_name, new_epoch, table_name, new_epoch_start); END; $$; diff --git a/crates/sui-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql b/crates/sui-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql new file mode 100644 index 0000000000000..7a3a7670f24c2 --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS objects_version; diff --git a/crates/sui-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql b/crates/sui-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql new file mode 100644 index 0000000000000..666e5a2423319 --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql @@ -0,0 +1,31 @@ +-- Indexing table mapping an object's ID and version to its checkpoint +-- sequence number, partitioned by the first byte of its Object ID. +CREATE TABLE objects_version ( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + cp_sequence_number bigint NOT NULL, + PRIMARY KEY (object_id, object_version) +) PARTITION BY RANGE (object_id); + +-- Create a partition for each first byte value. 
+DO $$ +DECLARE + lo text; + hi text; +BEGIN + FOR i IN 0..254 LOOP + lo := LPAD(TO_HEX(i), 2, '0'); + hi := LPAD(TO_HEX(i + 1), 2, '0'); + EXECUTE FORMAT($F$ + CREATE TABLE objects_version_%1$s PARTITION OF objects_version FOR VALUES + FROM (E'\\x%1$s00000000000000000000000000000000000000000000000000000000000000') + TO (E'\\x%2$s00000000000000000000000000000000000000000000000000000000000000'); + $F$, lo, hi); + END LOOP; +END; +$$ LANGUAGE plpgsql; + +-- Special case for the last partition, because of the upper bound. +CREATE TABLE objects_version_ff PARTITION OF objects_version FOR VALUES +FROM (E'\\xff00000000000000000000000000000000000000000000000000000000000000') +TO (MAXVALUE); diff --git a/crates/sui-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql b/crates/sui-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql new file mode 100644 index 0000000000000..3583887435168 --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS event_emit_package; +DROP TABLE IF EXISTS event_emit_module; +DROP TABLE IF EXISTS event_struct_package; +DROP TABLE IF EXISTS event_struct_module; +DROP TABLE IF EXISTS event_struct_name; +DROP TABLE IF EXISTS event_struct_instantiation; +DROP TABLE IF EXISTS event_senders; diff --git a/crates/sui-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql b/crates/sui-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql new file mode 100644 index 0000000000000..a89625146a9fd --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql @@ -0,0 +1,74 @@ +CREATE TABLE event_emit_package +( + package BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_emit_package_sender ON event_emit_package (sender, package, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_emit_module +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_emit_module_sender ON event_emit_module (sender, package, module, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_package +( + package BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_package_sender ON event_struct_package (sender, package, tx_sequence_number, event_sequence_number); + + +CREATE TABLE event_struct_module +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_module_sender ON event_struct_module (sender, package, module, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_name +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + type_name TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, type_name, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_name_sender ON event_struct_name (sender, 
package, module, type_name, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_instantiation +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + type_instantiation TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, type_instantiation, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_instantiation_sender ON event_struct_instantiation (sender, package, module, type_instantiation, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_senders +( + sender BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + PRIMARY KEY(sender, tx_sequence_number, event_sequence_number) +); diff --git a/crates/sui-indexer/src/db.rs b/crates/sui-indexer/src/db.rs index 99b6df729463b..08e6dd3956d38 100644 --- a/crates/sui-indexer/src/db.rs +++ b/crates/sui-indexer/src/db.rs @@ -160,7 +160,6 @@ pub fn get_pool_connection( pub fn reset_database( conn: &mut PoolConnection, - drop_all: bool, ) -> Result<(), anyhow::Error> { #[cfg(feature = "postgres-feature")] { @@ -169,7 +168,7 @@ pub fn reset_database( .map_or_else( || Err(anyhow!("Failed to downcast connection to PgConnection")), |pg_conn| { - setup_postgres::reset_database(pg_conn, drop_all)?; + setup_postgres::reset_database(pg_conn)?; Ok(()) }, )?; @@ -182,7 +181,7 @@ pub fn reset_database( .map_or_else( || Err(anyhow!("Failed to downcast connection to PgConnection")), |mysql_conn| { - setup_mysql::reset_database(mysql_conn, drop_all)?; + setup_mysql::reset_database(mysql_conn)?; Ok(()) }, )?; @@ -200,7 +199,8 @@ pub mod setup_postgres { use crate::IndexerConfig; use anyhow::anyhow; use diesel::migration::MigrationSource; - use diesel::{PgConnection, RunQueryDsl}; + use diesel::PgConnection; + use diesel::RunQueryDsl; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use prometheus::Registry; use secrecy::ExposeSecret; @@ -208,49 +208,62 @@ pub mod setup_postgres { const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/pg"); - pub fn reset_database( - conn: &mut PoolConnection, - drop_all: bool, - ) -> Result<(), anyhow::Error> { - info!("Resetting database ..."); - if drop_all { - drop_all_tables(conn) - .map_err(|e| anyhow!("Encountering error when dropping all tables {e}"))?; - } else { - conn.revert_all_migrations(MIGRATIONS) - .map_err(|e| anyhow!("Error reverting all migrations {e}"))?; - } - conn.run_migrations(&MIGRATIONS.migrations().unwrap()) - .map_err(|e| anyhow!("Failed to run migrations {e}"))?; - info!("Reset database complete."); - Ok(()) - } - - fn drop_all_tables(conn: &mut PgConnection) -> Result<(), diesel::result::Error> { - info!("Dropping all tables in the database"); - let table_names: Vec = diesel::dsl::sql::( - " - SELECT tablename FROM pg_tables WHERE schemaname = 'public' - ", - ) - .load(conn)?; + pub fn reset_database(conn: &mut PoolConnection) -> Result<(), anyhow::Error> { + info!("Resetting PG database ..."); + + let drop_all_tables = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public') + LOOP + EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_tables).execute(conn)?; + info!("Dropped all tables."); + + let drop_all_procedures = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes + FROM pg_proc 
INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid) + WHERE ns.nspname = 'public' AND prokind = 'p') + LOOP + EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_procedures).execute(conn)?; + info!("Dropped all procedures."); + + let drop_all_functions = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes + FROM pg_proc INNER JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid) + WHERE pg_namespace.nspname = 'public' AND prokind = 'f') + LOOP + EXECUTE 'DROP FUNCTION IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_functions).execute(conn)?; + info!("Dropped all functions."); - for table_name in table_names { - let drop_table_query = format!("DROP TABLE IF EXISTS {} CASCADE", table_name); - diesel::sql_query(drop_table_query).execute(conn)?; - } - - // Recreate the __diesel_schema_migrations table diesel::sql_query( " - CREATE TABLE __diesel_schema_migrations ( + CREATE TABLE IF NOT EXISTS __diesel_schema_migrations ( version VARCHAR(50) PRIMARY KEY, run_on TIMESTAMP NOT NULL DEFAULT NOW() - ) - ", + )", ) .execute(conn)?; - info!("Dropped all tables in the database"); + info!("Created __diesel_schema_migrations table."); + + conn.run_migrations(&MIGRATIONS.migrations().unwrap()) + .map_err(|e| anyhow!("Failed to run migrations {e}"))?; + info!("Reset database complete."); Ok(()) } @@ -281,7 +294,7 @@ pub mod setup_postgres { ); e })?; - reset_database(&mut conn, /* drop_all */ true).map_err(|e| { + reset_database(&mut conn).map_err(|e| { let db_err_msg = format!( "Failed resetting database with url: {:?} and error: {:?}", db_url, e @@ -343,7 +356,8 @@ pub mod setup_mysql { use crate::IndexerConfig; use anyhow::anyhow; use diesel::migration::MigrationSource; - use diesel::{MysqlConnection, RunQueryDsl}; + use diesel::MysqlConnection; + use diesel::RunQueryDsl; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use prometheus::Registry; use secrecy::ExposeSecret; @@ -351,49 +365,33 @@ pub mod setup_mysql { const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/mysql"); - pub fn reset_database( - conn: &mut PoolConnection, - drop_all: bool, - ) -> Result<(), anyhow::Error> { - info!("Resetting database ..."); - if drop_all { - crate::db::setup_mysql::drop_all_tables(conn) - .map_err(|e| anyhow!("Encountering error when dropping all tables {e}"))?; - } else { - conn.revert_all_migrations(MIGRATIONS) - .map_err(|e| anyhow!("Error reverting all migrations {e}"))?; - } - conn.run_migrations(&MIGRATIONS.migrations().unwrap()) - .map_err(|e| anyhow!("Failed to run migrations {e}"))?; - info!("Reset database complete."); - Ok(()) - } + pub fn reset_database(conn: &mut PoolConnection) -> Result<(), anyhow::Error> { + info!("Resetting MySQL database ..."); - fn drop_all_tables(conn: &mut MysqlConnection) -> Result<(), diesel::result::Error> { - info!("Dropping all tables in the database"); let table_names: Vec = diesel::dsl::sql::( - " - SELECT TABLE_NAME FROM information_schema.tables WHERE table_schema = DATABASE() - ", + "SELECT TABLE_NAME FROM information_schema.tables WHERE table_schema = DATABASE()", ) .load(conn)?; - for table_name in table_names { let drop_table_query = format!("DROP TABLE IF EXISTS {}", table_name); diesel::sql_query(drop_table_query).execute(conn)?; } + info!("Drop tables 
complete."); - // Recreate the __diesel_schema_migrations table diesel::sql_query( " - CREATE TABLE __diesel_schema_migrations ( - version VARCHAR(50) PRIMARY KEY, - run_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP() - ) - ", + CREATE TABLE __diesel_schema_migrations ( + version VARCHAR(50) PRIMARY KEY, + run_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP() + ) + ", ) .execute(conn)?; - info!("Dropped all tables in the database"); + info!("Created __diesel_schema_migrations table."); + + conn.run_migrations(&MIGRATIONS.migrations().unwrap()) + .map_err(|e| anyhow!("Failed to run migrations {e}"))?; + info!("All migrations complete, reset database complete"); Ok(()) } @@ -421,16 +419,14 @@ pub mod setup_mysql { ); e })?; - crate::db::setup_mysql::reset_database(&mut conn, /* drop_all */ true).map_err( - |e| { - let db_err_msg = format!( - "Failed resetting database with url: {:?} and error: {:?}", - db_url, e - ); - error!("{}", db_err_msg); - IndexerError::PostgresResetError(db_err_msg) - }, - )?; + crate::db::setup_mysql::reset_database(&mut conn).map_err(|e| { + let db_err_msg = format!( + "Failed resetting database with url: {:?} and error: {:?}", + db_url, e + ); + error!("{}", db_err_msg); + IndexerError::PostgresResetError(db_err_msg) + })?; info!("Reset MySQL database complete."); } let indexer_metrics = IndexerMetrics::new(®istry); diff --git a/crates/sui-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-indexer/src/handlers/checkpoint_handler.rs index b3897f2fe8935..a5273f6302e80 100644 --- a/crates/sui-indexer/src/handlers/checkpoint_handler.rs +++ b/crates/sui-indexer/src/handlers/checkpoint_handler.rs @@ -1,56 +1,50 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::handlers::committer::start_tx_checkpoint_commit_task; -use crate::handlers::tx_processor::IndexingPackageBuffer; -use crate::models::display::StoredDisplay; +use std::collections::{BTreeMap, HashMap}; +use std::sync::{Arc, Mutex}; + use async_trait::async_trait; +use diesel::r2d2::R2D2Connection; use itertools::Itertools; +use tap::tap::TapFallible; +use tokio::sync::watch; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + use move_core_types::annotated_value::{MoveStructLayout, MoveTypeLayout}; use move_core_types::language_storage::{StructTag, TypeTag}; use mysten_metrics::{get_metrics, spawn_monitored_task}; -use std::collections::{BTreeMap, HashMap}; -use std::sync::{Arc, Mutex}; +use sui_data_ingestion_core::Worker; +use sui_json_rpc_types::SuiMoveValue; use sui_package_resolver::{PackageStore, PackageStoreWithLruCache, Resolver}; use sui_rest_api::{CheckpointData, CheckpointTransaction}; -use sui_types::base_types::ObjectRef; +use sui_types::base_types::ObjectID; use sui_types::dynamic_field::DynamicFieldInfo; use sui_types::dynamic_field::DynamicFieldName; use sui_types::dynamic_field::DynamicFieldType; +use sui_types::effects::TransactionEffectsAPI; +use sui_types::event::SystemEpochInfoEvent; use sui_types::messages_checkpoint::{ CertifiedCheckpointSummary, CheckpointContents, CheckpointSequenceNumber, }; use sui_types::object::Object; -use tokio_util::sync::CancellationToken; - -use tokio::sync::watch; - -use diesel::r2d2::R2D2Connection; -use std::collections::hash_map::Entry; -use std::collections::HashSet; -use sui_data_ingestion_core::Worker; -use sui_json_rpc_types::SuiMoveValue; -use sui_types::base_types::SequenceNumber; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; -use 
sui_types::event::SystemEpochInfoEvent; use sui_types::object::Owner; -use sui_types::transaction::TransactionDataAPI; -use tap::tap::TapFallible; -use tracing::{info, warn}; - -use sui_types::base_types::ObjectID; use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; use sui_types::sui_system_state::{get_sui_system_state, SuiSystemStateTrait}; +use sui_types::transaction::TransactionDataAPI; +use crate::db::ConnectionPool; use crate::errors::IndexerError; +use crate::handlers::committer::start_tx_checkpoint_commit_task; +use crate::handlers::tx_processor::IndexingPackageBuffer; use crate::metrics::IndexerMetrics; - -use crate::db::ConnectionPool; +use crate::models::display::StoredDisplay; use crate::store::package_resolver::{IndexerStorePackageResolver, InterimPackageResolver}; use crate::store::{IndexerStore, PgIndexerStore}; use crate::types::{ - IndexedCheckpoint, IndexedDeletedObject, IndexedEpochInfo, IndexedEvent, IndexedObject, - IndexedPackage, IndexedTransaction, IndexerResult, TransactionKind, TxIndex, + EventIndex, IndexedCheckpoint, IndexedDeletedObject, IndexedEpochInfo, IndexedEvent, + IndexedObject, IndexedPackage, IndexedTransaction, IndexerResult, TransactionKind, TxIndex, }; use super::tx_processor::EpochEndIndexingObjectStore; @@ -215,6 +209,7 @@ where 0, //first_checkpoint_id None, ), + network_total_transactions: 0, })); } @@ -241,7 +236,12 @@ where let event = bcs::from_bytes::(&epoch_event.contents)?; // Now we just entered epoch X, we want to calculate the diff between - // TotalTransactionsByEndOfEpoch(X-1) and TotalTransactionsByEndOfEpoch(X-2) + // TotalTransactionsByEndOfEpoch(X-1) and TotalTransactionsByEndOfEpoch(X-2). Note that on + // the indexer's chain-reading side, this is not guaranteed to have the latest data. Rather + // than impose a wait on the reading side, however, we overwrite this on the persisting + // side, where we can guarantee that the previous epoch's checkpoints have been written to + // db. 
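// A minimal sketch of the arithmetic described in the comment above, assuming
// a hypothetical helper `total_tx_by_end_of_epoch(e)` that returns the
// cumulative `network_total_transactions` recorded with the last checkpoint of
// epoch `e` (both the helper and the function below are illustrative, not part
// of this patch):
fn tx_count_of_epoch(total_tx_by_end_of_epoch: impl Fn(u64) -> u64, epoch: u64) -> u64 {
    if epoch == 0 {
        // Genesis epoch: the cumulative total is itself the per-epoch count.
        total_tx_by_end_of_epoch(0)
    } else {
        // Difference of two cumulative counters gives the transactions
        // executed during `epoch` alone.
        total_tx_by_end_of_epoch(epoch) - total_tx_by_end_of_epoch(epoch - 1)
    }
}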
+ let network_tx_count_prev_epoch = match system_state.epoch { // If first epoch change, this number is 0 1 => Ok(0), @@ -265,6 +265,7 @@ where checkpoint_summary.sequence_number + 1, // first_checkpoint_id Some(&event), ), + network_total_transactions: checkpoint_summary.network_total_transactions, })) } @@ -287,20 +288,21 @@ where let object_history_changes: TransactionObjectChangesToCommit = Self::index_objects_history(data.clone(), package_resolver.clone()).await?; - let (checkpoint, db_transactions, db_events, db_indices, db_displays) = { + let (checkpoint, db_transactions, db_events, db_tx_indices, db_event_indices, db_displays) = { let CheckpointData { transactions, checkpoint_summary, checkpoint_contents, } = data; - let (db_transactions, db_events, db_indices, db_displays) = Self::index_transactions( - transactions, - &checkpoint_summary, - &checkpoint_contents, - &metrics, - ) - .await?; + let (db_transactions, db_events, db_tx_indices, db_event_indices, db_displays) = + Self::index_transactions( + transactions, + &checkpoint_summary, + &checkpoint_contents, + &metrics, + ) + .await?; let successful_tx_num: u64 = db_transactions.iter().map(|t| t.successful_tx_num).sum(); ( @@ -311,7 +313,8 @@ where ), db_transactions, db_events, - db_indices, + db_tx_indices, + db_event_indices, db_displays, ) }; @@ -334,7 +337,8 @@ where checkpoint, transactions: db_transactions, events: db_events, - tx_indices: db_indices, + tx_indices: db_tx_indices, + event_indices: db_event_indices, display_updates: db_displays, object_changes, object_history_changes, @@ -352,6 +356,7 @@ where Vec, Vec, Vec, + Vec, BTreeMap, )> { let checkpoint_seq = checkpoint_summary.sequence_number(); @@ -372,7 +377,8 @@ where let mut db_transactions = Vec::new(); let mut db_events = Vec::new(); let mut db_displays = BTreeMap::new(); - let mut db_indices = Vec::new(); + let mut db_tx_indices = Vec::new(); + let mut db_event_indices = Vec::new(); for tx in transactions { let CheckpointTransaction { @@ -390,6 +396,7 @@ where checkpoint_seq, tx_digest, sender_signed_data.digest() ))); } + let tx = sender_signed_data.transaction_data(); let events = events .as_ref() @@ -413,6 +420,12 @@ where ) })); + db_event_indices.extend( + events.iter().enumerate().map(|(idx, event)| { + EventIndex::from_event(tx_sequence_number, idx as u64, event) + }), + ); + db_displays.extend( events .iter() @@ -440,7 +453,7 @@ where object_changes, balance_change, events, - transaction_kind, + transaction_kind: transaction_kind.clone(), successful_tx_num: if fx.status().is_ok() { tx.kind().tx_count() as u64 } else { @@ -468,8 +481,8 @@ where // Payers let payers = vec![tx.gas_owner()]; - // Senders - let senders = vec![tx.sender()]; + // Sender + let sender = tx.sender(); // Recipients let recipients = fx @@ -489,19 +502,26 @@ where .map(|(p, m, f)| (*<&ObjectID>::clone(p), m.to_string(), f.to_string())) .collect(); - db_indices.push(TxIndex { + db_tx_indices.push(TxIndex { tx_sequence_number, transaction_digest: tx_digest, checkpoint_sequence_number: *checkpoint_seq, input_objects, changed_objects, - senders, + sender, payers, recipients, move_calls, + tx_kind: transaction_kind, }); } - Ok((db_transactions, db_events, db_indices, db_displays)) + Ok(( + db_transactions, + db_events, + db_tx_indices, + db_event_indices, + db_displays, + )) } pub(crate) async fn index_objects( @@ -511,71 +531,41 @@ where ) -> Result { let _timer = metrics.indexing_objects_latency.start_timer(); let checkpoint_seq = data.checkpoint_summary.sequence_number; - let 
deleted_objects = data - .transactions - .iter() - .flat_map(|tx| get_deleted_objects(&tx.effects)) - .collect::>(); - let deleted_object_ids = deleted_objects - .iter() - .map(|o| (o.0, o.1)) - .collect::>(); - let indexed_deleted_objects = deleted_objects + + let eventually_removed_object_refs_post_version = + data.eventually_removed_object_refs_post_version(); + let indexed_eventually_removed_objects = eventually_removed_object_refs_post_version .into_iter() - .map(|o| IndexedDeletedObject { - object_id: o.0, - object_version: o.1.value(), + .map(|obj_ref| IndexedDeletedObject { + object_id: obj_ref.0, + object_version: obj_ref.1.into(), checkpoint_sequence_number: checkpoint_seq, }) .collect(); - let (latest_objects, intermediate_versions) = get_latest_objects(data.output_objects()); - - let live_objects: Vec = data - .transactions - .iter() - .flat_map(|tx| { - let CheckpointTransaction { - transaction: tx, - effects: fx, - .. - } = tx; - fx.all_changed_objects() - .into_iter() - .filter_map(|(oref, _owner, _kind)| { - // We don't care about objects that are deleted or updated more than once - if intermediate_versions.contains(&(oref.0, oref.1)) - || deleted_object_ids.contains(&(oref.0, oref.1)) - { - return None; - } - let object = latest_objects.get(&(oref.0)).unwrap_or_else(|| { - panic!( - "object {:?} not found in CheckpointData (tx_digest: {})", - oref.0, - tx.digest() - ) - }); - assert_eq!(oref.1, object.version()); - Some(object.clone()) - }) - .collect::>() - }) - .collect(); - + let latest_live_output_objects = data.latest_live_output_objects(); + let latest_live_output_object_map = latest_live_output_objects + .clone() + .into_iter() + .map(|o| (o.id(), o.clone())) + .collect::>(); let move_struct_layout_map = - get_move_struct_layout_map(&live_objects, package_resolver).await?; - let changed_objects = live_objects + get_move_struct_layout_map(latest_live_output_objects.clone(), package_resolver) + .await?; + let changed_objects = latest_live_output_objects .into_iter() .map(|o| { - let df_info = - try_create_dynamic_field_info(&o, &move_struct_layout_map, &latest_objects); - df_info.map(|info| IndexedObject::from_object(checkpoint_seq, o, info)) + let df_info = try_create_dynamic_field_info( + o, + &move_struct_layout_map, + &latest_live_output_object_map, + ); + df_info.map(|info| IndexedObject::from_object(checkpoint_seq, o.clone(), info)) }) .collect::, _>>()?; Ok(TransactionObjectChangesToCommit { changed_objects, - deleted_objects: indexed_deleted_objects, + deleted_objects: indexed_eventually_removed_objects, }) } @@ -588,59 +578,42 @@ where let deleted_objects = data .transactions .iter() - .flat_map(|tx| get_deleted_objects(&tx.effects)) + .flat_map(|tx| tx.removed_object_refs_post_version()) .collect::>(); let indexed_deleted_objects: Vec = deleted_objects .into_iter() - .map(|o| IndexedDeletedObject { - object_id: o.0, - object_version: o.1.value(), + .map(|obj_ref| IndexedDeletedObject { + object_id: obj_ref.0, + object_version: obj_ref.1.into(), checkpoint_sequence_number: checkpoint_seq, }) .collect(); - let (latest_objects, _) = get_latest_objects(data.output_objects()); - let history_object_map = data - .output_objects() + let latest_live_output_objects = data.latest_live_output_objects(); + let latest_live_output_object_map = latest_live_output_objects + .clone() .into_iter() - .map(|o| ((o.id(), o.version()), o.clone())) + .map(|o| (o.id(), o.clone())) .collect::>(); - let history_objects: Vec = data + let output_objects = data .transactions .iter() - 
.flat_map(|tx| { - let CheckpointTransaction { - transaction: tx, - effects: fx, - .. - } = tx; - fx.all_changed_objects() - .into_iter() - .map(|(oref, _owner, _kind)| { - let history_object = history_object_map.get(&(oref.0, oref.1)).unwrap_or_else(|| { - panic!( - "object {:?} version {:?} not found in CheckpointData (tx_digest: {})", - oref.0, - oref.1, - tx.digest() - ) - }); - assert_eq!(oref.2, history_object.digest()); - history_object.clone() - }) - .collect::<Vec<_>>() - }) - .collect(); - + .flat_map(|tx| &tx.output_objects) + .collect::<Vec<_>>(); + // TODO(gegaowp): the current df_info implementation is not correct, + // but we have decided to remove all df_* except df_kind. let move_struct_layout_map = - get_move_struct_layout_map(&history_objects, package_resolver).await?; - let changed_objects = history_objects + get_move_struct_layout_map(output_objects.clone(), package_resolver) + .await?; + let changed_objects = output_objects .into_iter() .map(|o| { - let df_info = - try_create_dynamic_field_info(&o, &move_struct_layout_map, &latest_objects); - df_info.map(|info| IndexedObject::from_object(checkpoint_seq, o, info)) + let df_info = try_create_dynamic_field_info( + o, + &move_struct_layout_map, + &latest_live_output_object_map, + ); + df_info.map(|info| IndexedObject::from_object(checkpoint_seq, o.clone(), info)) }) .collect::<Result<Vec<_>, _>>()?; @@ -659,8 +632,9 @@ where .iter() .flat_map(|data| { let checkpoint_sequence_number = data.checkpoint_summary.sequence_number; - data.output_objects() + data.transactions .iter() + .flat_map(|tx| &tx.output_objects) .filter_map(|o| { if let sui_types::object::Data::Package(p) = &o.data { Some(IndexedPackage { @@ -684,8 +658,9 @@ where .iter() .flat_map(|data| { let checkpoint_sequence_number = data.checkpoint_summary.sequence_number; - data.output_objects() + data.transactions .iter() + .flat_map(|tx| &tx.output_objects) .filter_map(|o| { if let sui_types::object::Data::Package(p) = &o.data { let indexed_pkg = IndexedPackage { @@ -693,7 +668,7 @@ where move_package: p.clone(), checkpoint_sequence_number, }; - Some((indexed_pkg, (**o).clone())) + Some((indexed_pkg, o.clone())) } else { None } @@ -715,11 +690,11 @@ where } async fn get_move_struct_layout_map( - objects: &[Object], + objects: Vec<&Object>, package_resolver: Arc<Resolver<impl PackageStore>>, ) -> Result<HashMap<StructTag, MoveStructLayout>, IndexerError> { let struct_tags = objects - .iter() + .into_iter() .filter_map(|o| { let move_object = o.data.try_as_move().cloned(); move_object.map(|move_object| { @@ -770,40 +745,6 @@ async fn get_move_struct_layout_map( Ok(move_struct_layout_map) } -pub fn get_deleted_objects(effects: &TransactionEffects) -> Vec<ObjectRef> { - let deleted = effects.deleted().into_iter(); - let wrapped = effects.wrapped().into_iter(); - let unwrapped_then_deleted = effects.unwrapped_then_deleted().into_iter(); - deleted - .chain(wrapped) - .chain(unwrapped_then_deleted) - .collect::<Vec<_>>() -} - -pub fn get_latest_objects( - objects: Vec<&Object>, -) -> ( - HashMap<ObjectID, Object>, - HashSet<(ObjectID, SequenceNumber)>, -) { - let mut latest_objects = HashMap::new(); - let mut discarded_versions = HashSet::new(); - for object in objects { - match latest_objects.entry(object.id()) { - Entry::Vacant(e) => { - e.insert(object.clone()); - } - Entry::Occupied(mut e) => { - if object.version() > e.get().version() { - discarded_versions.insert((e.get().id(), e.get().version())); - e.insert(object.clone()); - } - } - } - } - (latest_objects, discarded_versions) -} - fn try_create_dynamic_field_info( o: &Object, struct_tag_to_move_struct_layout: &HashMap<StructTag, MoveStructLayout>, diff --git
a/crates/sui-indexer/src/handlers/committer.rs b/crates/sui-indexer/src/handlers/committer.rs index eb1dc7365f2b8..f4e5504893f5c 100644 --- a/crates/sui-indexer/src/handlers/committer.rs +++ b/crates/sui-indexer/src/handlers/committer.rs @@ -89,6 +89,7 @@ async fn commit_checkpoints( let mut tx_batch = vec![]; let mut events_batch = vec![]; let mut tx_indices_batch = vec![]; + let mut event_indices_batch = vec![]; let mut display_updates_batch = BTreeMap::new(); let mut object_changes_batch = vec![]; let mut object_history_changes_batch = vec![]; @@ -99,6 +100,7 @@ async fn commit_checkpoints( checkpoint, transactions, events, + event_indices, tx_indices, display_updates, object_changes, @@ -110,6 +112,7 @@ async fn commit_checkpoints( tx_batch.push(transactions); events_batch.push(events); tx_indices_batch.push(tx_indices); + event_indices_batch.push(event_indices); display_updates_batch.extend(display_updates.into_iter()); object_changes_batch.push(object_changes); object_history_changes_batch.push(object_history_changes); @@ -123,6 +126,10 @@ async fn commit_checkpoints( let tx_batch = tx_batch.into_iter().flatten().collect::>(); let tx_indices_batch = tx_indices_batch.into_iter().flatten().collect::>(); let events_batch = events_batch.into_iter().flatten().collect::>(); + let event_indices_batch = event_indices_batch + .into_iter() + .flatten() + .collect::>(); let packages_batch = packages_batch.into_iter().flatten().collect::>(); let checkpoint_num = checkpoint_batch.len(); let tx_count = tx_batch.len(); @@ -133,6 +140,7 @@ async fn commit_checkpoints( state.persist_transactions(tx_batch), state.persist_tx_indices(tx_indices_batch), state.persist_events(events_batch), + state.persist_event_indices(event_indices_batch), state.persist_displays(display_updates_batch), state.persist_packages(packages_batch), state.persist_objects(object_changes_batch.clone()), @@ -154,6 +162,8 @@ async fn commit_checkpoints( .expect("Persisting data into DB should not fail."); } + let is_epoch_end = epoch.is_some(); + // handle partitioning on epoch boundary if let Some(epoch_data) = epoch { state @@ -176,6 +186,17 @@ async fn commit_checkpoints( ); }) .expect("Persisting data into DB should not fail."); + + if is_epoch_end { + // The epoch has advanced so we update the configs for the new protocol version, if it has changed. 
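// A condensed sketch of the epoch-boundary sequence this hunk adds, with a
// hypothetical `EpochBoundaryStore` trait standing in for `PgIndexerStore`
// (the method names come from this patch; the trait shape and signatures are
// illustrative assumptions, not the real `IndexerStore` API):
trait EpochBoundaryStore {
    fn persist_epoch(&self, epoch_id: u64) -> anyhow::Result<()>;
    fn advance_epoch(&self, epoch_id: u64) -> anyhow::Result<()>;
    fn get_chain_identifier(&self) -> anyhow::Result<Option<Vec<u8>>>;
    fn persist_protocol_configs_and_feature_flags(&self, chain_id: Vec<u8>) -> anyhow::Result<()>;
}

fn commit_epoch_boundary(state: &impl EpochBoundaryStore, epoch_id: u64) -> anyhow::Result<()> {
    // Epoch data and partition advancement are persisted first, so the
    // checkpoints of the epoch that just ended are already in the db.
    state.persist_epoch(epoch_id)?;
    state.advance_epoch(epoch_id)?;
    // The chain identifier is indexed together with the first checkpoint, so
    // it must be present by the time any epoch boundary is committed.
    let chain_id = state
        .get_chain_identifier()?
        .expect("chain identifier should have been indexed");
    // Mirrors the `let _ =` in the patch: a failure to persist protocol
    // configs is deliberately not fatal to checkpoint commitment.
    let _ = state.persist_protocol_configs_and_feature_flags(chain_id);
    Ok(())
}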
+ let chain_id = state + .get_chain_identifier() + .await + .expect("Failed to get chain identifier") + .expect("Chain identifier should have been indexed at this point"); + let _ = state.persist_protocol_configs_and_feature_flags(chain_id); + } + let elapsed = guard.stop_and_record(); commit_notifier diff --git a/crates/sui-indexer/src/handlers/mod.rs b/crates/sui-indexer/src/handlers/mod.rs index ca27e92a0bf41..2a6578fc18295 100644 --- a/crates/sui-indexer/src/handlers/mod.rs +++ b/crates/sui-indexer/src/handlers/mod.rs @@ -6,8 +6,8 @@ use std::collections::BTreeMap; use crate::{ models::display::StoredDisplay, types::{ - IndexedCheckpoint, IndexedDeletedObject, IndexedEpochInfo, IndexedEvent, IndexedObject, - IndexedPackage, IndexedTransaction, TxIndex, + EventIndex, IndexedCheckpoint, IndexedDeletedObject, IndexedEpochInfo, IndexedEvent, + IndexedObject, IndexedPackage, IndexedTransaction, TxIndex, }, }; @@ -22,6 +22,7 @@ pub struct CheckpointDataToCommit { pub checkpoint: IndexedCheckpoint, pub transactions: Vec, pub events: Vec, + pub event_indices: Vec, pub tx_indices: Vec, pub display_updates: BTreeMap, pub object_changes: TransactionObjectChangesToCommit, @@ -40,4 +41,5 @@ pub struct TransactionObjectChangesToCommit { pub struct EpochToCommit { pub last_epoch: Option, pub new_epoch: IndexedEpochInfo, + pub network_total_transactions: u64, } diff --git a/crates/sui-indexer/src/handlers/tx_processor.rs b/crates/sui-indexer/src/handlers/tx_processor.rs index 04d96d6eafd04..2ee43f5880ead 100644 --- a/crates/sui-indexer/src/handlers/tx_processor.rs +++ b/crates/sui-indexer/src/handlers/tx_processor.rs @@ -30,7 +30,6 @@ use sui_types::messages_checkpoint::CheckpointSequenceNumber; use crate::errors::IndexerError; use crate::metrics::IndexerMetrics; - use crate::types::IndexedPackage; use crate::types::{IndexedObjectChange, IndexerResult}; @@ -289,9 +288,8 @@ pub(crate) struct EpochEndIndexingObjectStore<'a> { impl<'a> EpochEndIndexingObjectStore<'a> { pub fn new(data: &'a CheckpointData) -> Self { - // We only care about output objects for end-of-epoch indexing Self { - objects: data.output_objects(), + objects: data.latest_live_output_objects(), } } } diff --git a/crates/sui-indexer/src/indexer.rs b/crates/sui-indexer/src/indexer.rs index 3b71cf795d37f..f38a7a1de140d 100644 --- a/crates/sui-indexer/src/indexer.rs +++ b/crates/sui-indexer/src/indexer.rs @@ -122,6 +122,14 @@ impl Indexer { spawn_monitored_task!(pruner.start(CancellationToken::new())); } + // If we already have chain identifier indexed (i.e. the first checkpoint has been indexed), + // then we persist protocol configs for protocol versions not yet in the db. + // Otherwise, we would do the persisting in `commit_checkpoint` while the first cp is + // being indexed. + if let Some(chain_id) = store.get_chain_identifier().await? 
{ + store.persist_protocol_configs_and_feature_flags(chain_id)?; + } + let cancel_clone = cancel.clone(); let (exit_sender, exit_receiver) = oneshot::channel(); // Spawn a task that links the cancellation token to the exit sender diff --git a/crates/sui-indexer/src/indexer_reader.rs b/crates/sui-indexer/src/indexer_reader.rs index 00408945140bd..f05098a764185 100644 --- a/crates/sui-indexer/src/indexer_reader.rs +++ b/crates/sui-indexer/src/indexer_reader.rs @@ -777,14 +777,14 @@ impl IndexerReader { let package = Hex::encode(package.to_vec()); match (module, function) { (Some(module), Some(function)) => ( - "tx_calls".into(), + "tx_calls_fun".into(), format!( "package = '\\x{}'::bytea AND module = '{}' AND func = '{}'", package, module, function ), ), (Some(module), None) => ( - "tx_calls".into(), + "tx_calls_mod".into(), format!( "package = '\\x{}'::bytea AND module = '{}'", package, module @@ -792,11 +792,11 @@ impl IndexerReader { ), (None, Some(_)) => { return Err(IndexerError::InvalidArgumentError( - "Function cannot be present wihtout Module.".into(), + "Function cannot be present without Module.".into(), )); } (None, None) => ( - "tx_calls".into(), + "tx_calls_pkg".into(), format!("package = '\\x{}'::bytea", package), ), } diff --git a/crates/sui-indexer/src/main.rs b/crates/sui-indexer/src/main.rs index 02a92f7a507b2..dbb95bbc991a6 100644 --- a/crates/sui-indexer/src/main.rs +++ b/crates/sui-indexer/src/main.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use clap::Parser; -use tracing::info; +use tracing::{info, warn}; use sui_indexer::errors::IndexerError; use sui_indexer::metrics::start_prometheus_server; @@ -14,6 +14,7 @@ async fn main() -> Result<(), IndexerError> { let _guard = telemetry_subscribers::TelemetryConfig::new() .with_env() .init(); + warn!("WARNING: Sui indexer is still experimental and we expect occasional breaking changes that require backfills."); let mut indexer_config = IndexerConfig::parse(); // TODO: remove. 
Temporary safeguard to migrate to `rpc_client_url` usage diff --git a/crates/sui-indexer/src/metrics.rs b/crates/sui-indexer/src/metrics.rs index 36d788f5fd580..34978836db5d2 100644 --- a/crates/sui-indexer/src/metrics.rs +++ b/crates/sui-indexer/src/metrics.rs @@ -139,6 +139,8 @@ pub struct IndexerMetrics { pub checkpoint_db_commit_latency_objects_history_chunks: Histogram, pub checkpoint_db_commit_latency_events: Histogram, pub checkpoint_db_commit_latency_events_chunks: Histogram, + pub checkpoint_db_commit_latency_event_indices: Histogram, + pub checkpoint_db_commit_latency_event_indices_chunks: Histogram, pub checkpoint_db_commit_latency_packages: Histogram, pub checkpoint_db_commit_latency_tx_indices: Histogram, pub checkpoint_db_commit_latency_tx_indices_chunks: Histogram, @@ -494,6 +496,20 @@ impl IndexerMetrics { registry, ) .unwrap(), + checkpoint_db_commit_latency_event_indices: register_histogram_with_registry!( + "checkpoint_db_commit_latency_event_indices", + "Time spent committing event indices", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_event_indices_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_event_indices_chunks", + "Time spent committing event indices chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), checkpoint_db_commit_latency_packages: register_histogram_with_registry!( "checkpoint_db_commit_latency_packages", "Time spent commiting packages", diff --git a/crates/sui-indexer/src/models/checkpoints.rs b/crates/sui-indexer/src/models/checkpoints.rs index 260fcfb5944f2..69e2618c82297 100644 --- a/crates/sui-indexer/src/models/checkpoints.rs +++ b/crates/sui-indexer/src/models/checkpoints.rs @@ -42,6 +42,8 @@ pub struct StoredCheckpoint { pub checkpoint_commitments: Vec<u8>, pub validator_signature: Vec<u8>, pub end_of_epoch_data: Option<Vec<u8>>, + pub min_tx_sequence_number: Option<i64>, + pub max_tx_sequence_number: Option<i64>, } impl From<&IndexedCheckpoint> for StoredCheckpoint { @@ -83,6 +85,8 @@ impl From<&IndexedCheckpoint> for StoredCheckpoint { .as_ref() .map(|d| bcs::to_bytes(d).unwrap()), end_of_epoch: c.end_of_epoch_data.is_some(), + min_tx_sequence_number: Some(c.min_tx_sequence_number as i64), + max_tx_sequence_number: Some(c.max_tx_sequence_number as i64), } } } diff --git a/crates/sui-indexer/src/models/epoch.rs b/crates/sui-indexer/src/models/epoch.rs index a392fafbbd4d5..0991203d5cd02 100644 --- a/crates/sui-indexer/src/models/epoch.rs +++ b/crates/sui-indexer/src/models/epoch.rs @@ -3,9 +3,9 @@ use diesel::{Insertable, Queryable, Selectable}; -use crate::errors::IndexerError; use crate::schema::epochs; use crate::types::IndexedEpochInfo; +use crate::{errors::IndexerError, schema::feature_flags, schema::protocol_configs}; use sui_json_rpc_types::{EndOfEpochInfo, EpochInfo}; use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; @@ -33,6 +33,22 @@ pub struct StoredEpochInfo { pub epoch_commitments: Option<Vec<u8>>, } +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = protocol_configs)] +pub struct StoredProtocolConfig { + pub protocol_version: i64, + pub config_name: String, + pub config_value: Option<String>, +} + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = feature_flags)] +pub struct StoredFeatureFlag { + pub protocol_version: i64, + pub flag_name: String, + pub flag_value: bool, +} + #[derive(Queryable, Selectable, Clone)] #[diesel(table_name = epochs)] pub struct
QueryableEpochInfo { diff --git a/crates/sui-indexer/src/models/event_indices.rs b/crates/sui-indexer/src/models/event_indices.rs new file mode 100644 index 0000000000000..08f17cce339d5 --- /dev/null +++ b/crates/sui-indexer/src/models/event_indices.rs @@ -0,0 +1,145 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + schema::{ + event_emit_module, event_emit_package, event_senders, event_struct_instantiation, + event_struct_module, event_struct_name, event_struct_package, + }, + types::EventIndex, +}; +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_emit_package)] +pub struct StoredEventEmitPackage { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_emit_module)] +pub struct StoredEventEmitModule { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_senders)] +pub struct StoredEventSenders { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_package)] +pub struct StoredEventStructPackage { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_module)] +pub struct StoredEventStructModule { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_name)] +pub struct StoredEventStructName { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub type_name: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_instantiation)] +pub struct StoredEventStructInstantiation { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub type_instantiation: String, + pub sender: Vec, +} + +impl EventIndex { + pub fn split( + self: EventIndex, + ) -> ( + StoredEventEmitPackage, + StoredEventEmitModule, + StoredEventSenders, + StoredEventStructPackage, + StoredEventStructModule, + StoredEventStructName, + StoredEventStructInstantiation, + ) { + let tx_sequence_number = self.tx_sequence_number as i64; + let event_sequence_number = self.event_sequence_number as i64; + ( + StoredEventEmitPackage { + tx_sequence_number, + event_sequence_number, + package: self.emit_package.to_vec(), + sender: self.sender.to_vec(), + }, + StoredEventEmitModule { + tx_sequence_number, + event_sequence_number, + package: self.emit_package.to_vec(), + module: self.emit_module.clone(), + sender: self.sender.to_vec(), + }, + StoredEventSenders { + tx_sequence_number, + event_sequence_number, + sender: self.sender.to_vec(), + }, + StoredEventStructPackage { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + sender: self.sender.to_vec(), + }, + StoredEventStructModule { + 
tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + sender: self.sender.to_vec(), + }, + StoredEventStructName { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + type_name: self.type_name.clone(), + sender: self.sender.to_vec(), + }, + StoredEventStructInstantiation { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + type_instantiation: self.type_instantiation.clone(), + sender: self.sender.to_vec(), + }, + ) + } +} diff --git a/crates/sui-indexer/src/models/mod.rs b/crates/sui-indexer/src/models/mod.rs index 3b8233ec45021..b677e09f1aaad 100644 --- a/crates/sui-indexer/src/models/mod.rs +++ b/crates/sui-indexer/src/models/mod.rs @@ -4,7 +4,9 @@ pub mod checkpoints; pub mod display; pub mod epoch; +pub mod event_indices; pub mod events; +pub mod obj_indices; pub mod objects; pub mod packages; pub mod transactions; diff --git a/crates/sui-indexer/src/models/obj_indices.rs b/crates/sui-indexer/src/models/obj_indices.rs new file mode 100644 index 0000000000000..7e5298008834c --- /dev/null +++ b/crates/sui-indexer/src/models/obj_indices.rs @@ -0,0 +1,40 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; + +use crate::schema::objects_version; + +use super::objects::StoredDeletedObject; +use super::objects::StoredObject; + +/// Model types related to tables that support efficient execution of queries on the `objects`, +/// `objects_history` and `objects_snapshot` tables. + +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects_version, primary_key(object_id, object_version))] +pub struct StoredObjectVersion { + pub object_id: Vec, + pub object_version: i64, + pub cp_sequence_number: i64, +} + +impl From<&StoredObject> for StoredObjectVersion { + fn from(o: &StoredObject) -> Self { + Self { + object_id: o.object_id.clone(), + object_version: o.object_version, + cp_sequence_number: o.checkpoint_sequence_number, + } + } +} + +impl From<&StoredDeletedObject> for StoredObjectVersion { + fn from(o: &StoredDeletedObject) -> Self { + Self { + object_id: o.object_id.clone(), + object_version: o.object_version, + cp_sequence_number: o.checkpoint_sequence_number, + } + } +} diff --git a/crates/sui-indexer/src/models/objects.rs b/crates/sui-indexer/src/models/objects.rs index a6c74f19e0ce9..03f9cc3c81299 100644 --- a/crates/sui-indexer/src/models/objects.rs +++ b/crates/sui-indexer/src/models/objects.rs @@ -19,7 +19,7 @@ use sui_types::object::ObjectRead; use crate::errors::IndexerError; use crate::schema::{objects, objects_history, objects_snapshot}; -use crate::types::{IndexedDeletedObject, IndexedObject, ObjectStatus}; +use crate::types::{owner_to_owner_info, IndexedDeletedObject, IndexedObject, ObjectStatus}; #[derive(Queryable)] pub struct DynamicFieldColumn { @@ -229,30 +229,43 @@ impl From for StoredDeletedHistoryObject { impl From for StoredObject { fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number, + object, + df_info, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; Self { - object_id: 
o.object_id.to_vec(), - object_version: o.object_version as i64, - object_digest: o.object_digest.into_inner().to_vec(), - checkpoint_sequence_number: o.checkpoint_sequence_number as i64, - owner_type: o.owner_type as i16, - owner_id: o.owner_id.map(|id| id.to_vec()), - object_type: o - .object + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_digest: object.digest().into_inner().to_vec(), + checkpoint_sequence_number: checkpoint_sequence_number as i64, + owner_type: owner_type as i16, + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object .type_() .map(|t| t.to_canonical_string(/* with_prefix */ true)), - object_type_package: o.object.type_().map(|t| t.address().to_vec()), - object_type_module: o.object.type_().map(|t| t.module().to_string()), - object_type_name: o.object.type_().map(|t| t.name().to_string()), - serialized_object: bcs::to_bytes(&o.object).unwrap(), - coin_type: o.coin_type, - coin_balance: o.coin_balance.map(|b| b as i64), - df_kind: o.df_info.as_ref().map(|k| match k.type_ { + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: bcs::to_bytes(&object).unwrap(), + coin_type, + coin_balance: coin_balance.map(|b| b as i64), + df_kind: df_info.as_ref().map(|k| match k.type_ { DynamicFieldType::DynamicField => 0, DynamicFieldType::DynamicObject => 1, }), - df_name: o.df_info.as_ref().map(|n| bcs::to_bytes(&n.name).unwrap()), - df_object_type: o.df_info.as_ref().map(|v| v.object_type.clone()), - df_object_id: o.df_info.as_ref().map(|v| v.object_id.to_vec()), + df_name: df_info.as_ref().map(|n| bcs::to_bytes(&n.name).unwrap()), + df_object_type: df_info.as_ref().map(|v| v.object_type.clone()), + df_object_id: df_info.as_ref().map(|v| v.object_id.to_vec()), } } } @@ -486,7 +499,7 @@ impl TryFrom for SuiCoin { let balance = o .coin_balance .ok_or(IndexerError::PersistentStorageDataCorruptionError(format!( - "Object {} is supposed to be a coin but has an empy coin_balance column", + "Object {} is supposed to be a coin but has an empty coin_balance column", coin_object_id, )))?; Ok(SuiCoin { diff --git a/crates/sui-indexer/src/models/packages.rs b/crates/sui-indexer/src/models/packages.rs index 63f61f01fc428..97c8e8fc5b459 100644 --- a/crates/sui-indexer/src/models/packages.rs +++ b/crates/sui-indexer/src/models/packages.rs @@ -10,14 +10,20 @@ use diesel::prelude::*; #[diesel(table_name = packages, primary_key(package_id))] pub struct StoredPackage { pub package_id: Vec, + pub original_id: Vec, + pub package_version: i64, pub move_package: Vec, + pub checkpoint_sequence_number: i64, } impl From for StoredPackage { fn from(p: IndexedPackage) -> Self { Self { package_id: p.package_id.to_vec(), + original_id: p.move_package.original_package_id().to_vec(), + package_version: p.move_package.version().value() as i64, move_package: bcs::to_bytes(&p.move_package).unwrap(), + checkpoint_sequence_number: p.checkpoint_sequence_number as i64, } } } diff --git a/crates/sui-indexer/src/models/tx_indices.rs b/crates/sui-indexer/src/models/tx_indices.rs index 86c4ae4c73819..4f942c2e7af0b 100644 --- a/crates/sui-indexer/src/models/tx_indices.rs +++ b/crates/sui-indexer/src/models/tx_indices.rs @@ -3,7 +3,8 @@ use crate::{ schema::{ - tx_calls, tx_changed_objects, tx_digests, tx_input_objects, tx_recipients, tx_senders, + tx_calls_fun, tx_calls_mod, tx_calls_pkg, 
tx_changed_objects, tx_digests, tx_input_objects, + tx_kinds, tx_recipients, tx_senders, }, types::TxIndex, }; @@ -24,7 +25,6 @@ pub struct TxDigest { #[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] #[diesel(table_name = tx_senders)] pub struct StoredTxSenders { - pub cp_sequence_number: i64, pub tx_sequence_number: i64, pub sender: Vec, } @@ -32,35 +32,52 @@ pub struct StoredTxSenders { #[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] #[diesel(table_name = tx_recipients)] pub struct StoredTxRecipients { - pub cp_sequence_number: i64, pub tx_sequence_number: i64, pub recipient: Vec, + pub sender: Vec, } #[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] #[diesel(table_name = tx_input_objects)] pub struct StoredTxInputObject { - pub cp_sequence_number: i64, pub tx_sequence_number: i64, pub object_id: Vec, + pub sender: Vec, } #[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] #[diesel(table_name = tx_changed_objects)] pub struct StoredTxChangedObject { - pub cp_sequence_number: i64, pub tx_sequence_number: i64, pub object_id: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_pkg)] +pub struct StoredTxPkg { + pub tx_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_mod)] +pub struct StoredTxMod { + pub tx_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, } #[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] -#[diesel(table_name = tx_calls)] -pub struct StoredTxCalls { - pub cp_sequence_number: i64, +#[diesel(table_name = tx_calls_fun)] +pub struct StoredTxFun { pub tx_sequence_number: i64, pub package: Vec, pub module: String, pub func: String, + pub sender: Vec, } #[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] @@ -68,7 +85,13 @@ pub struct StoredTxCalls { pub struct StoredTxDigest { pub tx_digest: Vec, pub tx_sequence_number: i64, - pub cp_sequence_number: i64, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_kinds)] +pub struct StoredTxKind { + pub tx_kind: i16, + pub tx_sequence_number: i64, } #[allow(clippy::type_complexity)] @@ -80,71 +103,109 @@ impl TxIndex { Vec, Vec, Vec, - Vec, + Vec, + Vec, + Vec, Vec, + Vec, ) { let tx_sequence_number = self.tx_sequence_number as i64; - let cp_sequence_number = self.checkpoint_sequence_number as i64; - let tx_senders = self - .senders - .iter() - .map(|s| StoredTxSenders { - cp_sequence_number, - tx_sequence_number, - sender: s.to_vec(), - }) - .collect(); + let tx_sender = StoredTxSenders { + tx_sequence_number, + sender: self.sender.to_vec(), + }; let tx_recipients = self .recipients .iter() .map(|s| StoredTxRecipients { - cp_sequence_number, tx_sequence_number, recipient: s.to_vec(), + sender: self.sender.to_vec(), }) .collect(); let tx_input_objects = self .input_objects .iter() .map(|o| StoredTxInputObject { - cp_sequence_number, tx_sequence_number, object_id: bcs::to_bytes(&o).unwrap(), + sender: self.sender.to_vec(), }) .collect(); let tx_changed_objects = self .changed_objects .iter() .map(|o| StoredTxChangedObject { - cp_sequence_number, tx_sequence_number, object_id: bcs::to_bytes(&o).unwrap(), + sender: self.sender.to_vec(), }) .collect(); - let tx_calls = self + + let mut packages = Vec::new(); + let mut packages_modules = Vec::new(); + let mut 
packages_modules_funcs = Vec::new(); + + for (pkg, pkg_mod, pkg_mod_func) in self .move_calls .iter() - .map(|(p, m, f)| StoredTxCalls { - cp_sequence_number, + .map(|(p, m, f)| (*p, (*p, m.clone()), (*p, m.clone(), f.clone()))) + { + packages.push(pkg); + packages_modules.push(pkg_mod); + packages_modules_funcs.push(pkg_mod_func); + } + + let tx_pkgs = packages + .iter() + .map(|p| StoredTxPkg { + tx_sequence_number, + package: p.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_mods = packages_modules + .iter() + .map(|(p, m)| StoredTxMod { + tx_sequence_number, + package: p.to_vec(), + module: m.to_string(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_calls = packages_modules_funcs + .iter() + .map(|(p, m, f)| StoredTxFun { tx_sequence_number, package: p.to_vec(), module: m.to_string(), func: f.to_string(), + sender: self.sender.to_vec(), }) .collect(); + let stored_tx_digest = StoredTxDigest { tx_digest: self.transaction_digest.into_inner().to_vec(), tx_sequence_number, - cp_sequence_number, + }; + + let tx_kind = StoredTxKind { + tx_kind: self.tx_kind as i16, + tx_sequence_number, }; ( - tx_senders, + vec![tx_sender], tx_recipients, tx_input_objects, tx_changed_objects, + tx_pkgs, + tx_mods, tx_calls, vec![stored_tx_digest], + vec![tx_kind], ) } } diff --git a/crates/sui-indexer/src/schema/mod.rs b/crates/sui-indexer/src/schema/mod.rs index d1d408d76a307..dfba1ad7b7b43 100644 --- a/crates/sui-indexer/src/schema/mod.rs +++ b/crates/sui-indexer/src/schema/mod.rs @@ -16,17 +16,30 @@ mod inner { pub use crate::schema::pg::checkpoints; pub use crate::schema::pg::display; pub use crate::schema::pg::epochs; + pub use crate::schema::pg::event_emit_module; + pub use crate::schema::pg::event_emit_package; + pub use crate::schema::pg::event_senders; + pub use crate::schema::pg::event_struct_instantiation; + pub use crate::schema::pg::event_struct_module; + pub use crate::schema::pg::event_struct_name; + pub use crate::schema::pg::event_struct_package; pub use crate::schema::pg::events; + pub use crate::schema::pg::feature_flags; pub use crate::schema::pg::objects; pub use crate::schema::pg::objects_history; pub use crate::schema::pg::objects_snapshot; + pub use crate::schema::pg::objects_version; pub use crate::schema::pg::packages; + pub use crate::schema::pg::protocol_configs; pub use crate::schema::pg::pruner_cp_watermark; pub use crate::schema::pg::transactions; - pub use crate::schema::pg::tx_calls; + pub use crate::schema::pg::tx_calls_fun; + pub use crate::schema::pg::tx_calls_mod; + pub use crate::schema::pg::tx_calls_pkg; pub use crate::schema::pg::tx_changed_objects; pub use crate::schema::pg::tx_digests; pub use crate::schema::pg::tx_input_objects; + pub use crate::schema::pg::tx_kinds; pub use crate::schema::pg::tx_recipients; pub use crate::schema::pg::tx_senders; } @@ -38,17 +51,30 @@ mod inner { pub use crate::schema::mysql::checkpoints; pub use crate::schema::mysql::display; pub use crate::schema::mysql::epochs; + pub use crate::schema::mysql::event_emit_module; + pub use crate::schema::mysql::event_emit_package; + pub use crate::schema::mysql::event_senders; + pub use crate::schema::mysql::event_struct_instantiation; + pub use crate::schema::mysql::event_struct_module; + pub use crate::schema::mysql::event_struct_name; + pub use crate::schema::mysql::event_struct_package; pub use crate::schema::mysql::events; + pub use crate::schema::mysql::feature_flags; pub use crate::schema::mysql::objects; pub use crate::schema::mysql::objects_history; 
pub use crate::schema::mysql::objects_snapshot; + pub use crate::schema::mysql::objects_version; pub use crate::schema::mysql::packages; + pub use crate::schema::mysql::protocol_configs; pub use crate::schema::mysql::pruner_cp_watermark; pub use crate::schema::mysql::transactions; - pub use crate::schema::mysql::tx_calls; + pub use crate::schema::mysql::tx_calls_fun; + pub use crate::schema::mysql::tx_calls_mod; + pub use crate::schema::mysql::tx_calls_pkg; pub use crate::schema::mysql::tx_changed_objects; pub use crate::schema::mysql::tx_digests; pub use crate::schema::mysql::tx_input_objects; + pub use crate::schema::mysql::tx_kinds; pub use crate::schema::mysql::tx_recipients; pub use crate::schema::mysql::tx_senders; } @@ -57,17 +83,30 @@ pub use inner::chain_identifier; pub use inner::checkpoints; pub use inner::display; pub use inner::epochs; +pub use inner::event_emit_module; +pub use inner::event_emit_package; +pub use inner::event_senders; +pub use inner::event_struct_instantiation; +pub use inner::event_struct_module; +pub use inner::event_struct_name; +pub use inner::event_struct_package; pub use inner::events; +pub use inner::feature_flags; pub use inner::objects; pub use inner::objects_history; pub use inner::objects_snapshot; +pub use inner::objects_version; pub use inner::packages; +pub use inner::protocol_configs; pub use inner::pruner_cp_watermark; pub use inner::transactions; -pub use inner::tx_calls; +pub use inner::tx_calls_fun; +pub use inner::tx_calls_mod; +pub use inner::tx_calls_pkg; pub use inner::tx_changed_objects; pub use inner::tx_digests; pub use inner::tx_input_objects; +pub use inner::tx_kinds; pub use inner::tx_recipients; pub use inner::tx_senders; diff --git a/crates/sui-indexer/src/schema/mysql.rs b/crates/sui-indexer/src/schema/mysql.rs index 10cdc089c8884..8db69b30f75cf 100644 --- a/crates/sui-indexer/src/schema/mysql.rs +++ b/crates/sui-indexer/src/schema/mysql.rs @@ -26,6 +26,8 @@ diesel::table! { checkpoint_commitments -> Mediumblob, validator_signature -> Blob, end_of_epoch_data -> Nullable, + min_tx_sequence_number -> Nullable, + max_tx_sequence_number -> Nullable } } @@ -80,6 +82,82 @@ diesel::table! { } } +diesel::table! { + event_emit_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Blob, + module -> Text, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + sender -> Blob, + } +} + +diesel::table! { + event_emit_package (package, tx_sequence_number, event_sequence_number) { + package -> Blob, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + sender -> Blob, + } +} + +diesel::table! { + event_senders (sender, tx_sequence_number, event_sequence_number) { + sender -> Blob, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + } +} + +diesel::table! { + event_struct_instantiation (package, module, type_instantiation, tx_sequence_number, event_sequence_number) { + package -> Blob, + module -> Text, + type_instantiation -> Text, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + sender -> Blob, + } +} + +diesel::table! { + event_struct_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Blob, + module -> Text, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + sender -> Blob, + } +} + +diesel::table! 
{ + event_struct_name (package, module, type_name, tx_sequence_number, event_sequence_number) { + package -> Blob, + module -> Text, + type_name -> Text, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + sender -> Blob, + } +} + +diesel::table! { + event_struct_package (package, tx_sequence_number, event_sequence_number) { + package -> Blob, + tx_sequence_number -> Bigint, + event_sequence_number -> Bigint, + sender -> Blob, + } +} + +diesel::table! { + feature_flags (protocol_version, flag_name) { + protocol_version -> Bigint, + flag_name -> Text, + flag_value -> Bool, + } +} + diesel::table! { objects (object_id) { object_id -> Blob, @@ -148,10 +226,37 @@ diesel::table! { } } +diesel::table! { + objects_version (object_id, object_version) { + object_id -> Blob, + object_version -> Bigint, + cp_sequence_number -> Bigint, + } +} + diesel::table! { packages (package_id) { package_id -> Blob, + original_id -> Blob, + package_version -> Bigint, move_package -> Mediumblob, + checkpoint_sequence_number -> Bigint, + } +} + +diesel::table! { + protocol_configs (protocol_version, config_name) { + protocol_version -> Bigint, + config_name -> Text, + config_value -> Nullable, + } +} + +diesel::table! { + pruner_cp_watermark (checkpoint_sequence_number) { + checkpoint_sequence_number -> Bigint, + min_tx_sequence_number -> Bigint, + max_tx_sequence_number -> Bigint, } } @@ -180,12 +285,29 @@ diesel::table! { } diesel::table! { - tx_calls (package, tx_sequence_number, cp_sequence_number) { - cp_sequence_number -> Bigint, + tx_calls_fun (package, module, func, tx_sequence_number) { tx_sequence_number -> Bigint, package -> Blob, module -> Text, func -> Text, + sender -> Blob, + } +} + +diesel::table! { + tx_calls_mod (package, module, tx_sequence_number) { + tx_sequence_number -> Bigint, + package -> Blob, + module -> Text, + sender -> Blob, + } +} + +diesel::table! { + tx_calls_pkg (package, tx_sequence_number) { + tx_sequence_number -> Bigint, + package -> Blob, + sender -> Blob, } } @@ -194,6 +316,7 @@ diesel::table! { cp_sequence_number -> Bigint, tx_sequence_number -> Bigint, object_id -> Blob, + sender -> Blob, } } @@ -210,6 +333,14 @@ diesel::table! { cp_sequence_number -> Bigint, tx_sequence_number -> Bigint, object_id -> Blob, + sender -> Blob, + } +} + +diesel::table! { + tx_kinds (tx_kind, tx_sequence_number) { + tx_sequence_number -> Bigint, + tx_kind -> Smallint, } } @@ -218,6 +349,7 @@ diesel::table! { cp_sequence_number -> Bigint, tx_sequence_number -> Bigint, recipient -> Blob, + sender -> Blob, } } @@ -235,18 +367,31 @@ macro_rules! for_all_tables { $action!( chain_identifier, checkpoints, + pruner_cp_watermark, + display, epochs, + event_emit_module, + event_emit_package, + event_senders, + event_struct_instantiation, + event_struct_module, + event_struct_name, + event_struct_package, events, objects, objects_history, objects_snapshot, + objects_version, packages, pruner_cp_watermark, transactions, - tx_calls, + tx_calls_fun, + tx_calls_mod, + tx_calls_pkg, tx_changed_objects, tx_digests, tx_input_objects, + tx_kinds, tx_recipients, tx_senders ); diff --git a/crates/sui-indexer/src/schema/pg.rs b/crates/sui-indexer/src/schema/pg.rs index 564f7cd721343..2515c98d34c06 100644 --- a/crates/sui-indexer/src/schema/pg.rs +++ b/crates/sui-indexer/src/schema/pg.rs @@ -26,14 +26,8 @@ diesel::table! { checkpoint_commitments -> Bytea, validator_signature -> Bytea, end_of_epoch_data -> Nullable, - } -} - -diesel::table! 
{ - pruner_cp_watermark (checkpoint_sequence_number) { - checkpoint_sequence_number -> Int8, - min_tx_sequence_number -> Int8, - max_tx_sequence_number -> Int8, + min_tx_sequence_number -> Nullable<Int8>, + max_tx_sequence_number -> Nullable<Int8> } } @@ -71,7 +65,75 @@ diesel::table! { } diesel::table! { - events (tx_sequence_number, event_sequence_number, checkpoint_sequence_number) { + event_emit_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_emit_package (package, tx_sequence_number, event_sequence_number) { + package -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_senders (sender, tx_sequence_number, event_sequence_number) { + sender -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + } +} + +diesel::table! { + event_struct_instantiation (package, module, type_instantiation, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + type_instantiation -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_name (package, module, type_name, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + type_name -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_package (package, tx_sequence_number, event_sequence_number) { + package -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + events (tx_sequence_number, event_sequence_number) { tx_sequence_number -> Int8, event_sequence_number -> Int8, transaction_digest -> Bytea, @@ -89,7 +151,7 @@ diesel::table! { } diesel::table! { - events_partition_0 (tx_sequence_number, event_sequence_number, checkpoint_sequence_number) { + events_partition_0 (tx_sequence_number, event_sequence_number) { tx_sequence_number -> Int8, event_sequence_number -> Int8, transaction_digest -> Bytea, @@ -106,6 +168,14 @@ diesel::table! { } } +diesel::table! { + feature_flags (protocol_version, flag_name) { + protocol_version -> Int8, + flag_name -> Text, + flag_value -> Bool, + } +} + diesel::table! { objects (object_id) { object_id -> Bytea, @@ -198,14 +268,41 @@ diesel::table! { } diesel::table! { - packages (package_id) { + objects_version (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + protocol_configs (protocol_version, config_name) { + protocol_version -> Int8, + config_name -> Text, + config_value -> Nullable<Text>, + } +} + +diesel::table! { + packages (package_id, original_id, package_version) { package_id -> Bytea, + original_id -> Bytea, + package_version -> Int8, move_package -> Bytea, + checkpoint_sequence_number -> Int8, + } +} + +diesel::table! { + pruner_cp_watermark (checkpoint_sequence_number) { + checkpoint_sequence_number -> Int8, + min_tx_sequence_number -> Int8, + max_tx_sequence_number -> Int8, } }
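(Aside: the new `objects_version` table maps an (object id, version) pair to the checkpoint that recorded it. A quick sketch of the point lookup this enables — illustrative only, assuming the schema module above and diesel's composite-key `find`:

    use diesel::pg::PgConnection;
    use diesel::prelude::*;
    use sui_indexer::schema::objects_version;

    /// Hypothetical lookup: which checkpoint wrote this object version?
    fn checkpoint_of(conn: &mut PgConnection, id: Vec<u8>, version: i64) -> QueryResult<i64> {
        objects_version::table
            .find((id, version))
            .select(objects_version::cp_sequence_number)
            .first(conn)
    }

End of aside.)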
diesel::table! { - transactions (tx_sequence_number, checkpoint_sequence_number) { + transactions (tx_sequence_number) { tx_sequence_number -> Int8, transaction_digest -> Bytea, raw_transaction -> Bytea, @@ -221,7 +318,7 @@ diesel::table! { } diesel::table! { - transactions_partition_0 (tx_sequence_number, checkpoint_sequence_number) { + transactions_partition_0 (tx_sequence_number) { tx_sequence_number -> Int8, transaction_digest -> Bytea, raw_transaction -> Bytea, @@ -237,50 +334,72 @@ diesel::table! { } diesel::table! { - tx_calls (package, tx_sequence_number, cp_sequence_number) { - cp_sequence_number -> Int8, + tx_calls_fun (package, module, func, tx_sequence_number) { tx_sequence_number -> Int8, package -> Bytea, module -> Text, func -> Text, + sender -> Bytea, } } diesel::table! { - tx_changed_objects (object_id, tx_sequence_number, cp_sequence_number) { - cp_sequence_number -> Int8, + tx_calls_mod (package, module, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + module -> Text, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_pkg (package, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_changed_objects (object_id, tx_sequence_number) { tx_sequence_number -> Int8, object_id -> Bytea, + sender -> Bytea, } } diesel::table! { tx_digests (tx_digest) { tx_digest -> Bytea, - cp_sequence_number -> Int8, tx_sequence_number -> Int8, } } diesel::table! { - tx_input_objects (object_id, tx_sequence_number, cp_sequence_number) { - cp_sequence_number -> Int8, + tx_input_objects (object_id, tx_sequence_number) { tx_sequence_number -> Int8, object_id -> Bytea, + sender -> Bytea, } } diesel::table! { - tx_recipients (recipient, tx_sequence_number, cp_sequence_number) { - cp_sequence_number -> Int8, + tx_kinds (tx_kind, tx_sequence_number) { + tx_sequence_number -> Int8, + tx_kind -> Int2, + } +} + +diesel::table! { + tx_recipients (recipient, tx_sequence_number) { tx_sequence_number -> Int8, recipient -> Bytea, + sender -> Bytea, } } diesel::table! { - tx_senders (sender, tx_sequence_number, cp_sequence_number) { - cp_sequence_number -> Int8, + tx_senders (sender, tx_sequence_number) { tx_sequence_number -> Int8, sender -> Bytea, } }
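(Aside: the `for_all_tables!` macro updated in the next hunk is a callback-style macro — the caller passes another macro, which then gets invoked with the whole table list, as in `for_all_tables!(diesel::allow_tables_to_appear_in_same_query)`. A minimal, self-contained illustration of the pattern, with made-up names:

    // The callback macro: receives the full list and does something with it.
    macro_rules! count_items {
        ($($name:ident),*) => {
            const ITEM_COUNT: usize = [$(stringify!($name)),*].len();
        };
    }

    // The driver macro: owns the canonical list and hands it to any callback.
    macro_rules! for_all_items {
        ($action:ident) => {
            $action!(apples, oranges, pears);
        };
    }

    for_all_items!(count_items);

    fn main() {
        assert_eq!(ITEM_COUNT, 3);
    }

End of aside.)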
@@ -292,24 +411,37 @@ macro_rules! for_all_tables { $action!( chain_identifier, checkpoints, - pruner_cp_watermark, display, epochs, + event_emit_module, + event_emit_package, + event_senders, + event_struct_instantiation, + event_struct_module, + event_struct_name, + event_struct_package, events, - objects, + feature_flags, objects_history, objects_snapshot, + objects_version, packages, + protocol_configs, + pruner_cp_watermark, transactions, - tx_calls, + tx_calls_fun, + tx_calls_mod, + tx_calls_pkg, tx_changed_objects, tx_digests, tx_input_objects, + tx_kinds, tx_recipients, tx_senders ); }; } + pub use for_all_tables; for_all_tables!(diesel::allow_tables_to_appear_in_same_query); diff --git a/crates/sui-indexer/src/store/indexer_store.rs b/crates/sui-indexer/src/store/indexer_store.rs index 868fe31416a6c..2fd2c1531b76a 100644 --- a/crates/sui-indexer/src/store/indexer_store.rs +++ b/crates/sui-indexer/src/store/indexer_store.rs @@ -10,7 +10,9 @@ use crate::errors::IndexerError; use crate::handlers::{EpochToCommit, TransactionObjectChangesToCommit}; use crate::models::display::StoredDisplay; use crate::models::objects::{StoredDeletedObject, StoredObject}; -use crate::types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}; +use crate::types::{ + EventIndex, IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex, +}; #[allow(clippy::large_enum_variant)] pub enum ObjectChangeToCommit { @@ -30,6 +32,13 @@ pub trait IndexerStore: Any + Clone + Sync + Send + 'static { &self, ) -> Result<Option<u64>, IndexerError>; + async fn get_chain_identifier(&self) -> Result<Option<Vec<u8>>, IndexerError>; + + fn persist_protocol_configs_and_feature_flags( + &self, + chain_id: Vec<u8>, + ) -> Result<(), IndexerError>; + async fn persist_objects( &self, object_changes: Vec<TransactionObjectChangesToCommit>, @@ -63,6 +72,11 @@ pub trait IndexerStore: Any + Clone + Sync + Send + 'static { async fn persist_tx_indices(&self, indices: Vec<TxIndex>) -> Result<(), IndexerError>; async fn persist_events(&self, events: Vec<IndexedEvent>) -> Result<(), IndexerError>; + async fn persist_event_indices( + &self, + event_indices: Vec<EventIndex>, + ) -> Result<(), IndexerError>; + async fn persist_displays( &self, display_updates: BTreeMap<String, StoredDisplay>, diff --git a/crates/sui-indexer/src/store/mod.rs b/crates/sui-indexer/src/store/mod.rs index ae04c2ea1c1c2..f520f6afa1414 100644 --- a/crates/sui-indexer/src/store/mod.rs +++ b/crates/sui-indexer/src/store/mod.rs @@ -11,7 +11,7 @@ pub mod pg_partition_manager; pub mod diesel_macro { thread_local! { - pub static CALLED_FROM_BLOCKING_POOL: std::cell::RefCell<bool> = std::cell::RefCell::new(false); + pub static CALLED_FROM_BLOCKING_POOL: std::cell::RefCell<bool> = const { std::cell::RefCell::new(false) }; } #[macro_export] @@ -292,10 +292,11 @@ pub mod diesel_macro { /// Check that we are in a context conducive to making blocking calls. /// This is done by either: /// - Checking that we are not inside a tokio runtime context + /// /// Or: /// - If we are inside a tokio runtime context, ensure that the call went through - /// `IndexerReader::spawn_blocking` which properly moves the blocking call to a blocking thread - /// pool. + /// `IndexerReader::spawn_blocking` which properly moves the blocking call to a blocking thread + /// pool. #[macro_export] macro_rules! blocking_call_is_ok_or_panic { () => {{ @@ -312,6 +313,36 @@ pub mod diesel_macro { }}; } + #[macro_export] + macro_rules!
persist_chunk_into_table { + ($table:expr, $chunk:expr, $pool:expr) => {{ + let now = std::time::Instant::now(); + let chunk_len = $chunk.len(); + transactional_blocking_with_retry!( + $pool, + |conn| { + for chunk in $chunk.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + insert_or_ignore_into!($table, chunk, conn); + } + Ok::<(), IndexerError>(()) + }, + PG_DB_COMMIT_SLEEP_DURATION + ) + .tap_ok(|_| { + let elapsed = now.elapsed().as_secs_f64(); + info!( + elapsed, + "Persisted {} rows to {}", + chunk_len, + stringify!($table), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist {} with error: {}", stringify!($table), e); + }) + }}; + } + pub use blocking_call_is_ok_or_panic; pub use read_only_blocking; pub use read_only_repeatable_blocking; diff --git a/crates/sui-indexer/src/store/pg_indexer_store.rs b/crates/sui-indexer/src/store/pg_indexer_store.rs index 39e16c2e394ab..23080f3f1fcaa 100644 --- a/crates/sui-indexer/src/store/pg_indexer_store.rs +++ b/crates/sui-indexer/src/store/pg_indexer_store.rs @@ -20,6 +20,7 @@ use itertools::Itertools; use tap::TapFallible; use tracing::info; +use sui_protocol_config::ProtocolConfig; use sui_types::base_types::ObjectID; use crate::db::ConnectionPool; @@ -32,7 +33,9 @@ use crate::models::checkpoints::StoredCheckpoint; use crate::models::checkpoints::StoredCpTx; use crate::models::display::StoredDisplay; use crate::models::epoch::StoredEpochInfo; +use crate::models::epoch::{StoredFeatureFlag, StoredProtocolConfig}; use crate::models::events::StoredEvent; +use crate::models::obj_indices::StoredObjectVersion; use crate::models::objects::{ StoredDeletedHistoryObject, StoredDeletedObject, StoredHistoryObject, StoredObject, StoredObjectSnapshot, @@ -40,13 +43,17 @@ use crate::models::objects::{ use crate::models::packages::StoredPackage; use crate::models::transactions::StoredTransaction; use crate::schema::{ - chain_identifier, checkpoints, display, epochs, events, objects, objects_history, - objects_snapshot, packages, pruner_cp_watermark, transactions, tx_calls, tx_changed_objects, - tx_digests, tx_input_objects, tx_recipients, tx_senders, + chain_identifier, checkpoints, display, epochs, event_emit_module, event_emit_package, + event_senders, event_struct_instantiation, event_struct_module, event_struct_name, + event_struct_package, events, feature_flags, objects, objects_history, objects_snapshot, + objects_version, packages, protocol_configs, pruner_cp_watermark, transactions, tx_calls_fun, + tx_calls_mod, tx_calls_pkg, tx_changed_objects, tx_digests, tx_input_objects, tx_kinds, + tx_recipients, tx_senders, }; +use crate::types::EventIndex; use crate::types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}; use crate::{ - insert_or_ignore_into, on_conflict_do_update, read_only_blocking, + insert_or_ignore_into, on_conflict_do_update, persist_chunk_into_table, read_only_blocking, transactional_blocking_with_retry, }; @@ -56,6 +63,7 @@ use super::ObjectChangeToCommit; #[cfg(feature = "postgres-feature")] use diesel::upsert::excluded; +use sui_types::digests::{ChainIdentifier, CheckpointDigest}; #[macro_export] macro_rules! chunk { @@ -69,7 +77,7 @@ macro_rules! chunk { }}; } -macro_rules! prune_tx_indice_table { +macro_rules! 
prune_tx_or_event_indice_table { ($table:ident, $conn:expr, $min_tx:expr, $max_tx:expr, $context_msg:expr) => { diesel::delete($table::table.filter($table::tx_sequence_number.between($min_tx, $max_tx))) .execute($conn) @@ -127,6 +135,7 @@ SET object_version = EXCLUDED.object_version, pub struct PgIndexerStoreConfig { pub parallel_chunk_size: usize, pub parallel_objects_chunk_size: usize, + #[allow(unused)] pub epochs_to_keep: Option<u64>, } @@ -191,6 +200,38 @@ impl PgIndexerStore { .context("Failed reading latest epoch id from PostgresDB") } + /// Get the range of the protocol versions that need to be indexed. + pub fn get_protocol_version_index_range(&self) -> Result<(i64, i64), IndexerError> { + // We start indexing from the next protocol version after the latest one stored in the db. + let start = read_only_blocking!(&self.blocking_cp, |conn| { + protocol_configs::dsl::protocol_configs + .select(max(protocol_configs::protocol_version)) + .first::<Option<i64>>(conn) + }) + .context("Failed reading latest protocol version from PostgresDB")? + .map_or(1, |v| v + 1); + + // We end indexing at the protocol version of the latest epoch stored in the db. + let end = read_only_blocking!(&self.blocking_cp, |conn| { + epochs::dsl::epochs + .select(max(epochs::protocol_version)) + .first::<Option<i64>>(conn) + }) + .context("Failed reading latest epoch protocol version from PostgresDB")? + .unwrap_or(1); + Ok((start, end)) + } + + pub fn get_chain_identifier(&self) -> Result<Option<Vec<u8>>, IndexerError> { + read_only_blocking!(&self.blocking_cp, |conn| { + chain_identifier::dsl::chain_identifier + .select(chain_identifier::checkpoint_digest) + .first::<Vec<u8>>(conn) + .optional() + }) + .context("Failed reading chain id from PostgresDB") + } + fn get_latest_checkpoint_sequence_number(&self) -> Result<Option<u64>, IndexerError> { read_only_blocking!(&self.blocking_cp, |conn| { checkpoints::dsl::checkpoints @@ -462,6 +503,12 @@ impl PgIndexerStore { .eq(excluded(objects_snapshot::checkpoint_sequence_number)), objects_snapshot::owner_type.eq(excluded(objects_snapshot::owner_type)), objects_snapshot::owner_id.eq(excluded(objects_snapshot::owner_id)), + objects_snapshot::object_type_package + .eq(excluded(objects_snapshot::object_type_package)), + objects_snapshot::object_type_module + .eq(excluded(objects_snapshot::object_type_module)), + objects_snapshot::object_type_name + .eq(excluded(objects_snapshot::object_type_name)), objects_snapshot::object_type .eq(excluded(objects_snapshot::object_type)), objects_snapshot::serialized_object .eq(excluded(objects_snapshot::serialized_object)), @@ -484,6 +531,9 @@ impl PgIndexerStore { .eq(excluded.checkpoint_sequence_number), objects_snapshot::owner_type.eq(excluded.owner_type), objects_snapshot::owner_id.eq(excluded.owner_id), + objects_snapshot::object_type_package.eq(excluded.object_type_package), + objects_snapshot::object_type_module.eq(excluded.object_type_module), + objects_snapshot::object_type_name.eq(excluded.object_type_name), objects_snapshot::object_type.eq(excluded.object_type), objects_snapshot::serialized_object.eq(excluded.serialized_object), objects_snapshot::coin_type.eq(excluded.coin_type), @@ -522,13 +572,16 @@ impl PgIndexerStore { .checkpoint_db_commit_latency_objects_history_chunks .start_timer(); let mut mutated_objects: Vec<StoredHistoryObject> = vec![]; + let mut object_versions: Vec<StoredObjectVersion> = vec![]; let mut deleted_object_ids: Vec<StoredDeletedHistoryObject> = vec![]; for object in objects { match object { ObjectChangeToCommit::MutatedObject(stored_object) => { + object_versions.push(StoredObjectVersion::from(&stored_object)); mutated_objects.push(stored_object.into()); }
ObjectChangeToCommit::DeletedObject(stored_deleted_object) => { + object_versions.push(StoredObjectVersion::from(&stored_deleted_object)); deleted_object_ids.push(stored_deleted_object.into()); } } } @@ -547,6 +600,11 @@ impl PgIndexerStore { ); } + for object_version_chunk in object_versions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + insert_or_ignore_into!(objects_version::table, object_version_chunk, conn); + } + for deleted_objects_chunk in deleted_object_ids.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { @@ -606,6 +664,8 @@ impl PgIndexerStore { // If the first checkpoint has sequence number 0, we need to persist the digest as // chain identifier. if first_checkpoint.sequence_number == 0 { + let checkpoint_digest = first_checkpoint.checkpoint_digest.into_inner().to_vec(); + self.persist_protocol_configs_and_feature_flags(checkpoint_digest.clone())?; transactional_blocking_with_retry!( &self.blocking_cp, |conn| { @@ -806,13 +866,144 @@ impl PgIndexerStore { }) } + async fn persist_event_indices_chunk( + &self, + indices: Vec<EventIndex>, + ) -> Result<(), IndexerError> { + let guard = self + .metrics + .checkpoint_db_commit_latency_event_indices_chunks + .start_timer(); + let len = indices.len(); + let ( + event_emit_packages, + event_emit_modules, + event_senders, + event_struct_packages, + event_struct_modules, + event_struct_names, + event_struct_instantiations, + ) = indices.into_iter().map(|i| i.split()).fold( + ( + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + ), + |( + mut event_emit_packages, + mut event_emit_modules, + mut event_senders, + mut event_struct_packages, + mut event_struct_modules, + mut event_struct_names, + mut event_struct_instantiations, + ), + index| { + event_emit_packages.push(index.0); + event_emit_modules.push(index.1); + event_senders.push(index.2); + event_struct_packages.push(index.3); + event_struct_modules.push(index.4); + event_struct_names.push(index.5); + event_struct_instantiations.push(index.6); + ( + event_emit_packages, + event_emit_modules, + event_senders, + event_struct_packages, + event_struct_modules, + event_struct_names, + event_struct_instantiations, + ) + }, + ); + + // Now persist all the event indices in parallel into their tables.
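(Aside: the fan-out that follows pushes one blocking task per table and then joins them all; the error handling collapses a doubly-nested result — "did the task run?" around "did the write succeed?" — in two `collect` steps. In miniature, with placeholder error types rather than the PR's own:

    use futures::future::join_all;
    use tokio::task::JoinHandle;

    // Two-layer error handling, as in the fan-out below.
    async fn join_persist_tasks(
        tasks: Vec<JoinHandle<Result<(), String>>>,
    ) -> Result<(), String> {
        join_all(tasks)
            .await
            .into_iter()
            // First level: surface panics/cancellations (JoinError).
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("join error: {e}"))?
            .into_iter()
            // Second level: surface the write errors themselves.
            .collect::<Result<Vec<_>, _>>()?;
        Ok(())
    }

End of aside.)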
+ let mut futures = vec![]; + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!( + event_emit_package::table, + event_emit_packages, + &this.blocking_cp + ) + })); + + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!( + event_emit_module::table, + event_emit_modules, + &this.blocking_cp + ) + })); + + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!(event_senders::table, event_senders, &this.blocking_cp) + })); + + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!( + event_struct_package::table, + event_struct_packages, + &this.blocking_cp + ) + })); + + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!( + event_struct_module::table, + event_struct_modules, + &this.blocking_cp + ) + })); + + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!( + event_struct_name::table, + event_struct_names, + &this.blocking_cp + ) + })); + + futures.push(self.spawn_blocking_task(move |this| { + persist_chunk_into_table!( + event_struct_instantiation::table, + event_struct_instantiations, + &this.blocking_cp + ) + })); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::<Result<Vec<_>, _>>() + .map_err(|e| { + tracing::error!("Failed to join event indices futures in a chunk: {}", e); + IndexerError::from(e) + })? + .into_iter() + .collect::<Result<Vec<_>, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all event indices in a chunk: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked event indices", len); + Ok(()) + } + async fn persist_tx_indices_chunk(&self, indices: Vec<TxIndex>) -> Result<(), IndexerError> { let guard = self .metrics .checkpoint_db_commit_latency_tx_indices_chunks .start_timer(); let len = indices.len(); - let (senders, recipients, input_objects, changed_objects, calls, digests) = + let (senders, recipients, input_objects, changed_objects, pkgs, mods, funs, digests, kinds) = indices.into_iter().map(|i| i.split()).fold( ( Vec::new(), @@ -821,29 +1012,41 @@ impl PgIndexerStore { Vec::new(), Vec::new(), Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), ), |( mut tx_senders, mut tx_recipients, mut tx_input_objects, mut tx_changed_objects, - mut tx_calls, + mut tx_pkgs, + mut tx_mods, + mut tx_funs, mut tx_digests, + mut tx_kinds, ), index| { tx_senders.extend(index.0); tx_recipients.extend(index.1); tx_input_objects.extend(index.2); tx_changed_objects.extend(index.3); - tx_calls.extend(index.4); - tx_digests.extend(index.5); + tx_pkgs.extend(index.4); + tx_mods.extend(index.5); + tx_funs.extend(index.6); + tx_digests.extend(index.7); + tx_kinds.extend(index.8); ( tx_senders, tx_recipients, tx_input_objects, tx_changed_objects, - tx_calls, + tx_pkgs, + tx_mods, + tx_funs, tx_digests, + tx_kinds, ) }, ); @@ -932,14 +1135,15 @@ impl PgIndexerStore { tracing::error!("Failed to persist tx_changed_objects with error: {}", e); }) })); + futures.push(self.spawn_blocking_task(move |this| { let now = Instant::now(); - let calls_len = calls.len(); + let rows_len = pkgs.len(); transactional_blocking_with_retry!( &this.blocking_cp, |conn| { - for chunk in calls.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { - insert_or_ignore_into!(tx_calls::table, chunk, conn); + for chunk in pkgs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + insert_or_ignore_into!(tx_calls_pkg::table, chunk, conn); } Ok::<(), IndexerError>(()) }, @@ -947,12 +1151,60 @@ impl
PgIndexerStore { ) .tap_ok(|_| { let elapsed = now.elapsed().as_secs_f64(); - info!(elapsed, "Persisted {} rows to tx_calls tables", calls_len); + info!( + elapsed, + "Persisted {} rows to tx_calls_pkg table", rows_len + ); }) .tap_err(|e| { - tracing::error!("Failed to persist tx_calls with error: {}", e); + tracing::error!("Failed to persist tx_calls_pkg with error: {}", e); }) })); + + futures.push(self.spawn_blocking_task(move |this| { + let now = Instant::now(); + let rows_len = mods.len(); + transactional_blocking_with_retry!( + &this.blocking_cp, + |conn| { + for chunk in mods.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + insert_or_ignore_into!(tx_calls_mod::table, chunk, conn); + } + Ok::<(), IndexerError>(()) + }, + PG_DB_COMMIT_SLEEP_DURATION + ) + .tap_ok(|_| { + let elapsed = now.elapsed().as_secs_f64(); + info!(elapsed, "Persisted {} rows to tx_calls_mod table", rows_len); + }) + .tap_err(|e| { + tracing::error!("Failed to persist tx_calls_mod with error: {}", e); + }) + })); + + futures.push(self.spawn_blocking_task(move |this| { + let now = Instant::now(); + let rows_len = funs.len(); + transactional_blocking_with_retry!( + &this.blocking_cp, + |conn| { + for chunk in funs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + insert_or_ignore_into!(tx_calls_fun::table, chunk, conn); + } + Ok::<(), IndexerError>(()) + }, + PG_DB_COMMIT_SLEEP_DURATION + ) + .tap_ok(|_| { + let elapsed = now.elapsed().as_secs_f64(); + info!(elapsed, "Persisted {} rows to tx_calls_fun table", rows_len); + }) + .tap_err(|e| { + tracing::error!("Failed to persist tx_calls_fun with error: {}", e); + }) + })); + futures.push(self.spawn_blocking_task(move |this| { let now = Instant::now(); let calls_len = digests.len(); @@ -960,12 +1212,7 @@ impl PgIndexerStore { &this.blocking_cp, |conn| { for chunk in digests.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { - diesel::insert_into(tx_digests::table) - .values(chunk) - .on_conflict_do_nothing() - .execute(conn) - .map_err(IndexerError::from) - .context("Failed to write tx_digests chunk to PostgresDB")?; + insert_or_ignore_into!(tx_digests::table, chunk, conn); } Ok::<(), IndexerError>(()) }, @@ -980,6 +1227,28 @@ impl PgIndexerStore { }) })); + futures.push(self.spawn_blocking_task(move |this| { + let now = Instant::now(); + let rows_len = kinds.len(); + transactional_blocking_with_retry!( + &this.blocking_cp, + |conn| { + for chunk in kinds.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + insert_or_ignore_into!(tx_kinds::table, chunk, conn); + } + Ok::<(), IndexerError>(()) + }, + Duration::from_secs(60) + ) + .tap_ok(|_| { + let elapsed = now.elapsed().as_secs_f64(); + info!(elapsed, "Persisted {} rows to tx_kinds table", rows_len); + }) + .tap_err(|e| { + tracing::error!("Failed to persist tx_kinds with error: {}", e); + }) + })); + futures::future::join_all(futures) .await .into_iter() @@ -1007,12 +1276,35 @@ impl PgIndexerStore { .checkpoint_db_commit_latency_epoch .start_timer(); let epoch_id = epoch.new_epoch.epoch; + transactional_blocking_with_retry!( &self.blocking_cp, |conn| { if let Some(last_epoch) = &epoch.last_epoch { let last_epoch_id = last_epoch.epoch; - let last_epoch = StoredEpochInfo::from_epoch_end_info(last_epoch); + // Overwrites the `epoch_total_transactions` field on `epoch.last_epoch` because + // we are not guaranteed to have the latest data in db when this is set on + // indexer's chain-reading side. However, when we `persist_epoch`, the + // checkpoints from an epoch ago must have been indexed.
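(Aside: the derivation that follows rests on simple arithmetic over cumulative totals — an epoch's own transaction count is the difference of the network totals at consecutive epoch boundaries. A tiny worked sketch, with invented numbers:

    // Cumulative network totals at epoch boundaries turn into per-epoch
    // counts by subtraction.
    fn epoch_tx_count(total_at_prev_epoch_end: u64, total_at_epoch_end: u64) -> u64 {
        total_at_epoch_end - total_at_prev_epoch_end
    }

    fn main() {
        // If epoch N-2 closed at 900 cumulative txs and epoch N-1 closes at
        // 1250, then epoch N-1 itself contained 350 transactions.
        assert_eq!(epoch_tx_count(900, 1250), 350);
    }

End of aside.)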
+ let previous_epoch_network_total_transactions = match epoch_id { + 0 | 1 => 0, + _ => { + let prev_epoch_id = epoch_id - 2; + let result = checkpoints::table + .filter(checkpoints::epoch.eq(prev_epoch_id as i64)) + .select(max(checkpoints::network_total_transactions)) + .first::<Option<i64>>(conn) + .map(|o| o.unwrap_or(0))?; + + result as u64 + } + }; + + let epoch_total_transactions = epoch.network_total_transactions + - previous_epoch_network_total_transactions; + + let mut last_epoch = StoredEpochInfo::from_epoch_end_info(last_epoch); + last_epoch.epoch_total_transactions = Some(epoch_total_transactions as i64); info!(last_epoch_id, "Persisting epoch end data."); on_conflict_do_update!( epochs::table, @@ -1094,6 +1386,14 @@ impl PgIndexerStore { EpochPartitionData::compose_data(epoch_to_commit, last_epoch); let table_partitions = self.partition_manager.get_table_partitions()?; for (table, (_, last_partition)) in table_partitions { + // Only advance epoch partition for epoch partitioned tables. + if !self + .partition_manager + .get_strategy(&table) + .is_epoch_partitioned() + { + continue; + } let guard = self.metrics.advance_epoch_latency.start_timer(); self.partition_manager.advance_epoch( table.clone(), @@ -1147,47 +1447,121 @@ impl PgIndexerStore { ) } + fn prune_event_indices_table(&self, min_tx: u64, max_tx: u64) -> Result<(), IndexerError> { + let (min_tx, max_tx) = (min_tx as i64, max_tx as i64); + transactional_blocking_with_retry!( + &self.blocking_cp, + |conn| { + prune_tx_or_event_indice_table!( + event_emit_module, + conn, + min_tx, + max_tx, + "Failed to prune event_emit_module table" + ); + prune_tx_or_event_indice_table!( + event_emit_package, + conn, + min_tx, + max_tx, + "Failed to prune event_emit_package table" + ); + prune_tx_or_event_indice_table![ + event_senders, + conn, + min_tx, + max_tx, + "Failed to prune event_senders table" + ]; + prune_tx_or_event_indice_table![ + event_struct_instantiation, + conn, + min_tx, + max_tx, + "Failed to prune event_struct_instantiation table" + ]; + prune_tx_or_event_indice_table![ + event_struct_module, + conn, + min_tx, + max_tx, + "Failed to prune event_struct_module table" + ]; + prune_tx_or_event_indice_table![ + event_struct_name, + conn, + min_tx, + max_tx, + "Failed to prune event_struct_name table" + ]; + prune_tx_or_event_indice_table![ + event_struct_package, + conn, + min_tx, + max_tx, + "Failed to prune event_struct_package table" + ]; + Ok::<(), IndexerError>(()) + }, + PG_DB_COMMIT_SLEEP_DURATION + ) + } + fn prune_tx_indices_table(&self, min_tx: u64, max_tx: u64) -> Result<(), IndexerError> { let (min_tx, max_tx) = (min_tx as i64, max_tx as i64); transactional_blocking_with_retry!( &self.blocking_cp, |conn| { - prune_tx_indice_table!( + prune_tx_or_event_indice_table!( tx_senders, conn, min_tx, max_tx, "Failed to prune tx_senders table" ); - prune_tx_indice_table!( + prune_tx_or_event_indice_table!( tx_recipients, conn, min_tx, max_tx, "Failed to prune tx_recipients table" ); - prune_tx_indice_table![ + prune_tx_or_event_indice_table![ tx_input_objects, conn, min_tx, max_tx, "Failed to prune tx_input_objects table" ]; - prune_tx_indice_table![ + prune_tx_or_event_indice_table![ tx_changed_objects, conn, min_tx, max_tx, "Failed to prune tx_changed_objects table" ]; - prune_tx_indice_table![ - tx_calls, + prune_tx_or_event_indice_table![ + tx_calls_pkg, + conn, + min_tx, + max_tx, + "Failed to prune tx_calls_pkg table" + ]; + prune_tx_or_event_indice_table![ + tx_calls_mod, conn, min_tx, max_tx, - "Failed to prune tx_calls
table" + "Failed to prune tx_calls_mod table" ]; - prune_tx_indice_table![ + prune_tx_or_event_indice_table![ + tx_calls_fun, + conn, + min_tx, + max_tx, + "Failed to prune tx_calls_fun table" + ]; + prune_tx_or_event_indice_table![ tx_digests, conn, min_tx, @@ -1224,9 +1598,9 @@ impl PgIndexerStore { read_only_blocking!(&self.blocking_cp, |conn| { checkpoints::table .filter(checkpoints::epoch.eq(epoch as i64)) - .select(max(checkpoints::network_total_transactions)) - .first::>(conn) - .map(|o| o.unwrap_or(0)) + .select(checkpoints::network_total_transactions) + .order_by(checkpoints::sequence_number.desc()) + .first::(conn) }) .context("Failed to get network total transactions in epoch") .map(|v| v as u64) @@ -1294,6 +1668,11 @@ impl IndexerStore for PgIndexerStore { .await } + async fn get_chain_identifier(&self) -> Result>, IndexerError> { + self.execute_in_blocking_worker(|this| this.get_chain_identifier()) + .await + } + async fn get_latest_object_snapshot_checkpoint_sequence_number( &self, ) -> Result, IndexerError> { @@ -1621,6 +2000,46 @@ impl IndexerStore for PgIndexerStore { .await } + async fn persist_event_indices(&self, indices: Vec) -> Result<(), IndexerError> { + if indices.is_empty() { + return Ok(()); + } + let len = indices.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_event_indices + .start_timer(); + let chunks = chunk!(indices, self.config.parallel_chunk_size); + + let futures = chunks + .into_iter() + .map(|chunk| { + self.spawn_task(move |this: Self| async move { + this.persist_event_indices_chunk(chunk).await + }) + }) + .collect::>(); + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + tracing::error!("Failed to join persist_event_indices_chunk futures: {}", e); + IndexerError::from(e) + })? + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all event_indices chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} event_indices chunks", len); + Ok(()) + } + async fn persist_tx_indices(&self, indices: Vec) -> Result<(), IndexerError> { if indices.is_empty() { return Ok(()); @@ -1716,6 +2135,21 @@ impl IndexerStore for PgIndexerStore { "Pruned transactions for checkpoint {} from tx {} to tx {}", cp, min_tx, max_tx ); + self.execute_in_blocking_worker(move |this| { + this.prune_event_indices_table(min_tx, max_tx) + }) + .await + .unwrap_or_else(|e| { + tracing::error!( + "Failed to prune events of transactions for cp {}: {}", + cp, + e + ); + }); + info!( + "Pruned events of transactions for checkpoint {} from tx {} to tx {}", + cp, min_tx, max_tx + ); self.metrics.last_pruned_transaction.set(max_tx as i64); self.execute_in_blocking_worker(move |this| this.prune_cp_tx_table(cp)) @@ -1753,6 +2187,73 @@ impl IndexerStore for PgIndexerStore { fn as_any(&self) -> &dyn StdAny { self } + + /// Persist protocol configs and feature flags until the protocol version for the latest epoch + /// we have stored in the db, inclusive. 
+ /// Persist protocol configs and feature flags until the protocol version for the latest epoch + /// we have stored in the db, inclusive. + fn persist_protocol_configs_and_feature_flags( + &self, + chain_id: Vec<u8>, + ) -> Result<(), IndexerError> { + let chain_id = ChainIdentifier::from( + CheckpointDigest::try_from(chain_id).expect("Unable to convert chain id"), + ); + + let mut all_configs = vec![]; + let mut all_flags = vec![]; + + let (start_version, end_version) = self.get_protocol_version_index_range()?; + info!( + "Persisting protocol configs with start_version: {}, end_version: {}", + start_version, end_version + ); + + // Gather all protocol configs and feature flags for all versions between start and end. + for version in start_version..=end_version { + let protocol_configs = ProtocolConfig::get_for_version_if_supported( + (version as u64).into(), + chain_id.chain(), + ) + .ok_or(IndexerError::GenericError(format!( + "Unable to fetch protocol version {} and chain {:?}", + version, + chain_id.chain() + )))?; + let configs_vec = protocol_configs + .attr_map() + .into_iter() + .map(|(k, v)| StoredProtocolConfig { + protocol_version: version, + config_name: k, + config_value: v.map(|v| v.to_string()), + }) + .collect::<Vec<_>>(); + all_configs.extend(configs_vec); + + let feature_flags = protocol_configs + .feature_map() + .into_iter() + .map(|(k, v)| StoredFeatureFlag { + protocol_version: version, + flag_name: k, + flag_value: v, + }) + .collect::<Vec<_>>(); + all_flags.extend(feature_flags); + } + + // Now insert all of them into the db. + // TODO: right now the size of these updates is manageable but later we may consider batching. + transactional_blocking_with_retry!( + &self.blocking_cp, + |conn| { + insert_or_ignore_into!(protocol_configs::table, all_configs.clone(), conn); + insert_or_ignore_into!(feature_flags::table, all_flags.clone(), conn); + Ok::<(), IndexerError>(()) + }, + PG_DB_COMMIT_SLEEP_DURATION + )?; + Ok(()) + } }
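(Aside: the hunk below deduplicates mutated objects down to the newest version per object id, using the classic `HashMap` entry pattern. The generic shape of that pattern, with plain (id, version) tuples standing in for the indexer's object type:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    // Keep only the highest version per object id.
    fn latest_versions(objects: Vec<(u64, u64)>) -> HashMap<u64, (u64, u64)> {
        let mut latest = HashMap::new();
        for obj in objects {
            match latest.entry(obj.0) {
                Entry::Vacant(e) => {
                    e.insert(obj);
                }
                Entry::Occupied(mut e) => {
                    if obj.1 > e.get().1 {
                        e.insert(obj);
                    }
                }
            }
        }
        latest
    }

End of aside.)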
/// Construct deleted objects and mutated objects to commit. @@ -1773,15 +2274,15 @@ fn make_final_list_of_objects_to_commit( .flat_map(|changes| changes.changed_objects); let mut latest_objects = HashMap::new(); for object in mutated_objects { - if deleted_objects.contains_key(&object.object_id) { + if deleted_objects.contains_key(&object.object.id()) { continue; } - match latest_objects.entry(object.object_id) { + match latest_objects.entry(object.object.id()) { Entry::Vacant(e) => { e.insert(object); } Entry::Occupied(mut e) => { - if object.object_version > e.get().object_version { + if object.object.version() > e.get().object.version() { e.insert(object); } } diff --git a/crates/sui-indexer/src/store/pg_partition_manager.rs b/crates/sui-indexer/src/store/pg_partition_manager.rs index f67f1ed6b4041..f27078ca47048 100644 --- a/crates/sui-indexer/src/store/pg_partition_manager.rs +++ b/crates/sui-indexer/src/store/pg_partition_manager.rs @@ -4,7 +4,7 @@ use diesel::r2d2::R2D2Connection; use diesel::sql_types::{BigInt, VarChar}; use diesel::{QueryableByName, RunQueryDsl}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::time::Duration; use tracing::{error, info}; @@ -44,22 +44,42 @@ GROUP BY table_name; pub struct PgPartitionManager { cp: ConnectionPool, + partition_strategies: HashMap<&'static str, PgPartitionStrategy>, } impl Clone for PgPartitionManager { fn clone(&self) -> PgPartitionManager { Self { cp: self.cp.clone(), + partition_strategies: self.partition_strategies.clone(), } } } +#[derive(Clone, Copy)] +pub enum PgPartitionStrategy { + CheckpointSequenceNumber, + TxSequenceNumber, + ObjectId, +} + +impl PgPartitionStrategy { + pub fn is_epoch_partitioned(&self) -> bool { + matches!( + self, + Self::CheckpointSequenceNumber | Self::TxSequenceNumber + ) + } +} + #[derive(Clone, Debug)] pub struct EpochPartitionData { last_epoch: u64, next_epoch: u64, last_epoch_start_cp: u64, next_epoch_start_cp: u64, + last_epoch_start_tx: u64, + next_epoch_start_tx: u64, } impl EpochPartitionData { @@ -68,18 +88,35 @@ impl EpochPartitionData { let last_epoch_start_cp = last_db_epoch.first_checkpoint_id as u64; let next_epoch = epoch.new_epoch.epoch; let next_epoch_start_cp = epoch.new_epoch.first_checkpoint_id; + + // Determining the tx_sequence_number range for the epoch partition differs from the + // checkpoint_sequence_number range, because the former is a sum of total transactions - + // this sum already addresses the off-by-one. + let next_epoch_start_tx = epoch.network_total_transactions; + let last_epoch_start_tx = + next_epoch_start_tx - last_db_epoch.epoch_total_transactions.unwrap() as u64; + Self { last_epoch, next_epoch, last_epoch_start_cp, next_epoch_start_cp, + last_epoch_start_tx, + next_epoch_start_tx, } } } impl PgPartitionManager { pub fn new(cp: ConnectionPool) -> Result<Self, IndexerError> { - let manager = Self { cp }; + let mut partition_strategies = HashMap::new(); + partition_strategies.insert("events", PgPartitionStrategy::TxSequenceNumber); + partition_strategies.insert("transactions", PgPartitionStrategy::TxSequenceNumber); + partition_strategies.insert("objects_version", PgPartitionStrategy::ObjectId); + let manager = Self { + cp, + partition_strategies, + }; let tables = manager.get_table_partitions()?; info!( "Found {} tables with partitions : [{:?}]", @@ -116,12 +153,41 @@ impl PgPartitionManager { ) } + /// Tries to fetch the partitioning strategy for the given partitioned table.
Defaults to + /// `CheckpointSequenceNumber` as the majority of our tables are partitioned on an epoch's + /// checkpoints today. + pub fn get_strategy(&self, table_name: &str) -> PgPartitionStrategy { + self.partition_strategies + .get(table_name) + .copied() + .unwrap_or(PgPartitionStrategy::CheckpointSequenceNumber) + } + + pub fn determine_epoch_partition_range( + &self, + table_name: &str, + data: &EpochPartitionData, + ) -> Option<(u64, u64)> { + match self.get_strategy(table_name) { + PgPartitionStrategy::CheckpointSequenceNumber => { + Some((data.last_epoch_start_cp, data.next_epoch_start_cp)) + } + PgPartitionStrategy::TxSequenceNumber => { + Some((data.last_epoch_start_tx, data.next_epoch_start_tx)) + } + PgPartitionStrategy::ObjectId => None, + } + } + pub fn advance_epoch( &self, table: String, last_partition: u64, data: &EpochPartitionData, ) -> Result<(), IndexerError> { + let Some(partition_range) = self.determine_epoch_partition_range(&table, data) else { + return Ok(()); + }; if data.next_epoch == 0 { tracing::info!("Epoch 0 partition has been created in the initial setup."); return Ok(()); @@ -136,8 +202,8 @@ impl PgPartitionManager { .bind::<VarChar, _>(table.clone()) .bind::<BigInt, _>(data.last_epoch as i64) .bind::<BigInt, _>(data.next_epoch as i64) - .bind::<BigInt, _>(data.last_epoch_start_cp as i64) - .bind::<BigInt, _>(data.next_epoch_start_cp as i64), + .bind::<BigInt, _>(partition_range.0 as i64) + .bind::<BigInt, _>(partition_range.1 as i64), conn, ) }, @@ -148,14 +214,14 @@ impl PgPartitionManager { transactional_blocking_with_retry!( &self.cp, |conn| { - RunQueryDsl::execute(diesel::sql_query(format!("ALTER TABLE {table_name} REORGANIZE PARTITION {table_name}_partition_{last_epoch} INTO (PARTITION {table_name}_partition_{last_epoch} VALUES LESS THAN ({next_epoch_start_cp}), PARTITION {table_name}_partition_{next_epoch} VALUES LESS THAN MAXVALUE)", table_name = table.clone(), last_epoch = data.last_epoch as i64, next_epoch_start_cp = data.next_epoch_start_cp as i64, next_epoch = data.next_epoch as i64)), conn) + RunQueryDsl::execute(diesel::sql_query(format!("ALTER TABLE {table_name} REORGANIZE PARTITION {table_name}_partition_{last_epoch} INTO (PARTITION {table_name}_partition_{last_epoch} VALUES LESS THAN ({next_epoch_start}), PARTITION {table_name}_partition_{next_epoch} VALUES LESS THAN MAXVALUE)", table_name = table.clone(), last_epoch = data.last_epoch as i64, next_epoch_start = partition_range.1 as i64, next_epoch = data.next_epoch as i64)), conn) }, Duration::from_secs(10) )?; info!( "Advanced epoch partition for table {} from {} to {}, prev partition upper bound {}", - table, last_partition, data.next_epoch, data.last_epoch_start_cp + table, last_partition, data.next_epoch, partition_range.0 ); } else if last_partition != data.next_epoch { // skip when the partition is already advanced once, which is possible when indexer diff --git a/crates/sui-indexer/src/test_utils.rs b/crates/sui-indexer/src/test_utils.rs index dd5c72b485521..486ff1f51c8d2 100644 --- a/crates/sui-indexer/src/test_utils.rs +++ b/crates/sui-indexer/src/test_utils.rs @@ -148,7 +148,7 @@ pub async fn start_test_indexer_impl( } ReaderWriterConfig::Writer { snapshot_config } => { if config.reset_db { - crate::db::reset_database(&mut blocking_pool.get().unwrap(), true).unwrap(); + crate::db::reset_database(&mut blocking_pool.get().unwrap()).unwrap(); } let store_clone = store.clone();
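(Aside: with the strategy table above, advancing an epoch picks partition bounds per table — checkpoint numbers for checkpoint-partitioned tables, tx sequence numbers for `events` and `transactions`, and nothing at all for `objects_version`. A small usage sketch of the dispatch, with an assumed module path for the illustration:

    use sui_indexer::store::pg_partition_manager::{EpochPartitionData, PgPartitionManager};

    // Hypothetical: describe what advancing this table's partition would do.
    fn describe_partition_plan(
        manager: &PgPartitionManager,
        table: &str,
        data: &EpochPartitionData,
    ) -> String {
        match manager.determine_epoch_partition_range(table, data) {
            Some((lower, upper)) => {
                format!("{table}: new partition covers [{lower}, {upper})")
            }
            None => format!("{table}: not epoch-partitioned, nothing to advance"),
        }
    }

End of aside.)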
diff --git a/crates/sui-indexer/src/types.rs b/crates/sui-indexer/src/types.rs index 7eb2c0071ce7f..04409ecb53757 100644 --- a/crates/sui-indexer/src/types.rs +++ b/crates/sui-indexer/src/types.rs @@ -16,7 +16,8 @@ use sui_types::dynamic_field::DynamicFieldInfo; use sui_types::effects::TransactionEffects; use sui_types::event::SystemEpochInfoEvent; use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointCommitment, CheckpointDigest, EndOfEpochData, + CertifiedCheckpointSummary, CheckpointCommitment, CheckpointDigest, CheckpointSequenceNumber, + EndOfEpochData, }; use sui_types::move_package::MovePackage; use sui_types::object::{Object, Owner}; @@ -89,6 +90,8 @@ impl IndexedCheckpoint { } } +/// Represents system state and summary info at the start and end of an epoch. Optional fields are +/// populated at epoch boundary, since they cannot be determined at the start of the epoch. #[derive(Clone, Debug, Default)] pub struct IndexedEpochInfo { pub epoch: u64, @@ -134,6 +137,9 @@ impl IndexedEpochInfo { } } + /// Creates `IndexedEpochInfo` for epoch X-1 at the boundary of epoch X-1 to X. + /// `network_total_tx_num_at_last_epoch_end` is needed to determine the number of transactions + /// that occurred in the epoch X-1. pub fn from_end_of_epoch_data( system_state_summary: &SuiSystemStateSummary, last_checkpoint_summary: &CertifiedCheckpointSummary, @@ -217,6 +223,47 @@ impl IndexedEvent { } } +#[derive(Debug, Clone)] +pub struct EventIndex { + pub tx_sequence_number: u64, + pub event_sequence_number: u64, + pub sender: SuiAddress, + pub emit_package: ObjectID, + pub emit_module: String, + pub type_package: ObjectID, + pub type_module: String, + /// Struct name of the event, without type parameters. + pub type_name: String, + /// Type instantiation of the event, with type name and type parameters, if any. + pub type_instantiation: String, +} + +impl EventIndex { + pub fn from_event( + tx_sequence_number: u64, + event_sequence_number: u64, + event: &sui_types::event::Event, + ) -> Self { + let type_instantiation = event + .type_ + .to_canonical_string(/* with_prefix */ true) + .splitn(3, "::") + .collect::<Vec<_>>()[2] + .to_string(); + Self { + tx_sequence_number, + event_sequence_number, + sender: event.sender, + emit_package: event.package_id, + emit_module: event.transaction_module.to_string(), + type_package: event.type_.address.into(), + type_module: event.type_.module.to_string(), + type_name: event.type_.name.to_string(), + type_instantiation, + } + } +} + #[derive(Debug, Copy, Clone)] pub enum OwnerType { Immutable = 0, @@ -282,44 +329,20 @@ pub enum DynamicFieldKind { #[derive(Clone, Debug)] pub struct IndexedObject { - pub object_id: ObjectID, - pub object_version: u64, - pub object_digest: ObjectDigest, - pub checkpoint_sequence_number: u64, - pub owner_type: OwnerType, - pub owner_id: Option<SuiAddress>, + pub checkpoint_sequence_number: CheckpointSequenceNumber, pub object: Object, - pub coin_type: Option<String>, - pub coin_balance: Option<u64>, pub df_info: Option<DynamicFieldInfo>, } impl IndexedObject { pub fn from_object( - checkpoint_sequence_number: u64, + checkpoint_sequence_number: CheckpointSequenceNumber, object: Object, df_info: Option<DynamicFieldInfo>, ) -> Self { - let (owner_type, owner_id) = owner_to_owner_info(&object.owner); - let coin_type = object - .coin_type_maybe() - .map(|t| t.to_canonical_string(/* with_prefix */ true)); - let coin_balance = if coin_type.is_some() { - Some(object.get_coin_value_unsafe()) - } else { - None - }; - Self { checkpoint_sequence_number, - object_id: object.id(), - object_version: object.version().value(), - object_digest: object.digest(), - owner_type, - owner_id, object, - coin_type, - coin_balance, df_info, } } @@ -363,12 +386,13 @@
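(Aside: `EventIndex::from_event` above derives `type_instantiation` by splitting the canonical type string on `::` at most three ways and keeping the tail, so generic type parameters survive intact. A self-contained worked example of just the string manipulation — the event type string here is invented for illustration:

    fn type_instantiation(canonical: &str) -> String {
        canonical.splitn(3, "::").collect::<Vec<_>>()[2].to_string()
    }

    fn main() {
        let s = "0x2::kiosk::ItemListed<0x2::sui::SUI>";
        // The address and module are stripped; the generic parameter is kept,
        // because splitn stops splitting after the third piece.
        assert_eq!(type_instantiation(s), "ItemListed<0x2::sui::SUI>");
    }

End of aside.)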
pub struct IndexedTransaction { #[derive(Debug, Clone)] pub struct TxIndex { pub tx_sequence_number: u64, + pub tx_kind: TransactionKind, pub transaction_digest: TransactionDigest, pub checkpoint_sequence_number: u64, pub input_objects: Vec<ObjectID>, pub changed_objects: Vec<ObjectID>, pub payers: Vec<SuiAddress>, - pub senders: Vec<SuiAddress>, + pub sender: SuiAddress, pub recipients: Vec<SuiAddress>, pub move_calls: Vec<(ObjectID, String, String)>, } diff --git a/crates/sui-indexer/tests/ingestion_tests.rs b/crates/sui-indexer/tests/ingestion_tests.rs index 06914eeff6471..af67a061bbde3 100644 --- a/crates/sui-indexer/tests/ingestion_tests.rs +++ b/crates/sui-indexer/tests/ingestion_tests.rs @@ -58,7 +58,7 @@ mod ingestion_tests { let server_handle = tokio::spawn(async move { sui_rest_api::RestService::new_without_version(sim) - .start_service(server_url, Some("/rest".to_owned())) + .start_service(server_url) .await; }); // Starts indexer diff --git a/crates/sui-json-rpc-tests/Cargo.toml b/crates/sui-json-rpc-tests/Cargo.toml index af78d3ccc56df..0f688d4b455e0 100644 --- a/crates/sui-json-rpc-tests/Cargo.toml +++ b/crates/sui-json-rpc-tests/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] [dev-dependencies] diff --git a/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs b/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs index bb1dcaa8be1e7..433c8fcddd72d 100644 --- a/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs +++ b/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs @@ -26,7 +26,7 @@ async fn test_dry_run_publish_with_mocked_coin() -> Result<(), anyhow::Error> { .into_iter() .map(|b| b.to_vec().unwrap()) .collect::<Vec<_>>(); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let mut builder = ProgrammableTransactionBuilder::new(); builder.publish_immutable(compiled_modules_bytes, dependencies); diff --git a/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs b/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs index e566183954a94..3d648666047e1 100644 --- a/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs +++ b/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs @@ -192,7 +192,7 @@ async fn test_publish() -> Result<(), anyhow::Error> { BuildConfig::new_for_testing().build(Path::new("../../examples/move/basics"))?; let compiled_modules_bytes = compiled_package.get_package_base64(/* with_unpublished_deps */ false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let transaction_bytes: TransactionBlockBytes = http_client .publish( @@ -453,7 +453,7 @@ async fn test_get_metadata() -> Result<(), anyhow::Error> { let compiled_package = BuildConfig::new_for_testing().build(&path)?; let compiled_modules_bytes = compiled_package.get_package_base64(/* with_unpublished_deps */ false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let transaction_bytes: TransactionBlockBytes = http_client .publish( @@ -537,7 +537,7 @@ async fn test_get_total_supply() -> Result<(), anyhow::Error> { let compiled_package = BuildConfig::default().build(&path)?; let compiled_modules_bytes = compiled_package.get_package_base64(/* with_unpublished_deps */ false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies =
compiled_package.get_dependency_storage_package_ids(); let transaction_bytes: TransactionBlockBytes = http_client .publish( diff --git a/crates/sui-json-rpc-tests/tests/transaction_tests.rs b/crates/sui-json-rpc-tests/tests/transaction_tests.rs index f1036cbf00d7d..0ab96b4ac0210 100644 --- a/crates/sui-json-rpc-tests/tests/transaction_tests.rs +++ b/crates/sui-json-rpc-tests/tests/transaction_tests.rs @@ -259,24 +259,23 @@ async fn test_get_fullnode_transaction() -> Result<(), anyhow::Error> { assert!(second_page.data.len() > 5); assert!(!second_page.has_next_page); - let mut all_txs_rev = first_page.data.clone(); - all_txs_rev.extend(second_page.data); - all_txs_rev.reverse(); + let mut all_txs = first_page.data.clone(); + all_txs.extend(second_page.data); - // test get 10 latest transactions paged + // test get 10 transactions paged let latest = client .read_api() .query_transaction_blocks( SuiTransactionBlockResponseQuery::default(), None, Some(10), - true, + false, ) .await .unwrap(); assert_eq!(10, latest.data.len()); - assert_eq!(Some(all_txs_rev[9].digest), latest.next_cursor); - assert_eq!(all_txs_rev[0..10], latest.data); + assert_eq!(Some(all_txs[9].digest), latest.next_cursor); + assert_eq!(all_txs[0..10], latest.data); assert!(latest.has_next_page); // test get from address txs in ascending order diff --git a/crates/sui-json-rpc-types/src/sui_transaction.rs b/crates/sui-json-rpc-types/src/sui_transaction.rs index eff57a51290cc..8936d54831102 100644 --- a/crates/sui-json-rpc-types/src/sui_transaction.rs +++ b/crates/sui-json-rpc-types/src/sui_transaction.rs @@ -180,6 +180,10 @@ impl SuiTransactionBlockResponseOptions { } } + #[deprecated( + since = "1.33.0", + note = "Balance and object changes no longer require local execution" + )] pub fn require_local_execution(&self) -> bool { self.show_balance_changes || self.show_object_changes } @@ -1799,7 +1803,7 @@ impl SuiProgrammableTransactionBlock { } fn resolve_input_type( - inputs: &Vec<CallArg>, + inputs: &[CallArg], commands: &[Command], module_cache: &impl GetModule, ) -> Vec<Option<MoveTypeLayout>> { diff --git a/crates/sui-json-rpc/Cargo.toml b/crates/sui-json-rpc/Cargo.toml index 25ddb1b48ea60..8afaf8f974122 100644 --- a/crates/sui-json-rpc/Cargo.toml +++ b/crates/sui-json-rpc/Cargo.toml @@ -52,6 +52,7 @@ sui-json-rpc-types.workspace = true sui-macros.workspace = true sui-transaction-builder.workspace = true mysten-metrics.workspace = true +mysten-service.workspace = true shared-crypto.workspace = true typed-store-error.workspace = true cached.workspace = true diff --git a/crates/sui-json-rpc/src/axum_router.rs b/crates/sui-json-rpc/src/axum_router.rs index 8ddebab687b2c..0fb6be835c835 100644 --- a/crates/sui-json-rpc/src/axum_router.rs +++ b/crates/sui-json-rpc/src/axum_router.rs @@ -24,6 +24,7 @@ use serde_json::value::RawValue; use sui_core::traffic_controller::{ metrics::TrafficControllerMetrics, policies::TrafficTally, TrafficController, }; +use sui_json_rpc_api::TRANSACTION_EXECUTION_CLIENT_ERROR_CODE; use sui_types::traffic_control::ClientIdSource; use sui_types::traffic_control::{PolicyConfig, Weight}; use tracing::error; @@ -278,6 +279,8 @@ fn handle_traffic_resp( fn normalize(err: ErrorCode) -> Weight { match err { ErrorCode::InvalidRequest | ErrorCode::InvalidParams => Weight::one(), + // e.g.
invalid client signature + ErrorCode::ServerError(i) if i == TRANSACTION_EXECUTION_CLIENT_ERROR_CODE => Weight::one(), _ => Weight::zero(), } } diff --git a/crates/sui-json-rpc/src/read_api.rs b/crates/sui-json-rpc/src/read_api.rs index 6de1c04324208..4dfdccc1086c6 100644 --- a/crates/sui-json-rpc/src/read_api.rs +++ b/crates/sui-json-rpc/src/read_api.rs @@ -17,6 +17,7 @@ use move_core_types::language_storage::StructTag; use tap::TapFallible; use tracing::{debug, error, info, instrument, trace, warn}; +use mysten_metrics::add_server_timing; use mysten_metrics::spawn_monitored_task; use sui_core::authority::AuthorityState; use sui_json_rpc_api::{ @@ -721,10 +722,12 @@ impl ReadApiServer for ReadApi { // Fetch transaction to determine existence let transaction_kv_store = self.transaction_kv_store.clone(); let transaction = spawn_monitored_task!(async move { - transaction_kv_store.get_tx(digest).await.map_err(|err| { + let ret = transaction_kv_store.get_tx(digest).await.map_err(|err| { debug!(tx_digest=?digest, "Failed to get transaction: {:?}", err); Error::from(err) - }) + }); + add_server_timing("tx_kv_lookup"); + ret }) .await .map_err(Error::from)??; diff --git a/crates/sui-json/src/lib.rs b/crates/sui-json/src/lib.rs index 6c367c0d6ba15..4782ed9e57116 100644 --- a/crates/sui-json/src/lib.rs +++ b/crates/sui-json/src/lib.rs @@ -703,7 +703,7 @@ fn resolve_object_vec_arg(idx: usize, arg: &SuiJsonValue) -> Result<Vec<ObjectID>>( /// - Base64 encoded `privkey` for Raw key /// - Bech32 encoded private key prefixed with `suiprivkey` /// - Hex encoded `privkey` for Raw key +/// /// If `require_secp256k1` is true, it will return an error if the key is not Secp256k1. pub fn read_key(path: &PathBuf, require_secp256k1: bool) -> Result<SuiKeyPair, anyhow::Error> { if !path.exists() { diff --git a/crates/sui-keys/src/keystore.rs b/crates/sui-keys/src/keystore.rs index 199e0fe6f4a7f..db622a9535718 100644 --- a/crates/sui-keys/src/keystore.rs +++ b/crates/sui-keys/src/keystore.rs @@ -421,13 +421,10 @@ impl FileBasedKeystore { Ok(()) } + /// Keys saved as Base64 with 33 bytes `flag || privkey` ($BASE64_STR). + /// To see Bech32 format encoding, use `sui keytool export $SUI_ADDRESS` where + /// $SUI_ADDRESS can be found with `sui keytool list`. Or use `sui keytool convert $BASE64_STR` pub fn save_keystore(&self) -> Result<(), anyhow::Error> { - eprintln!( - "Keys saved as Base64 with 33 bytes `flag || privkey` ($BASE64_STR). - To see Bech32 format encoding, use `sui keytool export $SUI_ADDRESS` where - $SUI_ADDRESS can be found with `sui keytool list`. Or use `sui keytool convert $BASE64_STR`."
- ); - if let Some(path) = &self.path { let store = serde_json::to_string_pretty( &self diff --git a/crates/sui-light-client/Cargo.toml b/crates/sui-light-client/Cargo.toml index 1cf90ea7aa48f..a18d7df4c49cd 100644 --- a/crates/sui-light-client/Cargo.toml +++ b/crates/sui-light-client/Cargo.toml @@ -6,6 +6,13 @@ license = "Apache-2.0" publish = false edition = "2021" +[lib] +path = "src/lib.rs" + +[[bin]] +name = "sui-light-client" +path = "src/main.rs" + [dependencies] anyhow.workspace = true async-trait.workspace = true @@ -25,4 +32,3 @@ sui-sdk.workspace = true move-binary-format.workspace = true sui-json-rpc-types.workspace = true sui-package-resolver.workspace = true - diff --git a/crates/sui-light-client/example_config/15918264.chk b/crates/sui-light-client/example_config/15918264.chk new file mode 100644 index 0000000000000..70892f9c226c1 Binary files /dev/null and b/crates/sui-light-client/example_config/15918264.chk differ diff --git a/crates/sui-light-client/example_config/16005062.chk b/crates/sui-light-client/example_config/16005062.chk new file mode 100644 index 0000000000000..88d1bf8ee7131 Binary files /dev/null and b/crates/sui-light-client/example_config/16005062.chk differ diff --git a/crates/sui-light-client/example_config/20958462.chk b/crates/sui-light-client/example_config/20958462.chk new file mode 100644 index 0000000000000..ddd4569b66247 Binary files /dev/null and b/crates/sui-light-client/example_config/20958462.chk differ diff --git a/crates/sui-light-client/src/construct.rs b/crates/sui-light-client/src/construct.rs new file mode 100644 index 0000000000000..7658236768daa --- /dev/null +++ b/crates/sui-light-client/src/construct.rs @@ -0,0 +1,91 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::proof::{Proof, ProofTarget, TransactionProof}; + +use anyhow::anyhow; +use sui_rest_api::{CheckpointData, CheckpointTransaction}; +use sui_types::effects::TransactionEffectsAPI; +
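(Aside: taken together with `verify_proof`, defined later in proof.rs, the intended flow is roughly the following — a sketch, assuming checkpoint data already fetched via `sui_rest_api` and a committee already trusted for that epoch:

    use anyhow::Result;
    use sui_light_client::{construct_proof, verify_proof, ProofTarget};
    use sui_rest_api::CheckpointData;
    use sui_types::{base_types::ObjectRef, committee::Committee, object::Object};

    // Hypothetical end-to-end flow: certify one object against a checkpoint.
    fn certify_object(
        committee: &Committee,
        data: &CheckpointData,
        object_ref: ObjectRef,
        object: Object,
    ) -> Result<()> {
        let targets = ProofTarget::new().add_object(object_ref, object);
        let proof = construct_proof(targets, data)?;
        verify_proof(committee, &proof)
    }

End of aside.)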
+/// Construct a proof from the given checkpoint data and proof targets. +/// +/// Only minimal, cheap checks are performed to ensure the proof is valid. If you need guaranteed +/// validity, consider calling the `verify_proof` function on the constructed proof. It either returns +/// `Ok` with a proof, or `Err` with a description of the error. +pub fn construct_proof(targets: ProofTarget, data: &CheckpointData) -> anyhow::Result<Proof> { + let checkpoint_summary = data.checkpoint_summary.clone(); + let mut this_proof = Proof { + targets, + checkpoint_summary, + contents_proof: None, + }; + + // Do a minimal check that the given checkpoint data is consistent with the committee + if let Some(committee) = &this_proof.targets.committee { + // Check we have the correct epoch + if this_proof.checkpoint_summary.epoch() + 1 != committee.epoch { + return Err(anyhow!("Epoch mismatch between checkpoint and committee")); + } + + // Check it's an end of epoch checkpoint + if this_proof.checkpoint_summary.end_of_epoch_data.is_none() { + return Err(anyhow!("Expected end of epoch checkpoint")); + } + } + + // If proof targets include objects or events, we need to include the contents proof + // Need to ensure that all targets refer to the same transaction first of all + let object_tx = this_proof + .targets + .objects + .iter() + .map(|(_, o)| o.previous_transaction); + let event_tx = this_proof + .targets + .events + .iter() + .map(|(eid, _)| eid.tx_digest); + let mut all_tx = object_tx.chain(event_tx); + + // Get the first tx ID + let target_tx_id = if let Some(first_tx) = all_tx.next() { + first_tx + } else { + // Since there is no target we just return the summary proof + return Ok(this_proof); + }; + + // Basic check that all targets refer to the same transaction + if !all_tx.all(|tx| tx == target_tx_id) { + return Err(anyhow!("All targets must refer to the same transaction")); + } + + // Find the transaction in the checkpoint data + let tx = data + .transactions + .iter() + .find(|t| t.effects.transaction_digest() == &target_tx_id) + .ok_or(anyhow!("Transaction not found in checkpoint data"))? + .clone(); + + let CheckpointTransaction { + transaction, + effects, + events, + .. + } = tx; + + // Add all the transaction data in there + this_proof.contents_proof = Some(TransactionProof { + checkpoint_contents: data.checkpoint_contents.clone(), + transaction, + effects, + events, + }); + + // TODO: should we check that the objects & events are in the transaction, to + // avoid constructing invalid proofs? I opt to not check because the check + // is expensive (sequential scan of all objects). + + Ok(this_proof) +} diff --git a/crates/sui-light-client/src/lib.rs b/crates/sui-light-client/src/lib.rs new file mode 100644 index 0000000000000..8f5488b48be9a --- /dev/null +++ b/crates/sui-light-client/src/lib.rs @@ -0,0 +1,11 @@ +// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0 + +pub mod construct; +pub mod proof; + +#[doc(inline)] +pub use proof::*; + +#[doc(inline)] +pub use construct::*; diff --git a/crates/sui-light-client/src/main.rs b/crates/sui-light-client/src/main.rs index f62f5a45e6f4a..0dc3f586f4ee5 100644 --- a/crates/sui-light-client/src/main.rs +++ b/crates/sui-light-client/src/main.rs @@ -96,8 +96,8 @@ struct Config { } impl Config { - pub fn rest_url(&self) -> String { - format!("{}/rest", self.full_node_url) + pub fn rest_url(&self) -> &str { + &self.full_node_url } } @@ -186,7 +186,7 @@ async fn download_checkpoint_summary( ) -> anyhow::Result<CertifiedCheckpointSummary> { // Download the checkpoint from the server let client = Client::new(config.rest_url()); - client.get_checkpoint_summary(seq).await + client.get_checkpoint_summary(seq).await.map_err(Into::into) } /// Run binary search to find each end-of-epoch checkpoint that is missing diff --git a/crates/sui-light-client/src/proof.rs b/crates/sui-light-client/src/proof.rs new file mode 100644 index 0000000000000..8d846384e64bb --- /dev/null +++ b/crates/sui-light-client/src/proof.rs @@ -0,0 +1,230 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; + +use sui_types::{ base_types::ObjectRef, committee::Committee, effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, event::{Event, EventID}, messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents, EndOfEpochData}, object::Object, transaction::Transaction, }; + +/// Defines the aspects of Sui state that need to be certified in a proof +#[derive(Default)] +pub struct ProofTarget { + /// Objects that need to be certified. + pub objects: Vec<(ObjectRef, Object)>, + + /// Events that need to be certified. + pub events: Vec<(EventID, Event)>, + + /// The next committee being certified. + pub committee: Option<Committee>, +} + +impl ProofTarget { + /// Create a new empty proof target. An empty proof target still ensures that the + /// checkpoint summary is correct. + pub fn new() -> Self { + Self::default() + } + + /// Add an object to be certified by object reference and content. A verified proof will + /// ensure that both the reference and content are correct. Note that some content is + /// metadata such as the transaction that created this object. + pub fn add_object(mut self, object_ref: ObjectRef, object: Object) -> Self { + self.objects.push((object_ref, object)); + self + } + + /// Add an event to be certified by event ID and content. A verified proof will ensure that + /// both the ID and content are correct. + pub fn add_event(mut self, event_id: EventID, event: Event) -> Self { + self.events.push((event_id, event)); + self + } + + /// Add the next committee to be certified. A verified proof will ensure that the next + /// committee is correct. + pub fn set_committee(mut self, committee: Committee) -> Self { + self.committee = Some(committee); + self + } +} + +/// Part of a proof that provides evidence relating to a specific transaction to +/// certify objects and events. +pub struct TransactionProof { + /// Checkpoint contents including this transaction. + pub checkpoint_contents: CheckpointContents, + + /// The transaction being certified. + pub transaction: Transaction, + + /// The effects of the transaction being certified. + pub effects: TransactionEffects, + + /// The events of the transaction being certified. + pub events: Option<TransactionEvents>, +} + +/// A proof for specific targets.
It certifies a checkpoint summary and optionally includes +/// transaction evidence to certify objects and events. +pub struct Proof { + /// Targets of the proof are a committee, objects, or events that need to be certified. + pub targets: ProofTarget, + + /// A summary of the checkpoint being certified. + pub checkpoint_summary: CertifiedCheckpointSummary, + + /// Optional transaction proof to certify objects and events. + pub contents_proof: Option<TransactionProof>, +} + +/// Verify a proof against a committee. A proof is valid if it certifies the checkpoint summary +/// and optionally includes transaction evidence to certify objects and events. +/// +/// If the result is `Ok(())` then the proof is valid. If `Err` is returned then the proof is invalid +/// and the error message will describe the reason. Once a proof is verified it can be trusted, +/// and information in `targets` as well as `checkpoint_summary` or `contents_proof` can be +/// trusted as being authentic. +/// +/// The authoritative committee is required to verify the proof. The sequence of committees can be +/// verified through a Committee proof target on the last checkpoint of each epoch, +/// sequentially since the first epoch. +pub fn verify_proof(committee: &Committee, proof: &Proof) -> anyhow::Result<()> { + // Get checkpoint summary and optional contents + let summary = &proof.checkpoint_summary; + let contents_ref = proof + .contents_proof + .as_ref() + .map(|x| &x.checkpoint_contents); + + // Verify the checkpoint summary using the committee + summary.verify_with_contents(committee, contents_ref)?; + + // MILESTONE 1: summary and contents are correct + // Note: this is unconditional on the proof targets, and always checked. + + // If the proof targets the next committee, check it + if let Some(committee) = &proof.targets.committee { + match &summary.end_of_epoch_data { + Some(EndOfEpochData { + next_epoch_committee, + ..
+ }) => { + // Extract the end of epoch committee + let next_committee_data = next_epoch_committee.iter().cloned().collect(); + let new_committee = + Committee::new(summary.epoch().checked_add(1).unwrap(), next_committee_data); + + if new_committee != *committee { + return Err(anyhow!( + "Given committee does not match the end of epoch committee" + )); + } + } + None => { + return Err(anyhow!( + "No end of epoch committee in the checkpoint summary" + )); + } + } + } + + // MILESTONE 2: committee, if requested, is correct + + // Non-empty object or event targets require the optional contents proof. + // If it is not present, return an error + + if (!proof.targets.objects.is_empty() || !proof.targets.events.is_empty()) + && proof.contents_proof.is_none() + { + return Err(anyhow!("Contents proof is missing")); + } + + // MILESTONE 3: contents proof is present if required + + if let Some(contents_proof) = &proof.contents_proof { + // Extract Transaction Digests and check they are in contents + let digests = contents_proof.effects.execution_digests(); + if contents_proof.transaction.digest() != &digests.transaction { + return Err(anyhow!( + "Transaction digest does not match the execution digest" + )); + } + + // Ensure the digests are in the checkpoint contents + if !contents_proof + .checkpoint_contents + .enumerate_transactions(summary) + .any(|x| x.1 == &digests) + { + // Could not find the digest in the checkpoint contents + return Err(anyhow!( + "Transaction digest not found in the checkpoint contents" + )); + } + + // MILESTONE 4: Transaction & Effect correct and in contents + + if contents_proof.effects.events_digest() + != contents_proof.events.as_ref().map(|e| e.digest()).as_ref() + { + return Err(anyhow!("Events digest does not match the execution digest")); + } + + // If the target includes any events ensure the events digest is not None + if !proof.targets.events.is_empty() && contents_proof.events.is_none() { + return Err(anyhow!("Events digest is missing")); + } + + // MILESTONE 5: Events digest & Events are correct and present if required + + // Now we verify the content of any target events + + for (event_id, event) in &proof.targets.events { + // Check the event corresponds to the transaction + if event_id.tx_digest != digests.transaction { + return Err(anyhow!("Event does not belong to the transaction")); + } + + // The sequence number must be a valid index + // Note: safe to unwrap as we have checked that it's not None above + if event_id.event_seq as usize >= contents_proof.events.as_ref().unwrap().data.len() { + return Err(anyhow!("Event sequence number out of bounds")); + } + + // Now check that the contents of the event are the same + if &contents_proof.events.as_ref().unwrap().data[event_id.event_seq as usize] != event { + return Err(anyhow!("Event contents do not match")); + } + } + + // MILESTONE 6: Event contents are correct + + // Now check all object references are correct and in the effects + let changed_objects = contents_proof.effects.all_changed_objects(); + + for (object_ref, object) in &proof.targets.objects { + // Is the given reference correct? + if object_ref != &object.compute_object_reference() { + return Err(anyhow!("Object reference does not match the object")); + } + + // Has this object been created in these effects?
+ changed_objects + .iter() + .find(|effects_object_ref| &effects_object_ref.0 == object_ref) + .ok_or(anyhow!("Object not found"))?; + } + + // MILESTONE 7: Object references are correct and in the effects + } + + Ok(()) +} diff --git a/crates/sui-light-client/tests/check_proof.rs b/crates/sui-light-client/tests/check_proof.rs new file mode 100644 index 0000000000000..b02cd3e627398 --- /dev/null +++ b/crates/sui-light-client/tests/check_proof.rs @@ -0,0 +1,273 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; + +use sui_light_client::construct::construct_proof; +use sui_light_client::proof::{verify_proof, Proof, ProofTarget}; + +use sui_types::event::{Event, EventID}; + +use sui_types::{committee::Committee, effects::TransactionEffectsAPI, object::Object}; + +use sui_rest_api::CheckpointData; + +use std::io::Read; +use std::{fs, path::PathBuf}; + +async fn read_full_checkpoint(checkpoint_path: &PathBuf) -> anyhow::Result<CheckpointData> { + println!("Reading checkpoint from {:?}", checkpoint_path); + let mut reader = fs::File::open(checkpoint_path.clone())?; + let mut buffer = Vec::new(); + reader.read_to_end(&mut buffer)?; + let (_, data): (u8, CheckpointData) = + bcs::from_bytes(&buffer).map_err(|e| anyhow!("Unable to parse checkpoint file: {}", e))?; + Ok(data) +} + +async fn read_data(committee_seq: u64, seq: u64) -> (Committee, CheckpointData) { + let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + d.push(format!("example_config/{}.chk", committee_seq)); + + let committee_checkpoint = read_full_checkpoint(&d).await.unwrap(); + + let prev_committee = committee_checkpoint + .checkpoint_summary + .end_of_epoch_data + .as_ref() + .ok_or(anyhow!("Expected checkpoint to be end-of-epoch")) + .unwrap() + .next_epoch_committee + .iter() + .cloned() + .collect(); + + // Make a committee object using this + let committee = Committee::new( + committee_checkpoint + .checkpoint_summary + .epoch() + .checked_add(1) + .unwrap(), + prev_committee, + ); + + let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + d.push(format!("example_config/{}.chk", seq)); + + let full_checkpoint = read_full_checkpoint(&d).await.unwrap(); + + (committee, full_checkpoint) +} + +#[tokio::test] +async fn check_can_read_test_data() { + let (_committee, full_checkpoint) = read_data(15918264, 16005062).await; + assert!(full_checkpoint + .checkpoint_summary + .end_of_epoch_data + .is_some()); +} + +#[tokio::test] +async fn test_new_committee() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let new_committee_data = full_checkpoint + .checkpoint_summary + .end_of_epoch_data + .as_ref() + .ok_or(anyhow!("Expected checkpoint to be end-of-epoch")) + .unwrap() + .next_epoch_committee + .iter() + .cloned() + .collect(); + + // Make a committee object using this + let new_committee = Committee::new( + full_checkpoint + .checkpoint_summary + .epoch() + .checked_add(1) + .unwrap(), + new_committee_data, + ); + + let committee_proof = Proof { + checkpoint_summary: full_checkpoint.checkpoint_summary.clone(), + contents_proof: None, + targets: ProofTarget::new().set_committee(new_committee.clone()), + }; + + assert!(verify_proof(&committee, &committee_proof).is_ok()); +} + +// Fail if the new committee does not match the target of the proof +#[tokio::test] +async fn test_incorrect_new_committee() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let committee_proof = Proof { + checkpoint_summary:
full_checkpoint.checkpoint_summary.clone(), + contents_proof: None, + targets: ProofTarget::new().set_committee(committee.clone()), // WRONG + }; + + assert!(verify_proof(&committee, &committee_proof).is_err()); +} + +// Fail if the certificate is incorrect even if no proof targets are given +#[tokio::test] +async fn test_fail_incorrect_cert() { + let (_committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let new_committee_data = full_checkpoint + .checkpoint_summary + .end_of_epoch_data + .as_ref() + .ok_or(anyhow!("Expected checkpoint to be end-of-epoch")) + .unwrap() + .next_epoch_committee + .iter() + .cloned() + .collect(); + + // Make a committee object using this + let new_committee = Committee::new( + full_checkpoint + .checkpoint_summary + .epoch() + .checked_add(1) + .unwrap(), + new_committee_data, + ); + + let committee_proof = Proof { + checkpoint_summary: full_checkpoint.checkpoint_summary.clone(), + contents_proof: None, + targets: ProofTarget::new(), + }; + + assert!(verify_proof( + &new_committee, // WRONG + &committee_proof + ) + .is_err()); +} + +#[tokio::test] +async fn test_object_target_fail_no_data() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let sample_object: Object = full_checkpoint.transactions[0].output_objects[0].clone(); + let sample_ref = sample_object.compute_object_reference(); + + let bad_proof = Proof { + checkpoint_summary: full_checkpoint.checkpoint_summary.clone(), + contents_proof: None, // WRONG + targets: ProofTarget::new().add_object(sample_ref, sample_object), + }; + + assert!(verify_proof(&committee, &bad_proof).is_err()); +} + +#[tokio::test] +async fn test_object_target_success() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let sample_object: Object = full_checkpoint.transactions[0].output_objects[0].clone(); + let sample_ref = sample_object.compute_object_reference(); + + let target = ProofTarget::new().add_object(sample_ref, sample_object); + let object_proof = construct_proof(target, &full_checkpoint).unwrap(); + + assert!(verify_proof(&committee, &object_proof).is_ok()); +} + +#[tokio::test] +async fn test_object_target_fail_wrong_object() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let sample_object: Object = full_checkpoint.transactions[0].output_objects[0].clone(); + let wrong_object: Object = full_checkpoint.transactions[1].output_objects[1].clone(); + let mut sample_ref = sample_object.compute_object_reference(); + let wrong_ref = wrong_object.compute_object_reference(); + + let target = ProofTarget::new().add_object(wrong_ref, sample_object.clone()); // WRONG + let object_proof = construct_proof(target, &full_checkpoint).unwrap(); + assert!(verify_proof(&committee, &object_proof).is_err()); + + // Does not exist + sample_ref.1 = sample_ref.1.next(); // WRONG + + let target = ProofTarget::new().add_object(sample_ref, sample_object); + let object_proof = construct_proof(target, &full_checkpoint).unwrap(); + assert!(verify_proof(&committee, &object_proof).is_err()); +} + +#[tokio::test] +async fn test_event_target_fail_no_data() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let sample_event: Event = full_checkpoint.transactions[1] + .events + .as_ref() + .unwrap() + .data[0] + .clone(); + let sample_eid = EventID::from(( + *full_checkpoint.transactions[1].effects.transaction_digest(), + 0, + )); + + let bad_proof = Proof { + checkpoint_summary: 
full_checkpoint.checkpoint_summary.clone(), + contents_proof: None, // WRONG + targets: ProofTarget::new().add_event(sample_eid, sample_event), + }; + + assert!(verify_proof(&committee, &bad_proof).is_err()); +} + +#[tokio::test] +async fn test_event_target_success() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let sample_event: Event = full_checkpoint.transactions[1] + .events + .as_ref() + .unwrap() + .data[0] + .clone(); + let sample_eid = EventID::from(( + *full_checkpoint.transactions[1].effects.transaction_digest(), + 0, + )); + + let target = ProofTarget::new().add_event(sample_eid, sample_event); + let event_proof = construct_proof(target, &full_checkpoint).unwrap(); + + assert!(verify_proof(&committee, &event_proof).is_ok()); +} + +#[tokio::test] +async fn test_event_target_fail_bad_event() { + let (committee, full_checkpoint) = read_data(15918264, 16005062).await; + + let sample_event: Event = full_checkpoint.transactions[1] + .events + .as_ref() + .unwrap() + .data[0] + .clone(); + let sample_eid = EventID::from(( + *full_checkpoint.transactions[1].effects.transaction_digest(), + 1, // WRONG + )); + + let target = ProofTarget::new().add_event(sample_eid, sample_event); + let event_proof = construct_proof(target, &full_checkpoint).unwrap(); + + assert!(verify_proof(&committee, &event_proof).is_err()); +} diff --git a/crates/sui-macros/Cargo.toml b/crates/sui-macros/Cargo.toml index c7e99bd255f69..cb274a3af898c 100644 --- a/crates/sui-macros/Cargo.toml +++ b/crates/sui-macros/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] sui-proc-macros.workspace = true once_cell.workspace = true diff --git a/crates/sui-macros/src/lib.rs b/crates/sui-macros/src/lib.rs index 6e3cd85ca19be..a036a3dbfcfb0 100644 --- a/crates/sui-macros/src/lib.rs +++ b/crates/sui-macros/src/lib.rs @@ -528,7 +528,7 @@ mod test { assert_eq!(Foo::new(1, 2).b, 2); assert_eq!(new_foo(1).a, 1); - let v = vec![Foo::new(1, 2), Foo::new(3, 2)]; + let v = [Foo::new(1, 2), Foo::new(3, 2)]; assert_eq!(v[0].a, 1); assert_eq!(v[1].b, 2); @@ -720,7 +720,7 @@ mod test { assert_eq!(Foo::new(1, 2).b, 2); assert_eq!(new_foo(1).a, 1); - let v = vec![Foo::new(1, 2), Foo::new(3, 2)]; + let v = [Foo::new(1, 2), Foo::new(3, 2)]; assert_eq!(v[0].a, 1); assert_eq!(v[1].b, 2); diff --git a/crates/sui-move-build/src/lib.rs b/crates/sui-move-build/src/lib.rs index 9584bcd4998fb..26ba9a08eb734 100644 --- a/crates/sui-move-build/src/lib.rs +++ b/crates/sui-move-build/src/lib.rs @@ -347,19 +347,9 @@ impl CompiledPackage { } /// Return the set of Object IDs corresponding to this package's transitive dependencies' - /// original package IDs. - pub fn get_dependency_original_package_ids(&self) -> Vec<ObjectID> { - let mut ids: BTreeSet<_> = self - .package - .deps_compiled_units - .iter() - .map(|(_, m)| ObjectID::from(*m.unit.module.address())) - .collect(); - - // `0x0` is not a real dependency ID -- it means that the package has unpublished - // dependencies. - ids.remove(&ObjectID::ZERO); - ids.into_iter().collect() + /// storage package IDs (where to load those packages on-chain).
+ pub fn get_dependency_storage_package_ids(&self) -> Vec<ObjectID> { + self.dependency_ids.published.values().cloned().collect() } pub fn get_package_digest(&self, with_unpublished_deps: bool) -> [u8; 32] { @@ -391,14 +381,6 @@ impl CompiledPackage { .collect() } - pub fn get_package_dependencies_hex(&self) -> Vec<String> { - self.dependency_ids - .published - .values() - .map(|object_id| object_id.to_hex_uncompressed()) - .collect() - } - /// Get bytecode modules from DeepBook that are used by this package pub fn get_deepbook_modules(&self) -> impl Iterator<Item = &CompiledModule> { self.get_modules_and_deps() @@ -645,7 +627,7 @@ pub struct PackageDependencies { pub invalid: BTreeMap<Symbol, String>, /// Set of dependencies that have conflicting `published-at` addresses. The key refers to /// the package, and the tuple refers to the address in the (Move.lock, Move.toml) respectively. - pub conflicting: BTreeMap<Symbol, (String, String)>, + pub conflicting: BTreeMap<Symbol, (ObjectID, ObjectID)>, } /// Partition packages in `resolution_graph` into one of four groups: diff --git a/crates/sui-move/src/build.rs b/crates/sui-move/src/build.rs index 5baa78625b08b..1d765e49e8120 100644 --- a/crates/sui-move/src/build.rs +++ b/crates/sui-move/src/build.rs @@ -75,12 +75,11 @@ impl Build { check_unpublished_dependencies(&pkg.dependency_ids.unpublished)?; } - let package_dependencies = pkg.get_package_dependencies_hex(); println!( "{}", json!({ "modules": pkg.get_package_base64(with_unpublished_deps), - "dependencies": json!(package_dependencies), + "dependencies": pkg.get_dependency_storage_package_ids(), "digest": pkg.get_package_digest(with_unpublished_deps), }) ) diff --git a/crates/sui-move/src/unit_test.rs b/crates/sui-move/src/unit_test.rs index 3bb4da1f2da0f..78ae1e5260a52 100644 --- a/crates/sui-move/src/unit_test.rs +++ b/crates/sui-move/src/unit_test.rs @@ -20,13 +20,8 @@ use sui_move_natives::test_scenario::InMemoryTestStore; use sui_move_natives::{object_runtime::ObjectRuntime, NativesCostTable}; use sui_protocol_config::ProtocolConfig; use sui_types::{ - base_types::{ObjectID, SequenceNumber}, - error::SuiResult, - gas_model::tables::initial_cost_schedule_for_unit_tests, - in_memory_storage::InMemoryStorage, + gas_model::tables::initial_cost_schedule_for_unit_tests, in_memory_storage::InMemoryStorage, metrics::LimitsMetrics, - object::Object, - storage::ChildObjectResolver, }; // Move unit tests will halt after executing this many steps. This is a protection to avoid divergence @@ -63,28 +58,6 @@ impl Test { } } -struct DummyChildObjectStore {} - -impl ChildObjectResolver for DummyChildObjectStore { - fn read_child_object( - &self, - _parent: &ObjectID, - _child: &ObjectID, - _child_version_upper_bound: SequenceNumber, - ) -> SuiResult<Option<Object>> { - Ok(None) - } - fn get_object_received_at_version( - &self, - _owner: &ObjectID, - _receiving_object_id: &ObjectID, - _receive_object_at_version: SequenceNumber, - _epoch_id: sui_types::committee::EpochId, - ) -> SuiResult<Option<Object>> { - Ok(None) - } -} - static TEST_STORE_INNER: Lazy<RwLock<InMemoryStorage>> = Lazy::new(|| RwLock::new(InMemoryStorage::default())); diff --git a/crates/sui-network/src/state_sync/server.rs b/crates/sui-network/src/state_sync/server.rs index f7e24b5a5202f..4e6b32cff059e 100644 --- a/crates/sui-network/src/state_sync/server.rs +++ b/crates/sui-network/src/state_sync/server.rs @@ -221,7 +221,7 @@ where } })?; - struct SemaphoreExtension(OwnedSemaphorePermit); + struct SemaphoreExtension(#[allow(unused)] OwnedSemaphorePermit); inner.call(req).await.map(move |mut response| { // Insert permit as extension so it's not dropped until the response is sent.
response diff --git a/crates/sui-node/Cargo.toml b/crates/sui-node/Cargo.toml index 2159ad62b713b..1e3449067aaf3 100644 --- a/crates/sui-node/Cargo.toml +++ b/crates/sui-node/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anemo.workspace = true anemo-tower.workspace = true @@ -19,6 +22,7 @@ prometheus.workspace = true tokio = { workspace = true, features = ["full"] } tracing.workspace = true futures.workspace = true +parking_lot.workspace = true tower.workspace = true reqwest.workspace = true tap.workspace = true @@ -43,6 +47,7 @@ sui-snapshot.workspace = true sui-telemetry.workspace = true sui-types.workspace = true mysten-metrics.workspace = true +mysten-service.workspace = true mysten-common.workspace = true narwhal-network.workspace = true narwhal-worker.workspace = true diff --git a/crates/sui-node/src/admin.rs b/crates/sui-node/src/admin.rs index 228707cebd2b4..3d5e44241268d 100644 --- a/crates/sui-node/src/admin.rs +++ b/crates/sui-node/src/admin.rs @@ -241,7 +241,12 @@ async fn capabilities(State(state): State<Arc<AppState>>) -> (StatusCode, String // Only one of v1 or v2 will be populated at a time let capabilities = epoch_store.get_capabilities_v1(); let mut output = String::new(); - for capability in &capabilities { + for capability in capabilities.unwrap_or_default() { + output.push_str(&format!("{:?}\n", capability)); + } + + let capabilities = epoch_store.get_capabilities_v2(); + for capability in capabilities.unwrap_or_default() { output.push_str(&format!("{:?}\n", capability)); } diff --git a/crates/sui-node/src/lib.rs b/crates/sui-node/src/lib.rs index fe26295dd4c25..1f4f881d26f1f 100644 --- a/crates/sui-node/src/lib.rs +++ b/crates/sui-node/src/lib.rs @@ -55,6 +55,7 @@ use fastcrypto_zkp::bn254::zk_login::JWK; pub use handle::SuiNodeHandle; use mysten_metrics::{spawn_monitored_task, RegistryService}; use mysten_network::server::ServerBuilder; +use mysten_service::server_timing::server_timing_middleware; use narwhal_network::metrics::MetricsMakeCallbackHandler; use narwhal_network::metrics::{NetworkConnectionMetrics, NetworkMetrics}; use sui_archival::reader::ArchiveReaderBalancer; @@ -2041,10 +2042,7 @@ pub async fn build_http_server( rest_service.with_executor(transaction_orchestrator.clone()) } - let rest_router = rest_service.into_router(); - router = router - .nest("/rest", rest_router.clone()) - .nest("/v2", rest_router); + router = router.merge(rest_service.into_router()); } let listener = tokio::net::TcpListener::bind(&config.json_rpc_address) .await .unwrap(); let addr = listener.local_addr().unwrap(); + router = router.layer(axum::middleware::from_fn(server_timing_middleware)); + let handle = tokio::spawn(async move { axum::serve( listener, diff --git a/crates/sui-open-rpc-macros/src/lib.rs b/crates/sui-open-rpc-macros/src/lib.rs index 6fa99cc211998..ccf58a392286a 100644 --- a/crates/sui-open-rpc-macros/src/lib.rs +++ b/crates/sui-open-rpc-macros/src/lib.rs @@ -121,10 +121,6 @@ pub fn open_rpc(attr: TokenStream, item: TokenStream) -> TokenStream { trait OptionalQuote { fn to_quote(&self) -> TokenStream2; - - fn unwrap_quote<F>(&self, quote: F) -> TokenStream2 - where - F: FnOnce(LitStr) -> TokenStream2; } impl OptionalQuote for Option<LitStr> { @@ -135,17 +131,6 @@ impl OptionalQuote for Option<LitStr> { quote!(None) } } - - fn unwrap_quote<F>(&self, quote: F) -> TokenStream2 - where - F: FnOnce(LitStr) -> TokenStream2, - { - if let Some(lit_str) = self {
quote(lit_str.clone()) - } else { - quote!() - } - } } struct RpcDefinition { diff --git a/crates/sui-open-rpc/Cargo.toml b/crates/sui-open-rpc/Cargo.toml index 3bbe0f53ab5d7..e6000fe18d0e2 100644 --- a/crates/sui-open-rpc/Cargo.toml +++ b/crates/sui-open-rpc/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] schemars.workspace = true serde.workspace = true diff --git a/crates/sui-open-rpc/spec/openrpc.json b/crates/sui-open-rpc/spec/openrpc.json index d3f12a2dea7e6..f01df691e4369 100644 --- a/crates/sui-open-rpc/spec/openrpc.json +++ b/crates/sui-open-rpc/spec/openrpc.json @@ -12,7 +12,7 @@ "name": "Apache-2.0", "url": "https://raw.githubusercontent.com/MystenLabs/sui/main/LICENSE" }, - "version": "1.31.1" + "version": "1.32.0" }, "methods": [ { @@ -1293,7 +1293,7 @@ "name": "Result", "value": { "minSupportedProtocolVersion": "1", - "maxSupportedProtocolVersion": "54", + "maxSupportedProtocolVersion": "55", "protocolVersion": "6", "featureFlags": { "accept_zklogin_in_multisig": false, @@ -1341,6 +1341,7 @@ "reject_mutable_random_on_entry_functions": false, "reshare_at_same_initial_version": false, "resolve_abort_locations_to_package_id": false, + "rethrow_serialization_type_layout_errors": false, "scoring_decision_with_validity_cutoff": true, "shared_object_deletion": false, "simple_conservation_checks": false, @@ -1417,6 +1418,7 @@ "config_read_setting_impl_cost_base": null, "config_read_setting_impl_cost_per_byte": null, "consensus_bad_nodes_stake_threshold": null, + "consensus_max_num_transactions_in_block": null, "consensus_max_transaction_size_bytes": null, "consensus_max_transactions_in_block_bytes": null, "crypto_invalid_arguments_cost": { diff --git a/crates/sui-open-rpc/src/examples.rs b/crates/sui-open-rpc/src/examples.rs index ed8c038571073..9c2f8d8ba6805 100644 --- a/crates/sui-open-rpc/src/examples.rs +++ b/crates/sui-open-rpc/src/examples.rs @@ -69,11 +69,6 @@ struct Examples { examples: Vec<ExamplePairing>, } -#[derive(serde::Serialize)] -struct Value { - value: String, -} - impl Examples { fn new(name: &str, examples: Vec<ExamplePairing>) -> Self { Self { diff --git a/crates/sui-oracle/tests/integration_tests.rs b/crates/sui-oracle/tests/integration_tests.rs index 6800a53461740..23232b87a3591 100644 --- a/crates/sui-oracle/tests/integration_tests.rs +++ b/crates/sui-oracle/tests/integration_tests.rs @@ -466,7 +466,7 @@ async fn publish_package( ) -> ObjectID { let compiled_package = BuildConfig::new_for_testing().build(path).unwrap(); let all_module_bytes = compiled_package.get_package_bytes(false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let gas = client .coin_read_api() .get_coins(sender, None, None, Some(1)) diff --git a/crates/sui-package-dump/Cargo.toml b/crates/sui-package-dump/Cargo.toml new file mode 100644 index 0000000000000..92632519a9877 --- /dev/null +++ b/crates/sui-package-dump/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "sui-package-dump" +version.workspace = true +authors = ["Mysten Labs <build@mystenlabs.com>"] […] diff --git a/crates/sui-package-dump/src/client.rs b/crates/sui-package-dump/src/client.rs new file mode 100644 --- /dev/null +++ b/crates/sui-package-dump/src/client.rs […] +impl Client { + pub(crate) fn new(url: impl IntoUrl) -> Result<Self> { + Ok(Self { + inner: reqwest::Client::builder() + .user_agent(concat!("sui-package-dump/", env!("CARGO_PKG_VERSION"))) + .build() + .context("Failed to create GraphQL client")?, + url: url.into_url().context("Invalid RPC URL")?, + }) + } + + pub(crate) async fn query<Q, V>(&self, query: Operation<Q, V>) -> Result<Q> + where + V: Serialize, + Q: DeserializeOwned + QueryBuilder + 'static, + { + self.inner
.post(self.url.clone()) + .run_graphql(query) + .await + .context("Failed to send GraphQL query")? + .data + .ok_or_else(|| anyhow!("Empty response to query")) + } +} diff --git a/crates/sui-package-dump/src/lib.rs b/crates/sui-package-dump/src/lib.rs new file mode 100644 index 0000000000000..ea70db5fd927a --- /dev/null +++ b/crates/sui-package-dump/src/lib.rs @@ -0,0 +1,252 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::{ + collections::BTreeMap, + fs, + path::{Path, PathBuf}, +}; + +use anyhow::{bail, ensure, Context, Result}; +use client::Client; +use fastcrypto::encoding::{Base64, Encoding}; +use query::{limits, packages, SuiAddress, UInt53}; +use sui_types::object::Object; +use tracing::info; + +mod client; +mod query; + +/// Ensure all packages created before `before_checkpoint` are written to the `output_dir`ectory, +/// from the GraphQL service at `rpc_url`. +/// +/// `output_dir` can be a path to a non-existent directory, an existing empty directory, or an +/// existing directory written to in the past. If the path is non-existent, the invocation creates +/// it. If the path exists but is empty, the invocation writes to the directory. If the directory +/// has been written to in the past, the invocation picks back up where the previous invocation +/// left off. +pub async fn dump( + rpc_url: String, + output_dir: PathBuf, + before_checkpoint: Option<u64>, +) -> Result<()> { + ensure_output_directory(&output_dir)?; + + let client = Client::new(rpc_url)?; + let after_checkpoint = read_last_checkpoint(&output_dir)?; + let limit = max_page_size(&client).await?; + let (last_checkpoint, packages) = + fetch_packages(&client, limit, after_checkpoint, before_checkpoint).await?; + + for package in &packages { + let SuiAddress(address) = &package.address; + dump_package(&output_dir, package) + .with_context(|| format!("Failed to dump package {address}"))?; + } + + if let Some(last_checkpoint) = last_checkpoint { + write_last_checkpoint(&output_dir, last_checkpoint)?; + } + + Ok(()) +} + +/// Ensure the output directory exists, either because it already exists as a writable directory, or +/// by creating a new directory. +fn ensure_output_directory(path: impl Into<PathBuf>) -> Result<()> { + let path: PathBuf = path.into(); + if !path.exists() { + fs::create_dir_all(&path).context("Making output directory")?; + return Ok(()); + } + + ensure!( + path.is_dir(), + "Output path is not a directory: {}", + path.display() + ); + + let metadata = fs::metadata(&path).context("Getting metadata for output path")?; + + ensure!( + !metadata.permissions().readonly(), + "Output directory is not writable: {}", + path.display() + ); + + Ok(()) +} + +/// Load the last checkpoint that was recorded by a previous run of the tool, if there was a previous +/// run. +fn read_last_checkpoint(output: &Path) -> Result<Option<u64>> { + let path = output.join("last-checkpoint"); + if !path.exists() { + return Ok(None); + } + + let content = fs::read_to_string(&path).context("Failed to read last checkpoint")?; + let checkpoint: u64 = + serde_json::from_str(&content).context("Failed to parse last checkpoint")?; + + info!("Resuming download after checkpoint {checkpoint}"); + + Ok(Some(checkpoint)) +} + +/// Write back to the output directory the max checkpoint from which we have seen a package.
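// ---- Editorial aside (not part of the diff): a sketch of driving the `dump` entry point
// above from another crate. The GraphQL URL and output path are illustrative only; a second
// run with the same output directory resumes after the watermark written by the helper that
// follows this aside.
async fn example_dump_all_packages() -> anyhow::Result<()> {
    sui_package_dump::dump(
        "https://sui-mainnet.mystenlabs.com/graphql".to_string(), // assumed endpoint
        std::path::PathBuf::from("packages"),
        None, // no upper bound: read up to the latest checkpoint
    )
    .await
}
// ---- End editorial aside.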
+fn write_last_checkpoint(output: &Path, checkpoint: u64) -> Result<()> { + let path = output.join("last-checkpoint"); + let content = + serde_json::to_string(&checkpoint).context("Failed to serialize last checkpoint")?; + + fs::write(path, content).context("Failed to write last checkpoint")?; + Ok(()) +} + +/// Read the max page size supported by the GraphQL service. +async fn max_page_size(client: &Client) -> Result<i32> { + Ok(client + .query(limits::build()) + .await + .context("Failed to fetch max page size")? + .service_config + .max_page_size) +} + +/// Read all the packages between `after_checkpoint` and `before_checkpoint`, in batches of +/// `page_size` from the `client` connected to a GraphQL service. +/// +/// If `after_checkpoint` is not provided, packages are read from genesis. If `before_checkpoint` +/// is not provided, packages are read until the latest checkpoint. +/// +/// Returns the latest checkpoint that was read from in this fetch, and a list of all the packages +/// that were read. +async fn fetch_packages( + client: &Client, + page_size: i32, + after_checkpoint: Option<u64>, + before_checkpoint: Option<u64>, +) -> Result<(Option<u64>, Vec<packages::MovePackage>)> { + let packages::Query { + checkpoint: checkpoint_viewed_at, + packages: + packages::MovePackageConnection { + mut page_info, + mut nodes, + }, + } = client + .query(packages::build( + page_size, + None, + after_checkpoint.map(UInt53), + before_checkpoint.map(UInt53), + )) + .await + .with_context(|| "Failed to fetch page 1 of packages.")?; + + for i in 2.. { + if !page_info.has_next_page { + break; + } + + let packages = client + .query(packages::build( + page_size, + page_info.end_cursor, + after_checkpoint.map(UInt53), + before_checkpoint.map(UInt53), + )) + .await + .with_context(|| format!("Failed to fetch page {i} of packages."))? + .packages; + + nodes.extend(packages.nodes); + page_info = packages.page_info; + + info!( + "Fetched page {i} ({} package{} so far).", + nodes.len(), + if nodes.len() == 1 { "" } else { "s" }, + ); + } + + // Compute the watermark to resume from: the checkpoint this fetch was viewed at, capped by + // the exclusive `before_checkpoint` bound when one was supplied. + use packages::Checkpoint as C; + let last_checkpoint = match (checkpoint_viewed_at, before_checkpoint) { + ( + Some(C { + sequence_number: UInt53(v), + }), + Some(b), + ) if b > 0 => Some(v.min(b - 1)), + ( + Some(C { + sequence_number: UInt53(c), + }), + _, + ) + | (_, Some(c)) => Some(c), + _ => None, + }; + + Ok((last_checkpoint, nodes)) +} + +/// Write out `pkg` to the `output_dir`ectory, using the package's address and version as the directory +/// name. The following files are written for each directory: +/// +/// - `object.bcs` -- the BCS serialized form of the `Object` type containing the package. +/// +/// - `linkage.json` -- a JSON serialization of the package's linkage table, mapping dependency +/// original IDs to the version of the dependency being depended on and the ID of the object +/// on chain that contains that version. +/// +/// - `origins.json` -- a JSON serialization of the type origin table, mapping type names contained +/// in this package to the version of the package that first introduced that type. +/// +/// - `*.mv` -- a BCS serialization of each compiled module in the package.
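// ---- Editorial aside (not part of the diff): under the layout documented above, an output
// directory that has seen two packages might look roughly like this (addresses and versions
// are illustrative placeholders):
//
//   packages/
//   ├── last-checkpoint
//   ├── 0x...0002.1/
//   │   ├── object.bcs
//   │   ├── linkage.json
//   │   ├── origins.json
//   │   └── coin.mv
//   └── 0x...9fe0.3/
//       └── ...
// ---- End editorial aside.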
+fn dump_package(output_dir: &Path, pkg: &packages::MovePackage) -> Result<()> { + let Some(query::Base64(bcs)) = &pkg.bcs else { + bail!("Missing BCS"); + }; + + let bytes = Base64::decode(bcs).context("Failed to decode BCS")?; + + let object = bcs::from_bytes::<Object>(&bytes).context("Failed to deserialize")?; + let id = object.id(); + let Some(package) = object.data.try_as_package() else { + bail!("Not a package"); + }; + + let origins: BTreeMap<_, _> = package + .type_origin_table() + .iter() + .map(|o| { + ( + format!("{}::{}", o.module_name, o.datatype_name), + o.package.to_string(), + ) + }) + .collect(); + + let package_dir = output_dir.join(format!("{}.{}", id, package.version().value())); + fs::create_dir(&package_dir).context("Failed to make output directory")?; + + let linkage_json = serde_json::to_string_pretty(package.linkage_table()) + .context("Failed to serialize linkage")?; + let origins_json = + serde_json::to_string_pretty(&origins).context("Failed to serialize type origins")?; + + fs::write(package_dir.join("object.bcs"), bytes).context("Failed to write object BCS")?; + fs::write(package_dir.join("linkage.json"), linkage_json).context("Failed to write linkage")?; + fs::write(package_dir.join("origins.json"), origins_json) + .context("Failed to write type origins")?; + + for (module_name, module_bytes) in package.serialized_module_map() { + let module_path = package_dir.join(format!("{module_name}.mv")); + fs::write(module_path, module_bytes) + .with_context(|| format!("Failed to write module: {module_name}"))? + } + + Ok(()) +} diff --git a/crates/sui-package-dump/src/query.rs b/crates/sui-package-dump/src/query.rs new file mode 100644 index 0000000000000..a0d2c0ae391d5 --- /dev/null +++ b/crates/sui-package-dump/src/query.rs @@ -0,0 +1,105 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use cynic::Operation; +use cynic::QueryBuilder; + +#[cynic::schema("sui")] +mod schema {} + +#[derive(cynic::Scalar, Debug)] +pub(crate) struct SuiAddress(pub String); + +#[derive(cynic::Scalar, Debug)] +pub(crate) struct Base64(pub String); + +#[derive(cynic::Scalar, Debug)] +pub(crate) struct UInt53(pub u64); + +/// Query types related to GraphQL service limits. +pub(crate) mod limits { + use super::*; + + pub(crate) fn build() -> Operation<Query, ()> { + Query::build(()) + } + + #[derive(cynic::QueryFragment, Debug)] + pub(crate) struct Query { + pub(crate) service_config: ServiceConfig, + } + + #[derive(cynic::QueryFragment, Debug)] + pub(crate) struct ServiceConfig { + pub(crate) max_page_size: i32, + } +} + +/// Query types related to fetching packages.
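// ---- Editorial aside (not part of the diff): the cynic fragments that follow should
// derive a GraphQL document of roughly this shape (reconstructed from the fragment and
// variable definitions; exact field casing follows the service schema, which is assumed
// here rather than confirmed):
//
//   query ($first: Int!, $after: String, $filter: MovePackageCheckpointFilter) {
//     checkpoint { sequenceNumber }
//     packages(first: $first, after: $after, filter: $filter) {
//       pageInfo { hasNextPage endCursor }
//       nodes { address bcs }
//     }
//   }
// ---- End editorial aside.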
+pub(crate) mod packages { + use super::*; + + pub(crate) fn build( + first: i32, + after: Option<String>, + after_checkpoint: Option<UInt53>, + before_checkpoint: Option<UInt53>, + ) -> Operation<Query, Vars> { + Query::build(Vars { + first, + after, + filter: Some(MovePackageCheckpointFilter { + after_checkpoint, + before_checkpoint, + }), + }) + } + + #[derive(cynic::QueryVariables, Debug)] + pub(crate) struct Vars { + pub(crate) first: i32, + pub(crate) after: Option<String>, + pub(crate) filter: Option<MovePackageCheckpointFilter>, + } + + #[derive(cynic::InputObject, Debug)] + pub(crate) struct MovePackageCheckpointFilter { + pub(crate) after_checkpoint: Option<UInt53>, + pub(crate) before_checkpoint: Option<UInt53>, + } + + #[derive(cynic::QueryFragment, Debug)] + #[cynic(variables = "Vars")] + pub(crate) struct Query { + pub(crate) checkpoint: Option<Checkpoint>, + #[arguments( + first: $first, + after: $after, + filter: $filter, + )] + pub(crate) packages: MovePackageConnection, + } + + #[derive(cynic::QueryFragment, Debug)] + pub(crate) struct Checkpoint { + pub(crate) sequence_number: UInt53, + } + + #[derive(cynic::QueryFragment, Debug)] + pub(crate) struct MovePackageConnection { + pub(crate) page_info: PageInfo, + pub(crate) nodes: Vec<MovePackage>, + } + + #[derive(cynic::QueryFragment, Debug)] + pub(crate) struct PageInfo { + pub(crate) has_next_page: bool, + pub(crate) end_cursor: Option<String>, + } + + #[derive(cynic::QueryFragment, Debug)] + pub(crate) struct MovePackage { + pub(crate) address: SuiAddress, + pub(crate) bcs: Option<Base64>, + } +} diff --git a/crates/sui-package-management/Cargo.toml b/crates/sui-package-management/Cargo.toml index 9e2149fb36d7e..a4b775c1209f5 100644 --- a/crates/sui-package-management/Cargo.toml +++ b/crates/sui-package-management/Cargo.toml @@ -11,6 +11,7 @@ path = "src/lib.rs" [dependencies] anyhow.workspace = true +thiserror.workspace = true tracing.workspace = true sui-json-rpc-types.workspace = true diff --git a/crates/sui-package-management/src/lib.rs b/crates/sui-package-management/src/lib.rs index e472578c7746a..43aa2f914711c 100644 --- a/crates/sui-package-management/src/lib.rs +++ b/crates/sui-package-management/src/lib.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, Context}; +use std::collections::HashMap; use std::fs::File; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -24,13 +25,21 @@ pub enum LockCommand { Upgrade, } -#[derive(Debug, Clone)] +#[derive(thiserror::Error, Debug, Clone)] pub enum PublishedAtError { + #[error("The 'published-at' field in Move.toml or Move.lock is invalid: {0:?}")] Invalid(String), + + #[error("The 'published-at' field is not present in Move.toml or Move.lock")] NotPresent, + + #[error( + "Conflicting 'published-at' addresses between Move.toml -- {id_manifest} -- and \ Move.lock -- {id_lock}" + )] Conflict { - id_lock: String, - id_manifest: String, + id_lock: ObjectID, + id_manifest: ObjectID, }, } @@ -140,45 +149,42 @@ pub fn resolve_published_id( ) -> Result<ObjectID, PublishedAtError> { // Look up a valid `published-at` in the `Move.toml` first, which we'll // return if the Move.lock does not manage addresses. - let published_id_in_manifest = match published_at_property(package) { - Ok(v) => Some(v), - Err(PublishedAtError::NotPresent) => None, - Err(e) => return Err(e), // An existing but invalid `published-at` in `Move.toml` should fail early.
- }; + let published_id_in_manifest = manifest_published_at(package); + + match published_id_in_manifest { + Ok(_) | Err(PublishedAtError::NotPresent) => { /* nop */ } + Err(e) => { + return Err(e); + } + } let lock = package.package_path.join(SourcePackageLayout::Lock.path()); let Ok(mut lock_file) = File::open(lock.clone()) else { - return match published_id_in_manifest { - Some(v) => { - ObjectID::from_str(v.as_str()).map_err(|_| PublishedAtError::Invalid(v.to_owned())) - } - None => Err(PublishedAtError::NotPresent), - }; + return published_id_in_manifest; }; - let managed_packages = ManagedPackage::read(&mut lock_file).ok(); + // Find the environment and ManagedPackage data for this chain_id. - let id_in_lock_for_chain_id = managed_packages.and_then(|m| { - let chain_id = chain_id.as_ref()?; - m.into_iter() - .find_map(|(_, v)| (v.chain_id == *chain_id).then_some(v.latest_published_id)) - }); - - let package_id = match (id_in_lock_for_chain_id, published_id_in_manifest) { - (Some(id_lock), Some(id_manifest)) if id_lock != id_manifest => { - return Err(PublishedAtError::Conflict { + let id_in_lock_for_chain_id = + lock_published_at(ManagedPackage::read(&mut lock_file).ok(), chain_id.as_ref()); + + match (id_in_lock_for_chain_id, published_id_in_manifest) { + (Ok(id_lock), Ok(id_manifest)) if id_lock != id_manifest => { + Err(PublishedAtError::Conflict { id_lock, id_manifest, }) } - (Some(id_lock), _) => id_lock, - (None, Some(id_manifest)) => id_manifest, /* No info in Move.lock: Fall back to manifest */ - _ => return Err(PublishedAtError::NotPresent), /* Neither in Move.toml nor Move.lock */ - }; - ObjectID::from_str(package_id.as_str()) - .map_err(|_| PublishedAtError::Invalid(package_id.to_owned())) + + (Ok(id), _) | (_, Ok(id)) => Ok(id), + + // We return early (above) if we failed to read the ID from the manifest for some reason + // other than it not being present, so at this point, we can defer to whatever error came + // from the lock file (Ok case is handled above). 
+ (from_lock, Err(_)) => from_lock, + } } -fn published_at_property(package: &Package) -> Result<String, PublishedAtError> { +fn manifest_published_at(package: &Package) -> Result<ObjectID, PublishedAtError> { let Some(value) = package .source_package .package @@ -187,5 +193,36 @@ fn published_at_property(package: &Package) -> Result<String, PublishedAtError> else { return Err(PublishedAtError::NotPresent); }; - Ok(value.to_string()) + + let id = + ObjectID::from_str(value.as_str()).map_err(|_| PublishedAtError::Invalid(value.clone()))?; + + if id == ObjectID::ZERO { + Err(PublishedAtError::NotPresent) + } else { + Ok(id) + } +} + +fn lock_published_at( + lock: Option<HashMap<String, ManagedPackage>>, + chain_id: Option<&String>, +) -> Result<ObjectID, PublishedAtError> { + let (Some(lock), Some(chain_id)) = (lock, chain_id) else { + return Err(PublishedAtError::NotPresent); + }; + + let managed_package = lock + .into_values() + .find(|v| v.chain_id == *chain_id) + .ok_or(PublishedAtError::NotPresent)?; + + let id = ObjectID::from_str(managed_package.latest_published_id.as_str()) + .map_err(|_| PublishedAtError::Invalid(managed_package.latest_published_id.clone()))?; + + if id == ObjectID::ZERO { + Err(PublishedAtError::NotPresent) + } else { + Ok(id) + } } diff --git a/crates/sui-package-resolver/src/lib.rs b/crates/sui-package-resolver/src/lib.rs index 17f1118e591d1..a6c1f3d3c0043 100644 --- a/crates/sui-package-resolver/src/lib.rs +++ b/crates/sui-package-resolver/src/lib.rs @@ -132,6 +132,7 @@ pub enum ErrorConstants { /// * A numeric value (u8, u16, u32, u64, u128, u256); or /// * A boolean value; or /// * An address value + /// /// Otherwise, the `Raw` bytes of the error constant are returned. Rendered { /// The name of the error constant. @@ -1547,7 +1548,7 @@ impl<'l> ResolutionContext<'l> { O::Datatype(key, params) => { // SAFETY: `add_signature` ensures `datatypes` has an element with this key. - let def = &self.datatypes[&key]; + let def = &self.datatypes[key]; let param_layouts = params .iter() @@ -1639,7 +1640,7 @@ impl<'l> ResolutionContext<'l> { O::Datatype(key, params) => { // SAFETY: `add_signature` ensures `datatypes` has an element with this key. - let defining_id = &self.datatypes[&key].defining_id; + let defining_id = &self.datatypes[key].defining_id; for param in params { self.relocate_signature(param)?; } diff --git a/crates/sui-proc-macros/Cargo.toml b/crates/sui-proc-macros/Cargo.toml index 0ccf1b0d77060..cb3bf09505604 100644 --- a/crates/sui-proc-macros/Cargo.toml +++ b/crates/sui-proc-macros/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [lib] proc-macro = true diff --git a/crates/sui-protocol-config/Cargo.toml b/crates/sui-protocol-config/Cargo.toml index edcea7bd5ba8b..f8a8dfec81551 100644 --- a/crates/sui-protocol-config/Cargo.toml +++ b/crates/sui-protocol-config/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] serde.workspace = true tracing.workspace = true diff --git a/crates/sui-protocol-config/src/lib.rs b/crates/sui-protocol-config/src/lib.rs index c6e4d5de99afa..370bb61286a09 100644 --- a/crates/sui-protocol-config/src/lib.rs +++ b/crates/sui-protocol-config/src/lib.rs @@ -16,7 +16,7 @@ use tracing::{info, warn}; /// The minimum and maximum protocol versions supported by this build.
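// ---- Editorial aside (not part of the diff): a sketch of how a version-gated flag added
// in this change is read at a hypothetical call site; `get_for_version` is the same
// accessor exercised by the snapshot test expression further below.
fn example_read_flag() {
    use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion};
    let cfg = ProtocolConfig::get_for_version(ProtocolVersion::MAX, Chain::Mainnet);
    // True from protocol version 55 onwards, per the feature flag set in this change.
    assert!(cfg.rethrow_serialization_type_layout_errors());
}
// ---- End editorial aside.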
const MIN_PROTOCOL_VERSION: u64 = 1; -const MAX_PROTOCOL_VERSION: u64 = 54; +const MAX_PROTOCOL_VERSION: u64 = 55; // Record history of protocol version allocations here: // @@ -167,6 +167,8 @@ const MAX_PROTOCOL_VERSION: u64 = 54; // Update stdlib natives costs // Version 54: Enable random beacon on mainnet. // Enable soft bundle on mainnet. +// Version 55: Enable enums on mainnet. +// Rethrow serialization type layout errors instead of converting them. #[derive(Copy, Clone, Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct ProtocolVersion(u64); @@ -505,6 +507,10 @@ struct FeatureFlags { // Use AuthorityCapabilitiesV2 #[serde(skip_serializing_if = "is_false")] authority_capabilities_v2: bool, + + // Rethrow type layout errors during serialization instead of trying to convert them. + #[serde(skip_serializing_if = "is_false")] + rethrow_serialization_type_layout_errors: bool, } fn is_false(b: &bool) -> bool { @@ -694,7 +700,7 @@ pub struct ProtocolConfig { /// Max number of publish or upgrade commands allowed in a programmable transaction block. max_publish_or_upgrade_per_ptb: Option<u64>, - /// Maximum number of gas units that a single MoveCall transaction can use. Enforced by the Sui adapter. + /// Maximum gas budget in MIST that a transaction can use. max_tx_gas: Option<u64>, /// Maximum amount of the proposed gas price in MIST (defined in the transaction). @@ -1183,8 +1189,10 @@ pub struct ProtocolConfig { /// The maximum serialised transaction size (in bytes) accepted by consensus. That should be bigger than the /// `max_tx_size_bytes` with some additional headroom. consensus_max_transaction_size_bytes: Option<u64>, - /// The maximum size of transactions included in a consensus proposed block + /// The maximum size of transactions included in a consensus block. consensus_max_transactions_in_block_bytes: Option<u64>, + /// The maximum number of transactions included in a consensus block. + consensus_max_num_transactions_in_block: Option<u64>, /// The max accumulated txn execution cost per object in a Narwhal commit. Transactions /// in a checkpoint will be deferred once the shared objects they touch hit this limit. @@ -1528,6 +1536,15 @@ impl ProtocolConfig { pub fn authority_capabilities_v2(&self) -> bool { self.feature_flags.authority_capabilities_v2 } + + pub fn max_num_transactions_in_block(&self) -> u64 { + // 500 is the value that was used before this field was introduced. + self.consensus_max_num_transactions_in_block.unwrap_or(500) + } + + pub fn rethrow_serialization_type_layout_errors(&self) -> bool { + self.feature_flags.rethrow_serialization_type_layout_errors + } } #[cfg(not(msim))] @@ -2015,6 +2032,8 @@ impl ProtocolConfig { consensus_max_transactions_in_block_bytes: None, + consensus_max_num_transactions_in_block: None, + max_accumulated_txn_cost_per_object_in_narwhal_commit: None, max_deferral_rounds_for_congestion_control: None, @@ -2652,6 +2671,18 @@ impl ProtocolConfig { cfg.feature_flags.soft_bundle = true; cfg.max_soft_bundle_size = Some(5); } + 55 => { + // Turn on enums on mainnet + cfg.move_binary_format_version = Some(7); + + // Assume 1KB per transaction and 500 transactions per block. + cfg.consensus_max_transactions_in_block_bytes = Some(512 * 1024); + // Assume 20_000 TPS * 5% max stake per validator / (minimum) 4 blocks per round = 250 transactions per block maximum + // Use a higher limit of 512 to account for bursty traffic and system transactions.
+ cfg.consensus_max_num_transactions_in_block = Some(512); + + cfg.feature_flags.rethrow_serialization_type_layout_errors = true; + } // Use this template when making changes: // // // modify an existing constant. @@ -2705,16 +2736,13 @@ impl ProtocolConfig { } } - pub fn meter_config(&self) -> MeterConfig { + /// MeterConfig for metering packages during signing. It is NOT stable between binaries and + /// cannot be used during execution. + pub fn meter_config_for_signing(&self) -> MeterConfig { MeterConfig { - max_per_fun_meter_units: Some(self.max_verifier_meter_ticks_per_function() as u128), - max_per_mod_meter_units: Some(self.max_meter_ticks_per_module() as u128), - max_per_pkg_meter_units: Some( - // Until the per-package limit was introduced, the per-module limit played double - // duty. - self.max_meter_ticks_per_package_as_option() - .unwrap_or_else(|| self.max_meter_ticks_per_module()) as u128, - ), + max_per_fun_meter_units: Some(2_200_000), + max_per_mod_meter_units: Some(2_200_000), + max_per_pkg_meter_units: Some(2_200_000), } } diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_55.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_55.snap new file mode 100644 index 0000000000000..6d8a2e27dc2b0 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_55.snap @@ -0,0 +1,323 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 55 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true +
prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 
+dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 
+groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 1000 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 100 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: false 
+max_accumulated_txn_cost_per_object_in_mysticeti_commit: 10 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_55.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_55.snap new file mode 100644 index 0000000000000..223b9b68af4c3 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_55.snap @@ -0,0 +1,324 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 55 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 
+binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 
+transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 
+group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 1000 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 100 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 10 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_55.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_55.snap new file mode 100644 index 0000000000000..69b7747d1bb49 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_55.snap @@ -0,0 +1,333 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 55 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + 
ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 
2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 
+ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 
+vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 1000 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 100 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 10 + diff --git a/crates/sui-proxy/src/consumer.rs b/crates/sui-proxy/src/consumer.rs index 7135c5eaa670b..c4b3af8ad9770 100644 --- a/crates/sui-proxy/src/consumer.rs +++ b/crates/sui-proxy/src/consumer.rs @@ -242,7 +242,7 @@ async fn check_response( async fn convert( mfs: Vec, ) -> Result, (StatusCode, &'static str)> { - let result = match tokio::task::spawn_blocking(|| { + let result = tokio::task::spawn_blocking(|| { let timer = CONSUMER_OPERATION_DURATION .with_label_values(&["convert_to_remote_write_task"]) .start_timer(); @@ -250,8 +250,9 @@ async fn convert( timer.observe_duration(); result.into_iter() }) - .await - { + .await; + + let result = match result { Ok(v) => v, Err(err) => { error!("unable to convert to remote_write; {err}"); diff --git a/crates/sui-replay/src/lib.rs b/crates/sui-replay/src/lib.rs index 4d139661dd794..26a27cf828767 100644 --- a/crates/sui-replay/src/lib.rs +++ b/crates/sui-replay/src/lib.rs @@ -586,9 +586,8 @@ pub(crate) fn chain_from_chain_id(chain: &str) -> Chain { fn parse_configs_versions( configs_and_versions: Option>, ) -> Option> { - let Some(configs_and_versions) = configs_and_versions else { - return None; - }; + let configs_and_versions = configs_and_versions?; + assert!(configs_and_versions.len() % 2 == 0, "Invalid number of arguments for configs and version -- you must supply a version for each config"); Some( configs_and_versions diff --git a/crates/sui-replay/src/replay.rs b/crates/sui-replay/src/replay.rs index 8ccbf693d98f6..b41e0eddf3b0e 100644 --- a/crates/sui-replay/src/replay.rs +++ b/crates/sui-replay/src/replay.rs @@ -635,6 +635,12 @@ impl LocalExec { error!("Object {id} {version} {digest} was deleted on RPC server."); Ok(None) } 
+ // This is a child object which was not found in the store (e.g., due to an exists + // check before creating the dynamic field). + Err(ReplayEngineError::ObjectVersionNotFound { id, version }) => { + info!("Object {id} {version} not found on RPC server -- this may have been pruned or never existed."); + Ok(None) + } Err(err) => Err(ReplayEngineError::SuiRpcError { err: err.to_string(), }), diff --git a/crates/sui-rest-api/Cargo.toml b/crates/sui-rest-api/Cargo.toml index 95e375770bd2a..aa046b7c70b2a 100644 --- a/crates/sui-rest-api/Cargo.toml +++ b/crates/sui-rest-api/Cargo.toml @@ -12,6 +12,7 @@ axum = { workspace = true, features = ["matched-path"] } bcs.workspace = true rand.workspace = true reqwest.workspace = true +url.workspace = true serde.workspace = true serde_json.workspace = true serde_yaml.workspace = true @@ -31,6 +32,5 @@ sui-types.workspace = true mysten-network.workspace = true sui-protocol-config.workspace = true - [dev-dependencies] diffy = "0.3" diff --git a/crates/sui-rest-api/openapi/openapi.json b/crates/sui-rest-api/openapi/openapi.json index 2e4ff81d8f3b2..ced704ee4d2c5 100644 --- a/crates/sui-rest-api/openapi/openapi.json +++ b/crates/sui-rest-api/openapi/openapi.json @@ -368,6 +368,9 @@ }, "404": { "description": "" + }, + "410": { + "description": "" } } } @@ -2654,6 +2657,85 @@ ] } } + }, + { + "description": "Certificate is cancelled due to congestion on shared objects", + "type": "object", + "required": [ + "congested_objects", + "error" + ], + "properties": { + "congested_objects": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ObjectId" + } + }, + "error": { + "type": "string", + "enum": [ + "execution_cancelled_due_to_shared_object_congestion" + ] + } + } + }, + { + "description": "Address is denied for this coin type", + "type": "object", + "required": [ + "address", + "coin_type", + "error" + ], + "properties": { + "address": { + "$ref": "#/components/schemas/Address" + }, + "coin_type": { + "type": "string" + }, + "error": { + "type": "string", + "enum": [ + "address_denied_for_coin" + ] + } + } + }, + { + "description": "Coin type is globally paused for use", + "type": "object", + "required": [ + "coin_type", + "error" + ], + "properties": { + "coin_type": { + "type": "string" + }, + "error": { + "type": "string", + "enum": [ + "coin_type_global_pause" + ] + } + } + }, + { + "description": "Certificate is cancelled because randomness could not be generated this epoch", + "type": "object", + "required": [ + "error" + ], + "properties": { + "error": { + "type": "string", + "enum": [ + "execution_cancelled_due_to_randomness_unavailable" + ] + } + } } ] }, @@ -5195,6 +5277,37 @@ "$ref": "#/components/schemas/SimpleSignature" } } + }, + { + "type": "object", + "required": [ + "authenticator_data", + "client_data_json", + "scheme", + "signature" + ], + "properties": { + "authenticator_data": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "client_data_json": { + "type": "string" + }, + "scheme": { + "type": "string", + "enum": [ + "passkey" + ] + }, + "signature": { + "$ref": "#/components/schemas/SimpleSignature" + } + } } ] }, diff --git a/crates/sui-rest-api/src/checkpoints.rs b/crates/sui-rest-api/src/checkpoints.rs index ff11549529862..334e1b4cebbe0 100644 --- a/crates/sui-rest-api/src/checkpoints.rs +++ b/crates/sui-rest-api/src/checkpoints.rs @@ -46,6 +46,7 @@ impl ApiEndpoint for GetCheckpointFull { .build(), ) .response(404, ResponseBuilder::new().build()) + .response(410,
ResponseBuilder::new().build()) .build() } @@ -58,9 +59,21 @@ async fn get_checkpoint_full( Path(checkpoint_id): Path, accept: AcceptFormat, State(state): State, -) -> Result> { +) -> Result> { let verified_summary = match checkpoint_id { - CheckpointId::SequenceNumber(s) => state.inner().get_checkpoint_by_sequence_number(s), + CheckpointId::SequenceNumber(s) => { + // Since we need object contents, we need to check for the lowest available checkpoint + // with objects that haven't been pruned + let oldest_checkpoint = state.inner().get_lowest_available_checkpoint_objects()?; + if s < oldest_checkpoint { + return Err(crate::RestError::new( + axum::http::StatusCode::GONE, + "Old checkpoints have been pruned", + )); + } + + state.inner().get_checkpoint_by_sequence_number(s) + } CheckpointId::Digest(d) => state.inner().get_checkpoint_by_digest(&d.into()), }? .ok_or(CheckpointNotFoundError(checkpoint_id))?; @@ -72,8 +85,7 @@ async fn get_checkpoint_full( let checkpoint_data = state .inner() - .get_checkpoint_data(verified_summary, checkpoint_contents)? - .into(); + .get_checkpoint_data(verified_summary, checkpoint_contents)?; match accept { AcceptFormat::Json => ResponseContent::Json(checkpoint_data), @@ -109,6 +121,7 @@ impl ApiEndpoint for GetCheckpoint { .build(), ) .response(404, ResponseBuilder::new().build()) + .response(410, ResponseBuilder::new().build()) .build() } @@ -123,7 +136,17 @@ async fn get_checkpoint( State(state): State, ) -> Result> { let summary = match checkpoint_id { - CheckpointId::SequenceNumber(s) => state.inner().get_checkpoint_by_sequence_number(s), + CheckpointId::SequenceNumber(s) => { + let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; + if s < oldest_checkpoint { + return Err(crate::RestError::new( + axum::http::StatusCode::GONE, + "Old checkpoints have been pruned", + )); + } + + state.inner().get_checkpoint_by_sequence_number(s) + } CheckpointId::Digest(d) => state.inner().get_checkpoint_by_digest(&d.into()), }? .ok_or(CheckpointNotFoundError(checkpoint_id))? diff --git a/crates/sui-rest-api/src/client/mod.rs b/crates/sui-rest-api/src/client/mod.rs index 15000c16e971c..915563b0798bc 100644 --- a/crates/sui-rest-api/src/client/mod.rs +++ b/crates/sui-rest-api/src/client/mod.rs @@ -2,9 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 pub mod sdk; +use sdk::Result; + +pub use reqwest; use crate::transactions::ExecuteTransactionQueryParameters; -use anyhow::Result; use sui_types::base_types::{ObjectID, SequenceNumber, SuiAddress}; use sui_types::crypto::AuthorityStrongQuorumSignInfo; use sui_types::effects::{TransactionEffects, TransactionEvents}; diff --git a/crates/sui-rest-api/src/client/sdk.rs b/crates/sui-rest-api/src/client/sdk.rs index 9db18f9d8cc61..892c0d2c4835e 100644 --- a/crates/sui-rest-api/src/client/sdk.rs +++ b/crates/sui-rest-api/src/client/sdk.rs @@ -1,8 +1,6 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use anyhow::anyhow; -use anyhow::Result; use reqwest::header::HeaderValue; use reqwest::StatusCode; use reqwest::Url; @@ -57,10 +55,12 @@ pub struct Client { impl Client { pub fn new(url: &str) -> Result { - let mut url = Url::parse(url)?; + let mut url = Url::parse(url).map_err(Error::from_error)?; if url.cannot_be_a_base() { - return Err(anyhow!("provided url '{url}' cannot be used as a base")); + return Err(Error::new_message(format!( + "provided url '{url}' cannot be used as a base" + ))); } url.set_path("/v2/"); @@ -81,12 +81,12 @@ impl Client { &self.inner } - pub(super) fn url(&self) -> &Url { + pub fn url(&self) -> &Url { &self.url } pub async fn node_info(&self) -> Result> { - let url = self.url.join("")?; + let url = self.url().join("")?; let response = self .inner @@ -99,7 +99,7 @@ impl Client { } pub async fn health_check(&self, threshold_seconds: Option) -> Result> { - let url = self.url.join("health")?; + let url = self.url().join("health")?; let query = Threshold { threshold_seconds }; let response = self.inner.get(url).query(&query).send().await?; @@ -108,7 +108,7 @@ impl Client { } pub async fn get_coin_info(&self, coin_type: &StructTag) -> Result> { - let url = self.url.join(&format!("coins/{coin_type}"))?; + let url = self.url().join(&format!("coins/{coin_type}"))?; let response = self .inner @@ -125,7 +125,7 @@ impl Client { account: Address, parameters: &ListAccountOwnedObjectsQueryParameters, ) -> Result>> { - let url = self.url.join(&format!("account/{account}/objects"))?; + let url = self.url().join(&format!("account/{account}/objects"))?; let response = self .inner @@ -139,7 +139,7 @@ impl Client { } pub async fn get_object(&self, object_id: ObjectId) -> Result> { - let url = self.url.join(&format!("objects/{object_id}"))?; + let url = self.url().join(&format!("objects/{object_id}"))?; let response = self .inner @@ -157,7 +157,7 @@ impl Client { version: Version, ) -> Result> { let url = self - .url + .url() .join(&format!("objects/{object_id}/version/{version}"))?; let response = self @@ -175,7 +175,7 @@ impl Client { object_id: ObjectId, parameters: &ListDynamicFieldsQueryParameters, ) -> Result>> { - let url = self.url.join(&format!("objects/{object_id}"))?; + let url = self.url().join(&format!("objects/{object_id}"))?; let response = self .inner @@ -189,7 +189,7 @@ impl Client { } pub async fn get_gas_info(&self) -> Result> { - let url = self.url.join("system/gas")?; + let url = self.url().join("system/gas")?; let response = self .inner @@ -209,7 +209,7 @@ impl Client { } pub async fn get_current_protocol_config(&self) -> Result> { - let url = self.url.join("system/protocol")?; + let url = self.url().join("system/protocol")?; let response = self .inner @@ -225,7 +225,7 @@ impl Client { &self, version: u64, ) -> Result> { - let url = self.url.join(&format!("system/protocol/{version}"))?; + let url = self.url().join(&format!("system/protocol/{version}"))?; let response = self .inner @@ -238,7 +238,7 @@ impl Client { } pub async fn get_system_state_summary(&self) -> Result> { - let url = self.url.join("system")?; + let url = self.url().join("system")?; let response = self .inner @@ -251,7 +251,7 @@ impl Client { } pub async fn get_current_committee(&self) -> Result> { - let url = self.url.join("system/committee")?; + let url = self.url().join("system/committee")?; let response = self .inner @@ -264,7 +264,7 @@ impl Client { } pub async fn get_committee(&self, epoch: EpochId) -> Result> { - let url = 
self.url.join(&format!("system/committee/{epoch}"))?; + let url = self.url().join(&format!("system/committee/{epoch}"))?; let response = self .inner @@ -281,7 +281,7 @@ impl Client { checkpoint_sequence_number: CheckpointSequenceNumber, ) -> Result> { let url = self - .url + .url() .join(&format!("checkpoints/{checkpoint_sequence_number}"))?; let response = self @@ -305,7 +305,7 @@ impl Client { let checkpoint = page .pop() - .ok_or_else(|| anyhow!("server returned empty checkpoint list"))?; + .ok_or_else(|| Error::new_message("server returned empty checkpoint list"))?; Ok(Response::new(checkpoint, parts)) } @@ -314,7 +314,7 @@ impl Client { &self, parameters: &ListCheckpointsQueryParameters, ) -> Result>> { - let url = self.url.join("checkpoints")?; + let url = self.url().join("checkpoints")?; let response = self .inner @@ -332,7 +332,7 @@ impl Client { checkpoint_sequence_number: CheckpointSequenceNumber, ) -> Result> { let url = self - .url + .url() .join(&format!("checkpoints/{checkpoint_sequence_number}/full"))?; let response = self @@ -349,7 +349,7 @@ impl Client { &self, transaction: &TransactionDigest, ) -> Result> { - let url = self.url.join(&format!("transactions/{transaction}"))?; + let url = self.url().join(&format!("transactions/{transaction}"))?; let response = self .inner @@ -365,7 +365,7 @@ impl Client { &self, parameters: &ListTransactionsQueryParameters, ) -> Result>> { - let url = self.url.join("transactions")?; + let url = self.url().join("transactions")?; let response = self .inner @@ -383,7 +383,7 @@ impl Client { parameters: &ExecuteTransactionQueryParameters, transaction: &SignedTransaction, ) -> Result> { - let url = self.url.join("transactions")?; + let url = self.url().join("transactions")?; let body = bcs::to_bytes(transaction)?; @@ -400,22 +400,27 @@ impl Client { self.bcs(response).await } - fn check_response( + async fn check_response( &self, response: reqwest::Response, ) -> Result<(reqwest::Response, ResponseParts)> { + let parts = ResponseParts::from_reqwest_response(&response); + if !response.status().is_success() { - let status = response.status(); - return Err(anyhow::anyhow!("request failed with status {status}")); - } + let error = match response.text().await { + Ok(body) => Error::new_message(body), + Err(e) => Error::from_error(e), + } + .pipe(|e| e.with_parts(parts)); - let parts = ResponseParts::from_reqwest_response(&response); + return Err(error); + } Ok((response, parts)) } async fn empty(&self, response: reqwest::Response) -> Result> { - let (_response, parts) = self.check_response(response)?; + let (_response, parts) = self.check_response(response).await?; Ok(Response::new((), parts)) } @@ -423,7 +428,7 @@ impl Client { &self, response: reqwest::Response, ) -> Result> { - let (response, parts) = self.check_response(response)?; + let (response, parts) = self.check_response(response).await?; let json = response.json().await?; Ok(Response::new(json, parts)) @@ -433,11 +438,13 @@ impl Client { &self, response: reqwest::Response, ) -> Result> { - let (response, parts) = self.check_response(response)?; + let (response, parts) = self.check_response(response).await?; let bytes = response.bytes().await?; - let bcs = bcs::from_bytes(&bytes)?; - Ok(Response::new(bcs, parts)) + match bcs::from_bytes(&bytes) { + Ok(bcs) => Ok(Response::new(bcs, parts)), + Err(e) => Err(Error::from_error(e).with_parts(parts)), + } } } @@ -553,3 +560,109 @@ impl Response { Response::new(f(inner), state) } } + +pub type Result = std::result::Result; + +type BoxError = Box; + 
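+// A minimal usage sketch for the error type defined below (hypothetical caller code, not part of this change; `client` and `object_id` are assumed bindings): +// +// match client.get_object(object_id).await { +// Ok(response) => { /* use the object plus response.parts() */ } +// // `status()` and `message()` surface the HTTP response parts and body text captured by `check_response` above. +// Err(error) => eprintln!("request failed: {:?} {:?}", error.status(), error.message()), +// }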
+#[derive(Debug)] +pub struct Error { + inner: Box, +} + +#[derive(Debug)] +struct InnerError { + parts: Option, + message: Option, + source: Option, +} + +impl Error { + fn empty() -> Self { + Self { + inner: Box::new(InnerError { + parts: None, + message: None, + source: None, + }), + } + } + + fn from_error>(error: E) -> Self { + Self::empty().with_error(error.into()) + } + + fn new_message>(message: M) -> Self { + Self::empty().with_message(message.into()) + } + + fn with_parts(mut self, parts: ResponseParts) -> Self { + self.inner.parts.replace(parts); + self + } + + fn with_message(mut self, message: String) -> Self { + self.inner.message.replace(message); + self + } + + fn with_error(mut self, error: BoxError) -> Self { + self.inner.source.replace(error); + self + } + + pub fn status(&self) -> Option { + self.parts().map(|parts| parts.status) + } + + pub fn parts(&self) -> Option<&ResponseParts> { + self.inner.parts.as_ref() + } + + pub fn message(&self) -> Option<&str> { + self.inner.message.as_deref() + } +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Rest Client Error:")?; + if let Some(status) = self.status() { + write!(f, " {status}")?; + } + + if let Some(message) = self.message() { + write!(f, " '{message}'")?; + } + + if let Some(source) = &self.inner.source { + write!(f, " '{source}'")?; + } + + Ok(()) + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + self.inner.source.as_deref().map(|e| e as _) + } +} + +impl From for Error { + fn from(error: reqwest::Error) -> Self { + Self::from_error(error) + } +} + +impl From for Error { + fn from(error: bcs::Error) -> Self { + Self::from_error(error) + } +} + +impl From for Error { + fn from(error: url::ParseError) -> Self { + Self::from_error(error) + } +} diff --git a/crates/sui-rest-api/src/lib.rs b/crates/sui-rest-api/src/lib.rs index 9efd7f35325a1..18993abb1b23e 100644 --- a/crates/sui-rest-api/src/lib.rs +++ b/crates/sui-rest-api/src/lib.rs @@ -1,12 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use axum::Router; +use axum::{response::Redirect, routing::get, Router}; use mysten_network::callback::CallbackLayer; use openapi::ApiEndpoint; use reader::StateReader; use std::sync::Arc; use sui_types::storage::RestStateReader; +use sui_types::transaction_executor::TransactionExecutor; use tap::Pipe; pub mod accept; @@ -32,7 +33,7 @@ pub use client::Client; pub use error::{RestError, Result}; pub use metrics::RestMetrics; pub use sui_types::full_checkpoint_content::{CheckpointData, CheckpointTransaction}; -pub use transactions::{ExecuteTransactionQueryParameters, TransactionExecutor}; +pub use transactions::ExecuteTransactionQueryParameters; pub const TEXT_PLAIN_UTF_8: &str = "text/plain; charset=utf-8"; pub const APPLICATION_BCS: &str = "application/bcs"; @@ -45,6 +46,7 @@ pub enum Direction { Descending, } +#[derive(Debug)] pub struct Page { pub entries: response::ResponseContent>, pub cursor: Option, @@ -145,8 +147,14 @@ impl RestService { api.register_endpoints(ENDPOINTS.to_owned()); - api.to_router() - .with_state(self.clone()) + Router::new() + .nest("/v2/", api.to_router().with_state(self.clone())) + .route("/v2", get(|| async { Redirect::permanent("/v2/") })) + // Previously the service used to be hosted at `/rest`. In an effort to migrate folks + // to the new versioned route, we'll issue redirects from `/rest` -> `/v2`. 
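+ // For example (illustrative, following the `redirect` handler at the bottom of this file): a request for `/rest/checkpoints/42` receives a permanent redirect to `/v2/checkpoints/42`.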
+ .route("/rest/*path", axum::routing::method_routing::any(redirect)) + .route("/rest", get(|| async { Redirect::permanent("/v2/") })) + .route("/rest/", get(|| async { Redirect::permanent("/v2/") })) .layer(axum::middleware::map_response_with_state( self, response::append_info_headers, @@ -162,15 +170,9 @@ impl RestService { }) } - pub async fn start_service(self, socket_address: std::net::SocketAddr, base: Option) { - let mut app = self.into_router(); - - if let Some(base) = base { - app = Router::new().nest(&base, app); - } - + pub async fn start_service(self, socket_address: std::net::SocketAddr) { let listener = tokio::net::TcpListener::bind(socket_address).await.unwrap(); - axum::serve(listener, app).await.unwrap(); + axum::serve(listener, self.into_router()).await.unwrap(); } } @@ -196,6 +198,10 @@ fn info() -> openapiv3::v3_1::Info { } } +async fn redirect(axum::extract::Path(path): axum::extract::Path) -> Redirect { + Redirect::permanent(&format!("/v2/{path}")) +} + mod _schemars { use schemars::schema::InstanceType; use schemars::schema::Metadata; diff --git a/crates/sui-rest-api/src/reader.rs b/crates/sui-rest-api/src/reader.rs index dd07ba57ae88f..30b2f8c2271b7 100644 --- a/crates/sui-rest-api/src/reader.rs +++ b/crates/sui-rest-api/src/reader.rs @@ -260,6 +260,7 @@ impl Iterator for CheckpointTransactionsIter { pub struct CursorInfo { pub checkpoint: CheckpointSequenceNumber, pub timestamp_ms: u64, + #[allow(unused)] pub index: u64, // None if there are no more transactions in the store diff --git a/crates/sui-rest-api/src/response.rs b/crates/sui-rest-api/src/response.rs index 5b3866d68db18..cc30c9935cd1c 100644 --- a/crates/sui-rest-api/src/response.rs +++ b/crates/sui-rest-api/src/response.rs @@ -19,6 +19,7 @@ use crate::{ pub struct Bcs(pub T); +#[derive(Debug)] pub enum ResponseContent { Bcs(T), Json(J), @@ -101,9 +102,9 @@ impl axum::response::IntoResponse for BcsRejection { "Expected request with `Content-Type: application/bcs`", ) .into_response(), - BcsRejection::DeserializationError(_) => ( + BcsRejection::DeserializationError(e) => ( StatusCode::UNPROCESSABLE_ENTITY, - "Failed to deserialize the BCS body into the target type", + format!("Failed to deserialize the BCS body into the target type: {e}"), ) .into_response(), BcsRejection::BytesRejection(bytes_rejection) => bytes_rejection.into_response(), diff --git a/crates/sui-rest-api/src/transactions/execution.rs b/crates/sui-rest-api/src/transactions/execution.rs index c075ad6a3de99..9611b82c4bf15 100644 --- a/crates/sui-rest-api/src/transactions/execution.rs +++ b/crates/sui-rest-api/src/transactions/execution.rs @@ -11,6 +11,7 @@ use sui_sdk2::types::{ Address, BalanceChange, CheckpointSequenceNumber, Object, Owner, SignedTransaction, TransactionEffects, TransactionEvents, ValidatorAggregatedSignature, }; +use sui_types::transaction_executor::TransactionExecutor; use tap::Pipe; use crate::openapi::{ @@ -20,22 +21,6 @@ use crate::response::Bcs; use crate::{accept::AcceptFormat, response::ResponseContent}; use crate::{RestService, Result}; -/// Trait to define the interface for how the REST service interacts with a a QuorumDriver or a -/// simulated transaction executor. 
-#[async_trait::async_trait] -pub trait TransactionExecutor: Send + Sync { - async fn execute_transaction( - &self, - request: sui_types::quorum_driver_types::ExecuteTransactionRequestV3, - client_addr: Option, - ) -> Result< - sui_types::quorum_driver_types::ExecuteTransactionResponseV3, - sui_types::quorum_driver_types::QuorumDriverError, - >; - - //TODO include Simulate functionality -} - pub struct ExecuteTransaction; impl ApiEndpoint for ExecuteTransaction { diff --git a/crates/sui-rest-api/src/transactions/mod.rs b/crates/sui-rest-api/src/transactions/mod.rs index 26921104d8c58..6e9ffed21492d 100644 --- a/crates/sui-rest-api/src/transactions/mod.rs +++ b/crates/sui-rest-api/src/transactions/mod.rs @@ -6,7 +6,6 @@ pub use execution::EffectsFinality; pub use execution::ExecuteTransaction; pub use execution::ExecuteTransactionQueryParameters; pub use execution::TransactionExecutionResponse; -pub use execution::TransactionExecutor; use axum::extract::{Path, Query, State}; use axum::http::StatusCode; diff --git a/crates/sui-rosetta/src/construction.rs b/crates/sui-rosetta/src/construction.rs index 45eaaef6abe57..d67c37ce34a94 100644 --- a/crates/sui-rosetta/src/construction.rs +++ b/crates/sui-rosetta/src/construction.rs @@ -74,7 +74,7 @@ pub async fn payloads( let intent_msg_bytes = bcs::to_bytes(&intent_msg)?; let mut hasher = DefaultHash::default(); - hasher.update(&bcs::to_bytes(&intent_msg).expect("Message serialization should not fail")); + hasher.update(bcs::to_bytes(&intent_msg).expect("Message serialization should not fail")); let digest = hasher.finalize().digest; Ok(ConstructionPayloadsResponse { diff --git a/crates/sui-rosetta/src/unit_tests/balance_changing_tx_tests.rs b/crates/sui-rosetta/src/unit_tests/balance_changing_tx_tests.rs index b0dba64531281..3fcf3044a3ee7 100644 --- a/crates/sui-rosetta/src/unit_tests/balance_changing_tx_tests.rs +++ b/crates/sui-rosetta/src/unit_tests/balance_changing_tx_tests.rs @@ -144,7 +144,7 @@ async fn test_publish_and_move_call() { let compiled_package = BuildConfig::new_for_testing().build(&path).unwrap(); let compiled_modules_bytes = compiled_package.get_package_bytes(/* with_unpublished_deps */ false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); let pt = { let mut builder = ProgrammableTransactionBuilder::new(); diff --git a/crates/sui-rosetta/tests/gas_budget_tests.rs b/crates/sui-rosetta/tests/gas_budget_tests.rs index ead522c111354..f5666298bdd58 100644 --- a/crates/sui-rosetta/tests/gas_budget_tests.rs +++ b/crates/sui-rosetta/tests/gas_budget_tests.rs @@ -28,6 +28,7 @@ mod rosetta_client; #[derive(Deserialize, Debug)] #[serde(untagged)] enum TransactionIdentifierResponseResult { + #[allow(unused)] Success(TransactionIdentifierResponse), Error(RosettaSubmitGasError), } diff --git a/crates/sui-rpc-loadgen/src/payload/query_transactions.rs b/crates/sui-rpc-loadgen/src/payload/query_transactions.rs index b4abb519fee19..5482372cdd812 100644 --- a/crates/sui-rpc-loadgen/src/payload/query_transactions.rs +++ b/crates/sui-rpc-loadgen/src/payload/query_transactions.rs @@ -82,7 +82,7 @@ impl<'a> ProcessPayload<'a, &'a QueryTransactionBlocks> for RpcCommandProcessor } }; - results = join_all(clients.iter().enumerate().map(|(_i, client)| { + results = join_all(clients.iter().map(|client| { let with_query = query.clone(); async move { query_transaction_blocks(client, with_query, cursor, None) diff --git 
a/crates/sui-rpc-loadgen/src/payload/validation.rs b/crates/sui-rpc-loadgen/src/payload/validation.rs index 00656da736ed7..8204692c73ab1 100644 --- a/crates/sui-rpc-loadgen/src/payload/validation.rs +++ b/crates/sui-rpc-loadgen/src/payload/validation.rs @@ -16,7 +16,7 @@ use tracing::log::warn; const LOADGEN_QUERY_MAX_RESULT_LIMIT: usize = 25; -pub(crate) fn cross_validate_entities<U>(entities: &Vec<Vec<U>>, entity_name: &str) +pub(crate) fn cross_validate_entities<U>(entities: &[Vec<U>], entity_name: &str) where U: PartialEq + Debug, { diff --git a/crates/sui-sdk/src/apis.rs b/crates/sui-sdk/src/apis.rs index 61c4cd5e81ee0..8597d2fb920f9 100644 --- a/crates/sui-sdk/src/apis.rs +++ b/crates/sui-sdk/src/apis.rs @@ -9,6 +9,7 @@ use jsonrpsee::core::client::Subscription; use std::collections::BTreeMap; use std::future; use std::sync::Arc; +use std::time::Duration; use std::time::Instant; use sui_json_rpc_types::DevInspectArgs; use sui_json_rpc_types::SuiData; @@ -39,7 +40,9 @@ use sui_types::sui_serde::BigInt; use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; use sui_types::transaction::{Transaction, TransactionData, TransactionKind}; -const WAIT_FOR_LOCAL_EXECUTION_RETRY_COUNT: u8 = 3; +const WAIT_FOR_LOCAL_EXECUTION_TIMEOUT: Duration = Duration::from_secs(60); +const WAIT_FOR_LOCAL_EXECUTION_DELAY: Duration = Duration::from_millis(200); +const WAIT_FOR_LOCAL_EXECUTION_INTERVAL: Duration = Duration::from_secs(2); /// The main read API structure with functions for retrieving data about different objects and transactions #[derive(Debug)] @@ -1133,39 +1136,49 @@ impl QuorumDriverApi { ) -> SuiRpcResult<SuiTransactionBlockResponse> { let (tx_bytes, signatures) = tx.to_tx_bytes_and_signatures(); let request_type = request_type.unwrap_or_else(|| options.default_execution_request_type()); - let mut retry_count = 0; + let start = Instant::now(); - while retry_count < WAIT_FOR_LOCAL_EXECUTION_RETRY_COUNT { - let response: SuiTransactionBlockResponse = self - .api - .http - .execute_transaction_block( - tx_bytes.clone(), - signatures.clone(), - Some(options.clone()), - Some(request_type.clone()), - ) - .await?; + let response = self + .api + .http + .execute_transaction_block( + tx_bytes.clone(), + signatures.clone(), + Some(options.clone()), + Some(request_type.clone()), + ) + .await?; - match request_type { - ExecuteTransactionRequestType::WaitForEffectsCert => { - return Ok(response); - } - ExecuteTransactionRequestType::WaitForLocalExecution => { - if let Some(true) = response.confirmed_local_execution { - return Ok(response); - } else { - // If fullnode executed the cert in the network but did not confirm local - // execution, it must have timed out and hence we could retry. - retry_count += 1; - } + if let ExecuteTransactionRequestType::WaitForEffectsCert = request_type { + return Ok(response); + } + + // JSON-RPC ignores WaitForLocalExecution, so simulate it by polling for the transaction. + let mut poll_response = tokio::time::timeout(WAIT_FOR_LOCAL_EXECUTION_TIMEOUT, async { + // Apply a short delay to give the full node a chance to catch up.
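+ // (Note: `tokio::time::interval` completes its first tick immediately, so the first `get_transaction_block` poll below happens right after this delay, and subsequent polls happen every WAIT_FOR_LOCAL_EXECUTION_INTERVAL until WAIT_FOR_LOCAL_EXECUTION_TIMEOUT elapses.)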
+ tokio::time::sleep(WAIT_FOR_LOCAL_EXECUTION_DELAY).await; + + let mut interval = tokio::time::interval(WAIT_FOR_LOCAL_EXECUTION_INTERVAL); + loop { + interval.tick().await; + + if let Ok(poll_response) = self + .api + .http + .get_transaction_block(*tx.digest(), Some(options.clone())) + .await + { + break poll_response; + } } - } - Err(Error::FailToConfirmTransactionStatus( - *tx.digest(), - start.elapsed().as_secs(), - )) + }) + .await + .map_err(|_| { + Error::FailToConfirmTransactionStatus(*tx.digest(), start.elapsed().as_secs()) + })?; + + poll_response.confirmed_local_execution = Some(true); + Ok(poll_response) } } diff --git a/crates/sui-sdk/src/lib.rs b/crates/sui-sdk/src/lib.rs index 945a0e0ba5c86..9200d8451e5fc 100644 --- a/crates/sui-sdk/src/lib.rs +++ b/crates/sui-sdk/src/lib.rs @@ -14,9 +14,9 @@ //! * [EventApi] - provides event related functions functions to //! * [GovernanceApi] - provides functionality related to staking //! * [QuorumDriverApi] - provides functionality to execute a transaction -//! block and submit it to the fullnode(s) +//! block and submit it to the fullnode(s) //! * [ReadApi] - provides functions for retrieving data about different -//! objects and transactions +//! objects and transactions //! * TransactionBuilder - provides functions for building transactions //! //! # Usage diff --git a/crates/sui-security-watchdog/src/query_runner.rs b/crates/sui-security-watchdog/src/query_runner.rs index a0ab4ec1054e5..37e1fd95a9815 100644 --- a/crates/sui-security-watchdog/src/query_runner.rs +++ b/crates/sui-security-watchdog/src/query_runner.rs @@ -214,7 +214,8 @@ impl QueryRunner for SnowflakeQueryRunner { let res = self.make_snowflake_api()?.exec(query).await?; match res { QueryResult::Arrow(records) => self.parse_record_batches(records), - // Handle other result types (Json, Empty) with a unified error message + QueryResult::Empty => Ok(Vec::new()), + // Handle the remaining result type (Json) with a unified error message _ => Err(anyhow!("Unexpected query result type")), } } diff --git a/crates/sui-simulator/Cargo.toml b/crates/sui-simulator/Cargo.toml index 5d2f864250cc3..4d12c46ef42a4 100644 --- a/crates/sui-simulator/Cargo.toml +++ b/crates/sui-simulator/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] move-package.workspace = true diff --git a/crates/sui-simulator/src/lib.rs b/crates/sui-simulator/src/lib.rs index 086d2f1282184..91fa6b5937d26 100644 --- a/crates/sui-simulator/src/lib.rs +++ b/crates/sui-simulator/src/lib.rs @@ -110,7 +110,7 @@ pub mod configs { } thread_local!
{ - static NODE_COUNT: AtomicUsize = AtomicUsize::new(0); + static NODE_COUNT: AtomicUsize = const { AtomicUsize::new(0) }; } pub struct NodeLeakDetector(()); diff --git a/crates/sui-single-node-benchmark/Cargo.toml b/crates/sui-single-node-benchmark/Cargo.toml index 8cad1f304ca72..33cdf4cc24138 100644 --- a/crates/sui-single-node-benchmark/Cargo.toml +++ b/crates/sui-single-node-benchmark/Cargo.toml @@ -10,6 +10,7 @@ move-binary-format.workspace = true move-bytecode-utils.workspace = true move-core-types.workspace = true move-package.workspace = true +move-symbol-pool.workspace = true sui-config.workspace = true sui-core = { workspace = true, features = ["test-utils"] } sui-move-build.workspace = true diff --git a/crates/sui-single-node-benchmark/src/tx_generator/package_publish_tx_generator.rs b/crates/sui-single-node-benchmark/src/tx_generator/package_publish_tx_generator.rs index 6912994e75093..fcf15e3a71a52 100644 --- a/crates/sui-single-node-benchmark/src/tx_generator/package_publish_tx_generator.rs +++ b/crates/sui-single-node-benchmark/src/tx_generator/package_publish_tx_generator.rs @@ -5,8 +5,9 @@ use crate::benchmark_context::BenchmarkContext; use crate::mock_account::Account; use crate::tx_generator::TxGenerator; use move_package::source_package::manifest_parser::parse_move_manifest_from_file; +use move_symbol_pool::Symbol; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use std::collections::BTreeMap; use std::fs; use std::path::PathBuf; use sui_move_build::{BuildConfig, CompiledPackage}; @@ -27,13 +28,14 @@ impl PackagePublishTxGenerator { dependencies, root_package, } = manifest; - let mut dep_map = HashMap::new(); + let mut dep_map = BTreeMap::new(); for dependency in dependencies { let Package { name, path, is_source_code, } = dependency; + info!("Publishing dependent package {}", name); let target_path = dir.join(&path); let module_bytes = if is_source_code { @@ -68,23 +70,32 @@ impl PackagePublishTxGenerator { .await .0; info!("Published dependent package {}", package_id); - dep_map.insert(name, package_id); + dep_map.insert(Symbol::from(name), package_id); } + let Package { name, path, is_source_code, } = root_package; + info!("Compiling root package {}", name); assert!( is_source_code, "Only support building root package from source code" ); + let target_path = dir.join(path); - dep_map.insert(name, ObjectID::ZERO); - let compiled_package = BuildConfig::new_for_testing_replace_addresses(dep_map) - .build(&target_path) - .unwrap(); + let published_deps = dep_map.clone(); + + dep_map.insert(Symbol::from(name), ObjectID::ZERO); + let mut compiled_package = BuildConfig::new_for_testing_replace_addresses( + dep_map.into_iter().map(|(k, v)| (k.to_string(), v)), + ) + .build(&target_path) + .unwrap(); + + compiled_package.dependency_ids.published = published_deps; Self { compiled_package } } } diff --git a/crates/sui-snapshot/src/lib.rs b/crates/sui-snapshot/src/lib.rs index a19a090770d5c..ac083f021efe1 100644 --- a/crates/sui-snapshot/src/lib.rs +++ b/crates/sui-snapshot/src/lib.rs @@ -72,6 +72,7 @@ use tokio::time::Instant; /// - epoch_1/ /// - 1_1.obj /// - ... 
+/// /// Object File Disk Format ///┌──────────────────────────────┐ ///│ magic(0x00B7EC75) <4 byte> │ diff --git a/crates/sui-source-validation-service/src/lib.rs b/crates/sui-source-validation-service/src/lib.rs index 2654a92f72373..ff95996921ea5 100644 --- a/crates/sui-source-validation-service/src/lib.rs +++ b/crates/sui-source-validation-service/src/lib.rs @@ -34,7 +34,7 @@ use sui_move_build::{BuildConfig, SuiPackageHooks}; use sui_sdk::rpc_types::SuiTransactionBlockEffects; use sui_sdk::types::base_types::ObjectID; use sui_sdk::SuiClientBuilder; -use sui_source_validation::{BytecodeSourceVerifier, SourceMode}; +use sui_source_validation::{BytecodeSourceVerifier, ValidationMode}; pub const HOST_PORT_ENV: &str = "HOST_PORT"; pub const SUI_SOURCE_VALIDATION_VERSION_HEADER: &str = "x-sui-source-validation-version"; @@ -172,11 +172,7 @@ pub async fn verify_package( let compiled_package = build_config.build(package_path.as_ref())?; BytecodeSourceVerifier::new(client.read_api()) - .verify_package( - &compiled_package, - /* verify_deps */ false, - SourceMode::Verify, - ) + .verify(&compiled_package, ValidationMode::root()) .await .map_err(|e| anyhow!("Network {network}: {e}"))?; diff --git a/crates/sui-source-validation/Cargo.toml b/crates/sui-source-validation/Cargo.toml index a1b1918889e1a..8dc2ef1f07b17 100644 --- a/crates/sui-source-validation/Cargo.toml +++ b/crates/sui-source-validation/Cargo.toml @@ -17,9 +17,10 @@ tracing.workspace = true futures.workspace = true sui-json-rpc-types.workspace = true +sui-move-build.workspace = true +sui-package-management.workspace = true sui-types.workspace = true sui-sdk.workspace = true -sui-move-build.workspace = true move-binary-format.workspace = true move-bytecode-source-map.workspace = true diff --git a/crates/sui-source-validation/src/error.rs b/crates/sui-source-validation/src/error.rs new file mode 100644 index 0000000000000..15466b0b67eae --- /dev/null +++ b/crates/sui-source-validation/src/error.rs @@ -0,0 +1,95 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::fmt; + +use move_core_types::account_address::AccountAddress; +use move_symbol_pool::Symbol; +use sui_json_rpc_types::SuiRawMoveObject; +use sui_package_management::PublishedAtError; +use sui_sdk::error::Error as SdkError; +use sui_types::{base_types::ObjectID, error::SuiObjectResponseError}; + +#[derive(Debug, thiserror::Error)] +pub struct AggregateError(pub(crate) Vec<Error>); + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Cannot check local module for {package}: {message}")] + CannotCheckLocalModules { package: Symbol, message: String }, + + #[error("Could not read a dependency's on-chain object: {0:?}")] + DependencyObjectReadFailure(SdkError), + + #[error("On-chain package {0} is empty")] + EmptyOnChainPackage(AccountAddress), + + #[error("Invalid module {name} with error: {message}")] + InvalidModuleFailure { name: String, message: String }, + + #[error("Local version of dependency {address}::{module} was not found.")] + LocalDependencyNotFound { + address: AccountAddress, + module: Symbol, + }, + + #[error("Source package depends on {0} which is not in the linkage table.")] + MissingDependencyInLinkageTable(AccountAddress), + + #[error("On-chain package depends on {0} which is not a source dependency.")] + MissingDependencyInSourcePackage(AccountAddress), + + #[error( + "Local dependency did not match its on-chain version at {address}::{package}::{module}" + )] + ModuleBytecodeMismatch { + address: AccountAddress, + package: Symbol, + module: Symbol, + }, + + #[error("Dependency ID contains a Sui object, not a Move package: {0}")] + ObjectFoundWhenPackageExpected(ObjectID, SuiRawMoveObject), + + #[error("Could not deserialize on-chain dependency {address}::{module}.")] + OnChainDependencyDeserializationError { + address: AccountAddress, + module: Symbol, + }, + + #[error("On-chain version of dependency {package}::{module} was not found.")] + OnChainDependencyNotFound { package: Symbol, module: Symbol }, + + #[error("{0}. Please supply an explicit on-chain address for the package")] + PublishedAt(#[from] PublishedAtError), + + #[error("Dependency object does not exist or was deleted: {0:?}")] + SuiObjectRefFailure(SuiObjectResponseError), + + #[error("On-chain address cannot be zero")] + ZeroOnChainAddresSpecifiedFailure, +} + +impl fmt::Display for AggregateError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let Self(errors) = self; + match &errors[..] { + [] => unreachable!("Aggregate error with no errors"), + [error] => write!(f, "{}", error)?, + errors => { + writeln!(f, "Multiple source verification errors found:")?; + for error in errors { + write!(f, "\n- {}", error)?; + } + return Ok(()); + } + }; + Ok(()) + } +} + +impl From<Error> for AggregateError { + fn from(error: Error) -> Self { + Self(vec![error]) + } +} diff --git a/crates/sui-source-validation/src/lib.rs b/crates/sui-source-validation/src/lib.rs index 03911a2e00d82..a692974c68a3f 100644 --- a/crates/sui-source-validation/src/lib.rs +++ b/crates/sui-source-validation/src/lib.rs @@ -1,140 +1,41 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use anyhow::{anyhow, bail, ensure}; -use colored::Colorize; -use core::fmt; +use crate::error::{AggregateError, Error}; use futures::future; use move_binary_format::CompiledModule; -use move_bytecode_source_map::utils::source_map_from_file; -use move_compiler::editions::{Edition, Flavor}; -use move_compiler::shared::NumericalAddress; -use move_package::compilation::package_layout::CompiledPackageLayout; -use move_package::lock_file::schema::{Header, ToolchainVersion}; -use move_package::source_package::layout::SourcePackageLayout; -use move_package::source_package::parsed_manifest::{FileName, PackageName}; -use std::ffi::OsStr; -use std::fs::File; -use std::io::{self, Seek}; -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{collections::HashMap, fmt::Debug}; -use sui_move_build::CompiledPackage; -use sui_types::error::SuiObjectResponseError; -use tar::Archive; -use tempfile::TempDir; -use thiserror::Error; -use tracing::{debug, info}; - -use move_command_line_common::env::MOVE_HOME; -use move_command_line_common::files::MOVE_COMPILED_EXTENSION; -use move_command_line_common::files::{ - extension_equals, find_filenames, MOVE_EXTENSION, SOURCE_MAP_EXTENSION, -}; use move_compiler::compiled_unit::NamedCompiledModule; use move_core_types::account_address::AccountAddress; -use move_package::compilation::compiled_package::{ - CompiledPackage as MoveCompiledPackage, CompiledUnitWithSource, -}; use move_symbol_pool::Symbol; +use std::collections::{HashMap, HashSet}; +use sui_move_build::CompiledPackage; use sui_sdk::apis::ReadApi; -use sui_sdk::error::Error; - -use sui_sdk::rpc_types::{SuiObjectDataOptions, SuiRawData, SuiRawMoveObject, SuiRawMovePackage}; +use sui_sdk::error::Error as SdkError; +use sui_sdk::rpc_types::{SuiObjectDataOptions, SuiRawData, SuiRawMovePackage}; use sui_types::base_types::ObjectID; +use toolchain::units_for_toolchain; + +pub mod error; +mod toolchain; #[cfg(test)] mod tests; -const CURRENT_COMPILER_VERSION: &str = env!("CARGO_PKG_VERSION"); -const LEGACY_COMPILER_VERSION: &str = CURRENT_COMPILER_VERSION; // TODO: update this when Move 2024 is released -const PRE_TOOLCHAIN_MOVE_LOCK_VERSION: u64 = 0; // Used to detect lockfiles pre-toolchain versioning support -const CANONICAL_UNIX_BINARY_NAME: &str = "sui"; -const CANONICAL_WIN_BINARY_NAME: &str = "sui.exe"; +/// Details of what to verify +pub enum ValidationMode { + /// Validate only the dependencies + Deps, -#[derive(Debug, Error)] -pub enum SourceVerificationError { - #[error("Could not read a dependency's on-chain object: {0:?}")] - DependencyObjectReadFailure(Error), + /// Validate the root package, and its linkage. + Root { + /// Additionally validate the dependencies, and make sure the runtime and storage IDs in + /// dependency source code match the root package's on-chain linkage table.
+ deps: bool, - #[error("Dependency object does not exist or was deleted: {0:?}")] - SuiObjectRefFailure(SuiObjectResponseError), - - #[error("Dependency ID contains a Sui object, not a Move package: {0}")] - ObjectFoundWhenPackageExpected(ObjectID, SuiRawMoveObject), - - #[error("On-chain version of dependency {package}::{module} was not found.")] - OnChainDependencyNotFound { package: Symbol, module: Symbol }, - - #[error("Could not deserialize on-chain dependency {address}::{module}.")] - OnChainDependencyDeserializationError { - address: AccountAddress, - module: Symbol, + /// Look for the root package on-chain at the specified address, rather than the address in + /// its manifest. + at: Option<AccountAddress>, }, - - #[error("Local version of dependency {address}::{module} was not found.")] - LocalDependencyNotFound { - address: AccountAddress, - module: Symbol, - }, - - #[error( - "Local dependency did not match its on-chain version at {address}::{package}::{module}" - )] - ModuleBytecodeMismatch { - address: AccountAddress, - package: Symbol, - module: Symbol, - }, - - #[error("Cannot check local module for {package}: {message}")] - CannotCheckLocalModules { package: Symbol, message: String }, - - #[error("On-chain address cannot be zero")] - ZeroOnChainAddresSpecifiedFailure, - - #[error("Invalid module {name} with error: {message}")] - InvalidModuleFailure { name: String, message: String }, -} - -#[derive(Debug, Error)] -pub struct AggregateSourceVerificationError(Vec<SourceVerificationError>); - -impl From<SourceVerificationError> for AggregateSourceVerificationError { - fn from(error: SourceVerificationError) -> Self { - AggregateSourceVerificationError(vec![error]) - } -} - -impl fmt::Display for AggregateSourceVerificationError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let AggregateSourceVerificationError(errors) = self; - match &errors[..] { - [] => unreachable!("Aggregate error with no errors"), - [error] => write!(f, "{}", error)?, - errors => { - writeln!(f, "Multiple source verification errors found:")?; - for error in errors { - write!(f, "\n- {}", error)?; - } - return Ok(()); - } - }; - Ok(()) - } -} - -/// How to handle package source during bytecode verification. -#[derive(PartialEq, Eq)] -pub enum SourceMode { - /// Don't verify source. - Skip, - - /// Verify source at the address specified in its manifest. - Verify, - - /// Verify source at an overridden address (only works if the package is not published) - VerifyAt(AccountAddress), } pub struct BytecodeSourceVerifier<'a> { @@ -143,669 +44,396 @@ pub struct BytecodeSourceVerifier<'a> { /// Map package addresses and module names to package names and bytecode. type LocalModules = HashMap<(AccountAddress, Symbol), (Symbol, CompiledModule)>; -/// Map package addresses and modules names to bytecode (package names are gone in the on-chain -/// representation). -type OnChainModules = HashMap<(AccountAddress, Symbol), CompiledModule>; -impl<'a> BytecodeSourceVerifier<'a> { - pub fn new(rpc_client: &'a ReadApi) -> Self { - BytecodeSourceVerifier { rpc_client } - } - - /// Helper wrapper to verify that all local Move package dependencies' and root bytecode matches - /// the bytecode at the address specified on the Sui network we are publishing to.
- pub async fn verify_package_root_and_deps( - &self, - compiled_package: &CompiledPackage, - root_on_chain_address: AccountAddress, - ) -> Result<(), AggregateSourceVerificationError> { - self.verify_package( - compiled_package, - /* verify_deps */ true, - SourceMode::VerifyAt(root_on_chain_address), - ) - .await - } +#[derive(Default)] +struct OnChainRepresentation { + /// Storage IDs from the root package's on-chain linkage table. This will only be present if + /// root package verification was requested, in which case the keys from this mapping must + /// match the source package's dependencies. + on_chain_dependencies: Option<HashSet<AccountAddress>>, - /// Helper wrapper to verify that all local Move package root bytecode matches - /// the bytecode at the address specified on the Sui network we are publishing to. - pub async fn verify_package_root( - &self, - compiled_package: &CompiledPackage, - root_on_chain_address: AccountAddress, - ) -> Result<(), AggregateSourceVerificationError> { - self.verify_package( - compiled_package, - /* verify_deps */ false, - SourceMode::VerifyAt(root_on_chain_address), - ) - .await - } + /// Map package addresses and module names to bytecode (package names are gone in the on-chain + /// representation). + modules: HashMap<(AccountAddress, Symbol), CompiledModule>, +} - /// Helper wrapper to verify that all local Move package dependencies' matches - /// the bytecode at the address specified on the Sui network we are publishing to. - pub async fn verify_package_deps( - &self, - compiled_package: &CompiledPackage, - ) -> Result<(), AggregateSourceVerificationError> { - self.verify_package( - compiled_package, - /* verify_deps */ true, - SourceMode::Skip, - ) - .await +impl ValidationMode { + /// Only verify that source dependencies match their on-chain versions. + pub fn deps() -> Self { + Self::Deps } - /// Verify that all local Move package dependencies' and/or root bytecode matches the bytecode - /// at the address specified on the Sui network we are publishing to. If `verify_deps` is true, - /// the dependencies are verified. If `root_on_chain_address` is specified, the root is - /// verified against a package at `root_on_chain_address`. - pub async fn verify_package( - &self, - compiled_package: &CompiledPackage, - verify_deps: bool, - source_mode: SourceMode, - ) -> Result<(), AggregateSourceVerificationError> { - let mut on_chain_pkgs = vec![]; - match &source_mode { - SourceMode::Skip => (), - // On-chain address for matching root package cannot be zero - SourceMode::VerifyAt(AccountAddress::ZERO) => { - return Err(SourceVerificationError::ZeroOnChainAddresSpecifiedFailure.into()) - } - SourceMode::VerifyAt(root_address) => on_chain_pkgs.push(*root_address), - SourceMode::Verify => { - on_chain_pkgs.extend(compiled_package.published_at.as_ref().map(|id| **id)) - } - }; - - if verify_deps { - on_chain_pkgs.extend( - compiled_package - .dependency_ids - .published - .values() - .map(|id| **id), - ); + /// Only verify that the root package matches its on-chain version (requires that the root + /// package is published with its address available in the manifest).
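+ /// + /// A hypothetical call site, assuming a `verifier: BytecodeSourceVerifier` and a compiled + /// `pkg: CompiledPackage` are in scope: + /// + /// ```ignore + /// verifier.verify(&pkg, ValidationMode::root()).await?; + /// ```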
+ pub fn root() -> Self { + Self::Root { + deps: false, + at: None, } + } - let local_modules = local_modules(&compiled_package.package, verify_deps, source_mode)?; - let mut on_chain_modules = self.on_chain_modules(on_chain_pkgs.into_iter()).await?; - - let mut errors = Vec::new(); - for ((address, module), (package, local_module)) in local_modules { - let Some(on_chain_module) = on_chain_modules.remove(&(address, module)) else { - errors.push(SourceVerificationError::OnChainDependencyNotFound { package, module }); - continue; - }; - - // compare local bytecode to on-chain bytecode to ensure integrity of our - // dependencies - if local_module != on_chain_module { - errors.push(SourceVerificationError::ModuleBytecodeMismatch { - address, - package, - module, - }); - } + /// Only verify that the root package matches its on-chain version, but override the location + /// to look for the root package to `address`. + pub fn root_at(address: AccountAddress) -> Self { + Self::Root { + deps: false, + at: Some(address), } + } - if let Some(((address, module), _)) = on_chain_modules.into_iter().next() { - errors.push(SourceVerificationError::LocalDependencyNotFound { address, module }); + /// Verify both the root package and its dependencies (requires that the root package is + /// published with its address available in the manifest). + pub fn root_and_deps() -> Self { + Self::Root { + deps: true, + at: None, } + } - if !errors.is_empty() { - return Err(AggregateSourceVerificationError(errors)); + /// Verify both the root package and its dependencies, but override the location to look for + /// the root package to `address`. + pub fn root_and_deps_at(address: AccountAddress) -> Self { + Self::Root { + deps: true, + at: Some(address), } - - Ok(()) } - async fn pkg_for_address( - &self, - addr: AccountAddress, - ) -> Result<SuiRawMovePackage, SourceVerificationError> { - // Move packages are specified with an AccountAddress, but are - // fetched from a sui network via sui_getObject, which takes an object ID - let obj_id = ObjectID::from(addr); - - // fetch the Sui object at the address specified for the package in the local resolution table - // if future packages with a large set of dependency packages prove too slow to verify, - // batched object fetching should be added to the ReadApi & used here - let obj_read = self - .rpc_client - .get_object_with_options(obj_id, SuiObjectDataOptions::new().with_bcs()) - .await - .map_err(SourceVerificationError::DependencyObjectReadFailure)?; - - let obj = obj_read - .into_object() - .map_err(SourceVerificationError::SuiObjectRefFailure)? - .bcs - .ok_or_else(|| { - SourceVerificationError::DependencyObjectReadFailure(Error::DataError( - "Bcs field is not found".to_string(), - )) - })?; + /// Should we verify dependencies? + fn verify_deps(&self) -> bool { + matches!(self, Self::Deps | Self::Root { deps: true, .. }) + } - match obj { - SuiRawData::Package(pkg) => Ok(pkg), - SuiRawData::MoveObject(move_obj) => Err( - SourceVerificationError::ObjectFoundWhenPackageExpected(obj_id, move_obj), - ), + /// If the root package needs to be verified, what address should it be fetched from? + fn root_address(&self, package: &CompiledPackage) -> Result<Option<AccountAddress>, Error> { + match self { + Self::Root { at: Some(addr), .. } => Ok(Some(*addr)), + Self::Root { at: None, ..
} => Ok(Some(*package.published_at.clone()?)), + Self::Deps => Ok(None), } } - async fn on_chain_modules( - &self, - addresses: impl Iterator<Item = AccountAddress> + Clone, - ) -> Result<OnChainModules, AggregateSourceVerificationError> { - let resp = future::join_all(addresses.clone().map(|addr| self.pkg_for_address(addr))).await; - let mut map = OnChainModules::new(); - let mut err = vec![]; - - for (storage_id, pkg) in addresses.zip(resp) { - let SuiRawMovePackage { module_map, .. } = pkg?; - for (name, bytes) in module_map { - let Ok(module) = CompiledModule::deserialize_with_defaults(&bytes) else { - err.push( - SourceVerificationError::OnChainDependencyDeserializationError { - address: storage_id, - module: name.into(), - }, - ); - continue; - }; + /// All the on-chain addresses that we need to fetch to build the on-chain representation. + fn on_chain_addresses(&self, package: &CompiledPackage) -> Result<Vec<AccountAddress>, Error> { + let mut addrs = vec![]; - let runtime_id = *module.self_id().address(); - map.insert((runtime_id, Symbol::from(name)), module); - } + if let Some(addr) = self.root_address(package)? { + addrs.push(addr); } - if !err.is_empty() { - return Err(AggregateSourceVerificationError(err)); + if self.verify_deps() { + addrs.extend(dependency_addresses(package)); } - Ok(map) + Ok(addrs) } -} -fn substitute_root_address( - named_module: &NamedCompiledModule, - root: AccountAddress, -) -> Result<CompiledModule, SourceVerificationError> { - let mut module = named_module.module.clone(); - let address_idx = module.self_handle().address; + /// On-chain representation of the package and dependencies compiled to `package`, including + /// linkage information. + async fn on_chain( + &self, + package: &CompiledPackage, + verifier: &BytecodeSourceVerifier<'_>, + ) -> Result<OnChainRepresentation, AggregateError> { + let mut on_chain = OnChainRepresentation::default(); + let mut errs: Vec<Error> = vec![]; + + let root = self.root_address(package)?; + let addrs = self.on_chain_addresses(package)?; + + let resps = + future::join_all(addrs.iter().copied().map(|a| verifier.pkg_for_address(a))).await; + + for (storage_id, pkg) in addrs.into_iter().zip(resps) { + let SuiRawMovePackage { + module_map, + linkage_table, + .. + } = pkg?; + + let mut modules = module_map + .into_iter() + .map(|(name, bytes)| { + let Ok(module) = CompiledModule::deserialize_with_defaults(&bytes) else { + return Err(Error::OnChainDependencyDeserializationError { + address: storage_id, + module: name.into(), + }); + }; - let Some(addr) = module.address_identifiers.get_mut(address_idx.0 as usize) else { - return Err(SourceVerificationError::InvalidModuleFailure { - name: named_module.name.to_string(), - message: "Self address field missing".into(), - }); - }; + Ok::<_, Error>((Symbol::from(name), module)) + }) + .peekable(); - if *addr != AccountAddress::ZERO { - return Err(SourceVerificationError::InvalidModuleFailure { - name: named_module.name.to_string(), - message: "Self address already populated".to_string(), - }); - } + let runtime_id = match modules.peek() { + Some(Ok((_, module))) => *module.self_id().address(), - *addr = root; - Ok(module) -} + Some(Err(_)) => { + // SAFETY: The error type does not implement `Clone` so we need to take the + // error by value. We do that by calling `next` to take the value we just + // peeked, which we know is an error type. + errs.push(modules.next().unwrap().unwrap_err()); + continue; + }
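+ // (If `peek` returned `None`, the package object carried no modules at all; that is reported as an empty on-chain package in the next arm.)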
- let deps_compiled_units = units_for_toolchain(&compiled_package.deps_compiled_units) - .map_err(|e| SourceVerificationError::CannotCheckLocalModules { - package: compiled_package.compiled_package_info.package_name, - message: e.to_string(), - })?; + None => { + errs.push(Error::EmptyOnChainPackage(storage_id)); + continue; + } + }; - for (package, local_unit) in deps_compiled_units { - let m = &local_unit.unit; - let module = m.name; - let address = m.address.into_inner(); - if address == AccountAddress::ZERO { - continue; + for module in modules { + match module { + Ok((name, module)) => { + on_chain.modules.insert((runtime_id, name), module); + } + + Err(e) => { + errs.push(e); + continue; + } + } } - map.insert((address, module), (package, m.module.clone())); + if root.is_some_and(|r| r == storage_id) { + on_chain.on_chain_dependencies = Some(HashSet::from_iter( + linkage_table.into_values().map(|info| *info.upgraded_id), + )); + } } + + Ok(on_chain) } - let root_package = compiled_package.compiled_package_info.package_name; - match source_mode { - SourceMode::Skip => { /* nop */ } - - // Include the root compiled units, at their current addresses. - SourceMode::Verify => { - // Compile root modules with prior compiler if needed. - let root_compiled_units = { - let root_compiled_units = compiled_package - .root_compiled_units - .iter() - .map(|u| ("root".into(), u.clone())) - .collect::>(); - - units_for_toolchain(&root_compiled_units).map_err(|e| { - SourceVerificationError::CannotCheckLocalModules { - package: compiled_package.compiled_package_info.package_name, + /// Local representation of the modules in `package`. If the validation mode requires verifying + /// dependencies, then the dependencies' modules are also included in the output. + /// + /// For the purposes of this function, a module is considered a dependency if it is from a + /// different source package, and that source package has already been published. Conversely, a + /// module that is from a different source package, but that has not been published is + /// considered part of the root package. + /// + /// If the validation mode requires verifying the root package at a specific address, then the + /// modules from the root package will be expected at address `0x0` and this address will be + /// substituted with the specified address. + fn local(&self, package: &CompiledPackage) -> Result { + let package = &package.package; + let root_package = package.compiled_package_info.package_name; + let mut map = LocalModules::new(); + + if self.verify_deps() { + let deps_compiled_units = + units_for_toolchain(&package.deps_compiled_units).map_err(|e| { + Error::CannotCheckLocalModules { + package: package.compiled_package_info.package_name, message: e.to_string(), } - })? - }; + })?; - for (_, local_unit) in root_compiled_units { + for (package, local_unit) in deps_compiled_units { let m = &local_unit.unit; - let module = m.name; let address = m.address.into_inner(); + + // Skip modules with on 0x0 because they are treated as part of the root package, + // even if they are a source dependency. 
if address == AccountAddress::ZERO { - return Err(SourceVerificationError::InvalidModuleFailure { - name: module.to_string(), - message: "Can't verify unpublished source".to_string(), - }); + continue; } - map.insert((address, module), (root_package, m.module.clone())); + map.insert((address, module), (package, m.module.clone())); } } - // Include the root compiled units, and any unpublished dependencies with their - // addresses substituted - SourceMode::VerifyAt(root_address) => { - // Compile root modules with prior compiler if needed. - let root_compiled_units = { - let root_compiled_units = compiled_package - .root_compiled_units - .iter() - .map(|u| ("root".into(), u.clone())) - .collect::<Vec<_>>(); - - units_for_toolchain(&root_compiled_units).map_err(|e| { - SourceVerificationError::CannotCheckLocalModules { - package: compiled_package.compiled_package_info.package_name, - message: e.to_string(), - } - })? - }; + let Self::Root { at, .. } = self else { + return Ok(map); + }; - for (_, local_unit) in root_compiled_units { - let m = &local_unit.unit; + // Potentially rebuild according to the toolchain that the package was originally built + // with. + let root_compiled_units = units_for_toolchain( + &package + .root_compiled_units + .iter() + .map(|u| ("root".into(), u.clone())) + .collect(), + ) + .map_err(|e| Error::CannotCheckLocalModules { + package: package.compiled_package_info.package_name, + message: e.to_string(), + })?; - let module = m.name; - map.insert( - (root_address, module), - (root_package, substitute_root_address(m, root_address)?), - ); - } + // Add the root modules, potentially remapping 0x0 if we have been supplied an address to + // substitute with. + for (_, local_unit) in root_compiled_units { + let m = &local_unit.unit; + let module = m.name; + let address = m.address.into_inner(); + + let (address, compiled_module) = if let Some(root_address) = at { + (*root_address, substitute_root_address(m, *root_address)?) + } else if address == AccountAddress::ZERO { + return Err(Error::InvalidModuleFailure { + name: module.to_string(), + message: "Can't verify unpublished source".to_string(), + }); + } else { + (address, m.module.clone()) + }; - for (package, local_unit) in &compiled_package.deps_compiled_units { + map.insert((address, module), (root_package, compiled_module)); + } + + // If we have a root address to substitute, we need to find unpublished dependencies that + // would have gone into the root package as well.
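+ // (These are the modules still at address 0x0 in `deps_compiled_units`; published dependencies were already collected by the `verify_deps()` pass above.)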
+ if let Some(root_address) = at { + for (package, local_unit) in &package.deps_compiled_units { let m = &local_unit.unit; let module = m.name; let address = m.address.into_inner(); + if address != AccountAddress::ZERO { continue; } map.insert( - (root_address, module), - (*package, substitute_root_address(m, root_address)?), + (*root_address, module), + (*package, substitute_root_address(m, *root_address)?), ); } } - } - Ok(map) -} - -fn current_toolchain() -> ToolchainVersion { - ToolchainVersion { - compiler_version: CURRENT_COMPILER_VERSION.into(), - edition: Edition::LEGACY, /* does not matter, unused for current_toolchain */ - flavor: Flavor::Sui, /* does not matter, unused for current_toolchain */ + Ok(map) } } -fn legacy_toolchain() -> ToolchainVersion { - ToolchainVersion { - compiler_version: LEGACY_COMPILER_VERSION.into(), - edition: Edition::LEGACY, - flavor: Flavor::Sui, +impl<'a> BytecodeSourceVerifier<'a> { + pub fn new(rpc_client: &'a ReadApi) -> Self { + BytecodeSourceVerifier { rpc_client } } -} -/// Ensures `compiled_units` are compiled with the right compiler version, based on -/// Move.lock contents. This works by detecting if a compiled unit requires a prior compiler version: -/// - If so, download the compiler, recompile the unit, and return that unit in the result. -/// - If not, simply keep the current compiled unit. -fn units_for_toolchain( - compiled_units: &Vec<(PackageName, CompiledUnitWithSource)>, -) -> anyhow::Result<Vec<(PackageName, CompiledUnitWithSource)>> { - if std::env::var("SUI_RUN_TOOLCHAIN_BUILD").is_err() { - return Ok(compiled_units.clone()); - } - let mut package_version_map: HashMap<PackageName, (ToolchainVersion, Vec<CompiledUnitWithSource>)> = - HashMap::new(); - // First iterate over packages, mapping the required version for each package in `package_version_map`. - for (package, local_unit) in compiled_units { - if let Some((_, units)) = package_version_map.get_mut(package) { - // We've processed this package's required version. - units.push(local_unit.clone()); - continue; + /// Verify that the `compiled_package` matches its on-chain representation. + /// + /// See [`ValidationMode`] for more details on what is verified. + pub async fn verify( + &self, + package: &CompiledPackage, + mode: ValidationMode, + ) -> Result<(), AggregateError> { + if matches!( + mode, + ValidationMode::Root { + at: Some(AccountAddress::ZERO), + .. + } + ) { + return Err(Error::ZeroOnChainAddresSpecifiedFailure.into()); } - if sui_types::is_system_package(local_unit.unit.address.into_inner()) { - // System packages are always compiled with the current compiler. - package_version_map.insert(*package, (current_toolchain(), vec![local_unit.clone()])); - continue; - } + let local = mode.local(package)?; + let mut chain = mode.on_chain(package, self).await?; + let mut errs = vec![]; - let package_root = SourcePackageLayout::try_find_root(&local_unit.source_path)?; - let lock_file = package_root.join(SourcePackageLayout::Lock.path()); - if !lock_file.exists() { - // No lock file implies current compiler for this package. - package_version_map.insert(*package, (current_toolchain(), vec![local_unit.clone()])); - continue; + // Check that the transitive dependencies listed on chain match the dependencies listed in + // source code. Ignore 0x0 because this signifies an unpublished dependency.
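+ // (Anything left in `on_chain_deps` after this loop is a package that the on-chain linkage table references but source code does not; it is reported just below as `MissingDependencyInSourcePackage`.)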
+ if let Some(on_chain_deps) = &mut chain.on_chain_dependencies { + for dependency_id in dependency_addresses(package) { + if dependency_id != AccountAddress::ZERO && !on_chain_deps.remove(&dependency_id) { + errs.push(Error::MissingDependencyInLinkageTable(dependency_id)); + } + } } - let mut lock_file = File::open(lock_file)?; - let lock_version = Header::read(&mut lock_file)?.version; - if lock_version == PRE_TOOLCHAIN_MOVE_LOCK_VERSION { - // No need to attempt reading lock file toolchain - debug!("{package} on legacy compiler",); - package_version_map.insert(*package, (legacy_toolchain(), vec![local_unit.clone()])); - continue; + for on_chain_dep_id in chain.on_chain_dependencies.take().into_iter().flatten() { + errs.push(Error::MissingDependencyInSourcePackage(on_chain_dep_id)); } - // Read lock file toolchain info - lock_file.rewind()?; - let toolchain_version = ToolchainVersion::read(&mut lock_file)?; - match toolchain_version { - // No ToolchainVersion and new Move.lock version implies current compiler. - None => { - debug!("{package} on current compiler @ {CURRENT_COMPILER_VERSION}",); - package_version_map - .insert(*package, (current_toolchain(), vec![local_unit.clone()])); - } - // This dependency uses the current compiler. - Some(ToolchainVersion { - compiler_version, .. - }) if compiler_version == CURRENT_COMPILER_VERSION => { - debug!("{package} on current compiler @ {CURRENT_COMPILER_VERSION}",); - package_version_map - .insert(*package, (current_toolchain(), vec![local_unit.clone()])); - } - // This dependency needs a prior compiler. Mark it and compile. - Some(toolchain_version) => { - println!( - "{} {package} compiler @ {}", - "REQUIRE".bold().green(), - toolchain_version.compiler_version.yellow(), - ); - package_version_map.insert(*package, (toolchain_version, vec![local_unit.clone()])); + // Check that the bytecode of each local module matches its on-chain counterpart. + for ((address, module), (package, local_module)) in local { + let Some(on_chain_module) = chain.modules.remove(&(address, module)) else { + errs.push(Error::OnChainDependencyNotFound { package, module }); + continue; + }; + + if local_module != on_chain_module { + errs.push(Error::ModuleBytecodeMismatch { + address, + package, + module, + }) } } - } - let mut units = vec![]; - // Iterate over compiled units, and check if they need to be recompiled and replaced by a prior compiler's output.
- for (package, (toolchain_version, local_units)) in package_version_map { - if toolchain_version.compiler_version == CURRENT_COMPILER_VERSION { - let local_units: Vec<_> = local_units.iter().map(|u| (package, u.clone())).collect(); - units.extend(local_units); - continue; + for (address, module) in chain.modules.into_keys() { + errs.push(Error::LocalDependencyNotFound { address, module }); } - if local_units.is_empty() { - bail!("Expected one or more modules, but none found"); - } - let package_root = SourcePackageLayout::try_find_root(&local_units[0].source_path)?; - let install_dir = tempfile::tempdir()?; // place compiled packages in this temp dir, don't pollute this packages build dir - download_and_compile( - package_root.clone(), - &install_dir, - &toolchain_version, - &package, - )?; - - let compiled_unit_paths = vec![package_root.clone()]; - let compiled_units = find_filenames(&compiled_unit_paths, |path| { - extension_equals(path, MOVE_COMPILED_EXTENSION) - })?; - let build_path = install_dir - .path() - .join(CompiledPackageLayout::path(&CompiledPackageLayout::Root)) - .join(package.as_str()); - debug!("build path is {}", build_path.display()); - - // Add all units compiled with the previous compiler. - for bytecode_path in compiled_units { - info!("bytecode path {bytecode_path}, {package}"); - let local_unit = decode_bytecode_file(build_path.clone(), &package, &bytecode_path)?; - units.push((package, local_unit)) + if !errs.is_empty() { + return Err(AggregateError(errs)); } - } - Ok(units) -} -fn download_and_compile( - root: PathBuf, - install_dir: &TempDir, - ToolchainVersion { - compiler_version, - edition, - flavor, - }: &ToolchainVersion, - dep_name: &Symbol, -) -> anyhow::Result<()> { - let dest_dir = PathBuf::from_iter([&*MOVE_HOME, "binaries"]); // E.g., ~/.move/binaries - let dest_version = dest_dir.join(compiler_version); - let mut dest_canonical_path = dest_version.clone(); - dest_canonical_path.extend(["target", "release"]); - let mut dest_canonical_binary = dest_canonical_path.clone(); - - let platform = detect_platform(&root, compiler_version, &dest_canonical_path)?; - if platform == "windows-x86_64" { - dest_canonical_binary.push(CANONICAL_WIN_BINARY_NAME); - } else { - dest_canonical_binary.push(CANONICAL_UNIX_BINARY_NAME); + Ok(()) } - if !dest_canonical_binary.exists() { - // Check the platform and proceed if we can download a binary. If not, the user should follow error instructions to sideload the binary. - // Download if binary does not exist. - let mainnet_url = format!( - "https://github.com/MystenLabs/sui/releases/download/mainnet-v{compiler_version}/sui-mainnet-v{compiler_version}-{platform}.tgz", - ); - - println!( - "{} mainnet compiler @ {} (this may take a while)", - "DOWNLOADING".bold().green(), - compiler_version.yellow() - ); - - let mut response = match ureq::get(&mainnet_url).call() { - Ok(response) => response, - Err(ureq::Error::Status(404, _)) => { - println!( - "{} sui mainnet compiler {} not available, attempting to download testnet compiler release...", - "WARNING".bold().yellow(), - compiler_version.yellow() - ); - println!( - "{} testnet compiler @ {} (this may take a while)", - "DOWNLOADING".bold().green(), - compiler_version.yellow() - ); - let testnet_url = format!("https://github.com/MystenLabs/sui/releases/download/testnet-v{compiler_version}/sui-testnet-v{compiler_version}-{platform}.tgz"); - ureq::get(&testnet_url).call()? 
+ async fn pkg_for_address(&self, addr: AccountAddress) -> Result<SuiRawMovePackage, Error> { + // Move packages are specified with an AccountAddress, but are + // fetched from a sui network via sui_getObject, which takes an object ID + let obj_id = ObjectID::from(addr); + + // fetch the Sui object at the address specified for the package in the local resolution table + // if future packages with a large set of dependency packages prove too slow to verify, + // batched object fetching should be added to the ReadApi & used here + let obj_read = self + .rpc_client + .get_object_with_options(obj_id, SuiObjectDataOptions::new().with_bcs()) + .await + .map_err(Error::DependencyObjectReadFailure)?; + + let obj = obj_read + .into_object() + .map_err(Error::SuiObjectRefFailure)? + .bcs + .ok_or_else(|| { + Error::DependencyObjectReadFailure(SdkError::DataError( + "Bcs field is not found".to_string(), + )) + })?; + + match obj { + SuiRawData::Package(pkg) => Ok(pkg), + SuiRawData::MoveObject(move_obj) => { + Err(Error::ObjectFoundWhenPackageExpected(obj_id, move_obj)) } - Err(e) => return Err(e.into()), - }.into_reader(); - - let dest_tarball = dest_version.join(format!("{}.tgz", compiler_version)); - debug!("tarball destination: {} ", dest_tarball.display()); - if let Some(parent) = dest_tarball.parent() { - std::fs::create_dir_all(parent) - .map_err(|e| anyhow!("failed to create directory for tarball: {e}"))?; - } - let mut dest_file = File::create(&dest_tarball)?; - io::copy(&mut response, &mut dest_file)?; - - // Extract the tarball using the tar crate - let tar_gz = File::open(&dest_tarball)?; - let tar = flate2::read::GzDecoder::new(tar_gz); - let mut archive = Archive::new(tar); - archive - .unpack(&dest_version) - .map_err(|e| anyhow!("failed to untar compiler binary: {e}"))?; - - let mut dest_binary = dest_version.clone(); - dest_binary.extend(["target", "release"]); - if platform == "windows-x86_64" { - dest_binary.push(&format!("sui-{platform}.exe")); - } else { - dest_binary.push(&format!("sui-{platform}")); } - let dest_binary_os = OsStr::new(dest_binary.as_path()); - set_executable_permission(dest_binary_os)?; - std::fs::rename(dest_binary_os, dest_canonical_binary.clone())?; } - - debug!( - "{} move build --default-move-edition {} --default-move-flavor {} -p {} --install-dir {}", - dest_canonical_binary.display(), - edition.to_string().as_str(), - flavor.to_string().as_str(), - root.display(), - install_dir.path().display(), - ); - info!( - "{} {} (compiler @ {})", - "BUILDING".bold().green(), - dep_name.as_str(), - compiler_version.yellow() - ); - Command::new(dest_canonical_binary) - .args([ - OsStr::new("move"), - OsStr::new("build"), - OsStr::new("--default-move-edition"), - OsStr::new(edition.to_string().as_str()), - OsStr::new("--default-move-flavor"), - OsStr::new(flavor.to_string().as_str()), - OsStr::new("-p"), - OsStr::new(root.as_path()), - OsStr::new("--install-dir"), - OsStr::new(install_dir.path()), - ]) - .output() - .map_err(|e| { - anyhow!("failed to build package from compiler binary {compiler_version}: {e}",) - })?; - Ok(()) } -fn detect_platform( - package_path: &Path, - compiler_version: &String, - dest_dir: &Path, -) -> anyhow::Result<String> { - let s = match (std::env::consts::OS, std::env::consts::ARCH) { - ("macos", "aarch64") => "macos-arm64", - ("macos", "x86_64") => "macos-x86_64", - ("linux", "x86_64") => "ubuntu-x86_64", - ("windows", "x86_64") => "windows-x86_64", - (os, arch) => { - let mut binary_name = CANONICAL_UNIX_BINARY_NAME; - if os == "windows" { - binary_name =
CANONICAL_WIN_BINARY_NAME; - }; - bail!( - "The package {} needs to be built with sui compiler version {compiler_version} but there \ - is no binary release available to download for your platform:\n\ - Operating System: {os}\n\ - Architecture: {arch}\n\ - You can manually put a {binary_name} binary for your platform in {} and rerun your command to continue.", - package_path.display(), - dest_dir.display(), - ) - } +fn substitute_root_address( + named_module: &NamedCompiledModule, + root: AccountAddress, +) -> Result<CompiledModule, Error> { + let mut module = named_module.module.clone(); + let address_idx = module.self_handle().address; + + let Some(addr) = module.address_identifiers.get_mut(address_idx.0 as usize) else { + return Err(Error::InvalidModuleFailure { + name: named_module.name.to_string(), + message: "Self address field missing".into(), + }); }; - Ok(s.into()) -} -#[cfg(unix)] -fn set_executable_permission(path: &OsStr) -> anyhow::Result<()> { - use std::fs; - use std::os::unix::prelude::PermissionsExt; - let mut perms = fs::metadata(path)?.permissions(); - perms.set_mode(0o755); - fs::set_permissions(path, perms)?; - Ok(()) -} + if *addr != AccountAddress::ZERO { + return Err(Error::InvalidModuleFailure { + name: named_module.name.to_string(), + message: "Self address already populated".to_string(), + }); + } -#[cfg(not(unix))] -fn set_executable_permission(path: &OsStr) -> anyhow::Result<()> { - Command::new("icacls") - .args([path, OsStr::new("/grant"), OsStr::new("Everyone:(RX)")]) - .status()?; - Ok(()) + *addr = root; + Ok(module) } -fn decode_bytecode_file( - root_path: PathBuf, - package_name: &Symbol, - bytecode_path_str: &str, -) -> anyhow::Result<CompiledUnitWithSource> { - let package_name_opt = Some(*package_name); - let bytecode_path = Path::new(bytecode_path_str); - let path_to_file = CompiledPackageLayout::path_to_file_after_category(bytecode_path); - let bytecode_bytes = std::fs::read(bytecode_path)?; - let source_map = source_map_from_file( - &root_path - .join(CompiledPackageLayout::SourceMaps.path()) - .join(&path_to_file) - .with_extension(SOURCE_MAP_EXTENSION), - )?; - let source_path = &root_path - .join(CompiledPackageLayout::Sources.path()) - .join(path_to_file) - .with_extension(MOVE_EXTENSION); - ensure!( - source_path.is_file(), - "Error decoding package: Unable to find corresponding source file for '{bytecode_path_str}' in package {package_name}" - ); - let module = CompiledModule::deserialize_with_defaults(&bytecode_bytes)?; - let (address_bytes, module_name) = { - let id = module.self_id(); - let parsed_addr = NumericalAddress::new( - id.address().into_bytes(), - move_compiler::shared::NumberFormat::Hex, - ); - let module_name = FileName::from(id.name().as_str()); - (parsed_addr, module_name) - }; - let unit = NamedCompiledModule { - package_name: package_name_opt, - address: address_bytes, - name: module_name, - module, - source_map, - address_name: None, - }; - Ok(CompiledUnitWithSource { - unit, - source_path: source_path.clone(), - }) +/// The on-chain addresses for a source package's dependencies +fn dependency_addresses(package: &CompiledPackage) -> impl Iterator<Item = AccountAddress> + '_ { + package.dependency_ids.published.values().map(|id| **id) } diff --git a/crates/sui-source-validation/src/tests.rs b/crates/sui-source-validation/src/tests.rs index 8940636b06f7b..f865c651c08c2 100644 --- a/crates/sui-source-validation/src/tests.rs +++ b/crates/sui-source-validation/src/tests.rs @@ -21,7 +21,8 @@ use sui_types::{ }; use test_cluster::TestClusterBuilder; -use crate::{BytecodeSourceVerifier, SourceMode,
CURRENT_COMPILER_VERSION}; +use crate::toolchain::CURRENT_COMPILER_VERSION; +use crate::{BytecodeSourceVerifier, ValidationMode}; #[tokio::test] async fn successful_verification() -> anyhow::Result<()> { @@ -53,30 +54,27 @@ async fn successful_verification() -> anyhow::Result<()> { let client = context.get_client().await?; let verifier = BytecodeSourceVerifier::new(client.read_api()); - // Skip deps and root + // Verify root without updating the address verifier - .verify_package(&a_pkg, /* verify_deps */ false, SourceMode::Skip) + .verify(&b_pkg, ValidationMode::root()) .await .unwrap(); - // Verify root without updating the address + // Verify deps but skip root verifier - .verify_package(&b_pkg, /* verify_deps */ false, SourceMode::Verify) + .verify(&a_pkg, ValidationMode::deps()) .await .unwrap(); - // Verify deps but skip root - verifier.verify_package_deps(&a_pkg).await.unwrap(); - // Skip deps but verify root verifier - .verify_package_root(&a_pkg, a_ref.0.into()) + .verify(&a_pkg, ValidationMode::root_at(a_ref.0.into())) .await .unwrap(); // Verify both deps and root verifier - .verify_package_root_and_deps(&a_pkg, a_ref.0.into()) + .verify(&a_pkg, ValidationMode::root_and_deps_at(a_ref.0.into())) .await .unwrap(); @@ -102,7 +100,7 @@ async fn successful_verification_unpublished_deps() -> anyhow::Result<()> { // Verify the root package which now includes dependency modules verifier - .verify_package_root(&a_pkg, a_ref.0.into()) + .verify(&a_pkg, ValidationMode::root_at(a_ref.0.into())) .await .unwrap(); @@ -135,11 +133,8 @@ async fn successful_verification_module_ordering() -> anyhow::Result<()> { }; let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); - - let verify_deps = false; - verifier - .verify_package(&z_pkg, verify_deps, SourceMode::Verify) + BytecodeSourceVerifier::new(client.read_api()) + .verify(&z_pkg, ValidationMode::root()) .await .unwrap(); @@ -176,14 +171,16 @@ async fn successful_verification_upgrades() -> anyhow::Result<()> { let verifier = BytecodeSourceVerifier::new(client.read_api()); // Verify the upgraded package b-v2 as the root. - let verify_deps = false; verifier - .verify_package(&b_pkg, verify_deps, SourceMode::Verify) + .verify(&b_pkg, ValidationMode::root()) .await .unwrap(); // Verify the upgraded package b-v2 as a dep of e. - verifier.verify_package_deps(&e_pkg).await.unwrap(); + verifier + .verify(&e_pkg, ValidationMode::deps()) + .await + .unwrap(); Ok(()) } @@ -208,12 +205,13 @@ async fn fail_verification_bad_address() -> anyhow::Result<()> { }; let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); - let expected = expect!["On-chain address cannot be zero"]; expected.assert_eq( - &verifier - .verify_package_root_and_deps(&a_pkg, AccountAddress::ZERO) + &BytecodeSourceVerifier::new(client.read_api()) + .verify( + &a_pkg, + ValidationMode::root_and_deps_at(AccountAddress::ZERO), + ) .await .unwrap_err() .to_string(), @@ -234,14 +232,13 @@ async fn fail_to_verify_unpublished_root() -> anyhow::Result<()> { }; let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); // Trying to verify the root package, which hasn't been published -- this is going to fail // because there is no on-chain package to verify against. 
let expected = expect!["Invalid module b with error: Can't verify unpublished source"]; expected.assert_eq( - &verifier - .verify_package(&b_pkg, /* verify_deps */ false, SourceMode::Verify) + &BytecodeSourceVerifier::new(client.read_api()) + .verify(&b_pkg, ValidationMode::root()) .await .unwrap_err() .to_string(), @@ -320,7 +317,7 @@ async fn package_not_found() -> anyhow::Result<()> { let client = context.get_client().await?; let verifier = BytecodeSourceVerifier::new(client.read_api()); - let Err(err) = verifier.verify_package_deps(&a_pkg).await else { + let Err(err) = verifier.verify(&a_pkg, ValidationMode::deps()).await else { panic!("Expected verification to fail"); }; @@ -331,7 +328,7 @@ async fn package_not_found() -> anyhow::Result<()> { let package_root = AccountAddress::random(); stable_addrs.insert(SuiAddress::from(package_root), ""); let Err(err) = verifier - .verify_package_root_and_deps(&a_pkg, package_root) + .verify(&a_pkg, ValidationMode::root_and_deps_at(package_root)) .await else { panic!("Expected verification to fail"); @@ -345,7 +342,10 @@ async fn package_not_found() -> anyhow::Result<()> { let package_root = AccountAddress::random(); stable_addrs.insert(SuiAddress::from(package_root), ""); - let Err(err) = verifier.verify_package_root(&a_pkg, package_root).await else { + let Err(err) = verifier + .verify(&a_pkg, ValidationMode::root_at(package_root)) + .await + else { panic!("Expected verification to fail"); }; @@ -368,13 +368,12 @@ async fn dependency_is_an_object() -> anyhow::Result<()> { let a_src = copy_published_package(&a_pkg_fixtures, "a", SuiAddress::ZERO).await?; compile_package(a_src) }; - let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); + let client = context.get_client().await?; let expected = expect!["Dependency ID contains a Sui object, not a Move package: 0x0000000000000000000000000000000000000000000000000000000000000005"]; expected.assert_eq( - &verifier - .verify_package_deps(&a_pkg) + &BytecodeSourceVerifier::new(client.read_api()) + .verify(&a_pkg, ValidationMode::deps()) .await .unwrap_err() .to_string(), @@ -401,10 +400,12 @@ async fn module_not_found_on_chain() -> anyhow::Result<()> { let a_src = copy_published_package(&a_pkg_fixtures, "a", SuiAddress::ZERO).await?; compile_package(a_src) }; - let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); - let Err(err) = verifier.verify_package_deps(&a_pkg).await else { + let client = context.get_client().await?; + let Err(err) = BytecodeSourceVerifier::new(client.read_api()) + .verify(&a_pkg, ValidationMode::deps()) + .await + else { panic!("Expected verification to fail"); }; @@ -437,9 +438,10 @@ async fn module_not_found_locally() -> anyhow::Result<()> { }; let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); - - let Err(err) = verifier.verify_package_deps(&a_pkg).await else { + let Err(err) = BytecodeSourceVerifier::new(client.read_api()) + .verify(&a_pkg, ValidationMode::deps()) + .await + else { panic!("Expected verification to fail"); }; @@ -492,14 +494,17 @@ async fn module_bytecode_mismatch() -> anyhow::Result<()> { let client = context.get_client().await?; let verifier = BytecodeSourceVerifier::new(client.read_api()); - let Err(err) = verifier.verify_package_deps(&a_pkg).await else { + let Err(err) = verifier.verify(&a_pkg, ValidationMode::deps()).await else { panic!("Expected verification to fail"); }; let expected = 
expect!["Local dependency did not match its on-chain version at ::b::c"]; expected.assert_eq(&sanitize_id(err.to_string(), &stable_addrs)); - let Err(err) = verifier.verify_package_root(&a_pkg, a_addr.into()).await else { + let Err(err) = verifier + .verify(&a_pkg, ValidationMode::root_at(a_addr.into())) + .await + else { panic!("Expected verification to fail"); }; @@ -509,6 +514,73 @@ async fn module_bytecode_mismatch() -> anyhow::Result<()> { Ok(()) } +#[tokio::test] +async fn linkage_differs() -> anyhow::Result<()> { + let mut cluster = TestClusterBuilder::new().build().await; + let context = &mut cluster.wallet; + + let b_v1_fixtures = tempfile::tempdir()?; + let (b_v1, b_cap) = { + let b_src = copy_published_package(&b_v1_fixtures, "b", SuiAddress::ZERO).await?; + publish_package(context, b_src).await + }; + + let b_v2_fixtures = tempfile::tempdir()?; + let b_v2 = { + let b_src = + copy_upgraded_package(&b_v2_fixtures, "b-v2", b_v1.0.into(), SuiAddress::ZERO).await?; + upgrade_package(context, b_v1.0, b_cap.0, b_src).await + }; + + // Publish b-v2 a second time, to create a third version of the package that is otherwise + byte-for-byte identical with the second version; + let b_v3_fixtures = tempfile::tempdir()?; + let b_v3 = { + let b_src = + copy_upgraded_package(&b_v3_fixtures, "b-v2", b_v2.0.into(), SuiAddress::ZERO).await?; + upgrade_package(context, b_v2.0, b_cap.0, b_src).await + }; + + // Publish E pointing at v2 of B. + let e_v1_fixtures = tempfile::tempdir()?; + let (e_v1, _) = { + copy_upgraded_package(&e_v1_fixtures, "b-v2", b_v2.0.into(), b_v1.0.into()).await?; + let e_src = copy_published_package(&e_v1_fixtures, "e", SuiAddress::ZERO).await?; + publish_package(context, e_src).await + }; + + // Compile E pointing at v3 of B, which is byte-for-byte identical with v2, but nevertheless + has a different address. + let e_v2_fixtures = tempfile::tempdir()?; + let e_pkg = { + copy_upgraded_package(&e_v2_fixtures, "b-v2", b_v3.0.into(), b_v1.0.into()).await?; + let e_src = copy_published_package(&e_v2_fixtures, "e", e_v1.0.into()).await?; + compile_package(e_src) + }; + + let client = context.get_client().await?; + let stable_ids = HashMap::from_iter([ + (b_v1.0.into(), ""), + (b_v2.0.into(), ""), + (b_v3.0.into(), ""), + ]); + + let error = BytecodeSourceVerifier::new(client.read_api()) + .verify(&e_pkg, ValidationMode::root()) + .await + .unwrap_err() + .to_string(); + + let expected = expect![[r#" + Multiple source verification errors found: + + - Source package depends on which is not in the linkage table.
+ - On-chain package depends on which is not a source dependency."#]]; + expected.assert_eq(&sanitize_id(error, &stable_ids)); + + Ok(()) +} + #[tokio::test] async fn multiple_failures() -> anyhow::Result<()> { let mut cluster = TestClusterBuilder::new().build().await; @@ -547,9 +619,10 @@ async fn multiple_failures() -> anyhow::Result<()> { }; let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); - - let Err(err) = verifier.verify_package_deps(&d_pkg).await else { + let Err(err) = BytecodeSourceVerifier::new(client.read_api()) + .verify(&d_pkg, ValidationMode::deps()) + .await + else { panic!("Expected verification to fail"); }; @@ -585,10 +658,12 @@ async fn successful_versioned_dependency_verification() -> anyhow::Result<()> { }; let client = context.get_client().await?; - let verifier = BytecodeSourceVerifier::new(client.read_api()); // Verify versioned dependency - verifier.verify_package_deps(&a_pkg).await.unwrap(); + BytecodeSourceVerifier::new(client.read_api()) + .verify(&a_pkg, ValidationMode::deps()) + .await + .unwrap(); Ok(()) } diff --git a/crates/sui-source-validation/src/toolchain.rs b/crates/sui-source-validation/src/toolchain.rs new file mode 100644 index 0000000000000..54f87f2e1b83a --- /dev/null +++ b/crates/sui-source-validation/src/toolchain.rs @@ -0,0 +1,387 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::{ + collections::HashMap, + ffi::OsStr, + fs::File, + io::{self, Seek}, + path::{Path, PathBuf}, + process::Command, +}; + +use anyhow::{anyhow, bail, ensure}; +use colored::Colorize; +use move_binary_format::CompiledModule; +use move_bytecode_source_map::utils::source_map_from_file; +use move_command_line_common::{ + env::MOVE_HOME, + files::{ + extension_equals, find_filenames, MOVE_COMPILED_EXTENSION, MOVE_EXTENSION, + SOURCE_MAP_EXTENSION, + }, +}; +use move_compiler::{ + compiled_unit::NamedCompiledModule, + editions::{Edition, Flavor}, + shared::{files::FileName, NumericalAddress}, +}; +use move_package::{ + compilation::{ + compiled_package::CompiledUnitWithSource, package_layout::CompiledPackageLayout, + }, + lock_file::schema::{Header, ToolchainVersion}, + source_package::{layout::SourcePackageLayout, parsed_manifest::PackageName}, +}; +use move_symbol_pool::Symbol; +use tar::Archive; +use tempfile::TempDir; +use tracing::{debug, info}; + +pub(crate) const CURRENT_COMPILER_VERSION: &str = env!("CARGO_PKG_VERSION"); +const LEGACY_COMPILER_VERSION: &str = CURRENT_COMPILER_VERSION; // TODO: update this when Move 2024 is released +const PRE_TOOLCHAIN_MOVE_LOCK_VERSION: u64 = 0; // Used to detect lockfiles pre-toolchain versioning support +const CANONICAL_UNIX_BINARY_NAME: &str = "sui"; +const CANONICAL_WIN_BINARY_NAME: &str = "sui.exe"; + +pub(crate) fn current_toolchain() -> ToolchainVersion { + ToolchainVersion { + compiler_version: CURRENT_COMPILER_VERSION.into(), + edition: Edition::LEGACY, /* does not matter, unused for current_toolchain */ + flavor: Flavor::Sui, /* does not matter, unused for current_toolchain */ + } +} + +pub(crate) fn legacy_toolchain() -> ToolchainVersion { + ToolchainVersion { + compiler_version: LEGACY_COMPILER_VERSION.into(), + edition: Edition::LEGACY, + flavor: Flavor::Sui, + } +} + +/// Ensures `compiled_units` are compiled with the right compiler version, based on +/// Move.lock contents. 
This works by detecting if a compiled unit requires a prior compiler version: +/// - If so, download the compiler, recompile the unit, and return that unit in the result. +/// - If not, simply keep the current compiled unit. +pub(crate) fn units_for_toolchain( + compiled_units: &Vec<(PackageName, CompiledUnitWithSource)>, +) -> anyhow::Result<Vec<(PackageName, CompiledUnitWithSource)>> { + if std::env::var("SUI_RUN_TOOLCHAIN_BUILD").is_err() { + return Ok(compiled_units.clone()); + } + let mut package_version_map: HashMap<PackageName, (ToolchainVersion, Vec<CompiledUnitWithSource>)> = + HashMap::new(); + // First iterate over packages, mapping the required version for each package in `package_version_map`. + for (package, local_unit) in compiled_units { + if let Some((_, units)) = package_version_map.get_mut(package) { + // We've processed this package's required version. + units.push(local_unit.clone()); + continue; + } + + if sui_types::is_system_package(local_unit.unit.address.into_inner()) { + // System packages are always compiled with the current compiler. + package_version_map.insert(*package, (current_toolchain(), vec![local_unit.clone()])); + continue; + } + + let package_root = SourcePackageLayout::try_find_root(&local_unit.source_path)?; + let lock_file = package_root.join(SourcePackageLayout::Lock.path()); + if !lock_file.exists() { + // No lock file implies current compiler for this package. + package_version_map.insert(*package, (current_toolchain(), vec![local_unit.clone()])); + continue; + } + + let mut lock_file = File::open(lock_file)?; + let lock_version = Header::read(&mut lock_file)?.version; + if lock_version == PRE_TOOLCHAIN_MOVE_LOCK_VERSION { + // No need to attempt reading lock file toolchain + debug!("{package} on legacy compiler",); + package_version_map.insert(*package, (legacy_toolchain(), vec![local_unit.clone()])); + continue; + } + + // Read lock file toolchain info + lock_file.rewind()?; + let toolchain_version = ToolchainVersion::read(&mut lock_file)?; + match toolchain_version { + // No ToolchainVersion and new Move.lock version implies current compiler. + None => { + debug!("{package} on current compiler @ {CURRENT_COMPILER_VERSION}",); + package_version_map + .insert(*package, (current_toolchain(), vec![local_unit.clone()])); + } + // This dependency uses the current compiler. + Some(ToolchainVersion { + compiler_version, .. + }) if compiler_version == CURRENT_COMPILER_VERSION => { + debug!("{package} on current compiler @ {CURRENT_COMPILER_VERSION}",); + package_version_map + .insert(*package, (current_toolchain(), vec![local_unit.clone()])); + } + // This dependency needs a prior compiler. Mark it and compile. + Some(toolchain_version) => { + println!( + "{} {package} compiler @ {}", + "REQUIRE".bold().green(), + toolchain_version.compiler_version.yellow(), + ); + package_version_map.insert(*package, (toolchain_version, vec![local_unit.clone()])); + } + } + } + + let mut units = vec![]; + // Iterate over compiled units, and check if they need to be recompiled and replaced by a prior compiler's output.
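+ // (For example, a package whose Move.lock records `compiler-version = "1.27.0"` while this + // binary reports a different CURRENT_COMPILER_VERSION is rebuilt below via + // `download_and_compile`; the version string here is illustrative.)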
+ for (package, (toolchain_version, local_units)) in package_version_map { + if toolchain_version.compiler_version == CURRENT_COMPILER_VERSION { + let local_units: Vec<_> = local_units.iter().map(|u| (package, u.clone())).collect(); + units.extend(local_units); + continue; + } + + if local_units.is_empty() { + bail!("Expected one or more modules, but none found"); + } + let package_root = SourcePackageLayout::try_find_root(&local_units[0].source_path)?; + let install_dir = tempfile::tempdir()?; // place compiled packages in this temp dir, don't pollute this package's build dir + download_and_compile( + package_root.clone(), + &install_dir, + &toolchain_version, + &package, + )?; + + let compiled_unit_paths = vec![package_root.clone()]; + let compiled_units = find_filenames(&compiled_unit_paths, |path| { + extension_equals(path, MOVE_COMPILED_EXTENSION) + })?; + let build_path = install_dir + .path() + .join(CompiledPackageLayout::path(&CompiledPackageLayout::Root)) + .join(package.as_str()); + debug!("build path is {}", build_path.display()); + + // Add all units compiled with the previous compiler. + for bytecode_path in compiled_units { + info!("bytecode path {bytecode_path}, {package}"); + let local_unit = decode_bytecode_file(build_path.clone(), &package, &bytecode_path)?; + units.push((package, local_unit)) + } + } + Ok(units) +} + +fn download_and_compile( + root: PathBuf, + install_dir: &TempDir, + ToolchainVersion { + compiler_version, + edition, + flavor, + }: &ToolchainVersion, + dep_name: &Symbol, +) -> anyhow::Result<()> { + let dest_dir = PathBuf::from_iter([&*MOVE_HOME, "binaries"]); // E.g., ~/.move/binaries + let dest_version = dest_dir.join(compiler_version); + let mut dest_canonical_path = dest_version.clone(); + dest_canonical_path.extend(["target", "release"]); + let mut dest_canonical_binary = dest_canonical_path.clone(); + + let platform = detect_platform(&root, compiler_version, &dest_canonical_path)?; + if platform == "windows-x86_64" { + dest_canonical_binary.push(CANONICAL_WIN_BINARY_NAME); + } else { + dest_canonical_binary.push(CANONICAL_UNIX_BINARY_NAME); + } + + if !dest_canonical_binary.exists() { + // Check the platform and proceed if we can download a binary. If not, the user should follow error instructions to sideload the binary. + // Download if binary does not exist. + let mainnet_url = format!( + "https://github.com/MystenLabs/sui/releases/download/mainnet-v{compiler_version}/sui-mainnet-v{compiler_version}-{platform}.tgz", + ); + + println!( + "{} mainnet compiler @ {} (this may take a while)", + "DOWNLOADING".bold().green(), + compiler_version.yellow() + ); + + let mut response = match ureq::get(&mainnet_url).call() { + Ok(response) => response, + Err(ureq::Error::Status(404, _)) => { + println!( + "{} sui mainnet compiler {} not available, attempting to download testnet compiler release...", + "WARNING".bold().yellow(), + compiler_version.yellow() + ); + println!( + "{} testnet compiler @ {} (this may take a while)", + "DOWNLOADING".bold().green(), + compiler_version.yellow() + ); + let testnet_url = format!("https://github.com/MystenLabs/sui/releases/download/testnet-v{compiler_version}/sui-testnet-v{compiler_version}-{platform}.tgz"); + ureq::get(&testnet_url).call()?
+ } + Err(e) => return Err(e.into()), + }.into_reader(); + + let dest_tarball = dest_version.join(format!("{}.tgz", compiler_version)); + debug!("tarball destination: {} ", dest_tarball.display()); + if let Some(parent) = dest_tarball.parent() { + std::fs::create_dir_all(parent) + .map_err(|e| anyhow!("failed to create directory for tarball: {e}"))?; + } + let mut dest_file = File::create(&dest_tarball)?; + io::copy(&mut response, &mut dest_file)?; + + // Extract the tarball using the tar crate + let tar_gz = File::open(&dest_tarball)?; + let tar = flate2::read::GzDecoder::new(tar_gz); + let mut archive = Archive::new(tar); + archive + .unpack(&dest_version) + .map_err(|e| anyhow!("failed to untar compiler binary: {e}"))?; + + let mut dest_binary = dest_version.clone(); + dest_binary.extend(["target", "release"]); + if platform == "windows-x86_64" { + dest_binary.push(&format!("sui-{platform}.exe")); + } else { + dest_binary.push(&format!("sui-{platform}")); + } + let dest_binary_os = OsStr::new(dest_binary.as_path()); + set_executable_permission(dest_binary_os)?; + std::fs::rename(dest_binary_os, dest_canonical_binary.clone())?; + } + + debug!( + "{} move build --default-move-edition {} --default-move-flavor {} -p {} --install-dir {}", + dest_canonical_binary.display(), + edition.to_string().as_str(), + flavor.to_string().as_str(), + root.display(), + install_dir.path().display(), + ); + info!( + "{} {} (compiler @ {})", + "BUILDING".bold().green(), + dep_name.as_str(), + compiler_version.yellow() + ); + Command::new(dest_canonical_binary) + .args([ + OsStr::new("move"), + OsStr::new("build"), + OsStr::new("--default-move-edition"), + OsStr::new(edition.to_string().as_str()), + OsStr::new("--default-move-flavor"), + OsStr::new(flavor.to_string().as_str()), + OsStr::new("-p"), + OsStr::new(root.as_path()), + OsStr::new("--install-dir"), + OsStr::new(install_dir.path()), + ]) + .output() + .map_err(|e| { + anyhow!("failed to build package from compiler binary {compiler_version}: {e}",) + })?; + Ok(()) +} + +fn detect_platform( + package_path: &Path, + compiler_version: &String, + dest_dir: &Path, +) -> anyhow::Result<String> { + let s = match (std::env::consts::OS, std::env::consts::ARCH) { + ("macos", "aarch64") => "macos-arm64", + ("macos", "x86_64") => "macos-x86_64", + ("linux", "x86_64") => "ubuntu-x86_64", + ("windows", "x86_64") => "windows-x86_64", + (os, arch) => { + let mut binary_name = CANONICAL_UNIX_BINARY_NAME; + if os == "windows" { + binary_name = CANONICAL_WIN_BINARY_NAME; + }; + bail!( + "The package {} needs to be built with sui compiler version {compiler_version} but there \ + is no binary release available to download for your platform:\n\ + Operating System: {os}\n\ + Architecture: {arch}\n\ + You can manually put a {binary_name} binary for your platform in {} and rerun your command to continue.", + package_path.display(), + dest_dir.display(), + ) + } + }; + Ok(s.into()) +} + +#[cfg(unix)] +fn set_executable_permission(path: &OsStr) -> anyhow::Result<()> { + use std::fs; + use std::os::unix::prelude::PermissionsExt; + let mut perms = fs::metadata(path)?.permissions(); + perms.set_mode(0o755); + fs::set_permissions(path, perms)?; + Ok(()) +} + +#[cfg(not(unix))] +fn set_executable_permission(path: &OsStr) -> anyhow::Result<()> { + Command::new("icacls") + .args([path, OsStr::new("/grant"), OsStr::new("Everyone:(RX)")]) + .status()?; + Ok(()) +} + +fn decode_bytecode_file( + root_path: PathBuf, + package_name: &Symbol, + bytecode_path_str: &str, +) -> anyhow::Result<CompiledUnitWithSource> { + let
package_name_opt = Some(*package_name); + let bytecode_path = Path::new(bytecode_path_str); + let path_to_file = CompiledPackageLayout::path_to_file_after_category(bytecode_path); + let bytecode_bytes = std::fs::read(bytecode_path)?; + let source_map = source_map_from_file( + &root_path + .join(CompiledPackageLayout::SourceMaps.path()) + .join(&path_to_file) + .with_extension(SOURCE_MAP_EXTENSION), + )?; + let source_path = &root_path + .join(CompiledPackageLayout::Sources.path()) + .join(path_to_file) + .with_extension(MOVE_EXTENSION); + ensure!( + source_path.is_file(), + "Error decoding package: Unable to find corresponding source file for '{bytecode_path_str}' in package {package_name}" + ); + let module = CompiledModule::deserialize_with_defaults(&bytecode_bytes)?; + let (address_bytes, module_name) = { + let id = module.self_id(); + let parsed_addr = NumericalAddress::new( + id.address().into_bytes(), + move_compiler::shared::NumberFormat::Hex, + ); + let module_name = FileName::from(id.name().as_str()); + (parsed_addr, module_name) + }; + let unit = NamedCompiledModule { + package_name: package_name_opt, + address: address_bytes, + name: module_name, + module, + source_map, + address_name: None, + }; + Ok(CompiledUnitWithSource { + unit, + source_path: source_path.clone(), + }) +} diff --git a/crates/sui-storage/Cargo.toml index e34973c342c0b..ff541a7b7e962 100644 --- a/crates/sui-storage/Cargo.toml +++ b/crates/sui-storage/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] integer-encoding.workspace = true async-trait.workspace = true diff --git a/crates/sui-storage/src/http_key_value_store.rs index 9232bdfb1bc50..461a441cd2a02 100644 --- a/crates/sui-storage/src/http_key_value_store.rs +++ b/crates/sui-storage/src/http_key_value_store.rs @@ -148,12 +148,7 @@ impl HttpKVStore { async fn multi_fetch(&self, uris: Vec<Url>) -> Vec<Result<Option<Bytes>>> { let uris_vec = uris.to_vec(); - let fetches = stream::iter( - uris_vec - .into_iter() - .enumerate() - .map(|(_i, url)| self.fetch(url)), - ); + let fetches = stream::iter(uris_vec.into_iter().map(|url| self.fetch(url))); fetches.buffered(uris.len()).collect::<Vec<_>>().await } @@ -221,7 +216,7 @@ fn multi_split_slice<'a, T>(slice: &'a [T], lengths: &'a [usize]) -> Vec<&'a [T] .collect() } -fn deser_check_digest( +fn deser_check_digest( digest: &D, bytes: &Bytes, get_expected_digest: impl FnOnce(&T) -> D, diff --git a/crates/sui-swarm-config/Cargo.toml index 8c6561469549f..12a367ccbd6c4 100644 --- a/crates/sui-swarm-config/Cargo.toml +++ b/crates/sui-swarm-config/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anemo.workspace = true anyhow.workspace = true diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap index c24f52aa519d9..c780c2c68a94a 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap @@ -6,7 +6,7 @@ ssfn_config_info: ~ validator_config_info: ~ parameters: chain_start_timestamp_ms: 0 - protocol_version: 54 + protocol_version: 55 allow_insertion_of_extra_objects: true epoch_duration_ms: 86400000
stake_subsidy_start_epoch: 0 @@ -49,3 +49,4 @@ accounts: - 30000000000000000 - 30000000000000000 - 30000000000000000 + diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__network_config_snapshot_matches.snap b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__network_config_snapshot_matches.snap index 02d6693f87ab4..929df5b6b20b9 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__network_config_snapshot_matches.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__network_config_snapshot_matches.snap @@ -100,14 +100,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -241,14 +245,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -382,14 +390,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -523,14 +535,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -664,14 +680,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -805,14 +825,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -946,14 +970,18 @@ validator_configs: Mainnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Testnet: - Apple - "AwsTenant-region:us-east-1-tenant_id:us-east-1_qPsZxYqd8" + - Credenza3 - Facebook - Google + - KarrierOne - Twitch Unknown: - Apple @@ -997,3 +1025,4 @@ account_keys: - mfPjCoE6SX0Sl84MnmNS/LS+tfPpkn7I8tziuk2g0WM= - 5RWlYF22jS9i76zLl8jP2D3D8GC5ht+IP1dWUBGZxi8= genesis: "[fake genesis]" + diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap index dd9ade4ff9987..8dd452630584e 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap @@ -3,7 +3,7 @@ source: 
crates/sui-swarm-config/tests/snapshot_tests.rs expression: genesis.sui_system_object().into_genesis_version_for_tooling() --- epoch: 0 -protocol_version: 54 +protocol_version: 55 system_state_version: 1 validators: total_stake: 20000000000000000 @@ -240,13 +240,13 @@ validators: next_epoch_worker_address: ~ extra_fields: id: - id: "0x914faf0978a3304bc5eca36ef861f4fb5c8e2fd1d7531fac16fb59c42f2444e0" + id: "0x5c69dd788c7563c072656a524608262510732d402034ca53215f41e32dc3db49" size: 0 voting_power: 10000 - operation_cap_id: "0xdd0a37c4cb272a3ef7596ed7a3a3d39e3043ac5694ea85679401afed5a579226" + operation_cap_id: "0x5fa2be1df8bd48729b78bf6edd46a0e9ea28fb6c770cb50ac47f5df471f753b1" gas_price: 1000 staking_pool: - id: "0x152174c0dc497a9ed5bf38916a6e2a83ed94f7fcac9ad3d2a394d42ac2f1535c" + id: "0x21201b029962e18d8d5564676500afd842684e1e0c4f288c6eaa90e0b9a3c3ef" activation_epoch: 0 deactivation_epoch: ~ sui_balance: 20000000000000000 @@ -254,14 +254,14 @@ validators: value: 0 pool_token_balance: 20000000000000000 exchange_rates: - id: "0x813360f00f57eeb9d7606b042e40cd894600da18cfd2b09c92850429266d27a1" + id: "0x2424c3e08b4a40a728acf962554669000c48b6ba7691bfa260bd424fcd0c0834" size: 1 pending_stake: 0 pending_total_sui_withdraw: 0 pending_pool_token_withdraw: 0 extra_fields: id: - id: "0x3ea0a0b3a2ed3e97a41f7a5449dd6cdabc7c352d78060257070a7644df62e0be" + id: "0x7100ddb5358349900beb5e8e8e046939e1c03eedd9f4709ae492ff4d0e0cf66c" size: 0 commission_rate: 200 next_epoch_stake: 20000000000000000 @@ -269,27 +269,27 @@ validators: next_epoch_commission_rate: 200 extra_fields: id: - id: "0x6fe70da3c009c35f7f0775fbb7cf31fee9b791a06b568f9e5b6bdb962df6cd70" + id: "0xd791c0d0ddc2928045f1a5b2da7911cbdd83ba737e96c03550f39ce8e5643536" size: 0 pending_active_validators: contents: - id: "0xa1a5f2a0c949249c21875d68b08c2303afddde080a701a3eb7ed47cd5994a123" + id: "0xc24dd20b74615566bb9e91c83348fdc1e874124387dcfa094b9aff49e46c26be" size: 0 pending_removals: [] staking_pool_mappings: - id: "0x6d5379cad39050a7e1f6fe202e02729f1b5175ad2382019f05c564099fe0c680" + id: "0x771b57d70042c92903066603056006cd575cd2da9b47442da606f118f1c3fb19" size: 1 inactive_validators: - id: "0x8ec59c4f685c144a0a87b0944cd05aa177402af486357645a4c38b4c9746af88" + id: "0x4ef8c9e72a5c6323eea95dc1de5531f71b801ff57f61b4b02f58b3ae144cc702" size: 0 validator_candidates: - id: "0xe77fe73477351803bc9dae400aed9112bbfda929dcdc22e2a14575e40e215a9c" + id: "0xdd4d1f847759e67723cb7b35c69ec7f8a87ad81e9d539c3167b63d5fb7cb6346" size: 0 at_risk_validators: contents: [] extra_fields: id: - id: "0x7467315914ac8e1c27eb2350a1783cdcea34b6fffe349c31e7f80f7c3543b282" + id: "0x444fd2bcbb8bd50ee5d444002ba287a733589cd276214698581cc68543629c61" size: 0 storage_fund: total_object_storage_rebates: @@ -306,7 +306,7 @@ parameters: validator_low_stake_grace_period: 7 extra_fields: id: - id: "0x6e650aa8265196ca0fdd9ef3c1c0e2f1d5042c74cee6a76a65316bcb3b6188fc" + id: "0x9dbd68e6ef30badec9e13354817d491d3414a7df56e5f81763d9ced438d94379" size: 0 reference_gas_price: 1000 validator_report_records: @@ -320,7 +320,7 @@ stake_subsidy: stake_subsidy_decrease_rate: 1000 extra_fields: id: - id: "0x3786047219840eac174fd6e74faa0602cea08ed146bdc3ac607a6536aff8dc2c" + id: "0xd5a6a44eb3ef3395304533553789eff6c70c6f8d8e9aec3f4af09f23782f203b" size: 0 safe_mode: false safe_mode_storage_rewards: @@ -332,5 +332,5 @@ safe_mode_non_refundable_storage_fee: 0 epoch_start_timestamp_ms: 10 extra_fields: id: - id: "0x0111121489c309364858942970d14ae6f85473f447a382651fb3a93938538a95" + id: 
"0xc2c89825a972146e7131722bde266cefc18277894ca57c91616745924c8ff7fe" size: 0 diff --git a/crates/sui-swarm/Cargo.toml b/crates/sui-swarm/Cargo.toml index b67f22a309465..51caeadf3b797 100644 --- a/crates/sui-swarm/Cargo.toml +++ b/crates/sui-swarm/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anyhow.workspace = true rand.workspace = true diff --git a/crates/sui-swarm/src/memory/container.rs b/crates/sui-swarm/src/memory/container.rs index edbb5e4b4f146..b763fc7893cbc 100644 --- a/crates/sui-swarm/src/memory/container.rs +++ b/crates/sui-swarm/src/memory/container.rs @@ -69,7 +69,7 @@ impl Container { RuntimeType::MultiThreaded => { thread_local! { static SPAN: std::cell::RefCell> = - std::cell::RefCell::new(None); + const { std::cell::RefCell::new(None) }; } let mut builder = tokio::runtime::Builder::new_multi_thread(); let span = span.clone(); diff --git a/crates/sui-test-transaction-builder/src/lib.rs b/crates/sui-test-transaction-builder/src/lib.rs index 6b79fdef33b4f..b0d91e127b5ed 100644 --- a/crates/sui-test-transaction-builder/src/lib.rs +++ b/crates/sui-test-transaction-builder/src/lib.rs @@ -331,13 +331,13 @@ impl TestTransactionBuilder { let compiled_package = BuildConfig::new_for_testing().build(&path).unwrap(); let all_module_bytes = compiled_package.get_package_bytes(with_unpublished_deps); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); (all_module_bytes, dependencies) } PublishData::ModuleBytes(bytecode) => (bytecode, vec![]), PublishData::CompiledPackage(compiled_package) => { let all_module_bytes = compiled_package.get_package_bytes(false); - let dependencies = compiled_package.get_dependency_original_package_ids(); + let dependencies = compiled_package.get_dependency_storage_package_ids(); (all_module_bytes, dependencies) } }; diff --git a/crates/sui-tool/Cargo.toml b/crates/sui-tool/Cargo.toml index 2db2c5395e241..a19bfaa194b94 100644 --- a/crates/sui-tool/Cargo.toml +++ b/crates/sui-tool/Cargo.toml @@ -13,7 +13,6 @@ bcs.workspace = true clap = { version = "4.1.4", features = ["derive"] } colored.workspace = true comfy-table.workspace = true -diesel.workspace = true eyre.workspace = true futures.workspace = true hex.workspace = true @@ -41,7 +40,6 @@ narwhal-storage.workspace = true narwhal-types.workspace = true sui-config.workspace = true sui-core.workspace = true -sui-indexer.workspace = true sui-network.workspace = true sui-snapshot.workspace = true sui-protocol-config.workspace = true @@ -50,4 +48,5 @@ sui-sdk.workspace = true sui-storage.workspace = true sui-types.workspace = true sui-archival.workspace = true +sui-package-dump.workspace = true bin-version.workspace = true diff --git a/crates/sui-tool/src/commands.rs b/crates/sui-tool/src/commands.rs index 95b39183c96f6..d4a6eb83facfc 100644 --- a/crates/sui-tool/src/commands.rs +++ b/crates/sui-tool/src/commands.rs @@ -5,7 +5,7 @@ use crate::{ check_completed_snapshot, db_tool::{execute_db_tool_command, print_db_all_tables, DbToolCommand}, download_db_snapshot, download_formal_snapshot, dump_checkpoints_from_archive, - get_latest_available_epoch, get_object, get_transaction_block, make_clients, pkg_dump, + get_latest_available_epoch, get_object, get_transaction_block, make_clients, restore_from_db_checkpoint, verify_archive, verify_archive_by_checksum, ConciseObjectOutput, GroupedObjectOutput, SnapshotVerifyMode, 
VerboseObjectOutput, }; @@ -22,7 +22,6 @@ use telemetry_subscribers::TracingHandle; use sui_types::{ base_types::*, crypto::AuthorityPublicKeyBytes, messages_grpc::TransactionInfoRequest, - object::Owner, }; use clap::*; @@ -177,21 +176,26 @@ pub enum ToolCommand { max_content_length: usize, }, - /// Download all packages to the local filesystem from an indexer database. Each package gets - /// its own sub-directory, named for its ID on-chain, containing two metadata files - /// (linkage.json and origins.json) as well as a file for every module it contains. Each module - /// file is named for its module name, with a .mv suffix, and contains Move bytecode (suitable - /// for passing into a disassembler). + /// Download all packages to the local filesystem from a GraphQL service. Each package gets its + /// own sub-directory, named for its ID on chain and version, containing two metadata files + /// (linkage.json and origins.json), a file containing the overall object, and a file for every + /// module it contains. Each module file is named for its module name, with a .mv suffix, and + /// contains Move bytecode (suitable for passing into a disassembler). #[command(name = "dump-packages")] DumpPackages { - /// Connection information for the Indexer's Postgres DB. + /// Connection information for a GraphQL service. #[clap(long, short)] - db_url: String, + rpc_url: String, /// Path to a non-existent directory that can be created and filled with package information. #[clap(long, short)] output_dir: PathBuf, + /// Only fetch packages that were created before this checkpoint (given by its sequence + /// number). + #[clap(long)] + before_checkpoint: Option<u64>, + /// If false (default), log level will be overridden to "off", and output will be reduced to /// necessary status information. #[clap(short, long = "verbose")] @@ -414,59 +418,6 @@ pub enum ToolCommand { }, } -trait OptionDebug { - fn opt_debug(&self, def_str: &str) -> String; -} -trait OptionDisplay { - fn opt_display(&self, def_str: &str) -> String; -} - -impl<T> OptionDebug for Option<T> -where - T: std::fmt::Debug, -{ - fn opt_debug(&self, def_str: &str) -> String { - match self { - None => def_str.to_string(), - Some(t) => format!("{:?}", t), - } - } -} - -impl<T> OptionDisplay for Option<T> -where - T: std::fmt::Display, -{ - fn opt_display(&self, def_str: &str) -> String { - match self { - None => def_str.to_string(), - Some(t) => format!("{}", t), - } - } -} - -struct OwnerOutput(Owner); - -// grep/awk-friendly output for Owner -impl std::fmt::Display for OwnerOutput { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &self.0 { - Owner::AddressOwner(address) => { - write!(f, "address({})", address) - } - Owner::ObjectOwner(address) => { - write!(f, "object({})", address) - } - Owner::Immutable => { - write!(f, "immutable") - } - Owner::Shared { ..
} => { - write!(f, "shared") - } - } - } -} - async fn check_locked_object( sui_client: &Arc, committee: Arc>, @@ -633,8 +584,9 @@ impl ToolCommand { } } ToolCommand::DumpPackages { - db_url, + rpc_url, output_dir, + before_checkpoint, verbose, } => { if !verbose { @@ -643,7 +595,7 @@ .expect("Failed to update log level"); } - pkg_dump::dump(db_url, output_dir).await?; + sui_package_dump::dump(rpc_url, output_dir, before_checkpoint).await?; } ToolCommand::DumpValidators { genesis, concise } => { let genesis = Genesis::load(genesis).unwrap(); diff --git a/crates/sui-tool/src/lib.rs index 574eb87a64f1b..a49ac1b5e9498 100644 --- a/crates/sui-tool/src/lib.rs +++ b/crates/sui-tool/src/lib.rs @@ -71,7 +71,6 @@ use typed_store::rocks::MetricConf; pub mod commands; pub mod db_tool; -pub mod pkg_dump; #[derive( Clone, Serialize, Deserialize, Debug, PartialEq, Copy, PartialOrd, Ord, Eq, ValueEnum, Default, @@ -128,9 +127,6 @@ pub struct ObjectData { trait OptionDebug { fn opt_debug(&self, def_str: &str) -> String; } -trait OptionDisplay { - fn opt_display(&self, def_str: &str) -> String; -} impl<T> OptionDebug for Option<T> where @@ -144,40 +140,6 @@ where } } -impl<T> OptionDisplay for Option<T> -where - T: std::fmt::Display, -{ - fn opt_display(&self, def_str: &str) -> String { - match self { - None => def_str.to_string(), - Some(t) => format!("{}", t), - } - } -} - -struct OwnerOutput(Owner); - -// grep/awk-friendly output for Owner -impl std::fmt::Display for OwnerOutput { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &self.0 { - Owner::AddressOwner(address) => { - write!(f, "address({})", address) - } - Owner::ObjectOwner(address) => { - write!(f, "object({})", address) - } - Owner::Immutable => { - write!(f, "immutable") - } - Owner::Shared { .. } => { - write!(f, "shared") - } - } - } -} - #[allow(clippy::type_complexity)] pub struct GroupedObjectOutput { pub grouped_results: BTreeMap< diff --git a/crates/sui-tool/src/pkg_dump.rs deleted file mode 100644 index bd78cbf2b4b87..0000000000000 --- a/crates/sui-tool/src/pkg_dump.rs +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) Mysten Labs, Inc.
-// SPDX-License-Identifier: Apache-2.0 - -use std::{ - collections::BTreeMap, - fs, - path::{Path, PathBuf}, - time::Duration, -}; - -use anyhow::{anyhow, ensure, Context, Result}; -use diesel::{ - r2d2::{ConnectionManager, Pool}, - PgConnection, RunQueryDsl, -}; -use sui_indexer::{models::packages::StoredPackage, schema::packages}; -use sui_types::{base_types::SuiAddress, move_package::MovePackage}; -use tracing::info; - -type PgPool = Pool<ConnectionManager<PgConnection>>; - -pub(crate) async fn dump(db_url: String, output_dir: PathBuf) -> Result<()> { - ensure_output_directory(&output_dir)?; - - let conn = ConnectionManager::<PgConnection>::new(db_url); - let pool = Pool::builder() - .max_size(1) - .connection_timeout(Duration::from_secs(30)) - .build(conn) - .context("Failed to create connection pool.")?; - - info!("Querying Indexer..."); - let pkgs = query_packages(&pool)?; - let total = pkgs.len(); - - let mut progress = 0; - for (i, pkg) in pkgs.into_iter().enumerate() { - let pct = (100 * i) / total; - if pct % 5 == 0 && pct > progress { - info!("Dumping packages ({total}): {pct: >3}%"); - progress = pct; - } - - let id = SuiAddress::from_bytes(&pkg.package_id).context("Parsing package ID")?; - dump_package(&output_dir, id, &pkg.move_package) - .with_context(|| format!("Dumping package: {id}"))?; - } - - info!("Dumping packages ({total}): 100%, Done."); - Ok(()) -} - -/// Ensure the output directory exists, either because it already exists as an empty, writable -/// directory, or by creating a new directory. -fn ensure_output_directory(path: impl Into<PathBuf>) -> Result<()> { - let path: PathBuf = path.into(); - if path.exists() { - ensure!( - path.is_dir(), - "Output path is not a directory: {}", - path.display() - ); - ensure!( - path.read_dir().is_ok_and(|mut d| d.next().is_none()), - "Output directory is not empty: {}", - path.display(), - ); - - let metadata = fs::metadata(&path).context("Getting metadata for output path")?; - - ensure!( - !metadata.permissions().readonly(), - "Output directory is not writable: {}", - path.display() - ) - } else { - fs::create_dir_all(&path).context("Making output directory")?; - } - - Ok(()) -} - -fn query_packages(pool: &PgPool) -> Result<Vec<StoredPackage>> { - let mut conn = pool - .get() - .map_err(|e| anyhow!("Failed to get connection: {e}"))?; - Ok(packages::dsl::packages.load::<StoredPackage>(&mut conn)?) -} - -fn dump_package(output_dir: &Path, id: SuiAddress, pkg: &[u8]) -> Result<()> { - let package = bcs::from_bytes::<MovePackage>(pkg).context("Deserializing")?; - let origins: BTreeMap<_, _> = package - .type_origin_table() - .iter() - .map(|o| { - ( - format!("{}::{}", o.module_name, o.datatype_name), - o.package.to_string(), - ) - }) - .collect(); - - let package_dir = output_dir.join(format!("{}.{}", id, package.version().value())); - fs::create_dir(&package_dir).context("Making output directory")?; - - let linkage_json = - serde_json::to_string_pretty(package.linkage_table()).context("Serializing linkage")?; - let origins_json = - serde_json::to_string_pretty(&origins).context("Serializing type origins")?; - - fs::write(package_dir.join("package.bcs"), pkg).context("Writing package BCS")?; - fs::write(package_dir.join("linkage.json"), linkage_json).context("Writing linkage")?; - fs::write(package_dir.join("origins.json"), origins_json).context("Writing type origins")?; - - for (module_name, module_bytes) in package.serialized_module_map() { - let module_path = package_dir.join(format!("{module_name}.mv")); - fs::write(module_path, module_bytes) - .with_context(|| format!("Writing module: {module_name}"))?
- } - - Ok(()) -} diff --git a/crates/sui-transaction-checks/src/lib.rs b/crates/sui-transaction-checks/src/lib.rs index 2e399e24b9820..1188850a135b2 100644 --- a/crates/sui-transaction-checks/src/lib.rs +++ b/crates/sui-transaction-checks/src/lib.rs @@ -562,7 +562,7 @@ mod checked { // Use the same verifier and meter for all packages, custom configured for signing. let for_signing = true; let mut verifier = sui_execution::verifier(protocol_config, for_signing, metrics); - let mut meter = verifier.meter(protocol_config.meter_config()); + let mut meter = verifier.meter(protocol_config.meter_config_for_signing()); // Measure time for verifying all packages in the PTB let shared_meter_verifier_timer = metrics diff --git a/crates/sui-transactional-test-runner/Cargo.toml b/crates/sui-transactional-test-runner/Cargo.toml index 46fd3ab10242f..d602aea50457a 100644 --- a/crates/sui-transactional-test-runner/Cargo.toml +++ b/crates/sui-transactional-test-runner/Cargo.toml @@ -7,6 +7,9 @@ description = "Move framework for Sui platform" license = "Apache-2.0" publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true bcs.workspace = true diff --git a/crates/sui-transactional-test-runner/src/lib.rs b/crates/sui-transactional-test-runner/src/lib.rs index fc4b5d7b22bfb..854ab9d096d81 100644 --- a/crates/sui-transactional-test-runner/src/lib.rs +++ b/crates/sui-transactional-test-runner/src/lib.rs @@ -142,9 +142,7 @@ impl TransactionalAdapter for ValidatorWithFullnode { ); let epoch_store = self.validator.load_epoch_store_one_call_per_task().clone(); - self.validator - .read_objects_for_execution(&tx, &epoch_store) - .await + self.validator.read_objects_for_execution(&tx, &epoch_store) } fn prepare_txn( diff --git a/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs b/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs index 2c2e9de3c7b90..64606fa58dc0d 100644 --- a/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs +++ b/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs @@ -313,16 +313,12 @@ impl SimulatorStore for PersistedStore { fn insert_committee(&mut self, committee: Committee) { let epoch = committee.epoch as usize; - let mut committees = if let Some(c) = self + let mut committees = self .read_write .epoch_to_committee .get(&()) .expect("Fatal: DB read failed") - { - c - } else { - vec![] - }; + .unwrap_or_default(); if committees.get(epoch).is_some() { return; @@ -397,16 +393,12 @@ impl SimulatorStore for PersistedStore { .live_objects .insert(&object_id, &version) .expect("Fatal: DB write failed"); - let mut q = if let Some(x) = self + let mut q = self .read_write .objects .get(&object_id) .expect("Fatal: DB read failed") - { - x - } else { - BTreeMap::new() - }; + .unwrap_or_default(); q.insert(version, object); self.read_write .objects diff --git a/crates/sui-types/Cargo.toml b/crates/sui-types/Cargo.toml index 5d2f6876ab28b..9943fc5465d25 100644 --- a/crates/sui-types/Cargo.toml +++ b/crates/sui-types/Cargo.toml @@ -6,7 +6,11 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] +async-trait.workspace = true anemo.workspace = true anyhow.workspace = true bincode.workspace = true diff --git a/crates/sui-types/src/bridge.rs b/crates/sui-types/src/bridge.rs index e00c9cc3bb3da..84e65ed53fdd5 100644 --- a/crates/sui-types/src/bridge.rs +++ b/crates/sui-types/src/bridge.rs @@ -34,6 +34,8 @@ pub type BridgeRecordDyanmicField = Field< >; pub const 
BRIDGE_MODULE_NAME: &IdentStr = ident_str!("bridge"); +pub const BRIDGE_TREASURY_MODULE_NAME: &IdentStr = ident_str!("treasury"); +pub const BRIDGE_LIMITER_MODULE_NAME: &IdentStr = ident_str!("limiter"); pub const BRIDGE_COMMITTEE_MODULE_NAME: &IdentStr = ident_str!("committee"); pub const BRIDGE_MESSAGE_MODULE_NAME: &IdentStr = ident_str!("message"); pub const BRIDGE_CREATE_FUNCTION_NAME: &IdentStr = ident_str!("create"); diff --git a/crates/sui-types/src/crypto.rs index 16c83f6fa2493..792906ce4f05e 100644 --- a/crates/sui-types/src/crypto.rs +++ b/crates/sui-types/src/crypto.rs @@ -730,7 +730,7 @@ impl Signature { // itself that computes the BCS hash of the Rust type prefix and `struct TransactionData`. // (See `fn digest` in `impl Message for SenderSignedData`). let mut hasher = DefaultHash::default(); - hasher.update(&bcs::to_bytes(&value).expect("Message serialization should not fail")); + hasher.update(bcs::to_bytes(&value).expect("Message serialization should not fail")); Signer::sign(secret, &hasher.finalize().digest) } @@ -999,7 +999,7 @@ impl SuiSignature for S { T: Serialize, { let mut hasher = DefaultHash::default(); - hasher.update(&bcs::to_bytes(&value).expect("Message serialization should not fail")); + hasher.update(bcs::to_bytes(&value).expect("Message serialization should not fail")); let digest = hasher.finalize().digest; let (sig, pk) = &self.get_verification_inputs()?; diff --git a/crates/sui-types/src/execution.rs index c4bcf7ba293b8..53dbcfbd74797 100644 --- a/crates/sui-types/src/execution.rs +++ b/crates/sui-types/src/execution.rs @@ -175,18 +175,21 @@ impl ExecutionResultsV2 { /// gas smashing). Because this list is not gated by protocol version, there are a few important /// criteria for adding a digest to this list: /// 1. The certificate must be causing all validators to either panic or hang forever deterministically. -/// 2. If we ever ship a fix to make it no longer panic or hang when executing such transaction, -/// we must make sure the transaction is already in this list. Otherwise nodes running the newer version -/// without these transactions in the list will generate forked result. +/// 2. If we ever ship a fix to make it no longer panic or hang when executing such a transaction, we +/// must make sure the transaction is already in this list. Otherwise nodes running the newer +/// version without these transactions in the list will generate a forked result. +/// /// Below is a scenario of when we need to use this list: /// 1. We detect that a specific transaction is causing all validators to either panic or hang forever deterministically. /// 2. We push a CertificateDenyConfig to deny such transaction to all validators asap. -/// 3. To make sure that all fullnodes are able to sync to the latest version, we need to add the transaction digest -/// to this list as well asap, and ship this binary to all fullnodes, so that they can sync past this transaction. +/// 3. To make sure that all fullnodes are able to sync to the latest version, we need to add the +/// transaction digest to this list as well asap, and ship this binary to all fullnodes, so that +/// they can sync past this transaction. /// 4. We then can start fixing the issue, and ship the fix to all nodes. -/// 5. Unfortunately, we can't remove the transaction digest from this list, because if we do so, any future -node that sync from genesis will fork on this transaction.
We may be able to remove it once -we have stable snapshots and the binary has a minimum supported protocol version past the epoch. +/// 5. Unfortunately, we can't remove the transaction digest from this list, because if we do so, +/// any future node that syncs from genesis will fork on this transaction. We may be able to +/// remove it once we have stable snapshots and the binary has a minimum supported protocol +/// version past the epoch. pub fn get_denied_certificates() -> &'static HashSet<TransactionDigest> { static DENIED_CERTIFICATES: Lazy<HashSet<TransactionDigest>> = Lazy::new(|| HashSet::from([])); Lazy::force(&DENIED_CERTIFICATES) } diff --git a/crates/sui-types/src/full_checkpoint_content.rs index 02b7a897c3bed..2c8eef47f237e 100644 --- a/crates/sui-types/src/full_checkpoint_content.rs +++ b/crates/sui-types/src/full_checkpoint_content.rs @@ -1,7 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::effects::{IDOperation, ObjectIn, ObjectOut, TransactionEffects, TransactionEvents}; +use std::collections::BTreeMap; + +use crate::base_types::ObjectRef; +use crate::effects::{ + IDOperation, ObjectIn, ObjectOut, TransactionEffects, TransactionEffectsAPI, TransactionEvents, +}; use crate::messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents}; use crate::object::Object; use crate::storage::BackingPackageStore; @@ -18,11 +23,32 @@ pub struct CheckpointData { } impl CheckpointData { - pub fn output_objects(&self) -> Vec<&Object> { - self.transactions - .iter() - .flat_map(|tx| &tx.output_objects) - .collect() + // returns the latest versions of the output objects that still exist at the end of the checkpoint + pub fn latest_live_output_objects(&self) -> Vec<&Object> { + let mut latest_live_objects = BTreeMap::new(); + for tx in self.transactions.iter() { + for obj in tx.output_objects.iter() { + latest_live_objects.insert(obj.id(), obj); + } + for obj_ref in tx.removed_object_refs_post_version() { + latest_live_objects.remove(&(obj_ref.0)); + } + } + latest_live_objects.into_values().collect() + } + + // returns the object refs that are eventually deleted or wrapped in the current checkpoint + pub fn eventually_removed_object_refs_post_version(&self) -> Vec<ObjectRef> { + let mut eventually_removed_object_refs = BTreeMap::new(); + for tx in self.transactions.iter() { + for obj_ref in tx.removed_object_refs_post_version() { + eventually_removed_object_refs.insert(obj_ref.0, obj_ref); + } + for obj in tx.output_objects.iter() { + eventually_removed_object_refs.remove(&(obj.id())); + } + } + eventually_removed_object_refs.into_values().collect() } pub fn input_objects(&self) -> Vec<&Object> { @@ -51,19 +77,21 @@ pub struct CheckpointTransaction { pub events: Option<TransactionEvents>, /// The state of all inputs to this transaction as they were prior to execution. pub input_objects: Vec<Object>, - /// The state of all output objects created or mutated by this transaction. + /// The state of all output objects created or mutated or unwrapped by this transaction. pub output_objects: Vec<Object>, } impl CheckpointTransaction { // provide an iterator over all deleted or wrapped objects in this transaction - pub fn removed_objects(&self) -> impl Iterator<Item = &Object> { + pub fn removed_objects_pre_version(&self) -> impl Iterator<Item = &Object> { // Iterator over id and versions for all deleted or wrapped objects match &self.effects { TransactionEffects::V1(v1) => Either::Left( // Effects v1 has deleted and wrapped objects versions as the "new" version, not the // old one that was actually removed.
So we need to take these and then look them // up in the `modified_at_versions`. + // No need to chain unwrapped_then_deleted because these objects must have been wrapped + // before the transaction, hence they will not be in modified_at_versions / input_objects. v1.deleted().iter().chain(v1.wrapped()).map(|(id, _, _)| { // lookup the old version for mutated objects let (_, old_version) = v1 @@ -108,6 +136,13 @@ impl CheckpointTransaction { }) } + pub fn removed_object_refs_post_version(&self) -> impl Iterator { + let deleted = self.effects.deleted().into_iter(); + let wrapped = self.effects.wrapped().into_iter(); + let unwrapped_then_deleted = self.effects.unwrapped_then_deleted().into_iter(); + deleted.chain(wrapped).chain(unwrapped_then_deleted) + } + pub fn changed_objects(&self) -> impl Iterator)> { // Iterator over ((ObjectId, new version), Option) match &self.effects { @@ -142,7 +177,7 @@ impl CheckpointTransaction { ObjectIn::NotExist, ObjectOut::PackageWrite((version, _)), IDOperation::Created, - ) => Some(((id, &version), None)), + ) => Some(((id, version), None)), // Unwrapped Objects (ObjectIn::NotExist, ObjectOut::ObjectWrite(_), IDOperation::None) => { @@ -206,7 +241,7 @@ impl CheckpointTransaction { ObjectIn::NotExist, ObjectOut::PackageWrite((version, _)), IDOperation::Created, - ) => Some((id, &version)), + ) => Some((id, version)), _ => None, } diff --git a/crates/sui-types/src/lib.rs b/crates/sui-types/src/lib.rs index 2c029c52cead5..ea47b7e082ef8 100644 --- a/crates/sui-types/src/lib.rs +++ b/crates/sui-types/src/lib.rs @@ -82,6 +82,7 @@ pub mod sui_system_state; pub mod supported_protocol_versions; pub mod traffic_control; pub mod transaction; +pub mod transaction_executor; pub mod transfer; pub mod versioned; pub mod zk_login_authenticator; diff --git a/crates/sui-types/src/messages_grpc.rs b/crates/sui-types/src/messages_grpc.rs index f29707cca75b7..ffe8001980e68 100644 --- a/crates/sui-types/src/messages_grpc.rs +++ b/crates/sui-types/src/messages_grpc.rs @@ -233,6 +233,7 @@ impl From for HandleCertificateResponseV2 { /// If `wait_for_effects` is true, it is guaranteed that: /// - Number of responses will be equal to the number of input transactions. /// - The order of the responses matches the order of the input transactions. +/// /// Otherwise, `responses` will be empty. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct HandleSoftBundleCertificatesResponseV3 { diff --git a/crates/sui-types/src/sui_sdk2_conversions.rs index 6b64a26fb5110..88a73019cd19d 100644 --- a/crates/sui-types/src/sui_sdk2_conversions.rs +++ b/crates/sui-types/src/sui_sdk2_conversions.rs @@ -50,6 +50,7 @@ bcs_convert_impl!( ); bcs_convert_impl!(crate::signature::GenericSignature, UserSignature); bcs_convert_impl!(crate::effects::TransactionEvents, TransactionEvents); +bcs_convert_impl!(crate::transaction::Command, Command); impl From> for ValidatorAggregatedSignature @@ -384,3 +385,21 @@ impl From for UnchangedSharedKind { } } } + +impl From<crate::transaction::TransactionExpiration> for TransactionExpiration { + fn from(value: crate::transaction::TransactionExpiration) -> Self { + match value { + crate::transaction::TransactionExpiration::None => Self::None, + crate::transaction::TransactionExpiration::Epoch(epoch) => Self::Epoch(epoch), + } + } +} + +impl From<TransactionExpiration> for crate::transaction::TransactionExpiration { + fn from(value: TransactionExpiration) -> Self { + match value { + TransactionExpiration::None => Self::None, + TransactionExpiration::Epoch(epoch) => Self::Epoch(epoch), + } + } +} diff --git a/crates/sui-types/src/transaction.rs index 8d78a7d2d2229..5da9828803e40 100644 --- a/crates/sui-types/src/transaction.rs +++ b/crates/sui-types/src/transaction.rs @@ -1902,6 +1902,10 @@ impl TransactionData { .iter() .any(|obj| obj.id() == SUI_RANDOMNESS_STATE_OBJECT_ID) } + + pub fn digest(&self) -> TransactionDigest { + TransactionDigest::new(default_hash(self)) + } } #[enum_dispatch] @@ -2378,7 +2382,7 @@ impl Message for SenderSignedData { /// Computes the tx digest that encodes the Rust type prefix from Signable trait. fn digest(&self) -> Self::DigestType { - TransactionDigest::new(default_hash(&self.intent_message().value)) + self.intent_message().value.digest() } } diff --git a/crates/sui-types/src/transaction_executor.rs new file mode 100644 index 0000000000000..8fd2ebc742a28 --- /dev/null +++ b/crates/sui-types/src/transaction_executor.rs @@ -0,0 +1,17 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::quorum_driver_types::ExecuteTransactionRequestV3; +use crate::quorum_driver_types::ExecuteTransactionResponseV3; +use crate::quorum_driver_types::QuorumDriverError; + +/// Trait to define the interface for how the REST service interacts with a QuorumDriver or a +/// simulated transaction executor.
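+/// For example, a server might hold a `Box<dyn TransactionExecutor>` so that tests can substitute
+/// a simulator-backed implementation for the real quorum driver; the request and response types
+/// above are the only coupling.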
+#[async_trait::async_trait] +pub trait TransactionExecutor: Send + Sync { + async fn execute_transaction( + &self, + request: ExecuteTransactionRequestV3, + client_addr: Option<std::net::SocketAddr>, + ) -> Result<ExecuteTransactionResponseV3, QuorumDriverError>; +} diff --git a/crates/sui-types/tests/serde_tests.rs index cd7056857b06b..2343c90b91329 100644 --- a/crates/sui-types/tests/serde_tests.rs +++ b/crates/sui-types/tests/serde_tests.rs @@ -18,7 +18,7 @@ fn test_struct_tag_serde() { struct TestStructTag(#[serde_as(as = "SuiStructTag")] StructTag); // serialize to json should not trim the leading 0 - let Value::String(json) = serde_json::to_value(&TestStructTag(tag.clone())).unwrap() else { + let Value::String(json) = serde_json::to_value(TestStructTag(tag.clone())).unwrap() else { panic!() }; assert_eq!(json, "0x07f89cdffd8968affa0b47bef91adc5314e19509080470c45bfd434cd83a766b::suifrens::SuiFren<0x07f89cdffd8968affa0b47bef91adc5314e19509080470c45bfd434cd83a766b::capy::Capy>"); diff --git a/crates/sui/Cargo.toml index 9c06cb2893505..50afe8ab31472 100644 --- a/crates/sui/Cargo.toml +++ b/crates/sui/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anemo.workspace = true anyhow.workspace = true diff --git a/crates/sui/src/client_commands.rs index 09907f0941ede..aec1751251eb1 100644 --- a/crates/sui/src/client_commands.rs +++ b/crates/sui/src/client_commands.rs @@ -36,7 +36,7 @@ use serde::Serialize; use serde_json::{json, Value}; use sui_move::manage_package::resolve_lock_file_path; use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; -use sui_source_validation::{BytecodeSourceVerifier, SourceMode}; +use sui_source_validation::{BytecodeSourceVerifier, ValidationMode}; use shared_crypto::intent::Intent; use sui_json::SuiJsonValue; @@ -1106,7 +1106,7 @@ impl SuiClientCommands { let mut used_ticks = meter.accumulator(Scope::Package).clone(); used_ticks.name = pkg_name; - let meter_config = protocol_config.meter_config(); + let meter_config = protocol_config.meter_config_for_signing(); let exceeded = matches!( meter_config.max_per_pkg_meter_units, @@ -1605,11 +1605,17 @@ impl SuiClientCommands { skip_source, address_override, } => { - if skip_source && !verify_deps { - return Err(anyhow!( - "Source skipped and not verifying deps: Nothing to verify."
- )); - } + let mode = match (!skip_source, verify_deps, address_override) { + (false, false, _) => { + bail!("Source skipped and not verifying deps: Nothing to verify.") + } + + (false, true, _) => ValidationMode::deps(), + (true, false, None) => ValidationMode::root(), + (true, true, None) => ValidationMode::root_and_deps(), + (true, false, Some(at)) => ValidationMode::root_at(*at), + (true, true, Some(at)) => ValidationMode::root_and_deps_at(*at), + }; let build_config = resolve_lock_file_path(build_config, Some(&package_path))?; let chain_id = context @@ -1627,17 +1633,8 @@ .build(&package_path)?; let client = context.get_client().await?; BytecodeSourceVerifier::new(client.read_api()) - .verify_package( - &compiled_package, - verify_deps, - match (skip_source, address_override) { - (true, _) => SourceMode::Skip, - (false, None) => SourceMode::Verify, - (false, Some(addr)) => SourceMode::VerifyAt(addr.into()), - }, - ) + .verify(&compiled_package, mode) .await?; SuiClientCommandResult::VerifySource @@ -1860,7 +1857,10 @@ pub(crate) async fn compile_package( let compiled_modules = compiled_package.get_package_bytes(with_unpublished_dependencies); if !skip_dependency_verification { let verifier = BytecodeSourceVerifier::new(read_api); - if let Err(e) = verifier.verify_package_deps(&compiled_package).await { + if let Err(e) = verifier + .verify(&compiled_package, ValidationMode::deps()) + .await + { return Err(SuiError::ModulePublishFailure { error: format!( "[warning] {e}\n\ @@ -2715,6 +2715,7 @@ pub async fn execute_dry_run( /// Call a dry run with the transaction data to estimate the gas budget. /// The estimated gas budget is computed as follows: /// * the maximum between A and B, where: +/// /// A = computation cost + GAS_SAFE_OVERHEAD * reference gas price /// B = computation cost + storage cost - storage rebate + GAS_SAFE_OVERHEAD * reference gas price /// overhead diff --git a/crates/sui/src/client_ptb/lexer.rs index 749e8f724ada6..2338683ffadad 100644 --- a/crates/sui/src/client_ptb/lexer.rs +++ b/crates/sui/src/client_ptb/lexer.rs @@ -17,9 +17,7 @@ pub struct Lexer<'l, I: Iterator<Item = &'l str>> { impl<'l, I: Iterator<Item = &'l str>> Lexer<'l, I> { pub fn new(mut tokens: I) -> Option<Self> { - let Some(buf) = tokens.next() else { - return None; - }; + let buf = tokens.next()?; Some(Self { buf, @@ -63,9 +61,7 @@ impl<'l, I: Iterator<Item = &'l str>> Lexer<'l, I> { fn eat_prefix(&mut self, patt: &str) -> Option> { let start = self.offset; - let Some(rest) = self.buf.strip_prefix(patt) else { - return None; - }; + let rest = self.buf.strip_prefix(patt)?; let len = self.buf.len() - rest.len(); let value = &self.buf[..len]; @@ -120,9 +116,7 @@ impl<'l, I: Iterator<Item = &'l str>> Lexer<'l, I> { /// Look at the next character in the current shell token without consuming it, if it exists.
    fn peek(&self) -> Option<Lexeme<'l>> {
         let start = self.offset;
-        let Some((ix, _)) = self.next_char_boundary() else {
-            return None;
-        };
+        let (ix, _) = self.next_char_boundary()?;
 
         let value = &self.buf[..ix];
         let span = Span {
diff --git a/crates/sui/tests/cli_tests.rs b/crates/sui/tests/cli_tests.rs
index cc0f05e41e3af..7d0c374dce9df 100644
--- a/crates/sui/tests/cli_tests.rs
+++ b/crates/sui/tests/cli_tests.rs
@@ -691,7 +691,7 @@ async fn test_move_call_args_linter_command() -> Result<(), anyhow::Error> {
 
     // Try a transfer
     // This should fail due to mismatch of object being sent
-    let args = vec![
+    let args = [
         SuiJsonValue::new(json!(obj))?,
         SuiJsonValue::new(json!(address2))?,
     ];
@@ -712,7 +712,7 @@ async fn test_move_call_args_linter_command() -> Result<(), anyhow::Error> {
 
     // Try a transfer with explicitly set gas price.
     // It should fail due to that gas price is below RGP.
-    let args = vec![
+    let args = [
         SuiJsonValue::new(json!(created_obj))?,
         SuiJsonValue::new(json!(address2))?,
     ];
@@ -740,7 +740,7 @@ async fn test_move_call_args_linter_command() -> Result<(), anyhow::Error> {
     //     assert!(err_string.contains(&format!("Expected argument of type {package_addr}::object_basics::Object, but found type {framework_addr}::coin::Coin<{framework_addr}::sui::SUI>")));
 
     // Try a proper transfer
-    let args = vec![
+    let args = [
         SuiJsonValue::new(json!(created_obj))?,
         SuiJsonValue::new(json!(address2))?,
     ];
@@ -2080,7 +2080,7 @@ async fn test_package_management_on_upgrade_command_conflict() -> Result<(), any
     let err_string = err_string.replace(&package.object_id().to_string(), "");
 
     let expect = expect![[r#"
-        Conflicting published package address: `Move.toml` contains published-at address 0xbad but `Move.lock` file contains published-at address . You may want to:
+        Conflicting published package address: `Move.toml` contains published-at address 0x0000000000000000000000000000000000000000000000000000000000000bad but `Move.lock` file contains published-at address . You may want to:
         - delete the published-at address in the `Move.toml` if the `Move.lock` address is correct; OR
         - update the `Move.lock` address using the `sui manage-package` command to be the same as the `Move.toml`; OR
diff --git a/crates/suiop-cli/src/cli/ci/mod.rs b/crates/suiop-cli/src/cli/ci/mod.rs
index 351d45809d8d0..ecbae85e75a19 100644
--- a/crates/suiop-cli/src/cli/ci/mod.rs
+++ b/crates/suiop-cli/src/cli/ci/mod.rs
@@ -20,7 +20,7 @@ pub struct CIArgs {
 pub(crate) enum CIAction {
     #[clap(aliases = ["k", "key"])]
     Keys(KeyArgs),
-    #[clap(aliases = ["i", "image"])]
+    #[clap(aliases = ["i"])]
     Image(ImageArgs),
 }
 
diff --git a/crates/suiop-cli/src/command.rs b/crates/suiop-cli/src/command.rs
index c14c39cb3e4f2..8247dbb9938f1 100644
--- a/crates/suiop-cli/src/command.rs
+++ b/crates/suiop-cli/src/command.rs
@@ -38,11 +38,7 @@ impl Default for CommandOptions {
 pub fn run_cmd(cmd_in: Vec<&str>, options: Option<CommandOptions>) -> Result<Output> {
     debug!("attempting to run {}", cmd_in.join(" "));
 
-    let opts = if let Some(opts) = options {
-        opts
-    } else {
-        CommandOptions::default()
-    };
+    let opts = options.unwrap_or_default();
 
     let mut cmd = Command::new(cmd_in[0]);
     // add extra args
diff --git a/crates/telemetry-subscribers/src/lib.rs b/crates/telemetry-subscribers/src/lib.rs
index c320ad39d5b97..7221034ad9273 100644
--- a/crates/telemetry-subscribers/src/lib.rs
+++ b/crates/telemetry-subscribers/src/lib.rs
@@ -380,6 +380,7 @@ impl TelemetryConfig {
         let mut file_output = CachedOpenFile::new::<&str>(None).unwrap();
         let mut provider = None;
         let sampler = SamplingFilter::new(config.sample_rate);
+        let service_name = env::var("OTEL_SERVICE_NAME").unwrap_or("sui-node".to_owned());
 
         if config.enable_otlp_tracing {
             let trace_file = env::var("TRACE_FILE").ok();
@@ -387,7 +388,7 @@ impl TelemetryConfig {
             let config = sdk::trace::config()
                 .with_resource(Resource::new(vec![opentelemetry::KeyValue::new(
                     "service.name",
-                    "sui-node",
+                    service_name.clone(),
                 )]))
                 .with_sampler(Sampler::ParentBased(Box::new(sampler.clone())));
 
@@ -404,7 +405,7 @@ impl TelemetryConfig {
                 .with_span_processor(processor)
                 .build();
 
-            let tracer = p.tracer("sui-node");
+            let tracer = p.tracer(service_name);
             provider = Some(p);
 
             tracing_opentelemetry::layer().with_tracer(tracer)
@@ -499,7 +500,7 @@ impl SamplingFilter {
 
     fn clamp(sample_rate: f64) -> f64 {
         // clamp sample rate to between 0.0001 and 1.0
-        sample_rate.max(0.0001).min(1.0)
+        sample_rate.clamp(0.0001, 1.0)
     }
 
     fn update_sampling_rate(&self, sample_rate: f64) {
diff --git a/crates/telemetry-subscribers/src/span_latency_prom.rs b/crates/telemetry-subscribers/src/span_latency_prom.rs
index a42783d84684b..476bf30151980 100644
--- a/crates/telemetry-subscribers/src/span_latency_prom.rs
+++ b/crates/telemetry-subscribers/src/span_latency_prom.rs
@@ -10,6 +10,7 @@
 //! - tracing-timing does not output to Prometheus, and extracting data from its histograms takes extra CPU
 //! - tracing-timing records latencies using HDRHistogram, which is great, but uses extra memory when one
 //!   is already using Prometheus
+//!
 //! Thus this is a much smaller and more focused module.
 //!
 //!
## Making spans visible diff --git a/crates/test-cluster/Cargo.toml b/crates/test-cluster/Cargo.toml index 826433cb330f2..6f967e93389b8 100644 --- a/crates/test-cluster/Cargo.toml +++ b/crates/test-cluster/Cargo.toml @@ -6,6 +6,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anyhow.workspace = true bcs.workspace = true diff --git a/crates/test-cluster/src/lib.rs b/crates/test-cluster/src/lib.rs index 8740c39577349..0e730eadfda0e 100644 --- a/crates/test-cluster/src/lib.rs +++ b/crates/test-cluster/src/lib.rs @@ -5,8 +5,6 @@ use futures::Future; use futures::{future::join_all, StreamExt}; use jsonrpsee::core::RpcResult; use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; -use jsonrpsee::ws_client::WsClient; -use jsonrpsee::ws_client::WsClientBuilder; use rand::{distributions::*, rngs::OsRng, seq::SliceRandom}; use std::collections::BTreeMap; use std::collections::HashMap; @@ -88,7 +86,6 @@ pub struct FullNodeHandle { pub sui_client: SuiClient, pub rpc_client: HttpClient, pub rpc_url: String, - pub ws_url: String, } impl FullNodeHandle { @@ -96,7 +93,6 @@ impl FullNodeHandle { let rpc_url = format!("http://{}", json_rpc_address); let rpc_client = HttpClientBuilder::default().build(&rpc_url).unwrap(); - let ws_url = format!("ws://{}", json_rpc_address); let sui_client = SuiClientBuilder::default().build(&rpc_url).await.unwrap(); Self { @@ -104,16 +100,8 @@ impl FullNodeHandle { sui_client, rpc_client, rpc_url, - ws_url, } } - - pub async fn ws_client(&self) -> WsClient { - WsClientBuilder::default() - .build(&self.ws_url) - .await - .unwrap() - } } pub struct TestCluster { diff --git a/crates/transaction-fuzzer/Cargo.toml b/crates/transaction-fuzzer/Cargo.toml index 15fe64e0735b4..106bebdf5a8d1 100644 --- a/crates/transaction-fuzzer/Cargo.toml +++ b/crates/transaction-fuzzer/Cargo.toml @@ -7,6 +7,9 @@ description = "Tool to fuzz the system with randomly generated transactions" license = "Apache-2.0" publish = false +[lints] +workspace = true + [dependencies] proptest.workspace = true proptest-derive.workspace = true diff --git a/crates/transaction-fuzzer/src/programmable_transaction_gen.rs b/crates/transaction-fuzzer/src/programmable_transaction_gen.rs index 85de6eabbe2a6..3526a02009d61 100644 --- a/crates/transaction-fuzzer/src/programmable_transaction_gen.rs +++ b/crates/transaction-fuzzer/src/programmable_transaction_gen.rs @@ -404,7 +404,7 @@ pub fn gen_move_vec_input( /// A helper function to generate enough input coins for a command (transfer, merge, or create vector) /// - either collect them all from previous command or generate additional ones if the previous -/// command does not deliver enough. +/// command does not deliver enough. fn gen_enough_arguments( builder: &mut ProgrammableTransactionBuilder, prev_cmd_num: i64, diff --git a/crates/typed-store-derive/src/lib.rs b/crates/typed-store-derive/src/lib.rs index 9b0c73b8acd12..be1d81862cb8d 100644 --- a/crates/typed-store-derive/src/lib.rs +++ b/crates/typed-store-derive/src/lib.rs @@ -93,7 +93,7 @@ fn extract_struct_info( } else { field_name.clone() }; - if attrs.get(DB_OPTIONS_DEPRECATE).is_some() { + if attrs.contains_key(DB_OPTIONS_DEPRECATE) { deprecated_cfs.push(field_name.clone()); } diff --git a/crates/typed-store/src/lib.rs b/crates/typed-store/src/lib.rs index 734d22dc9cfea..00743e16fd65c 100644 --- a/crates/typed-store/src/lib.rs +++ b/crates/typed-store/src/lib.rs @@ -33,7 +33,8 @@ pub type StoreError = typed_store_error::TypedStoreError; /// 5. 
Other convenience features /// /// 1. Flexible configuration: -/// a. Static options specified at struct definition +/// a. Static options specified at struct definition +/// /// The definer of the struct can specify the default options for each table using annotations /// We can also supply column family options on the default ones /// A user defined function of signature () -> Options can be provided for each table @@ -90,10 +91,10 @@ pub type StoreError = typed_store_error::TypedStoreError; ///``` /// /// 2. Auto-generated `open` routine -/// The function `open_tables_read_write` is generated which allows for specifying DB wide options and custom table configs as mentioned above +/// The function `open_tables_read_write` is generated which allows for specifying DB wide options and custom table configs as mentioned above /// /// 3. Auto-generated `read_only_mode` handle -/// This mode provides handle struct which opens the DB in read only mode and has certain features like dumping and counting the keys in the tables +/// This mode provides handle struct which opens the DB in read only mode and has certain features like dumping and counting the keys in the tables /// /// Use the function `Tables::get_read_only_handle` which returns a handle that only allows read only features ///``` @@ -138,10 +139,10 @@ pub type StoreError = typed_store_error::TypedStoreError; /// } /// ``` /// 4. Auto-generated memory stats method -/// `self.get_memory_usage` is derived to provide memory and cache usage +/// `self.get_memory_usage` is derived to provide memory and cache usage /// /// 5. Other convenience features -/// `Tables::describe_tables` is used to get a list of the table names and key-value types as string in a BTreeMap +/// `Tables::describe_tables` is used to get a list of the table names and key-value types as string in a BTreeMap /// /// // Bad usage example /// // Structs fields most only be of type Store or DMBap diff --git a/crates/typed-store/src/metrics.rs b/crates/typed-store/src/metrics.rs index 8b92fe983832c..904ad5a31cf67 100644 --- a/crates/typed-store/src/metrics.rs +++ b/crates/typed-store/src/metrics.rs @@ -85,13 +85,13 @@ pub struct ColumnFamilyMetrics { pub rocksdb_block_cache_capacity: IntGaugeVec, pub rocksdb_block_cache_usage: IntGaugeVec, pub rocksdb_block_cache_pinned_usage: IntGaugeVec, - pub rocskdb_estimate_table_readers_mem: IntGaugeVec, + pub rocksdb_estimate_table_readers_mem: IntGaugeVec, pub rocksdb_mem_table_flush_pending: IntGaugeVec, - pub rocskdb_compaction_pending: IntGaugeVec, - pub rocskdb_num_running_compactions: IntGaugeVec, + pub rocksdb_compaction_pending: IntGaugeVec, + pub rocksdb_num_running_compactions: IntGaugeVec, pub rocksdb_num_running_flushes: IntGaugeVec, pub rocksdb_estimate_oldest_key_time: IntGaugeVec, - pub rocskdb_background_errors: IntGaugeVec, + pub rocksdb_background_errors: IntGaugeVec, pub rocksdb_estimated_num_keys: IntGaugeVec, } @@ -168,8 +168,8 @@ impl ColumnFamilyMetrics { registry, ) .unwrap(), - rocskdb_estimate_table_readers_mem: register_int_gauge_vec_with_registry!( - "rocskdb_estimate_table_readers_mem", + rocksdb_estimate_table_readers_mem: register_int_gauge_vec_with_registry!( + "rocksdb_estimate_table_readers_mem", "The estimated memory size used for reading SST tables in this column family such as filters and index blocks. 
Note that this number does not include the memory used in block cache.", @@ -186,8 +186,8 @@ impl ColumnFamilyMetrics { registry, ) .unwrap(), - rocskdb_compaction_pending: register_int_gauge_vec_with_registry!( - "rocskdb_compaction_pending", + rocksdb_compaction_pending: register_int_gauge_vec_with_registry!( + "rocksdb_compaction_pending", "A 1 or 0 flag indicating whether a compaction job is pending. If this number is 1, it means some part of the column family requires compaction in order to maintain shape of LSM tree, but the compaction @@ -198,8 +198,8 @@ impl ColumnFamilyMetrics { registry, ) .unwrap(), - rocskdb_num_running_compactions: register_int_gauge_vec_with_registry!( - "rocskdb_num_running_compactions", + rocksdb_num_running_compactions: register_int_gauge_vec_with_registry!( + "rocksdb_num_running_compactions", "The number of compactions that are currently running for the column family.", &["cf_name"], registry, @@ -227,8 +227,8 @@ impl ColumnFamilyMetrics { registry, ) .unwrap(), - rocskdb_background_errors: register_int_gauge_vec_with_registry!( - "rocskdb_background_errors", + rocksdb_background_errors: register_int_gauge_vec_with_registry!( + "rocksdb_background_errors", "The accumulated number of RocksDB background errors.", &["cf_name"], registry, @@ -250,6 +250,7 @@ pub struct OperationMetrics { pub rocksdb_multiget_bytes: HistogramVec, pub rocksdb_put_latency_seconds: HistogramVec, pub rocksdb_put_bytes: HistogramVec, + pub rocksdb_batch_put_bytes: HistogramVec, pub rocksdb_delete_latency_seconds: HistogramVec, pub rocksdb_deletes: IntCounterVec, pub rocksdb_batch_commit_latency_seconds: HistogramVec, @@ -343,6 +344,16 @@ impl OperationMetrics { registry, ) .unwrap(), + rocksdb_batch_put_bytes: register_histogram_vec_with_registry!( + "rocksdb_batch_put_bytes", + "Rocksdb batch put call puts data size in bytes", + &["cf_name"], + prometheus::exponential_buckets(1.0, 4.0, 15) + .unwrap() + .to_vec(), + registry, + ) + .unwrap(), rocksdb_delete_latency_seconds: register_histogram_vec_with_registry!( "rocksdb_delete_latency_seconds", "Rocksdb delete latency in seconds", diff --git a/crates/typed-store/src/rocks/mod.rs b/crates/typed-store/src/rocks/mod.rs index 734e10f4c7269..ca6505b9a448e 100644 --- a/crates/typed-store/src/rocks/mod.rs +++ b/crates/typed-store/src/rocks/mod.rs @@ -88,9 +88,8 @@ mod tests; /// # Arguments /// /// * `db` - a reference to a rocks DB object -/// * `cf;` - a comma separated list of column families to open. For each -/// column family a concatenation of column family name (cf) and Key-Value -/// should be provided. +/// * `cf;` - a comma separated list of column families to open. For each column family a +/// concatenation of column family name (cf) and Key-Value should be provided. 
 ///
 /// # Examples
 ///
@@ -1050,7 +1049,7 @@ impl DBMap {
             );
         db_metrics
             .cf_metrics
-            .rocskdb_estimate_table_readers_mem
+            .rocksdb_estimate_table_readers_mem
             .with_label_values(&[cf_name])
             .set(
                 Self::get_int_property(rocksdb, &cf, properties::ESTIMATE_TABLE_READERS_MEM)
@@ -1074,7 +1073,7 @@ impl DBMap {
             );
         db_metrics
             .cf_metrics
-            .rocskdb_compaction_pending
+            .rocksdb_compaction_pending
             .with_label_values(&[cf_name])
             .set(
                 Self::get_int_property(rocksdb, &cf, properties::COMPACTION_PENDING)
@@ -1082,7 +1081,7 @@ impl DBMap {
             );
         db_metrics
             .cf_metrics
-            .rocskdb_num_running_compactions
+            .rocksdb_num_running_compactions
             .with_label_values(&[cf_name])
             .set(
                 Self::get_int_property(rocksdb, &cf, properties::NUM_RUNNING_COMPACTIONS)
@@ -1106,7 +1105,7 @@ impl DBMap {
             );
         db_metrics
             .cf_metrics
-            .rocskdb_background_errors
+            .rocksdb_background_errors
             .with_label_values(&[cf_name])
             .set(
                 Self::get_int_property(rocksdb, &cf, properties::BACKGROUND_ERRORS)
@@ -1451,15 +1450,21 @@ impl DBBatch {
         if !Arc::ptr_eq(&db.rocksdb, &self.rocksdb) {
             return Err(TypedStoreError::CrossDBBatch);
         }
-
+        let mut total = 0usize;
         new_vals
             .into_iter()
             .try_for_each::<_, Result<_, TypedStoreError>>(|(k, v)| {
                 let k_buf = be_fix_int_ser(k.borrow())?;
                 let v_buf = bcs::to_bytes(v.borrow()).map_err(typed_store_err_from_bcs_err)?;
+                total += k_buf.len() + v_buf.len();
                 self.batch.put_cf(&db.cf(), k_buf, v_buf);
                 Ok(())
             })?;
+        self.db_metrics
+            .op_metrics
+            .rocksdb_batch_put_bytes
+            .with_label_values(&[&db.cf])
+            .observe(total as f64);
         Ok(self)
     }
 
@@ -2729,7 +2734,7 @@ fn populate_missing_cfs(
 /// Given a Vec<u8>, find the value which is one more than the vector
 /// if the vector was a big endian number.
 /// If the vector is already minimum, don't change it.
-fn big_endian_saturating_add_one(v: &mut Vec<u8>) {
+fn big_endian_saturating_add_one(v: &mut [u8]) {
     if is_max(v) {
         return;
     }
@@ -2763,7 +2768,6 @@ fn test_helpers() {
     uint::construct_uint!
{ // 32 byte number - #[cfg_attr(feature = "scale-info", derive(TypeInfo))] struct Num32(4); } diff --git a/crates/typed-store/tests/macro_tests.rs b/crates/typed-store/tests/macro_tests.rs index ccd040ebddef9..dc8252864c144 100644 --- a/crates/typed-store/tests/macro_tests.rs +++ b/crates/typed-store/tests/macro_tests.rs @@ -166,13 +166,13 @@ async fn macro_test() { // Test pagination let m = tbls_secondary.dump("table1", 2, 0).unwrap(); assert_eq!(2, m.len()); - assert_eq!(format!("\"1\""), *m.get(&"\"1\"".to_string()).unwrap()); - assert_eq!(format!("\"2\""), *m.get(&"\"2\"".to_string()).unwrap()); + assert_eq!(format!("\"1\""), *m.get("\"1\"").unwrap()); + assert_eq!(format!("\"2\""), *m.get("\"2\"").unwrap()); let m = tbls_secondary.dump("table1", 3, 2).unwrap(); assert_eq!(3, m.len()); - assert_eq!(format!("\"7\""), *m.get(&"\"7\"".to_string()).unwrap()); - assert_eq!(format!("\"8\""), *m.get(&"\"8\"".to_string()).unwrap()); + assert_eq!(format!("\"7\""), *m.get("\"7\"").unwrap()); + assert_eq!(format!("\"8\""), *m.get("\"8\"").unwrap()); } #[tokio::test] @@ -306,13 +306,13 @@ async fn test_sallydb() { // Test pagination let m = example_db_secondary.dump("col1", 2, 0).unwrap(); assert_eq!(2, m.len()); - assert_eq!(format!("\"1\""), *m.get(&"\"1\"".to_string()).unwrap()); - assert_eq!(format!("\"2\""), *m.get(&"\"2\"".to_string()).unwrap()); + assert_eq!(format!("\"1\""), *m.get("\"1\"").unwrap()); + assert_eq!(format!("\"2\""), *m.get("\"2\"").unwrap()); let m = example_db_secondary.dump("col1", 3, 2).unwrap(); assert_eq!(3, m.len()); - assert_eq!(format!("\"7\""), *m.get(&"\"7\"".to_string()).unwrap()); - assert_eq!(format!("\"8\""), *m.get(&"\"8\"".to_string()).unwrap()); + assert_eq!(format!("\"7\""), *m.get("\"7\"").unwrap()); + assert_eq!(format!("\"8\""), *m.get("\"8\"").unwrap()); } #[tokio::test] diff --git a/dapps/multisig-toolkit/src/components/connect.tsx b/dapps/multisig-toolkit/src/components/connect.tsx index 1dcddd002c47d..7f750bcb285a4 100644 --- a/dapps/multisig-toolkit/src/components/connect.tsx +++ b/dapps/multisig-toolkit/src/components/connect.tsx @@ -15,7 +15,14 @@ import { useState } from 'react'; import { cn } from '@/lib/utils'; import { Button } from './ui/button'; -import { Command, CommandEmpty, CommandGroup, CommandInput, CommandItem } from './ui/command'; +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList, +} from './ui/command'; import { Popover, PopoverContent, PopoverTrigger } from './ui/popover'; function ConnectedButton() { @@ -41,37 +48,39 @@ function ConnectedButton() { - No account found. - - {accounts.map((account) => ( + + No account found. + + {accounts.map((account) => ( + { + switchAccount({ account }); + setOpen(false); + }} + > + + {formatAddress(account.address)} + + ))} + { - switchAccount({ account }); - setOpen(false); + disconnect(); }} > - - {formatAddress(account.address)} + Disconnect - ))} - - { - disconnect(); - }} - > - Disconnect - - + + diff --git a/dapps/multisig-toolkit/src/components/ui/command.tsx b/dapps/multisig-toolkit/src/components/ui/command.tsx index 8a160da4d7ab2..5226361fda57d 100644 --- a/dapps/multisig-toolkit/src/components/ui/command.tsx +++ b/dapps/multisig-toolkit/src/components/ui/command.tsx @@ -1,6 +1,6 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -import { DialogProps } from '@radix-ui/react-dialog'; +import { type DialogProps } from '@radix-ui/react-dialog'; import { Command as CommandPrimitive } from 'cmdk'; import { Search } from 'lucide-react'; import * as React from 'react'; @@ -113,7 +113,7 @@ const CommandItem = React.forwardRef< = 10.2 to fix. - "RUSTSEC-2024-0358", # A few dependencies use unpatched rustls. "RUSTSEC-2024-0336", # allow yaml-rust being unmaintained @@ -81,8 +49,7 @@ ignore = [ # More documentation for the licenses section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] -# The lint level for crates which do not have a detectable license -unlicensed = "deny" +version = 2 # List of explicitly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. @@ -101,26 +68,6 @@ allow = [ "Unicode-DFS-2016", #"Apache-2.0 WITH LLVM-exception", ] -# List of explicitly disallowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. -deny = [ - #"Nokia", -] -# Lint level for licenses considered copyleft -copyleft = "deny" -# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses -# * both - The license will be approved if it is both OSI-approved *AND* FSF -# * either - The license will be approved if it is either OSI-approved *OR* FSF -# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF -# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved -# * neither - This predicate is ignored and the default lint level is used -allow-osi-fsf-free = "neither" -# Lint level used when no other predicates are matched -# 1. License isn't in the allow or deny lists -# 2. License isn't copyleft -# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" -default = "deny" # The confidence threshold for detecting a license from license text. # The higher the value, the more closely the license text must be to the # canonical license text of a valid SPDX license file. @@ -254,8 +201,6 @@ github = [ "mystenmark", "bmwill", "mystenlabs", - "MystenLabs", "nextest-rs", "wlmyng", # jsonrpsee fork - "quinn-rs", ] diff --git a/docker/deterministic-canary/Dockerfile b/docker/deterministic-canary/Dockerfile index 67f253135d3b6..5ebdef630f182 100644 --- a/docker/deterministic-canary/Dockerfile +++ b/docker/deterministic-canary/Dockerfile @@ -1,7 +1,7 @@ ARG PROFILE=release # ARG BUILD_DATE # ARG GIT_REVISION -ARG RUST_VERSION=1.76.0 +ARG RUST_VERSION=1.80.1 FROM scratch AS base diff --git a/docker/sui-bridge-indexer/Dockerfile b/docker/sui-bridge-indexer/Dockerfile index 3be48a6e006cb..4f0fcde7b9c42 100644 --- a/docker/sui-bridge-indexer/Dockerfile +++ b/docker/sui-bridge-indexer/Dockerfile @@ -2,7 +2,7 @@ # # Copy in all crates, Cargo.toml and Cargo.lock unmodified, # and build the application. -FROM rust:1.75-bullseye AS builder +FROM rust:1.80.1-bullseye AS builder ARG PROFILE=release ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION diff --git a/docker/sui-graphql-rpc/Dockerfile b/docker/sui-graphql-rpc/Dockerfile index 0035b8268bd81..049b8603e39c8 100644 --- a/docker/sui-graphql-rpc/Dockerfile +++ b/docker/sui-graphql-rpc/Dockerfile @@ -2,7 +2,7 @@ # # Copy in all crates, Cargo.toml and Cargo.lock unmodified, # and build the application. 
-FROM rust:1.75-bullseye AS builder +FROM rust:1.80.1-bullseye AS builder ARG PROFILE=release ENV PROFILE=$PROFILE ARG GIT_REVISION diff --git a/docker/sui-indexer-tidb/Dockerfile b/docker/sui-indexer-tidb/Dockerfile index d1d46bf81024c..6d11dc569bd60 100644 --- a/docker/sui-indexer-tidb/Dockerfile +++ b/docker/sui-indexer-tidb/Dockerfile @@ -2,7 +2,7 @@ # # Copy in all crates, Cargo.toml and Cargo.lock unmodified, # and build the application. -FROM rust:1.75-bullseye AS builder +FROM rust:1.80.1-bullseye AS builder ARG PROFILE=release ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION diff --git a/docker/sui-indexer/Dockerfile b/docker/sui-indexer/Dockerfile index db27235841255..98635444f78c3 100644 --- a/docker/sui-indexer/Dockerfile +++ b/docker/sui-indexer/Dockerfile @@ -2,7 +2,7 @@ # # Copy in all crates, Cargo.toml and Cargo.lock unmodified, # and build the application. -FROM rust:1.75-bullseye AS builder +FROM rust:1.80.1-bullseye AS builder ARG PROFILE=release ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION diff --git a/docker/sui-network/docker-compose-antithesis.yaml b/docker/sui-network/docker-compose-antithesis.yaml index eada610c7c315..1e67f2884507c 100644 --- a/docker/sui-network/docker-compose-antithesis.yaml +++ b/docker/sui-network/docker-compose-antithesis.yaml @@ -22,7 +22,7 @@ services: - ./dbs/validator1:/opt/sui/db:rw - ./logs/validator1:/opt/sui/logs command: - "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml > /opt/sui/logs/full_logs.log 2>&1'" + "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml >> /opt/sui/logs/full_logs.log 2>&1'" restart: on-failure logging: driver: "json-file" @@ -50,7 +50,7 @@ services: - ./dbs/validator2:/opt/sui/db:rw - ./logs/validator2:/opt/sui/logs command: - "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml > /opt/sui/logs/full_logs.log 2>&1'" + "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml >> /opt/sui/logs/full_logs.log 2>&1'" restart: on-failure logging: driver: "json-file" @@ -77,7 +77,7 @@ services: - ./dbs/validator3:/opt/sui/db:rw - ./logs/validator3:/opt/sui/logs command: - "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml > /opt/sui/logs/full_logs.log 2>&1'" + "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml >> /opt/sui/logs/full_logs.log 2>&1'" restart: on-failure logging: driver: "json-file" @@ -104,7 +104,7 @@ services: - ./dbs/validator4:/opt/sui/db:rw - ./logs/validator4:/opt/sui/logs command: - "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml > /opt/sui/logs/full_logs.log 2>&1'" + "bash -c '/usr/local/bin/sui-node-inst --config-path /opt/sui/config/validator.yaml >> /opt/sui/logs/full_logs.log 2>&1'" restart: on-failure logging: driver: "json-file" diff --git a/docker/sui-node/Dockerfile b/docker/sui-node/Dockerfile index ed500c6e8f06b..e9557b7698adc 100644 --- a/docker/sui-node/Dockerfile +++ b/docker/sui-node/Dockerfile @@ -2,16 +2,12 @@ # # Copy in all crates, Cargo.toml and Cargo.lock unmodified, # and build the application. 
-FROM rust:1.76-bullseye AS builder +FROM rust:1.80.1-bullseye AS builder ARG PROFILE=release ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION WORKDIR "$WORKDIR/sui" -RUN apt-get update && apt-get install -y cmake clang protobuf-compiler - -COPY root-config /root/ -RUN sed 's|/home/runner|/root|g' -i.bak /root/.ssh/config -ENV CARGO_NET_GIT_FETCH_WITH_CLI=true +RUN apt-get update && apt-get install -y cmake clang COPY Cargo.toml Cargo.lock ./ COPY consensus consensus @@ -20,7 +16,7 @@ COPY sui-execution sui-execution COPY narwhal narwhal COPY external-crates external-crates -RUN --mount=type=ssh cargo build --profile ${PROFILE} --bin sui-node +RUN cargo build --profile ${PROFILE} --bin sui-node # Production Image FROM debian:bullseye-slim AS runtime diff --git a/docker/sui-services/Dockerfile b/docker/sui-services/Dockerfile index dd9cc69f5edda..2002fe6b9140b 100644 --- a/docker/sui-services/Dockerfile +++ b/docker/sui-services/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.75-bullseye AS chef +FROM rust:1.80.1-bullseye AS chef WORKDIR sui ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION diff --git a/docker/sui-source-service/Dockerfile b/docker/sui-source-service/Dockerfile index bb73ef2e3e0d5..0936bd0562256 100644 --- a/docker/sui-source-service/Dockerfile +++ b/docker/sui-source-service/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.75-bullseye AS chef +FROM rust:1.80.1-bullseye AS chef WORKDIR sui ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION diff --git a/docker/sui-tools/Dockerfile b/docker/sui-tools/Dockerfile index e475fd2abf160..4f800e03b01e6 100644 --- a/docker/sui-tools/Dockerfile +++ b/docker/sui-tools/Dockerfile @@ -2,7 +2,7 @@ # # Copy in all crates, Cargo.toml and Cargo.lock unmodified, # and build the application. -FROM rust:1.75-bullseye AS builder +FROM rust:1.80.1-bullseye AS builder ARG PROFILE=release ARG GIT_REVISION ENV GIT_REVISION=$GIT_REVISION diff --git a/docs/content/concepts/object-model.mdx b/docs/content/concepts/object-model.mdx index 63bb248c8a1bf..4b3b242a9e785 100644 --- a/docs/content/concepts/object-model.mdx +++ b/docs/content/concepts/object-model.mdx @@ -3,6 +3,8 @@ title: Object Model description: Everything on the Sui blockchain is an object, with metadata, type of ownership, and a referencing scheme. --- +import ProtocolConfig from "@site/src/components/ProtocolConfig"; + The basic unit of storage in Sui is the object. In contrast to many other blockchains where storage is centered around accounts containing key-value stores, Sui's storage is centered around objects addressable on-chain by unique IDs. A smart contract is an object (called a Sui Move package), and these smart contracts manipulate objects on the Sui network: - Sui Move Package: a set of Sui Move bytecode modules. Each module has a name that's unique within the containing package. The combination of the package's on-chain ID and the name of a module uniquely identify the module. When you publish smart contracts to Sui, a package is the unit of publishing. After you publish a package object, it is immutable and can never be changed or removed. A package object can depend on other package objects that were previously published to Sui. @@ -13,7 +15,7 @@ The basic unit of storage in Sui is the object. In contrast to many other blockc Each Sui object has the following metadata: - A 32-byte globally unique ID. An object ID is derived from the digest of the transaction that created the object and from a counter encoding the number of IDs generated by the transaction. 
-- An 8-byte unsigned integer version that monotonically increases with every transaction that modifies it (see [Object and package versioning](./versioning.mdx)).
+- An 8-byte unsigned integer version that monotonically increases with every transaction that modifies it (see [Object and Package Versioning](./versioning.mdx)).
 - A 32-byte transaction digest indicating the last transaction that included this object as an output.
 - A 32-byte owner field that indicates how this object can be accessed. See [Object Ownership](./object-ownership.mdx) for more information.
 
@@ -44,4 +46,24 @@ When this DAG contains all committed transactions in the system, it forms a comp
 
 ## Limits on transactions, objects, and data
 
-Sui has some limits on transactions and data used in transactions, such as a maximum size and number of objects used.
+Sui has some limits on transactions and data used in transactions, such as a maximum size and number of objects used. For more information on limits, see [Building against Limits](https://move-book.com/guides/building-against-limits.html) in The Move Book.
+
+The `ProtocolConfig` struct in the [`sui-protocol-config` crate](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/lib.rs) itemizes these limits. Expand the following code to see the `ProtocolConfig` struct and the comments that explain each parameter.
+
+<details>
+<summary>
+Toggle source code
+</summary>
+
+{@inject: crates/sui-protocol-config/src/lib.rs#struct=ProtocolConfig}
+
+</details>
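For readers who want these limits in code rather than in rendered docs, here is a minimal sketch against the `sui-protocol-config` crate. `ProtocolConfig::get_for_version` is the crate's entry point for resolving a versioned config; the two accessor names are assumptions based on the crate's generated per-field getters, not confirmed API:

```rust
use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion};

fn main() {
    // Resolve the limit set for the newest protocol version this build knows
    // about, as configured for mainnet.
    let config = ProtocolConfig::get_for_version(ProtocolVersion::MAX, Chain::Mainnet);

    // Each `ProtocolConfig` field has a generated getter; these two names are
    // illustrative assumptions.
    println!("max_tx_size_bytes = {}", config.max_tx_size_bytes());
    println!("max_input_objects = {}", config.max_input_objects());
}
```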
+
+Select a network from the following tabs to see the currently configured limits and values.
+
+<ProtocolConfig />
+
+## Related links
+
+- [Object and Package Versioning](./versioning.mdx): Versioning provides the ability to upgrade packages and objects on the Sui network.
+- [Object Ownership](./object-ownership.mdx): Every object has an owner field that dictates how you can use it in transactions.
+- [`sui-protocol-config`](https://github.com/MystenLabs/sui/blob/main/crates/sui-protocol-config/src/lib.rs): Crate that defines the `ProtocolConfig` struct with limit definitions.
+- [Building against Limits](https://move-book.com/guides/building-against-limits.html): The Move Book provides a concise overview for limits most projects deal with.
\ No newline at end of file
diff --git a/docs/content/concepts/object-ownership.mdx b/docs/content/concepts/object-ownership.mdx
index 9a1e9e15a88ec..a2ab513c6e644 100644
--- a/docs/content/concepts/object-ownership.mdx
+++ b/docs/content/concepts/object-ownership.mdx
@@ -1,5 +1,6 @@
 ---
 title: Object Ownership
+description: Every object has an owner field that dictates how you can use it in transactions. Each object is either address-owned, dynamic fields, immutable, shared, or wrapped.
 ---
 
 Every object has an owner field that dictates how you can use it in transactions. Objects can have the following types of ownership:
diff --git a/docs/content/concepts/tokenomics.mdx b/docs/content/concepts/tokenomics.mdx
index e005f0605d55f..08c545def53ef 100644
--- a/docs/content/concepts/tokenomics.mdx
+++ b/docs/content/concepts/tokenomics.mdx
@@ -4,7 +4,7 @@ title: Sui Tokenomics
 
 The collective ideation that the term tokenomics encompasses includes a wide range of concepts that define the science and behavior of blockchain economies. In basic terms, tokenomics are the financial foundation of blockchains. Much the same way a building with a poor foundation is doomed to fail, a blockchain without a well-researched, extensively planned, and painstakingly implemented token economy eventually crumbles.
 
-Sui tokenomics are based on sound financial concepts informed by extensive blockchain research. Designed for scale, the Sui tokenomic structure is designed to support the financial needs of web3 now and into the future.
+Sui tokenomics are based on sound financial concepts informed by extensive blockchain research. Designed for scale, the Sui tokenomics structure is designed to support the financial needs of web3 now and into the future.
 
 ## The Sui economy
 
@@ -34,4 +34,4 @@ The following flowchart presents the tokenomic flow of Sui at a high level. Refe
 
 ## Tokenomics whitepaper
 
-Beyond the topics in this section of the documentation, you can read [The Sui Smart Contracts Platform: Economics and Incentives](/paper/tokenomics.pdf) whitepaper to learn more about tokenomic design on Sui.
+Beyond the topics in this section of the documentation, you can read [The Sui Smart Contracts Platform: Economics and Incentives](/paper/tokenomics.pdf) whitepaper to learn more about tokenomics design on Sui.
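The network tabs above are populated by querying each network's full node. As a hedged sketch of the same lookup from Rust, assuming `reqwest` (with the `blocking` and `json` features) and `serde_json` as dependencies, and assuming the response shape (`protocolVersion`, `attributes`) matches the node's current JSON-RPC output:

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The same JSON-RPC call the docs component issues against each network's
    // full node; swap the URL for testnet or devnet as needed.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "sui_getProtocolConfig",
        "params": [],
    });

    let response: Value = reqwest::blocking::Client::new()
        .post("https://fullnode.mainnet.sui.io:443")
        .json(&request)
        .send()?
        .json()?;

    // Field names here are assumptions: the result reports the active version
    // plus a map of named limit values under `attributes`.
    println!("protocol version: {}", response["result"]["protocolVersion"]);
    println!("attributes: {}", response["result"]["attributes"]);
    Ok(())
}
```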
diff --git a/docs/content/concepts/tokenomics/gas-in-sui.mdx b/docs/content/concepts/tokenomics/gas-in-sui.mdx index 4a33dbb6d547b..9e7505248eca6 100644 --- a/docs/content/concepts/tokenomics/gas-in-sui.mdx +++ b/docs/content/concepts/tokenomics/gas-in-sui.mdx @@ -13,7 +13,7 @@ Finally, Sui [Storage mechanics](storage-fund.mdx#storage-fund-rewards) provide `net_gas_fees = computation_gas_fee + storage_gas_fee - storage_rebate` -The information on net gas fees displays in a Sui network explorer for each transaction block: +The information on net gas fees is displayed in a Sui network explorer for each transaction block: ![Gas Fees displayed on a Sui network explorer](images/gas-fees-explorer.png "The Gas Fees section displayed on a Sui network explorer") _The Gas Fees section for a transaction block displayed on a Sui network explorer_ diff --git a/docs/content/concepts/tokenomics/staking-unstaking.mdx b/docs/content/concepts/tokenomics/staking-unstaking.mdx index 4fbacb7789b47..593f91aa59376 100644 --- a/docs/content/concepts/tokenomics/staking-unstaking.mdx +++ b/docs/content/concepts/tokenomics/staking-unstaking.mdx @@ -19,7 +19,7 @@ Similar to staking, a user withdraws stake from a validator by sending a transac When you stake on Sui, you have to choose a specific validator you would like to stake with. The choice of validator can potentially impact the amount of staking rewards you receive. The factors determining this amount include, but are not limited to: -- Validator commission rate: a validator can choose to set a non-zero commission rate specifying the percentage of staking rewards they are taking from the stakers. For example, if a validator has the commission rate of 10%, then 10% of every staker's staking rewards is given to the validator. Understand that a validator can choose its commission at a future moment in time without prior notice. +- Validator commission rate: a validator can choose to set a non-zero commission rate specifying the percentage of staking rewards they are taking from the stakers. For example, if a validator has a commission rate of 10%, then 10% of every staker's staking rewards is given to the validator. Understand that a validator can choose its commission at a future moment in time without prior notice. - Validator performance: a validator with bad performance might be punished according to the [tallying rule](./gas-pricing.mdx#tallying-rule). Punished validators do not receive any staking rewards for the epoch during which they are punished, and you also do not receive that epoch's rewards when you withdraw your stake from that validator. Sui-compatible crypto wallets and explorers typically provide validator information such as commission and APY. See the respective documentation for these tools for information on how to retrieve this data. diff --git a/docs/content/concepts/tokenomics/storage-fund.mdx b/docs/content/concepts/tokenomics/storage-fund.mdx index 23cc36707bfc3..69500cbec1836 100644 --- a/docs/content/concepts/tokenomics/storage-fund.mdx +++ b/docs/content/concepts/tokenomics/storage-fund.mdx @@ -40,5 +40,5 @@ The key property of the rebate function is that it limits storage fund outflows The storage fund introduces various desirable incentives into the Sui economy: - Its mechanics incentivize users to delete data and obtain a rebate on their storage fees when the cost of storing such data exceeds the value obtained from maintaining that data on-chain. 
This introduces a useful market-based mechanism where users free storage when it becomes uneconomical for them to keep it. -- It creates deflationary pressure over the SUI token in that increased activity leads to larger storage requirements and to more SUI removed from circulation. +- It creates deflationary pressure over the SUI token in that increased activity leads to larger storage requirements and to more SUI being removed from circulation. - It is capital efficient in that it is economically equivalent to a rent model where users pay for storage through a pay-per-period model. diff --git a/docs/content/guides/developer/advanced/custom-indexer.mdx b/docs/content/guides/developer/advanced/custom-indexer.mdx index eb325e53a2825..7eb800f4108d2 100644 --- a/docs/content/guides/developer/advanced/custom-indexer.mdx +++ b/docs/content/guides/developer/advanced/custom-indexer.mdx @@ -37,6 +37,8 @@ The most straightforward stream source is to subscribe to a remote store of chec - Testnet: `https://checkpoints.testnet.sui.io` - Mainnet: `https://checkpoints.mainnet.sui.io` +The checkpoint files are stored in the following format: `https://checkpoints.testnet.sui.io/.chk`. You can download the checkpoint file by sending an HTTP GET request to the relevant URL. Try it yourself for checkpoint 1 at [https://checkpoints.testnet.sui.io/1.chk](https://checkpoints.testnet.sui.io/1.chk). + ```mermaid flowchart LR A("fa:fa-cloud Cloud storage(S3, GCP)"); @@ -53,6 +55,12 @@ flowchart LR B-->External ``` +The Sui data ingestion framework provides a helper function to quickly bootstrap an indexer workflow. + +{@inject: examples/custom-indexer/rust/remote_reader.rs} + +This is suitable for setups with a single ingestion pipeline where progress tracking is managed outside of the framework. + ### Local reader Colocate the data ingestion daemon with a Full node and enable checkpoint dumping on the latter to set up a local stream source. After enabling, the Full node starts dumping executed checkpoints as files to a local directory, and the data ingestion daemon subscribes to changes in the directory through an inotify-like mechanism. This approach allows minimizing ingestion latency (checkpoint are processed immediately after a checkpoint executor on a Full node) and getting rid of dependency on an externally managed bucket. @@ -84,77 +92,12 @@ flowchart LR C<-->D("fa:fa-floppy-disk Progress store"); ``` - -### Hybrid mode - -Specify both a local and remote store as a fallback to ensure constant data flow. The framework always prioritizes locally available checkpoint data over remote data. It's useful when you want to start utilizing your own Full node for data ingestion but need to partially backfill historical data or just have a failover. - - -## Examples - -The Sui data ingestion framework provides a helper function to quickly bootstrap an indexer workflow. -```rust -struct CustomWorker; - -#[async_trait] -impl Worker for CustomWorker { - async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> { - // custom processing logic - ... - Ok(()) - } -} - -#[tokio::main] -async fn main() -> Result<()> { - let (executor, term_sender) = setup_single_workflow( - CustomWorker, - "https://checkpoints.mainnet.sui.io".to_string(), - 0, /* initial checkpoint number */ - 5, /* concurrency */ - None, /* extra reader options */ - ).await?; - executor.await?; - Ok(()) -} -``` -This is suitable for setups with a single ingestion pipeline where progress tracking is managed outside of the framework. 
- -For more complex setups, refer to the following example: -```rust -struct CustomWorker; - -#[async_trait] -impl Worker for CustomWorker { - async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> { - // custom processing logic - ... - Ok(()) - } -} - -#[tokio::main] -async fn main() -> Result<()> { - let (exit_sender, exit_receiver) = oneshot::channel(); - let metrics = DataIngestionMetrics::new(&Registry::new()); - let progress_store = FileProgressStore::new("path_to_file"); - let mut executor = IndexerExecutor::new(progress_store, 1 /* number of workflow types */, metrics); - let worker_pool = WorkerPool::new(CustomWorker, "custom worker", 100); - executor.register(worker_pool).await?; - executor.run( - PathBuf::from("..."), // path to a local directory - Some("https://checkpoints.mainnet.sui.io".to_string()), - vec![], // optional remote store access options - exit_receiver, - ).await?; - Ok(()) -} -``` +{@inject: examples/custom-indexer/rust/local_reader.rs} Let's highlight a couple lines of code: ```rust -let worker_pool = WorkerPool::new(CustomWorker, "custom worker", 100); +let worker_pool = WorkerPool::new(CustomWorker, "local_reader".to_string(), concurrency); executor.register(worker_pool).await?; ``` @@ -162,21 +105,30 @@ The data ingestion executor can run multiple workflows simultaneously. For each The concurrency parameter specifies how many threads the workflow uses. Having a concurrency value greater than 1 is helpful when tasks are idempotent and can be processed in parallel and out of order. The executor only updates the progress/watermark to a certain checkpoint when all preceding checkpoints are processed. -## Source code for an implementation {#source-code} +### Hybrid mode + +Specify both a local and remote store as a fallback to ensure constant data flow. The framework always prioritizes locally available checkpoint data over remote data. It's useful when you want to start utilizing your own Full node for data ingestion but need to partially backfill historical data or just have a failover. +```rust +executor.run( + PathBuf::from("./chk".to_string()), // path to a local directory + Some("https://checkpoints.testnet.sui.io".to_string()), // Remote Checkpoint Store + vec![], // optional remote store access options + ReaderOptions::default(), + exit_receiver, + ).await?; +``` -Find the following source code in the [Sui repo](https://github.com/mystenlabs/sui/tree/main/examples/custom-indexer/rust). ### Manifest Code for the cargo.toml manifest file for the custom indexer. -{@inject: examples/custom-indexer/rust/cargo.toml} +{@inject: examples/custom-indexer/rust/Cargo.toml} -### Rust source +## Source code for an implementation {#source-code} -Code for the main.rs file that creates the custom indexer. +Find the following source code in the [Sui repo](https://github.com/mystenlabs/sui/tree/main/examples/custom-indexer/rust). -{@inject: examples/custom-indexer/rust/main.rs} ## Related links diff --git a/docs/content/guides/developer/getting-started/get-address.mdx b/docs/content/guides/developer/getting-started/get-address.mdx index 005941c82ee87..7c94530484e27 100644 --- a/docs/content/guides/developer/getting-started/get-address.mdx +++ b/docs/content/guides/developer/getting-started/get-address.mdx @@ -7,7 +7,7 @@ An address is a way to uniquely and anonymously identify an account that exists The Sui address is unique, similarly to the way a social security number or a personal identification number is unique to one person. 
However, in Sui you can create and own multiple addresses, all of which are unique.
 
-In Sui, an address is 32 bytes and is often encoded in base58 with a `0x` prefix. For example, this is a valid Sui address: `0x02a212de6a9dfa3a69e22387acfbafbb1a9e591bd9d636e7895dcfc8de05f331`. You can use a Sui network explorer to find more information about this address and the objects it owns.
+In Sui, an address is 32 bytes and is often encoded in hexadecimal with a `0x` prefix. For example, this is a valid Sui address: `0x02a212de6a9dfa3a69e22387acfbafbb1a9e591bd9d636e7895dcfc8de05f331`. You can use a Sui network explorer to find more information about this address and the objects it owns.
 
 If you'd like to understand how a Sui address is derived from private keys and other cryptography related topics, see the [Keys and Addresses](/concepts/cryptography/transaction-auth/keys-addresses.mdx) topic.
diff --git a/docs/content/guides/developer/sui-101/using-events.mdx b/docs/content/guides/developer/sui-101/using-events.mdx
index 79a788b75d069..00797838a7d7d 100644
--- a/docs/content/guides/developer/sui-101/using-events.mdx
+++ b/docs/content/guides/developer/sui-101/using-events.mdx
@@ -119,11 +119,11 @@ Move smart contracts can call other smart contracts that emit events. For exampl
 
 ## Examples
 
-## Subscribe to event
+### Subscribe to event
 
 This example leverages the Sui TypeScript SDK to subscribe to events the package with ID `<PACKAGE_ID>` emits. Each time the event fires, the code displays the response to the console.
 
-### TypeScript
+#### TypeScript
 
 To create the event subscription, you can use a basic Node.js app. You need the Sui TypeScript SDK, so install the module using `npm install @mysten/sui` at the root of your project. In your TypeScript code, import `JsonRpcProvider` and a connection from the library.
 
@@ -161,7 +161,7 @@ process.on('SIGINT', async () => {
 });
 ```
 
-### Response
+#### Response
 
 When the subscribed to event fires, the example displays the following JSON representation of the event.
 
@@ -186,7 +186,7 @@ subscribeEvent {
 }
 ```
 
-### Rust SDK
+#### Rust SDK
 
 ```rust
 use futures::StreamExt;
@@ -207,7 +207,7 @@ async fn main() -> Result<()> {
 }
 ```
 
-## Filtering event queries
+### Filtering event queries
 
 To filter the events returned from your queries, use the following data structures.
 
@@ -230,7 +230,13 @@ This set of filters applies only to event querying APIs. It differs from the fil
 | `Object`          | Return events associated with the given object           | `{"Object":"0x727b37454ab13d5c1dbb22e8741bff72b145d1e660f71b275c01f24e7860e5e5"}` |
 | `TimeRange`       | Return events emitted in [start_time, end_time] interval | `{"TimeRange":{"startTime":1669039504014, "endTime":1669039604014}}`              |
 
-## Filtering events for subscription
+### Filtering events for subscription (deprecated)
+
+:::warning
+
+This section is deprecated beginning with Sui client version 1.28. Use the [custom indexer](../advanced/custom-indexer.mdx) section to learn about how to stream checkpoints and filter events continuously.
+
+:::
 
 To create a subscription, you can set a filter to return only the set of events you're interested in listening for.
 
@@ -241,6 +247,7 @@ This set of filters applies only to event subscription APIs.
It differs from the filter set provided for the event query APIs.
 
 :::
 
+
 | Filter            | Description                                            | JSON-RPC Parameter Example  |
 | ----------------- | ------------------------------------------------------ | --------------------------- |
 | `Package`         | Move package ID                                         | `{"Package":"<PACKAGE_ID>"}` |
diff --git a/docs/content/standards/kiosk.mdx b/docs/content/standards/kiosk.mdx
index b22b65fbdd735..658e1614e1e78 100644
--- a/docs/content/standards/kiosk.mdx
+++ b/docs/content/standards/kiosk.mdx
@@ -3,11 +3,11 @@ title: Sui Kiosk
 description: Kiosk is a decentralized system for commerce applications on Sui. Kiosk is a part of the Sui framework, native to the system, and available to everyone.
 ---
 
-Kiosk is a decentralized system for commerce applications on Sui. It consists of Kiosks - shared objects owned by individual parties that store assets and allow listing them for sale as well as utilize custom trading functionality - for example, an auction. While being highly decentralized, Kiosk provides a set of strong guarantees:
+Kiosk is a decentralized system for commerce applications on Sui. It consists of `Kiosk` objects - shared objects owned by individual parties that store assets and allow listing them for sale as well as utilize custom trading functionality - for example, an auction. While being highly decentralized, Sui Kiosk provides a set of strong guarantees:
 
 - Kiosk owners retain ownership of their assets to the moment of purchase.
 - Creators set custom policies - sets of rules applied to every trade (such as pay royalty fee or do some arbitrary action X).
-- Marketplaces can index events the Kiosk emits and subscribe to a single feed for on-chain asset trading.
+- Marketplaces can index events the `Kiosk` object emits and subscribe to a single feed for on-chain asset trading.
 
 Practically, Kiosk is a part of the Sui framework, and it is native to the system and available to everyone out of the box.
 
@@ -23,7 +23,7 @@ Anyone can create a Sui Kiosk. Ownership of a kiosk is determined by the owner o
 
 To sell an item, if there is an existing transfer policy for the type (T), you just add your assets to your kiosk and then list them. You specify an offer amount when you list an item. Anyone can then purchase the item for the amount of SUI specified in the listing. The associated transfer policy determines what the buyer can do with the purchased asset.
 
-A Kiosk owner can:
+A kiosk owner can:
 
 - Place and take items
 - List items for sale
@@ -34,13 +34,13 @@ A Kiosk owner can:
 
 ## Sui Kiosk for buyers
 
-A buyer is a party that purchases (or - more generally - receives) items from Kiosks, anyone on the network can be a Buyer (and, for example, a Kiosk Owner at the same time).
+A buyer is a party that purchases (or - more generally - receives) items from kiosks, anyone on the network can be a buyer (and, for example, a kiosk owner at the same time).
-** Benefits:** +**Benefits:** - Buyers get access to global liquidity and can get the best offer -- Buyers can place bids on collections through their Kiosks -- Most of the actions performed in Kiosks are free (gas-less) for Buyers +- Buyers can place bids on collections through their kiosks +- Most buyer actions performed in kiosks clean up seller objects, which results in free (gas-less) actions **Responsibilities:** @@ -49,7 +49,7 @@ A buyer is a party that purchases (or - more generally - receives) items from Ki **Guarantees:** -- When using a custom trading logic such as an Auction, the items are guaranteed to be unchanged until the trade is complete +- When using a custom trading logic such as an auction, the items are guaranteed to be unchanged until the trade is complete ## Sui Kiosk for marketplaces @@ -59,22 +59,22 @@ As a marketplace operator, you can implement Sui Kiosk to watch for offers made As a creator, Sui Kiosk supports strong enforcement for transfer policies and associated rules to protect assets and enforce asset ownership. Sui Kiosk gives creators more control over their creations, and puts creators and owners in control of how their works can be used. -Creator is a party that creates and controls the TransferPolicy for a single type. For example, the authors of SuiFrens are the Creators of the `SuiFren` type and act as creators in the Kiosk ecosystem. Creators set the policy, but they might also be the first sellers of their assets through a Kiosk. +Creator is a party that creates and controls the TransferPolicy for a single type. For example, the authors of SuiFrens are the Creators of the `SuiFren` type and act as creators in the Kiosk ecosystem. Creators set the policy, but they might also be the first sellers of their assets through a kiosk. **Creators can:** - Set any rules for trades - Set multiple ways ("tracks") of rules - Enable or disable trades at any moment with a policy -- Enforce policies (eg royalties) on all trades -- Perform a primary sale of their assets through a Kiosk +- Enforce policies (like royalties) on all trades +- Perform a primary sale of their assets through a kiosk All of the above is effective immediately and globally. **Creators cannot:** -- Take or modify items stored in someone else's Kiosk -- Restrict taking items from Kiosks if the "locking" rule was not set in the policy +- Take or modify items stored in someone else's kiosk +- Restrict taking items from kiosks if the "locking" rule was not set in the policy ## Sui Kiosk guarantees @@ -82,7 +82,7 @@ Sui Kiosk provides a set of guarantees that Sui enforces through smart contracts These guarantees include: - Every trade in Sui Kiosk requires a `TransferPolicy` resolution. This gives creators control over how their assets can be traded. -- True Ownership, which means that only a kiosk owner can take, list, borrow, or modify the assets added to their kiosk. This is similar to how single-owner objects work on Sui. +- True ownership, which means that only a kiosk owner can take, list, borrow, or modify the assets added to their kiosk. This is similar to how single-owner objects work on Sui. - Strong policy enforcement, for example Royalty policies, that lets creators enable or disable policies at any time that applies to all trades on the platform for objects with that policy attached. - Changes to a `TransferPolicy` apply instantly and globally. 
@@ -98,10 +98,10 @@ In practice, these guarantees mean that: Sui Kiosk is a shared object that can store heterogeneous values, such as different sets of asset collectibles. When you add an asset to your kiosk, it has one of the following states: -- PLACED - an item placed in the kiosk using the `kiosk::place` function. The Kiosk Owner can withdraw it and use it directly, borrow it (mutably or immutably), or list an item for sale. -- LOCKED - an item placed in the kiosk using the `kiosk::lock` function. You can’t withdraw a Locked item from a kiosk, but you can borrow it mutably and list it for sale. Any item placed in a kiosk that has an associated Kiosk Lock policy have a LOCKED state. +- PLACED - an item placed in the kiosk using the `kiosk::place` function. The kiosk owner can withdraw it and use it directly, borrow it (mutably or immutably), or list an item for sale. +- LOCKED - an item placed in the kiosk using the `kiosk::lock` function. You can’t withdraw a Locked item from a kiosk, but you can borrow it mutably and list it for sale. Any item placed in a kiosk that has an associated kiosk lock policy have a LOCKED state. - LISTED - an item in the kiosk that is listed for sale using the `kiosk::list` or `kiosk::place_and_list` functions. You can’t modify an item while listed, but you can borrow it immutably or delist it, which returns it to its previous state. -- LISTED EXCLUSIVELY - an item placed or locked in the kiosk by an extension that calls the `kiosk::list_with_purchase_cap` function. Only the kiosk owner can approve calling the function. The owner can only borrow it immutably. The extension must provide the functionality to delist / unlock the asset, or it might stay locked forever. Given that this action is explicitly performed by the Owner - it is the responsibility of the Owner to choose verified and audited extensions to use. +- LISTED EXCLUSIVELY - an item placed or locked in the kiosk by an extension that calls the `kiosk::list_with_purchase_cap` function. Only the kiosk owner can approve calling the function. The owner can only borrow it immutably. The extension must provide the functionality to delist / unlock the asset, or it might stay locked forever. Given that this action is explicitly performed by the owner - it is the responsibility of the owner to choose verified and audited extensions to use. When someone purchases an asset from a kiosk, the asset leaves the kiosk and ownership transfers to the buyer’s address. @@ -132,8 +132,8 @@ sui client call \ ### Create a Sui Kiosk with advanced options -For more advanced use cases, when you want to choose the storage model or perform an action right away, you can use the programmable transaction block (PTB) friendly function kiosk::new. -Kiosk is designed to be shared. If you choose a different storage model, such as owned, your kiosk might not function as intended or not be accessible to other users. You can make sure your Kiosk works by testing it on Sui Testnet. +For more advanced use cases, when you want to choose the storage model or perform an action right away, you can use the programmable transaction block (PTB) friendly function `kiosk::new`. +Kiosk is designed to be shared. If you choose a different storage model, such as owned, your kiosk might not function as intended or not be accessible to other users. You can make sure your kiosk works by testing it on Sui Testnet. 
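The next section walks through this flow with programmable transaction blocks. As a Rust-flavored counterpart, here is a minimal, hedged sketch of building such a transaction with `sui-types`' `ProgrammableTransactionBuilder`; the import paths are assumptions, and the steps that share the returned `Kiosk` and transfer the `KioskOwnerCap` are deliberately left out:

```rust
use anyhow::Result;
use move_core_types::identifier::Identifier;
use sui_types::{
    programmable_transaction_builder::ProgrammableTransactionBuilder,
    transaction::ProgrammableTransaction,
    SUI_FRAMEWORK_PACKAGE_ID,
};

/// Sketch: build a transaction that calls `kiosk::new`.
fn kiosk_new_ptb() -> Result<ProgrammableTransaction> {
    let mut builder = ProgrammableTransactionBuilder::new();

    // `kiosk::new` takes no explicit arguments and returns (Kiosk, KioskOwnerCap).
    // A real transaction must go on to share the Kiosk and transfer the cap to
    // the sender; leaving the results unconsumed fails execution because
    // neither value has `drop`.
    let _kiosk_and_cap = builder.programmable_move_call(
        SUI_FRAMEWORK_PACKAGE_ID,
        Identifier::new("kiosk")?,
        Identifier::new("new")?,
        vec![], // no type arguments
        vec![], // no value arguments
    );

    Ok(builder.finish())
}
```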
### Create a Sui Kiosk with advanced options using programmable transaction blocks

@@ -159,11 +159,11 @@ Sui CLI does not support PTBs and transaction chaining yet. You can use the `kio

As a kiosk owner, you can place any assets into your Sui Kiosk. You can take any item from your kiosk that is not currently listed for sale.

-There's no limitations on which assets you can place in your kiosk. However, you can’t necessarily list and trade all of the items you place in your kiosk. The `TransferPolicy` associated with the type for the item determines whether you can trade it. To learn more, see the [Purchase items from a Kiosk](#purchase) section.
+There are no limitations on which assets you can place in your kiosk. However, you can’t necessarily list and trade all of the items you place in your kiosk. The `TransferPolicy` associated with the type for the item determines whether you can trade it. To learn more, see the [Purchase items from a kiosk](#purchase) section.

### Place an item in your kiosk

-To place an item to the Kiosk, the owner needs to call the `sui::kiosk::place` function on the `Kiosk` object and pass the `KioskOwnerCap` and the `Item` as arguments.
+To place an item in the kiosk, the owner needs to call the `sui::kiosk::place` function on the `Kiosk` object and pass the `KioskOwnerCap` and the `Item` as arguments.

`ITEM_TYPE` in the following examples represents the full type of the item.

@@ -281,7 +281,7 @@ Anyone on the network can purchase an item listed from a Sui Kiosk. To learn mor

### List an item from a kiosk

As a kiosk owner, you can use the `kiosk::list` function to list any asset you added to your kiosk. Include the item to sell and the list price as arguments. All listings on Sui are in SUI tokens.
-When you list an item, Sui emits a `kiosk::ItemListed` event that contains the Kiosk ID, Item ID, type of the Item, and the list price.
+When you list an item, Sui emits a `kiosk::ItemListed` event that contains the kiosk ID, item ID, type of the item, and the list price.

### List an item using programmable transaction blocks

@@ -319,7 +319,7 @@ As a kiosk owner you can use the `kiosk::delist` to delist any currently listed

When you delist an item, Sui returns to the kiosk owner the gas fees charged to list the item.

-When you delist an item, Sui emits a `kiosk::ItemDelisted` event that contains the Kiosk ID, Item ID, and the type of the item.
+When you delist an item, Sui emits a `kiosk::ItemDelisted` event that contains the kiosk ID, item ID, and the type of the item.

### Delist an item using the programmable transaction blocks

@@ -351,7 +351,7 @@ sui client call \

## Purchase an item from a kiosk {#purchase}

-Anyone that has an address on the Sui network can purchase an item listed from a Sui Kiosk. To purchase an item, you can use the `kiosk::purchase` function. Specify the item to purchase and pay the list price set by the Kiosk Owner.
+Anyone who has an address on the Sui network can purchase an item listed from a Sui Kiosk. To purchase an item, you can use the `kiosk::purchase` function. Specify the item to purchase and pay the list price set by the kiosk owner.

You can discover the items listed on the network with the `kiosk::ItemListed` event.

@@ -434,7 +434,7 @@ tx.moveCall({

## Withdraw proceeds from a completed sale

-When someone purchases an item, Sui stores the proceeds from the sale in the Kiosk. As the kiosk owner, you can withdraw the proceeds at any time by calling the `kiosk::withdraw` function.
The function is simple, but because it is PTB friendly it is not currently supported in the Sui CLI.
+When someone purchases an item, Sui stores the proceeds from the sale in the kiosk. As the kiosk owner, you can withdraw the proceeds at any time by calling the `kiosk::withdraw` function. The function is simple, but because it is PTB friendly it is not currently supported in the Sui CLI.

### Withdraw proceeds using programmable transaction blocks

@@ -466,4 +466,4 @@ let coin = tx.moveCall({

### Withdraw proceeds using the Sui CLI

-Due to the function being PTB friendly, it is not currently supported in the CLI environment.
+This action is not currently supported in the CLI environment.
diff --git a/docs/site/docusaurus.config.js b/docs/site/docusaurus.config.js
index e784d38744f15..5760ff48882b1 100644
--- a/docs/site/docusaurus.config.js
+++ b/docs/site/docusaurus.config.js
@@ -62,7 +62,7 @@ const config = {
       "@graphql-markdown/docusaurus",
       {
         schema:
-          "../../crates/sui-graphql-rpc/schema/current_progress_schema.graphql",
+          "../../crates/sui-graphql-rpc/schema.graphql",
         rootPath: "../content", // docs will be generated under rootPath/baseURL
         baseURL: "references/sui-api/sui-graphql/reference",
         loaders: {
diff --git a/docs/site/src/components/ProtocolConfig/index.tsx b/docs/site/src/components/ProtocolConfig/index.tsx
new file mode 100644
index 0000000000000..99f660669ddae
--- /dev/null
+++ b/docs/site/src/components/ProtocolConfig/index.tsx
@@ -0,0 +1,117 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+import React, { useState, useEffect } from "react";
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import axios from "axios";
+
+export default function ProtocolConfig() {
+  const data = {
+    jsonrpc: "2.0",
+    id: 1,
+    method: "sui_getProtocolConfig",
+    params: [],
+  };
+  const urls = [
+    "https://fullnode.mainnet.sui.io:443",
+    "https://fullnode.testnet.sui.io:443",
+    "https://fullnode.devnet.sui.io:443",
+  ];
+  const [results, setResults] = useState({
+    mainnet: null,
+    testnet: null,
+    devnet: null,
+  });
+  const [loading, setLoading] = useState(true);
+  const [error, setError] = useState(null);
+
+  const parseResult = (data) => {
+    let items = Object.entries(data);
+
+    return items.map((item) => {
+      if (item[1] === null) {
+        return item;
+      }
+      if (typeof item[1] === "object") {
+        const [k, v] = Object.entries(item[1])[0];
+        return [item[0], k, v];
+      }
+      return item;
+    });
+  };
+
+  const DisplayResults = (props) => {
+    const { results } = props;
+    return (
+      <table>
+        <thead>
+          <tr>
+            <th>Parameter</th>
+            <th>Type</th>
+            <th>Value</th>
+          </tr>
+        </thead>
+        <tbody>
+          {results.map((item, index) => (
+            <tr key={index}>
+              <td>{item[0]}</td>
+              <td>{item[1]}</td>
+              <td>{item[2] ? item[2] : "null"}</td>
+            </tr>
+          ))}
+        </tbody>
+      </table>
+    );
+  };
+
+  useEffect(() => {
+    const fetchData = async () => {
+      try {
+        const responses = await Promise.all(
+          urls.map((url) =>
+            axios.post(url, data, {
+              headers: {
+                "Content-Type": "application/json",
+              },
+            }),
+          ),
+        );
+
+        setResults({
+          mainnet: parseResult(responses[0].data.result.attributes),
+          testnet: parseResult(responses[1].data.result.attributes),
+          devnet: parseResult(responses[2].data.result.attributes),
+        });
+      } catch (err) {
+        setError(err.message);
+      } finally {
+        setLoading(false);
+      }
+    };
+
+    fetchData();
+  }, []);
+
+  if (loading) {
+    return <div>Loading...</div>;
+  }
+
+  if (error) {
+    return <div>Error: {error}</div>;
+  }
+
+  return (
+    <Tabs>
+      <TabItem value="mainnet" label="Mainnet">
+        <DisplayResults results={results.mainnet} />
+      </TabItem>
+      <TabItem value="testnet" label="Testnet">
+        <DisplayResults results={results.testnet} />
+      </TabItem>
+      <TabItem value="devnet" label="Devnet">
+        <DisplayResults results={results.devnet} />
+      </TabItem>
+    </Tabs>
+  );
+}
diff --git a/docs/site/src/pages/index.js b/docs/site/src/pages/index.js
index bfd85f5122d3a..7fa05c6824877 100644
--- a/docs/site/src/pages/index.js
+++ b/docs/site/src/pages/index.js
@@ -42,17 +42,6 @@ export default function Home() {
- - - Tokenomics - - - Cryptography - - - Standards - - + + + Tokenomics + + + Cryptography + + + Standards + +
/tmp/local_reader_progress
+```
+
+Then, create the `chk` directory at the same level as the `local_reader.rs` file
+```sh
+mkdir -p chk
+```
+
+Then, run the local reader example
+```sh
+cargo run --bin local_reader
+```
+
+Finally, copy the checkpoint files to the `chk` directory and the program should process the checkpoint files as they come in
+```sh
+cp $YOUR_CHECKPOINT_FILE chk/
+```
diff --git a/examples/custom-indexer/rust/local_reader.rs b/examples/custom-indexer/rust/local_reader.rs
new file mode 100644
index 0000000000000..6d372749f3600
--- /dev/null
+++ b/examples/custom-indexer/rust/local_reader.rs
@@ -0,0 +1,46 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use tokio::sync::oneshot;
+use anyhow::Result;
+use async_trait::async_trait;
+use sui_types::full_checkpoint_content::CheckpointData;
+use sui_data_ingestion_core as sdic;
+use sdic::{Worker, WorkerPool, ReaderOptions};
+use sdic::{DataIngestionMetrics, FileProgressStore, IndexerExecutor};
+use prometheus::Registry;
+use std::path::PathBuf;
+use std::env;
+
+struct CustomWorker;
+
+#[async_trait]
+impl Worker for CustomWorker {
+    async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> {
+        // custom processing logic
+        println!("Processing local checkpoint: {}", checkpoint.checkpoint_summary.to_string());
+        Ok(())
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let concurrency = 5;
+    let (exit_sender, exit_receiver) = oneshot::channel();
+    let metrics = DataIngestionMetrics::new(&Registry::new());
+    let backfill_progress_file_path =
+        env::var("BACKFILL_PROGRESS_FILE_PATH").unwrap_or("/tmp/local_reader_progress".to_string());
+    let progress_store = FileProgressStore::new(PathBuf::from(backfill_progress_file_path));
+    let mut executor = IndexerExecutor::new(progress_store, 1 /* number of workflow types */, metrics);
+    let worker_pool = WorkerPool::new(CustomWorker, "local_reader".to_string(), concurrency);
+
+    executor.register(worker_pool).await?;
+    executor.run(
+        PathBuf::from("./chk".to_string()), // path to a local directory
+        None, // no remote store URL for a local reader
+        vec![], // optional remote store access options
+        ReaderOptions::default(), // reader options, including the remote read batch size
+        exit_receiver,
+    ).await?;
+    Ok(())
+}
diff --git a/examples/custom-indexer/rust/remote_reader.rs b/examples/custom-indexer/rust/remote_reader.rs
new file mode 100644
index 0000000000000..4c55fcb960772
--- /dev/null
+++ b/examples/custom-indexer/rust/remote_reader.rs
@@ -0,0 +1,32 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Result; +use async_trait::async_trait; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_data_ingestion_core::{Worker, setup_single_workflow}; + +struct CustomWorker; + +#[async_trait] +impl Worker for CustomWorker { + async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> { + // custom processing logic + // print out the checkpoint number + println!("Processing checkpoint: {}", checkpoint.checkpoint_summary.to_string()); + Ok(()) + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let (executor, term_sender) = setup_single_workflow( + CustomWorker, + "https://checkpoints.testnet.sui.io".to_string(), + 0, /* initial checkpoint number */ + 5, /* concurrency */ + None, /* extra reader options */ + ).await?; + executor.await?; + Ok(()) +} diff --git a/external-crates/move/Cargo.toml b/external-crates/move/Cargo.toml index 8bac472e296d6..4aeea3a73a592 100644 --- a/external-crates/move/Cargo.toml +++ b/external-crates/move/Cargo.toml @@ -83,7 +83,7 @@ parking_lot = "0.11.1" paste = "1.0.5" petgraph = "0.5.1" phf = { version = "0.11", features = ["macros"] } -plotters = { version = "0.3.0", default_features = false, features = ["evcxr", "line_series", "histogram"]} +plotters = { version = "0.3.0", default-features = false, features = ["evcxr", "line_series", "histogram"]} pretty = "0.10.0" prettydiff = "0.4.0" primitive-types = { version = "0.10.1", features = ["impl-serde"]} diff --git a/external-crates/move/crates/bytecode-verifier-prop-tests/src/unit_tests/prop_tests.rs b/external-crates/move/crates/bytecode-verifier-prop-tests/src/unit_tests/prop_tests.rs index 2d799592a7107..518ead16b9a99 100644 --- a/external-crates/move/crates/bytecode-verifier-prop-tests/src/unit_tests/prop_tests.rs +++ b/external-crates/move/crates/bytecode-verifier-prop-tests/src/unit_tests/prop_tests.rs @@ -13,9 +13,11 @@ use move_binary_format::{ proptest_types::CompiledModuleStrategyGen, }; use move_bytecode_verifier::{ - ability_field_requirements, constants, instantiation_loops::InstantiationLoopChecker, - DuplicationChecker, InstructionConsistency, RecursiveDataDefChecker, SignatureChecker, + ability_cache::AbilityCache, ability_field_requirements, constants, + instantiation_loops::InstantiationLoopChecker, DuplicationChecker, InstructionConsistency, + RecursiveDataDefChecker, SignatureChecker, }; +use move_bytecode_verifier_meter::dummy::DummyMeter; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, vm_status::StatusCode, }; @@ -28,7 +30,9 @@ proptest! { #[test] fn valid_ability_transitivity(module in CompiledModule::valid_strategy(20)) { - prop_assert!(ability_field_requirements::verify_module(&module).is_ok()); + let module = &module; + let ability_cache = &mut AbilityCache::new(module); + prop_assert!(ability_field_requirements::verify_module(module, ability_cache, &mut DummyMeter).is_ok()); } #[test] @@ -101,18 +105,22 @@ proptest! 
{ #[test] fn check_verifier_passes(module in CompiledModule::valid_strategy(20)) { - DuplicationChecker::verify_module(&module).expect("DuplicationChecker failure"); - SignatureChecker::verify_module(&module).expect("SignatureChecker failure"); - InstructionConsistency::verify_module(&module).expect("InstructionConsistency failure"); - constants::verify_module(&module).expect("constants failure"); - ability_field_requirements::verify_module(&module).expect("ability_field_requirements failure"); - RecursiveDataDefChecker::verify_module(&module).expect("RecursiveDataDefChecker failure"); - InstantiationLoopChecker::verify_module(&module).expect("InstantiationLoopChecker failure"); + let module = &module; + let ability_cache = &mut AbilityCache::new(module); + DuplicationChecker::verify_module(module).expect("DuplicationChecker failure"); + SignatureChecker::verify_module(module, ability_cache, &mut DummyMeter).expect("SignatureChecker failure"); + InstructionConsistency::verify_module(module).expect("InstructionConsistency failure"); + constants::verify_module(module).expect("constants failure"); + ability_field_requirements::verify_module(module, ability_cache, &mut DummyMeter).expect("ability_field_requirements failure"); + RecursiveDataDefChecker::verify_module(module).expect("RecursiveDataDefChecker failure"); + InstantiationLoopChecker::verify_module(module).expect("InstantiationLoopChecker failure"); } #[test] fn valid_signatures(module in CompiledModule::valid_strategy(20)) { - prop_assert!(SignatureChecker::verify_module(&module).is_ok()) + let module = &module; + let ability_cache = &mut AbilityCache::new(module); + prop_assert!(SignatureChecker::verify_module(module, ability_cache, &mut DummyMeter).is_ok()) } #[test] @@ -123,7 +131,9 @@ proptest! { let context = SignatureRefMutation::new(&mut module, mutations); let expected_violations = context.apply(); - let result = SignatureChecker::verify_module(&module); + let module = &module; + let ability_cache = &mut AbilityCache::new(module); + let result = SignatureChecker::verify_module(module, ability_cache, &mut DummyMeter); prop_assert_eq!(expected_violations, result.is_err()); } @@ -136,7 +146,9 @@ proptest! 
{ let context = FieldRefMutation::new(&mut module, mutations); let expected_violations = context.apply(); - let result = SignatureChecker::verify_module(&module); + let module = &module; + let ability_cache = &mut AbilityCache::new(module); + let result = SignatureChecker::verify_module(module, ability_cache, &mut DummyMeter); prop_assert_eq!(expected_violations, result.is_err()); } diff --git a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/code_unit_tests.rs b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/code_unit_tests.rs index befa382686d74..9993b6d0c4027 100644 --- a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/code_unit_tests.rs +++ b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/code_unit_tests.rs @@ -4,15 +4,22 @@ use crate::support::dummy_procedure_module; use move_binary_format::file_format::Bytecode; -use move_bytecode_verifier::CodeUnitVerifier; +use move_bytecode_verifier::ability_cache::AbilityCache; +use move_bytecode_verifier::code_unit_verifier; use move_bytecode_verifier_meter::dummy::DummyMeter; use move_core_types::vm_status::StatusCode; use move_vm_config::verifier::VerifierConfig; #[test] fn invalid_fallthrough_br_true() { - let module = dummy_procedure_module(vec![Bytecode::LdFalse, Bytecode::BrTrue(1)]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::LdFalse, Bytecode::BrTrue(1)]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::INVALID_FALL_THROUGH @@ -21,8 +28,14 @@ fn invalid_fallthrough_br_true() { #[test] fn invalid_fallthrough_br_false() { - let module = dummy_procedure_module(vec![Bytecode::LdTrue, Bytecode::BrFalse(1)]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::LdTrue, Bytecode::BrFalse(1)]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::INVALID_FALL_THROUGH @@ -32,8 +45,14 @@ fn invalid_fallthrough_br_false() { // all non-branch instructions should trigger invalid fallthrough; just check one of them #[test] fn invalid_fallthrough_non_branch() { - let module = dummy_procedure_module(vec![Bytecode::LdTrue, Bytecode::Pop]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::LdTrue, Bytecode::Pop]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::INVALID_FALL_THROUGH @@ -42,22 +61,40 @@ fn invalid_fallthrough_non_branch() { #[test] fn valid_fallthrough_branch() { - let module = dummy_procedure_module(vec![Bytecode::Branch(0)]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::Branch(0)]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + 
module, + ability_cache, + &mut DummyMeter, + ); assert!(result.is_ok()); } #[test] fn valid_fallthrough_ret() { - let module = dummy_procedure_module(vec![Bytecode::Ret]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::Ret]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert!(result.is_ok()); } #[test] fn valid_fallthrough_abort() { - let module = dummy_procedure_module(vec![Bytecode::LdU64(7), Bytecode::Abort]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::LdU64(7), Bytecode::Abort]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert!(result.is_ok()); } @@ -68,10 +105,15 @@ fn test_max_number_of_bytecode() { nops.push(Bytecode::Nop); } nops.push(Bytecode::Ret); - let module = dummy_procedure_module(nops); + let module = &dummy_procedure_module(nops); + let ability_cache = &mut AbilityCache::new(module); - let result = - CodeUnitVerifier::verify_module(&VerifierConfig::default(), &module, &mut DummyMeter); + let result = code_unit_verifier::verify_module( + &VerifierConfig::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert!(result.is_ok()); } @@ -81,14 +123,16 @@ fn test_max_basic_blocks() { .map(|idx| Bytecode::Branch(idx + 1)) .collect::>(); code.push(Bytecode::Ret); - let module = dummy_procedure_module(code); + let module = &dummy_procedure_module(code); + let ability_cache = &mut AbilityCache::new(module); - let result = CodeUnitVerifier::verify_module( + let result = code_unit_verifier::verify_module( &VerifierConfig { max_basic_blocks: Some(16), ..Default::default() }, - &module, + module, + ability_cache, &mut DummyMeter, ); assert_eq!( diff --git a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs index 17676ad1fe557..c3a6a22b7e08b 100644 --- a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs +++ b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs @@ -53,6 +53,6 @@ pub(crate) fn production_config() -> (VerifierConfig, MeterConfig) { bytecode_version: VERSION_MAX, max_variants_in_enum: Some(DEFAULT_MAX_VARIANTS), }, - MeterConfig::default(), + MeterConfig::old_default(), ) } diff --git a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/negative_stack_size_tests.rs b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/negative_stack_size_tests.rs index 9f9eb9327e916..1b160ce6be167 100644 --- a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/negative_stack_size_tests.rs +++ b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/negative_stack_size_tests.rs @@ -4,14 +4,21 @@ use crate::support::dummy_procedure_module; use move_binary_format::file_format::Bytecode; -use move_bytecode_verifier::CodeUnitVerifier; +use move_bytecode_verifier::ability_cache::AbilityCache; +use move_bytecode_verifier::code_unit_verifier; use move_bytecode_verifier_meter::dummy::DummyMeter; use move_core_types::vm_status::StatusCode; #[test] fn one_pop_no_push() { - let module = 
dummy_procedure_module(vec![Bytecode::Pop, Bytecode::Ret]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::Pop, Bytecode::Ret]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::NEGATIVE_STACK_SIZE_WITHIN_BLOCK @@ -21,8 +28,14 @@ fn one_pop_no_push() { #[test] fn one_pop_one_push() { // Height: 0 + (-1 + 1) = 0 would have passed original usage verifier - let module = dummy_procedure_module(vec![Bytecode::ReadRef, Bytecode::Ret]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::ReadRef, Bytecode::Ret]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::NEGATIVE_STACK_SIZE_WITHIN_BLOCK @@ -32,8 +45,14 @@ fn one_pop_one_push() { #[test] fn two_pop_one_push() { // Height: 0 + 1 + (-2 + 1) = 0 would have passed original usage verifier - let module = dummy_procedure_module(vec![Bytecode::LdU64(0), Bytecode::Add, Bytecode::Ret]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::LdU64(0), Bytecode::Add, Bytecode::Ret]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::NEGATIVE_STACK_SIZE_WITHIN_BLOCK @@ -42,8 +61,14 @@ fn two_pop_one_push() { #[test] fn two_pop_no_push() { - let module = dummy_procedure_module(vec![Bytecode::WriteRef, Bytecode::Ret]); - let result = CodeUnitVerifier::verify_module(&Default::default(), &module, &mut DummyMeter); + let module = &dummy_procedure_module(vec![Bytecode::WriteRef, Bytecode::Ret]); + let ability_cache = &mut AbilityCache::new(module); + let result = code_unit_verifier::verify_module( + &Default::default(), + module, + ability_cache, + &mut DummyMeter, + ); assert_eq!( result.unwrap_err().major_status(), StatusCode::NEGATIVE_STACK_SIZE_WITHIN_BLOCK diff --git a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/signature_tests.rs b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/signature_tests.rs index ad7b496b0a23f..b59ef9aa05152 100644 --- a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/signature_tests.rs +++ b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/signature_tests.rs @@ -7,7 +7,8 @@ use move_binary_format::file_format::{ Bytecode::*, CompiledModule, SignatureToken::*, Visibility::Public, *, }; use move_bytecode_verifier::{ - verify_module_unmetered, verify_module_with_config_for_test, SignatureChecker, + ability_cache::AbilityCache, verify_module_unmetered, verify_module_with_config_for_test, + SignatureChecker, }; use move_bytecode_verifier_meter::dummy::DummyMeter; use move_core_types::{ @@ -20,7 +21,8 @@ fn test_reference_of_reference() { m.signatures[0] = Signature(vec![Reference(Box::new(Reference(Box::new( SignatureToken::Bool, ))))]); - let errors = SignatureChecker::verify_module(&m); + let ability_cache = 
&mut AbilityCache::new(&m);
+    let errors = SignatureChecker::verify_module(&m, ability_cache, &mut DummyMeter);
     assert!(errors.is_err());
 }
diff --git a/external-crates/move/crates/move-abstract-interpreter/src/absint.rs b/external-crates/move/crates/move-abstract-interpreter/src/absint.rs
index 5742c01504cf0..d8dc34a0a5fd2 100644
--- a/external-crates/move/crates/move-abstract-interpreter/src/absint.rs
+++ b/external-crates/move/crates/move-abstract-interpreter/src/absint.rs
@@ -202,23 +202,23 @@ impl<'a> FunctionContext<'a> {
         self.index
     }

-    pub fn code(&self) -> &CodeUnit {
+    pub fn code(&self) -> &'a CodeUnit {
         self.code
     }

-    pub fn parameters(&self) -> &Signature {
+    pub fn parameters(&self) -> &'a Signature {
         self.parameters
     }

-    pub fn return_(&self) -> &Signature {
+    pub fn return_(&self) -> &'a Signature {
         self.return_
     }

-    pub fn locals(&self) -> &Signature {
+    pub fn locals(&self) -> &'a Signature {
         self.locals
     }

-    pub fn type_parameters(&self) -> &[AbilitySet] {
+    pub fn type_parameters(&self) -> &'a [AbilitySet] {
         self.type_parameters
     }

diff --git a/external-crates/move/crates/move-analyzer/editors/code/package-lock.json b/external-crates/move/crates/move-analyzer/editors/code/package-lock.json
index 1e990b17166cc..ebb58e03531da 100644
--- a/external-crates/move/crates/move-analyzer/editors/code/package-lock.json
+++ b/external-crates/move/crates/move-analyzer/editors/code/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "move",
-  "version": "1.0.8",
+  "version": "1.0.10",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "move",
-      "version": "1.0.8",
+      "version": "1.0.10",
       "license": "Apache-2.0",
       "dependencies": {
         "command-exists": "^1.2.9",
diff --git a/external-crates/move/crates/move-analyzer/src/analysis/mod.rs b/external-crates/move/crates/move-analyzer/src/analysis/mod.rs
index d1e3391e262e1..6f2a94305dea9 100644
--- a/external-crates/move/crates/move-analyzer/src/analysis/mod.rs
+++ b/external-crates/move/crates/move-analyzer/src/analysis/mod.rs
@@ -1,4 +1,5 @@
 // Copyright (c) The Move Contributors
 // SPDX-License-Identifier: Apache-2.0

+pub mod parsing_analysis;
 pub mod typing_analysis;
diff --git a/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs b/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs
new file mode 100644
index 0000000000000..710a007dd9d24
--- /dev/null
+++ b/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs
@@ -0,0 +1,848 @@
+// Copyright (c) The Move Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+    symbols::{
+        add_member_use_def, ignored_function, CallInfo, CursorContext, CursorDefinition,
+        CursorPosition, DefMap, ModuleDefs, References, UseDef, UseDefMap,
+    },
+    utils::loc_start_to_lsp_position_opt,
+};
+
+use lsp_types::Position;
+
+use std::collections::BTreeMap;
+
+use move_compiler::{
+    expansion::ast as E,
+    parser::ast as P,
+    shared::{files::MappedFiles, Identifier, Name, NamedAddressMap, NamedAddressMaps},
+};
+use move_ir_types::location::*;
+
+pub struct ParsingAnalysisContext<'a> {
+    /// Outermost definitions in a module (structs, consts, functions), keyed on a ModuleIdent
+    /// string so that we can access it regardless of the ModuleIdent representation
+    /// (e.g., in the parsing AST or in the typing AST)
+    pub mod_outer_defs: &'a mut BTreeMap<String, ModuleDefs>,
+    /// Mapped file information for translating locations into positions
+    pub files: &'a MappedFiles,
+    /// Associates uses for a given definition to allow displaying all references
+    pub references: &'a mut References,
+    /// Additional information about definitions
+    pub def_info: &'a mut DefMap,
+    /// A UseDefMap for a given module (needs to be appropriately set before the module
+    /// processing starts)
+    pub use_defs: UseDefMap,
+    /// Current module identifier string (needs to be appropriately set before the module
+    /// processing starts)
+    pub current_mod_ident_str: Option<String>,
+    /// Module name lengths in access paths for a given module (needs to be appropriately
+    /// set before the module processing starts)
+    pub alias_lengths: BTreeMap<Position, usize>,
+    /// A per-package mapping from package names to their addresses (needs to be appropriately set
+    /// before the package processing starts)
+    pub pkg_addresses: &'a NamedAddressMap,
+    /// Cursor contextual information, computed as part of the traversal.
+    pub cursor: Option<&'a mut CursorContext>,
+}
+
+macro_rules! update_cursor {
+    ($cursor:expr, $subject:expr, $kind:ident) => {
+        if let Some(cursor) = &mut $cursor {
+            if $subject.loc.contains(&cursor.loc) {
+                cursor.position = CursorPosition::$kind($subject.clone());
+            }
+        };
+    };
+    (IDENT, $cursor:expr, $subject:expr, $kind:ident) => {
+        if let Some(cursor) = &mut $cursor {
+            if $subject.loc().contains(&cursor.loc) {
+                cursor.position = CursorPosition::$kind($subject.clone());
+            }
+        };
+    };
+}
+
+impl<'a> ParsingAnalysisContext<'a> {
+    /// Get symbols for the whole program
+    pub fn prog_symbols(
+        &mut self,
+        prog: &'a P::Program,
+        mod_use_defs: &mut BTreeMap<String, UseDefMap>,
+        mod_to_alias_lengths: &mut BTreeMap<String, BTreeMap<Position, usize>>,
+    ) {
+        prog.source_definitions.iter().for_each(|pkg_def| {
+            self.pkg_symbols(
+                &prog.named_address_maps,
+                pkg_def,
+                mod_use_defs,
+                mod_to_alias_lengths,
+            )
+        });
+        prog.lib_definitions.iter().for_each(|pkg_def| {
+            self.pkg_symbols(
+                &prog.named_address_maps,
+                pkg_def,
+                mod_use_defs,
+                mod_to_alias_lengths,
+            )
+        });
+    }
+
+    /// Get symbols for the whole package
+    fn pkg_symbols(
+        &mut self,
+        pkg_address_maps: &'a NamedAddressMaps,
+        pkg_def: &P::PackageDefinition,
+        mod_use_defs: &mut BTreeMap<String, UseDefMap>,
+        mod_to_alias_lengths: &mut BTreeMap<String, BTreeMap<Position, usize>>,
+    ) {
+        if let P::Definition::Module(mod_def) = &pkg_def.def {
+            let pkg_addresses = pkg_address_maps.get(pkg_def.named_address_map);
+            let old_addresses = std::mem::replace(&mut self.pkg_addresses, pkg_addresses);
+            self.mod_symbols(mod_def, mod_use_defs, mod_to_alias_lengths);
+            self.current_mod_ident_str = None;
+            let _ = std::mem::replace(&mut self.pkg_addresses, old_addresses);
+        }
+    }
+
+    fn attr_symbols(&mut self, sp!(_, attr): P::Attribute) {
+        use P::Attribute_ as A;
+        match attr {
+            A::Name(_) => (),
+            A::Assigned(_, v) => {
+                update_cursor!(self.cursor, *v, Attribute);
+            }
+            A::Parameterized(_, sp!(_, attributes)) => {
+                attributes.iter().for_each(|a| self.attr_symbols(a.clone()))
+            }
+        }
+    }
+
+    /// Get symbols for the whole module
+    fn mod_symbols(
+        &mut self,
+        mod_def: &P::ModuleDefinition,
+        mod_use_defs: &mut BTreeMap<String, UseDefMap>,
+        mod_to_alias_lengths: &mut BTreeMap<String, BTreeMap<Position, usize>>,
+    ) {
+        // parsing symbolicator is currently only responsible for processing use declarations
+        let Some(mod_ident_str) = parsing_mod_def_to_map_key(self.pkg_addresses, mod_def) else {
+            return;
+        };
+        assert!(self.current_mod_ident_str.is_none());
+        self.current_mod_ident_str = Some(mod_ident_str.clone());
+
+        let use_defs = mod_use_defs.remove(&mod_ident_str).unwrap();
+        let old_defs = std::mem::replace(&mut self.use_defs, use_defs);
+        let alias_lengths: BTreeMap<Position, usize> = BTreeMap::new();
+        let old_alias_lengths = std::mem::replace(&mut self.alias_lengths, alias_lengths);
+ mod_def + .attributes + .iter() + .for_each(|sp!(_, attrs)| attrs.iter().for_each(|a| self.attr_symbols(a.clone()))); + + for m in &mod_def.members { + use P::ModuleMember as MM; + match m { + MM::Function(fun) => { + if ignored_function(fun.name.value()) { + continue; + } + + // Unit returns span the entire function signature, so we process them first + // for cursor ordering. + self.type_symbols(&fun.signature.return_type); + + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. + if let Some(cursor) = &mut self.cursor { + if fun.name.loc().contains(&cursor.loc) { + cursor.position = CursorPosition::DefName; + debug_assert!(cursor.defn_name.is_none()); + cursor.defn_name = Some(CursorDefinition::Function(fun.name)); + } else if fun.loc.contains(&cursor.loc) { + cursor.defn_name = Some(CursorDefinition::Function(fun.name)); + } + }; + + fun.attributes.iter().for_each(|sp!(_, attrs)| { + attrs.iter().for_each(|a| self.attr_symbols(a.clone())) + }); + + for (_, x, t) in fun.signature.parameters.iter() { + update_cursor!(IDENT, self.cursor, x, Parameter); + self.type_symbols(t) + } + + if fun.macro_.is_some() { + // we currently do not process macro function bodies + // in the parsing symbolicator (and do very limited + // processing in typing symbolicator) + continue; + } + if let P::FunctionBody_::Defined(seq) = &fun.body.value { + self.seq_symbols(seq); + }; + } + MM::Struct(sdef) => { + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. + if let Some(cursor) = &mut self.cursor { + if sdef.name.loc().contains(&cursor.loc) { + cursor.position = CursorPosition::DefName; + debug_assert!(cursor.defn_name.is_none()); + cursor.defn_name = Some(CursorDefinition::Struct(sdef.name)); + } else if sdef.loc.contains(&cursor.loc) { + cursor.defn_name = Some(CursorDefinition::Struct(sdef.name)); + } + }; + + sdef.attributes.iter().for_each(|sp!(_, attrs)| { + attrs.iter().for_each(|a| self.attr_symbols(a.clone())) + }); + + match &sdef.fields { + P::StructFields::Named(v) => v.iter().for_each(|(x, t)| { + self.field_defn(x); + self.type_symbols(t) + }), + P::StructFields::Positional(v) => { + v.iter().for_each(|t| self.type_symbols(t)) + } + P::StructFields::Native(_) => (), + } + } + MM::Enum(edef) => { + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. + if let Some(cursor) = &mut self.cursor { + if edef.name.loc().contains(&cursor.loc) { + cursor.position = CursorPosition::DefName; + debug_assert!(cursor.defn_name.is_none()); + cursor.defn_name = Some(CursorDefinition::Enum(edef.name)); + } else if edef.loc.contains(&cursor.loc) { + cursor.defn_name = Some(CursorDefinition::Enum(edef.name)); + } + }; + + edef.attributes.iter().for_each(|sp!(_, attrs)| { + attrs.iter().for_each(|a| self.attr_symbols(a.clone())) + }); + + let P::EnumDefinition { variants, .. } = edef; + for variant in variants { + let P::VariantDefinition { fields, .. } = variant; + match fields { + P::VariantFields::Named(v) => v.iter().for_each(|(x, t)| { + self.field_defn(x); + self.type_symbols(t) + }), + P::VariantFields::Positional(v) => { + v.iter().for_each(|t| self.type_symbols(t)) + } + P::VariantFields::Empty => (), + } + } + } + MM::Use(use_decl) => self.use_decl_symbols(use_decl), + MM::Friend(fdecl) => self.chain_symbols(&fdecl.friend), + MM::Constant(c) => { + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. 
+ if let Some(cursor) = &mut self.cursor { + if c.name.loc().contains(&cursor.loc) { + cursor.position = CursorPosition::DefName; + debug_assert!(cursor.defn_name.is_none()); + cursor.defn_name = Some(CursorDefinition::Constant(c.name)); + } else if c.loc.contains(&cursor.loc) { + cursor.defn_name = Some(CursorDefinition::Constant(c.name)); + } + }; + + c.attributes.iter().for_each(|sp!(_, attrs)| { + attrs.iter().for_each(|a| self.attr_symbols(a.clone())) + }); + + self.type_symbols(&c.signature); + self.exp_symbols(&c.value); + } + MM::Spec(_) => (), + } + } + self.current_mod_ident_str = None; + let processed_defs = std::mem::replace(&mut self.use_defs, old_defs); + mod_use_defs.insert(mod_ident_str.clone(), processed_defs); + let processed_alias_lengths = std::mem::replace(&mut self.alias_lengths, old_alias_lengths); + mod_to_alias_lengths.insert(mod_ident_str, processed_alias_lengths); + } + + /// Get symbols for a sequence item + fn seq_item_symbols(&mut self, seq_item: &P::SequenceItem) { + use P::SequenceItem_ as I; + + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. + update_cursor!(self.cursor, seq_item, SeqItem); + + match &seq_item.value { + I::Seq(e) => self.exp_symbols(e), + I::Declare(v, to) => { + v.value + .iter() + .for_each(|bind| self.bind_symbols(bind, to.is_some())); + if let Some(t) = to { + self.type_symbols(t); + } + } + I::Bind(v, to, e) => { + v.value + .iter() + .for_each(|bind| self.bind_symbols(bind, to.is_some())); + if let Some(t) = to { + self.type_symbols(t); + } + self.exp_symbols(e); + } + } + } + + fn path_entry_symbols(&mut self, path: &P::PathEntry) { + let P::PathEntry { + name: _, + tyargs, + is_macro: _, + } = path; + if let Some(sp!(_, tyargs)) = tyargs { + tyargs.iter().for_each(|t| self.type_symbols(t)); + } + } + + fn root_path_entry_symbols(&mut self, path: &P::RootPathEntry) { + let P::RootPathEntry { + name: _, + tyargs, + is_macro: _, + } = path; + if let Some(sp!(_, tyargs)) = tyargs { + tyargs.iter().for_each(|t| self.type_symbols(t)); + } + } + + /// Get symbols for an expression + fn exp_symbols(&mut self, exp: &P::Exp) { + use P::Exp_ as E; + fn last_chain_symbol_loc(sp!(_, chain): &P::NameAccessChain) -> Loc { + use P::NameAccessChain_ as NA; + match chain { + NA::Single(entry) => entry.name.loc, + NA::Path(path) => { + if path.entries.is_empty() { + path.root.name.loc + } else { + path.entries.last().unwrap().name.loc + } + } + } + } + + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. 
+ update_cursor!(self.cursor, exp, Exp); + + match &exp.value { + E::Move(_, e) => self.exp_symbols(e), + E::Copy(_, e) => self.exp_symbols(e), + E::Name(chain) => self.chain_symbols(chain), + E::Call(chain, v) => { + self.chain_symbols(chain); + v.value.iter().for_each(|e| self.exp_symbols(e)); + assert!(self.current_mod_ident_str.is_some()); + if let Some(mod_defs) = self + .mod_outer_defs + .get_mut(&self.current_mod_ident_str.clone().unwrap()) + { + mod_defs.call_infos.insert( + last_chain_symbol_loc(chain), + CallInfo::new(/* do_call */ false, &v.value), + ); + }; + } + E::Pack(chain, v) => { + self.chain_symbols(chain); + v.iter().for_each(|(_, e)| self.exp_symbols(e)); + } + E::Vector(_, vo, v) => { + if let Some(v) = vo { + v.iter().for_each(|t| self.type_symbols(t)); + } + v.value.iter().for_each(|e| self.exp_symbols(e)); + } + E::IfElse(e1, e2, oe) => { + self.exp_symbols(e1); + self.exp_symbols(e2); + if let Some(e) = oe.as_ref() { + self.exp_symbols(e) + } + } + E::Match(e, sp!(_, v)) => { + self.exp_symbols(e); + v.iter().for_each(|sp!(_, arm)| { + self.match_pattern_symbols(&arm.pattern); + if let Some(g) = &arm.guard { + self.exp_symbols(g); + } + self.exp_symbols(&arm.rhs); + }) + } + E::While(e1, e2) => { + self.exp_symbols(e1); + self.exp_symbols(e2); + } + E::Loop(e) => self.exp_symbols(e), + E::Labeled(_, e) => self.exp_symbols(e), + E::Block(seq) => self.seq_symbols(seq), + E::Lambda(sp!(_, bindings), to, e) => { + for (sp!(_, v), bto) in bindings { + if let Some(bt) = bto { + self.type_symbols(bt); + } + v.iter() + .for_each(|bind| self.bind_symbols(bind, to.is_some())); + } + if let Some(t) = to { + self.type_symbols(t); + } + self.exp_symbols(e); + } + E::ExpList(l) => l.iter().for_each(|e| self.exp_symbols(e)), + E::Parens(e) => self.exp_symbols(e), + E::Assign(e1, e2) => { + self.exp_symbols(e1); + self.exp_symbols(e2); + } + E::Abort(e) => self.exp_symbols(e), + E::Return(_, oe) => { + if let Some(e) = oe.as_ref() { + self.exp_symbols(e) + } + } + E::Break(_, oe) => { + if let Some(e) = oe.as_ref() { + self.exp_symbols(e) + } + } + E::Dereference(e) => self.exp_symbols(e), + E::UnaryExp(_, e) => self.exp_symbols(e), + E::BinopExp(e1, _, e2) => { + self.exp_symbols(e1); + self.exp_symbols(e2); + } + E::Borrow(_, e) => self.exp_symbols(e), + E::Dot(e, _) => self.exp_symbols(e), + E::DotCall(e, name, _, vo, v) => { + self.exp_symbols(e); + if let Some(v) = vo { + v.iter().for_each(|t| self.type_symbols(t)); + } + v.value.iter().for_each(|e| self.exp_symbols(e)); + assert!(self.current_mod_ident_str.is_some()); + if let Some(mod_defs) = self + .mod_outer_defs + .get_mut(&self.current_mod_ident_str.clone().unwrap()) + { + mod_defs + .call_infos + .insert(name.loc, CallInfo::new(/* do_call */ true, &v.value)); + }; + } + E::Index(e, v) => { + self.exp_symbols(e); + v.value.iter().for_each(|e| self.exp_symbols(e)); + } + E::Cast(e, t) => { + self.exp_symbols(e); + self.type_symbols(t); + } + E::Annotate(e, t) => { + self.exp_symbols(e); + self.type_symbols(t); + } + E::DotUnresolved(_, e) => self.exp_symbols(e), + E::Value(_) + | E::Quant(..) 
+ | E::Unit + | E::Continue(_) + | E::Spec(_) + | E::UnresolvedError => (), + } + } + + fn match_pattern_symbols(&mut self, sp!(_, pattern): &P::MatchPattern) { + use P::MatchPattern_ as MP; + match pattern { + MP::PositionalConstructor(chain, sp!(_, v)) => { + self.chain_symbols(chain); + v.iter().for_each(|e| { + if let P::Ellipsis::Binder(m) = e { + self.match_pattern_symbols(m); + } + }) + } + MP::FieldConstructor(chain, sp!(_, v)) => { + self.chain_symbols(chain); + v.iter().for_each(|e| { + if let P::Ellipsis::Binder((_, m)) = e { + self.match_pattern_symbols(m); + } + }) + } + MP::Name(_, chain) => { + self.chain_symbols(chain); + assert!(self.current_mod_ident_str.is_some()); + if let Some(mod_defs) = self + .mod_outer_defs + .get_mut(&self.current_mod_ident_str.clone().unwrap()) + { + mod_defs.untyped_defs.insert(chain.loc); + }; + } + MP::Or(m1, m2) => { + self.match_pattern_symbols(m2); + self.match_pattern_symbols(m1); + } + MP::At(_, m) => self.match_pattern_symbols(m), + MP::Literal(_) => (), + } + } + + /// Get symbols for a sequence + fn seq_symbols(&mut self, (use_decls, seq_items, _, oe): &P::Sequence) { + use_decls + .iter() + .for_each(|use_decl| self.use_decl_symbols(use_decl)); + + seq_items + .iter() + .for_each(|seq_item| self.seq_item_symbols(seq_item)); + if let Some(e) = oe.as_ref().as_ref() { + self.exp_symbols(e) + } + } + + /// Get symbols for a use declaration + fn use_decl_symbols(&mut self, use_decl: &P::UseDecl) { + use_decl + .attributes + .iter() + .for_each(|sp!(_, attrs)| attrs.iter().for_each(|a| self.attr_symbols(a.clone()))); + + update_cursor!(self.cursor, sp(use_decl.loc, use_decl.use_.clone()), Use); + + match &use_decl.use_ { + P::Use::ModuleUse(mod_ident, mod_use) => { + let mod_ident_str = + parsing_mod_ident_to_map_key(self.pkg_addresses, &mod_ident.value); + self.mod_name_symbol(&mod_ident.value.module, &mod_ident_str); + self.mod_use_symbols(mod_use, &mod_ident_str); + } + P::Use::NestedModuleUses(leading_name, uses) => { + for (mod_name, mod_use) in uses { + let mod_ident_str = parsing_leading_and_mod_names_to_map_key( + self.pkg_addresses, + *leading_name, + *mod_name, + ); + + self.mod_name_symbol(mod_name, &mod_ident_str); + self.mod_use_symbols(mod_use, &mod_ident_str); + } + } + P::Use::Fun { + visibility: _, + function, + ty, + method: _, + } => { + self.chain_symbols(function); + self.chain_symbols(ty); + } + P::Use::Partial { .. } => (), + } + } + + /// Get module name symbol + fn mod_name_symbol(&mut self, mod_name: &P::ModuleName, mod_ident_str: &String) { + let Some(mod_defs) = self.mod_outer_defs.get_mut(mod_ident_str) else { + return; + }; + let Some(mod_name_start) = loc_start_to_lsp_position_opt(self.files, &mod_name.loc()) + else { + debug_assert!(false); + return; + }; + self.use_defs.insert( + mod_name_start.line, + UseDef::new( + self.references, + &BTreeMap::new(), + mod_name.loc().file_hash(), + mod_name_start, + mod_defs.name_loc, + &mod_name.value(), + None, + ), + ); + } + + /// Get symbols for a module use + fn mod_use_symbols(&mut self, mod_use: &P::ModuleUse, mod_ident_str: &String) { + match mod_use { + P::ModuleUse::Module(Some(alias_name)) => { + self.mod_name_symbol(alias_name, mod_ident_str); + } + P::ModuleUse::Module(None) => (), // nothing more to do + P::ModuleUse::Members(v) => { + for (name, alias_opt) in v { + self.use_decl_member_symbols(mod_ident_str.clone(), name, alias_opt); + } + } + P::ModuleUse::Partial { .. 
} => (), + } + } + + /// Get symbols for a module member in the use declaration (can be a struct or a function) + fn use_decl_member_symbols( + &mut self, + mod_ident_str: String, + name: &Name, + alias_opt: &Option, + ) { + let Some(mod_defs) = self.mod_outer_defs.get(&mod_ident_str) else { + return; + }; + if let Some(mut ud) = add_member_use_def( + &name.value, + self.files, + mod_defs, + &name.value, + &name.loc, + self.references, + self.def_info, + &mut self.use_defs, + &BTreeMap::new(), + ) { + // it's a struct - add it for the alias as well + if let Some(alias) = alias_opt { + let Some(alias_start) = loc_start_to_lsp_position_opt(self.files, &alias.loc) + else { + debug_assert!(false); + return; + }; + ud.rename_use( + self.references, + alias.value, + alias_start, + alias.loc.file_hash(), + ); + self.use_defs.insert(alias_start.line, ud); + } + return; + } + if let Some(mut ud) = add_member_use_def( + &name.value, + self.files, + mod_defs, + &name.value, + &name.loc, + self.references, + self.def_info, + &mut self.use_defs, + &BTreeMap::new(), + ) { + // it's a function - add it for the alias as well + if let Some(alias) = alias_opt { + let Some(alias_start) = loc_start_to_lsp_position_opt(self.files, &alias.loc) + else { + debug_assert!(false); + return; + }; + ud.rename_use( + self.references, + alias.value, + alias_start, + alias.loc.file_hash(), + ); + self.use_defs.insert(alias_start.line, ud); + } + } + } + + /// Get symbols for a type + fn type_symbols(&mut self, type_: &P::Type) { + use P::Type_ as T; + + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. + update_cursor!(self.cursor, type_, Type); + + match &type_.value { + T::Apply(chain) => { + self.chain_symbols(chain); + } + T::Ref(_, t) => self.type_symbols(t), + T::Fun(v, t) => { + v.iter().for_each(|t| self.type_symbols(t)); + self.type_symbols(t); + } + T::Multiple(v) => v.iter().for_each(|t| self.type_symbols(t)), + T::Unit => (), + T::UnresolvedError => (), + } + } + + /// Get symbols for a bind statement + fn bind_symbols(&mut self, bind: &P::Bind, explicitly_typed: bool) { + use P::Bind_ as B; + + // If the cursor is in this item, mark that down. + // This may be overridden by the recursion below. + update_cursor!(self.cursor, bind, Binding); + + match &bind.value { + B::Unpack(chain, bindings) => { + self.chain_symbols(chain); + match bindings { + P::FieldBindings::Named(v) => { + for symbol in v { + match symbol { + P::Ellipsis::Binder((_, x)) => self.bind_symbols(x, false), + P::Ellipsis::Ellipsis(_) => (), + } + } + } + P::FieldBindings::Positional(v) => { + for symbol in v.iter() { + match symbol { + P::Ellipsis::Binder(x) => self.bind_symbols(x, false), + P::Ellipsis::Ellipsis(_) => (), + } + } + } + } + } + B::Var(_, var) => { + if !explicitly_typed { + assert!(self.current_mod_ident_str.is_some()); + if let Some(mod_defs) = self + .mod_outer_defs + .get_mut(&self.current_mod_ident_str.clone().unwrap()) + { + mod_defs.untyped_defs.insert(var.loc()); + }; + } + } + } + } + + /// Get symbols for a name access chain + fn chain_symbols(&mut self, sp!(_, chain): &P::NameAccessChain) { + use P::NameAccessChain_ as NA; + // Record the length of all identifiers representing a potentially + // aliased module, struct, enum or function name in an access chain. + // We can conservatively record all identifiers as they are only + // accessed by-location so those irrelevant will never be queried. 
+ match chain { + NA::Single(entry) => { + self.path_entry_symbols(entry); + if let Some(loc) = loc_start_to_lsp_position_opt(self.files, &entry.name.loc) { + self.alias_lengths.insert(loc, entry.name.value.len()); + }; + } + NA::Path(path) => { + let P::NamePath { + root, + entries, + is_incomplete: _, + } = path; + self.root_path_entry_symbols(root); + if let Some(root_loc) = loc_start_to_lsp_position_opt(self.files, &root.name.loc) { + if let P::LeadingNameAccess_::Name(n) = root.name.value { + self.alias_lengths.insert(root_loc, n.value.len()); + } + }; + entries.iter().for_each(|entry| { + self.path_entry_symbols(entry); + if let Some(loc) = loc_start_to_lsp_position_opt(self.files, &entry.name.loc) { + self.alias_lengths.insert(loc, entry.name.value.len()); + }; + }); + } + }; + } + + fn field_defn(&mut self, field: &P::Field) { + // If the cursor is in this item, mark that down. + update_cursor!(IDENT, self.cursor, field, FieldDefn); + } +} + +/// Produces module ident string of the form pkg::module to be used as a map key. +/// It's important that these are consistent between parsing AST and typed AST, +fn parsing_mod_ident_to_map_key( + pkg_addresses: &NamedAddressMap, + mod_ident: &P::ModuleIdent_, +) -> String { + format!( + "{}::{}", + parsed_address(mod_ident.address, pkg_addresses), + mod_ident.module + ) + .to_string() +} + +/// Produces module ident string of the form pkg::module to be used as a map key. +/// It's important that these are consistent between parsing AST and typed AST. +fn parsing_mod_def_to_map_key( + pkg_addresses: &NamedAddressMap, + mod_def: &P::ModuleDefinition, +) -> Option { + // we assume that modules are declared using the PkgName::ModName pattern (which seems to be the + // standard practice) and while Move allows other ways of defining modules (i.e., with address + // preceding a sequence of modules), this method is now deprecated. + // + // TODO: make this function simply return String when the other way of defining modules is + // removed + mod_def + .address + .map(|a| parsing_leading_and_mod_names_to_map_key(pkg_addresses, a, mod_def.name)) +} + +/// Produces module ident string of the form pkg::module to be used as a map key. +/// It's important that these are consistent between parsing AST and typed AST. +fn parsing_leading_and_mod_names_to_map_key( + pkg_addresses: &NamedAddressMap, + ln: P::LeadingNameAccess, + name: P::ModuleName, +) -> String { + format!("{}::{}", parsed_address(ln, pkg_addresses), name).to_string() +} + +/// Converts parsing AST's `LeadingNameAccess` to expansion AST's `Address` (similarly to +/// expansion::translate::top_level_address but disregarding the name portion of `Address` as we +/// only care about actual address here if it's available). We need this to be able to reliably +/// compare parsing AST's module identifier with expansion/typing AST's module identifier, even in +/// presence of module renaming (i.e., we cannot rely on module names if addresses are available). 
+fn parsed_address(ln: P::LeadingNameAccess, pkg_addresses: &NamedAddressMap) -> E::Address { + let sp!(loc, ln_) = ln; + match ln_ { + P::LeadingNameAccess_::AnonymousAddress(bytes) => E::Address::anonymous(loc, bytes), + P::LeadingNameAccess_::GlobalAddress(name) => E::Address::NamedUnassigned(name), + P::LeadingNameAccess_::Name(name) => match pkg_addresses.get(&name.value).copied() { + Some(addr) => E::Address::anonymous(loc, addr), + None => E::Address::NamedUnassigned(name), + }, + } +} diff --git a/external-crates/move/crates/move-analyzer/src/symbols.rs b/external-crates/move/crates/move-analyzer/src/symbols.rs index 46bb538efcee7..aae8c17cbf87b 100644 --- a/external-crates/move/crates/move-analyzer/src/symbols.rs +++ b/external-crates/move/crates/move-analyzer/src/symbols.rs @@ -5,10 +5,14 @@ //! and typed ASTs, in particular identifier definitions to be used for implementing go-to-def, //! go-to-references, and on-hover language server commands. //! -//! There are different structs that are used at different phases of the process, the -//! ParsingSymbolicator and Typing Symbolicator structs are used when building symbolication -//! information and the Symbols struct is summarizes the symbolication results and is used by the -//! language server find definitions and references. +//! The analysis starts with top-level module definitions being processed and then proceeds to +//! process parsed AST (parsing analysis) and typed AST (typing analysis) to gather all the required +//! information which is then summarized in the Symbols struct subsequently used by the language +//! server to find definitions, references, auto-completions, etc. Parsing analysis is largely +//! responsible for processing import statements (no longer available at the level of typed AST) and +//! typing analysis gathers remaining information. In particular, for local definitions, typing +//! analysis builds a scope stack, entering encountered definitions and matching uses to a +//! definition in the innermost scope. //! //! Here is a brief description of how the symbolication information is encoded. Each identifier in //! the source code of a given module is represented by its location (UseLoc struct): line number, @@ -45,17 +49,10 @@ //! We also associate all uses of an identifier with its definition to support //! go-to-references. This is done in a global map from an identifier location (DefLoc) to a set of //! use locations (UseLoc). -//! -//! Symbolication algorithm over typing AST first analyzes all top-level definitions from all -//! modules. ParsingSymbolicator then processes import statements (no longer available at the level -//! of typed AST) and TypingSymbolicator processes function bodies, as well as constant and struct -//! definitions. For local definitions, TypingSymbolicator builds a scope stack, entering -//! encountered definitions and matching uses to a definition in the innermost scope. 
- #![allow(clippy::non_canonical_partial_ord_impl)] use crate::{ - analysis::typing_analysis, + analysis::{parsing_analysis, typing_analysis}, compiler_info::CompilerInfo, context::Context, diagnostics::{lsp_diagnostics, lsp_empty_diagnostics}, @@ -100,7 +97,7 @@ use move_compiler::{ shared::{ files::{FileId, MappedFiles}, unique_map::UniqueMap, - Identifier, Name, NamedAddressMap, NamedAddressMaps, + Identifier, Name, NamedAddressMap, }, typing::{ ast::{ @@ -511,33 +508,6 @@ pub enum CursorDefinition { Struct(P::DatatypeName), Enum(P::DatatypeName), } -/// Data used during symbolication over parsed AST -pub struct ParsingSymbolicator<'a> { - /// Outermost definitions in a module (structs, consts, functions), keyd on a ModuleIdent - /// string so that we can access it regardless of the ModuleIdent representation - /// (e.g., in the parsing AST or in the typing AST) - mod_outer_defs: &'a mut BTreeMap, - /// Mapped file information for translating locations into positions - files: &'a MappedFiles, - /// Associates uses for a given definition to allow displaying all references - references: &'a mut References, - /// Additional information about definitions - def_info: &'a mut DefMap, - /// A UseDefMap for a given module (needs to be appropriately set before the module - /// processing starts) - use_defs: UseDefMap, - /// Current module identifier string (needs to be appropriately set before the module - /// processing starts) - current_mod_ident_str: Option, - /// Module name lengths in access paths for a given module (needs to be appropriately - /// set before the module processing starts) - alias_lengths: BTreeMap, - /// A per-package mapping from package names to their addresses (needs to be appropriately set - /// before the package processint starts) - pkg_addresses: &'a NamedAddressMap, - /// Cursor contextual information, computed as part of the traversal. - cursor: Option<&'a mut CursorContext>, -} type LineOffset = u32; @@ -1365,7 +1335,7 @@ impl UseDef { } /// Given a UseDef, modify just the use name and location (to make it represent an alias). - fn rename_use( + pub fn rename_use( &mut self, references: &mut References, new_name: Symbol, @@ -1817,7 +1787,7 @@ pub fn get_symbols( let mut file_use_defs = BTreeMap::new(); let mut mod_to_alias_lengths = BTreeMap::new(); - let mut parsing_symbolicator = ParsingSymbolicator { + let mut parsing_symbolicator = parsing_analysis::ParsingAnalysisContext { mod_outer_defs: &mut mod_outer_defs, files: &mapped_files, references: &mut references, @@ -2013,64 +1983,6 @@ fn file_sources( .collect() } -/// Produces module ident string of the form pkg::module to be used as a map key. -/// It's important that these are consistent between parsing AST and typed AST, -fn parsing_mod_ident_to_map_key( - pkg_addresses: &NamedAddressMap, - mod_ident: &P::ModuleIdent_, -) -> String { - format!( - "{}::{}", - parsed_address(mod_ident.address, pkg_addresses), - mod_ident.module - ) - .to_string() -} - -/// Produces module ident string of the form pkg::module to be used as a map key. -/// It's important that these are consistent between parsing AST and typed AST. -fn parsing_mod_def_to_map_key( - pkg_addresses: &NamedAddressMap, - mod_def: &P::ModuleDefinition, -) -> Option { - // we assume that modules are declared using the PkgName::ModName pattern (which seems to be the - // standard practice) and while Move allows other ways of defining modules (i.e., with address - // preceding a sequence of modules), this method is now deprecated. 
-    //
-    // TODO: make this function simply return String when the other way of defining modules is
-    // removed
-    mod_def
-        .address
-        .map(|a| parsing_leading_and_mod_names_to_map_key(pkg_addresses, a, mod_def.name))
-}
-
-/// Produces module ident string of the form pkg::module to be used as a map key.
-/// It's important that these are consistent between parsing AST and typed AST.
-fn parsing_leading_and_mod_names_to_map_key(
-    pkg_addresses: &NamedAddressMap,
-    ln: P::LeadingNameAccess,
-    name: P::ModuleName,
-) -> String {
-    format!("{}::{}", parsed_address(ln, pkg_addresses), name).to_string()
-}
-
-/// Converts parsing AST's `LeadingNameAccess` to expansion AST's `Address` (similarly to
-/// expansion::translate::top_level_address but disregarding the name portion of `Address` as we
-/// only care about actual address here if it's available). We need this to be able to reliably
-/// compare parsing AST's module identifier with expansion/typing AST's module identifier, even in
-/// presence of module renaming (i.e., we cannot rely on module names if addresses are available).
-fn parsed_address(ln: P::LeadingNameAccess, pkg_addresses: &NamedAddressMap) -> E::Address {
-    let sp!(loc, ln_) = ln;
-    match ln_ {
-        P::LeadingNameAccess_::AnonymousAddress(bytes) => E::Address::anonymous(loc, bytes),
-        P::LeadingNameAccess_::GlobalAddress(name) => E::Address::NamedUnassigned(name),
-        P::LeadingNameAccess_::Name(name) => match pkg_addresses.get(&name.value).copied() {
-            Some(addr) => E::Address::anonymous(loc, addr),
-            None => E::Address::NamedUnassigned(name),
-        },
-    }
-}
-
 /// Produces module ident string of the form pkg::module to be used as a map key
 /// It's important that these are consistent between parsing AST and typed AST.
 pub fn expansion_mod_ident_to_map_key(mod_ident: &E::ModuleIdent_) -> String {
@@ -2095,18 +2007,6 @@ pub fn empty_symbols() -> Symbols {
     }
 }
 
-/// Some functions defined in a module need to be ignored.
-fn ignored_function(name: Symbol) -> bool {
-    // In test mode (that's how IDE compiles Move source files),
-    // the compiler inserts an dummy function preventing preventing
-    // publishing of modules compiled in test mode. We need to
-    // ignore its definition to avoid spurious on-hover display
-    // of this function's info whe hovering close to `module` keyword.
-    name == UNIT_TEST_POISON_FUN_NAME
-}
-
-/// Main AST traversal functions
-
 fn field_defs_and_types(
     datatype_name: Symbol,
     datatype_loc: Loc,
@@ -2154,6 +2054,15 @@ fn datatype_type_params(data_tparams: &[DatatypeTypeParameter]) -> Vec<(Type, /*
         .collect()
 }
 
+/// Some functions defined in a module need to be ignored.
+pub fn ignored_function(name: Symbol) -> bool {
+    // In test mode (that's how the IDE compiles Move source files), the compiler inserts a dummy
+    // function preventing publishing of modules compiled in test mode. We need to ignore its
+    // definition to avoid spurious on-hover display of this function's info when hovering close to
+    // the `module` keyword.
+    name == UNIT_TEST_POISON_FUN_NAME
+}
+
 /// Get symbols for outer definitions in the module (functions, structs, and consts)
 fn get_mod_outer_defs(
     loc: &Loc,
@@ -2405,748 +2314,6 @@ fn get_mod_outer_defs(
     (mod_defs, use_def_map)
 }
 
-macro_rules! update_cursor {
update_cursor { - ($cursor:expr, $subject:expr, $kind:ident) => { - if let Some(cursor) = &mut $cursor { - if $subject.loc.contains(&cursor.loc) { - cursor.position = CursorPosition::$kind($subject.clone()); - } - }; - }; - (IDENT, $cursor:expr, $subject:expr, $kind:ident) => { - if let Some(cursor) = &mut $cursor { - if $subject.loc().contains(&cursor.loc) { - cursor.position = CursorPosition::$kind($subject.clone()); - } - }; - }; -} - -impl<'a> ParsingSymbolicator<'a> { - /// Get symbols for the whole program - fn prog_symbols( - &mut self, - prog: &'a P::Program, - mod_use_defs: &mut BTreeMap, - mod_to_alias_lengths: &mut BTreeMap>, - ) { - prog.source_definitions.iter().for_each(|pkg_def| { - self.pkg_symbols( - &prog.named_address_maps, - pkg_def, - mod_use_defs, - mod_to_alias_lengths, - ) - }); - prog.lib_definitions.iter().for_each(|pkg_def| { - self.pkg_symbols( - &prog.named_address_maps, - pkg_def, - mod_use_defs, - mod_to_alias_lengths, - ) - }); - } - - /// Get symbols for the whole package - fn pkg_symbols( - &mut self, - pkg_address_maps: &'a NamedAddressMaps, - pkg_def: &P::PackageDefinition, - mod_use_defs: &mut BTreeMap, - mod_to_alias_lengths: &mut BTreeMap>, - ) { - if let P::Definition::Module(mod_def) = &pkg_def.def { - let pkg_addresses = pkg_address_maps.get(pkg_def.named_address_map); - let old_addresses = std::mem::replace(&mut self.pkg_addresses, pkg_addresses); - self.mod_symbols(mod_def, mod_use_defs, mod_to_alias_lengths); - self.current_mod_ident_str = None; - let _ = std::mem::replace(&mut self.pkg_addresses, old_addresses); - } - } - - fn attr_symbols(&mut self, sp!(_, attr): P::Attribute) { - use P::Attribute_ as A; - match attr { - A::Name(_) => (), - A::Assigned(_, v) => { - update_cursor!(self.cursor, *v, Attribute); - } - A::Parameterized(_, sp!(_, attributes)) => { - attributes.iter().for_each(|a| self.attr_symbols(a.clone())) - } - } - } - - /// Get symbols for the whole module - fn mod_symbols( - &mut self, - mod_def: &P::ModuleDefinition, - mod_use_defs: &mut BTreeMap, - mod_to_alias_lengths: &mut BTreeMap>, - ) { - // parsing symbolicator is currently only responsible for processing use declarations - let Some(mod_ident_str) = parsing_mod_def_to_map_key(self.pkg_addresses, mod_def) else { - return; - }; - assert!(self.current_mod_ident_str.is_none()); - self.current_mod_ident_str = Some(mod_ident_str.clone()); - - let use_defs = mod_use_defs.remove(&mod_ident_str).unwrap(); - let old_defs = std::mem::replace(&mut self.use_defs, use_defs); - let alias_lengths: BTreeMap = BTreeMap::new(); - let old_alias_lengths = std::mem::replace(&mut self.alias_lengths, alias_lengths); - - mod_def - .attributes - .iter() - .for_each(|sp!(_, attrs)| attrs.iter().for_each(|a| self.attr_symbols(a.clone()))); - - for m in &mod_def.members { - use P::ModuleMember as MM; - match m { - MM::Function(fun) => { - if ignored_function(fun.name.value()) { - continue; - } - - // Unit returns span the entire function signature, so we process them first - // for cursor ordering. - self.type_symbols(&fun.signature.return_type); - - // If the cursor is in this item, mark that down. - // This may be overridden by the recursion below. 
-                    if let Some(cursor) = &mut self.cursor {
-                        if fun.name.loc().contains(&cursor.loc) {
-                            cursor.position = CursorPosition::DefName;
-                            debug_assert!(cursor.defn_name.is_none());
-                            cursor.defn_name = Some(CursorDefinition::Function(fun.name));
-                        } else if fun.loc.contains(&cursor.loc) {
-                            cursor.defn_name = Some(CursorDefinition::Function(fun.name));
-                        }
-                    };
-
-                    fun.attributes.iter().for_each(|sp!(_, attrs)| {
-                        attrs.iter().for_each(|a| self.attr_symbols(a.clone()))
-                    });
-
-                    for (_, x, t) in fun.signature.parameters.iter() {
-                        update_cursor!(IDENT, self.cursor, x, Parameter);
-                        self.type_symbols(t)
-                    }
-
-                    if fun.macro_.is_some() {
-                        // we currently do not process macro function bodies
-                        // in the parsing symbolicator (and do very limited
-                        // processing in typing symbolicator)
-                        continue;
-                    }
-                    if let P::FunctionBody_::Defined(seq) = &fun.body.value {
-                        self.seq_symbols(seq);
-                    };
-                }
-                MM::Struct(sdef) => {
-                    // If the cursor is in this item, mark that down.
-                    // This may be overridden by the recursion below.
-                    if let Some(cursor) = &mut self.cursor {
-                        if sdef.name.loc().contains(&cursor.loc) {
-                            cursor.position = CursorPosition::DefName;
-                            debug_assert!(cursor.defn_name.is_none());
-                            cursor.defn_name = Some(CursorDefinition::Struct(sdef.name));
-                        } else if sdef.loc.contains(&cursor.loc) {
-                            cursor.defn_name = Some(CursorDefinition::Struct(sdef.name));
-                        }
-                    };
-
-                    sdef.attributes.iter().for_each(|sp!(_, attrs)| {
-                        attrs.iter().for_each(|a| self.attr_symbols(a.clone()))
-                    });
-
-                    match &sdef.fields {
-                        P::StructFields::Named(v) => v.iter().for_each(|(x, t)| {
-                            self.field_defn(x);
-                            self.type_symbols(t)
-                        }),
-                        P::StructFields::Positional(v) => {
-                            v.iter().for_each(|t| self.type_symbols(t))
-                        }
-                        P::StructFields::Native(_) => (),
-                    }
-                }
-                MM::Enum(edef) => {
-                    // If the cursor is in this item, mark that down.
-                    // This may be overridden by the recursion below.
-                    if let Some(cursor) = &mut self.cursor {
-                        if edef.name.loc().contains(&cursor.loc) {
-                            cursor.position = CursorPosition::DefName;
-                            debug_assert!(cursor.defn_name.is_none());
-                            cursor.defn_name = Some(CursorDefinition::Enum(edef.name));
-                        } else if edef.loc.contains(&cursor.loc) {
-                            cursor.defn_name = Some(CursorDefinition::Enum(edef.name));
-                        }
-                    };
-
-                    edef.attributes.iter().for_each(|sp!(_, attrs)| {
-                        attrs.iter().for_each(|a| self.attr_symbols(a.clone()))
-                    });
-
-                    let P::EnumDefinition { variants, .. } = edef;
-                    for variant in variants {
-                        let P::VariantDefinition { fields, .. } = variant;
-                        match fields {
-                            P::VariantFields::Named(v) => v.iter().for_each(|(x, t)| {
-                                self.field_defn(x);
-                                self.type_symbols(t)
-                            }),
-                            P::VariantFields::Positional(v) => {
-                                v.iter().for_each(|t| self.type_symbols(t))
-                            }
-                            P::VariantFields::Empty => (),
-                        }
-                    }
-                }
-                MM::Use(use_decl) => self.use_decl_symbols(use_decl),
-                MM::Friend(fdecl) => self.chain_symbols(&fdecl.friend),
-                MM::Constant(c) => {
-                    // If the cursor is in this item, mark that down.
-                    // This may be overridden by the recursion below.
-                    if let Some(cursor) = &mut self.cursor {
-                        if c.name.loc().contains(&cursor.loc) {
-                            cursor.position = CursorPosition::DefName;
-                            debug_assert!(cursor.defn_name.is_none());
-                            cursor.defn_name = Some(CursorDefinition::Constant(c.name));
-                        } else if c.loc.contains(&cursor.loc) {
-                            cursor.defn_name = Some(CursorDefinition::Constant(c.name));
-                        }
-                    };
-
-                    c.attributes.iter().for_each(|sp!(_, attrs)| {
-                        attrs.iter().for_each(|a| self.attr_symbols(a.clone()))
-                    });
-
-                    self.type_symbols(&c.signature);
-                    self.exp_symbols(&c.value);
-                }
-                MM::Spec(_) => (),
-            }
-        }
-        self.current_mod_ident_str = None;
-        let processed_defs = std::mem::replace(&mut self.use_defs, old_defs);
-        mod_use_defs.insert(mod_ident_str.clone(), processed_defs);
-        let processed_alias_lengths = std::mem::replace(&mut self.alias_lengths, old_alias_lengths);
-        mod_to_alias_lengths.insert(mod_ident_str, processed_alias_lengths);
-    }
-
-    /// Get symbols for a sequence item
-    fn seq_item_symbols(&mut self, seq_item: &P::SequenceItem) {
-        use P::SequenceItem_ as I;
-
-        // If the cursor is in this item, mark that down.
-        // This may be overridden by the recursion below.
-        update_cursor!(self.cursor, seq_item, SeqItem);
-
-        match &seq_item.value {
-            I::Seq(e) => self.exp_symbols(e),
-            I::Declare(v, to) => {
-                v.value
-                    .iter()
-                    .for_each(|bind| self.bind_symbols(bind, to.is_some()));
-                if let Some(t) = to {
-                    self.type_symbols(t);
-                }
-            }
-            I::Bind(v, to, e) => {
-                v.value
-                    .iter()
-                    .for_each(|bind| self.bind_symbols(bind, to.is_some()));
-                if let Some(t) = to {
-                    self.type_symbols(t);
-                }
-                self.exp_symbols(e);
-            }
-        }
-    }
-
-    fn path_entry_symbols(&mut self, path: &P::PathEntry) {
-        let P::PathEntry {
-            name: _,
-            tyargs,
-            is_macro: _,
-        } = path;
-        if let Some(sp!(_, tyargs)) = tyargs {
-            tyargs.iter().for_each(|t| self.type_symbols(t));
-        }
-    }
-
-    fn root_path_entry_symbols(&mut self, path: &P::RootPathEntry) {
-        let P::RootPathEntry {
-            name: _,
-            tyargs,
-            is_macro: _,
-        } = path;
-        if let Some(sp!(_, tyargs)) = tyargs {
-            tyargs.iter().for_each(|t| self.type_symbols(t));
-        }
-    }
-
-    /// Get symbols for an expression
-    fn exp_symbols(&mut self, exp: &P::Exp) {
-        use P::Exp_ as E;
-        fn last_chain_symbol_loc(sp!(_, chain): &P::NameAccessChain) -> Loc {
-            use P::NameAccessChain_ as NA;
-            match chain {
-                NA::Single(entry) => entry.name.loc,
-                NA::Path(path) => {
-                    if path.entries.is_empty() {
-                        path.root.name.loc
-                    } else {
-                        path.entries.last().unwrap().name.loc
-                    }
-                }
-            }
-        }
-
-        // If the cursor is in this item, mark that down.
-        // This may be overridden by the recursion below.
-        update_cursor!(self.cursor, exp, Exp);
-
-        match &exp.value {
-            E::Move(_, e) => self.exp_symbols(e),
-            E::Copy(_, e) => self.exp_symbols(e),
-            E::Name(chain) => self.chain_symbols(chain),
-            E::Call(chain, v) => {
-                self.chain_symbols(chain);
-                v.value.iter().for_each(|e| self.exp_symbols(e));
-                assert!(self.current_mod_ident_str.is_some());
-                if let Some(mod_defs) = self
-                    .mod_outer_defs
-                    .get_mut(&self.current_mod_ident_str.clone().unwrap())
-                {
-                    mod_defs.call_infos.insert(
-                        last_chain_symbol_loc(chain),
-                        CallInfo::new(/* do_call */ false, &v.value),
-                    );
-                };
-            }
-            E::Pack(chain, v) => {
-                self.chain_symbols(chain);
-                v.iter().for_each(|(_, e)| self.exp_symbols(e));
-            }
-            E::Vector(_, vo, v) => {
-                if let Some(v) = vo {
-                    v.iter().for_each(|t| self.type_symbols(t));
-                }
-                v.value.iter().for_each(|e| self.exp_symbols(e));
-            }
-            E::IfElse(e1, e2, oe) => {
-                self.exp_symbols(e1);
-                self.exp_symbols(e2);
-                if let Some(e) = oe.as_ref() {
-                    self.exp_symbols(e)
-                }
-            }
-            E::Match(e, sp!(_, v)) => {
-                self.exp_symbols(e);
-                v.iter().for_each(|sp!(_, arm)| {
-                    self.match_pattern_symbols(&arm.pattern);
-                    if let Some(g) = &arm.guard {
-                        self.exp_symbols(g);
-                    }
-                    self.exp_symbols(&arm.rhs);
-                })
-            }
-            E::While(e1, e2) => {
-                self.exp_symbols(e1);
-                self.exp_symbols(e2);
-            }
-            E::Loop(e) => self.exp_symbols(e),
-            E::Labeled(_, e) => self.exp_symbols(e),
-            E::Block(seq) => self.seq_symbols(seq),
-            E::Lambda(sp!(_, bindings), to, e) => {
-                for (sp!(_, v), bto) in bindings {
-                    if let Some(bt) = bto {
-                        self.type_symbols(bt);
-                    }
-                    v.iter()
-                        .for_each(|bind| self.bind_symbols(bind, to.is_some()));
-                }
-                if let Some(t) = to {
-                    self.type_symbols(t);
-                }
-                self.exp_symbols(e);
-            }
-            E::ExpList(l) => l.iter().for_each(|e| self.exp_symbols(e)),
-            E::Parens(e) => self.exp_symbols(e),
-            E::Assign(e1, e2) => {
-                self.exp_symbols(e1);
-                self.exp_symbols(e2);
-            }
-            E::Abort(e) => self.exp_symbols(e),
-            E::Return(_, oe) => {
-                if let Some(e) = oe.as_ref() {
-                    self.exp_symbols(e)
-                }
-            }
-            E::Break(_, oe) => {
-                if let Some(e) = oe.as_ref() {
-                    self.exp_symbols(e)
-                }
-            }
-            E::Dereference(e) => self.exp_symbols(e),
-            E::UnaryExp(_, e) => self.exp_symbols(e),
-            E::BinopExp(e1, _, e2) => {
-                self.exp_symbols(e1);
-                self.exp_symbols(e2);
-            }
-            E::Borrow(_, e) => self.exp_symbols(e),
-            E::Dot(e, _) => self.exp_symbols(e),
-            E::DotCall(e, name, _, vo, v) => {
-                self.exp_symbols(e);
-                if let Some(v) = vo {
-                    v.iter().for_each(|t| self.type_symbols(t));
-                }
-                v.value.iter().for_each(|e| self.exp_symbols(e));
-                assert!(self.current_mod_ident_str.is_some());
-                if let Some(mod_defs) = self
-                    .mod_outer_defs
-                    .get_mut(&self.current_mod_ident_str.clone().unwrap())
-                {
-                    mod_defs
-                        .call_infos
-                        .insert(name.loc, CallInfo::new(/* do_call */ true, &v.value));
-                };
-            }
-            E::Index(e, v) => {
-                self.exp_symbols(e);
-                v.value.iter().for_each(|e| self.exp_symbols(e));
-            }
-            E::Cast(e, t) => {
-                self.exp_symbols(e);
-                self.type_symbols(t);
-            }
-            E::Annotate(e, t) => {
-                self.exp_symbols(e);
-                self.type_symbols(t);
-            }
-            E::DotUnresolved(_, e) => self.exp_symbols(e),
-            E::Value(_)
-            | E::Quant(..)
-            | E::Unit
-            | E::Continue(_)
-            | E::Spec(_)
-            | E::UnresolvedError => (),
-        }
-    }
-
-    fn match_pattern_symbols(&mut self, sp!(_, pattern): &P::MatchPattern) {
-        use P::MatchPattern_ as MP;
-        match pattern {
-            MP::PositionalConstructor(chain, sp!(_, v)) => {
-                self.chain_symbols(chain);
-                v.iter().for_each(|e| {
-                    if let P::Ellipsis::Binder(m) = e {
-                        self.match_pattern_symbols(m);
-                    }
-                })
-            }
-            MP::FieldConstructor(chain, sp!(_, v)) => {
-                self.chain_symbols(chain);
-                v.iter().for_each(|e| {
-                    if let P::Ellipsis::Binder((_, m)) = e {
-                        self.match_pattern_symbols(m);
-                    }
-                })
-            }
-            MP::Name(_, chain) => {
-                self.chain_symbols(chain);
-                assert!(self.current_mod_ident_str.is_some());
-                if let Some(mod_defs) = self
-                    .mod_outer_defs
-                    .get_mut(&self.current_mod_ident_str.clone().unwrap())
-                {
-                    mod_defs.untyped_defs.insert(chain.loc);
-                };
-            }
-            MP::Or(m1, m2) => {
-                self.match_pattern_symbols(m2);
-                self.match_pattern_symbols(m1);
-            }
-            MP::At(_, m) => self.match_pattern_symbols(m),
-            MP::Literal(_) => (),
-        }
-    }
-
-    /// Get symbols for a sequence
-    fn seq_symbols(&mut self, (use_decls, seq_items, _, oe): &P::Sequence) {
-        use_decls
-            .iter()
-            .for_each(|use_decl| self.use_decl_symbols(use_decl));
-
-        seq_items
-            .iter()
-            .for_each(|seq_item| self.seq_item_symbols(seq_item));
-        if let Some(e) = oe.as_ref().as_ref() {
-            self.exp_symbols(e)
-        }
-    }
-
-    /// Get symbols for a use declaration
-    fn use_decl_symbols(&mut self, use_decl: &P::UseDecl) {
-        use_decl
-            .attributes
-            .iter()
-            .for_each(|sp!(_, attrs)| attrs.iter().for_each(|a| self.attr_symbols(a.clone())));
-
-        update_cursor!(self.cursor, sp(use_decl.loc, use_decl.use_.clone()), Use);
-
-        match &use_decl.use_ {
-            P::Use::ModuleUse(mod_ident, mod_use) => {
-                let mod_ident_str =
-                    parsing_mod_ident_to_map_key(self.pkg_addresses, &mod_ident.value);
-                self.mod_name_symbol(&mod_ident.value.module, &mod_ident_str);
-                self.mod_use_symbols(mod_use, &mod_ident_str);
-            }
-            P::Use::NestedModuleUses(leading_name, uses) => {
-                for (mod_name, mod_use) in uses {
-                    let mod_ident_str = parsing_leading_and_mod_names_to_map_key(
-                        self.pkg_addresses,
-                        *leading_name,
-                        *mod_name,
-                    );
-
-                    self.mod_name_symbol(mod_name, &mod_ident_str);
-                    self.mod_use_symbols(mod_use, &mod_ident_str);
-                }
-            }
-            P::Use::Fun {
-                visibility: _,
-                function,
-                ty,
-                method: _,
-            } => {
-                self.chain_symbols(function);
-                self.chain_symbols(ty);
-            }
-            P::Use::Partial { .. } => (),
-        }
-    }
-
-    /// Get module name symbol
-    fn mod_name_symbol(&mut self, mod_name: &P::ModuleName, mod_ident_str: &String) {
-        let Some(mod_defs) = self.mod_outer_defs.get_mut(mod_ident_str) else {
-            return;
-        };
-        let Some(mod_name_start) = loc_start_to_lsp_position_opt(self.files, &mod_name.loc())
-        else {
-            debug_assert!(false);
-            return;
-        };
-        self.use_defs.insert(
-            mod_name_start.line,
-            UseDef::new(
-                self.references,
-                &BTreeMap::new(),
-                mod_name.loc().file_hash(),
-                mod_name_start,
-                mod_defs.name_loc,
-                &mod_name.value(),
-                None,
-            ),
-        );
-    }
-
-    /// Get symbols for a module use
-    fn mod_use_symbols(&mut self, mod_use: &P::ModuleUse, mod_ident_str: &String) {
-        match mod_use {
-            P::ModuleUse::Module(Some(alias_name)) => {
-                self.mod_name_symbol(alias_name, mod_ident_str);
-            }
-            P::ModuleUse::Module(None) => (), // nothing more to do
-            P::ModuleUse::Members(v) => {
-                for (name, alias_opt) in v {
-                    self.use_decl_member_symbols(mod_ident_str.clone(), name, alias_opt);
-                }
-            }
-            P::ModuleUse::Partial { .. } => (),
-        }
-    }
-
-    /// Get symbols for a module member in the use declaration (can be a struct or a function)
-    fn use_decl_member_symbols(
-        &mut self,
-        mod_ident_str: String,
-        name: &Name,
-        alias_opt: &Option<Name>,
-    ) {
-        let Some(mod_defs) = self.mod_outer_defs.get(&mod_ident_str) else {
-            return;
-        };
-        if let Some(mut ud) = add_member_use_def(
-            &name.value,
-            self.files,
-            mod_defs,
-            &name.value,
-            &name.loc,
-            self.references,
-            self.def_info,
-            &mut self.use_defs,
-            &BTreeMap::new(),
-        ) {
-            // it's a struct - add it for the alias as well
-            if let Some(alias) = alias_opt {
-                let Some(alias_start) = loc_start_to_lsp_position_opt(self.files, &alias.loc)
-                else {
-                    debug_assert!(false);
-                    return;
-                };
-                ud.rename_use(
-                    self.references,
-                    alias.value,
-                    alias_start,
-                    alias.loc.file_hash(),
-                );
-                self.use_defs.insert(alias_start.line, ud);
-            }
-            return;
-        }
-        if let Some(mut ud) = add_member_use_def(
-            &name.value,
-            self.files,
-            mod_defs,
-            &name.value,
-            &name.loc,
-            self.references,
-            self.def_info,
-            &mut self.use_defs,
-            &BTreeMap::new(),
-        ) {
-            // it's a function - add it for the alias as well
-            if let Some(alias) = alias_opt {
-                let Some(alias_start) = loc_start_to_lsp_position_opt(self.files, &alias.loc)
-                else {
-                    debug_assert!(false);
-                    return;
-                };
-                ud.rename_use(
-                    self.references,
-                    alias.value,
-                    alias_start,
-                    alias.loc.file_hash(),
-                );
-                self.use_defs.insert(alias_start.line, ud);
-            }
-        }
-    }
-
-    /// Get symbols for a type
-    fn type_symbols(&mut self, type_: &P::Type) {
-        use P::Type_ as T;
-
-        // If the cursor is in this item, mark that down.
-        // This may be overridden by the recursion below.
-        update_cursor!(self.cursor, type_, Type);
-
-        match &type_.value {
-            T::Apply(chain) => {
-                self.chain_symbols(chain);
-            }
-            T::Ref(_, t) => self.type_symbols(t),
-            T::Fun(v, t) => {
-                v.iter().for_each(|t| self.type_symbols(t));
-                self.type_symbols(t);
-            }
-            T::Multiple(v) => v.iter().for_each(|t| self.type_symbols(t)),
-            T::Unit => (),
-            T::UnresolvedError => (),
-        }
-    }
-
-    /// Get symbols for a bind statement
-    fn bind_symbols(&mut self, bind: &P::Bind, explicitly_typed: bool) {
-        use P::Bind_ as B;
-
-        // If the cursor is in this item, mark that down.
-        // This may be overridden by the recursion below.
-        update_cursor!(self.cursor, bind, Binding);
-
-        match &bind.value {
-            B::Unpack(chain, bindings) => {
-                self.chain_symbols(chain);
-                match bindings {
-                    P::FieldBindings::Named(v) => {
-                        for symbol in v {
-                            match symbol {
-                                P::Ellipsis::Binder((_, x)) => self.bind_symbols(x, false),
-                                P::Ellipsis::Ellipsis(_) => (),
-                            }
-                        }
-                    }
-                    P::FieldBindings::Positional(v) => {
-                        for symbol in v.iter() {
-                            match symbol {
-                                P::Ellipsis::Binder(x) => self.bind_symbols(x, false),
-                                P::Ellipsis::Ellipsis(_) => (),
-                            }
-                        }
-                    }
-                }
-            }
-            B::Var(_, var) => {
-                if !explicitly_typed {
-                    assert!(self.current_mod_ident_str.is_some());
-                    if let Some(mod_defs) = self
-                        .mod_outer_defs
-                        .get_mut(&self.current_mod_ident_str.clone().unwrap())
-                    {
-                        mod_defs.untyped_defs.insert(var.loc());
-                    };
-                }
-            }
-        }
-    }
-
-    /// Get symbols for a name access chain
-    fn chain_symbols(&mut self, sp!(_, chain): &P::NameAccessChain) {
-        use P::NameAccessChain_ as NA;
-        // Record the length of all identifiers representing a potentially
-        // aliased module, struct, enum or function name in an access chain.
-        // We can conservatively record all identifiers as they are only
-        // accessed by-location so those irrelevant will never be queried.
-        match chain {
-            NA::Single(entry) => {
-                self.path_entry_symbols(entry);
-                if let Some(loc) = loc_start_to_lsp_position_opt(self.files, &entry.name.loc) {
-                    self.alias_lengths.insert(loc, entry.name.value.len());
-                };
-            }
-            NA::Path(path) => {
-                let P::NamePath {
-                    root,
-                    entries,
-                    is_incomplete: _,
-                } = path;
-                self.root_path_entry_symbols(root);
-                if let Some(root_loc) = loc_start_to_lsp_position_opt(self.files, &root.name.loc) {
-                    if let P::LeadingNameAccess_::Name(n) = root.name.value {
-                        self.alias_lengths.insert(root_loc, n.value.len());
-                    }
-                };
-                entries.iter().for_each(|entry| {
-                    self.path_entry_symbols(entry);
-                    if let Some(loc) = loc_start_to_lsp_position_opt(self.files, &entry.name.loc) {
-                        self.alias_lengths.insert(loc, entry.name.value.len());
-                    };
-                });
-            }
-        };
-    }
-
-    fn field_defn(&mut self, field: &P::Field) {
-        // If the cursor is in this item, mark that down.
-        update_cursor!(IDENT, self.cursor, field, FieldDefn);
-    }
-}
-
 /// Add use of a function, method, struct or enum identifier
 pub fn add_member_use_def(
     member_def_name: &Symbol, // may be different from use_name for methods
diff --git a/external-crates/move/crates/move-binary-format/src/file_format.rs b/external-crates/move/crates/move-binary-format/src/file_format.rs
index ff60d27ee3849..c65207ff96e15 100644
--- a/external-crates/move/crates/move-binary-format/src/file_format.rs
+++ b/external-crates/move/crates/move-binary-format/src/file_format.rs
@@ -12,15 +12,15 @@
 //!
 //! Overall the binary format is structured in a number of sections:
 //! - **Header**: this must start at offset 0 in the binary. It contains a blob that starts every
-//! Diem binary, followed by the version of the VM used to compile the code, and last is the
-//! number of tables present in this binary.
+//!   Diem binary, followed by the version of the VM used to compile the code, and last is the
+//!   number of tables present in this binary.
 //! - **Table Specification**: it's a number of tuple of the form
-//! `(table type, starting_offset, byte_count)`. The number of entries is specified in the
-//! header (last entry in header). There can only be a single entry per table type. The
-//! `starting offset` is from the beginning of the binary. Tables must cover the entire size of
-//! the binary blob and cannot overlap.
+//!   `(table type, starting_offset, byte_count)`. The number of entries is specified in the
+//!   header (last entry in header). There can only be a single entry per table type. The
+//!   `starting offset` is from the beginning of the binary. Tables must cover the entire size of
+//!   the binary blob and cannot overlap.
 //! - **Table Content**: the serialized form of the specific entries in the table. Those roughly
-//! map to the structs defined in this module. Entries in each table must be unique.
+//!   map to the structs defined in this module. Entries in each table must be unique.
 //!
 //! We have two formats: one for modules here represented by `CompiledModule`, another
 //! for transaction scripts which is `CompiledScript`. Building those tables and passing them
diff --git a/external-crates/move/crates/move-binary-format/src/file_format_common.rs b/external-crates/move/crates/move-binary-format/src/file_format_common.rs
index 7c81c755a012a..7d006ef74f9f9 100644
--- a/external-crates/move/crates/move-binary-format/src/file_format_common.rs
+++ b/external-crates/move/crates/move-binary-format/src/file_format_common.rs
@@ -334,7 +334,7 @@ pub enum Opcodes {
 }
 
 /// Upper limit on the binary size
-pub const BINARY_SIZE_LIMIT: usize = usize::max_value();
+pub const BINARY_SIZE_LIMIT: usize = usize::MAX;
 
 /// A wrapper for the binary vector
 #[derive(Default, Debug)]
diff --git a/external-crates/move/crates/move-binary-format/src/lib.rs b/external-crates/move/crates/move-binary-format/src/lib.rs
index 2490f2d247205..78e3607f7dfc8 100644
--- a/external-crates/move/crates/move-binary-format/src/lib.rs
+++ b/external-crates/move/crates/move-binary-format/src/lib.rs
@@ -161,8 +161,10 @@ macro_rules! safe_unwrap {
         match $e {
             Some(x) => x,
             None => {
-                let err = PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
-                    .with_message(format!("{}:{} (none)", file!(), line!()));
+                let err = move_binary_format::errors::PartialVMError::new(
+                    move_core_types::vm_status::StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR,
+                )
+                .with_message(format!("{}:{} (none)", file!(), line!()));
                 if cfg!(debug_assertions) {
                     panic!("{:?}", err);
                 } else {
diff --git a/external-crates/move/crates/move-binary-format/src/proptest_types/functions.rs b/external-crates/move/crates/move-binary-format/src/proptest_types/functions.rs
index a549e18fb8b94..42bbfacfc1b75 100644
--- a/external-crates/move/crates/move-binary-format/src/proptest_types/functions.rs
+++ b/external-crates/move/crates/move-binary-format/src/proptest_types/functions.rs
@@ -55,7 +55,7 @@ impl SignatureState {
     }
 
     fn add_signature(&mut self, sig: Signature) -> SignatureIndex {
-        debug_assert!(self.signatures.len() < TableSize::max_value() as usize);
+        debug_assert!(self.signatures.len() < TableSize::MAX as usize);
         if let Some(idx) = self.signature_map.get(&sig) {
             return *idx;
         }
@@ -81,7 +81,7 @@ impl FieldHandleState {
 
     #[allow(unused)]
     fn add_field_handle(&mut self, fh: FieldHandle) -> FieldHandleIndex {
-        debug_assert!(self.field_handles.len() < TableSize::max_value() as usize);
+        debug_assert!(self.field_handles.len() < TableSize::MAX as usize);
         if let Some(idx) = self.field_map.get(&fh) {
             return *idx;
         }
@@ -105,7 +105,7 @@ impl VariantHandleState {
     }
 
     fn add_variant_handle(&mut self, vh: VariantHandle) -> Option<VariantHandleIndex> {
-        debug_assert!(self.variant_handles.len() < TableSize::max_value() as usize);
+        debug_assert!(self.variant_handles.len() < TableSize::MAX as usize);
         if let Some(idx) = self.variant_map.get(&vh) {
             return Some(*idx);
         }
@@ -135,7 +135,7 @@ impl VariantInstantiationHandleState {
         &mut self,
         vh: VariantInstantiationHandle,
     ) -> Option<VariantInstantiationHandleIndex> {
-        debug_assert!(self.variant_instantiation_handles.len() < TableSize::max_value() as usize);
+        debug_assert!(self.variant_instantiation_handles.len() < TableSize::MAX as usize);
         if let Some(idx) = self.variant_map.get(&vh) {
             return Some(*idx);
         }
@@ -179,7 +179,7 @@ where
 
     #[allow(unused)]
     fn add_instantiation(&mut self, inst: T) -> TableIndex {
-        debug_assert!(self.instantiations.len() < TableSize::max_value() as usize);
+        debug_assert!(self.instantiations.len() < TableSize::MAX as usize);
         if let Some(idx) = self.instantiation_map.get(&inst) {
             return *idx;
         }
@@ -381,7 +381,7 @@ impl<'a> FnDefnMaterializeState<'a> {
     }
 
     fn add_function_handle(&mut self, handle: FunctionHandle) -> FunctionHandleIndex {
-        debug_assert!(self.function_handles.len() < TableSize::max_value() as usize);
+        debug_assert!(self.function_handles.len() < TableSize::MAX as usize);
         self.function_handles.push(handle);
         FunctionHandleIndex((self.function_handles.len() - 1) as TableIndex)
     }
@@ -488,7 +488,7 @@ impl FunctionDefinitionGen {
 
     pub fn materialize(self, state: &mut FnDefnMaterializeState) -> Option<FunctionDefinition> {
         // This precondition should never fail because the table size cannot be greater
-        // than TableSize::max_value()
+        // than TableSize::MAX
         let iden_idx = IdentifierIndex(self.name.index(state.identifiers_len) as TableIndex);
         if state
             .def_function_handles
diff --git a/external-crates/move/crates/move-binary-format/src/proptest_types/types.rs b/external-crates/move/crates/move-binary-format/src/proptest_types/types.rs
index 6cd5e43e76afd..49071e100b080 100644
--- a/external-crates/move/crates/move-binary-format/src/proptest_types/types.rs
+++ b/external-crates/move/crates/move-binary-format/src/proptest_types/types.rs
@@ -23,9 +23,6 @@ use proptest::{
 };
 use std::collections::BTreeSet;
 
-#[derive(Debug)]
-struct TypeSignatureIndex(u16);
-
 #[derive(Debug)]
 pub struct StDefnMaterializeState {
     pub self_module_handle_idx: ModuleHandleIndex,
diff --git a/external-crates/move/crates/move-binary-format/src/serializer.rs b/external-crates/move/crates/move-binary-format/src/serializer.rs
index 2d5a362e23914..0c8c6038df4d9 100644
--- a/external-crates/move/crates/move-binary-format/src/serializer.rs
+++ b/external-crates/move/crates/move-binary-format/src/serializer.rs
@@ -228,11 +228,11 @@ impl CompiledModule {
         let mut ser = ModuleSerializer::new(version);
         let mut temp = BinaryData::new();
         ser.serialize_tables(&mut temp, self)?;
-        if temp.len() > u32::max_value() as usize {
+        if temp.len() > u32::MAX as usize {
             bail!(
                 "table content size ({}) cannot exceed ({})",
                 temp.len(),
-                u32::max_value()
+                u32::MAX
             );
         }
         ser.common.serialize_header(&mut binary_data)?;
@@ -296,11 +296,11 @@ struct ModuleSerializer {
 // Helpers
 //
 fn check_index_in_binary(index: usize) -> Result<u32> {
-    if index > u32::max_value() as usize {
+    if index > u32::MAX as usize {
         bail!(
             "Compilation unit too big ({}) cannot exceed {}",
             index,
-            u32::max_value()
+            u32::MAX
         )
     }
     Ok(index as u32)
diff --git a/external-crates/move/crates/move-binary-format/src/unit_tests/number_tests.rs b/external-crates/move/crates/move-binary-format/src/unit_tests/number_tests.rs
index b99e94bb66e33..1e5f97f551374 100644
--- a/external-crates/move/crates/move-binary-format/src/unit_tests/number_tests.rs
+++ b/external-crates/move/crates/move-binary-format/src/unit_tests/number_tests.rs
@@ -42,8 +42,8 @@ fn uleb128_test() {
         uleb128_test_u64(2u64.pow(exp), n + 1);
         n += 1;
     }
-    uleb128_test_u64(u64::max_value() - 1, 10);
-    uleb128_test_u64(u64::max_value(), 10);
+    uleb128_test_u64(u64::MAX - 1, 10);
+    uleb128_test_u64(u64::MAX, 10);
 }
 
 #[test]
diff --git a/external-crates/move/crates/move-borrow-graph/src/graph.rs b/external-crates/move/crates/move-borrow-graph/src/graph.rs
index bb3dfe4bf0b29..0f51ed4e5bc05 100644
--- a/external-crates/move/crates/move-borrow-graph/src/graph.rs
+++ b/external-crates/move/crates/move-borrow-graph/src/graph.rs
@@ -232,19 +232,21 @@ impl<Loc: Copy, Lbl: Clone + Ord> BorrowGraph<Loc, Lbl> {
     /// Remove reference `id` from the graph
     /// Fixes any transitive borrows, so if `parent` borrowed by `id` borrowed by `child`
     /// After the release, `parent` borrowed by `child`
-    pub fn release(&mut self, id: RefID) {
+    pub fn release(&mut self, id: RefID) -> usize {
         debug_assert!(self.check_invariant());
         let Ref {
             borrowed_by,
             borrows_from,
             ..
         } = self.0.remove(&id).unwrap();
+        let mut released_edges = 0;
         for parent_ref_id in borrows_from.into_iter() {
             let parent = self.0.get_mut(&parent_ref_id).unwrap();
             let parent_edges = parent.borrowed_by.0.remove(&id).unwrap();
             for parent_edge in parent_edges {
                 for (child_ref_id, child_edges) in &borrowed_by.0 {
                     for child_edge in child_edges {
+                        released_edges += 1;
                         self.splice_out_intermediate(
                             parent_ref_id,
                             &parent_edge,
@@ -260,6 +262,7 @@ impl<Loc: Copy, Lbl: Clone + Ord> BorrowGraph<Loc, Lbl> {
             child.borrows_from.remove(&id);
         }
         debug_assert!(self.check_invariant());
+        released_edges
     }
 
     fn splice_out_intermediate(
diff --git a/external-crates/move/crates/move-bytecode-verifier-meter/src/lib.rs b/external-crates/move/crates/move-bytecode-verifier-meter/src/lib.rs
index 64bc6bd6e123f..ce90b1e1db8f8 100644
--- a/external-crates/move/crates/move-bytecode-verifier-meter/src/lib.rs
+++ b/external-crates/move/crates/move-bytecode-verifier-meter/src/lib.rs
@@ -46,6 +46,7 @@ pub trait Meter {
     }
 
     /// Adds the number of items with growth factor
+    #[deprecated(note = "this function is extremely slow and should be avoided")]
     fn add_items_with_growth(
         &mut self,
         scope: Scope,
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/ability_cache.rs b/external-crates/move/crates/move-bytecode-verifier/src/ability_cache.rs
new file mode 100644
index 0000000000000..39ba92b79c49c
--- /dev/null
+++ b/external-crates/move/crates/move-bytecode-verifier/src/ability_cache.rs
@@ -0,0 +1,100 @@
+// Copyright (c) The Move Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+use move_binary_format::{
+    errors::PartialVMResult,
+    file_format::{AbilitySet, DatatypeHandleIndex, SignatureToken},
+    safe_unwrap, CompiledModule,
+};
+use move_bytecode_verifier_meter::{Meter, Scope};
+use std::{
+    cmp::max,
+    collections::{btree_map::Entry, BTreeMap},
+};
+
+const TYPE_ARG_COST: u128 = 1;
+
+pub struct AbilityCache<'a> {
+    module: &'a CompiledModule,
+    vector_results: BTreeMap<AbilitySet, AbilitySet>,
+    datatype_results: BTreeMap<DatatypeHandleIndex, BTreeMap<Vec<AbilitySet>, AbilitySet>>,
+}
+
+impl<'a> AbilityCache<'a> {
+    pub fn new(module: &'a CompiledModule) -> Self {
+        Self {
+            module,
+            vector_results: BTreeMap::new(),
+            datatype_results: BTreeMap::new(),
+        }
+    }
+
+    pub fn abilities(
+        &mut self,
+        scope: Scope,
+        meter: &mut (impl Meter + ?Sized),
+        type_parameter_abilities: &[AbilitySet],
+        ty: &SignatureToken,
+    ) -> PartialVMResult<AbilitySet> {
+        use SignatureToken as S;
+
+        Ok(match ty {
+            S::Bool | S::U8 | S::U16 | S::U32 | S::U64 | S::U128 | S::U256 | S::Address => {
+                AbilitySet::PRIMITIVES
+            }
+
+            S::Reference(_) | S::MutableReference(_) => AbilitySet::REFERENCES,
+            S::Signer => AbilitySet::SIGNER,
+            S::TypeParameter(idx) => *safe_unwrap!(type_parameter_abilities.get(*idx as usize)),
+            S::Datatype(idx) => {
+                let sh = self.module.datatype_handle_at(*idx);
+                sh.abilities
+            }
+            S::Vector(inner) => {
+                let inner_abilities =
+                    self.abilities(scope, meter, type_parameter_abilities, inner)?;
+                let entry = self.vector_results.entry(inner_abilities);
+                match entry {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        meter.add(scope, TYPE_ARG_COST)?;
+                        let abilities = AbilitySet::polymorphic_abilities(
+                            AbilitySet::VECTOR,
+                            vec![false],
+                            vec![inner_abilities],
+                        )?;
+                        entry.insert(abilities);
+                        abilities
+                    }
+                }
+            }
+            S::DatatypeInstantiation(inst) => {
+                let (idx, type_args) = &**inst;
+                let type_arg_abilities = type_args
+                    .iter()
+                    .map(|arg| self.abilities(scope, meter, type_parameter_abilities, arg))
+                    .collect::<PartialVMResult<Vec<_>>>()?;
+                let entry = self
+                    .datatype_results
+                    .entry(*idx)
+                    .or_default()
+                    .entry(type_arg_abilities.clone());
+                match entry {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        meter.add_items(scope, TYPE_ARG_COST, max(type_args.len(), 1))?;
+                        let sh = self.module.datatype_handle_at(*idx);
+                        let declared_abilities = sh.abilities;
+                        let abilities = AbilitySet::polymorphic_abilities(
+                            declared_abilities,
+                            sh.type_parameters.iter().map(|param| param.is_phantom),
+                            type_arg_abilities,
+                        )?;
+                        entry.insert(abilities);
+                        abilities
+                    }
+                }
+            }
+        })
+    }
+}
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/ability_field_requirements.rs b/external-crates/move/crates/move-bytecode-verifier/src/ability_field_requirements.rs
index 1c868ad3856ad..3d462dcd04197 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/ability_field_requirements.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/ability_field_requirements.rs
@@ -4,18 +4,29 @@
 //! This module implements a checker for verifying that all of the struct's fields satisfy the
 //! abilities required by the struct's abilities
 
+use crate::ability_cache::AbilityCache;
 use move_binary_format::{
     errors::{verification_error, Location, PartialVMResult, VMResult},
     file_format::{AbilitySet, CompiledModule, StructFieldInformation, TableIndex},
     IndexKind,
 };
+use move_bytecode_verifier_meter::{Meter, Scope};
 use move_core_types::vm_status::StatusCode;
 
-pub fn verify_module(module: &CompiledModule) -> VMResult<()> {
-    verify_module_impl(module).map_err(|e| e.finish(Location::Module(module.self_id())))
+pub fn verify_module<'env>(
+    module: &'env CompiledModule,
+    ability_cache: &mut AbilityCache<'env>,
+    meter: &mut (impl Meter + ?Sized),
+) -> VMResult<()> {
+    verify_module_impl(module, ability_cache, meter)
+        .map_err(|e| e.finish(Location::Module(module.self_id())))
 }
 
-fn verify_module_impl(module: &CompiledModule) -> PartialVMResult<()> {
+fn verify_module_impl<'env>(
+    module: &'env CompiledModule,
+    ability_cache: &mut AbilityCache<'env>,
+    meter: &mut (impl Meter + ?Sized),
+) -> PartialVMResult<()> {
     for (idx, struct_def) in module.struct_defs().iter().enumerate() {
         let sh = module.datatype_handle_at(struct_def.struct_handle);
         let fields = match &struct_def.field_information {
@@ -35,8 +46,12 @@ fn verify_module_impl(module: &CompiledModule) -> PartialVMResult<()> {
             .map(|_| AbilitySet::ALL)
             .collect::<Vec<_>>();
         for field in fields {
-            let field_abilities =
-                module.abilities(&field.signature.0, &type_parameter_abilities)?;
+            let field_abilities = ability_cache.abilities(
+                Scope::Module,
+                meter,
+                &type_parameter_abilities,
+                &field.signature.0,
+            )?;
             if !required_abilities.is_subset(field_abilities) {
                 return Err(verification_error(
                     StatusCode::FIELD_MISSING_TYPE_ABILITY,
@@ -63,8 +78,12 @@ fn verify_module_impl(module: &CompiledModule) -> PartialVMResult<()> {
             .collect::<Vec<_>>();
         for (i, variant) in enum_def.variants.iter().enumerate() {
             for (fi, field) in variant.fields.iter().enumerate() {
-                let field_abilities =
-                    module.abilities(&field.signature.0, &type_parameter_abilities)?;
+                let field_abilities = ability_cache.abilities(
+                    Scope::Module,
+                    meter,
+                    &type_parameter_abilities,
+                    &field.signature.0,
+                )?;
                 if !required_abilities.is_subset(field_abilities) {
                     return Err(verification_error(
                         StatusCode::FIELD_MISSING_TYPE_ABILITY,
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/code_unit_verifier.rs b/external-crates/move/crates/move-bytecode-verifier/src/code_unit_verifier.rs
index a3b9a0ab07fa9..f8971d78924e4 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/code_unit_verifier.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/code_unit_verifier.rs
@@ -6,8 +6,8 @@
 //! The overall verification is split between stack_usage_verifier.rs and
 //! abstract_interpreter.rs. CodeUnitVerifier simply orchestrates calls into these two files.
 use crate::{
-    acquires_list_verifier::AcquiresVerifier, control_flow, locals_safety, reference_safety,
-    stack_usage_verifier::StackUsageVerifier, type_safety,
+    ability_cache::AbilityCache, acquires_list_verifier::AcquiresVerifier, control_flow,
+    locals_safety, reference_safety, stack_usage_verifier::StackUsageVerifier, type_safety,
 };
 use move_abstract_interpreter::{absint::FunctionContext, control_flow_graph::ControlFlowGraph};
 use move_binary_format::{
@@ -22,123 +22,128 @@ use move_core_types::vm_status::StatusCode;
 use move_vm_config::verifier::VerifierConfig;
 use std::collections::HashMap;
 
-pub struct CodeUnitVerifier<'a> {
-    module: &'a CompiledModule,
-    function_context: FunctionContext<'a>,
+pub struct CodeUnitVerifier<'env, 'a> {
+    module: &'env CompiledModule,
+    function_context: FunctionContext<'env>,
     name_def_map: &'a HashMap<IdentifierIndex, FunctionDefinitionIndex>,
 }
 
-impl<'a> CodeUnitVerifier<'a> {
-    pub fn verify_module(
-        verifier_config: &VerifierConfig,
-        module: &'a CompiledModule,
-        meter: &mut (impl Meter + ?Sized),
-    ) -> VMResult<()> {
-        Self::verify_module_impl(verifier_config, module, meter)
-            .map_err(|e| e.finish(Location::Module(module.self_id())))
-    }
+pub fn verify_module<'env>(
+    verifier_config: &VerifierConfig,
+    module: &'env CompiledModule,
+    ability_cache: &mut AbilityCache<'env>,
+    meter: &mut (impl Meter + ?Sized),
+) -> VMResult<()> {
+    verify_module_impl(verifier_config, module, ability_cache, meter)
+        .map_err(|e| e.finish(Location::Module(module.self_id())))
+}
 
-    fn verify_module_impl(
-        verifier_config: &VerifierConfig,
-        module: &CompiledModule,
-        meter: &mut (impl Meter + ?Sized),
-    ) -> PartialVMResult<()> {
-        let mut name_def_map = HashMap::new();
-        for (idx, func_def) in module.function_defs().iter().enumerate() {
-            let fh = module.function_handle_at(func_def.function);
-            name_def_map.insert(fh.name, FunctionDefinitionIndex(idx as u16));
-        }
-        let mut total_back_edges = 0;
-        for (idx, function_definition) in module.function_defs().iter().enumerate() {
-            let index = FunctionDefinitionIndex(idx as TableIndex);
-            let num_back_edges = Self::verify_function(
-                verifier_config,
-                index,
-                function_definition,
-                module,
-                &name_def_map,
-                meter,
-            )
-            .map_err(|err| err.at_index(IndexKind::FunctionDefinition, index.0))?;
-            total_back_edges += num_back_edges;
-        }
-        if let Some(limit) = verifier_config.max_back_edges_per_module {
-            if total_back_edges > limit {
-                return Err(PartialVMError::new(StatusCode::TOO_MANY_BACK_EDGES));
-            }
-        }
-        Ok(())
+fn verify_module_impl<'env>(
+    verifier_config: &VerifierConfig,
+    module: &'env CompiledModule,
+    ability_cache: &mut AbilityCache<'env>,
+    meter: &mut (impl Meter + ?Sized),
+) -> PartialVMResult<()> {
+    let mut name_def_map = HashMap::new();
+    for (idx, func_def) in module.function_defs().iter().enumerate() {
+        let fh = module.function_handle_at(func_def.function);
+        name_def_map.insert(fh.name, FunctionDefinitionIndex(idx as u16));
+    }
-
-    fn verify_function(
-        verifier_config: &VerifierConfig,
-        index: FunctionDefinitionIndex,
-        function_definition: &FunctionDefinition,
-        module: &CompiledModule,
-        name_def_map: &HashMap<IdentifierIndex, FunctionDefinitionIndex>,
-        meter: &mut (impl Meter + ?Sized),
-    ) -> PartialVMResult<usize> {
-        meter.enter_scope(
-            module
-                .identifier_at(module.function_handle_at(function_definition.function).name)
-                .as_str(),
-            Scope::Function,
-        );
-        // nothing to verify for native function
-        let code = match &function_definition.code {
-            Some(code) => code,
-            None => return Ok(0),
-        };
-
-        // create `FunctionContext` and `BinaryIndexedView`
-        let function_context = control_flow::verify_function(
+    let mut total_back_edges = 0;
+    for (idx, function_definition) in module.function_defs().iter().enumerate() {
+        let index = FunctionDefinitionIndex(idx as TableIndex);
+        let num_back_edges = verify_function(
             verifier_config,
-            module,
             index,
             function_definition,
-            code,
+            module,
+            ability_cache,
+            &name_def_map,
             meter,
-        )?;
+        )
+        .map_err(|err| err.at_index(IndexKind::FunctionDefinition, index.0))?;
+        total_back_edges += num_back_edges;
+    }
+    if let Some(limit) = verifier_config.max_back_edges_per_module {
+        if total_back_edges > limit {
+            return Err(PartialVMError::new(StatusCode::TOO_MANY_BACK_EDGES));
+        }
+    }
+    Ok(())
+}
 
-        if let Some(limit) = verifier_config.max_basic_blocks {
-            if function_context.cfg().blocks().len() > limit {
-                return Err(
-                    PartialVMError::new(StatusCode::TOO_MANY_BASIC_BLOCKS).at_code_offset(index, 0)
-                );
-            }
+fn verify_function<'env>(
+    verifier_config: &VerifierConfig,
+    index: FunctionDefinitionIndex,
+    function_definition: &'env FunctionDefinition,
+    module: &'env CompiledModule,
+    ability_cache: &mut AbilityCache<'env>,
+    name_def_map: &HashMap<IdentifierIndex, FunctionDefinitionIndex>,
+    meter: &mut (impl Meter + ?Sized),
+) -> PartialVMResult<usize> {
+    meter.enter_scope(
+        module
+            .identifier_at(module.function_handle_at(function_definition.function).name)
+            .as_str(),
+        Scope::Function,
+    );
+    // nothing to verify for native function
+    let code = match &function_definition.code {
+        Some(code) => code,
+        None => return Ok(0),
+    };
+
+    // create `FunctionContext` and `BinaryIndexedView`
+    let function_context = control_flow::verify_function(
+        verifier_config,
+        module,
+        index,
+        function_definition,
+        code,
+        meter,
+    )?;
+
+    if let Some(limit) = verifier_config.max_basic_blocks {
+        if function_context.cfg().blocks().len() > limit {
+            return Err(
+                PartialVMError::new(StatusCode::TOO_MANY_BASIC_BLOCKS).at_code_offset(index, 0)
+            );
         }
+    }
 
-        let num_back_edges = function_context.cfg().num_back_edges();
-        if let Some(limit) = verifier_config.max_back_edges_per_function {
-            if num_back_edges > limit {
-                return Err(
-                    PartialVMError::new(StatusCode::TOO_MANY_BACK_EDGES).at_code_offset(index, 0)
-                );
-            }
+    let num_back_edges = function_context.cfg().num_back_edges();
+    if let Some(limit) = verifier_config.max_back_edges_per_function {
+        if num_back_edges > limit {
+            return Err(
+                PartialVMError::new(StatusCode::TOO_MANY_BACK_EDGES).at_code_offset(index, 0)
+            );
         }
+    }
 
-        // verify
-        let code_unit_verifier = CodeUnitVerifier {
-            module,
-            function_context,
-            name_def_map,
-        };
-        code_unit_verifier.verify_common(verifier_config, meter)?;
-        AcquiresVerifier::verify(module, index, function_definition, meter)?;
+    // verify
+    let code_unit_verifier = CodeUnitVerifier {
+        module,
+        function_context,
+        name_def_map,
+    };
+    code_unit_verifier.verify_common(verifier_config, ability_cache, meter)?;
+    AcquiresVerifier::verify(module, index, function_definition, meter)?;
 
-        meter.transfer(Scope::Function, Scope::Module, 1.0)?;
+    meter.transfer(Scope::Function, Scope::Module, 1.0)?;
 
-        Ok(num_back_edges)
-    }
+
+    Ok(num_back_edges)
+}
 
+impl<'env, 'a> CodeUnitVerifier<'env, 'a> {
     fn verify_common(
         &self,
         verifier_config: &VerifierConfig,
+        ability_cache: &mut AbilityCache<'env>,
         meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<()> {
         StackUsageVerifier::verify(verifier_config, self.module, &self.function_context, meter)?;
-        type_safety::verify(self.module, &self.function_context, meter)?;
-        locals_safety::verify(self.module, &self.function_context, meter)?;
+        type_safety::verify(self.module, &self.function_context, ability_cache, meter)?;
+        locals_safety::verify(self.module, &self.function_context, ability_cache, meter)?;
         reference_safety::verify(
             self.module,
             &self.function_context,
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/control_flow.rs b/external-crates/move/crates/move-bytecode-verifier/src/control_flow.rs
index e32afcee37cff..07eb8b12991a0 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/control_flow.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/control_flow.rs
@@ -29,14 +29,14 @@ use std::collections::BTreeSet;
 
 /// Perform control flow verification on the compiled function, returning its `FunctionContext` if
 /// verification was successful.
-pub fn verify_function<'a>(
-    verifier_config: &'a VerifierConfig,
-    module: &'a CompiledModule,
+pub fn verify_function<'env>(
+    verifier_config: &VerifierConfig,
+    module: &'env CompiledModule,
     index: FunctionDefinitionIndex,
-    function_definition: &'a FunctionDefinition,
-    code: &'a CodeUnit,
+    function_definition: &'env FunctionDefinition,
+    code: &'env CodeUnit,
     _meter: &mut (impl Meter + ?Sized), // TODO: metering
-) -> PartialVMResult<FunctionContext<'a>> {
+) -> PartialVMResult<FunctionContext<'env>> {
     let function_handle = module.function_handle_at(function_definition.function);
 
     if module.version() <= 5 {
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/control_flow_v5.rs b/external-crates/move/crates/move-bytecode-verifier/src/control_flow_v5.rs
index b24d7aeca62b9..eb705b9498b8b 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/control_flow_v5.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/control_flow_v5.rs
@@ -37,7 +37,7 @@ pub fn verify(
 
 fn verify_fallthrough(
     current_function: FunctionDefinitionIndex,
-    code: &Vec<Bytecode>,
+    code: &[Bytecode],
 ) -> PartialVMResult<()> {
     // Check to make sure that the bytecode vector ends with a branching instruction.
     match code.last() {
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/lib.rs b/external-crates/move/crates/move-bytecode-verifier/src/lib.rs
index 64a88f59842dd..660da728a05d5 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/lib.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/lib.rs
@@ -7,6 +7,7 @@
 //! Verifies bytecode sanity.
 
 // Bounds checks are implemented in the `vm` crate.
+pub mod ability_cache;
 pub mod ability_field_requirements;
 pub mod check_duplication;
 pub mod code_unit_verifier;
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/abstract_state.rs b/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/abstract_state.rs
index c42e538bf5b87..26eae31969670 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/abstract_state.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/abstract_state.rs
@@ -4,6 +4,7 @@
 //! This module defines the abstract state for the local safety analysis.
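
The `AbilityCache` added earlier in this diff memoizes ability computations per vector element type and per datatype instantiation, charging the meter only when a result is actually computed. A minimal model of that pattern (illustrative only; the `u32`/`u64` key and value types stand in for signature tokens and ability sets, and `charged` stands in for the verifier's `Meter`):

// Illustration only -- compute once per distinct key; cache hits cost nothing.
use std::collections::{btree_map::Entry, BTreeMap};

fn get_or_compute(
    cache: &mut BTreeMap<u32, u64>,
    charged: &mut u128,
    key: u32,
    cost: u128,
    compute: impl FnOnce(u32) -> u64,
) -> u64 {
    match cache.entry(key) {
        Entry::Occupied(e) => *e.get(),
        Entry::Vacant(e) => {
            // a real implementation charges the meter here (`meter.add(scope, cost)?`),
            // so repeated queries over the same instantiation are charged exactly once
            *charged += cost;
            *e.insert(compute(key))
        }
    }
}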
 
+use crate::ability_cache::AbilityCache;
 use move_abstract_interpreter::absint::{AbstractDomain, FunctionContext, JoinResult};
 use move_binary_format::{
     errors::{PartialVMError, PartialVMResult},
@@ -26,10 +27,9 @@ pub(crate) enum LocalState {
 }
 use LocalState::*;
 
-pub(crate) const STEP_BASE_COST: u128 = 15;
-pub(crate) const RET_PER_LOCAL_COST: u128 = 30;
-pub(crate) const JOIN_BASE_COST: u128 = 10;
-pub(crate) const JOIN_PER_LOCAL_COST: u128 = 5;
+pub(crate) const STEP_BASE_COST: u128 = 1;
+pub(crate) const RET_COST: u128 = 10;
+pub(crate) const JOIN_COST: u128 = 10;
 
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub(crate) struct AbstractState {
@@ -41,8 +41,10 @@ impl AbstractState {
     /// create a new abstract state
     pub fn new(
-        module: &CompiledModule,
+        _module: &CompiledModule,
         function_context: &FunctionContext,
+        ability_cache: &mut AbilityCache,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<Self> {
         let num_args = function_context.parameters().len();
         let num_locals = num_args + function_context.locals().len();
@@ -55,7 +57,14 @@ impl AbstractState {
             .0
             .iter()
             .chain(function_context.locals().0.iter())
-            .map(|st| module.abilities(st, function_context.type_parameters()))
+            .map(|st| {
+                ability_cache.abilities(
+                    Scope::Function,
+                    meter,
+                    function_context.type_parameters(),
+                    st,
+                )
+            })
             .collect::<PartialVMResult<Vec<_>>>()?;
 
         Ok(Self {
@@ -141,12 +150,7 @@ impl AbstractDomain for AbstractState {
         state: &AbstractState,
         meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<JoinResult> {
-        meter.add(Scope::Function, JOIN_BASE_COST)?;
-        meter.add_items(
-            Scope::Function,
-            JOIN_PER_LOCAL_COST,
-            state.local_states.len(),
-        )?;
+        meter.add(Scope::Function, JOIN_COST)?;
         let joined = Self::join_(self, state);
         assert!(self.local_states.len() == joined.local_states.len());
         let locals_unchanged = self
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/mod.rs b/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/mod.rs
index f5eab08181ab5..1c9b0487b28d0 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/mod.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/locals_safety/mod.rs
@@ -8,8 +8,8 @@
 
 mod abstract_state;
 
-use crate::locals_safety::abstract_state::{RET_PER_LOCAL_COST, STEP_BASE_COST};
-use abstract_state::{AbstractState, LocalState};
+use crate::ability_cache::AbilityCache;
+use abstract_state::{AbstractState, LocalState, RET_COST, STEP_BASE_COST};
 use move_abstract_interpreter::absint::{AbstractInterpreter, FunctionContext, TransferFunctions};
 use move_binary_format::{
     errors::{PartialVMError, PartialVMResult},
@@ -22,9 +22,10 @@ use move_core_types::vm_status::StatusCode;
 pub(crate) fn verify<'a>(
     module: &CompiledModule,
     function_context: &'a FunctionContext<'a>,
+    ability_cache: &mut AbilityCache,
     meter: &mut (impl Meter + ?Sized),
 ) -> PartialVMResult<()> {
-    let initial_state = AbstractState::new(module, function_context)?;
+    let initial_state = AbstractState::new(module, function_context, ability_cache, meter)?;
     LocalsSafetyAnalysis().analyze_function(initial_state, function_context, meter)
 }
 
@@ -70,7 +71,7 @@ fn execute_inner(
 
         Bytecode::Ret => {
             let local_states = state.local_states();
-            meter.add_items(Scope::Function, RET_PER_LOCAL_COST, local_states.len())?;
+            meter.add_items(Scope::Function, RET_COST, local_states.len())?;
             let all_local_abilities = state.all_local_abilities();
             assert!(local_states.len() == all_local_abilities.len());
             for (local_state, local_abilities) in local_states.iter().zip(all_local_abilities) {
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs b/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs
index 6aaa140cdebec..2557a0f75ee88 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs
@@ -16,7 +16,10 @@ use move_binary_format::{
 use move_borrow_graph::references::RefID;
 use move_bytecode_verifier_meter::{Meter, Scope};
 use move_core_types::vm_status::StatusCode;
-use std::collections::{BTreeMap, BTreeSet};
+use std::{
+    cmp::max,
+    collections::{BTreeMap, BTreeSet},
+};
 
 type BorrowGraph = move_borrow_graph::graph::BorrowGraph<(), Label>;
 
@@ -74,19 +77,18 @@ impl std::fmt::Display for Label {
     }
 }
 
-pub(crate) const STEP_BASE_COST: u128 = 10;
-pub(crate) const STEP_PER_LOCAL_COST: u128 = 20;
-pub(crate) const STEP_PER_GRAPH_ITEM_COST: u128 = 50;
-pub(crate) const JOIN_BASE_COST: u128 = 100;
-pub(crate) const JOIN_PER_LOCAL_COST: u128 = 10;
-pub(crate) const JOIN_PER_GRAPH_ITEM_COST: u128 = 50;
+pub(crate) const STEP_BASE_COST: u128 = 1;
+pub(crate) const JOIN_BASE_COST: u128 = 10;
+
+pub(crate) const PER_GRAPH_ITEM_COST: u128 = 4;
 
-// The cost for an edge from an input reference parameter to output reference.
-pub(crate) const REF_PARAM_EDGE_COST: u128 = 100;
-pub(crate) const REF_PARAM_EDGE_COST_GROWTH: f32 = 1.5;
+pub(crate) const RELEASE_ITEM_COST: u128 = 3;
+pub(crate) const RELEASE_ITEM_QUADRATIC_THRESHOLD: usize = 5;
 
-// The cost of an acquires in a call.
-pub(crate) const CALL_PER_ACQUIRES_COST: u128 = 100;
+pub(crate) const JOIN_ITEM_COST: u128 = 4;
+pub(crate) const JOIN_ITEM_QUADRATIC_THRESHOLD: usize = 10;
+
+pub(crate) const ADD_BORROW_COST: u128 = 3;
 
 /// AbstractState is the analysis state over which abstract interpretation is performed.
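
The `*_QUADRATIC_THRESHOLD` constants above pair a per-item cost with a cutoff. The definitions of the `charge_release` and `charge_graph_size` helpers used below fall outside this hunk, so the following is only an assumption about their shape: charging stays linear up to the threshold and grows quadratically in the excess beyond it, penalizing pathologically large borrow graphs without taxing the common case:

// Assumed shape only -- the real helpers are not shown in this diff.
fn charge(items: usize, unit_cost: u128, threshold: usize, charged: &mut u128) {
    let (items, threshold) = (items as u128, threshold as u128);
    let cost = if items <= threshold {
        // common case: small graphs pay a linear fee
        unit_cost * items
    } else {
        // large graphs pay quadratically in the amount over the threshold
        unit_cost * (threshold + (items - threshold) * (items - threshold))
    };
    *charged += cost; // stands in for `meter.add(Scope::Function, cost)?`
}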
/// AbstractState is the analysis state over which abstract interpretation is performed.
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -126,10 +128,6 @@ impl AbstractState {
         state
     }
 
-    pub(crate) fn local_count(&self) -> usize {
-        self.locals.len()
-    }
-
     pub(crate) fn graph_size(&self) -> usize {
         self.borrow_graph.graph_size()
     }
@@ -166,27 +164,63 @@ impl AbstractState {
         id
     }
 
-    fn add_copy(&mut self, parent: RefID, child: RefID) {
-        self.borrow_graph.add_strong_borrow((), parent, child)
+    fn add_copy(
+        &mut self,
+        parent: RefID,
+        child: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        meter.add(Scope::Function, ADD_BORROW_COST)?;
+        self.borrow_graph.add_strong_borrow((), parent, child);
+        Ok(())
     }
 
-    fn add_borrow(&mut self, parent: RefID, child: RefID) {
-        self.borrow_graph.add_weak_borrow((), parent, child)
+    fn add_borrow(
+        &mut self,
+        parent: RefID,
+        child: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        meter.add(Scope::Function, ADD_BORROW_COST)?;
+        self.borrow_graph.add_weak_borrow((), parent, child);
+        Ok(())
     }
 
-    fn add_field_borrow(&mut self, parent: RefID, field: FieldHandleIndex, child: RefID) {
+    fn add_field_borrow(
+        &mut self,
+        parent: RefID,
+        field: FieldHandleIndex,
+        child: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        meter.add(Scope::Function, ADD_BORROW_COST)?;
         self.borrow_graph
-            .add_strong_field_borrow((), parent, Label::StructField(field), child)
+            .add_strong_field_borrow((), parent, Label::StructField(field), child);
+        Ok(())
     }
 
-    fn add_local_borrow(&mut self, local: LocalIndex, id: RefID) {
+    fn add_local_borrow(
+        &mut self,
+        local: LocalIndex,
+        id: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        meter.add(Scope::Function, ADD_BORROW_COST)?;
         self.borrow_graph
-            .add_strong_field_borrow((), self.frame_root(), Label::Local(local), id)
+            .add_strong_field_borrow((), self.frame_root(), Label::Local(local), id);
+        Ok(())
     }
 
-    fn add_resource_borrow(&mut self, resource: StructDefinitionIndex, id: RefID) {
+    fn add_resource_borrow(
+        &mut self,
+        resource: StructDefinitionIndex,
+        id: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        meter.add(Scope::Function, ADD_BORROW_COST)?;
         self.borrow_graph
-            .add_weak_field_borrow((), self.frame_root(), Label::Global(resource), id)
+            .add_weak_field_borrow((), self.frame_root(), Label::Global(resource), id);
+        Ok(())
     }
 
     fn add_variant_field_borrow(
@@ -196,18 +230,22 @@ impl AbstractState {
         variant_tag: VariantTag,
         field_index: MemberCount,
         child_id: RefID,
-    ) {
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        meter.add(Scope::Function, ADD_BORROW_COST)?;
         self.borrow_graph.add_strong_field_borrow(
             (),
             parent,
             Label::VariantField(enum_def_idx, variant_tag, field_index),
             child_id,
-        )
+        );
+        Ok(())
     }
 
     /// removes `id` from borrow graph
-    fn release(&mut self, id: RefID) {
-        self.borrow_graph.release(id);
+    fn release(&mut self, id: RefID, meter: &mut (impl Meter + ?Sized)) -> PartialVMResult<()> {
+        let released_edges = self.borrow_graph.release(id);
+        charge_release(released_edges, meter)
     }
 
     //**********************************************************************************************
@@ -258,52 +296,85 @@ impl AbstractState {
     /// checks if `id` is writable
     /// - Mutable references are writable if there are no consistent borrows
     /// - Immutable references are not writable by the typing rules
-    fn is_writable(&self, id: RefID) -> bool {
+    fn is_writable(&self, id: RefID, meter: &mut (impl Meter + ?Sized)) -> PartialVMResult<bool> {
         assert!(self.borrow_graph.is_mutable(id));
-        !self.has_consistent_borrows(id, None)
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(!self.has_consistent_borrows(id, None))
     }
 
     /// checks if `id` is freezable
     /// - Mutable references are freezable if there are no consistent mutable borrows
     /// - Immutable references are not freezable by the typing rules
-    fn is_freezable(&self, id: RefID, at_field_opt: Option<FieldHandleIndex>) -> bool {
+    fn is_freezable(
+        &self,
+        id: RefID,
+        at_field_opt: Option<FieldHandleIndex>,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<bool> {
         assert!(self.borrow_graph.is_mutable(id));
-        !self.has_consistent_mutable_borrows(id, at_field_opt.map(Label::StructField))
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(!self.has_consistent_mutable_borrows(id, at_field_opt.map(Label::StructField)))
     }
 
     /// checks if `id` is readable
     /// - Mutable references are readable if they are freezable
     /// - Immutable references are always readable
-    fn is_readable(&self, id: RefID, at_field_opt: Option<FieldHandleIndex>) -> bool {
+    fn is_readable(
+        &self,
+        id: RefID,
+        at_field_opt: Option<FieldHandleIndex>,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<bool> {
         let is_mutable = self.borrow_graph.is_mutable(id);
-        !is_mutable || self.is_freezable(id, at_field_opt)
+        Ok(!is_mutable || self.is_freezable(id, at_field_opt, meter)?)
     }
 
     /// checks if local@idx is borrowed
-    fn is_local_borrowed(&self, idx: LocalIndex) -> bool {
-        self.has_consistent_borrows(self.frame_root(), Some(Label::Local(idx)))
+    fn is_local_borrowed(
+        &self,
+        idx: LocalIndex,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<bool> {
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(self.has_consistent_borrows(self.frame_root(), Some(Label::Local(idx))))
    }
 
     /// checks if local@idx is mutably borrowed
-    fn is_local_mutably_borrowed(&self, idx: LocalIndex) -> bool {
-        self.has_consistent_mutable_borrows(self.frame_root(), Some(Label::Local(idx)))
+    fn is_local_mutably_borrowed(
+        &self,
+        idx: LocalIndex,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<bool> {
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(self.has_consistent_mutable_borrows(self.frame_root(), Some(Label::Local(idx))))
     }
 
     /// checks if global@idx is borrowed
-    fn is_global_borrowed(&self, resource: StructDefinitionIndex) -> bool {
-        self.has_consistent_borrows(self.frame_root(), Some(Label::Global(resource)))
+    fn is_global_borrowed(
+        &self,
+        resource: StructDefinitionIndex,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<bool> {
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(self.has_consistent_borrows(self.frame_root(), Some(Label::Global(resource))))
     }
 
     /// checks if global@idx is mutably borrowed
-    fn is_global_mutably_borrowed(&self, resource: StructDefinitionIndex) -> bool {
-        self.has_consistent_mutable_borrows(self.frame_root(), Some(Label::Global(resource)))
+    fn is_global_mutably_borrowed(
+        &self,
+        resource: StructDefinitionIndex,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<bool> {
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(self.has_consistent_mutable_borrows(self.frame_root(), Some(Label::Global(resource))))
     }
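Every query above pays charge_graph_size before touching the graph, because has_consistent_borrows may walk all edges reachable from the frame root. A rough toy model of why the fair up-front fee is the edge count (hypothetical structure, not the move-borrow-graph API):

    use std::collections::BTreeSet;

    // Toy borrow graph: edges are (parent, child) reference ids.
    struct ToyBorrowGraph {
        edges: BTreeSet<(u32, u32)>,
    }

    impl ToyBorrowGraph {
        fn graph_size(&self) -> usize {
            self.edges.len()
        }

        // Worst case this scan touches every edge, so charging graph_size()
        // items before answering makes the query pre-paid.
        fn is_borrowed(&self, parent: u32) -> bool {
            self.edges.iter().any(|&(p, _)| p == parent)
        }
    }

    fn main() {
        let g = ToyBorrowGraph {
            edges: [(0, 1), (0, 2), (3, 4)].into_iter().collect(),
        };
        assert_eq!(g.graph_size(), 3);
        assert!(g.is_borrowed(0));
        assert!(!g.is_borrowed(4));
    }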
     /// checks if the stack frame of the function being analyzed can be safely destroyed.
     /// safe destruction requires that all references in locals have already been destroyed
     /// and all values in locals are copyable and unborrowed.
-    fn is_frame_safe_to_destroy(&self) -> bool {
-        !self.has_consistent_borrows(self.frame_root(), None)
+    fn is_frame_safe_to_destroy(&self, meter: &mut (impl Meter + ?Sized)) -> PartialVMResult<bool> {
+        charge_graph_size(self.graph_size(), meter)?;
+        Ok(!self.has_consistent_borrows(self.frame_root(), None))
     }
 
     //**********************************************************************************************
@@ -311,10 +382,14 @@ impl AbstractState {
     //**********************************************************************************************
 
     /// destroys local@idx
-    pub fn release_value(&mut self, value: AbstractValue) {
+    pub fn release_value(
+        &mut self,
+        value: AbstractValue,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
         match value {
-            AbstractValue::Reference(id) => self.release(id),
-            AbstractValue::NonReference => (),
+            AbstractValue::Reference(id) => self.release(id, meter),
+            AbstractValue::NonReference => Ok(()),
         }
     }
 
@@ -322,15 +397,16 @@
         &mut self,
         offset: CodeOffset,
         local: LocalIndex,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
         match safe_unwrap!(self.locals.get(local as usize)) {
             AbstractValue::Reference(id) => {
                 let id = *id;
                 let new_id = self.new_ref(self.borrow_graph.is_mutable(id));
-                self.add_copy(id, new_id);
+                self.add_copy(id, new_id, meter)?;
                 Ok(AbstractValue::Reference(new_id))
             }
-            AbstractValue::NonReference if self.is_local_mutably_borrowed(local) => {
+            AbstractValue::NonReference if self.is_local_mutably_borrowed(local, meter)? => {
                 Err(self.error(StatusCode::COPYLOC_EXISTS_BORROW_ERROR, offset))
             }
             AbstractValue::NonReference => Ok(AbstractValue::NonReference),
@@ -341,6 +417,7 @@
         &mut self,
         offset: CodeOffset,
         local: LocalIndex,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
         let old_value = std::mem::replace(
             safe_unwrap!(self.locals.get_mut(local as usize)),
@@ -348,7 +425,7 @@
         );
         match old_value {
             AbstractValue::Reference(id) => Ok(AbstractValue::Reference(id)),
-            AbstractValue::NonReference if self.is_local_borrowed(local) => {
+            AbstractValue::NonReference if self.is_local_borrowed(local, meter)? => {
                 Err(self.error(StatusCode::MOVELOC_EXISTS_BORROW_ERROR, offset))
             }
             AbstractValue::NonReference => Ok(AbstractValue::NonReference),
@@ -360,29 +437,32 @@
         offset: CodeOffset,
         local: LocalIndex,
         new_value: AbstractValue,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<()> {
         let old_value =
             std::mem::replace(safe_unwrap!(self.locals.get_mut(local as usize)), new_value);
         match old_value {
-            AbstractValue::Reference(id) => {
-                self.release(id);
-                Ok(())
-            }
-            AbstractValue::NonReference if self.is_local_borrowed(local) => {
+            AbstractValue::Reference(id) => self.release(id, meter),
+            AbstractValue::NonReference if self.is_local_borrowed(local, meter)? => {
                 Err(self.error(StatusCode::STLOC_UNSAFE_TO_DESTROY_ERROR, offset))
             }
             AbstractValue::NonReference => Ok(()),
         }
     }
 
-    pub fn freeze_ref(&mut self, offset: CodeOffset, id: RefID) -> PartialVMResult<AbstractValue> {
-        if !self.is_freezable(id, None) {
+    pub fn freeze_ref(
+        &mut self,
+        offset: CodeOffset,
+        id: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<AbstractValue> {
+        if !self.is_freezable(id, None, meter)? {
             return Err(self.error(StatusCode::FREEZEREF_EXISTS_MUTABLE_BORROW_ERROR, offset));
         }
 
         let frozen_id = self.new_ref(false);
-        self.add_copy(id, frozen_id);
-        self.release(id);
+        self.add_copy(id, frozen_id, meter)?;
+        self.release(id, meter)?;
         Ok(AbstractValue::Reference(frozen_id))
     }
 
@@ -391,17 +471,19 @@
         offset: CodeOffset,
         v1: AbstractValue,
         v2: AbstractValue,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
         match (v1, v2) {
             (AbstractValue::Reference(id1), AbstractValue::Reference(id2))
-                if !self.is_readable(id1, None) || !self.is_readable(id2, None) =>
+                if !self.is_readable(id1, None, meter)?
+                    || !self.is_readable(id2, None, meter)? =>
             {
                 // TODO better error code
                 return Err(self.error(StatusCode::READREF_EXISTS_MUTABLE_BORROW_ERROR, offset));
             }
             (AbstractValue::Reference(id1), AbstractValue::Reference(id2)) => {
-                self.release(id1);
-                self.release(id2)
+                self.release(id1, meter)?;
+                self.release(id2, meter)?;
             }
             (v1, v2) => {
                 assert!(v1.is_value());
@@ -411,21 +493,31 @@
         Ok(AbstractValue::NonReference)
     }
 
-    pub fn read_ref(&mut self, offset: CodeOffset, id: RefID) -> PartialVMResult<AbstractValue> {
-        if !self.is_readable(id, None) {
+    pub fn read_ref(
+        &mut self,
+        offset: CodeOffset,
+        id: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<AbstractValue> {
+        if !self.is_readable(id, None, meter)? {
             return Err(self.error(StatusCode::READREF_EXISTS_MUTABLE_BORROW_ERROR, offset));
         }
-        self.release(id);
+        self.release(id, meter)?;
         Ok(AbstractValue::NonReference)
     }
 
-    pub fn write_ref(&mut self, offset: CodeOffset, id: RefID) -> PartialVMResult<()> {
-        if !self.is_writable(id) {
+    pub fn write_ref(
+        &mut self,
+        offset: CodeOffset,
+        id: RefID,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
+        if !self.is_writable(id, meter)? {
             return Err(self.error(StatusCode::WRITEREF_EXISTS_BORROW_ERROR, offset));
         }
-        self.release(id);
+        self.release(id, meter)?;
         Ok(())
     }
 
@@ -434,15 +526,16 @@
         offset: CodeOffset,
         mut_: bool,
         local: LocalIndex,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
         // nothing to check in case borrow is mutable since the frame cannot have an full borrow/
         // epsilon outgoing edge
-        if !mut_ && self.is_local_mutably_borrowed(local) {
+        if !mut_ && self.is_local_mutably_borrowed(local, meter)? {
             return Err(self.error(StatusCode::BORROWLOC_EXISTS_BORROW_ERROR, offset));
         }
 
         let new_id = self.new_ref(mut_);
-        self.add_local_borrow(local, new_id);
+        self.add_local_borrow(local, new_id, meter)?;
         Ok(AbstractValue::Reference(new_id))
     }
 
@@ -452,21 +545,29 @@
         mut_: bool,
         id: RefID,
         field: FieldHandleIndex,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
         // Any field borrows will be factored out, so don't check in the mutable case
-        let is_mut_borrow_with_full_borrows = || mut_ && self.has_full_borrows(id);
+        macro_rules! is_mut_borrow_with_full_borrows {
+            () => {
+                mut_ && self.has_full_borrows(id)
+            };
+        }
         // For new immutable borrow, the reference must be readable at that field
         // This means that there could exist a mutable borrow on some other field
-        let is_imm_borrow_with_mut_borrows = || !mut_ && !self.is_readable(id, Some(field));
-
-        if is_mut_borrow_with_full_borrows() || is_imm_borrow_with_mut_borrows() {
+        macro_rules! is_imm_borrow_with_mut_borrows {
+            () => {
+                !mut_ && !self.is_readable(id, Some(field), meter)?
+            };
+        }
+        if is_mut_borrow_with_full_borrows!() || is_imm_borrow_with_mut_borrows!() {
            // TODO improve error for mutable case
             return Err(self.error(StatusCode::FIELD_EXISTS_MUTABLE_BORROW_ERROR, offset));
         }
 
         let field_borrow_id = self.new_ref(mut_);
-        self.add_field_borrow(id, field, field_borrow_id);
-        self.release(id);
+        self.add_field_borrow(id, field, field_borrow_id, meter)?;
+        self.release(id, meter)?;
         Ok(AbstractValue::Reference(field_borrow_id))
     }
 
@@ -478,14 +579,22 @@
         variant_def: &VariantDefinition,
         mut_: bool,
         id: RefID,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<Vec<AbstractValue>> {
         // Any field borrows will be factored out, so don't check in the mutable case
-        let is_mut_borrow_with_full_borrows = || mut_ && self.has_full_borrows(id);
+        macro_rules! is_mut_borrow_with_full_borrows {
+            () => {
+                mut_ && self.has_full_borrows(id)
+            };
+        }
         // For new immutable borrow, the reference to the variant must be readable.
         // This means that there _does not_ exist a mutable borrow on some other field
-        let is_imm_borrow_with_mut_borrows = || !mut_ && !self.is_readable(id, None);
-
-        if is_mut_borrow_with_full_borrows() || is_imm_borrow_with_mut_borrows() {
+        macro_rules! is_imm_borrow_with_mut_borrows {
+            () => {
+                !mut_ && !self.is_readable(id, None, meter)?
+            };
+        }
+        if is_mut_borrow_with_full_borrows!() || is_imm_borrow_with_mut_borrows!() {
             return Err(self.error(StatusCode::FIELD_EXISTS_MUTABLE_BORROW_ERROR, offset));
         }
 
@@ -501,12 +610,13 @@
                     variant_tag,
                     i as MemberCount,
                     field_borrow_id,
-                );
-                AbstractValue::Reference(field_borrow_id)
+                    meter,
+                )?;
+                Ok(AbstractValue::Reference(field_borrow_id))
             })
-            .collect();
+            .collect::<PartialVMResult<_>>()?;
 
-        self.release(id);
+        self.release(id, meter)?;
         Ok(field_borrows)
     }
 
@@ -515,14 +625,16 @@
         offset: CodeOffset,
         mut_: bool,
         resource: StructDefinitionIndex,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
-        if (mut_ && self.is_global_borrowed(resource)) || self.is_global_mutably_borrowed(resource)
+        if (mut_ && self.is_global_borrowed(resource, meter)?)
+            || self.is_global_mutably_borrowed(resource, meter)?
         {
             return Err(self.error(StatusCode::GLOBAL_REFERENCE_ERROR, offset));
         }
 
         let new_id = self.new_ref(mut_);
-        self.add_resource_borrow(resource, new_id);
+        self.add_resource_borrow(resource, new_id, meter)?;
         Ok(AbstractValue::Reference(new_id))
     }
 
@@ -530,8 +642,9 @@
         &mut self,
         offset: CodeOffset,
         resource: StructDefinitionIndex,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
-        if self.is_global_borrowed(resource) {
+        if self.is_global_borrowed(resource, meter)? {
             Err(self.error(StatusCode::GLOBAL_REFERENCE_ERROR, offset))
         } else {
             Ok(AbstractValue::NonReference)
@@ -543,12 +656,13 @@
         offset: CodeOffset,
         vector: AbstractValue,
         mut_: bool,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<()> {
         let id = safe_unwrap!(vector.ref_id());
-        if mut_ && !self.is_writable(id) {
+        if mut_ && !self.is_writable(id, meter)? {
             return Err(self.error(StatusCode::VEC_UPDATE_EXISTS_MUTABLE_BORROW_ERROR, offset));
         }
-        self.release(id);
+        self.release(id, meter)?;
         Ok(())
     }
 
@@ -557,9 +671,10 @@
         offset: CodeOffset,
         vector: AbstractValue,
         mut_: bool,
+        meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<AbstractValue> {
         let vec_id = safe_unwrap!(vector.ref_id());
-        if mut_ && !self.is_writable(vec_id) {
+        if mut_ && !self.is_writable(vec_id, meter)? {
             return Err(self.error(
                 StatusCode::VEC_BORROW_ELEMENT_EXISTS_MUTABLE_BORROW_ERROR,
                 offset,
@@ -567,9 +682,9 @@
         }
 
         let elem_id = self.new_ref(mut_);
-        self.add_borrow(vec_id, elem_id);
+        self.add_borrow(vec_id, elem_id, meter)?;
 
-        self.release(vec_id);
+        self.release(vec_id, meter)?;
         Ok(AbstractValue::Reference(elem_id))
     }
 
@@ -581,14 +696,9 @@
         return_: &Signature,
         meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<Vec<AbstractValue>> {
-        meter.add_items(
-            Scope::Function,
-            CALL_PER_ACQUIRES_COST,
-            acquired_resources.len(),
-        )?;
         // Check acquires
         for acquired_resource in acquired_resources {
-            if self.is_global_borrowed(*acquired_resource) {
+            if self.is_global_borrowed(*acquired_resource, meter)? {
                 return Err(self.error(StatusCode::GLOBAL_REFERENCE_ERROR, offset));
             }
         }
@@ -598,7 +708,7 @@
         let mut mutable_references_to_borrow_from = BTreeSet::new();
         for id in arguments.iter().filter_map(|v| v.ref_id()) {
             if self.borrow_graph.is_mutable(id) {
-                if !self.is_writable(id) {
+                if !self.is_writable(id, meter)? {
                     return Err(
                         self.error(StatusCode::CALL_BORROWED_MUTABLE_REFERENCE_ERROR, offset)
                     );
@@ -613,45 +723,42 @@
         let return_values = return_
             .0
             .iter()
-            .map(|return_type| match return_type {
-                SignatureToken::MutableReference(_) => {
-                    let id = self.new_ref(true);
-                    for parent in &mutable_references_to_borrow_from {
-                        self.add_borrow(*parent, id);
+            .map(|return_type| {
+                Ok(match return_type {
+                    SignatureToken::MutableReference(_) => {
+                        let id = self.new_ref(true);
+                        for parent in &mutable_references_to_borrow_from {
+                            self.add_borrow(*parent, id, meter)?;
+                        }
+                        returned_refs += 1;
+                        AbstractValue::Reference(id)
                     }
-                    returned_refs += 1;
-                    AbstractValue::Reference(id)
-                }
-                SignatureToken::Reference(_) => {
-                    let id = self.new_ref(false);
-                    for parent in &all_references_to_borrow_from {
-                        self.add_borrow(*parent, id);
+                    SignatureToken::Reference(_) => {
+                        let id = self.new_ref(false);
+                        for parent in &all_references_to_borrow_from {
+                            self.add_borrow(*parent, id, meter)?;
+                        }
+                        returned_refs += 1;
+                        AbstractValue::Reference(id)
                     }
-                    returned_refs += 1;
-                    AbstractValue::Reference(id)
-                }
-                _ => AbstractValue::NonReference,
+                    _ => AbstractValue::NonReference,
+                })
             })
-            .collect();
-
-        // Meter usage of reference edges
-        meter.add_items_with_growth(
-            Scope::Function,
-            REF_PARAM_EDGE_COST,
-            all_references_to_borrow_from
-                .len()
-                .saturating_mul(returned_refs),
-            REF_PARAM_EDGE_COST_GROWTH,
-        )?;
+            .collect::<PartialVMResult<_>>()?;
 
         // Release input references
         for id in all_references_to_borrow_from {
-            self.release(id)
+            self.release(id, meter)?
         }
 
         Ok(return_values)
     }
 
-    pub fn ret(&mut self, offset: CodeOffset, values: Vec<AbstractValue>) -> PartialVMResult<()> {
+    pub fn ret(
+        &mut self,
+        offset: CodeOffset,
+        values: Vec<AbstractValue>,
+        meter: &mut (impl Meter + ?Sized),
+    ) -> PartialVMResult<()> {
         // release all local variables
         let mut released = BTreeSet::new();
         for stored_value in self.locals.iter() {
@@ -659,10 +766,12 @@
                 released.insert(*id);
             }
         }
-        released.into_iter().for_each(|id| self.release(id));
+        for id in released {
+            self.release(id, meter)?
+        }
 
         // Check that no local or global is borrowed
-        if !self.is_frame_safe_to_destroy() {
+        if !self.is_frame_safe_to_destroy(meter)? {
             return Err(self.error(
                 StatusCode::UNSAFE_RET_LOCAL_OR_RESOURCE_STILL_BORROWED,
                 offset,
@@ -671,7 +780,7 @@
 
         // Check mutable references can be transferred
         for id in values.into_iter().filter_map(|v| v.ref_id()) {
-            if self.borrow_graph.is_mutable(id) && !self.is_writable(id) {
+            if self.borrow_graph.is_mutable(id) && !self.is_writable(id, meter)? {
                 return Err(self.error(StatusCode::RET_BORROWED_MUTABLE_REFERENCE_ERROR, offset));
             }
         }
@@ -726,13 +835,14 @@
         })
     }
 
-    pub fn join_(&self, other: &Self) -> Self {
+    pub fn join_(&self, other: &Self) -> (Self, usize) {
         assert!(self.current_function == other.current_function);
         assert!(self.is_canonical() && other.is_canonical());
         assert!(self.next_id == other.next_id);
         assert!(self.locals.len() == other.locals.len());
         let mut self_graph = self.borrow_graph.clone();
         let mut other_graph = other.borrow_graph.clone();
+        let mut released = 0;
         let locals = self
             .locals
             .iter()
@@ -740,11 +850,11 @@
             .map(|(self_value, other_value)| {
                 match (self_value, other_value) {
                     (AbstractValue::Reference(id), AbstractValue::NonReference) => {
-                        self_graph.release(*id);
+                        released += self_graph.release(*id);
                         AbstractValue::NonReference
                     }
                     (AbstractValue::NonReference, AbstractValue::Reference(id)) => {
-                        other_graph.release(*id);
+                        released += other_graph.release(*id);
                         AbstractValue::NonReference
                     }
                     // The local has a value on each side, add it to the state
@@ -759,13 +869,13 @@
         let borrow_graph = self_graph.join(&other_graph);
         let current_function = self.current_function;
         let next_id = self.next_id;
-
-        Self {
+        let joined = Self {
             current_function,
             locals,
             borrow_graph,
             next_id,
-        }
+        };
+        (joined, released)
     }
 }
 
@@ -776,16 +886,16 @@
         state: &AbstractState,
         meter: &mut (impl Meter + ?Sized),
     ) -> PartialVMResult<JoinResult> {
-        let joined = Self::join_(self, state);
+        meter.add(Scope::Function, JOIN_BASE_COST)?;
+        let self_size = self.graph_size();
+        let state_size = state.graph_size();
+        let (joined, released) = Self::join_(self, state);
         assert!(joined.is_canonical());
         assert!(self.locals.len() == joined.locals.len());
-        meter.add(Scope::Function, JOIN_BASE_COST)?;
-        meter.add_items(Scope::Function, JOIN_PER_LOCAL_COST, self.locals.len())?;
-        meter.add_items(
-            Scope::Function,
-            JOIN_PER_GRAPH_ITEM_COST,
-            self.borrow_graph.graph_size(),
-        )?;
+        let max_size = max(max(self_size, state_size), joined.graph_size());
+        charge_join(self_size, state_size, meter)?;
+        charge_graph_size(max_size, meter)?;
+        charge_release(released, meter)?;
         let locals_unchanged = self
             .locals
             .iter()
@@ -801,3 +911,40 @@
         }
     }
 }
+
+fn charge_graph_size(size: usize, meter: &mut (impl Meter + ?Sized)) -> PartialVMResult<()> {
+    let size = max(size, 1);
+    meter.add_items(Scope::Function, PER_GRAPH_ITEM_COST, size)
+}
+
+fn charge_release(released: usize, meter: &mut (impl Meter + ?Sized)) -> PartialVMResult<()> {
+    let size = max(released, 1);
+    meter.add_items(
+        Scope::Function,
+        RELEASE_ITEM_COST,
+        // max(x, x^2/5)
+        max(
+            size,
+            size.saturating_mul(size) / RELEASE_ITEM_QUADRATIC_THRESHOLD,
+        ),
+    )
+}
+
+fn charge_join(
+    size1: usize,
+    size2: usize,
+    meter: &mut (impl Meter + ?Sized),
+) -> PartialVMResult<()> {
+    let size1 = max(size1, 1);
+    let size2 = max(size2, 1);
+    let size = size1.saturating_add(size2);
+    meter.add_items(
+        Scope::Function,
+        JOIN_ITEM_COST,
+        // max(x, x^2/10)
+        max(
+            size,
+            size.saturating_mul(size) /
JOIN_ITEM_QUADRATIC_THRESHOLD, + ), + ) +} diff --git a/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/mod.rs b/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/mod.rs index f3bfaf804cb16..9ac61018e27f3 100644 --- a/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/mod.rs +++ b/external-crates/move/crates/move-bytecode-verifier/src/reference_safety/mod.rs @@ -10,9 +10,7 @@ mod abstract_state; -use crate::reference_safety::abstract_state::{ - STEP_BASE_COST, STEP_PER_GRAPH_ITEM_COST, STEP_PER_LOCAL_COST, -}; +use crate::reference_safety::abstract_state::STEP_BASE_COST; use abstract_state::{AbstractState, AbstractValue}; use move_abstract_interpreter::absint::{AbstractInterpreter, FunctionContext, TransferFunctions}; use move_abstract_stack::AbstractStack; @@ -182,113 +180,110 @@ fn execute_inner( meter: &mut (impl Meter + ?Sized), ) -> PartialVMResult<()> { meter.add(Scope::Function, STEP_BASE_COST)?; - meter.add_items(Scope::Function, STEP_PER_LOCAL_COST, state.local_count())?; - meter.add_items( - Scope::Function, - STEP_PER_GRAPH_ITEM_COST, - state.graph_size(), - )?; match bytecode { - Bytecode::Pop => state.release_value(safe_unwrap_err!(verifier.stack.pop())), + Bytecode::Pop => state.release_value(safe_unwrap_err!(verifier.stack.pop()), meter)?, Bytecode::CopyLoc(local) => { - let value = state.copy_loc(offset, *local)?; + let value = state.copy_loc(offset, *local, meter)?; verifier.push(value)? } Bytecode::MoveLoc(local) => { - let value = state.move_loc(offset, *local)?; + let value = state.move_loc(offset, *local, meter)?; verifier.push(value)? } - Bytecode::StLoc(local) => { - state.st_loc(offset, *local, safe_unwrap_err!(verifier.stack.pop()))? - } + Bytecode::StLoc(local) => state.st_loc( + offset, + *local, + safe_unwrap_err!(verifier.stack.pop()), + meter, + )?, Bytecode::FreezeRef => { let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); - let frozen = state.freeze_ref(offset, id)?; + let frozen = state.freeze_ref(offset, id, meter)?; verifier.push(frozen)? } Bytecode::Eq | Bytecode::Neq => { let v1 = safe_unwrap_err!(verifier.stack.pop()); let v2 = safe_unwrap_err!(verifier.stack.pop()); - let value = state.comparison(offset, v1, v2)?; + let value = state.comparison(offset, v1, v2, meter)?; verifier.push(value)? } Bytecode::ReadRef => { let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); - let value = state.read_ref(offset, id)?; + let value = state.read_ref(offset, id, meter)?; verifier.push(value)? } Bytecode::WriteRef => { let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); let val_operand = safe_unwrap_err!(verifier.stack.pop()); safe_assert!(val_operand.is_value()); - state.write_ref(offset, id)? + state.write_ref(offset, id, meter)? } Bytecode::MutBorrowLoc(local) => { - let value = state.borrow_loc(offset, true, *local)?; + let value = state.borrow_loc(offset, true, *local, meter)?; verifier.push(value)? } Bytecode::ImmBorrowLoc(local) => { - let value = state.borrow_loc(offset, false, *local)?; + let value = state.borrow_loc(offset, false, *local, meter)?; verifier.push(value)? } Bytecode::MutBorrowField(field_handle_index) => { let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); - let value = state.borrow_field(offset, true, id, *field_handle_index)?; + let value = state.borrow_field(offset, true, id, *field_handle_index, meter)?; verifier.push(value)? 
} Bytecode::MutBorrowFieldGeneric(field_inst_index) => { let field_inst = verifier.module.field_instantiation_at(*field_inst_index); let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); - let value = state.borrow_field(offset, true, id, field_inst.handle)?; + let value = state.borrow_field(offset, true, id, field_inst.handle, meter)?; verifier.push(value)? } Bytecode::ImmBorrowField(field_handle_index) => { let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); - let value = state.borrow_field(offset, false, id, *field_handle_index)?; + let value = state.borrow_field(offset, false, id, *field_handle_index, meter)?; verifier.push(value)? } Bytecode::ImmBorrowFieldGeneric(field_inst_index) => { let field_inst = verifier.module.field_instantiation_at(*field_inst_index); let id = safe_unwrap!(safe_unwrap_err!(verifier.stack.pop()).ref_id()); - let value = state.borrow_field(offset, false, id, field_inst.handle)?; + let value = state.borrow_field(offset, false, id, field_inst.handle, meter)?; verifier.push(value)? } Bytecode::MutBorrowGlobalDeprecated(idx) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); - let value = state.borrow_global(offset, true, *idx)?; + let value = state.borrow_global(offset, true, *idx, meter)?; verifier.push(value)? } Bytecode::MutBorrowGlobalGenericDeprecated(idx) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let struct_inst = verifier.module.struct_instantiation_at(*idx); - let value = state.borrow_global(offset, true, struct_inst.def)?; + let value = state.borrow_global(offset, true, struct_inst.def, meter)?; verifier.push(value)? } Bytecode::ImmBorrowGlobalDeprecated(idx) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); - let value = state.borrow_global(offset, false, *idx)?; + let value = state.borrow_global(offset, false, *idx, meter)?; verifier.push(value)? } Bytecode::ImmBorrowGlobalGenericDeprecated(idx) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let struct_inst = verifier.module.struct_instantiation_at(*idx); - let value = state.borrow_global(offset, false, struct_inst.def)?; + let value = state.borrow_global(offset, false, struct_inst.def, meter)?; verifier.push(value)? } Bytecode::MoveFromDeprecated(idx) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); - let value = state.move_from(offset, *idx)?; + let value = state.move_from(offset, *idx, meter)?; verifier.push(value)? } Bytecode::MoveFromGenericDeprecated(idx) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let struct_inst = verifier.module.struct_instantiation_at(*idx); - let value = state.move_from(offset, struct_inst.def)?; + let value = state.move_from(offset, struct_inst.def, meter)?; verifier.push(value)? } @@ -309,7 +304,7 @@ fn execute_inner( } return_values.reverse(); - state.ret(offset, return_values)? + state.ret(offset, return_values, meter)? 
} Bytecode::Branch(_) @@ -331,7 +326,7 @@ fn execute_inner( // resource value safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); // signer reference - state.release_value(safe_unwrap_err!(verifier.stack.pop())); + state.release_value(safe_unwrap_err!(verifier.stack.pop()), meter)?; } Bytecode::LdTrue | Bytecode::LdFalse => { @@ -402,32 +397,32 @@ fn execute_inner( Bytecode::VecLen(_) => { let vec_ref = safe_unwrap_err!(verifier.stack.pop()); - state.vector_op(offset, vec_ref, false)?; + state.vector_op(offset, vec_ref, false, meter)?; verifier.push(state.value_for(&SignatureToken::U64))? } Bytecode::VecImmBorrow(_) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let vec_ref = safe_unwrap_err!(verifier.stack.pop()); - let elem_ref = state.vector_element_borrow(offset, vec_ref, false)?; + let elem_ref = state.vector_element_borrow(offset, vec_ref, false, meter)?; verifier.push(elem_ref)? } Bytecode::VecMutBorrow(_) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let vec_ref = safe_unwrap_err!(verifier.stack.pop()); - let elem_ref = state.vector_element_borrow(offset, vec_ref, true)?; + let elem_ref = state.vector_element_borrow(offset, vec_ref, true, meter)?; verifier.push(elem_ref)? } Bytecode::VecPushBack(_) => { safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let vec_ref = safe_unwrap_err!(verifier.stack.pop()); - state.vector_op(offset, vec_ref, true)?; + state.vector_op(offset, vec_ref, true, meter)?; } Bytecode::VecPopBack(idx) => { let vec_ref = safe_unwrap_err!(verifier.stack.pop()); - state.vector_op(offset, vec_ref, true)?; + state.vector_op(offset, vec_ref, true, meter)?; let element_type = vec_element_type(verifier, *idx)?; verifier.push(state.value_for(&element_type))? @@ -444,7 +439,7 @@ fn execute_inner( safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); safe_assert!(safe_unwrap_err!(verifier.stack.pop()).is_value()); let vec_ref = safe_unwrap_err!(verifier.stack.pop()); - state.vector_op(offset, vec_ref, true)?; + state.vector_op(offset, vec_ref, true, meter)?; } Bytecode::PackVariant(vidx) => { let handle = verifier.module.variant_handle_at(*vidx); @@ -486,6 +481,7 @@ fn execute_inner( variant_def, false, id, + meter, )? .into_iter() { @@ -506,6 +502,7 @@ fn execute_inner( variant_def, true, id, + meter, )? .into_iter() { @@ -525,6 +522,7 @@ fn execute_inner( variant_def, false, id, + meter, )? .into_iter() { @@ -544,13 +542,16 @@ fn execute_inner( variant_def, true, id, + meter, )? .into_iter() { verifier.push(val)? } } - Bytecode::VariantSwitch(_) => state.release_value(safe_unwrap_err!(verifier.stack.pop())), + Bytecode::VariantSwitch(_) => { + state.release_value(safe_unwrap_err!(verifier.stack.pop()), meter)? + } }; Ok(()) } diff --git a/external-crates/move/crates/move-bytecode-verifier/src/script_signature.rs b/external-crates/move/crates/move-bytecode-verifier/src/script_signature.rs index a26628c138ed0..8f11893a3f44a 100644 --- a/external-crates/move/crates/move-bytecode-verifier/src/script_signature.rs +++ b/external-crates/move/crates/move-bytecode-verifier/src/script_signature.rs @@ -9,6 +9,7 @@ //! - (DEPRECATED) Has an empty return type //! - All return types are not references //! - Satisfies the additional checks provided as an argument via `check_signature` +//! //! `check_signature` should be used by adapters to quickly and easily verify custom signature //! 
rules for entrypoints diff --git a/external-crates/move/crates/move-bytecode-verifier/src/signature.rs b/external-crates/move/crates/move-bytecode-verifier/src/signature.rs index 29b96a2a2af52..c1052c190ba6a 100644 --- a/external-crates/move/crates/move-bytecode-verifier/src/signature.rs +++ b/external-crates/move/crates/move-bytecode-verifier/src/signature.rs @@ -15,22 +15,38 @@ use move_binary_format::{ file_format_common::VERSION_6, IndexKind, }; +use move_bytecode_verifier_meter::{Meter, Scope}; use move_core_types::vm_status::StatusCode; use std::collections::{HashMap, HashSet}; -pub struct SignatureChecker<'a> { - module: &'a CompiledModule, +use crate::ability_cache::AbilityCache; + +pub struct SignatureChecker<'env, 'a, 'b, M: Meter + ?Sized> { + module: &'env CompiledModule, + module_ability_cache: &'a mut AbilityCache<'env>, + meter: &'b mut M, abilities_cache: HashMap>>, } -impl<'a> SignatureChecker<'a> { - pub fn verify_module(module: &'a CompiledModule) -> VMResult<()> { - Self::verify_module_impl(module).map_err(|e| e.finish(Location::Module(module.self_id()))) +impl<'env, 'a, 'b, M: Meter + ?Sized> SignatureChecker<'env, 'a, 'b, M> { + pub fn verify_module( + module: &'env CompiledModule, + module_ability_cache: &'a mut AbilityCache<'env>, + meter: &'b mut M, + ) -> VMResult<()> { + Self::verify_module_impl(module, module_ability_cache, meter) + .map_err(|e| e.finish(Location::Module(module.self_id()))) } - fn verify_module_impl(module: &'a CompiledModule) -> PartialVMResult<()> { + fn verify_module_impl( + module: &'env CompiledModule, + module_ability_cache: &'a mut AbilityCache<'env>, + meter: &'b mut M, + ) -> PartialVMResult<()> { let mut sig_check = Self { module, + module_ability_cache, + meter, abilities_cache: HashMap::new(), }; sig_check.verify_signature_pool(module.signatures())?; @@ -66,7 +82,7 @@ impl<'a> SignatureChecker<'a> { Ok(()) } - fn verify_struct_fields(&self, struct_defs: &[StructDefinition]) -> PartialVMResult<()> { + fn verify_struct_fields(&mut self, struct_defs: &[StructDefinition]) -> PartialVMResult<()> { for (struct_def_idx, struct_def) in struct_defs.iter().enumerate() { let fields = match &struct_def.field_information { StructFieldInformation::Native => continue, @@ -95,7 +111,7 @@ impl<'a> SignatureChecker<'a> { Ok(()) } - fn verify_enum_fields(&self, enum_defs: &[EnumDefinition]) -> PartialVMResult<()> { + fn verify_enum_fields(&mut self, enum_defs: &[EnumDefinition]) -> PartialVMResult<()> { for (enum_def_idx, enum_def) in enum_defs.iter().enumerate() { let enum_handle = self.module.datatype_handle_at(enum_def.enum_handle); let type_param_constraints: Vec<_> = enum_handle.type_param_constraints().collect(); @@ -419,7 +435,7 @@ impl<'a> SignatureChecker<'a> { } fn check_type_instantiation( - &self, + &mut self, s: &SignatureToken, type_parameters: &[AbilitySet], ) -> PartialVMResult<()> { @@ -435,7 +451,7 @@ impl<'a> SignatureChecker<'a> { } fn check_type_instantiation_( - &self, + &mut self, s: &SignatureToken, type_parameters: &[AbilitySet], ) -> PartialVMResult<()> { @@ -472,7 +488,7 @@ impl<'a> SignatureChecker<'a> { // Checks if the given types are well defined and satisfy the constraints in the given context. 
 fn check_generic_instance(
-        &self,
+        &mut self,
         type_arguments: &[SignatureToken],
         constraints: impl ExactSizeIterator<Item = AbilitySet>,
         global_abilities: &[AbilitySet],
@@ -489,8 +505,11 @@ impl<'a> SignatureChecker<'a> {
             );
         }
 
+        let meter: &mut M = self.meter;
+        let module_ability_cache: &mut AbilityCache = self.module_ability_cache;
         for (constraint, ty) in constraints.into_iter().zip(type_arguments) {
-            let given = self.module.abilities(ty, global_abilities)?;
+            let given =
+                module_ability_cache.abilities(Scope::Module, meter, global_abilities, ty)?;
             if !constraint.is_subset(given) {
                 return Err(PartialVMError::new(StatusCode::CONSTRAINT_NOT_SATISFIED)
                     .with_message(format!(
diff --git a/external-crates/move/crates/move-bytecode-verifier/src/type_safety.rs b/external-crates/move/crates/move-bytecode-verifier/src/type_safety.rs
index 6d2b33ee02425..ddace3cf61530 100644
--- a/external-crates/move/crates/move-bytecode-verifier/src/type_safety.rs
+++ b/external-crates/move/crates/move-bytecode-verifier/src/type_safety.rs
@@ -5,7 +5,7 @@
 //! This module defines the transfer functions for verifying type safety of a procedure body.
 //! It does not utilize control flow, but does check each block independently
 
-use std::num::NonZeroU64;
+use std::{cmp::max, num::NonZeroU64};
 
 use move_abstract_interpreter::{absint::FunctionContext, control_flow_graph::ControlFlowGraph};
 use move_abstract_stack::AbstractStack;
@@ -22,13 +22,17 @@ use move_binary_format::{
 use move_bytecode_verifier_meter::{Meter, Scope};
 use move_core_types::vm_status::StatusCode;
 
+use crate::ability_cache::AbilityCache;
+
 struct Locals<'a> {
     param_count: usize,
     parameters: &'a Signature,
     locals: &'a Signature,
 }
 
-const TYPE_NODE_COST: u128 = 30;
+const TYPE_NODE_COST: u128 = 6;
+const TYPE_NODE_QUADRATIC_THRESHOLD: usize = 10;
+const TYPE_PUSH_COST: u128 = 3;
 
 impl<'a> Locals<'a> {
     fn new(parameters: &'a Signature, locals: &'a Signature) -> Self {
@@ -49,19 +53,25 @@
     }
 }
 
-struct TypeSafetyChecker<'a> {
-    module: &'a CompiledModule,
-    function_context: &'a FunctionContext<'a>,
-    locals: Locals<'a>,
+struct TypeSafetyChecker<'env, 'a> {
+    module: &'env CompiledModule,
+    function_context: &'a FunctionContext<'env>,
+    ability_cache: &'a mut AbilityCache<'env>,
+    locals: Locals<'env>,
     stack: AbstractStack<SignatureToken>,
 }
 
-impl<'a> TypeSafetyChecker<'a> {
-    fn new(module: &'a CompiledModule, function_context: &'a FunctionContext<'a>) -> Self {
+impl<'env, 'a> TypeSafetyChecker<'env, 'a> {
+    fn new(
+        module: &'env CompiledModule,
+        function_context: &'a FunctionContext<'env>,
+        ability_cache: &'a mut AbilityCache<'env>,
+    ) -> Self {
         let locals = Locals::new(function_context.parameters(), function_context.locals());
         Self {
             module,
             function_context,
+            ability_cache,
             locals,
             stack: AbstractStack::new(),
         }
@@ -71,9 +81,17 @@
         self.locals.local_at(i)
     }
 
-    fn abilities(&self, t: &SignatureToken) -> PartialVMResult<AbilitySet> {
-        self.module
-            .abilities(t, self.function_context.type_parameters())
+    fn abilities(
+        &mut self,
+        meter: &mut (impl Meter + ?Sized),
+        t: &SignatureToken,
+    ) -> PartialVMResult<AbilitySet> {
+        self.ability_cache.abilities(
+            Scope::Function,
+            meter,
+            self.function_context.type_parameters(),
+            t,
+        )
     }
 
     fn error(&self, status: StatusCode, offset: CodeOffset) -> PartialVMError {
@@ -90,7 +108,7 @@
         meter: &mut (impl Meter + ?Sized),
         ty: SignatureToken,
     ) -> PartialVMResult<()> {
-        self.charge_ty(meter, &ty)?;
+        meter.add(Scope::Function, TYPE_PUSH_COST)?;
safe_unwrap_err!(self.stack.push(ty)); Ok(()) } @@ -101,50 +119,41 @@ impl<'a> TypeSafetyChecker<'a> { ty: SignatureToken, n: u64, ) -> PartialVMResult<()> { - self.charge_ty(meter, &ty)?; + meter.add_items(Scope::Function, TYPE_PUSH_COST, n as usize)?; safe_unwrap_err!(self.stack.push_n(ty, n)); Ok(()) } +} - fn charge_ty( - &mut self, - meter: &mut (impl Meter + ?Sized), - ty: &SignatureToken, - ) -> PartialVMResult<()> { - self.charge_ty_(meter, ty, 1) - } - - fn charge_ty_( - &mut self, - meter: &mut (impl Meter + ?Sized), - ty: &SignatureToken, - n: u64, - ) -> PartialVMResult<()> { - meter.add_items( - Scope::Function, - TYPE_NODE_COST, - ty.preorder_traversal().count() * (n as usize), - ) - } +macro_rules! charge_clone { + ($meter:ident, $ty:expr) => {{ + let ty: &SignatureToken = $ty; + charge_ty(&mut *$meter, ty)?; + ty.clone() + }}; +} - fn charge_tys( - &mut self, - meter: &mut (impl Meter + ?Sized), - tys: &[SignatureToken], - ) -> PartialVMResult<()> { - for ty in tys { - self.charge_ty(meter, ty)? - } - Ok(()) - } +fn charge_ty(meter: &mut (impl Meter + ?Sized), ty: &SignatureToken) -> PartialVMResult<()> { + let size = ty.preorder_traversal().count(); + meter.add_items( + Scope::Function, + TYPE_NODE_COST, + // max(x, x^2/10) + max( + size, + size.saturating_mul(size) / TYPE_NODE_QUADRATIC_THRESHOLD, + ), + ) } -pub(crate) fn verify<'a>( - module: &'a CompiledModule, - function_context: &'a FunctionContext<'a>, +pub(crate) fn verify<'env>( + module: &'env CompiledModule, + function_context: &FunctionContext<'env>, + ability_cache: &mut AbilityCache<'env>, meter: &mut (impl Meter + ?Sized), ) -> PartialVMResult<()> { - let verifier = &mut TypeSafetyChecker::new(module, function_context); + let mut checker = TypeSafetyChecker::new(module, function_context, ability_cache); + let verifier = &mut checker; for block_id in function_context.cfg().blocks() { for offset in function_context.cfg().instr_indexes(block_id) { @@ -220,7 +229,7 @@ fn borrow_field( // For generic fields access, this step materializes that type let field_handle = verifier.module.field_handle_at(field_handle_index); let struct_def = verifier.module.struct_def_at(field_handle.owner); - let expected_type = materialize_type(struct_def.struct_handle, type_args); + let expected_type = materialize_type(meter, struct_def.struct_handle, type_args)?; match operand { ST::Reference(inner) | ST::MutableReference(inner) if expected_type == *inner => (), _ => return Err(verifier.error(StatusCode::BORROWFIELD_TYPE_MISMATCH_ERROR, offset)), @@ -237,7 +246,7 @@ fn borrow_field( &fields[field_handle.field as usize] } }; - let field_type = Box::new(instantiate(&field_def.signature.0, type_args)); + let field_type = Box::new(instantiate(meter, &field_def.signature.0, type_args)?); verifier.push( meter, if mut_ { @@ -257,7 +266,7 @@ fn borrow_loc( mut_: bool, idx: LocalIndex, ) -> PartialVMResult<()> { - let loc_signature = verifier.local_at(idx).clone(); + let loc_signature = charge_clone!(meter, verifier.local_at(idx)); if loc_signature.is_reference() { return Err(verifier.error(StatusCode::BORROWLOC_REFERENCE_ERROR, offset)); @@ -289,12 +298,11 @@ fn borrow_global( } let struct_def = verifier.module.struct_def_at(idx); - let struct_type = materialize_type(struct_def.struct_handle, type_args); - if !verifier.abilities(&struct_type)?.has_key() { + let struct_type = materialize_type(meter, struct_def.struct_handle, type_args)?; + if !verifier.abilities(meter, &struct_type)?.has_key() { return 
Err(verifier.error(StatusCode::BORROWGLOBAL_WITHOUT_KEY_ABILITY, offset));
     }
 
-    let struct_type = materialize_type(struct_def.struct_handle, type_args);
     verifier.push(
         meter,
         if mut_ {
@@ -317,20 +325,21 @@ fn call(
     for parameter in parameters.0.iter().rev() {
         let arg = safe_unwrap_err!(verifier.stack.pop());
         if (type_actuals.is_empty() && &arg != parameter)
-            || (!type_actuals.is_empty() && arg != instantiate(parameter, type_actuals))
+            || (!type_actuals.is_empty() && arg != instantiate(meter, parameter, type_actuals)?)
         {
             return Err(verifier.error(StatusCode::CALL_TYPE_MISMATCH_ERROR, offset));
         }
     }
 
     for return_type in &verifier.module.signature_at(function_handle.return_).0 {
-        verifier.push(meter, instantiate(return_type, type_actuals))?
+        let sig = instantiate(meter, return_type, type_actuals)?;
+        verifier.push(meter, sig)?
     }
     Ok(())
 }
 
 fn type_fields_signature(
     verifier: &mut TypeSafetyChecker,
-    _meter: &mut (impl Meter + ?Sized), // TODO: metering
+    meter: &mut (impl Meter + ?Sized),
     offset: CodeOffset,
     struct_def: &StructDefinition,
     type_args: &Signature,
@@ -341,10 +350,10 @@
             Err(verifier.error(StatusCode::PACK_TYPE_MISMATCH_ERROR, offset))
         }
         StructFieldInformation::Declared(fields) => {
-            let mut field_sig = vec![];
-            for field_def in fields.iter() {
-                field_sig.push(instantiate(&field_def.signature.0, type_args));
-            }
+            let field_sig = fields
+                .iter()
+                .map(|field_def| instantiate(meter, &field_def.signature.0, type_args))
+                .collect::<PartialVMResult<_>>()?;
             Ok(Signature(field_sig))
         }
     }
@@ -357,7 +366,6 @@ fn pack_struct(
     offset: CodeOffset,
     struct_def: &StructDefinition,
     type_args: &Signature,
 ) -> PartialVMResult<()> {
-    let struct_type = materialize_type(struct_def.struct_handle, type_args);
     let field_sig = type_fields_signature(verifier, meter, offset, struct_def, type_args)?;
     for sig in field_sig.0.iter().rev() {
         let arg = safe_unwrap_err!(verifier.stack.pop());
         if &arg != sig {
             return Err(verifier.error(StatusCode::PACK_TYPE_MISMATCH_ERROR, offset));
         }
     }
-
+    let struct_type = materialize_type(meter, struct_def.struct_handle, type_args)?;
     verifier.push(meter, struct_type)?;
     Ok(())
 }
 
@@ -377,7 +385,7 @@ fn unpack_struct(
     struct_def: &StructDefinition,
     type_args: &Signature,
 ) -> PartialVMResult<()> {
-    let struct_type = materialize_type(struct_def.struct_handle, type_args);
+    let struct_type = materialize_type(meter, struct_def.struct_handle, type_args)?;
 
     // Pop an abstract value from the stack and check if its type is equal to the one
     // declared.
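From this point on, materialize_type and instantiate charge for the size of the type they produce rather than the type they start from, since substitution can grow a type. A small self-contained sketch of that substitution over a toy token type (hypothetical Ty, not the real SignatureToken):

    #[derive(Clone, Debug, PartialEq)]
    enum Ty {
        U64,
        Vector(Box<Ty>),
        Param(usize),
    }

    // Replace type parameters with actuals: the shape of `instantiate`'s inner recursion.
    fn subst(t: &Ty, actuals: &[Ty]) -> Ty {
        match t {
            Ty::U64 => Ty::U64,
            Ty::Vector(inner) => Ty::Vector(Box::new(subst(inner, actuals))),
            Ty::Param(i) => actuals[*i].clone(),
        }
    }

    // Preorder node count: the quantity the new charge_ty bills for.
    fn size(t: &Ty) -> usize {
        match t {
            Ty::Vector(inner) => 1 + size(inner),
            _ => 1,
        }
    }

    fn main() {
        // Substituting T0 = vector<u64> into vector<T0> yields vector<vector<u64>>.
        let actuals = vec![Ty::Vector(Box::new(Ty::U64))];
        let out = subst(&Ty::Vector(Box::new(Ty::Param(0))), &actuals);
        assert_eq!(size(&out), 3); // input had 2 nodes; the meter sees the result's 3
    }

Charging the output size guards against sequences of instantiations that build large types while paying only small, input-sized fees.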
@@ -401,18 +409,15 @@ fn pack_enum_variant( variant_def: &VariantDefinition, type_args: &Signature, ) -> PartialVMResult<()> { - let enum_type = materialize_type(enum_def.enum_handle, type_args); - let field_sig = variant_def - .fields - .iter() - .map(|field_def| instantiate(&field_def.signature.0, type_args)); - for sig in field_sig.rev() { + for field_def in variant_def.fields.iter().rev() { + let sig = instantiate(meter, &field_def.signature.0, type_args)?; let arg = safe_unwrap_err!(verifier.stack.pop()); if arg != sig { return Err(verifier.error(StatusCode::PACK_TYPE_MISMATCH_ERROR, offset)); } } + let enum_type = materialize_type(meter, enum_def.enum_handle, type_args)?; verifier.push(meter, enum_type)?; Ok(()) } @@ -425,7 +430,7 @@ fn unpack_enum_variant_by_value( variant_def: &VariantDefinition, type_args: &Signature, ) -> PartialVMResult<()> { - let enum_type = materialize_type(enum_def.enum_handle, type_args); + let enum_type = materialize_type(meter, enum_def.enum_handle, type_args)?; // Pop an abstract value from the stack and check if its type is equal to the one // declared. @@ -434,11 +439,8 @@ fn unpack_enum_variant_by_value( return Err(verifier.error(StatusCode::UNPACK_TYPE_MISMATCH_ERROR, offset)); } - let field_sig = variant_def - .fields - .iter() - .map(|field_def| instantiate(&field_def.signature.0, type_args)); - for sig in field_sig { + for field_def in &variant_def.fields { + let sig = instantiate(meter, &field_def.signature.0, type_args)?; verifier.push(meter, sig)? } Ok(()) @@ -453,8 +455,6 @@ fn unpack_enum_variant_by_ref( variant_def: &VariantDefinition, type_args: &Signature, ) -> PartialVMResult<()> { - let enum_type = materialize_type(enum_def.enum_handle, type_args); - // Pop an abstract value from the stack and check if its type is equal to the one // declared. let arg = safe_unwrap_err!(verifier.stack.pop()); @@ -467,20 +467,18 @@ fn unpack_enum_variant_by_ref( _ => return Err(verifier.error(StatusCode::UNPACK_TYPE_MISMATCH_ERROR, offset)), }; + let enum_type = materialize_type(meter, enum_def.enum_handle, type_args)?; if *inner != enum_type { return Err(verifier.error(StatusCode::UNPACK_TYPE_MISMATCH_ERROR, offset)); } - let field_sig = variant_def - .fields - .iter() - .map(|field_def| instantiate(&field_def.signature.0, type_args)); - for sig in field_sig { - let mk_sig = if mut_ { - ST::MutableReference - } else { - ST::Reference - }; + let mk_sig = if mut_ { + ST::MutableReference + } else { + ST::Reference + }; + for field_def in &variant_def.fields { + let sig = instantiate(meter, &field_def.signature.0, type_args)?; verifier.push(meter, mk_sig(Box::new(sig)))? 
} Ok(()) @@ -493,8 +491,8 @@ fn exists( struct_def: &StructDefinition, type_args: &Signature, ) -> PartialVMResult<()> { - let struct_type = materialize_type(struct_def.struct_handle, type_args); - if !verifier.abilities(&struct_type)?.has_key() { + let struct_type = materialize_type(meter, struct_def.struct_handle, type_args)?; + if !verifier.abilities(meter, &struct_type)?.has_key() { return Err(verifier.error( StatusCode::EXISTS_WITHOUT_KEY_ABILITY_OR_BAD_ARGUMENT, offset, @@ -521,12 +519,11 @@ fn move_from( struct_def: &StructDefinition, type_args: &Signature, ) -> PartialVMResult<()> { - let struct_type = materialize_type(struct_def.struct_handle, type_args); - if !verifier.abilities(&struct_type)?.has_key() { + let struct_type = materialize_type(meter, struct_def.struct_handle, type_args)?; + if !verifier.abilities(meter, &struct_type)?.has_key() { return Err(verifier.error(StatusCode::MOVEFROM_WITHOUT_KEY_ABILITY, offset)); } - let struct_type = materialize_type(struct_def.struct_handle, type_args); let operand = safe_unwrap_err!(verifier.stack.pop()); if operand != ST::Address { return Err(verifier.error(StatusCode::MOVEFROM_TYPE_MISMATCH_ERROR, offset)); @@ -538,16 +535,16 @@ fn move_from( fn move_to( verifier: &mut TypeSafetyChecker, + meter: &mut (impl Meter + ?Sized), offset: CodeOffset, struct_def: &StructDefinition, type_args: &Signature, ) -> PartialVMResult<()> { - let struct_type = materialize_type(struct_def.struct_handle, type_args); - if !verifier.abilities(&struct_type)?.has_key() { + let struct_type = materialize_type(meter, struct_def.struct_handle, type_args)?; + if !verifier.abilities(meter, &struct_type)?.has_key() { return Err(verifier.error(StatusCode::MOVETO_WITHOUT_KEY_ABILITY, offset)); } - let struct_type = materialize_type(struct_def.struct_handle, type_args); let key_struct_operand = safe_unwrap_err!(verifier.stack.pop()); let signer_reference_operand = safe_unwrap_err!(verifier.stack.pop()); if key_struct_operand != struct_type { @@ -602,10 +599,8 @@ fn verify_instr( match bytecode { Bytecode::Pop => { let operand = safe_unwrap_err!(verifier.stack.pop()); - let abilities = verifier - .module - .abilities(&operand, verifier.function_context.type_parameters()); - if !abilities?.has_drop() { + let abilities = verifier.abilities(meter, &operand)?; + if !abilities.has_drop() { return Err(verifier.error(StatusCode::POP_WITHOUT_DROP_ABILITY, offset)); } } @@ -663,7 +658,6 @@ fn verify_instr( Bytecode::MutBorrowFieldGeneric(field_inst_index) => { let field_inst = verifier.module.field_instantiation_at(*field_inst_index); let type_inst = verifier.module.signature_at(field_inst.type_parameters); - verifier.charge_tys(meter, &type_inst.0)?; borrow_field(verifier, meter, offset, true, field_inst.handle, type_inst)? } @@ -679,7 +673,6 @@ fn verify_instr( Bytecode::ImmBorrowFieldGeneric(field_inst_index) => { let field_inst = verifier.module.field_instantiation_at(*field_inst_index); let type_inst = verifier.module.signature_at(field_inst.type_parameters); - verifier.charge_tys(meter, &type_inst.0)?; borrow_field(verifier, meter, offset, false, field_inst.handle, type_inst)? 
} @@ -708,7 +701,7 @@ fn verify_instr( } Bytecode::LdConst(idx) => { - let signature = verifier.module.constant_at(*idx).type_.clone(); + let signature = charge_clone!(meter, &verifier.module.constant_at(*idx).type_); verifier.push(meter, signature)?; } @@ -717,22 +710,15 @@ fn verify_instr( } Bytecode::CopyLoc(idx) => { - let local_signature = verifier.local_at(*idx).clone(); - if !verifier - .module - .abilities( - &local_signature, - verifier.function_context.type_parameters(), - )? - .has_copy() - { + let local_signature = charge_clone!(meter, verifier.local_at(*idx)); + if !verifier.abilities(meter, &local_signature)?.has_copy() { return Err(verifier.error(StatusCode::COPYLOC_WITHOUT_COPY_ABILITY, offset)); } verifier.push(meter, local_signature)? } Bytecode::MoveLoc(idx) => { - let local_signature = verifier.local_at(*idx).clone(); + let local_signature = charge_clone!(meter, verifier.local_at(*idx)); verifier.push(meter, local_signature)? } @@ -749,7 +735,6 @@ fn verify_instr( let func_inst = verifier.module.function_instantiation_at(*idx); let func_handle = verifier.module.function_handle_at(func_inst.handle); let type_args = &verifier.module.signature_at(func_inst.type_parameters); - verifier.charge_tys(meter, &type_args.0)?; call(verifier, meter, offset, func_handle, type_args)? } @@ -768,7 +753,6 @@ fn verify_instr( let struct_inst = verifier.module.struct_instantiation_at(*idx); let struct_def = verifier.module.struct_def_at(struct_inst.def); let type_args = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_args.0)?; pack_struct(verifier, meter, offset, struct_def, type_args)? } @@ -787,7 +771,6 @@ fn verify_instr( let struct_inst = verifier.module.struct_instantiation_at(*idx); let struct_def = verifier.module.struct_def_at(struct_inst.def); let type_args = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_args.0)?; unpack_struct(verifier, meter, offset, struct_def, type_args)? } @@ -795,7 +778,7 @@ fn verify_instr( let operand = safe_unwrap_err!(verifier.stack.pop()); match operand { ST::Reference(inner) | ST::MutableReference(inner) => { - if !verifier.abilities(&inner)?.has_copy() { + if !verifier.abilities(meter, &inner)?.has_copy() { return Err( verifier.error(StatusCode::READREF_WITHOUT_COPY_ABILITY, offset) ); @@ -817,7 +800,7 @@ fn verify_instr( ) } }; - if !verifier.abilities(&ref_inner_signature)?.has_drop() { + if !verifier.abilities(meter, &ref_inner_signature)?.has_drop() { return Err(verifier.error(StatusCode::WRITEREF_WITHOUT_DROP_ABILITY, offset)); } @@ -897,7 +880,7 @@ fn verify_instr( Bytecode::Eq | Bytecode::Neq => { let operand1 = safe_unwrap_err!(verifier.stack.pop()); let operand2 = safe_unwrap_err!(verifier.stack.pop()); - if verifier.abilities(&operand1)?.has_drop() && operand1 == operand2 { + if verifier.abilities(meter, &operand1)?.has_drop() && operand1 == operand2 { verifier.push(meter, ST::Bool)?; } else { return Err(verifier.error(StatusCode::EQUALITY_OP_TYPE_MISMATCH_ERROR, offset)); @@ -921,7 +904,6 @@ fn verify_instr( Bytecode::MutBorrowGlobalGenericDeprecated(idx) => { let struct_inst = verifier.module.struct_instantiation_at(*idx); let type_inst = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_inst.0)?; borrow_global(verifier, meter, offset, true, struct_inst.def, type_inst)? 
} @@ -932,7 +914,6 @@ fn verify_instr( Bytecode::ImmBorrowGlobalGenericDeprecated(idx) => { let struct_inst = verifier.module.struct_instantiation_at(*idx); let type_inst = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_inst.0)?; borrow_global(verifier, meter, offset, false, struct_inst.def, type_inst)? } @@ -945,7 +926,6 @@ fn verify_instr( let struct_inst = verifier.module.struct_instantiation_at(*idx); let struct_def = verifier.module.struct_def_at(struct_inst.def); let type_args = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_args.0)?; exists(verifier, meter, offset, struct_def, type_args)? } @@ -958,21 +938,19 @@ fn verify_instr( let struct_inst = verifier.module.struct_instantiation_at(*idx); let struct_def = verifier.module.struct_def_at(struct_inst.def); let type_args = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_args.0)?; move_from(verifier, meter, offset, struct_def, type_args)? } Bytecode::MoveToDeprecated(idx) => { let struct_def = verifier.module.struct_def_at(*idx); - move_to(verifier, offset, struct_def, &Signature(vec![]))? + move_to(verifier, meter, offset, struct_def, &Signature(vec![]))? } Bytecode::MoveToGenericDeprecated(idx) => { let struct_inst = verifier.module.struct_instantiation_at(*idx); let struct_def = verifier.module.struct_def_at(struct_inst.def); let type_args = verifier.module.signature_at(struct_inst.type_parameters); - verifier.charge_tys(meter, &type_args.0)?; - move_to(verifier, offset, struct_def, type_args)? + move_to(verifier, meter, offset, struct_def, type_args)? } Bytecode::VecPack(idx, num) => { @@ -987,7 +965,8 @@ fn verify_instr( return Err(verifier.error(StatusCode::TYPE_MISMATCH, offset)); } } - verifier.push(meter, ST::Vector(Box::new(element_type.clone())))?; + let element_type = charge_clone!(meter, element_type); + verifier.push(meter, ST::Vector(Box::new(element_type)))?; } Bytecode::VecLen(idx) => { @@ -1037,10 +1016,13 @@ fn verify_instr( Bytecode::VecUnpack(idx, num) => { let operand_vec = safe_unwrap_err!(verifier.stack.pop()); let declared_element_type = &verifier.module.signature_at(*idx).0[0]; - if operand_vec != ST::Vector(Box::new(declared_element_type.clone())) { + let correct_vec_ty = + matches!(operand_vec, ST::Vector(inner) if &*inner == declared_element_type); + if !correct_vec_ty { return Err(verifier.error(StatusCode::TYPE_MISMATCH, offset)); } - verifier.push_n(meter, declared_element_type.clone(), *num)?; + let declared_element_type = charge_clone!(meter, declared_element_type); + verifier.push_n(meter, declared_element_type, *num)?; } Bytecode::VecSwap(idx) => { @@ -1096,7 +1078,6 @@ fn verify_instr( let type_args = verifier.module.signature_at(enum_inst.type_parameters); let enum_def = verifier.module.enum_def_at(enum_inst.def); let variant_def = &enum_def.variants[handle.variant as usize]; - verifier.charge_tys(meter, &type_args.0)?; pack_enum_variant(verifier, meter, offset, enum_def, variant_def, type_args)? } Bytecode::UnpackVariant(vidx) => { @@ -1118,7 +1099,6 @@ fn verify_instr( let type_args = verifier.module.signature_at(enum_inst.type_parameters); let enum_def = verifier.module.enum_def_at(enum_inst.def); let variant_def = &enum_def.variants[handle.variant as usize]; - verifier.charge_tys(meter, &type_args.0)?; unpack_enum_variant_by_value(verifier, meter, offset, enum_def, variant_def, type_args)? 
 }
 
             Bytecode::UnpackVariantImmRef(vidx) => {
@@ -1155,7 +1135,6 @@
             let type_args = verifier.module.signature_at(enum_inst.type_parameters);
             let enum_def = verifier.module.enum_def_at(enum_inst.def);
             let variant_def = &enum_def.variants[handle.variant as usize];
-            verifier.charge_tys(meter, &type_args.0)?;
             unpack_enum_variant_by_ref(
                 verifier,
                 meter,
@@ -1172,7 +1151,6 @@
             let type_args = verifier.module.signature_at(enum_inst.type_parameters);
             let enum_def = verifier.module.enum_def_at(enum_inst.def);
             let variant_def = &enum_def.variants[handle.variant as usize];
-            verifier.charge_tys(meter, &type_args.0)?;
             unpack_enum_variant_by_ref(
                 verifier,
                 meter,
@@ -1195,49 +1173,64 @@
 // Helpers functions for types
 //
 
-fn materialize_type(struct_handle: DatatypeHandleIndex, type_args: &Signature) -> SignatureToken {
-    if type_args.is_empty() {
+fn materialize_type(
+    meter: &mut (impl Meter + ?Sized),
+    struct_handle: DatatypeHandleIndex,
+    type_args: &Signature,
+) -> PartialVMResult<SignatureToken> {
+    let ty = if type_args.is_empty() {
         ST::Datatype(struct_handle)
     } else {
         ST::DatatypeInstantiation(Box::new((struct_handle, type_args.0.clone())))
-    }
+    };
+    charge_ty(meter, &ty)?;
+    Ok(ty)
 }
 
-fn instantiate(token: &SignatureToken, subst: &Signature) -> SignatureToken {
-    use SignatureToken::*;
-
-    if subst.0.is_empty() {
-        return token.clone();
-    }
-
-    match token {
-        Bool => Bool,
-        U8 => U8,
-        U16 => U16,
-        U32 => U32,
-        U64 => U64,
-        U128 => U128,
-        U256 => U256,
-        Address => Address,
-        Signer => Signer,
-        Vector(ty) => Vector(Box::new(instantiate(ty, subst))),
-        Datatype(idx) => Datatype(*idx),
-        DatatypeInstantiation(inst) => {
-            let (idx, type_args) = &**inst;
-            DatatypeInstantiation(Box::new((
-                *idx,
-                type_args.iter().map(|ty| instantiate(ty, subst)).collect(),
-            )))
-        }
-        Reference(ty) => Reference(Box::new(instantiate(ty, subst))),
-        MutableReference(ty) => MutableReference(Box::new(instantiate(ty, subst))),
-        TypeParameter(idx) => {
-            // Assume that the caller has previously parsed and verified the structure of the
-            // file and that this guarantees that type parameter indices are always in bounds.
-            debug_assert!((*idx as usize) < subst.len());
-            subst.0[*idx as usize].clone()
+fn instantiate(
+    meter: &mut (impl Meter + ?Sized),
+    token: &SignatureToken,
+    subst: &Signature,
+) -> PartialVMResult<SignatureToken> {
+    fn rec(token: &SignatureToken, subst: &Signature) -> SignatureToken {
+        use SignatureToken::*;
+
+        if subst.0.is_empty() {
+            return token.clone();
+        }
+
+        match token {
+            Bool => Bool,
+            U8 => U8,
+            U16 => U16,
+            U32 => U32,
+            U64 => U64,
+            U128 => U128,
+            U256 => U256,
+            Address => Address,
+            Signer => Signer,
+            Vector(ty) => Vector(Box::new(rec(ty, subst))),
+            Datatype(idx) => Datatype(*idx),
+            DatatypeInstantiation(inst) => {
+                let (idx, type_args) = &**inst;
+                DatatypeInstantiation(Box::new((
+                    *idx,
+                    type_args.iter().map(|ty| rec(ty, subst)).collect(),
+                )))
+            }
+            Reference(ty) => Reference(Box::new(rec(ty, subst))),
+            MutableReference(ty) => MutableReference(Box::new(rec(ty, subst))),
+            TypeParameter(idx) => {
+                // Assume that the caller has previously parsed and verified the structure of the
+                // file and that this guarantees that type parameter indices are always in bounds.
+ debug_assert!((*idx as usize) < subst.len()); + subst.0[*idx as usize].clone() + } } } + let ty = rec(token, subst); + charge_ty(meter, &ty)?; + Ok(ty) } fn get_vector_element_type( diff --git a/external-crates/move/crates/move-bytecode-verifier/src/verifier.rs b/external-crates/move/crates/move-bytecode-verifier/src/verifier.rs index 16fc546c260c8..2c441ce0cd2bb 100644 --- a/external-crates/move/crates/move-bytecode-verifier/src/verifier.rs +++ b/external-crates/move/crates/move-bytecode-verifier/src/verifier.rs @@ -4,11 +4,18 @@ //! This module contains the public APIs supported by the bytecode verifier. use crate::{ - ability_field_requirements, check_duplication::DuplicationChecker, - code_unit_verifier::CodeUnitVerifier, constants, data_defs::RecursiveDataDefChecker, friends, - instantiation_loops::InstantiationLoopChecker, instruction_consistency::InstructionConsistency, - limits::LimitsVerifier, script_signature, - script_signature::no_additional_script_signature_checks, signature::SignatureChecker, + ability_cache::AbilityCache, + ability_field_requirements, + check_duplication::DuplicationChecker, + code_unit_verifier::{self}, + constants, + data_defs::RecursiveDataDefChecker, + friends, + instantiation_loops::InstantiationLoopChecker, + instruction_consistency::InstructionConsistency, + limits::LimitsVerifier, + script_signature::{self, no_additional_script_signature_checks}, + signature::SignatureChecker, }; use move_binary_format::{ check_bounds::BoundsChecker, @@ -76,6 +83,7 @@ pub fn verify_module_with_config_metered( module: &CompiledModule, meter: &mut (impl Meter + ?Sized), ) -> VMResult<()> { + let ability_cache = &mut AbilityCache::new(module); BoundsChecker::verify_module(module).map_err(|e| { // We can't point the error at the module, because if bounds-checking // failed, we cannot safely index into module's handle to itself. 
@@ -83,14 +91,14 @@ pub fn verify_module_with_config_metered( })?; LimitsVerifier::verify_module(config, module)?; DuplicationChecker::verify_module(module)?; - SignatureChecker::verify_module(module)?; + SignatureChecker::verify_module(module, ability_cache, meter)?; InstructionConsistency::verify_module(module)?; constants::verify_module(module)?; friends::verify_module(module)?; - ability_field_requirements::verify_module(module)?; + ability_field_requirements::verify_module(module, ability_cache, meter)?; RecursiveDataDefChecker::verify_module(module)?; InstantiationLoopChecker::verify_module(module)?; - CodeUnitVerifier::verify_module(config, module, meter)?; + code_unit_verifier::verify_module(config, module, ability_cache, meter)?; script_signature::verify_module(module, no_additional_script_signature_checks) } diff --git a/external-crates/move/crates/move-command-line-common/src/files.rs b/external-crates/move/crates/move-command-line-common/src/files.rs index 9300f49f895fc..b7b1039c7c12a 100644 --- a/external-crates/move/crates/move-command-line-common/src/files.rs +++ b/external-crates/move/crates/move-command-line-common/src/files.rs @@ -212,6 +212,7 @@ pub fn try_exists_vfs(vfs_path: &VfsPath) -> VfsResult { /// - For each directory in `paths`, it will return all files that satisfy the predicate /// - Any file explicitly passed in `paths`, it will include that file in the result, regardless /// of the file extension +/// /// It implements the same functionality as find_filenames above but for the virtual file system pub fn find_filenames_vfs bool>( paths: &[VfsPath], @@ -244,7 +245,8 @@ pub fn find_filenames_vfs bool>( /// - For each directory in `paths`, it will return all files with the `MOVE_EXTENSION` found /// recursively in that directory /// - If `keep_specified_files` any file explicitly passed in `paths`, will be added to the result -/// Otherwise, they will be discarded +/// +/// Otherwise, they will be discarded /// It implements the same functionality as find_move_filenames above but for the virtual file /// system pub fn find_move_filenames_vfs( diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/functions/large_enum.exp b/external-crates/move/crates/move-compiler-transactional-tests/tests/functions/large_enum.exp new file mode 100644 index 0000000000000..f32b667741b9f --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/functions/large_enum.exp @@ -0,0 +1,15 @@ +processed 4 tasks + +task 2, line 34: +//# run 0x42::m::x1 +return values: |0|{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +task 3, line 36: +//# run 0x42::m::x3 +Error: Function execution failed with VMError: { + major_status: VERIFICATION_ERROR, + sub_status: None, + location: undefined, + indices: [], + offsets: [], +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/functions/large_enum.move b/external-crates/move/crates/move-compiler-transactional-tests/tests/functions/large_enum.move new file mode 100644 index 0000000000000..97813d424a567 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/functions/large_enum.move @@ -0,0 +1,36 @@ +// tests error after serializing a large enum return value + +//# init --edition 2024.alpha + +//# publish + +module 0x42::m { + +public enum X1 { + Big(u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8), +} + +public enum X2 { + V1(X1, X1, X1), + V2(X1, X1, X1), + V3(X1, X1, X1), +} + +public enum X3 { + X2(X2, X2, X2), 
+ U64(u64), +} + +entry fun x1(): X1 { + X1::Big(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) +} + +entry fun x3(): X3 { + X3::U64(0) +} + +} + +//# run 0x42::m::x1 + +//# run 0x42::m::x3 diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/matching/field_ordering.exp b/external-crates/move/crates/move-compiler-transactional-tests/tests/matching/field_ordering.exp new file mode 100644 index 0000000000000..fc5a4436b29d4 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/matching/field_ordering.exp @@ -0,0 +1 @@ +processed 3 tasks diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/matching/field_ordering.move b/external-crates/move/crates/move-compiler-transactional-tests/tests/matching/field_ordering.move new file mode 100644 index 0000000000000..0b062b9decac5 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/matching/field_ordering.move @@ -0,0 +1,19 @@ +//# init --edition 2024.beta + +//# publish +module 0x42::m; + +public enum E { + V { zero: u64, one: u64 } +} + +fun add1(n: u64): u64 { n + 1 } + +public fun main() { + let one = match (E::V { zero: 0, one: 1}) { + E::V { one: _, zero: one } => add1(one) + }; + assert!(one == 1) +} + +//# run 0x42::m::main diff --git a/external-crates/move/crates/move-compiler/src/cfgir/borrows/state.rs b/external-crates/move/crates/move-compiler/src/cfgir/borrows/state.rs index 93d345fd3965b..a9a97be7e1efe 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/borrows/state.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/borrows/state.rs @@ -369,7 +369,7 @@ impl BorrowState { fn release(&mut self, ref_id: RefID) { self.id_to_exp.remove(&ref_id); - self.borrows.release(ref_id) + self.borrows.release(ref_id); } fn divergent_control_flow(&mut self) { diff --git a/external-crates/move/crates/move-compiler/src/cfgir/cfg.rs b/external-crates/move/crates/move-compiler/src/cfgir/cfg.rs index d640a3f20589a..8cad547a7ce11 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/cfg.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/cfg.rs @@ -205,6 +205,7 @@ impl<'a> ImmForwardCFG<'a> { /// Returns /// - A CFG /// - A set of infinite loop heads + /// /// This _must_ be called after `BlockMutCFG::new`, as the mutable version optimizes the code /// This will be done for external usage, /// since the Mut CFG is used during the building of the cfgir::ast::Program diff --git a/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs b/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs index cea38a40416d1..df12006c03ad6 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs @@ -271,7 +271,7 @@ fn compile_match_head( ) -> MatchStep { debug_print!( context.debug.match_specialization, - ("-----\ncompiling with fringe queue entry" => fringe; dbg) + ("-----\ncompiling with fringe queue entry" => fringe; sdbg) ); if matrix.is_empty() { MatchStep::Failure @@ -333,8 +333,10 @@ fn compile_match_head( // If we have an actual destructuring anywhere, we do that and take the specialized // matrix (which holds the default matrix and bindings, for our purpose). If we don't, // we just take the default matrix. 
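The hunk below (and the `shared/matching.rs` change later in this diff) threads the declaration-order field map into match compilation, so that fields written in any order in a pattern are rebound in declaration order. A self-contained sketch of that reordering, with a plain `HashMap` and `&str` keys standing in for the compiler's `UniqueMap<Field, usize>` and `Fields<T>`:

```rust
use std::collections::HashMap;

// Simplified stand-in for `order_fields_by_decl`: tag each written field with
// its declaration index, then sort by that index.
fn order_fields_by_decl<T>(
    decl_order: &HashMap<&str, usize>,
    fields: Vec<(&'static str, T)>,
) -> Vec<(usize, &'static str, T)> {
    let mut out: Vec<_> = fields
        .into_iter()
        .map(|(f, t)| (decl_order[f], f, t))
        .collect();
    out.sort_by_key(|(idx, _, _)| *idx);
    out
}

fn main() {
    // enum E { V { zero: u64, one: u64 } } declares `zero` before `one`.
    let decl_order = HashMap::from([("zero", 0), ("one", 1)]);
    // A pattern may still write `one` first: E::V { one: _, zero: one }.
    let written = vec![("one", "_"), ("zero", "one")];
    let ordered = order_fields_by_decl(&decl_order, written);
    assert_eq!(ordered[0].1, "zero");
    assert_eq!(ordered[1].1, "one");
}
```

This is exactly the behavior exercised by the `field_ordering.move` transactional test earlier in this diff: the binder named `one` is bound to the `zero` field's value, so the match must follow declaration order, not written order.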
+ let decl_fields = context.info.struct_fields(&mident, &datatype_name).unwrap(); let unpack = if let Some((ploc, arg_types)) = matrix.first_struct_ctors() { - let fringe_binders = context.make_imm_ref_match_binders(ploc, arg_types); + let fringe_binders = + context.make_imm_ref_match_binders(decl_fields, ploc, arg_types); let fringe_exps = make_fringe_entries(&fringe_binders); let mut inner_fringe = fringe.clone(); for fringe_exp in fringe_exps.into_iter().rev() { @@ -376,7 +378,12 @@ fn compile_match_head( let mut arms = BTreeMap::new(); for (ctor, (ploc, arg_types)) in ctors { unmatched_variants.remove(&ctor); - let fringe_binders = context.make_imm_ref_match_binders(ploc, arg_types); + let decl_fields = context + .info + .enum_variant_fields(&mident, &datatype_name, &ctor) + .unwrap(); + let fringe_binders = + context.make_imm_ref_match_binders(decl_fields, ploc, arg_types); let fringe_exps = make_fringe_entries(&fringe_binders); let mut inner_fringe = fringe.clone(); for fringe_exp in fringe_exps.into_iter().rev() { @@ -898,6 +905,12 @@ fn make_arm_variant_unpack_fields( ) -> (Vec<(FringeEntry, MatchPattern)>, Vec<(Field, Var, Type)>) { let field_pats = fields.clone().map(|_key, (ndx, (_, pat))| (ndx, pat)); + let decl_fields = context + .hlir_context + .info + .enum_variant_fields(&mident, &enum_, &variant) + .unwrap(); + let field_tys = { let field_tys = fields.map(|_key, (ndx, (ty, _))| (ndx, ty)); if let Some(mut_) = mut_ref { @@ -911,13 +924,12 @@ fn make_arm_variant_unpack_fields( field_tys } }; - let fringe_binders = context.hlir_context.make_unpack_binders(pat_loc, field_tys); + let fringe_binders = + context + .hlir_context + .make_unpack_binders(decl_fields.clone(), pat_loc, field_tys); let fringe_exps = make_fringe_entries(&fringe_binders); - let decl_fields = context - .hlir_context - .info - .enum_variant_fields(&mident, &enum_, &variant); let ordered_pats = order_fields_by_decl(decl_fields, field_pats); let mut unpack_fields: Vec<(Field, Var, Type)> = vec![]; @@ -946,6 +958,11 @@ fn make_arm_struct_unpack_fields( fields: Fields<(Type, MatchPattern)>, ) -> (Vec<(FringeEntry, MatchPattern)>, Vec<(Field, Var, Type)>) { let field_pats = fields.clone().map(|_key, (ndx, (_, pat))| (ndx, pat)); + let decl_fields = context + .hlir_context + .info + .struct_fields(&mident, &struct_) + .unwrap(); let field_tys = { let field_tys = fields.map(|_key, (ndx, (ty, _))| (ndx, ty)); @@ -960,10 +977,12 @@ fn make_arm_struct_unpack_fields( field_tys } }; - let fringe_binders = context.hlir_context.make_unpack_binders(pat_loc, field_tys); + let fringe_binders = + context + .hlir_context + .make_unpack_binders(decl_fields.clone(), pat_loc, field_tys); let fringe_exps = make_fringe_entries(&fringe_binders); - let decl_fields = context.hlir_context.info.struct_fields(&mident, &struct_); let ordered_pats = order_fields_by_decl(decl_fields, field_pats); let mut unpack_fields: Vec<(Field, Var, Type)> = vec![]; diff --git a/external-crates/move/crates/move-compiler/src/linters/mod.rs b/external-crates/move/crates/move-compiler/src/linters/mod.rs index 01ab4d3c2902f..0457c5d511f51 100644 --- a/external-crates/move/crates/move-compiler/src/linters/mod.rs +++ b/external-crates/move/crates/move-compiler/src/linters/mod.rs @@ -8,6 +8,7 @@ use crate::{ linters::constant_naming::ConstantNamingVisitor, typing::visitor::TypingVisitor, }; pub mod constant_naming; +mod unnecessary_while_loop; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum LintLevel { @@ -34,16 +35,26 @@ pub const ALLOW_ATTR_CATEGORY: 
&str = "lint"; pub const LINT_WARNING_PREFIX: &str = "Lint "; pub const CONSTANT_NAMING_FILTER_NAME: &str = "constant_naming"; pub const CONSTANT_NAMING_DIAG_CODE: u8 = 1; +pub const WHILE_TRUE_TO_LOOP_FILTER_NAME: &str = "while_true"; +pub const WHILE_TRUE_TO_LOOP_DIAG_CODE: u8 = 4; pub fn known_filters() -> (Option, Vec) { ( Some(ALLOW_ATTR_CATEGORY.into()), - vec![WarningFilter::code( - Some(LINT_WARNING_PREFIX), - LinterDiagnosticCategory::Style as u8, - CONSTANT_NAMING_DIAG_CODE, - Some(CONSTANT_NAMING_FILTER_NAME), - )], + vec![ + WarningFilter::code( + Some(LINT_WARNING_PREFIX), + LinterDiagnosticCategory::Style as u8, + CONSTANT_NAMING_DIAG_CODE, + Some(CONSTANT_NAMING_FILTER_NAME), + ), + WarningFilter::code( + Some(LINT_WARNING_PREFIX), + LinterDiagnosticCategory::Complexity as u8, + WHILE_TRUE_TO_LOOP_DIAG_CODE, + Some(WHILE_TRUE_TO_LOOP_FILTER_NAME), + ), + ], ) } @@ -51,9 +62,12 @@ pub fn linter_visitors(level: LintLevel) -> Vec { match level { LintLevel::None | LintLevel::Default => vec![], LintLevel::All => { - vec![constant_naming::ConstantNamingVisitor::visitor( - ConstantNamingVisitor, - )] + vec![ + constant_naming::ConstantNamingVisitor::visitor(ConstantNamingVisitor), + unnecessary_while_loop::WhileTrueToLoop::visitor( + unnecessary_while_loop::WhileTrueToLoop, + ), + ] } } } diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs new file mode 100644 index 0000000000000..0985a02e7b01c --- /dev/null +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs @@ -0,0 +1,68 @@ +//! Encourages replacing `while(true)` with `loop` for infinite loops in Move for clarity and conciseness. +//! Identifies `while(true)` patterns, suggesting a more idiomatic approach using `loop`. +//! Aims to enhance code readability and adherence to Rust idioms. +use crate::{ + diag, + diagnostics::{ + codes::{custom, DiagnosticInfo, Severity}, + WarningFilters, + }, + expansion::ast::Value_, + shared::CompilationEnv, + typing::{ + ast::{self as T, UnannotatedExp_}, + visitor::{TypingVisitorConstructor, TypingVisitorContext}, + }, +}; + +use super::{LinterDiagnosticCategory, LINT_WARNING_PREFIX, WHILE_TRUE_TO_LOOP_DIAG_CODE}; + +const WHILE_TRUE_TO_LOOP_DIAG: DiagnosticInfo = custom( + LINT_WARNING_PREFIX, + Severity::Warning, + LinterDiagnosticCategory::Complexity as u8, + WHILE_TRUE_TO_LOOP_DIAG_CODE, + "unnecessary 'while (true)', replace with 'loop'", +); + +pub struct WhileTrueToLoop; + +pub struct Context<'a> { + env: &'a mut CompilationEnv, +} + +impl TypingVisitorConstructor for WhileTrueToLoop { + type Context<'a> = Context<'a>; + + fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { + Context { env } + } +} + +impl TypingVisitorContext for Context<'_> { + fn add_warning_filter_scope(&mut self, filter: WarningFilters) { + self.env.add_warning_filter_scope(filter) + } + fn pop_warning_filter_scope(&mut self) { + self.env.pop_warning_filter_scope() + } + + fn visit_exp_custom(&mut self, exp: &mut T::Exp) -> bool { + let UnannotatedExp_::While(_, cond, _) = &exp.exp.value else { + return false; + }; + let UnannotatedExp_::Value(sp!(_, Value_::Bool(true))) = &cond.exp.value else { + return false; + }; + + let msg = "'while (true)' can be always replaced with 'loop'"; + let mut diag = diag!(WHILE_TRUE_TO_LOOP_DIAG, (exp.exp.loc, msg)); + diag.add_note( + "A 'loop' is more useful in these cases. 
Unlike 'while', 'loop' can have a \ + 'break' with a value, e.g. 'let x = loop { break 42 };'", + ); + self.env.add_diag(diag); + + false + } +} diff --git a/external-crates/move/crates/move-compiler/src/naming/translate.rs b/external-crates/move/crates/move-compiler/src/naming/translate.rs index ed6593ec344e9..f238eb60f6ff1 100644 --- a/external-crates/move/crates/move-compiler/src/naming/translate.rs +++ b/external-crates/move/crates/move-compiler/src/naming/translate.rs @@ -51,6 +51,7 @@ pub struct ResolvedModuleFunction { pub mident: ModuleIdent, pub name: FunctionName, pub tyarg_arity: usize, + #[allow(unused)] pub arity: usize, } @@ -93,6 +94,7 @@ pub enum FieldInfo { pub struct ResolvedConstant { pub mident: ModuleIdent, pub name: ConstantName, + #[allow(unused)] pub decl_loc: Loc, } @@ -138,6 +140,7 @@ pub(super) enum ResolvedConstructor { #[derive(Debug, Clone)] pub(super) enum ResolvedCallSubject { Builtin(Box), + #[allow(unused)] Constructor(Box), Function(Box), Var(Box), @@ -146,6 +149,7 @@ pub(super) enum ResolvedCallSubject { #[derive(Debug, Clone)] pub(super) enum ResolvedUseFunFunction { + #[allow(unused)] Builtin(Box), Module(Box), Unbound, @@ -3193,7 +3197,7 @@ fn unique_pattern_binders( ) -> Vec<(Mutability, P::Var)> { use E::MatchPattern_ as EP; - fn report_duplicate(context: &mut Context, var: P::Var, locs: &Vec<(Mutability, Loc)>) { + fn report_duplicate(context: &mut Context, var: P::Var, locs: &[(Mutability, Loc)]) { assert!(locs.len() > 1, "ICE pattern duplicate detection error"); let (_, first_loc) = locs.first().unwrap(); let mut diag = diag!( diff --git a/external-crates/move/crates/move-compiler/src/shared/matching.rs b/external-crates/move/crates/move-compiler/src/shared/matching.rs index 7ef8e6da583d8..26ca069a61b9b 100644 --- a/external-crates/move/crates/move-compiler/src/shared/matching.rs +++ b/external-crates/move/crates/move-compiler/src/shared/matching.rs @@ -78,6 +78,7 @@ pub trait MatchContext { fn make_imm_ref_match_binders( &mut self, + decl_fields: UniqueMap, pattern_loc: Loc, arg_types: Fields, ) -> Vec<(Field, N::Var, N::Type)> { @@ -92,7 +93,7 @@ pub trait MatchContext { } } - let fields = order_fields_by_decl(None, arg_types.clone()); + let fields = order_fields_by_decl(decl_fields, arg_types.clone()); fields .into_iter() .map(|(_, field_name, field_type)| { @@ -107,10 +108,11 @@ pub trait MatchContext { fn make_unpack_binders( &mut self, + decl_fields: UniqueMap, pattern_loc: Loc, arg_types: Fields, ) -> Vec<(Field, N::Var, N::Type)> { - let fields = order_fields_by_decl(None, arg_types.clone()); + let fields = order_fields_by_decl(decl_fields, arg_types.clone()); fields .into_iter() .map(|(_, field_name, field_type)| { @@ -283,7 +285,8 @@ impl PatternArm { let field_pats = fields.clone().map(|_key, (ndx, (_, pat))| (ndx, pat)); let decl_fields = context .program_info() - .enum_variant_fields(&mident, &enum_, &name); + .enum_variant_fields(&mident, &enum_, &name) + .unwrap(); let ordered_pats = order_fields_by_decl(decl_fields, field_pats); for (_, _, pat) in ordered_pats.into_iter().rev() { output.pats.push_front(pat); @@ -341,7 +344,10 @@ impl PatternArm { TP::Struct(mident, struct_, _, fields) | TP::BorrowStruct(_, mident, struct_, _, fields) => { let field_pats = fields.clone().map(|_key, (ndx, (_, pat))| (ndx, pat)); - let decl_fields = context.program_info().struct_fields(&mident, &struct_); + let decl_fields = context + .program_info() + .struct_fields(&mident, &struct_) + .unwrap(); let ordered_pats = order_fields_by_decl(decl_fields, 
field_pats);
             for (_, _, pat) in ordered_pats.into_iter().rev() {
                 output.pats.push_front(pat);
@@ -921,22 +927,13 @@ fn combine_pattern_fields(
 
 /// Helper function for creating an ordered list of fields Field information and Fields<T>.
 pub fn order_fields_by_decl<T>(
-    decl_fields: Option<UniqueMap<Field, usize>>,
+    decl_fields: UniqueMap<Field, usize>,
     fields: Fields<T>,
 ) -> Vec<(usize, Field, T)> {
-    let mut texp_fields: Vec<(usize, Field, T)> = if let Some(field_map) = decl_fields {
-        fields
-            .into_iter()
-            .map(|(f, (_exp_idx, t))| (*field_map.get(&f).unwrap(), f, t))
-            .collect()
-    } else {
-        // If no field map, compiler error in typing.
-        fields
-            .into_iter()
-            .enumerate()
-            .map(|(ndx, (f, (_exp_idx, t)))| (ndx, f, t))
-            .collect()
-    };
+    let mut texp_fields: Vec<(usize, Field, T)> = fields
+        .into_iter()
+        .map(|(f, (_exp_idx, t))| (*decl_fields.get(&f).unwrap(), f, t))
+        .collect();
     texp_fields.sort_by(|(decl_idx1, _, _), (decl_idx2, _, _)| decl_idx1.cmp(decl_idx2));
     texp_fields
 }
diff --git a/external-crates/move/crates/move-compiler/src/shared/program_info.rs b/external-crates/move/crates/move-compiler/src/shared/program_info.rs
index 250bdeeb01213..2ceadbb461cce 100644
--- a/external-crates/move/crates/move-compiler/src/shared/program_info.rs
+++ b/external-crates/move/crates/move-compiler/src/shared/program_info.rs
@@ -219,6 +219,21 @@ impl ProgramInfo {
             _ => panic!("ICE should have failed in naming"),
         }
     }
+
+    pub fn datatype_declared_loc(&self, m: &ModuleIdent, n: &DatatypeName) -> Loc {
+        match self.datatype_kind(m, n) {
+            DatatypeKind::Struct => self.struct_declared_loc_(m, &n.0.value),
+            DatatypeKind::Enum => self.enum_declared_loc_(m, &n.0.value),
+        }
+    }
+
+    pub fn datatype_declared_abilities(&self, m: &ModuleIdent, n: &DatatypeName) -> &AbilitySet {
+        match self.datatype_kind(m, n) {
+            DatatypeKind::Struct => self.struct_declared_abilities(m, n),
+            DatatypeKind::Enum => self.enum_declared_abilities(m, n),
+        }
+    }
+
     pub fn struct_definition(&self, m: &ModuleIdent, n: &DatatypeName) -> &StructDefinition {
         self.struct_definition_opt(m, n)
             .expect("ICE should have failed in naming")
diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs
index aa371ee00e3e6..d5f657d6cb2ee 100644
--- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs
+++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs
@@ -15,11 +15,12 @@ use crate::{
     shared::{CompilationEnv, Identifier},
     sui_mode::linters::{FREEZE_FUN, PUBLIC_FREEZE_FUN, SUI_PKG_NAME, TRANSFER_MOD_NAME},
     typing::{
-        ast as T,
+        ast as T, core,
         visitor::{TypingVisitorConstructor, TypingVisitorContext},
     },
 };
 use move_ir_types::location::*;
+use once_cell::sync::Lazy;
 use regex::Regex;
 
 const FREEZE_CAPABILITY_DIAG: DiagnosticInfo = custom(
@@ -27,7 +28,7 @@ const FREEZE_CAPABILITY_DIAG: DiagnosticInfo = custom(
     Severity::Warning,
     LinterDiagnosticCategory::Sui as u8,
     LinterDiagnosticCode::FreezingCapability as u8,
-    "Freezing a capability-like type can lead to design issues.",
+    "freezing potential capability",
 );
 
 const FREEZE_FUNCTIONS: &[(&str, &str, &str)] = &[
@@ -39,16 +40,14 @@ pub struct WarnFreezeCapability;
 
 pub struct Context<'a> {
     env: &'a mut CompilationEnv,
-    capability_regex: Regex,
 }
 
+static REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r".*Cap(?:[A-Z0-9_]+|ability|$).*").unwrap());
+
 impl TypingVisitorConstructor for WarnFreezeCapability {
     type Context<'a> = Context<'a>;
 
     fn
context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { - env, - capability_regex: Regex::new(r"Cap(ability)?(\w*v?\d*)?$").unwrap(), - } + Context { env } } } @@ -74,8 +73,8 @@ impl<'a> TypingVisitorContext for Context<'a> { fn visit_exp_custom(&mut self, exp: &mut T::Exp) -> bool { if let T::UnannotatedExp_::ModuleCall(fun) = &exp.exp.value { - if self.is_freeze_function(fun) { - self.check_type_arguments(fun, exp.exp.loc); + if is_freeze_function(fun) { + check_type_arguments(self, fun, exp.exp.loc); } } false @@ -90,27 +89,28 @@ impl<'a> TypingVisitorContext for Context<'a> { } } -impl<'a> Context<'a> { - fn is_freeze_function(&self, fun: &T::ModuleCall) -> bool { - FREEZE_FUNCTIONS.iter().any(|(addr, module, fname)| { - fun.module.value.is(*addr, *module) && &fun.name.value().as_str() == fname - }) - } - - fn check_type_arguments(&mut self, fun: &T::ModuleCall, loc: Loc) { - for sp!(_, type_arg) in &fun.type_arguments { - if let Some(sp!(_, TypeName_::ModuleType(_, struct_name))) = type_arg.type_name() { - if self.capability_regex.is_match(struct_name.value().as_str()) { - self.report_freeze_capability(loc); - break; - } - } - } - } +fn is_freeze_function(fun: &T::ModuleCall) -> bool { + FREEZE_FUNCTIONS.iter().any(|(addr, module, fname)| { + fun.module.value.is(*addr, *module) && &fun.name.value().as_str() == fname + }) +} - fn report_freeze_capability(&mut self, loc: Loc) { - let msg = "Freezing a capability-like type can lead to design issues."; - let diag = diag!(FREEZE_CAPABILITY_DIAG, (loc, msg)); - self.env.add_diag(diag); +fn check_type_arguments(context: &mut Context, fun: &T::ModuleCall, loc: Loc) { + for sp!(_, type_arg) in &fun.type_arguments { + let Some(sp!(_, TypeName_::ModuleType(_, struct_name))) = type_arg.type_name() else { + continue; + }; + if REGEX.is_match(struct_name.value().as_str()) { + let msg = format!( + "The type {} is potentially a capability based on its name", + core::error_format_(type_arg, &core::Subst::empty()), + ); + let mut diag = diag!(FREEZE_CAPABILITY_DIAG, (loc, msg)); + diag.add_note( + "Freezing a capability might lock out critical operations \ + or otherwise open access to operations that otherwise should be restricted", + ); + context.env.add_diag(diag); + }; } } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs index cd20476c33a53..d058f11d255d8 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs @@ -26,7 +26,7 @@ const MISSING_KEY_ABILITY_DIAG: DiagnosticInfo = custom( Severity::Warning, LinterDiagnosticCategory::Sui as u8, LinterDiagnosticCode::MissingKey as u8, - "The struct's first field is 'id' of type 'sui::object::UID' but is missing the 'key' ability.", + "struct with id but missing key ability", ); pub struct MissingKeyVisitor; @@ -51,8 +51,6 @@ impl TypingVisitorContext for Context<'_> { self.env.pop_warning_filter_scope() } - const VISIT_TYPES: bool = true; - fn visit_struct_custom( &mut self, _module: ModuleIdent, @@ -70,9 +68,12 @@ impl TypingVisitorContext for Context<'_> { } fn first_field_has_id_field_of_type_uid(sdef: &StructDefinition) -> bool { - matches!(&sdef.fields, StructFields::Defined(_, fields) if fields.iter().any(|(_, symbol, ftype)| { - ftype.0 == 0 && symbol == &symbol!("id") && ftype.1.value.is("sui", "object", "UID") - })) + 
match &sdef.fields {
+        StructFields::Defined(_, fields) => fields.iter().any(|(_, symbol, (idx, ty))| {
+            *idx == 0 && symbol == &symbol!("id") && ty.value.is("sui", "object", "UID")
+        }),
+        StructFields::Native(_) => false,
+    }
 }
 
 fn lacks_key_ability(sdef: &StructDefinition) -> bool {
diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs
index 84e3e7ed9a125..d8e21d6bb3167 100644
--- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs
+++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs
@@ -20,6 +20,7 @@ pub mod custom_state_change;
 pub mod freeze_wrapped;
 pub mod freezing_capability;
 pub mod missing_key;
+pub mod public_mut_tx_context;
 pub mod public_random;
 pub mod self_transfer;
 pub mod share_owned;
@@ -72,6 +73,7 @@ pub const COLLECTION_EQUALITY_FILTER_NAME: &str = "collection_equality";
 pub const PUBLIC_RANDOM_FILTER_NAME: &str = "public_random";
 pub const MISSING_KEY_FILTER_NAME: &str = "missing_key";
 pub const FREEZING_CAPABILITY_FILTER_NAME: &str = "freezing_capability";
+pub const PREFER_MUTABLE_TX_CONTEXT_FILTER_NAME: &str = "prefer_mut_tx_context";
 
 pub const RANDOM_MOD_NAME: &str = "random";
 pub const RANDOM_STRUCT_NAME: &str = "Random";
@@ -90,6 +92,7 @@ pub enum LinterDiagnosticCode {
     PublicRandom,
     MissingKey,
     FreezingCapability,
+    PreferMutableTxContext,
 }
 
 pub fn known_filters() -> (Option<Symbol>, Vec<WarningFilter>) {
@@ -149,6 +152,12 @@ pub fn known_filters() -> (Option<Symbol>, Vec<WarningFilter>) {
             LinterDiagnosticCode::FreezingCapability as u8,
             Some(FREEZING_CAPABILITY_FILTER_NAME),
         ),
+        WarningFilter::code(
+            Some(LINT_WARNING_PREFIX),
+            LinterDiagnosticCategory::Sui as u8,
+            LinterDiagnosticCode::PreferMutableTxContext as u8,
+            Some(PREFER_MUTABLE_TX_CONTEXT_FILTER_NAME),
+        ),
     ];
 
     (Some(ALLOW_ATTR_CATEGORY.into()), filters)
@@ -169,7 +178,10 @@ pub fn linter_visitors(level: LintLevel) -> Vec<Visitor> {
         ],
         LintLevel::All => {
             let mut visitors = linter_visitors(LintLevel::Default);
-            visitors.extend([freezing_capability::WarnFreezeCapability.visitor()]);
+            visitors.extend([
+                freezing_capability::WarnFreezeCapability.visitor(),
+                public_mut_tx_context::PreferMutableTxContext.visitor(),
+            ]);
             visitors
         }
     }
diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs
new file mode 100644
index 0000000000000..c664fa1aa3fb0
--- /dev/null
+++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs
@@ -0,0 +1,97 @@
+// Copyright (c) The Move Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+//! Enforces that public functions use `&mut TxContext` instead of `&TxContext` to ensure upgradability.
+//! Detects and reports instances where a non-mutable reference to `TxContext` is used in public function signatures.
+//! Promotes best practices for future-proofing smart contract code by allowing mutation of the transaction context.
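Before the implementation, a stand-alone sketch of the condition this lint checks (a simplified parameter model, illustrative only; the real visitor below matches `Type_::Ref(false, _)` against `sui::tx_context::TxContext` on the typed AST):

```rust
// Simplified model of a function parameter: a value, or a reference that is
// either mutable or immutable.
enum ParamTy {
    Value(&'static str),
    Ref { mutable: bool, to: &'static str },
}

// The lint's core condition: a public function taking an immutable reference
// to TxContext is flagged, since upgrades cannot later change it to &mut.
fn flags_param(is_public: bool, param: &ParamTy) -> bool {
    is_public
        && matches!(
            param,
            ParamTy::Ref { mutable: false, to } if *to == "sui::tx_context::TxContext"
        )
}

fn main() {
    let imm = ParamTy::Ref { mutable: false, to: "sui::tx_context::TxContext" };
    let mutable = ParamTy::Ref { mutable: true, to: "sui::tx_context::TxContext" };
    let plain = ParamTy::Value("u64");
    assert!(flags_param(true, &imm)); // public fun f(_: &TxContext) -> warn
    assert!(!flags_param(true, &mutable)); // &mut TxContext is fine
    assert!(!flags_param(false, &imm)); // non-public functions are skipped
    assert!(!flags_param(true, &plain));
}
```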
+use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; + +use crate::{ + diag, + diagnostics::{ + codes::{custom, DiagnosticInfo, Severity}, + WarningFilters, + }, + expansion::ast::{ModuleIdent, Visibility}, + naming::ast::Type_, + parser::ast::FunctionName, + shared::CompilationEnv, + sui_mode::{SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME, TX_CONTEXT_TYPE_NAME}, + typing::{ + ast as T, + visitor::{TypingVisitorConstructor, TypingVisitorContext}, + }, +}; +use move_ir_types::location::Loc; + +const REQUIRE_MUTABLE_TX_CONTEXT_DIAG: DiagnosticInfo = custom( + LINT_WARNING_PREFIX, + Severity::Warning, + LinterDiagnosticCategory::Sui as u8, + LinterDiagnosticCode::PreferMutableTxContext as u8, + "prefer '&mut TxContext' over '&TxContext'", +); + +pub struct PreferMutableTxContext; + +pub struct Context<'a> { + env: &'a mut CompilationEnv, +} + +impl TypingVisitorConstructor for PreferMutableTxContext { + type Context<'a> = Context<'a>; + + fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { + Context { env } + } +} + +impl TypingVisitorContext for Context<'_> { + fn add_warning_filter_scope(&mut self, filter: WarningFilters) { + self.env.add_warning_filter_scope(filter) + } + fn pop_warning_filter_scope(&mut self) { + self.env.pop_warning_filter_scope() + } + + fn visit_module_custom(&mut self, ident: ModuleIdent, _mdef: &mut T::ModuleDefinition) -> bool { + // skip if in 'sui::tx_context' + ident.value.is(SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME) + } + + fn visit_function_custom( + &mut self, + _module: ModuleIdent, + _function_name: FunctionName, + fdef: &mut T::Function, + ) -> bool { + if !matches!(&fdef.visibility, Visibility::Public(_)) { + return false; + } + + for (_, _, sp!(loc, param_ty_)) in &fdef.signature.parameters { + if matches!( + param_ty_, + Type_::Ref(false, t) if t.value.is(SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME, TX_CONTEXT_TYPE_NAME), + ) { + report_non_mutable_tx_context(self.env, *loc); + } + } + + false + } +} + +fn report_non_mutable_tx_context(env: &mut CompilationEnv, loc: Loc) { + let msg = format!( + "'public' functions should prefer '&mut {0}' over '&{0}' for better upgradability.", + TX_CONTEXT_TYPE_NAME + ); + let mut diag = diag!(REQUIRE_MUTABLE_TX_CONTEXT_DIAG, (loc, msg)); + diag.add_note( + "When upgrading, the public function cannot be modified to take '&mut TxContext' instead \ + of '&TxContext'. 
As such, it is recommended to consider using '&mut TxContext' to \
+         future-proof the function.",
+    );
+    env.add_diag(diag);
+}
diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs b/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs
index 31238adb5258d..aba3fc400fe56 100644
--- a/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs
+++ b/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs
@@ -868,8 +868,8 @@ fn entry_return(
     let (declared_loc_opt, declared_abilities) = match tn_ {
         TypeName_::Multiple(_) => (None, AbilitySet::collection(*tloc)),
         TypeName_::ModuleType(m, n) => (
-            Some(context.info.struct_declared_loc(m, n)),
-            context.info.struct_declared_abilities(m, n).clone(),
+            Some(context.info.datatype_declared_loc(m, n)),
+            context.info.datatype_declared_abilities(m, n).clone(),
         ),
         TypeName_::Builtin(b) => (None, b.value.declared_abilities(b.loc)),
     };
@@ -1064,7 +1064,7 @@ fn check_private_transfer(context: &mut Context, loc: Loc, mcall: &ModuleCall) {
     let store_loc = if let Some((first_ty_module, first_ty_name)) = &first_ty_tn {
         let abilities = context
             .info
-            .struct_declared_abilities(first_ty_module, first_ty_name);
+            .datatype_declared_abilities(first_ty_module, first_ty_name);
         abilities.ability_loc_(Ability_::Store).unwrap()
     } else {
         first_ty
diff --git a/external-crates/move/crates/move-compiler/src/to_bytecode/canonicalize_handles.rs b/external-crates/move/crates/move-compiler/src/to_bytecode/canonicalize_handles.rs
index 36d4a2544ef4b..5f816dd627ab4 100644
--- a/external-crates/move/crates/move-compiler/src/to_bytecode/canonicalize_handles.rs
+++ b/external-crates/move/crates/move-compiler/src/to_bytecode/canonicalize_handles.rs
@@ -31,7 +31,7 @@ use move_symbol_pool::Symbol;
 ///
 /// - Friend Declarations are sorted in lexical order (by address name and module name), followed by
 ///   unnamed addresses in their original order.
-
+///
 /// Key for ordering module handles, distinguishing the module's self handle, handles with names,
 /// and handles without names.
 #[derive(Eq, PartialEq, Ord, PartialOrd)]
@@ -336,7 +336,7 @@ fn remap_code(code: &mut CodeUnit, functions: &[TableIndex]) {
 ///
 /// is sorted according to `key`.
 fn permutation<'p, T, K: Ord>(
-    pool: &'p Vec<T>,
+    pool: &'p [T],
     key: impl Fn(TableIndex, &'p T) -> K + 'p,
 ) -> Vec<TableIndex> {
     let mut inverse: Vec<_> = (0..pool.len() as TableIndex).collect();
@@ -352,7 +352,7 @@ fn permutation<'p, T, K: Ord>(
 
 /// Re-order `pool` according to the `permutation` array. `permutation[i]` is the new location of
 /// `pool[i]`.
-fn apply_permutation<T>(pool: &mut Vec<T>, mut permutation: Vec<TableIndex>) {
+fn apply_permutation<T>(pool: &mut [T], mut permutation: Vec<TableIndex>) {
     assert_eq!(pool.len(), permutation.len());
 
     // At every iteration we confirm that one more value is in its final position in the pool,
diff --git a/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs b/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs
index dc6c387a99cd9..9ba6765d66dc3 100644
--- a/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs
+++ b/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs
@@ -19,6 +19,7 @@ const NOTE_STR: &str = "note";
 #[derive(Debug, Clone)]
 pub struct Deprecation {
     // The source location of the deprecation attribute
+    #[allow(unused)]
     pub source_location: Loc,
 
     // The type of the member that is deprecated (function, constant, etc.)
pub location: AttributePosition, diff --git a/external-crates/move/crates/move-compiler/src/typing/expand.rs b/external-crates/move/crates/move-compiler/src/typing/expand.rs index 2fd127861672a..9f36764277e55 100644 --- a/external-crates/move/crates/move-compiler/src/typing/expand.rs +++ b/external-crates/move/crates/move-compiler/src/typing/expand.rs @@ -318,11 +318,11 @@ fn inferred_numerical_value( Some(sp!(_, bt)) if bt.is_numeric() => bt, _ => panic!("ICE inferred num failed {:?}", &ty.value), }; - let u8_max = U256::from(std::u8::MAX); - let u16_max = U256::from(std::u16::MAX); - let u32_max = U256::from(std::u32::MAX); - let u64_max = U256::from(std::u64::MAX); - let u128_max = U256::from(std::u128::MAX); + let u8_max = U256::from(u8::MAX); + let u16_max = U256::from(u16::MAX); + let u32_max = U256::from(u32::MAX); + let u64_max = U256::from(u64::MAX); + let u128_max = U256::from(u128::MAX); let u256_max = U256::max_value(); let max = match bt { BT::U8 => u8_max, diff --git a/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs b/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs index 55dbda9a44d3e..2e6b0dde0696c 100644 --- a/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs +++ b/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs @@ -373,7 +373,12 @@ fn find_counterexample_impl( // recur. If we don't, we check it as a default specialization. if let Some((ploc, arg_types)) = matrix.first_struct_ctors() { let ctor_arity = arg_types.len() as u32; - let fringe_binders = context.make_imm_ref_match_binders(ploc, arg_types); + let decl_fields = context + .modules + .struct_fields(&mident, &datatype_name) + .unwrap(); + let fringe_binders = + context.make_imm_ref_match_binders(decl_fields, ploc, arg_types); let is_positional = context .modules .struct_is_positional(&mident, &datatype_name); @@ -433,7 +438,12 @@ fn find_counterexample_impl( if unmatched_variants.is_empty() { for (ctor, (ploc, arg_types)) in ctors { let ctor_arity = arg_types.len() as u32; - let fringe_binders = context.make_imm_ref_match_binders(ploc, arg_types); + let decl_fields = context + .modules + .enum_variant_fields(&mident, &datatype_name, &ctor) + .unwrap(); + let fringe_binders = + context.make_imm_ref_match_binders(decl_fields, ploc, arg_types); let is_positional = context .modules diff --git a/external-crates/move/crates/move-compiler/src/typing/visitor.rs b/external-crates/move/crates/move-compiler/src/typing/visitor.rs index 1e9a51a0f5e02..8af7b81038e59 100644 --- a/external-crates/move/crates/move-compiler/src/typing/visitor.rs +++ b/external-crates/move/crates/move-compiler/src/typing/visitor.rs @@ -84,13 +84,11 @@ pub trait TypingVisitorContext { self.pop_warning_filter_scope(); return; } - if Self::VISIT_TYPES { - for (struct_name, sdef) in mdef.structs.key_cloned_iter_mut() { - self.visit_struct(ident, struct_name, sdef) - } - for (enum_name, edef) in mdef.enums.key_cloned_iter_mut() { - self.visit_enum(ident, enum_name, edef) - } + for (struct_name, sdef) in mdef.structs.key_cloned_iter_mut() { + self.visit_struct(ident, struct_name, sdef) + } + for (enum_name, edef) in mdef.enums.key_cloned_iter_mut() { + self.visit_enum(ident, enum_name, edef) } for (constant_name, cdef) in mdef.constants.key_cloned_iter_mut() { self.visit_constant(ident, constant_name, cdef) diff --git a/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs b/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs index 
066cd6f982799..7aab1a163d71d 100644
--- a/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs
+++ b/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs
@@ -713,7 +713,7 @@ fn convert_attribute_value_u64(
 ) -> Option<(Loc, u64)> {
     use E::{AttributeValue_ as EAV, Value_ as EV};
     match value {
-        sp!(vloc, EAV::Value(sp!(_, EV::InferredNum(u)))) if *u <= U256::from(std::u64::MAX) => {
+        sp!(vloc, EAV::Value(sp!(_, EV::InferredNum(u)))) if *u <= U256::from(u64::MAX) => {
             Some((*vloc, u.down_cast_lossy()))
         }
         sp!(vloc, EAV::Value(sp!(_, EV::U64(u)))) => Some((*vloc, *u)),
diff --git a/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_while_loop.move b/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_while_loop.move
new file mode 100644
index 0000000000000..0f06f05bc8e51
--- /dev/null
+++ b/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_while_loop.move
@@ -0,0 +1,11 @@
+module 0x42::loop_test {
+
+    // These should trigger but currently don't
+    public fun false_negative_obfuscated_true() {
+        let always_true = true;
+        while (always_true) {};
+        while (true && true) {};
+        while (true || false) {};
+        while (1 > 0) {};
+    }
+}
diff --git a/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_while_loop.move b/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_while_loop.move
new file mode 100644
index 0000000000000..1477d966a74c3
--- /dev/null
+++ b/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_while_loop.move
@@ -0,0 +1,7 @@
+module 0x42::loop_test {
+
+    #[allow(lint(while_true))]
+    public fun suppressed_while_true() {
+        while (true) {};
+    }
+}
diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_while_loop.move b/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_while_loop.move
new file mode 100644
index 0000000000000..d5ad1e0cfb6f6
--- /dev/null
+++ b/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_while_loop.move
@@ -0,0 +1,11 @@
+module 0x42::loop_test {
+
+    public fun true_negative_while_with_condition() {
+        let b = false;
+        while (false) {};
+        while (b) {};
+        while (false && true) {};
+        while (false || false) {};
+        while (0 > 1) {};
+    }
+}
diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_while_loop.exp b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_while_loop.exp
new file mode 100644
index 0000000000000..2ba9789c925b3
--- /dev/null
+++ b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_while_loop.exp
@@ -0,0 +1,18 @@
+warning[Lint W01004]: unnecessary 'while (true)', replace with 'loop'
+  ┌─ tests/linter/true_positive_unnecessary_while_loop.move:3:9
+  │
+3 │         while (true) {};
+  │         ^^^^^^^^^^^^^^^ 'while (true)' can be always replaced with 'loop'
+  │
+  = A 'loop' is more useful in these cases. Unlike 'while', 'loop' can have a 'break' with a value, e.g.
'let x = loop { break 42 };' + = This warning can be suppressed with '#[allow(lint(while_true))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W01004]: unnecessary 'while (true)', replace with 'loop' + ┌─ tests/linter/true_positive_unnecessary_while_loop.move:4:9 + │ +4 │ while (true) { break } + │ ^^^^^^^^^^^^^^^^^^^^^^ 'while (true)' can be always replaced with 'loop' + │ + = A 'loop' is more useful in these cases. Unlike 'while', 'loop' can have a 'break' with a value, e.g. 'let x = loop { break 42 };' + = This warning can be suppressed with '#[allow(lint(while_true))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_while_loop.move b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_while_loop.move new file mode 100644 index 0000000000000..eaf03b83ba5e8 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_while_loop.move @@ -0,0 +1,6 @@ +module 0x42::loop_test { + public fun true_positive_infinite_loop() { + while (true) {}; + while (true) { break } + } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/field_order_counterexample.exp b/external-crates/move/crates/move-compiler/tests/move_2024/matching/field_order_counterexample.exp new file mode 100644 index 0000000000000..a8fb54edd0fc8 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/field_order_counterexample.exp @@ -0,0 +1,8 @@ +error[E04036]: non-exhaustive pattern + ┌─ tests/move_2024/matching/field_order_counterexample.move:8:12 + │ +8 │ match (e) { + │ ^ Pattern 'E::V { zero: _0, one: _ }' not covered + │ + = When '_0' is not 0 + diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/field_order_counterexample.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/field_order_counterexample.move new file mode 100644 index 0000000000000..eccb5f946e26f --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/field_order_counterexample.move @@ -0,0 +1,11 @@ +module a::m; + +public enum E { + V { zero: u64, one: u64 } +} + +public fun bad(e: &E) { + match (e) { + E::V { one: _, zero: 0 } => (), + } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/stloc_error.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/stloc_error.move new file mode 100644 index 0000000000000..3119c3442764c --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/stloc_error.move @@ -0,0 +1,30 @@ +module a::m; + +public enum Proposal has store, copy, drop { + ConfigProposalQuorum {approved: vector, rejected: vector}, + MpcTxProposalQuorum {approved: vector, rejected: vector}, + MpcTxProposalSpecific { require_approval_users: vector, threshold: u16, approved: vector, rejected: vector }, +} + +public struct Users {} + +public fun quorum_approves(_users: &Users, _approved: &vector): bool { false } + +public(package) fun is_proposal_approved(proposal: &Proposal, users: &Users): bool { + match (proposal) { + Proposal::ConfigProposalQuorum { approved: approved, rejected: _ } => { + users.quorum_approves(approved) + }, + Proposal::MpcTxProposalQuorum { approved:approved, rejected: _ } => { + users.quorum_approves(approved) + }, + Proposal::MpcTxProposalSpecific { + require_approval_users : _, + threshold: 
threshold, + approved: approved, + rejected: _ + } => { + ((approved.length() as u16) >= *threshold) + } + } +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/custom_state_change.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/custom_state_change.move index 37edbb43fc4a7..ff6151946908a 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/custom_state_change.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/custom_state_change.move @@ -11,7 +11,7 @@ module a::test { id: UID } - #[allow(lint(self_transfer))] + #[allow(lint(self_transfer, prefer_mut_tx_context))] public fun custom_transfer_bad(o: S1, ctx: &TxContext) { transfer::transfer(o, tx_context::sender(ctx)) } diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/edge_case_lint_missing_key.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/edge_case_lint_missing_key.move index 644fddfdcb112..270976888885f 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/edge_case_lint_missing_key.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/edge_case_lint_missing_key.move @@ -1,9 +1,12 @@ module a::edge_cases { - use sui::another::UID as AnotherUID; - + struct UID {} // Test case with a different UID type struct DifferentUID { - id: AnotherUID, + id: sui::another::UID, + } + + struct NotAnObject { + id: UID, } } diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.exp index 59e10198d3f98..6162ba8ff27d4 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.exp +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.exp @@ -1,4 +1,4 @@ -warning[Lint W99007]: The struct's first field is 'id' of type 'sui::object::UID' but is missing the 'key' ability. 
+warning[Lint W99007]: struct with id but missing key ability ┌─ tests/sui_mode/linter/false_negative_lint_missing_key.move:22:5 │ 22 │ ╭ struct Wrapper { diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.move index c1db6e30d4591..667c9f88872ca 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/false_negative_lint_missing_key.move @@ -28,4 +28,4 @@ module sui::object { struct UID has store { id: address, } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_negatives.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_negatives.move index d0371f2f4695a..2f7f69ec9fe5c 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_negatives.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_negatives.move @@ -17,6 +17,10 @@ module a::test_false_negatives { id: UID } + struct Capv0 has key { + id: UID + } + public fun freeze_admin_rights(w: AdminRights) { transfer::public_freeze_object(w); } @@ -28,6 +32,10 @@ module a::test_false_negatives { public fun freeze_access_control(w: AccessControl) { transfer::public_freeze_object(w); } + + public fun freeze_cap_v(w: Capv0) { + transfer::public_freeze_object(w); + } } module sui::object { @@ -40,4 +48,4 @@ module sui::transfer { public fun public_freeze_object(_: T) { abort 0 } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.exp index 920ef1eaee67c..fad2409846d1c 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.exp +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.exp @@ -1,16 +1,18 @@ -warning[Lint W99008]: Freezing a capability-like type can lead to design issues. +warning[Lint W99008]: freezing potential capability ┌─ tests/sui_mode/linter/freezing_capability_false_positives.move:25:9 │ 25 │ transfer::public_freeze_object(w); - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Freezing a capability-like type can lead to design issues. + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The type 'a::test_false_positives::NoCap' is potentially a capability based on its name │ + = Freezing a capability might lock out critical operations or otherwise open access to operations that otherwise should be restricted = This warning can be suppressed with '#[allow(lint(freezing_capability))]' applied to the 'module' or module member ('const', 'fun', or 'struct') -warning[Lint W99008]: Freezing a capability-like type can lead to design issues. - ┌─ tests/sui_mode/linter/freezing_capability_false_positives.move:37:9 +warning[Lint W99008]: freezing potential capability + ┌─ tests/sui_mode/linter/freezing_capability_false_positives.move:29:9 │ -37 │ transfer::public_freeze_object(w); - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Freezing a capability-like type can lead to design issues. 
+29 │ transfer::public_freeze_object(w); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The type 'a::test_false_positives::CapAndHat' is potentially a capability based on its name │ + = Freezing a capability might lock out critical operations or otherwise open access to operations that otherwise should be restricted = This warning can be suppressed with '#[allow(lint(freezing_capability))]' applied to the 'module' or module member ('const', 'fun', or 'struct') diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.move index 452dd131a86f0..c96c031482168 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_false_positives.move @@ -5,11 +5,11 @@ module a::test_false_positives { use sui::object::UID; use sui::transfer; - struct Capture has key { + struct NoCap has key { id: UID } - struct Handicap has key { + struct CapAndHat has key { id: UID } @@ -21,11 +21,11 @@ module a::test_false_positives { id: UID } - public fun freeze_capture(w: Capture) { + public fun freeze_capture(w: NoCap) { transfer::public_freeze_object(w); } - public fun freeze_handicap(w: Handicap) { + public fun freeze_handicap(w: CapAndHat) { transfer::public_freeze_object(w); } @@ -48,4 +48,4 @@ module sui::transfer { public fun public_freeze_object(_: T) { abort 0 } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_suppression.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_suppression.move index 5a48458cd77e4..f0e4f249c0e38 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_suppression.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_suppression.move @@ -43,4 +43,4 @@ module sui::transfer { public fun public_freeze_object(_: T) { abort 0 } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_negatives.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_negatives.move index e63b71a98e588..e1fa1deb126f8 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_negatives.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_negatives.move @@ -17,6 +17,22 @@ module a::test_true_negatives { id: UID } + struct Capture has key { + id: UID + } + + struct Handicap has key { + id: UID + } + + struct Recap has key { + id: UID + } + + struct MyCapybara has key { + id: UID + } + public fun freeze_normal(w: NormalStruct) { transfer::public_freeze_object(w); } @@ -28,6 +44,22 @@ module a::test_true_negatives { public fun freeze_token(w: Token) { transfer::public_freeze_object(w); } + + public fun freeze_capture(w: Capture) { + transfer::public_freeze_object(w); + } + + public fun freeze_handicap(w: Handicap) { + transfer::public_freeze_object(w); + } + + public fun freeze_recap(w: Recap) { + transfer::public_freeze_object(w); + } + + public fun freeze_capybara(w: MyCapybara) { + transfer::public_freeze_object(w); + } } module sui::object { @@ -40,4 +72,4 @@ module sui::transfer { public fun 
public_freeze_object(_: T) { abort 0 } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.exp index 935933ef6a714..78c5b8538a69c 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.exp +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.exp @@ -1,24 +1,27 @@ -warning[Lint W99008]: Freezing a capability-like type can lead to design issues. +warning[Lint W99008]: freezing potential capability ┌─ tests/sui_mode/linter/freezing_capability_true_positives.move:21:9 │ 21 │ transfer::public_freeze_object(w); - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Freezing a capability-like type can lead to design issues. + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The type 'a::test_true_positives::AdminCap' is potentially a capability based on its name │ + = Freezing a capability might lock out critical operations or otherwise open access to operations that otherwise should be restricted = This warning can be suppressed with '#[allow(lint(freezing_capability))]' applied to the 'module' or module member ('const', 'fun', or 'struct') -warning[Lint W99008]: Freezing a capability-like type can lead to design issues. +warning[Lint W99008]: freezing potential capability ┌─ tests/sui_mode/linter/freezing_capability_true_positives.move:25:9 │ 25 │ transfer::public_freeze_object(w); - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Freezing a capability-like type can lead to design issues. + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The type 'a::test_true_positives::UserCapability' is potentially a capability based on its name │ + = Freezing a capability might lock out critical operations or otherwise open access to operations that otherwise should be restricted = This warning can be suppressed with '#[allow(lint(freezing_capability))]' applied to the 'module' or module member ('const', 'fun', or 'struct') -warning[Lint W99008]: Freezing a capability-like type can lead to design issues. +warning[Lint W99008]: freezing potential capability ┌─ tests/sui_mode/linter/freezing_capability_true_positives.move:29:9 │ 29 │ transfer::public_freeze_object(w); - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Freezing a capability-like type can lead to design issues. 
+ │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The type 'a::test_true_positives::OwnerCapV2' is potentially a capability based on its name │ + = Freezing a capability might lock out critical operations or otherwise open access to operations that otherwise should be restricted = This warning can be suppressed with '#[allow(lint(freezing_capability))]' applied to the 'module' or module member ('const', 'fun', or 'struct') diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.move index 093051c49855c..03651926773a9 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/freezing_capability_true_positives.move @@ -40,4 +40,4 @@ module sui::transfer { public fun public_freeze_object(_: T) { abort 0 } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/no_trigger_lint_missing_key.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/no_trigger_lint_missing_key.move index 4d5b6a0009d80..6918849f48883 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/no_trigger_lint_missing_key.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/no_trigger_lint_missing_key.move @@ -5,39 +5,10 @@ module a::no_trigger_lint_cases { struct HasKeyAbility has key { id: UID, } - - // False positive cases (should not trigger warning but might): - - // 1. Has key but different field name - struct FP1_HasKeyButDifferentFieldName has key { - uid: UID, - } - - // 2. Has key but UID field not first - struct FP2_HasKeyUIDNotFirst has key { - point: u64, - id: UID, - } - - // 3. Has key with ID field of different type - struct FP3_HasKeyButIDNotUID has key { - id: address, - } - - // 4. Suppress warning - #[allow(lint(missing_key))] - struct SuppressWarning { - id: UID, - } - - // 5. 
Has ability but not key - struct FP5_HasAbilityButNotKey has store, copy, drop { - id: UID, - } } module sui::object { struct UID has store { id: address, } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_lint_missing_key.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_lint_missing_key.move index 83d07d6655c94..3e86667a95af7 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_lint_missing_key.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_lint_missing_key.move @@ -12,4 +12,4 @@ module sui::object { struct UID has store { id: address, } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_public_mut_tx_context.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_public_mut_tx_context.move new file mode 100644 index 0000000000000..2d8f98657902c --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/suppress_public_mut_tx_context.move @@ -0,0 +1,20 @@ +module 0x42::suppress_cases { + use sui::tx_context::TxContext; + + #[allow(lint(prefer_mut_tx_context))] + public fun suppressed_function(_ctx: &TxContext) { + } + + #[allow(lint(prefer_mut_tx_context))] + public fun multi_suppressed_function(_ctx: &TxContext) { + } + + #[allow(lint(prefer_mut_tx_context))] + public fun suppressed_multi_param(_a: u64, _ctx: &TxContext, _b: &mut TxContext) { + } +} + +// Mocking the sui::tx_context module +module sui::tx_context { + struct TxContext has drop {} +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.exp index 597643f3a342d..243ce11581d70 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.exp +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.exp @@ -1,4 +1,4 @@ -warning[Lint W99007]: The struct's first field is 'id' of type 'sui::object::UID' but is missing the 'key' ability. +warning[Lint W99007]: struct with id but missing key ability ┌─ tests/sui_mode/linter/trigger_lint_missing_key.move:5:5 │ 5 │ ╭ struct MissingKeyAbility { @@ -8,13 +8,3 @@ warning[Lint W99007]: The struct's first field is 'id' of type 'sui::object::UID │ = This warning can be suppressed with '#[allow(lint(missing_key))]' applied to the 'module' or module member ('const', 'fun', or 'struct') -warning[Lint W99007]: The struct's first field is 'id' of type 'sui::object::UID' but is missing the 'key' ability. - ┌─ tests/sui_mode/linter/trigger_lint_missing_key.move:27:5 - │ -27 │ ╭ struct Wrapper { -28 │ │ id: UID, -29 │ │ } - │ ╰─────^ Struct's first field has an 'id' field of type 'sui::object::UID' but is missing the 'key' ability. 
- │ - = This warning can be suppressed with '#[allow(lint(missing_key))]' applied to the 'module' or module member ('const', 'fun', or 'struct') - diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.move index 9d9c2952df71a..5b6bb908d4ff3 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.move +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/trigger_lint_missing_key.move @@ -6,31 +6,10 @@ module a::trigger_lint_cases { id: UID, } - // False negative cases (should trigger warning but might not): - - // 1. Different field name - struct FN1_MissingKeyWithDifferentFieldName { - uid: UID, - } - - // 2. UID field not first - struct FN2_MissingKeyUIDNotFirst { - point: u64, - id: UID, - } - - // 3. Nested UID - struct FN3_MissingKeyNestedUID { - wrapper: Wrapper, - } - - struct Wrapper { - id: UID, - } } module sui::object { struct UID has store { id: address, } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_negative_public_mut_tx_context.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_negative_public_mut_tx_context.move new file mode 100644 index 0000000000000..1796cb046a7b7 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_negative_public_mut_tx_context.move @@ -0,0 +1,26 @@ +// tests the lint for preferring &mut TxContext over &TxContext in public functions +// these cases correctly should not trigger the lint +module 0x42::true_negative { + use sui::tx_context::TxContext; + + public fun correct_mint(_ctx: &mut TxContext) { + } + + public fun another_correct(_a: u64, _b: &mut TxContext, _c: u64) { + } + + fun private_function(_ctx: &TxContext) { + } + + public fun custom_module(_b: &mut sui::mock_tx_context::TxContext) {} + + +} + +module sui::tx_context { + struct TxContext has drop {} +} + +module sui::mock_tx_context { + struct TxContext has drop {} +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_positive_public_mut_tx_context.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_positive_public_mut_tx_context.exp new file mode 100644 index 0000000000000..981d0e737b750 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_positive_public_mut_tx_context.exp @@ -0,0 +1,36 @@ +warning[Lint W99009]: prefer '&mut TxContext' over '&TxContext' + ┌─ tests/sui_mode/linter/true_positive_public_mut_tx_context.move:8:37 + │ +8 │ public fun incorrect_mint(_ctx: &TxContext) { + │ ^^^^^^^^^^ 'public' functions should prefer '&mut TxContext' over '&TxContext' for better upgradability. + │ + = When upgrading, the public function cannot be modified to take '&mut TxContext' instead of '&TxContext'. As such, it is recommended to consider using '&mut TxContext' to future-proof the function. + = This warning can be suppressed with '#[allow(lint(prefer_mut_tx_context))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W99009]: prefer '&mut TxContext' over '&TxContext' + ┌─ tests/sui_mode/linter/true_positive_public_mut_tx_context.move:11:47 + │ +11 │ public fun another_incorrect(_a: u64, _b: &TxContext, _c: u64) { + │ ^^^^^^^^^^ 'public' functions should prefer '&mut TxContext' over '&TxContext' for better upgradability. 
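// Editor's note: a hypothetical distillation of the W99009 rule exercised by these new
// fixtures, not the compiler's code. The tests constrain the check precisely: only `public`
// functions warn, only an immutable `&TxContext` from sui::tx_context triggers it, a
// same-named type from another module (sui::mock_tx_context) does not, and each offending
// parameter is reported independently.
#[derive(PartialEq)]
enum RefKind { Imm, Mut }

struct Param { ty: &'static str, by_ref: Option<RefKind> }
struct Fun { is_public: bool, params: Vec<Param> }

/// Returns the indices of parameters that deserve a prefer_mut_tx_context warning.
fn tx_context_warnings(f: &Fun) -> Vec<usize> {
    if !f.is_public {
        return Vec::new(); // private functions are exempt, per the true_negative fixture
    }
    f.params
        .iter()
        .enumerate()
        .filter(|(_, p)| p.ty == "sui::tx_context::TxContext" && p.by_ref == Some(RefKind::Imm))
        .map(|(i, _)| i)
        .collect()
}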
+ │ + = When upgrading, the public function cannot be modified to take '&mut TxContext' instead of '&TxContext'. As such, it is recommended to consider using '&mut TxContext' to future-proof the function. + = This warning can be suppressed with '#[allow(lint(prefer_mut_tx_context))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W99009]: prefer '&mut TxContext' over '&TxContext' + ┌─ tests/sui_mode/linter/true_positive_public_mut_tx_context.move:14:54 + │ +14 │ public fun mixed_function(_a: &CustomStruct, _b: &TxContext, _c: &mut TxContext) {} + │ ^^^^^^^^^^ 'public' functions should prefer '&mut TxContext' over '&TxContext' for better upgradability. + │ + = When upgrading, the public function cannot be modified to take '&mut TxContext' instead of '&TxContext'. As such, it is recommended to consider using '&mut TxContext' to future-proof the function. + = This warning can be suppressed with '#[allow(lint(prefer_mut_tx_context))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W99009]: prefer '&mut TxContext' over '&TxContext' + ┌─ tests/sui_mode/linter/true_positive_public_mut_tx_context.move:20:13 + │ +20 │ _b: &TxContext, // Should warn + │ ^^^^^^^^^^ 'public' functions should prefer '&mut TxContext' over '&TxContext' for better upgradability. + │ + = When upgrading, the public function cannot be modified to take '&mut TxContext' instead of '&TxContext'. As such, it is recommended to consider using '&mut TxContext' to future-proof the function. + = This warning can be suppressed with '#[allow(lint(prefer_mut_tx_context))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_positive_public_mut_tx_context.move b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_positive_public_mut_tx_context.move new file mode 100644 index 0000000000000..cee7d73f1b2e7 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/true_positive_public_mut_tx_context.move @@ -0,0 +1,29 @@ +// tests the lint for preferring &mut TxContext over &TxContext in public functions +// these cases correctly should trigger the lint +module 0x42::true_positive { + use sui::tx_context::TxContext; + + struct CustomStruct has drop {} + + public fun incorrect_mint(_ctx: &TxContext) { + } + + public fun another_incorrect(_a: u64, _b: &TxContext, _c: u64) { + } + + public fun mixed_function(_a: &CustomStruct, _b: &TxContext, _c: &mut TxContext) {} + + fun private_function(_ctx: &TxContext) {} + + public fun complex_function( + _a: u64, + _b: &TxContext, // Should warn + _c: &mut TxContext, + _d: &T, + _e: &CustomStruct + ) {} +} + +module sui::tx_context { + struct TxContext has drop {} +} diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/move_2024/typing/enum_in_struct_position.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/move_2024/typing/enum_in_struct_position.exp new file mode 100644 index 0000000000000..5f388c9fe77c9 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/move_2024/typing/enum_in_struct_position.exp @@ -0,0 +1,39 @@ +error[Sui E02007]: invalid object declaration + ┌─ tests/sui_mode/move_2024/typing/enum_in_struct_position.move:11:13 + │ +11 │ public enum Obj has key, store { + │ ^^^ --- Enums cannot have the 'key' ability. 
+ │ │ + │ Invalid object 'Obj' + +error[Sui E02002]: invalid 'entry' function signature + ┌─ tests/sui_mode/move_2024/typing/enum_in_struct_position.move:18:1 + │ + 3 │ public enum E { + │ - To satisfy the constraint, the 'drop' ability would need to be added here + · +18 │ entry fun ret(): E { + │ ^^^^^ - The type 'a::m::E' does not have the ability 'drop' + │ │ + │ Invalid return type for entry function 'ret' + +error[Sui E02002]: invalid 'entry' function signature + ┌─ tests/sui_mode/move_2024/typing/enum_in_struct_position.move:22:14 + │ +22 │ entry fun x3(_: E) { + │ ----- ^ - 'entry' parameters must be primitives (by-value), vectors of primitives, objects (by-reference or by-value), vectors of objects, or 'Receiving' arguments (by-reference or by-value) + │ │ │ + │ │ Invalid 'entry' parameter type for parameter '_' + │ 'x3' was declared 'entry' here + +error[Sui E02009]: invalid private transfer call + ┌─ tests/sui_mode/move_2024/typing/enum_in_struct_position.move:30:5 + │ +11 │ public enum Obj has key, store { + │ ----- The object has 'store' so 'sui::transfer::public_transfer' can be called instead + · +29 │ public fun transfer(o: a::m::Obj) { + │ --------- The type 'a::m::Obj' is not declared in the current module +30 │ transfer::transfer(o, @0) + │ ^^^^^^^^^^^^^^^^^^^^^^^^^ Invalid private transfer. The function 'sui::transfer::transfer' is restricted to being called in the object's module, 'a::m' + diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/move_2024/typing/enum_in_struct_position.move b/external-crates/move/crates/move-compiler/tests/sui_mode/move_2024/typing/enum_in_struct_position.move new file mode 100644 index 0000000000000..3a38fc40f633e --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/move_2024/typing/enum_in_struct_position.move @@ -0,0 +1,42 @@ +module a::m { + +public enum E { + V() +} + +public enum M has drop { + V() +} + +public enum Obj has key, store { + V() +} + +fun init(_: M, _: &mut TxContext) { +} + +entry fun ret(): E { + E::V() +} + +entry fun x3(_: E) { + abort 0 +} + +} + +module a::n { +public fun transfer(o: a::m::Obj) { + transfer::transfer(o, @0) +} +} + +module sui::transfer { +public fun transfer(_: T, _: address) { + abort 0 +} +} + +module sui::tx_context{ +public struct TxContext has drop {} +} diff --git a/external-crates/move/crates/move-core-types/src/identifier.rs b/external-crates/move/crates/move-core-types/src/identifier.rs index 019adcc274eaa..b45a25dbbf459 100644 --- a/external-crates/move/crates/move-core-types/src/identifier.rs +++ b/external-crates/move/crates/move-core-types/src/identifier.rs @@ -234,6 +234,12 @@ impl Borrow for Identifier { } } +impl Borrow for Identifier { + fn borrow(&self) -> &str { + &self.0 + } +} + impl ToOwned for IdentStr { type Owned = Identifier; diff --git a/external-crates/move/crates/move-core-types/src/state.rs b/external-crates/move/crates/move-core-types/src/state.rs index 6d94c574a9dd6..f57e1abdcad79 100644 --- a/external-crates/move/crates/move-core-types/src/state.rs +++ b/external-crates/move/crates/move-core-types/src/state.rs @@ -13,7 +13,7 @@ pub enum VMState { } thread_local! 
{ - static STATE: RefCell = RefCell::new(VMState::OTHER); + static STATE: RefCell = const { RefCell::new(VMState::OTHER) }; } pub fn set_state(state: VMState) -> VMState { diff --git a/external-crates/move/crates/move-core-types/src/u256.rs b/external-crates/move/crates/move-core-types/src/u256.rs index fb240326cebad..d47245857df5c 100644 --- a/external-crates/move/crates/move-core-types/src/u256.rs +++ b/external-crates/move/crates/move-core-types/src/u256.rs @@ -567,7 +567,6 @@ impl Distribution for Standard { // Rand impl below are inspired by u128 impl found in https://rust-random.github.io/rand/src/rand/distributions/uniform.rs.html #[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] pub struct UniformU256 { low: U256, range: U256, diff --git a/external-crates/move/crates/move-docgen/tests/sources/enums_test.move b/external-crates/move/crates/move-docgen/tests/sources/enums_test.move index 93f04ca442284..b85b53d003cbf 100644 --- a/external-crates/move/crates/move-docgen/tests/sources/enums_test.move +++ b/external-crates/move/crates/move-docgen/tests/sources/enums_test.move @@ -1,8 +1,8 @@ /// This is a doc comment above an annotation. -#[allow(unused_const)] +#[allow(unused)] module 0x42::m { /// This is a doc comment above an enum - public enum Enum { + public enum Enum has drop { /// This is a doc comment above a variant A, B(), @@ -14,5 +14,34 @@ module 0x42::m { }, E { x: u64, y: u64 }, } + + public enum GenericEnum { + A(T), + B, + } + + public struct X has drop { x: Enum } + public struct Y(Enum) + + public struct XG { x: GenericEnum } + public struct YG(GenericEnum) + + public struct XGG { x: GenericEnum } + public struct YGG(GenericEnum) + + public struct VecMap has copy, drop, store { + contents: vector>, + } + + /// An entry in the map + public struct Entry has copy, drop, store { + key: K, + value: V, + } + + /// Doc comments `type_: VecMap` + fun f(x: VecMap): u64 { + 0 + } } diff --git a/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline.md b/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline.md index d9e02630a316e..38cd3f13aaa3d 100644 --- a/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline.md +++ b/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline.md @@ -6,13 +6,246 @@ This is a doc comment above an annotation. +- [Struct `X`](#0x42_m_X) +- [Struct `Y`](#0x42_m_Y) +- [Struct `XG`](#0x42_m_XG) +- [Struct `YG`](#0x42_m_YG) +- [Struct `XGG`](#0x42_m_XGG) +- [Struct `YGG`](#0x42_m_YGG) +- [Struct `VecMap`](#0x42_m_VecMap) +- [Struct `Entry`](#0x42_m_Entry) - [Enum `Enum`](#0x42_m_Enum) +- [Enum `GenericEnum`](#0x42_m_GenericEnum) +- [Function `f`](#0x42_m_f)
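// Editor's note on two Rust hunks a little further above: state.rs switches its
// `thread_local!` to a `const { ... }` initializer (which avoids the lazy-init check on each
// access), and identifier.rs adds `impl Borrow<str> for Identifier`. An assumed usage sketch
// of the latter, with a stand-in type rather than move_core_types::identifier::Identifier:
use std::borrow::Borrow;
use std::collections::HashMap;

#[derive(PartialEq, Eq, Hash)]
struct Identifier(Box<str>);

impl Borrow<str> for Identifier {
    fn borrow(&self) -> &str {
        &self.0
    }
}

fn main() {
    let mut arities: HashMap<Identifier, usize> = HashMap::new();
    arities.insert(Identifier("public_freeze_object".into()), 1);
    // HashMap::get only needs K: Borrow<Q>, so a &str key works directly,
    // with no temporary Identifier allocated for the lookup.
    assert_eq!(arities.get("public_freeze_object"), Some(&1));
}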
+ + +## Struct `X` + + + +
struct X has drop
+
+ + + +
+Fields + + +
+
+x: m::Enum +
+
+ +
+
+ + +
+ + + +## Struct `Y` + + + +
struct Y
+
+ + + +
+Fields + + +
+
+pos0: m::Enum +
+
+ +
+
+ + +
+ + + +## Struct `XG` + + + +
struct XG
+
+ + + +
+Fields + + +
+
+x: m::GenericEnum<m::Enum> +
+
+ +
+
+ + +
+ + + +## Struct `YG` + + + +
struct YG
+
+ + + +
+Fields + + +
+
+pos0: m::GenericEnum<m::Enum> +
+
+ +
+
+ + +
+ + + +## Struct `XGG` + + + +
struct XGG<T>
+
+ + + +
+Fields + + +
+
+x: m::GenericEnum<T> +
+
+ +
+
+ + +
+ + + +## Struct `YGG` + + + +
struct YGG<T>
+
+ + + +
+Fields + + +
+
+pos0: m::GenericEnum<T> +
+
+ +
+
+ + +
+ + + +## Struct `VecMap` + + + +
struct VecMap<K: copy, V> has copy, drop, store
+
+ + + +
+Fields + + +
+
+contents: vector<m::Entry<K, V>> +
+
+ +
+
+ + +
+ + + +## Struct `Entry` + +An entry in the map + + +
struct Entry<K: copy, V> has copy, drop, store
+
+ + + +
+Fields + + +
+
+key: K +
+
+ +
+
+value: V +
+
+ +
+
+ + +
+ ## Enum `Enum` @@ -20,7 +253,7 @@ This is a doc comment above an annotation. This is a doc comment above an enum -
public enum Enum
+
public enum Enum has drop
 
@@ -103,4 +336,72 @@ Variant E + + + + +## Enum `GenericEnum` + + + +
public enum GenericEnum<T>
+
+ + + +
+Variants + + +
+
+Variant A +
+
+ +
+ +
+
+pos0: T +
+
+ +
+
+ +
+Variant B +
+
+ +
+
+ + +
+ + + +## Function `f` + +Doc comments type_: VecMap<u64, X> + + +
fun f(x: m::VecMap<u64, m::X>): u64
+
+ + + +
+Implementation + + +
fun f(x: VecMap<u64, X>): u64 {
+    0
+}
+
+ + +
diff --git a/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline_no_fold.md b/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline_no_fold.md index c13b9c83b1ea0..f761ecf936c3b 100644 --- a/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline_no_fold.md +++ b/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_inline_no_fold.md @@ -6,13 +6,222 @@ This is a doc comment above an annotation. +- [Struct `X`](#0x42_m_X) +- [Struct `Y`](#0x42_m_Y) +- [Struct `XG`](#0x42_m_XG) +- [Struct `YG`](#0x42_m_YG) +- [Struct `XGG`](#0x42_m_XGG) +- [Struct `YGG`](#0x42_m_YGG) +- [Struct `VecMap`](#0x42_m_VecMap) +- [Struct `Entry`](#0x42_m_Entry) - [Enum `Enum`](#0x42_m_Enum) +- [Enum `GenericEnum`](#0x42_m_GenericEnum) +- [Function `f`](#0x42_m_f)
+ + +## Struct `X` + + + +
struct X has drop
+
+ + + +##### Fields + + +
+
+x: m::Enum +
+
+ +
+
+ + + + +## Struct `Y` + + + +
struct Y
+
+ + + +##### Fields + + +
+
+pos0: m::Enum +
+
+ +
+
+ + + + +## Struct `XG` + + + +
struct XG
+
+ + + +##### Fields + + +
+
+x: m::GenericEnum<m::Enum> +
+
+ +
+
+ + + + +## Struct `YG` + + + +
struct YG
+
+ + + +##### Fields + + +
+
+pos0: m::GenericEnum<m::Enum> +
+
+ +
+
+ + + + +## Struct `XGG` + + + +
struct XGG<T>
+
+ + + +##### Fields + + +
+
+x: m::GenericEnum<T> +
+
+ +
+
+ + + + +## Struct `YGG` + + + +
struct YGG<T>
+
+ + + +##### Fields + + +
+
+pos0: m::GenericEnum<T> +
+
+ +
+
+ + + + +## Struct `VecMap` + + + +
struct VecMap<K: copy, V> has copy, drop, store
+
+ + + +##### Fields + + +
+
+contents: vector<m::Entry<K, V>> +
+
+ +
+
+ + + + +## Struct `Entry` + +An entry in the map + + +
struct Entry<K: copy, V> has copy, drop, store
+
+ + + +##### Fields + + +
+
+key: K +
+
+ +
+
+value: V +
+
+ +
+
+ + ## Enum `Enum` @@ -20,7 +229,7 @@ This is a doc comment above an annotation. This is a doc comment above an enum -
public enum Enum
+
public enum Enum has drop
 
@@ -100,3 +309,64 @@ Variant E + + + + +## Enum `GenericEnum` + + + +
public enum GenericEnum<T>
+
+ + + +##### Variants + + +
+
+Variant A +
+
+ +
+ +
+
+pos0: T +
+
+ +
+
+ +
+Variant B +
+
+ +
+
+ + + + +## Function `f` + +Doc comments type_: VecMap<u64, X> + + +
fun f(x: m::VecMap<u64, m::X>): u64
+
+ + + +##### Implementation + + +
fun f(x: VecMap<u64, X>): u64 {
+    0
+}
+
diff --git a/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_separate.md b/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_separate.md index d9e02630a316e..38cd3f13aaa3d 100644 --- a/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_separate.md +++ b/external-crates/move/crates/move-docgen/tests/sources/enums_test.spec_separate.md @@ -6,13 +6,246 @@ This is a doc comment above an annotation. +- [Struct `X`](#0x42_m_X) +- [Struct `Y`](#0x42_m_Y) +- [Struct `XG`](#0x42_m_XG) +- [Struct `YG`](#0x42_m_YG) +- [Struct `XGG`](#0x42_m_XGG) +- [Struct `YGG`](#0x42_m_YGG) +- [Struct `VecMap`](#0x42_m_VecMap) +- [Struct `Entry`](#0x42_m_Entry) - [Enum `Enum`](#0x42_m_Enum) +- [Enum `GenericEnum`](#0x42_m_GenericEnum) +- [Function `f`](#0x42_m_f)
+ + +## Struct `X` + + + +
struct X has drop
+
+ + + +
+Fields + + +
+
+x: m::Enum +
+
+ +
+
+ + +
+ + + +## Struct `Y` + + + +
struct Y
+
+ + + +
+Fields + + +
+
+pos0: m::Enum +
+
+ +
+
+ + +
+ + + +## Struct `XG` + + + +
struct XG
+
+ + + +
+Fields + + +
+
+x: m::GenericEnum<m::Enum> +
+
+ +
+
+ + +
+ + + +## Struct `YG` + + + +
struct YG
+
+ + + +
+Fields + + +
+
+pos0: m::GenericEnum<m::Enum> +
+
+ +
+
+ + +
+ + + +## Struct `XGG` + + + +
struct XGG<T>
+
+ + + +
+Fields + + +
+
+x: m::GenericEnum<T> +
+
+ +
+
+ + +
+ + + +## Struct `YGG` + + + +
struct YGG<T>
+
+ + + +
+Fields + + +
+
+pos0: m::GenericEnum<T> +
+
+ +
+
+ + +
+ + + +## Struct `VecMap` + + + +
struct VecMap<K: copy, V> has copy, drop, store
+
+ + + +
+Fields + + +
+
+contents: vector<m::Entry<K, V>> +
+
+ +
+
+ + +
+ + + +## Struct `Entry` + +An entry in the map + + +
struct Entry<K: copy, V> has copy, drop, store
+
+ + + +
+Fields + + +
+
+key: K +
+
+ +
+
+value: V +
+
+ +
+
+ + +
+ ## Enum `Enum` @@ -20,7 +253,7 @@ This is a doc comment above an annotation. This is a doc comment above an enum -
public enum Enum
+
public enum Enum has drop
 
@@ -103,4 +336,72 @@ Variant E + + + + +## Enum `GenericEnum` + + + +
public enum GenericEnum<T>
+
+ + + +
+Variants + + +
+
+Variant A +
+
+ +
+ +
+
+pos0: T +
+
+ +
+
+ +
+Variant B +
+
+ +
+
+ + +
+ + + +## Function `f` + +Doc comments type_: VecMap<u64, X> + + +
fun f(x: m::VecMap<u64, m::X>): u64
+
+ + + +
+Implementation + + +
fun f(x: VecMap<u64, X>): u64 {
+    0
+}
+
+ + +
diff --git a/external-crates/move/crates/move-ir-to-bytecode/src/context.rs b/external-crates/move/crates/move-ir-to-bytecode/src/context.rs index b62cda8506c6d..1d320b4653cf9 100644 --- a/external-crates/move/crates/move-ir-to-bytecode/src/context.rs +++ b/external-crates/move/crates/move-ir-to-bytecode/src/context.rs @@ -48,7 +48,7 @@ macro_rules! get_or_add_item_macro { }}; } -pub const TABLE_MAX_SIZE: usize = u16::max_value() as usize; +pub const TABLE_MAX_SIZE: usize = u16::MAX as usize; fn get_or_add_item_ref( m: &mut HashMap, k: &K, diff --git a/external-crates/move/crates/move-model/src/builder/exp_translator.rs b/external-crates/move/crates/move-model/src/builder/exp_translator.rs index b6538796856a3..cba675c90343c 100644 --- a/external-crates/move/crates/move-model/src/builder/exp_translator.rs +++ b/external-crates/move/crates/move-model/src/builder/exp_translator.rs @@ -132,7 +132,7 @@ impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'mo fn type_display_context(&self) -> TypeDisplayContext<'_> { TypeDisplayContext::WithoutEnv { symbol_pool: self.symbol_pool(), - reverse_struct_table: &self.parent.parent.reverse_datatype_table, + reverse_datatype_table: &self.parent.parent.reverse_datatype_table, } } diff --git a/external-crates/move/crates/move-model/src/exp_generator.rs b/external-crates/move/crates/move-model/src/exp_generator.rs index d1334af6f8d51..007a657960a9b 100644 --- a/external-crates/move/crates/move-model/src/exp_generator.rs +++ b/external-crates/move/crates/move-model/src/exp_generator.rs @@ -74,7 +74,7 @@ pub trait ExpGenerator<'env> { fn get_memory_of_node(&self, node_id: NodeId) -> QualifiedInstId { // We do have a call `f>` so extract the type from the function instantiation. let rty = &self.global_env().get_node_instantiation(node_id)[0]; - let (mid, sid, inst) = rty.require_struct(); + let (mid, sid, inst) = rty.require_datatype(); mid.qualified_inst(sid, inst.to_owned()) } } diff --git a/external-crates/move/crates/move-model/src/model.rs b/external-crates/move/crates/move-model/src/model.rs index f701d4b94260f..859be6bf46cc8 100644 --- a/external-crates/move/crates/move-model/src/model.rs +++ b/external-crates/move/crates/move-model/src/model.rs @@ -1121,7 +1121,7 @@ impl GlobalEnv { } /// Gets a StructEnv in this module by its `StructTag` - pub fn find_struct_by_tag( + pub fn find_datatype_by_tag( &self, tag: &language_storage::StructTag, ) -> Option> { @@ -1129,6 +1129,10 @@ impl GlobalEnv { .and_then(|menv| { menv.find_struct_by_identifier(tag.name.clone()) .map(|sid| menv.get_id().qualified(sid)) + .or_else(|| { + menv.find_enum_by_identifier(tag.name.clone()) + .map(|sid| menv.get_id().qualified(sid)) + }) }) } @@ -1242,16 +1246,23 @@ impl GlobalEnv { sid: DatatypeId, ts: &[Type], ) -> Option { - self.get_struct_type(mid, sid, ts)?.into_struct_tag() + self.get_datatype(mid, sid, ts)?.into_struct_tag() } /// Attempt to compute a struct type for (`mid`, `sid`, `ts`). 
- pub fn get_struct_type(&self, mid: ModuleId, sid: DatatypeId, ts: &[Type]) -> Option { + pub fn get_datatype(&self, mid: ModuleId, sid: DatatypeId, ts: &[Type]) -> Option { let menv = self.get_module(mid); + let name = menv + .find_struct(sid.symbol()) + .map(|senv| senv.get_identifier()) + .or_else(|| { + menv.find_enum(sid.symbol()) + .map(|eenv| eenv.get_identifier()) + })??; Some(MType::Struct { address: *menv.self_address(), module: menv.get_identifier(), - name: menv.get_struct(sid).get_identifier()?, + name, type_arguments: ts .iter() .map(|t| t.clone().into_normalized_type(self).unwrap()) @@ -2030,10 +2041,13 @@ impl<'env> ModuleEnv<'env> { .env .find_module(&self.env.to_module_name(&declaring_module)) .expect("undefined module"); - let struct_env = declaring_module_env - .find_struct(self.env.symbol_pool.make(sname)) - .expect("undefined struct"); - Type::Datatype(declaring_module_env.data.id, struct_env.get_id(), vec![]) + let name = self.env.symbol_pool.make(sname); + let datatype_id = declaring_module_env + .find_struct(name) + .map(|env| env.get_id()) + .or_else(|| declaring_module_env.find_enum(name).map(|env| env.get_id())) + .expect("undefined datatype"); + Type::Datatype(declaring_module_env.data.id, datatype_id, vec![]) } SignatureToken::DatatypeInstantiation(inst) => { let (handle_idx, args) = &**inst; @@ -2046,12 +2060,15 @@ impl<'env> ModuleEnv<'env> { .env .find_module(&self.env.to_module_name(&declaring_module)) .expect("undefined module"); - let struct_env = declaring_module_env - .find_struct(self.env.symbol_pool.make(sname)) - .expect("undefined struct"); + let name = self.env.symbol_pool.make(sname); + let datatype_id = declaring_module_env + .find_struct(name) + .map(|env| env.get_id()) + .or_else(|| declaring_module_env.find_enum(name).map(|env| env.get_id())) + .expect("undefined datatype"); Type::Datatype( declaring_module_env.data.id, - struct_env.get_id(), + datatype_id, self.globalize_signatures(args), ) } diff --git a/external-crates/move/crates/move-model/src/ty.rs b/external-crates/move/crates/move-model/src/ty.rs index 4d74a64c5412b..31ef1d646d1b7 100644 --- a/external-crates/move/crates/move-model/src/ty.rs +++ b/external-crates/move/crates/move-model/src/ty.rs @@ -15,7 +15,7 @@ use move_core_types::language_storage::{StructTag, TypeTag}; use crate::{ ast::QualifiedSymbol, - model::{DatatypeId, GlobalEnv, ModuleId, QualifiedInstId, StructEnv}, + model::{DatatypeId, GlobalEnv, ModuleId}, symbol::{Symbol, SymbolPool}, }; @@ -173,19 +173,18 @@ impl Type { /// Returns true if this is any number type. pub fn is_number(&self) -> bool { - if let Type::Primitive(p) = self { - if let PrimitiveType::U8 - | PrimitiveType::U16 - | PrimitiveType::U32 - | PrimitiveType::U64 - | PrimitiveType::U128 - | PrimitiveType::U256 - | PrimitiveType::Num = p - { - return true; - } - } - false + matches!( + self, + Type::Primitive( + PrimitiveType::U8 + | PrimitiveType::U16 + | PrimitiveType::U32 + | PrimitiveType::U64 + | PrimitiveType::U128 + | PrimitiveType::U256 + | PrimitiveType::Num, + ) + ) } /// Returns true if this is an address or signer type. pub fn is_signer_or_address(&self) -> bool { @@ -233,38 +232,16 @@ impl Type { } } - /// If this is a struct type, replace the type instantiation. - pub fn replace_struct_instantiation(&self, inst: &[Type]) -> Type { + /// If this is a datatype, replace the type instantiation. 
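// Editor's note: the move-model changes above (find_datatype_by_tag, get_datatype, and the
// SignatureToken globalization) all repeat one pattern: resolve the name as a struct first,
// then fall back to an enum via Option::or_else, since structs and enums now share the
// datatype namespace. A distilled, hypothetical version with invented stand-in types:
#[derive(Debug, PartialEq)]
enum DatatypeId { Struct(usize), Enum(usize) }

struct ModuleEnv { structs: Vec<&'static str>, enums: Vec<&'static str> }

impl ModuleEnv {
    fn find_struct(&self, name: &str) -> Option<DatatypeId> {
        self.structs.iter().position(|s| *s == name).map(DatatypeId::Struct)
    }
    fn find_enum(&self, name: &str) -> Option<DatatypeId> {
        self.enums.iter().position(|e| *e == name).map(DatatypeId::Enum)
    }
    fn find_datatype(&self, name: &str) -> Option<DatatypeId> {
        // or_else keeps the enum probe lazy: it only runs when the struct lookup misses.
        self.find_struct(name).or_else(|| self.find_enum(name))
    }
}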
+ pub fn replace_datatype_instantiation(&self, inst: &[Type]) -> Type { match self { Type::Datatype(mid, sid, _) => Type::Datatype(*mid, *sid, inst.to_vec()), _ => self.clone(), } } - /// If this is a struct type, return the associated struct env and type parameters. - pub fn get_struct<'env>( - &'env self, - env: &'env GlobalEnv, - ) -> Option<(StructEnv<'env>, &'env [Type])> { - if let Type::Datatype(module_idx, struct_idx, params) = self { - Some((env.get_module(*module_idx).into_struct(*struct_idx), params)) - } else { - None - } - } - - /// If this is a struct type, return the associated QualifiedInstId. - pub fn get_struct_id(&self, env: &GlobalEnv) -> Option> { - self.get_struct(env).map(|(se, inst)| { - se.module_env - .get_id() - .qualified(se.get_id()) - .instantiate(inst.to_vec()) - }) - } - - /// Require this to be a struct, if so extracts its content. - pub fn require_struct(&self) -> (ModuleId, DatatypeId, &[Type]) { + /// Require this to be a datatype, if so extracts its content. + pub fn require_datatype(&self) -> (ModuleId, DatatypeId, &[Type]) { if let Type::Datatype(mid, sid, targs) = self { (*mid, *sid, targs.as_slice()) } else { @@ -414,10 +391,10 @@ impl Type { } /// Attempt to convert this type into a normalized::Type - pub fn into_struct_type(self, env: &GlobalEnv) -> Option { + pub fn into_datatype_ty(self, env: &GlobalEnv) -> Option { use Type::*; match self { - Datatype(mid, sid, ts) => env.get_struct_type(mid, sid, &ts), + Datatype(mid, sid, ts) => env.get_datatype(mid, sid, &ts), _ => None, } } @@ -428,7 +405,7 @@ impl Type { match self { Primitive(p) => Some(p.into_normalized_type().expect("Invariant violation: unexpected spec primitive")), Datatype(mid, sid, ts) => - env.get_struct_type(mid, sid, &ts), + env.get_datatype(mid, sid, &ts), Vector(et) => Some(MType::Vector( Box::new(et.into_normalized_type(env) .expect("Invariant violation: vector type argument contains incomplete, tuple, or spec type")) @@ -447,7 +424,7 @@ impl Type { /// Attempt to convert this type into a language_storage::StructTag pub fn into_struct_tag(self, env: &GlobalEnv) -> Option { - self.into_struct_type(env)?.into_struct_tag() + self.into_datatype_ty(env)?.into_struct_tag() } /// Attempt to convert this type into a language_storage::TypeTag @@ -469,8 +446,8 @@ impl Type { TypeTag::Address => Primitive(PrimitiveType::Address), TypeTag::Signer => Primitive(PrimitiveType::Signer), TypeTag::Struct(s) => { - let qid = env.find_struct_by_tag(s).unwrap_or_else(|| { - panic!("Invariant violation: couldn't resolve struct {:?}", s) + let qid = env.find_datatype_by_tag(s).unwrap_or_else(|| { + panic!("Invariant violation: couldn't resolve datatype {:?}", s) }); let type_args = s .type_params @@ -802,6 +779,7 @@ impl TypeUnificationAdapter { /// - any type parameter on the LHS with index < P will be treated as concrete types and /// - only type parameters on the LHS with index >= P are treated as variables and thus, /// participate in the type unification process. + /// /// The same rule applies to the RHS parameters via `treat_rhs_type_param_as_var_after_index`. fn new<'a, I>( lhs_types: I, @@ -1004,6 +982,7 @@ impl TypeInstantiationDerivation { /// - be assigned with a concrete type already and hence, ceases to be a type parameter, or /// - does not have any matching instantiation and hence, either remains a type parameter or is /// represented as a type error. + /// /// But in anyway, these type parameters no longer participate in type unification anymore. 
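// Editor's note: several hunks in this patch (the TypeUnificationAdapter and
// TypeInstantiationDerivation doc comments just above, plus access_path.rs,
// inconsistency_check.rs, packed_types_analysis.rs, and script_signature.rs) add a lone `///`
// after a bullet list. This looks like the standard fix for rustdoc lazy continuations:
// without the blank doc line, the sentence after the list is parsed as a continuation of the
// final bullet. Minimal illustration:

/// Unifies two type lists:
/// - parameters below the cutoff index are treated as concrete types
/// - parameters at or above the cutoff participate in unification
///
/// Thanks to the blank `///` above, this sentence renders as a fresh
/// paragraph instead of being glued onto the second bullet.
fn unification_doc_sketch() {}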
/// /// If `target_lhs` is True, derive instantiations for the type parameter with @@ -1065,6 +1044,7 @@ impl TypeInstantiationDerivation { /// - finds all possible instantiations for parameter at index 2 (`inst_param_2`) /// - for each instantiation in `inst_param_2`, /// - ...... + /// /// The process continues until all type parameters are analyzed (i.e., reaching the type /// parameter at index `N`). /// @@ -1159,7 +1139,7 @@ impl TypeInstantiationDerivation { pub enum TypeDisplayContext<'a> { WithoutEnv { symbol_pool: &'a SymbolPool, - reverse_struct_table: &'a BTreeMap<(ModuleId, DatatypeId), QualifiedSymbol>, + reverse_datatype_table: &'a BTreeMap<(ModuleId, DatatypeId), QualifiedSymbol>, }, WithEnv { env: &'a GlobalEnv, @@ -1216,7 +1196,7 @@ impl<'a> fmt::Display for TypeDisplay<'a> { Vector(t) => write!(f, "vector<{}>", t.display(self.context)), TypeDomain(t) => write!(f, "domain<{}>", t.display(self.context)), ResourceDomain(mid, sid, inst_opt) => { - write!(f, "resources<{}", self.struct_str(*mid, *sid))?; + write!(f, "resources<{}", self.datatype_str(*mid, *sid))?; if let Some(inst) = inst_opt { f.write_str("<")?; comma_list(f, inst)?; @@ -1231,7 +1211,7 @@ impl<'a> fmt::Display for TypeDisplay<'a> { write!(f, "{}", t.display(self.context)) } Datatype(mid, sid, ts) => { - write!(f, "{}", self.struct_str(*mid, *sid))?; + write!(f, "{}", self.datatype_str(*mid, *sid))?; if !ts.is_empty() { f.write_str("<")?; comma_list(f, ts)?; @@ -1269,11 +1249,11 @@ impl<'a> fmt::Display for TypeDisplay<'a> { } impl<'a> TypeDisplay<'a> { - fn struct_str(&self, mid: ModuleId, sid: DatatypeId) -> String { + fn datatype_str(&self, mid: ModuleId, sid: DatatypeId) -> String { match self.context { TypeDisplayContext::WithoutEnv { symbol_pool, - reverse_struct_table, + reverse_datatype_table: reverse_struct_table, } => { if let Some(sym) = reverse_struct_table.get(&(mid, sid)) { sym.display(symbol_pool).to_string() @@ -1282,12 +1262,25 @@ impl<'a> TypeDisplay<'a> { } } TypeDisplayContext::WithEnv { env, .. 
} => { - let struct_env = env.get_module(mid).into_struct(sid); - format!( - "{}::{}", - struct_env.module_env.get_name().display(env.symbol_pool()), - struct_env.get_name().display(env.symbol_pool()) - ) + let menv = env.get_module(mid); + menv.find_struct(sid.symbol()) + .map(|senv| { + format!( + "{}::{}", + senv.module_env.get_name().display(env.symbol_pool()), + senv.get_name().display(env.symbol_pool()), + ) + }) + .or_else(|| { + menv.find_enum(sid.symbol()).map(|eenv| { + format!( + "{}::{}", + eenv.module_env.get_name().display(env.symbol_pool()), + eenv.get_name().display(env.symbol_pool()), + ) + }) + }) + .expect("Unknown struct or enum") } } } diff --git a/external-crates/move/crates/move-package/src/resolution/dependency_cache.rs b/external-crates/move/crates/move-package/src/resolution/dependency_cache.rs index 7d2886052d3c8..6752f3f2fe39d 100644 --- a/external-crates/move/crates/move-package/src/resolution/dependency_cache.rs +++ b/external-crates/move/crates/move-package/src/resolution/dependency_cache.rs @@ -165,6 +165,7 @@ impl DependencyCache { OsStr::new("fetch"), OsStr::new("origin"), ]) + .stdin(Stdio::null()) .stdout(Stdio::null()) .stderr(Stdio::null()) .status() @@ -193,6 +194,7 @@ impl DependencyCache { OsStr::new("--hard"), OsStr::new(&format!("origin/{}", git_rev)), ]) + .stdin(Stdio::null()) .stdout(Stdio::null()) .stderr(Stdio::null()) .status() diff --git a/external-crates/move/crates/move-stackless-bytecode/src/access_path.rs b/external-crates/move/crates/move-stackless-bytecode/src/access_path.rs index 52927f081a542..84d41e97e530d 100644 --- a/external-crates/move/crates/move-stackless-bytecode/src/access_path.rs +++ b/external-crates/move/crates/move-stackless-bytecode/src/access_path.rs @@ -6,6 +6,7 @@ //! memory. Some examples of concrete paths are: //! * `0x7/M::T/f` (i.e., the field `f` of the `M::T` resource stored at address `0x7` //! * `Formal(0)/[2]` (i.e., the value stored at index 2 of the array bound the 0th formal of the current procedure) +//! //! An abstract path is similar; it consists of the following components: //! * A *root*, which is either an abstract address or a local //! * Zero or more *offsets*, where an offset is a field, an unknown vector index, or an abstract struct type diff --git a/external-crates/move/crates/move-stackless-bytecode/src/inconsistency_check.rs b/external-crates/move/crates/move-stackless-bytecode/src/inconsistency_check.rs index 7ca6aad925ba2..8a5f2dec7e20a 100644 --- a/external-crates/move/crates/move-stackless-bytecode/src/inconsistency_check.rs +++ b/external-crates/move/crates/move-stackless-bytecode/src/inconsistency_check.rs @@ -11,6 +11,7 @@ //! an `assert false` before //! - every `return` and //! - every `abort` (if the `unconditional-abort-as-inconsistency` option is set). +//! //! In this way, if the instrumented `assert false` can be proved, it means we have an inconsistency //! in the specifications. //! diff --git a/external-crates/move/crates/move-stackless-bytecode/src/packed_types_analysis.rs b/external-crates/move/crates/move-stackless-bytecode/src/packed_types_analysis.rs index 1e0783cd8b308..10fc6fbe99891 100644 --- a/external-crates/move/crates/move-stackless-bytecode/src/packed_types_analysis.rs +++ b/external-crates/move/crates/move-stackless-bytecode/src/packed_types_analysis.rs @@ -26,6 +26,7 @@ use crate::{ /// - Transaction scripts have at most 1 type argument /// - The only values that can be bound to a transaction script type argument are XUS and /// XDX. 
Passing any other values will lead to an aborted transaction. +/// /// The first assumption is checked and will trigger an assert failure if violated. The second /// is unchecked, but would be a nice property for the prover. pub fn get_packed_types( diff --git a/external-crates/move/crates/move-stdlib-natives/src/debug.rs b/external-crates/move/crates/move-stdlib-natives/src/debug.rs index 1aee70ebed864..d4e55a46ea170 100644 --- a/external-crates/move/crates/move-stdlib-natives/src/debug.rs +++ b/external-crates/move/crates/move-stdlib-natives/src/debug.rs @@ -286,7 +286,7 @@ mod testing { Ok(()) } - fn is_non_empty_vector_u8(vec: &Vec) -> bool { + fn is_non_empty_vector_u8(vec: &[A::MoveValue]) -> bool { if vec.is_empty() { false } else { diff --git a/external-crates/move/crates/move-vm-config/src/runtime.rs b/external-crates/move/crates/move-vm-config/src/runtime.rs index 515efeec3f6ef..d51578a3d208d 100644 --- a/external-crates/move/crates/move-vm-config/src/runtime.rs +++ b/external-crates/move/crates/move-vm-config/src/runtime.rs @@ -33,6 +33,9 @@ pub struct VMConfig { pub error_execution_state: bool, // configuration for binary deserialization (modules) pub binary_config: BinaryConfig, + // Whether value serialization errors when generating type layouts should be rethrown or + // converted to a different error. + pub rethrow_serialization_type_layout_errors: bool, } impl Default for VMConfig { @@ -46,6 +49,7 @@ impl Default for VMConfig { profiler_config: None, error_execution_state: true, binary_config: BinaryConfig::with_extraneous_bytes_check(false), + rethrow_serialization_type_layout_errors: false, } } } diff --git a/external-crates/move/crates/move-vm-config/src/verifier.rs b/external-crates/move/crates/move-vm-config/src/verifier.rs index d33febd865d83..d029b308cf260 100644 --- a/external-crates/move/crates/move-vm-config/src/verifier.rs +++ b/external-crates/move/crates/move-vm-config/src/verifier.rs @@ -77,12 +77,22 @@ impl Default for VerifierConfig { } } +impl MeterConfig { + pub fn old_default() -> Self { + Self { + max_per_fun_meter_units: Some(8_000_000), + max_per_mod_meter_units: Some(8_000_000), + max_per_pkg_meter_units: Some(8_000_000), + } + } +} + impl Default for MeterConfig { fn default() -> Self { Self { - max_per_fun_meter_units: Some(1000 * 8000), - max_per_mod_meter_units: Some(1000 * 8000), - max_per_pkg_meter_units: Some(1000 * 8000), + max_per_fun_meter_units: Some(2_200_000), + max_per_mod_meter_units: Some(2_200_000), + max_per_pkg_meter_units: Some(2_200_000), } } } diff --git a/external-crates/move/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/crates/move-vm-runtime/src/runtime.rs index 400573cb52525..fedf0925679b5 100644 --- a/external-crates/move/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/crates/move-vm-runtime/src/runtime.rs @@ -265,11 +265,20 @@ impl VMRuntime { _ => (ty, value), }; - let layout = self.loader.type_to_type_layout(ty).map_err(|_err| { - PartialVMError::new(StatusCode::VERIFICATION_ERROR).with_message( - "entry point functions cannot have non-serializable return types".to_string(), - ) - })?; + let layout = if self + .loader() + .vm_config() + .rethrow_serialization_type_layout_errors + { + self.loader.type_to_type_layout(ty)? + } else { + self.loader.type_to_type_layout(ty).map_err(|_err| { + PartialVMError::new(StatusCode::VERIFICATION_ERROR).with_message( + "entry point functions cannot have non-serializable return types".to_string(), + ) + })? 
+ }; + let bytes = value.simple_serialize(&layout).ok_or_else(|| { PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message("failed to serialize return values".to_string()) diff --git a/external-crates/move/crates/move-vm-runtime/src/tracing.rs b/external-crates/move/crates/move-vm-runtime/src/tracing.rs index b016879c9bb45..a4c984c62a537 100644 --- a/external-crates/move/crates/move-vm-runtime/src/tracing.rs +++ b/external-crates/move/crates/move-vm-runtime/src/tracing.rs @@ -48,7 +48,6 @@ static DEBUGGING_ENABLED: Lazy = static LOGGING_FILE: Lazy> = Lazy::new(|| { Mutex::new( OpenOptions::new() - .write(true) .create(true) .append(true) .open(&*FILE_PATH) diff --git a/external-crates/move/crates/move-vm-test-utils/src/gas_schedule.rs b/external-crates/move/crates/move-vm-test-utils/src/gas_schedule.rs index cc6ddff8e1ef8..d2859c974f09f 100644 --- a/external-crates/move/crates/move-vm-test-utils/src/gas_schedule.rs +++ b/external-crates/move/crates/move-vm-test-utils/src/gas_schedule.rs @@ -33,10 +33,7 @@ use move_vm_types::{ }; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use std::{ - ops::{Add, Mul}, - u64, -}; +use std::ops::{Add, Mul}; pub enum GasUnit {} pub type Gas = GasQuantity; diff --git a/external-crates/move/crates/move-vm-types/src/values/values_impl.rs b/external-crates/move/crates/move-vm-types/src/values/values_impl.rs index d037318bb341b..a7d3552e0f60d 100644 --- a/external-crates/move/crates/move-vm-types/src/values/values_impl.rs +++ b/external-crates/move/crates/move-vm-types/src/values/values_impl.rs @@ -2114,7 +2114,7 @@ impl IntegerValue { match self { U8(x) => Ok(x), U16(x) => { - if x > (std::u8::MAX as u16) { + if x > (u8::MAX as u16) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u16({}) to u8", x))) } else { @@ -2122,7 +2122,7 @@ impl IntegerValue { } } U32(x) => { - if x > (std::u8::MAX as u32) { + if x > (u8::MAX as u32) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u32({}) to u8", x))) } else { @@ -2130,7 +2130,7 @@ impl IntegerValue { } } U64(x) => { - if x > (std::u8::MAX as u64) { + if x > (u8::MAX as u64) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u64({}) to u8", x))) } else { @@ -2138,7 +2138,7 @@ impl IntegerValue { } } U128(x) => { - if x > (std::u8::MAX as u128) { + if x > (u8::MAX as u128) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u128({}) to u8", x))) } else { @@ -2146,7 +2146,7 @@ impl IntegerValue { } } U256(x) => { - if x > (u256::U256::from(std::u8::MAX)) { + if x > (u256::U256::from(u8::MAX)) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u256({}) to u8", x))) } else { @@ -2163,7 +2163,7 @@ impl IntegerValue { U8(x) => Ok(x as u16), U16(x) => Ok(x), U32(x) => { - if x > (std::u16::MAX as u32) { + if x > (u16::MAX as u32) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u32({}) to u16", x))) } else { @@ -2171,7 +2171,7 @@ impl IntegerValue { } } U64(x) => { - if x > (std::u16::MAX as u64) { + if x > (u16::MAX as u64) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u64({}) to u16", x))) } else { @@ -2179,7 +2179,7 @@ impl IntegerValue { } } U128(x) => { - if x > (std::u16::MAX as u128) { + if x > (u16::MAX as u128) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) 
.with_message(format!("Cannot cast u128({}) to u16", x))) } else { @@ -2187,7 +2187,7 @@ impl IntegerValue { } } U256(x) => { - if x > (u256::U256::from(std::u16::MAX)) { + if x > (u256::U256::from(u16::MAX)) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u256({}) to u16", x))) } else { @@ -2205,7 +2205,7 @@ impl IntegerValue { U16(x) => Ok(x as u32), U32(x) => Ok(x), U64(x) => { - if x > (std::u32::MAX as u64) { + if x > (u32::MAX as u64) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u64({}) to u32", x))) } else { @@ -2213,7 +2213,7 @@ impl IntegerValue { } } U128(x) => { - if x > (std::u32::MAX as u128) { + if x > (u32::MAX as u128) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u128({}) to u32", x))) } else { @@ -2221,7 +2221,7 @@ impl IntegerValue { } } U256(x) => { - if x > (u256::U256::from(std::u32::MAX)) { + if x > (u256::U256::from(u32::MAX)) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u128({}) to u32", x))) } else { @@ -2240,7 +2240,7 @@ impl IntegerValue { U32(x) => Ok(x as u64), U64(x) => Ok(x), U128(x) => { - if x > (std::u64::MAX as u128) { + if x > (u64::MAX as u128) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u128({}) to u64", x))) } else { @@ -2248,7 +2248,7 @@ impl IntegerValue { } } U256(x) => { - if x > (u256::U256::from(std::u64::MAX)) { + if x > (u256::U256::from(u64::MAX)) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u256({}) to u64", x))) } else { @@ -2268,7 +2268,7 @@ impl IntegerValue { U64(x) => Ok(x as u128), U128(x) => Ok(x), U256(x) => { - if x > (u256::U256::from(std::u128::MAX)) { + if x > (u256::U256::from(u128::MAX)) { Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) .with_message(format!("Cannot cast u256({}) to u128", x))) } else { diff --git a/external-crates/move/crates/test-generation/src/abstract_state.rs b/external-crates/move/crates/test-generation/src/abstract_state.rs index 00325826f9402..b83e2759c98b9 100644 --- a/external-crates/move/crates/test-generation/src/abstract_state.rs +++ b/external-crates/move/crates/test-generation/src/abstract_state.rs @@ -511,7 +511,7 @@ impl AbstractState { pub fn stack_push(&mut self, item: AbstractValue) { // Programs that are large enough to exceed this bound // will not be generated - debug_assert!(self.stack.len() < usize::max_value()); + debug_assert!(self.stack.len() < usize::MAX); self.stack.push(item); } @@ -521,7 +521,7 @@ impl AbstractState { if let Some(abstract_value) = self.register_move() { // Programs that are large enough to exceed this bound // will not be generated - debug_assert!(self.stack.len() < usize::max_value()); + debug_assert!(self.stack.len() < usize::MAX); self.stack.push(abstract_value); Ok(()) } else { diff --git a/external-crates/move/crates/test-generation/src/borrow_graph.rs b/external-crates/move/crates/test-generation/src/borrow_graph.rs index df1ea3fde6c18..8d5a489a78136 100644 --- a/external-crates/move/crates/test-generation/src/borrow_graph.rs +++ b/external-crates/move/crates/test-generation/src/borrow_graph.rs @@ -67,7 +67,7 @@ impl BorrowGraph { } self.partition_map.insert(self.partition_counter, vec![n]); // Implication of `checked_add` - debug_assert!(self.partitions.len() < usize::max_value()); + debug_assert!(self.partitions.len() < usize::MAX); self.partitions.push(self.partition_counter); 
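// Editor's note: the values_impl.rs and test-generation hunks around here are one mechanical
// migration: deprecated `T::max_value()` calls and `std::uN::MAX` paths become the associated
// constants `uN::MAX` (stable since Rust 1.43), with identical semantics. The checked
// narrowing pattern they guard looks like this in isolation:
fn u64_to_u16(x: u64) -> Option<u16> {
    if x > u16::MAX as u64 {
        None // would truncate; the VM hunks return an ARITHMETIC_ERROR here instead
    } else {
        Some(x as u16)
    }
}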
Ok(()) } else { diff --git a/external-crates/move/crates/test-generation/src/bytecode_generator.rs b/external-crates/move/crates/test-generation/src/bytecode_generator.rs index 3177706654b32..efbb3aae9141e 100644 --- a/external-crates/move/crates/test-generation/src/bytecode_generator.rs +++ b/external-crates/move/crates/test-generation/src/bytecode_generator.rs @@ -157,7 +157,7 @@ impl FunctionGenerationContext { pub fn incr_instruction_count(&mut self) -> Option<()> { self.bytecode_len += 1; - if self.bytecode_len >= (u16::max_value() - 1) as u64 { + if self.bytecode_len >= (u16::MAX - 1) as u64 { return None; } Some(()) @@ -357,25 +357,23 @@ impl<'a> BytecodeGenerator<'a> { } BytecodeType::U8(instruction) => { // Generate a random u8 constant to load - Some(instruction(self.rng.gen_range(0..u8::max_value()))) + Some(instruction(self.rng.gen_range(0..u8::MAX))) } BytecodeType::U16(instruction) => { // Generate a random u16 constant to load - Some(instruction(self.rng.gen_range(0..u16::max_value()))) + Some(instruction(self.rng.gen_range(0..u16::MAX))) } BytecodeType::U32(instruction) => { // Generate a random u32 constant to load - Some(instruction(self.rng.gen_range(0..u32::max_value()))) + Some(instruction(self.rng.gen_range(0..u32::MAX))) } BytecodeType::U64(instruction) => { // Generate a random u64 constant to load - Some(instruction(self.rng.gen_range(0..u64::max_value()))) + Some(instruction(self.rng.gen_range(0..u64::MAX))) } BytecodeType::U128(instruction) => { // Generate a random u128 constant to load - Some(instruction(Box::new( - self.rng.gen_range(0..u128::max_value()), - ))) + Some(instruction(Box::new(self.rng.gen_range(0..u128::MAX)))) } BytecodeType::U256(instruction) => { // Generate a random u256 constant to load @@ -442,7 +440,7 @@ impl<'a> BytecodeGenerator<'a> { || unsatisfied_preconditions == 0 { // The size of matches cannot be greater than the number of bytecode instructions - debug_assert!(matches.len() < usize::max_value()); + debug_assert!(matches.len() < usize::MAX); matches.push((*stack_effect, instruction)); } } @@ -572,7 +570,7 @@ impl<'a> BytecodeGenerator<'a> { exact: bool, ) -> Option { // Bytecode will never be generated this large - debug_assert!(bytecode.len() < usize::max_value()); + debug_assert!(bytecode.len() < usize::MAX); debug!("**********************"); debug!("State1: {}", state); debug!("Next instr: {:?}", instruction); diff --git a/external-crates/move/crates/test-generation/src/control_flow_graph.rs b/external-crates/move/crates/test-generation/src/control_flow_graph.rs index 78100aa710d89..341d68ed5f6d7 100644 --- a/external-crates/move/crates/test-generation/src/control_flow_graph.rs +++ b/external-crates/move/crates/test-generation/src/control_flow_graph.rs @@ -95,22 +95,22 @@ impl CFG { // The number of edges will be at most `2*target_blocks`` // Since target blocks is at most a `u16`, this will not overflow even if // `usize` is a `u32` - debug_assert!(edges.len() < usize::max_value()); + debug_assert!(edges.len() < usize::MAX); edges.push((parent_block_id, current_block_id)); block_queue.push_back(current_block_id); // `current_block_id` is bound by the max og `target_block_size` - debug_assert!(current_block_id < u16::max_value()); + debug_assert!(current_block_id < u16::MAX); current_block_id += 1; // Generate a second child edge with prob = 1/2 if rng.gen_bool(0.5) && current_block_id < target_blocks { // The number of edges will be at most `2*target_blocks`` // Since target blocks is at most a `u16`, this will not overflow even 
if // `usize` is a `u32` - debug_assert!(edges.len() < usize::max_value()); + debug_assert!(edges.len() < usize::MAX); edges.push((parent_block_id, current_block_id)); block_queue.push_back(current_block_id); // `current_block_id` is bound by the max og `target_block_size` - debug_assert!(current_block_id < u16::max_value()); + debug_assert!(current_block_id < u16::MAX); current_block_id += 1; } } @@ -156,7 +156,7 @@ impl CFG { for (parent, child) in self.edges.iter() { if *parent == block_id { // Length is bound by iteration on `self.edges` - debug_assert!(children_ids.len() < usize::max_value()); + debug_assert!(children_ids.len() < usize::MAX); children_ids.push(*child); } } @@ -175,7 +175,7 @@ impl CFG { for (parent, child) in self.edges.iter() { if *child == block_id { // Iteration is bound by the self.edges vector length - debug_assert!(parent_ids.len() < usize::max_value()); + debug_assert!(parent_ids.len() < usize::MAX); parent_ids.push(*parent); } } diff --git a/external-crates/move/crates/test-generation/src/lib.rs b/external-crates/move/crates/test-generation/src/lib.rs index 4a4ce8449659a..676044d88b632 100644 --- a/external-crates/move/crates/test-generation/src/lib.rs +++ b/external-crates/move/crates/test-generation/src/lib.rs @@ -236,9 +236,7 @@ pub fn module_frame_generation( let mut module = generate_module(&mut rng, generation_options.clone()); // Either get the number of iterations provided by the user, or iterate "infinitely"--up to // u128::MAX number of times. - let iters = num_iters - .map(|x| x as u128) - .unwrap_or_else(|| std::u128::MAX); + let iters = num_iters.map(|x| x as u128).unwrap_or_else(|| u128::MAX); while generated < iters && sender.send(module).is_ok() { module = generate_module(&mut rng, generation_options.clone()); diff --git a/external-crates/move/crates/test-generation/tests/generic_instructions.rs b/external-crates/move/crates/test-generation/tests/generic_instructions.rs index 66eb17078de50..0f6305f419638 100644 --- a/external-crates/move/crates/test-generation/tests/generic_instructions.rs +++ b/external-crates/move/crates/test-generation/tests/generic_instructions.rs @@ -13,7 +13,7 @@ use test_generation::transitions::Subst; #[test] fn unify_no_subst() { use SignatureToken::*; - let tys = vec![Bool, U64, Vector(Box::new(U8)), Address]; + let tys = [Bool, U64, Vector(Box::new(U8)), Address]; for tok1 in tys.iter() { for tok2 in tys.iter() { let should_unify = tok1.clone() == tok2.clone(); diff --git a/external-crates/move/move-execution/v0/crates/bytecode-verifier-tests/src/unit_tests/mod.rs b/external-crates/move/move-execution/v0/crates/bytecode-verifier-tests/src/unit_tests/mod.rs index b18560955e1e9..20ef87ddf6fa1 100644 --- a/external-crates/move/move-execution/v0/crates/bytecode-verifier-tests/src/unit_tests/mod.rs +++ b/external-crates/move/move-execution/v0/crates/bytecode-verifier-tests/src/unit_tests/mod.rs @@ -53,6 +53,6 @@ pub(crate) fn production_config() -> (VerifierConfig, MeterConfig) { bytecode_version: VERSION_6, max_variants_in_enum: Some(VARIANT_COUNT_MAX), }, - MeterConfig::default(), + MeterConfig::old_default(), ) } diff --git a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/control_flow_v5.rs b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/control_flow_v5.rs index b24d7aeca62b9..eb705b9498b8b 100644 --- a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/control_flow_v5.rs +++ 
b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/control_flow_v5.rs @@ -37,7 +37,7 @@ pub fn verify( fn verify_fallthrough( current_function: FunctionDefinitionIndex, - code: &Vec<Bytecode>, + code: &[Bytecode], ) -> PartialVMResult<()> { // Check to make sure that the bytecode vector ends with a branching instruction. match code.last() { diff --git a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs index d8d7b9f00a9a9..673660db4466b 100644 --- a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs +++ b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs @@ -512,6 +512,7 @@ impl AbstractState { Ok(AbstractValue::Reference(elem_id)) } + #[allow(deprecated)] pub fn call( &mut self, offset: CodeOffset, diff --git a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/script_signature.rs b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/script_signature.rs index b55ea638e1e3d..35b065b204dbe 100644 --- a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/script_signature.rs +++ b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/script_signature.rs @@ -9,6 +9,7 @@ //! - (DEPRECATED) Has an empty return type //! - All return types are not references //! - Satisfies the additional checks provided as an argument via `check_signature` +//! //! `check_signature` should be used by adapters to quickly and easily verify custom signature //! rules for entrypoints diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/tracing.rs b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/tracing.rs index b016879c9bb45..a4c984c62a537 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/tracing.rs +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/tracing.rs @@ -48,7 +48,6 @@ static DEBUGGING_ENABLED: Lazy<bool> = static LOGGING_FILE: Lazy<Mutex<File>> = Lazy::new(|| Mutex::new( OpenOptions::new() - .write(true) .create(true) .append(true) .open(&*FILE_PATH) diff --git a/external-crates/move/move-execution/v1/crates/bytecode-verifier-tests/src/unit_tests/mod.rs b/external-crates/move/move-execution/v1/crates/bytecode-verifier-tests/src/unit_tests/mod.rs index b18560955e1e9..20ef87ddf6fa1 100644 --- a/external-crates/move/move-execution/v1/crates/bytecode-verifier-tests/src/unit_tests/mod.rs +++ b/external-crates/move/move-execution/v1/crates/bytecode-verifier-tests/src/unit_tests/mod.rs @@ -53,6 +53,6 @@ pub(crate) fn production_config() -> (VerifierConfig, MeterConfig) { bytecode_version: VERSION_6, max_variants_in_enum: Some(VARIANT_COUNT_MAX), }, - MeterConfig::default(), + MeterConfig::old_default(), ) } diff --git a/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/control_flow_v5.rs b/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/control_flow_v5.rs index b24d7aeca62b9..eb705b9498b8b 100644 --- a/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/control_flow_v5.rs +++ b/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/control_flow_v5.rs @@ -37,7 +37,7 @@ pub fn verify( fn verify_fallthrough( current_function: FunctionDefinitionIndex, - code: &Vec<Bytecode>, + code:
&[Bytecode], ) -> PartialVMResult<()> { // Check to make sure that the bytecode vector ends with a branching instruction. match code.last() { diff --git a/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs b/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs index d8d7b9f00a9a9..673660db4466b 100644 --- a/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs +++ b/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs @@ -512,6 +512,7 @@ impl AbstractState { Ok(AbstractValue::Reference(elem_id)) } + #[allow(deprecated)] pub fn call( &mut self, offset: CodeOffset, diff --git a/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/script_signature.rs b/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/script_signature.rs index b55ea638e1e3d..35b065b204dbe 100644 --- a/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/script_signature.rs +++ b/external-crates/move/move-execution/v1/crates/move-bytecode-verifier/src/script_signature.rs @@ -9,6 +9,7 @@ //! - (DEPRECATED) Has an empty return type //! - All return types are not references //! - Satisfies the additional checks provided as an argument via `check_signature` +//! //! `check_signature` should be used by adapters to quickly and easily verify custom signature //! rules for entrypoints diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/tracing.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/tracing.rs index b016879c9bb45..a4c984c62a537 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/tracing.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/tracing.rs @@ -48,7 +48,6 @@ static DEBUGGING_ENABLED: Lazy<bool> = static LOGGING_FILE: Lazy<Mutex<File>> = Lazy::new(|| Mutex::new( OpenOptions::new() - .write(true) .create(true) .append(true) .open(&*FILE_PATH) diff --git a/external-crates/move/move-execution/v2/crates/bytecode-verifier-tests/src/unit_tests/mod.rs b/external-crates/move/move-execution/v2/crates/bytecode-verifier-tests/src/unit_tests/mod.rs index b18560955e1e9..20ef87ddf6fa1 100644 --- a/external-crates/move/move-execution/v2/crates/bytecode-verifier-tests/src/unit_tests/mod.rs +++ b/external-crates/move/move-execution/v2/crates/bytecode-verifier-tests/src/unit_tests/mod.rs @@ -53,6 +53,6 @@ pub(crate) fn production_config() -> (VerifierConfig, MeterConfig) { bytecode_version: VERSION_6, max_variants_in_enum: Some(VARIANT_COUNT_MAX), }, - MeterConfig::default(), + MeterConfig::old_default(), ) } diff --git a/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/control_flow_v5.rs b/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/control_flow_v5.rs index b24d7aeca62b9..eb705b9498b8b 100644 --- a/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/control_flow_v5.rs +++ b/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/control_flow_v5.rs @@ -37,7 +37,7 @@ pub fn verify( fn verify_fallthrough( current_function: FunctionDefinitionIndex, - code: &Vec<Bytecode>, + code: &[Bytecode], ) -> PartialVMResult<()> { // Check to make sure that the bytecode vector ends with a branching instruction.
match code.last() { diff --git a/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs b/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs index d8d7b9f00a9a9..673660db4466b 100644 --- a/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs +++ b/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/reference_safety/abstract_state.rs @@ -512,6 +512,7 @@ impl AbstractState { Ok(AbstractValue::Reference(elem_id)) } + #[allow(deprecated)] pub fn call( &mut self, offset: CodeOffset, diff --git a/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/script_signature.rs b/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/script_signature.rs index b55ea638e1e3d..35b065b204dbe 100644 --- a/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/script_signature.rs +++ b/external-crates/move/move-execution/v2/crates/move-bytecode-verifier/src/script_signature.rs @@ -9,6 +9,7 @@ //! - (DEPRECATED) Has an empty return type //! - All return types are not references //! - Satisfies the additional checks provided as an argument via `check_signature` +//! //! `check_signature` should be used by adapters to quickly and easily verify custom signature //! rules for entrypoints diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/tracing.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/tracing.rs index b016879c9bb45..a4c984c62a537 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/tracing.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/tracing.rs @@ -48,7 +48,6 @@ static DEBUGGING_ENABLED: Lazy<bool> = static LOGGING_FILE: Lazy<Mutex<File>> = Lazy::new(|| Mutex::new( OpenOptions::new() - .write(true) .create(true) .append(true) .open(&*FILE_PATH) diff --git a/narwhal/config/src/lib.rs b/narwhal/config/src/lib.rs index 37f92aec91fb9..6ee97a903091f 100644 --- a/narwhal/config/src/lib.rs +++ b/narwhal/config/src/lib.rs @@ -99,7 +99,11 @@ impl Import for D {} pub trait Export: Serialize { fn export(&self, path: &str) -> Result<(), ConfigError> { let writer = || -> Result<(), std::io::Error> { - let file = OpenOptions::new().create(true).write(true).open(path)?; + let file = OpenOptions::new() + .create(true) + .truncate(true) + .write(true) + .open(path)?; let mut writer = BufWriter::new(file); let data = serde_json::to_string_pretty(self).unwrap(); writer.write_all(data.as_ref())?; diff --git a/narwhal/node/src/benchmark_client.rs b/narwhal/node/src/benchmark_client.rs index 1f6e10843dfec..a8d721f882d8c 100644 --- a/narwhal/node/src/benchmark_client.rs +++ b/narwhal/node/src/benchmark_client.rs @@ -31,6 +31,7 @@ use worker::LazyNarwhalClient; /// * the size of the transactions via the --size property /// * the worker address to send the transactions to. A url format is expected ex http://127.0.0.1:7000 /// * the rate of sending transactions via the --rate parameter +/// /// Optionally the --nodes parameter can be passed where a list of worker addresses /// should be passed. The benchmarking client will first try to connect to all of those nodes before it starts sending /// any transactions. That confirms the system is up and running and ready to start processing the transactions.
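The `Export::export` change above is a behavioral fix, not a cleanup: `OpenOptions::new().create(true).write(true)` opens an existing file without clearing it, so writing a payload shorter than the previous contents leaves stale trailing bytes (corrupt JSON, in this case). A minimal standalone sketch of the failure mode and the fix (the `demo.json` path is hypothetical):

```rust
use std::fs::OpenOptions;
use std::io::Write;

fn main() -> std::io::Result<()> {
    let path = "demo.json"; // hypothetical scratch file

    // Seed the file with a long payload.
    let mut f = OpenOptions::new().create(true).write(true).open(path)?;
    f.write_all(b"{\"key\": \"a fairly long value\"}")?;
    drop(f);

    // Without truncate(true), a shorter rewrite starts at offset 0 but the
    // tail of the old payload survives, corrupting the JSON.
    let mut f = OpenOptions::new().create(true).write(true).open(path)?;
    f.write_all(b"{\"key\": 1}")?;
    drop(f);
    println!("{}", std::fs::read_to_string(path)?); // {"key": 1} fairly long value"}

    // With truncate(true), the file is emptied before writing, which is the
    // behavior the Export::export fix establishes.
    let mut f = OpenOptions::new()
        .create(true)
        .truncate(true)
        .write(true)
        .open(path)?;
    f.write_all(b"{\"key\": 1}")?;
    drop(f);
    println!("{}", std::fs::read_to_string(path)?); // {"key": 1}
    Ok(())
}
```

The same reasoning covers the `tracing.rs` hunks above: with `append(true)` set, `write(true)` is redundant (append implies write access), so removing it is a no-op cleanup, presumably to satisfy the stricter open-options lints that arrive with the Rust 1.80.1 toolchain bump later in this patch.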
diff --git a/narwhal/node/src/main.rs b/narwhal/node/src/main.rs index 8de09ecd12106..3aa919b9b5012 100644 --- a/narwhal/node/src/main.rs +++ b/narwhal/node/src/main.rs @@ -303,7 +303,7 @@ async fn main() -> Result<(), eyre::Report> { /// Generate all the genesis files required for benchmarks. fn benchmark_genesis( - ips: &Vec<String>, + ips: &[String], working_directory: &PathBuf, num_workers: usize, base_port: usize, diff --git a/narwhal/primary/src/consensus/metrics.rs b/narwhal/primary/src/consensus/metrics.rs index d855a249327bf..11a16da29d8f9 100644 --- a/narwhal/primary/src/consensus/metrics.rs +++ b/narwhal/primary/src/consensus/metrics.rs @@ -37,7 +37,7 @@ pub struct ConsensusMetrics { /// all the nodes are expected to report the same results. For every leader of each round the /// output can be one of the following: /// * committed: the leader has been found and its subdag will get committed - no matter if the leader - /// is committed on its time or not (part of recursion) + /// is committed on its time or not (part of recursion) /// * not_found: the leader has not been found on the commit path and doesn't get committed /// * no_path: the leader exists but there is no path that leads to it pub leader_election: IntCounterVec, @@ -135,9 +135,9 @@ pub struct ChannelMetrics { /// occupancy of the channel from the `Consensus` to `SubscriberHandler`. /// See also: /// * tx_committed_certificates in primary, where the committed certificates - /// from `Consensus` are sent to `primary::StateHandler` + /// from `Consensus` are sent to `primary::StateHandler` /// * tx_new_certificates where the newly accepted certificates are sent - /// from `primary::Synchronizer` to `Consensus` + /// from `primary::Synchronizer` to `Consensus` pub tx_sequence: IntGauge, } diff --git a/narwhal/primary/src/synchronizer.rs b/narwhal/primary/src/synchronizer.rs index 6ce964d43501a..f7953c8bfab87 100644 --- a/narwhal/primary/src/synchronizer.rs +++ b/narwhal/primary/src/synchronizer.rs @@ -336,6 +336,7 @@ impl Inner { /// - Validating and accepting certificates received from peers. /// - Triggering fetching for certificates and batches. /// - Broadcasting created certificates. +/// /// `Synchronizer` contains most of the certificate processing logic in Narwhal. #[derive(Clone)] pub struct Synchronizer { @@ -1516,9 +1517,7 @@ impl State { ) -> Option<((u64, CertificateDigest), Option)> { // Accept suspended certificates at and below gc round because their parents will not // be accepted into the DAG store anymore, in sanitize_certificate(). - let Some(((round, digest), _children)) = self.missing.first_key_value() else { - return None; - }; + let ((round, digest), _children) = self.missing.first_key_value()?; // Note that gc_round is the highest round where certificates are gc'ed, and which will // never be in a consensus commit. It's safe to gc up to gc_round, so anything suspended on gc_round + 1 // can safely be accepted as their parents (of gc_round) have already been removed from the DAG.
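The `synchronizer.rs` hunk above applies the idiom clippy's `question_mark` lint suggests: in a function returning `Option`, `expr?` propagates `None` directly, replacing the wordier `let Some(x) = expr else { return None };`. A minimal sketch with hypothetical types standing in for the `(round, digest) -> children` map:

```rust
use std::collections::BTreeMap;

type Round = u64;
type Digest = u64;

fn next_missing(missing: &BTreeMap<(Round, Digest), Vec<Digest>>) -> Option<(Round, Digest)> {
    // `first_key_value()` yields Option<(&K, &V)>; `?` returns None early,
    // exactly like the `let Some(...) else { return None };` it replaces.
    let ((round, digest), _children) = missing.first_key_value()?;
    Some((*round, *digest))
}

fn main() {
    let mut missing = BTreeMap::new();
    assert_eq!(next_missing(&missing), None);
    missing.insert((1, 42), vec![7]);
    assert_eq!(next_missing(&missing), Some((1, 42)));
}
```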
diff --git a/narwhal/primary/tests/randomized_tests.rs b/narwhal/primary/tests/randomized_tests.rs index 79274b9dbdd18..db357ab53ae5a 100644 --- a/narwhal/primary/tests/randomized_tests.rs +++ b/narwhal/primary/tests/randomized_tests.rs @@ -299,7 +299,7 @@ fn generate_randomised_dag( /// * nodes that don't create certificates at all for some rounds (failures) /// * leaders that don't get enough support (f+1) for their immediate round /// * slow nodes - nodes that create certificates but those might not be referenced by nodes of -/// subsequent rounds. +/// subsequent rounds. pub fn make_certificates_with_parameters( seed: u64, committee: &Committee, diff --git a/narwhal/storage/src/certificate_store.rs b/narwhal/storage/src/certificate_store.rs index 17517c5edd686..8f1264f94745d 100644 --- a/narwhal/storage/src/certificate_store.rs +++ b/narwhal/storage/src/certificate_store.rs @@ -187,50 +187,6 @@ impl Cache for CertificateStoreCache { } } -/// An implementation that basically disables the caching functionality when used for CertificateStore. -#[derive(Clone)] -struct NoCache {} - -impl Cache for NoCache { - fn write(&self, _certificate: Certificate) { - // no-op - } - - fn write_all(&self, _certificate: Vec<Certificate>) { - // no-op - } - - fn read(&self, _digest: &CertificateDigest) -> Option<Certificate> { - None - } - - fn read_all( - &self, - digests: Vec<CertificateDigest>, - ) -> Vec<(CertificateDigest, Option<Certificate>)> { - digests.into_iter().map(|digest| (digest, None)).collect() - } - - fn contains(&self, _digest: &CertificateDigest) -> bool { - false - } - - fn multi_contains<'a>( - &self, - digests: impl Iterator<Item = &'a CertificateDigest>, - ) -> Vec<bool> { - digests.map(|_| false).collect() - } - - fn remove(&self, _digest: &CertificateDigest) { - // no-op - } - - fn remove_all(&self, _digests: Vec<CertificateDigest>) { - // no-op - } -} - /// The main storage when we have to deal with certificates. It maintains /// two storages, one main which saves the certificates by their ids, and a /// secondary one which acts as an index to allow us fast retrieval based @@ -731,7 +687,7 @@ impl CertificateStore { #[cfg(test)] mod test { - use crate::certificate_store::{CertificateStore, NoCache}; + use crate::certificate_store::CertificateStore; use crate::{Cache, CertificateStoreCache}; use config::AuthorityIdentifier; use fastcrypto::hash::Hash; @@ -749,6 +705,50 @@ mod test { use test_utils::{latest_protocol_version, temp_dir, CommitteeFixture}; use types::{Certificate, CertificateAPI, CertificateDigest, HeaderAPI, Round}; + /// An implementation that basically disables the caching functionality when used for CertificateStore.
+ #[derive(Clone)] + struct NoCache {} + + impl Cache for NoCache { + fn write(&self, _certificate: Certificate) { + // no-op + } + + fn write_all(&self, _certificate: Vec<Certificate>) { + // no-op + } + + fn read(&self, _digest: &CertificateDigest) -> Option<Certificate> { + None + } + + fn read_all( + &self, + digests: Vec<CertificateDigest>, + ) -> Vec<(CertificateDigest, Option<Certificate>)> { + digests.into_iter().map(|digest| (digest, None)).collect() + } + + fn contains(&self, _digest: &CertificateDigest) -> bool { + false + } + + fn multi_contains<'a>( + &self, + digests: impl Iterator<Item = &'a CertificateDigest>, + ) -> Vec<bool> { + digests.map(|_| false).collect() + } + + fn remove(&self, _digest: &CertificateDigest) { + // no-op + } + + fn remove_all(&self, _digests: Vec<CertificateDigest>) { + // no-op + } + } + fn new_store(path: std::path::PathBuf) -> CertificateStore { let (certificate_map, certificate_id_by_round_map, certificate_id_by_origin_map) = create_db_maps(path); diff --git a/narwhal/test-utils/src/cluster.rs b/narwhal/test-utils/src/cluster.rs index 84fbf658a12b2..cd0e426f2a90d 100644 --- a/narwhal/test-utils/src/cluster.rs +++ b/narwhal/test-utils/src/cluster.rs @@ -178,6 +178,7 @@ impl Cluster { /// Returns all the running authorities. Any authority that: /// * hasn't been started ever /// * or has been stopped + /// + /// will not be returned by this method. pub async fn authorities(&self) -> Vec { let mut result = Vec::new(); diff --git a/narwhal/types/src/primary.rs b/narwhal/types/src/primary.rs index a2ab6d0afcd68..a478479ab0e54 100644 --- a/narwhal/types/src/primary.rs +++ b/narwhal/types/src/primary.rs @@ -1729,6 +1729,7 @@ pub struct FetchCertificatesRequest { /// This contains per authority serialized RoaringBitmap for the round diffs between /// - rounds of certificates to be skipped from the response and /// - the GC round. + /// /// These rounds are skipped because the requestor already has them. pub skip_rounds: Vec<(AuthorityIdentifier, Vec<u8>)>, /// Maximum number of certificates that should be returned. diff --git a/narwhal/worker/Cargo.toml b/narwhal/worker/Cargo.toml index bfb56b3886dfe..6b70a2be94f81 100644 --- a/narwhal/worker/Cargo.toml +++ b/narwhal/worker/Cargo.toml @@ -6,6 +6,9 @@ authors = ["Mysten Labs "] edition = "2021" publish = false +[lints] +workspace = true + [dependencies] arc-swap.workspace = true async-trait.workspace = true diff --git a/narwhal/worker/src/handlers.rs b/narwhal/worker/src/handlers.rs index e908739dbd7ba..8aab5309d1820 100644 --- a/narwhal/worker/src/handlers.rs +++ b/narwhal/worker/src/handlers.rs @@ -114,6 +114,7 @@ impl WorkerToWorker for WorkerReceiverHandler { /// Defines how the network receiver handles incoming primary messages. pub struct PrimaryReceiverHandler { // The id of this authority. + #[allow(unused)] pub authority_id: AuthorityIdentifier, // The id of this worker. pub id: WorkerId, @@ -127,6 +128,7 @@ pub struct PrimaryReceiverHandler { // Timeout on RequestBatches RPC. pub request_batches_timeout: Duration, // Number of random nodes to query when retrying batch requests. + #[allow(unused)] pub request_batches_retry_nodes: usize, // Synchronize header payloads from other workers.
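The `certificate_store.rs` reshuffle above (and the `#[allow(unused)]` attributes in `handlers.rs`) address dead-code warnings: `NoCache` is only used by tests, so it moves inside the `#[cfg(test)]` module and is no longer compiled into non-test builds. A minimal sketch of the pattern, with a hypothetical trait and types:

```rust
// Production trait and implementation stay at module scope.
pub trait Cache {
    fn contains(&self, digest: u64) -> bool;
}

pub struct StoreCache; // hypothetical production cache

impl Cache for StoreCache {
    fn contains(&self, _digest: u64) -> bool {
        true // pretend everything is cached
    }
}

#[cfg(test)]
mod test {
    use super::*;

    /// Test-only stand-in that disables caching, analogous to `NoCache`:
    /// compiled only under `cargo test`, so it can never be flagged as
    /// dead code in release builds.
    struct NoCache;

    impl Cache for NoCache {
        fn contains(&self, _digest: u64) -> bool {
            false // always miss, forcing the backing-store path
        }
    }

    #[test]
    fn no_cache_never_hits() {
        assert!(!NoCache.contains(7));
        assert!(StoreCache.contains(7));
    }
}
```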
pub network: Option, diff --git a/release_notes.txt b/release_notes.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 744175d523f9a..a56a283d2abc1 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.78" +channel = "1.80.1" diff --git a/scripts/release_notes.py b/scripts/release_notes.py index 4629a304db631..fe5c4e68f83a0 100755 --- a/scripts/release_notes.py +++ b/scripts/release_notes.py @@ -304,7 +304,7 @@ def do_generate(from_, to): print(f"## {impacted}") if impacted == "Protocol": - print(f"Sui Protocol Version in this release: {protocol_version}") + print(f"#### Sui Protocol Version in this release: `{protocol_version}`") print() for pr, note in reversed(notes): diff --git a/scripts/simtest/cargo-simtest b/scripts/simtest/cargo-simtest index 2c591c251d653..bd07db8332388 100755 --- a/scripts/simtest/cargo-simtest +++ b/scripts/simtest/cargo-simtest @@ -54,9 +54,9 @@ if [ -n "$LOCAL_MSIM_PATH" ]; then else cargo_patch_args=( --config 'patch.crates-io.tokio.git = "https://github.com/MystenLabs/mysten-sim.git"' - --config 'patch.crates-io.tokio.rev = "220f52a15804a768610ac0ae3b8da7de4a5c4d2b"' + --config 'patch.crates-io.tokio.rev = "b320996d8dfb99b273fe31c0222c659332283c99"' --config 'patch.crates-io.futures-timer.git = "https://github.com/MystenLabs/mysten-sim.git"' - --config 'patch.crates-io.futures-timer.rev = "220f52a15804a768610ac0ae3b8da7de4a5c4d2b"' + --config 'patch.crates-io.futures-timer.rev = "b320996d8dfb99b273fe31c0222c659332283c99"' ) fi diff --git a/scripts/simtest/codecov.sh b/scripts/simtest/codecov.sh index afdd148855633..6df7c9ccaef05 100755 --- a/scripts/simtest/codecov.sh +++ b/scripts/simtest/codecov.sh @@ -11,6 +11,9 @@ fi # apply git patch git apply ./scripts/simtest/config-patch +root_dir=$(git rev-parse --show-toplevel) +export SIMTEST_STATIC_INIT_MOVE=$root_dir"/examples/move/basics" + MSIM_WATCHDOG_TIMEOUT_MS=60000 MSIM_TEST_SEED=1 cargo llvm-cov --ignore-run-fail --lcov --output-path lcov-simtest.info nextest --cargo-profile simulator # remove the patch diff --git a/scripts/simtest/config-patch b/scripts/simtest/config-patch index e30af462b02ad..655ca6ce64adc 100644 --- a/scripts/simtest/config-patch +++ b/scripts/simtest/config-patch @@ -12,12 +12,11 @@ diff --git a/Cargo.toml b/Cargo.toml index c0829bc1b6..4007f97d66 100644 --- a/Cargo.toml +++ b/Cargo.toml -@@ -682,6 +682,8 @@ field_names = "0.2.0" +@@ -682,5 +682,7 @@ field_names = "0.2.0" semver = "1.0.16" spinners = "4.1.0" include_dir = "0.7.3" [patch.crates-io] - quinn-proto = { git = "https://github.com/quinn-rs/quinn.git", rev = "f0fa66f871b80b9d2d7075d76967c649aecc0b77" } -+tokio = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "220f52a15804a768610ac0ae3b8da7de4a5c4d2b" } -+futures-timer = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "220f52a15804a768610ac0ae3b8da7de4a5c4d2b" } ++tokio = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "b320996d8dfb99b273fe31c0222c659332283c99" } ++futures-timer = { git = "https://github.com/MystenLabs/mysten-sim.git", rev = "b320996d8dfb99b273fe31c0222c659332283c99" } diff --git a/sdk/create-dapp/CHANGELOG.md b/sdk/create-dapp/CHANGELOG.md index 6be6c02ea471d..8b32d4af32408 100644 --- a/sdk/create-dapp/CHANGELOG.md +++ b/sdk/create-dapp/CHANGELOG.md @@ -1,5 +1,25 @@ # @mysten/create-dapp +## 0.3.17 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated 
dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + - @mysten/dapp-kit@0.14.17 + +## 0.3.16 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + - @mysten/dapp-kit@0.14.16 + ## 0.3.15 ### Patch Changes diff --git a/sdk/create-dapp/package.json b/sdk/create-dapp/package.json index cd056e2674ecf..ef18887ca7d3e 100644 --- a/sdk/create-dapp/package.json +++ b/sdk/create-dapp/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "A CLI for creating new Sui dApps", "homepage": "https://sdk.mystenlabs.com", - "version": "0.3.15", + "version": "0.3.17", "license": "Apache-2.0", "files": [ "CHANGELOG.md", diff --git a/sdk/dapp-kit/CHANGELOG.md b/sdk/dapp-kit/CHANGELOG.md index 4a84bce2afdc8..05a307a8bfc72 100644 --- a/sdk/dapp-kit/CHANGELOG.md +++ b/sdk/dapp-kit/CHANGELOG.md @@ -1,5 +1,27 @@ # @mysten/dapp-kit +## 0.14.17 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + - @mysten/wallet-standard@0.13.2 + - @mysten/zksend@0.10.6 + +## 0.14.16 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + - @mysten/wallet-standard@0.13.1 + - @mysten/zksend@0.10.5 + ## 0.14.15 ### Patch Changes diff --git a/sdk/dapp-kit/package.json b/sdk/dapp-kit/package.json index bd53555902844..4547755a41712 100644 --- a/sdk/dapp-kit/package.json +++ b/sdk/dapp-kit/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "A collection of React hooks and components for interacting with the Sui blockchain and wallets.", "homepage": "https://sdk.mystenlabs.com/typescript", - "version": "0.14.15", + "version": "0.14.17", "license": "Apache-2.0", "files": [ "CHANGELOG.md", diff --git a/sdk/deepbook-v3/CHANGELOG.md b/sdk/deepbook-v3/CHANGELOG.md index b0482b6d3110d..2501d9bb5de9e 100644 --- a/sdk/deepbook-v3/CHANGELOG.md +++ b/sdk/deepbook-v3/CHANGELOG.md @@ -1,5 +1,48 @@ # @mysten/deepbook-v3 +## 0.3.3 + +### Patch Changes + +- ed221a6: Update package address + +## 0.3.2 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.3.1 + +### Patch Changes + +- d70e8ff: Upgrade Package + +## 0.3.0 + +### Minor Changes + +- 36f1c6f: Rounding for numbers, exports update +- c51f186: New contract constants + +## 0.2.1 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + +## 0.2.0 + +### Minor Changes + +- 41361b6: Constants update, manager sdk update + ## 0.1.0 ### Minor Changes diff --git a/sdk/deepbook-v3/package.json b/sdk/deepbook-v3/package.json index 6abe3ed25b2f5..418706faa1890 100644 --- a/sdk/deepbook-v3/package.json +++ b/sdk/deepbook-v3/package.json @@ -2,7 +2,7 @@ "name": "@mysten/deepbook-v3", "author": "Mysten Labs ", "description": "Sui Deepbook SDK", - "version": "0.1.0", + "version": "0.3.3", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/deepbook-v3/src/client.ts b/sdk/deepbook-v3/src/client.ts index 5269f5b447a51..b27c1aa68d7fd 100644 --- a/sdk/deepbook-v3/src/client.ts +++ b/sdk/deepbook-v3/src/client.ts @@ -93,7 +93,7 @@ export class DeepBookClient { return { coinType: coin.type, - balance: adjusted_balance, + balance: Number(adjusted_balance.toFixed(9)), }; } @@ -142,9 
+142,9 @@ export class DeepBookClient { return { baseQuantity, - baseOut: baseOut / baseScalar, - quoteOut: quoteOut / quoteScalar, - deepRequired: deepRequired / DEEP_SCALAR, + baseOut: Number((baseOut / baseScalar).toFixed(9)), + quoteOut: Number((quoteOut / quoteScalar).toFixed(9)), + deepRequired: Number((deepRequired / DEEP_SCALAR).toFixed(9)), }; } @@ -173,9 +173,9 @@ export class DeepBookClient { return { quoteQuantity: quoteQuantity, - baseOut: baseOut / baseScalar, - quoteOut: quoteOut / quoteScalar, - deepRequired: deepRequired / DEEP_SCALAR, + baseOut: Number((baseOut / baseScalar).toFixed(9)), + quoteOut: Number((quoteOut / quoteScalar).toFixed(9)), + deepRequired: Number((deepRequired / DEEP_SCALAR).toFixed(9)), }; } @@ -206,9 +206,9 @@ export class DeepBookClient { return { baseQuantity, quoteQuantity, - baseOut: baseOut / baseScalar, - quoteOut: quoteOut / quoteScalar, - deepRequired: deepRequired / DEEP_SCALAR, + baseOut: Number((baseOut / baseScalar).toFixed(9)), + quoteOut: Number((quoteOut / quoteScalar).toFixed(9)), + deepRequired: Number((deepRequired / DEEP_SCALAR).toFixed(9)), }; } @@ -301,10 +301,12 @@ export class DeepBookClient { const parsed_quantities = bcs.vector(bcs.u64()).parse(new Uint8Array(quantities)); return { - prices: parsed_prices.map( - (price) => (Number(price) / FLOAT_SCALAR / quoteCoin.scalar) * baseCoin.scalar, + prices: parsed_prices.map((price) => + Number(((Number(price) / FLOAT_SCALAR / quoteCoin.scalar) * baseCoin.scalar).toFixed(9)), + ), + quantities: parsed_quantities.map((price) => + Number((Number(price) / baseCoin.scalar).toFixed(9)), ), - quantities: parsed_quantities.map((price) => Number(price) / baseCoin.scalar), }; } @@ -338,14 +340,18 @@ export class DeepBookClient { const ask_parsed_quantities = bcs.vector(bcs.u64()).parse(new Uint8Array(ask_quantities)); return { - bid_prices: bid_parsed_prices.map( - (price) => (Number(price) / FLOAT_SCALAR / quoteCoin.scalar) * baseCoin.scalar, + bid_prices: bid_parsed_prices.map((price) => + Number(((Number(price) / FLOAT_SCALAR / quoteCoin.scalar) * baseCoin.scalar).toFixed(9)), + ), + bid_quantities: bid_parsed_quantities.map((quantity) => + Number((Number(quantity) / baseCoin.scalar).toFixed(9)), + ), + ask_prices: ask_parsed_prices.map((price) => + Number(((Number(price) / FLOAT_SCALAR / quoteCoin.scalar) * baseCoin.scalar).toFixed(9)), ), - bid_quantities: bid_parsed_quantities.map((quantity) => Number(quantity) / baseCoin.scalar), - ask_prices: ask_parsed_prices.map( - (price) => (Number(price) / FLOAT_SCALAR / quoteCoin.scalar) * baseCoin.scalar, + ask_quantities: ask_parsed_quantities.map((quantity) => + Number((Number(quantity) / baseCoin.scalar).toFixed(9)), ), - ask_quantities: ask_parsed_quantities.map((quantity) => Number(quantity) / baseCoin.scalar), }; } @@ -372,9 +378,9 @@ export class DeepBookClient { const deepInVault = Number(bcs.U64.parse(new Uint8Array(res.results![0].returnValues![2][0]))); return { - base: baseInVault / baseScalar, - quote: quoteInVault / quoteScalar, - deep: deepInVault / DEEP_SCALAR, + base: Number((baseInVault / baseScalar).toFixed(9)), + quote: Number((quoteInVault / quoteScalar).toFixed(9)), + deep: Number((deepInVault / DEEP_SCALAR).toFixed(9)), }; } @@ -424,6 +430,6 @@ export class DeepBookClient { const adjusted_mid_price = (parsed_mid_price * baseCoin.scalar) / quoteCoin.scalar / FLOAT_SCALAR; - return adjusted_mid_price; + return Number(adjusted_mid_price.toFixed(9)); } } diff --git a/sdk/deepbook-v3/src/index.ts 
b/sdk/deepbook-v3/src/index.ts index f8f6f0d28a03e..fad2c28551413 100644 --- a/sdk/deepbook-v3/src/index.ts +++ b/sdk/deepbook-v3/src/index.ts @@ -8,3 +8,5 @@ export { DeepBookAdminContract } from './transactions/deepbookAdmin.js'; export { FlashLoanContract } from './transactions/flashLoans.js'; export { GovernanceContract } from './transactions/governance.js'; export { DeepBookConfig } from './utils/config.js'; +export type { BalanceManager, Coin, Pool } from './types/index.js'; +export type { CoinMap, PoolMap } from './utils/constants.js'; diff --git a/sdk/deepbook-v3/src/transactions/balanceManager.ts b/sdk/deepbook-v3/src/transactions/balanceManager.ts index 5b6496b3d274d..55fdfaa527586 100644 --- a/sdk/deepbook-v3/src/transactions/balanceManager.ts +++ b/sdk/deepbook-v3/src/transactions/balanceManager.ts @@ -28,8 +28,9 @@ export class BalanceManagerContract { }); tx.moveCall({ - target: `${this.#config.DEEPBOOK_PACKAGE_ID}::balance_manager::share`, + target: '0x2::transfer::public_share_object', arguments: [manager], + typeArguments: [`${this.#config.DEEPBOOK_PACKAGE_ID}::balance_manager::BalanceManager`], }); }; @@ -45,9 +46,10 @@ export class BalanceManagerContract { tx.setSenderIfNotSet(this.#config.address); const managerId = this.#config.getBalanceManager(managerKey).address; const coin = this.#config.getCoin(coinKey); + const depositInput = Math.round(amountToDeposit * coin.scalar); const deposit = coinWithBalance({ type: coin.type, - balance: amountToDeposit * coin.scalar, + balance: depositInput, }); tx.moveCall({ @@ -70,9 +72,10 @@ export class BalanceManagerContract { (tx: Transaction) => { const managerId = this.#config.getBalanceManager(managerKey).address; const coin = this.#config.getCoin(coinKey); + const withdrawInput = Math.round(amountToWithdraw * coin.scalar); const coinObject = tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::balance_manager::withdraw`, - arguments: [tx.object(managerId), tx.pure.u64(amountToWithdraw * coin.scalar)], + arguments: [tx.object(managerId), tx.pure.u64(withdrawInput)], typeArguments: [coin.type], }); diff --git a/sdk/deepbook-v3/src/transactions/deepbook.ts b/sdk/deepbook-v3/src/transactions/deepbook.ts index aff618b9a41d6..8dfa2b792e9a1 100644 --- a/sdk/deepbook-v3/src/transactions/deepbook.ts +++ b/sdk/deepbook-v3/src/transactions/deepbook.ts @@ -46,8 +46,8 @@ export class DeepBookContract { const balanceManager = this.#config.getBalanceManager(balanceManagerKey); const baseCoin = this.#config.getCoin(pool.baseCoin); const quoteCoin = this.#config.getCoin(pool.quoteCoin); - const inputPrice = (price * FLOAT_SCALAR * quoteCoin.scalar) / baseCoin.scalar; - const inputQuantity = quantity * baseCoin.scalar; + const inputPrice = Math.round((price * FLOAT_SCALAR * quoteCoin.scalar) / baseCoin.scalar); + const inputQuantity = Math.round(quantity * baseCoin.scalar); const tradeProof = tx.add(this.#config.balanceManager.generateProof(balanceManagerKey)); @@ -93,6 +93,7 @@ export class DeepBookContract { const baseCoin = this.#config.getCoin(pool.baseCoin); const quoteCoin = this.#config.getCoin(pool.quoteCoin); const tradeProof = tx.add(this.#config.balanceManager.generateProof(balanceManagerKey)); + const inputQuantity = Math.round(quantity * baseCoin.scalar); tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::place_market_order`, @@ -102,7 +103,7 @@ export class DeepBookContract { tradeProof, tx.pure.u64(clientOrderId), tx.pure.u8(selfMatchingOption), - tx.pure.u64(quantity * baseCoin.scalar), + 
tx.pure.u64(inputQuantity), tx.pure.bool(isBid), tx.pure.bool(payWithDeep), tx.object(SUI_CLOCK_OBJECT_ID), @@ -127,6 +128,7 @@ export class DeepBookContract { const baseCoin = this.#config.getCoin(pool.baseCoin); const quoteCoin = this.#config.getCoin(pool.quoteCoin); const tradeProof = tx.add(this.#config.balanceManager.generateProof(balanceManagerKey)); + const inputQuantity = Math.round(newQuantity * baseCoin.scalar); tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::modify_order`, @@ -135,7 +137,7 @@ export class DeepBookContract { tx.object(balanceManager.address), tradeProof, tx.pure.u128(orderId), - tx.pure.u64(newQuantity), + tx.pure.u64(inputQuantity), tx.object(SUI_CLOCK_OBJECT_ID), ], typeArguments: [baseCoin.type, quoteCoin.type], @@ -524,10 +526,13 @@ export class DeepBookContract { const baseCoinInput = params.baseCoin ?? - coinWithBalance({ type: baseCoin.type, balance: baseAmount * baseCoin.scalar }); + coinWithBalance({ type: baseCoin.type, balance: Math.round(baseAmount * baseCoin.scalar) }); const deepCoin = - params.deepCoin ?? coinWithBalance({ type: deepCoinType, balance: deepAmount * DEEP_SCALAR }); + params.deepCoin ?? + coinWithBalance({ type: deepCoinType, balance: Math.round(deepAmount * DEEP_SCALAR) }); + + const minQuoteInput = Math.round(minQuote * quoteCoin.scalar); const [baseCoinResult, quoteCoinResult, deepCoinResult] = tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::swap_exact_base_for_quote`, @@ -535,7 +540,7 @@ export class DeepBookContract { tx.object(pool.address), baseCoinInput, deepCoin, - tx.pure.u64(quoteCoin.scalar * minQuote), + tx.pure.u64(minQuoteInput), tx.object(SUI_CLOCK_OBJECT_ID), ], typeArguments: [baseCoin.type, quoteCoin.type], @@ -565,10 +570,16 @@ export class DeepBookContract { const quoteCoinInput = params.quoteCoin ?? - coinWithBalance({ type: quoteCoin.type, balance: quoteAmount * quoteCoin.scalar }); + coinWithBalance({ + type: quoteCoin.type, + balance: Math.round(quoteAmount * quoteCoin.scalar), + }); const deepCoin = - params.deepCoin ?? coinWithBalance({ type: deepCoinType, balance: deepAmount * DEEP_SCALAR }); + params.deepCoin ?? 
+ coinWithBalance({ type: deepCoinType, balance: Math.round(deepAmount * DEEP_SCALAR) }); + + const minBaseInput = Math.round(minBase * baseCoin.scalar); const [baseCoinResult, quoteCoinResult, deepCoinResult] = tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::swap_exact_quote_for_base`, @@ -576,7 +587,7 @@ export class DeepBookContract { tx.object(pool.address), quoteCoinInput, deepCoin, - tx.pure.u64(baseCoin.scalar * minBase), + tx.pure.u64(minBaseInput), tx.object(SUI_CLOCK_OBJECT_ID), ], typeArguments: [baseCoin.type, quoteCoin.type], diff --git a/sdk/deepbook-v3/src/transactions/flashLoans.ts b/sdk/deepbook-v3/src/transactions/flashLoans.ts index bffdae0c09572..f61c5eb66f523 100644 --- a/sdk/deepbook-v3/src/transactions/flashLoans.ts +++ b/sdk/deepbook-v3/src/transactions/flashLoans.ts @@ -27,9 +27,10 @@ export class FlashLoanContract { const pool = this.#config.getPool(poolKey); const baseCoin = this.#config.getCoin(pool.baseCoin); const quoteCoin = this.#config.getCoin(pool.quoteCoin); + const inputQuantity = Math.round(borrowAmount * baseCoin.scalar); const [baseCoinResult, flashLoan] = tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::borrow_flashloan_base`, - arguments: [tx.object(pool.address), tx.pure.u64(borrowAmount * baseCoin.scalar)], + arguments: [tx.object(pool.address), tx.pure.u64(inputQuantity)], typeArguments: [baseCoin.type, quoteCoin.type], }); return [baseCoinResult, flashLoan] as const; @@ -57,7 +58,7 @@ export class FlashLoanContract { const borrowScalar = baseCoin.scalar; const [baseCoinReturn] = tx.splitCoins(baseCoinInput, [ - tx.pure.u64(borrowAmount * borrowScalar), + tx.pure.u64(Math.round(borrowAmount * borrowScalar)), ]); tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::return_flashloan_base`, @@ -78,9 +79,10 @@ export class FlashLoanContract { const pool = this.#config.getPool(poolKey); const baseCoin = this.#config.getCoin(pool.baseCoin); const quoteCoin = this.#config.getCoin(pool.quoteCoin); + const inputQuantity = Math.round(borrowAmount * quoteCoin.scalar); const [quoteCoinResult, flashLoan] = tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::borrow_flashloan_quote`, - arguments: [tx.object(pool.address), tx.pure.u64(borrowAmount * quoteCoin.scalar)], + arguments: [tx.object(pool.address), tx.pure.u64(inputQuantity)], typeArguments: [baseCoin.type, quoteCoin.type], }); return [quoteCoinResult, flashLoan] as const; @@ -108,7 +110,7 @@ export class FlashLoanContract { const borrowScalar = quoteCoin.scalar; const [quoteCoinReturn] = tx.splitCoins(quoteCoinInput, [ - tx.pure.u64(borrowAmount * borrowScalar), + tx.pure.u64(Math.round(borrowAmount * borrowScalar)), ]); tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::return_flashloan_quote`, diff --git a/sdk/deepbook-v3/src/transactions/governance.ts b/sdk/deepbook-v3/src/transactions/governance.ts index 9428e8df982de..907be66d6ad13 100644 --- a/sdk/deepbook-v3/src/transactions/governance.ts +++ b/sdk/deepbook-v3/src/transactions/governance.ts @@ -33,6 +33,7 @@ export class GovernanceContract { const tradeProof = tx.add(this.#config.balanceManager.generateProof(balanceManagerKey)); const baseCoin = this.#config.getCoin(pool.baseCoin); const quoteCoin = this.#config.getCoin(pool.quoteCoin); + const stakeInput = Math.round(stakeAmount * DEEP_SCALAR); tx.moveCall({ target: `${this.#config.DEEPBOOK_PACKAGE_ID}::pool::stake`, @@ -40,7 +41,7 @@ export class GovernanceContract { tx.object(pool.address), 
tx.object(balanceManager.address), tradeProof, - tx.pure.u64(stakeAmount * DEEP_SCALAR), + tx.pure.u64(stakeInput), ], typeArguments: [baseCoin.type, quoteCoin.type], }); @@ -86,9 +87,9 @@ export class GovernanceContract { tx.object(pool.address), tx.object(balanceManager.address), tradeProof, - tx.pure.u64(takerFee * FLOAT_SCALAR), - tx.pure.u64(makerFee * FLOAT_SCALAR), - tx.pure.u64(stakeRequired * DEEP_SCALAR), + tx.pure.u64(Math.round(takerFee * FLOAT_SCALAR)), + tx.pure.u64(Math.round(makerFee * FLOAT_SCALAR)), + tx.pure.u64(Math.round(stakeRequired * DEEP_SCALAR)), ], typeArguments: [baseCoin.type, quoteCoin.type], }); diff --git a/sdk/deepbook-v3/src/utils/constants.ts b/sdk/deepbook-v3/src/utils/constants.ts index 7e6b71c22154a..97119bab2f4b7 100644 --- a/sdk/deepbook-v3/src/utils/constants.ts +++ b/sdk/deepbook-v3/src/utils/constants.ts @@ -12,8 +12,8 @@ export interface DeepbookPackageIds { } export const testnetPackageIds = { - DEEPBOOK_PACKAGE_ID: '0xa81ddf024bb253df8b1671d9309ad2c752e1de15670304a5941aca09d5febead', - REGISTRY_ID: '0x83a5bd620e094ed892102ea24eb1e9161921733301194e64bdd56d3a8317e3f4', + DEEPBOOK_PACKAGE_ID: '0xc671049379e6d512e3ecd0e79da50cb28840f09764d80a342b904863d87e5389', + REGISTRY_ID: '0x9162317a81a9eb66ecd42705529b2a39c7805f98f42312275c2e7a599d518437', DEEP_TREASURY_ID: '0x69fffdae0075f8f71f4fa793549c11079266910e8905169845af1f5d00e09dcb', } satisfies DeepbookPackageIds; @@ -76,22 +76,22 @@ export const mainnetCoins: CoinMap = { export const testnetPools: PoolMap = { DEEP_SUI: { - address: `0x0700829d7a29cf17e2bb7509dafbc8088dd147694cf02a910f90403a163780c8`, + address: `0x2decc59a6f05c5800e5c8a1135f9d133d1746f562bf56673e6e81ef4f7ccd3b7`, baseCoin: 'DEEP', quoteCoin: 'SUI', }, SUI_DBUSDC: { - address: `0x16754e137465b0c8131fd5c27d9cd0448789a89e4edf1b814f9c43270dc12b43`, + address: `0xace543e8239f0c19783e57bacb02c581fd38d52899bdce117e49c91b494c8b10`, baseCoin: 'SUI', quoteCoin: 'DBUSDC', }, DEEP_DBUSDC: { - address: `0x7f1521a844805fe4ace03bf06ad6192631c4db18d7aeac142019b5bbad4cc8e8`, + address: `0x1faaa544a84c16215ef005edb046ddf8e1cfec0792aec3032e86e554b33bd33a`, baseCoin: 'DEEP', quoteCoin: 'DBUSDC', }, DBUSDT_DBUSDC: { - address: `0x83a8aa044739740f37170d1701938b82362bc1811c9a667b63362b9fd2c7b521`, + address: `0x83aca040eaeaf061e3d482a44d1a87a5b8b6206ad52edae9d0479b830a38106f`, baseCoin: 'DBUSDT', quoteCoin: 'DBUSDC', }, diff --git a/sdk/deepbook/CHANGELOG.md b/sdk/deepbook/CHANGELOG.md index f2bb6f521eb79..f9fdfffcfd923 100644 --- a/sdk/deepbook/CHANGELOG.md +++ b/sdk/deepbook/CHANGELOG.md @@ -1,5 +1,23 @@ # @mysten/deepbook +## 0.8.16 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.8.15 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + ## 0.8.14 ### Patch Changes diff --git a/sdk/deepbook/package.json b/sdk/deepbook/package.json index b493663de506b..dd90fff911719 100644 --- a/sdk/deepbook/package.json +++ b/sdk/deepbook/package.json @@ -2,7 +2,7 @@ "name": "@mysten/deepbook", "author": "Mysten Labs ", "description": "Sui Deepbook SDK", - "version": "0.8.14", + "version": "0.8.16", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/deepbook/vitest.config.ts b/sdk/deepbook/vitest.config.ts index 71d126eaef86c..07452fe1219c2 100644 --- a/sdk/deepbook/vitest.config.ts +++ b/sdk/deepbook/vitest.config.ts @@ -5,8 
+5,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { - minThreads: 1, - maxThreads: 8, + minWorkers: 1, + maxWorkers: 4, hookTimeout: 1000000, testTimeout: 1000000, env: { diff --git a/sdk/docs/pages/dapp-kit/wallet-hooks/useSignAndExecuteTransaction.mdx b/sdk/docs/pages/dapp-kit/wallet-hooks/useSignAndExecuteTransaction.mdx index 45f9aebb4d826..c6ca63d1b815e 100644 --- a/sdk/docs/pages/dapp-kit/wallet-hooks/useSignAndExecuteTransaction.mdx +++ b/sdk/docs/pages/dapp-kit/wallet-hooks/useSignAndExecuteTransaction.mdx @@ -59,14 +59,14 @@ you can pass a custom `execute` function. ```ts import { ConnectButton, - useClient, + useSuiClient, useCurrentAccount, useSignAndExecuteTransaction, } from '@mysten/dapp-kit'; import { useState } from 'react'; function MyComponent() { - const client = useClient(); + const client = useSuiClient(); const { mutate: signAndExecuteTransaction } = useSignAndExecuteTransaction({ execute: async ({ bytes, signature }) => await client.executeTransactionBlock({ diff --git a/sdk/docs/pages/typescript/cryptography/keypairs.mdx b/sdk/docs/pages/typescript/cryptography/keypairs.mdx index bdabb9a8cec9f..b779184e20059 100644 --- a/sdk/docs/pages/typescript/cryptography/keypairs.mdx +++ b/sdk/docs/pages/typescript/cryptography/keypairs.mdx @@ -118,6 +118,25 @@ if (publicKey.toSuiAddress() !== keypair.getPublicKey().toSuiAddress()) { } ``` +## Verifying zkLogin signatures + +ZkLogin signatures can't be verified purely on the client. When verifying a zkLogin signature, the +SDK uses the GraphQL API to verify the signature. This will work for mainnet signatures without any +additional configuration. + +For testnet signatures, you will need to provide a testnet GraphQL Client: + +```typescript +import { SuiGraphQLClient } from '@mysten/sui/graphql'; +import { verifyPersonalMessageSignature } from '@mysten/sui/verify'; + +const publicKey = await verifyPersonalMessageSignature(message, zkSignature, { + client: new SuiGraphQLClient({ + url: 'https://sui-testnet.mystenlabs.com/graphql', + }), +}); +``` + ## Deriving a key pair from a mnemonic The Sui TypeScript SDK supports deriving a key pair from a mnemonic phrase. This can be useful when diff --git a/sdk/docs/pages/typescript/executors.mdx b/sdk/docs/pages/typescript/executors.mdx index 2080f26af6908..05eeed173ea9c 100644 --- a/sdk/docs/pages/typescript/executors.mdx +++ b/sdk/docs/pages/typescript/executors.mdx @@ -25,6 +25,13 @@ when multiple transactions use the same objects. `SerialTransactionExecutor` maintains an internal queue, so you don't need to wait for previous transactions to finish before sending the next one. +`SerialTransactionExecutor` can be configured with a number of options: + +- `client`: An instance of `SuiClient` used to execute transactions. +- `signer`: The signer/keypair used to sign transactions. +- `defaultBudget`: The default budget for transactions, which will be used if the transaction does + not specify a budget (default `50_000_000n`). + ```ts import { getFullnodeUrl, SuiClient } from '@mysten/sui/client'; import { SerialTransactionExecutor } from '@mysten/sui/transactions'; diff --git a/sdk/docs/pages/typescript/index.mdx b/sdk/docs/pages/typescript/index.mdx index d3c9e91a09570..c006083efa3f8 100644 --- a/sdk/docs/pages/typescript/index.mdx +++ b/sdk/docs/pages/typescript/index.mdx @@ -95,7 +95,7 @@ what you need to keep your code light and compact. - [`@mysten/sui/client`](/typescript/sui-client) - A client for interacting with Sui RPC nodes.
- [`@mysten/sui/bcs`](/typescript/bcs) - A BCS builder with pre-defined types for Sui. -- [`@mysten/sui/transaction`](/typescript/transaction-building/basics) - Utilities for building and +- [`@mysten/sui/transactions`](/typescript/transaction-building/basics) - Utilities for building and interacting with transactions. - [`@mysten/sui/keypairs/*`](/typescript/cryptography/keypairs) - Modular exports for specific KeyPair implementations. diff --git a/sdk/docs/pages/typescript/transaction-building/basics.mdx b/sdk/docs/pages/typescript/transaction-building/basics.mdx index 4c53a4a5ea2fd..4a3d5bc82ae7d 100644 --- a/sdk/docs/pages/typescript/transaction-building/basics.mdx +++ b/sdk/docs/pages/typescript/transaction-building/basics.mdx @@ -53,6 +53,33 @@ After you have the transaction defined, you can directly execute it with a signe client.signAndExecuteTransaction({ signer: keypair, transaction: tx }); ``` +## Observing the results of a transaction + +When you use `client.signAndExecuteTransaction` or `client.executeTransactionBlock`, the transaction +will be finalized on the blockchain before the function resolves, but the effects of the transaction +may not be immediately observable. + +There are two ways to observe the results of a transaction. Methods like +`client.signAndExecuteTransaction` accept an `options` object with options like `showObjectChanges` +and `showBalanceChanges` (see +[the SuiClient docs for more details](/typescript/sui-client#arguments)). These options will cause +the response to contain additional details about the effects of the transaction that can be +immediately displayed to the user, or used for further processing in your application. + +The other way to observe the effects of a transaction is by querying other RPC methods like +`client.getBalances` that return objects or balances owned by a specific address. These RPC calls +depend on the RPC node having indexed the effects of the transaction, which may not have happened +immediately after a transaction has been executed. To ensure that the effects of a transaction are +represented in future RPC calls, you can use the `waitForTransaction` method on the client: + +```typescript +const result = await client.signAndExecuteTransaction({ signer: keypair, transaction: tx }); +await client.waitForTransaction({ digest: result.digest }); +``` + +Once `waitForTransaction` resolves, any future RPC calls will be guaranteed to reflect the effects +of the transaction. + ## Transactions Programmable Transactions have two key concepts: inputs and transactions.
diff --git a/sdk/enoki/CHANGELOG.md b/sdk/enoki/CHANGELOG.md index 54a1c1e7cbf18..99127ce42ddea 100644 --- a/sdk/enoki/CHANGELOG.md +++ b/sdk/enoki/CHANGELOG.md @@ -1,5 +1,38 @@ # @mysten/enoki +## 0.4.0 + +### Minor Changes + +- f589885: Add sdk methods for managing enoki subnames + +## 0.3.17 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + - @mysten/zklogin@0.7.17 + +## 0.3.16 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + - @mysten/zklogin@0.7.16 + +## 0.3.15 + +### Patch Changes + +- Updated dependencies [6f79ed9] + - @mysten/zklogin@0.7.15 + ## 0.3.14 ### Patch Changes diff --git a/sdk/enoki/package.json b/sdk/enoki/package.json index 6fe8acf5afd32..73e5a1d800774 100644 --- a/sdk/enoki/package.json +++ b/sdk/enoki/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/enoki", - "version": "0.3.14", + "version": "0.4.0", "description": "TODO: Description", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/enoki/src/EnokiClient/index.ts b/sdk/enoki/src/EnokiClient/index.ts index cc7f08421b84b..f9f5be737edeb 100644 --- a/sdk/enoki/src/EnokiClient/index.ts +++ b/sdk/enoki/src/EnokiClient/index.ts @@ -4,14 +4,20 @@ import type { CreateSponsoredTransactionApiInput, CreateSponsoredTransactionApiResponse, + CreateSubnameApiInput, + CreateSubnameApiResponse, CreateZkLoginNonceApiInput, CreateZkLoginNonceApiResponse, CreateZkLoginZkpApiInput, CreateZkLoginZkpApiResponse, + DeleteSubnameApiInput, + DeleteSubnameApiResponse, ExecuteSponsoredTransactionApiInput, ExecuteSponsoredTransactionApiResponse, GetAppApiInput, GetAppApiResponse, + GetSubnamesApiInput, + GetSubnamesApiResponse, GetZkLoginApiInput, GetZkLoginApiResponse, } from './type.js'; @@ -29,6 +35,8 @@ export interface EnokiClientConfig { export class EnokiClientError extends Error { errors: { code: string; message: string; data: unknown }[] = []; + status: number; + code: string; constructor(status: number, response: string) { let errors; @@ -46,6 +54,8 @@ export class EnokiClientError extends Error { }); this.errors = errors ?? []; this.name = 'EnokiClientError'; + this.status = status; + this.code = errors?.[0]?.code ?? 'unknown_error'; } } @@ -134,6 +144,53 @@ export class EnokiClient { ); } + getSubnames(input: GetSubnamesApiInput) { + const query = new URLSearchParams(); + if (input.address) { + query.set('address', input.address); + } + if (input.network) { + query.set('network', input.network); + } + if (input.domain) { + query.set('domain', input.domain); + } + return this.#fetch<GetSubnamesApiResponse>( + 'subnames' + (query.size > 0 ? `?${query.toString()}` : ''), + { + method: 'GET', + }, + ); + } + + createSubname(input: CreateSubnameApiInput) { + return this.#fetch<CreateSubnameApiResponse>('subnames', { + method: 'POST', + headers: input.jwt ?
{ + [ZKLOGIN_HEADER]: input.jwt, + } : {}, + body: JSON.stringify({ + network: input.network, + domain: input.domain, + subname: input.subname, + targetAddress: input.targetAddress, + }), + }); + } + + deleteSubname(input: DeleteSubnameApiInput) { + return this.#fetch<DeleteSubnameApiResponse>('subnames', { + method: 'DELETE', + body: JSON.stringify({ + network: input.network, + domain: input.domain, + subname: input.subname, + }), + }); + } + async #fetch<T>(path: string, init: RequestInit): Promise<T> { const res = await fetch(`${this.#apiUrl}/${this.#version}/${path}`, { ...init, diff --git a/sdk/enoki/src/EnokiClient/type.ts b/sdk/enoki/src/EnokiClient/type.ts index b5fb56ed7f133..77599cdc1ff49 100644 --- a/sdk/enoki/src/EnokiClient/type.ts +++ b/sdk/enoki/src/EnokiClient/type.ts @@ -7,13 +7,21 @@ import type { ZkLoginSignatureInputs } from '@mysten/sui/zklogin'; import type { AuthProvider } from '../EnokiFlow.js'; export type EnokiNetwork = 'mainnet' | 'testnet' | 'devnet'; +export type EnokiDomainNetwork = 'mainnet' | 'testnet'; +export type EnokiSubanameStatus = 'PENDING' | 'ACTIVE'; export interface GetAppApiInput {} export interface GetAppApiResponse { + allowedOrigins: string[]; authenticationProviders: { providerType: AuthProvider; clientId: string; }[]; + domains: { + nftId: string; + name: string; + network: EnokiDomainNetwork; + }[]; } export interface GetZkLoginApiInput { @@ -77,3 +85,42 @@ export interface ExecuteSponsoredTransactionApiInput { export interface ExecuteSponsoredTransactionApiResponse { digest: string; } + +export interface GetSubnamesApiInput { + address?: string; + network?: EnokiDomainNetwork; + domain?: string; +} +export interface GetSubnamesApiResponse { + subnames: { + name: string; + status: EnokiSubanameStatus; + }[]; +} + +export type CreateSubnameApiInput = { + domain: string; + network?: EnokiDomainNetwork; + subname: string; +} & ( + | { + jwt: string; + targetAddress?: never; + } + | { + targetAddress: string; + jwt?: never; + } +); +export interface CreateSubnameApiResponse { + name: string; +} + +export interface DeleteSubnameApiInput { + domain: string; + network?: EnokiDomainNetwork; + subname: string; +} +export interface DeleteSubnameApiResponse { + name: string; +} diff --git a/sdk/enoki/src/index.ts b/sdk/enoki/src/index.ts index 3b713d74a6604..591fbfd8fb30f 100644 --- a/sdk/enoki/src/index.ts +++ b/sdk/enoki/src/index.ts @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -export { EnokiClient, type EnokiClientConfig } from './EnokiClient/index.js'; +export { EnokiClient, type EnokiClientConfig, EnokiClientError } from './EnokiClient/index.js'; export { EnokiFlow, type AuthProvider, type EnokiFlowConfig } from './EnokiFlow.js'; export { createLocalStorage, diff --git a/sdk/graphql-transport/CHANGELOG.md b/sdk/graphql-transport/CHANGELOG.md index 03699d2c3810b..fcfa14f6c2b08 100644 --- a/sdk/graphql-transport/CHANGELOG.md +++ b/sdk/graphql-transport/CHANGELOG.md @@ -1,5 +1,23 @@ # @mysten/graphql-transport +## 0.2.16 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.2.15 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + ## 0.2.14 ### Patch Changes diff --git a/sdk/graphql-transport/package.json b/sdk/graphql-transport/package.json index c073c39d8dd05..08ee1bb6507d3 100644 --- a/sdk/graphql-transport/package.json +++ b/sdk/graphql-transport/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/graphql-transport", - "version": "0.2.14", + "version": "0.2.16", "description": "A GraphQL transport to allow SuiClient to work with RPC 2.0", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/graphql-transport/vitest.config.ts b/sdk/graphql-transport/vitest.config.ts index 2619e62f59c25..c728327ae6aaa 100644 --- a/sdk/graphql-transport/vitest.config.ts +++ b/sdk/graphql-transport/vitest.config.ts @@ -5,8 +5,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { - minThreads: 1, - maxThreads: 8, + minWorkers: 1, + maxWorkers: 4, hookTimeout: 1000000, testTimeout: 1000000, env: { diff --git a/sdk/kiosk/CHANGELOG.md b/sdk/kiosk/CHANGELOG.md index 6a70a56716abe..b41d2684a66a7 100644 --- a/sdk/kiosk/CHANGELOG.md +++ b/sdk/kiosk/CHANGELOG.md @@ -1,5 +1,23 @@ # @mysten/kiosk +## 0.9.16 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.9.15 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + ## 0.9.14 ### Patch Changes diff --git a/sdk/kiosk/package.json b/sdk/kiosk/package.json index 780eeb46433c2..3f4dcea4135d5 100644 --- a/sdk/kiosk/package.json +++ b/sdk/kiosk/package.json @@ -2,7 +2,7 @@ "name": "@mysten/kiosk", "author": "Mysten Labs ", "description": "Sui Kiosk library", - "version": "0.9.14", + "version": "0.9.16", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/kiosk/vitest.config.ts b/sdk/kiosk/vitest.config.ts index 71d126eaef86c..07452fe1219c2 100644 --- a/sdk/kiosk/vitest.config.ts +++ b/sdk/kiosk/vitest.config.ts @@ -5,8 +5,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { - minThreads: 1, - maxThreads: 8, + minWorkers: 1, + maxWorkers: 4, hookTimeout: 1000000, testTimeout: 1000000, env: { diff --git a/sdk/suins-toolkit/CHANGELOG.md b/sdk/suins-toolkit/CHANGELOG.md index 177ff6486d001..ba5294afb0e87 100644 --- a/sdk/suins-toolkit/CHANGELOG.md +++ b/sdk/suins-toolkit/CHANGELOG.md @@ -1,5 +1,23 @@ # @mysten/suins-toolkit +## 0.5.16 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated 
dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.5.15 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + ## 0.5.14 ### Patch Changes diff --git a/sdk/suins-toolkit/package.json b/sdk/suins-toolkit/package.json index 6cbe8f4048b94..434d8dedf04d7 100644 --- a/sdk/suins-toolkit/package.json +++ b/sdk/suins-toolkit/package.json @@ -2,7 +2,7 @@ "name": "@mysten/suins-toolkit", "author": "Mysten Labs ", "description": "SuiNS TypeScript SDK", - "version": "0.5.14", + "version": "0.5.16", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/suins-toolkit/vitest.config.ts b/sdk/suins-toolkit/vitest.config.ts index ea622e90c555f..874241689c250 100644 --- a/sdk/suins-toolkit/vitest.config.ts +++ b/sdk/suins-toolkit/vitest.config.ts @@ -5,8 +5,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { - minThreads: 1, - maxThreads: 8, + minWorkers: 1, + maxWorkers: 4, hookTimeout: 1000000, testTimeout: 1000000, }, diff --git a/sdk/typescript/CHANGELOG.md b/sdk/typescript/CHANGELOG.md index c0387fd12f3d2..1d3e424afca54 100644 --- a/sdk/typescript/CHANGELOG.md +++ b/sdk/typescript/CHANGELOG.md @@ -1,5 +1,32 @@ # @mysten/sui.js +## 1.7.0 + +### Minor Changes + +- 143cd9d: Add new tx.object methods for defining inputs for well known object ids: + + - `tx.object.system()`: `0x5` + - `tx.object.clock()`: `0x6` + - `tx.object.random()`: `0x8` + - `tx.object.denyList()`: `0x403` + +- 4019dd7: Add default budget to transactions executed through the SerialTransactionExecutor class +- 4019dd7: Add options argument to executeTransaction methods on transaction executor classes +- 00a974d: Add global registry for transaction plugins + +### Patch Changes + +- 4357ac6: Add options argument to verifyTransactionSignature + +## 1.6.0 + +### Minor Changes + +- a3e32fe: `WaitForLocalExecution` now waits using client.waitForTransaction rather than sending + requestType to the RPC node. This change will preserve readAfterWrite consistency when local + execution is removed from fullnodes, at the cost of more network requests and higher latency. 
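The well-known-object helpers added in 1.7.0 are thin wrappers that resolve to the object IDs listed in the changelog entry above. A short sketch, using a hypothetical Move target for illustration:

```ts
import { Transaction } from '@mysten/sui/transactions';

const tx = new Transaction();

// tx.object.clock() is equivalent to tx.object('0x6'); likewise
// system() -> 0x5, random() -> 0x8, and denyList() -> 0x403.
tx.moveCall({
	target: '0xabc::example::with_clock', // hypothetical package and function
	arguments: [tx.object.clock()],
});
```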
+ ## 1.5.0 ### Minor Changes diff --git a/sdk/typescript/package.json b/sdk/typescript/package.json index 92a8627c1d19f..c544d7a672d78 100644 --- a/sdk/typescript/package.json +++ b/sdk/typescript/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "Sui TypeScript API(Work in Progress)", "homepage": "https://sdk.mystenlabs.com", - "version": "1.5.0", + "version": "1.7.0", "license": "Apache-2.0", "sideEffects": false, "files": [ diff --git a/sdk/typescript/scripts/update-graphql-schemas.ts b/sdk/typescript/scripts/update-graphql-schemas.ts index a129e7dfe1024..4a3843ede5cbc 100644 --- a/sdk/typescript/scripts/update-graphql-schemas.ts +++ b/sdk/typescript/scripts/update-graphql-schemas.ts @@ -29,7 +29,7 @@ const result = execSync(`git branch --remote --list "origin/releases/sui-graphql minor, patch, branch, - schema: `https://raw.githubusercontent.com/MystenLabs/sui/${branch}/crates/sui-graphql-rpc/schema/current_progress_schema.graphql`, + schema: `https://raw.githubusercontent.com/MystenLabs/sui/${branch}/crates/sui-graphql-rpc/schema.graphql`, } : null; }) diff --git a/sdk/typescript/src/client/client.ts b/sdk/typescript/src/client/client.ts index 530e846b1fea9..c425f9f3d2148 100644 --- a/sdk/typescript/src/client/client.ts +++ b/sdk/typescript/src/client/client.ts @@ -406,20 +406,32 @@ export class SuiClient { }); } - async executeTransactionBlock( - input: ExecuteTransactionBlockParams, - ): Promise { - return await this.transport.request({ + async executeTransactionBlock({ + transactionBlock, + signature, + options, + requestType, + }: ExecuteTransactionBlockParams): Promise { + const result: SuiTransactionBlockResponse = await this.transport.request({ method: 'sui_executeTransactionBlock', params: [ - typeof input.transactionBlock === 'string' - ? input.transactionBlock - : toB64(input.transactionBlock), - Array.isArray(input.signature) ? input.signature : [input.signature], - input.options, - input.requestType, + typeof transactionBlock === 'string' ? transactionBlock : toB64(transactionBlock), + Array.isArray(signature) ? 
signature : [signature], + options, ], }); + + if (requestType === 'WaitForLocalExecution') { + try { + await this.waitForTransaction({ + digest: result.digest, + }); + } catch (_) { + // Ignore error while waiting for transaction + } + } + + return result; } async signAndExecuteTransaction({ diff --git a/sdk/typescript/src/transactions/Arguments.ts b/sdk/typescript/src/transactions/Arguments.ts index 920b3ff203423..ff384a69c31d1 100644 --- a/sdk/typescript/src/transactions/Arguments.ts +++ b/sdk/typescript/src/transactions/Arguments.ts @@ -2,12 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 import type { Inputs } from './Inputs.js'; +import { createObjectMethods } from './object.js'; import { createPure } from './pure.js'; import type { Transaction, TransactionObjectInput } from './Transaction.js'; export const Arguments = { pure: createPure((value) => (tx: Transaction) => tx.pure(value)), - object: (value: TransactionObjectInput) => (tx: Transaction) => tx.object(value), + object: createObjectMethods( + (value: TransactionObjectInput) => (tx: Transaction) => tx.object(value), + ), sharedObjectRef: (...args: Parameters<(typeof Inputs)['SharedObjectRef']>) => (tx: Transaction) => diff --git a/sdk/typescript/src/transactions/Transaction.ts b/sdk/typescript/src/transactions/Transaction.ts index 75cac56c04735..30a7723beeb01 100644 --- a/sdk/typescript/src/transactions/Transaction.ts +++ b/sdk/typescript/src/transactions/Transaction.ts @@ -22,6 +22,7 @@ import type { TransactionPlugin, } from './json-rpc-resolver.js'; import { resolveTransactionData } from './json-rpc-resolver.js'; +import { createObjectMethods } from './object.js'; import { createPure } from './pure.js'; import { TransactionDataBuilder } from './TransactionData.js'; import { getIdFromCallArg } from './utils.js'; @@ -98,12 +99,37 @@ export function isTransaction(obj: unknown): obj is Transaction { export type TransactionObjectInput = string | CallArg | TransactionObjectArgument; +const modulePluginRegistry = { + buildPlugins: [] as TransactionPlugin[], + serializationPlugins: [] as TransactionPlugin[], +}; + +const TRANSACTION_REGISTRY_KEY = Symbol.for('@mysten/transaction/registry'); +function getGlobalPluginRegistry() { + try { + const target = globalThis as { + [TRANSACTION_REGISTRY_KEY]?: { + buildPlugins: TransactionPlugin[]; + serializationPlugins: TransactionPlugin[]; + }; + }; + + if (!target[TRANSACTION_REGISTRY_KEY]) { + target[TRANSACTION_REGISTRY_KEY] = modulePluginRegistry; + } + + return target[TRANSACTION_REGISTRY_KEY]; + } catch (e) { + return modulePluginRegistry; + } +} + /** * Transaction Builder */ export class Transaction { - #serializationPlugins: TransactionPlugin[] = []; - #buildPlugins: TransactionPlugin[] = []; + #serializationPlugins: TransactionPlugin[]; + #buildPlugins: TransactionPlugin[]; #intentResolvers = new Map(); /** @@ -142,6 +168,14 @@ export class Transaction { return newTransaction; } + static registerGlobalSerializationPlugin(step: TransactionPlugin) { + getGlobalPluginRegistry().serializationPlugins.push(step); + } + + static registerGlobalBuildPlugin(step: TransactionPlugin) { + getGlobalPluginRegistry().buildPlugins.push(step); + } + addSerializationPlugin(step: TransactionPlugin) { this.#serializationPlugins.push(step); } @@ -241,7 +275,10 @@ export class Transaction { } constructor() { + const globalPlugins = getGlobalPluginRegistry(); this.#data = new TransactionDataBuilder(); + this.#buildPlugins = [...globalPlugins.buildPlugins]; + this.#serializationPlugins = 
[...globalPlugins.serializationPlugins]; } /** Returns an argument for the gas coin, to be used in a transaction. */ @@ -252,37 +289,43 @@ export class Transaction { /** * Add a new object input to the transaction. */ - object(value: TransactionObjectInput): { $kind: 'Input'; Input: number; type?: 'object' } { - if (typeof value === 'function') { - return this.object(value(this)); - } + object = createObjectMethods( + (value: TransactionObjectInput): { $kind: 'Input'; Input: number; type?: 'object' } => { + if (typeof value === 'function') { + return this.object(value(this)); + } - if (typeof value === 'object' && is(Argument, value)) { - return value as { $kind: 'Input'; Input: number; type?: 'object' }; - } + if (typeof value === 'object' && is(Argument, value)) { + return value as { $kind: 'Input'; Input: number; type?: 'object' }; + } - const id = getIdFromCallArg(value); + const id = getIdFromCallArg(value); - const inserted = this.#data.inputs.find((i) => id === getIdFromCallArg(i)); + const inserted = this.#data.inputs.find((i) => id === getIdFromCallArg(i)); - // Upgrade shared object inputs to mutable if needed: - if (inserted?.Object?.SharedObject && typeof value === 'object' && value.Object?.SharedObject) { - inserted.Object.SharedObject.mutable = - inserted.Object.SharedObject.mutable || value.Object.SharedObject.mutable; - } + // Upgrade shared object inputs to mutable if needed: + if ( + inserted?.Object?.SharedObject && + typeof value === 'object' && + value.Object?.SharedObject + ) { + inserted.Object.SharedObject.mutable = + inserted.Object.SharedObject.mutable || value.Object.SharedObject.mutable; + } - return inserted - ? { $kind: 'Input', Input: this.#data.inputs.indexOf(inserted), type: 'object' } - : this.#data.addInput( - 'object', - typeof value === 'string' - ? { - $kind: 'UnresolvedObject', - UnresolvedObject: { objectId: normalizeSuiAddress(value) }, - } - : value, - ); - } + return inserted + ? { $kind: 'Input', Input: this.#data.inputs.indexOf(inserted), type: 'object' } + : this.#data.addInput( + 'object', + typeof value === 'string' + ? { + $kind: 'UnresolvedObject', + UnresolvedObject: { objectId: normalizeSuiAddress(value) }, + } + : value, + ); + }, + ); /** * Add a new object input to the transaction using the fully-resolved object reference. 
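The registry above lives on `globalThis` under `Symbol.for('@mysten/transaction/registry')`, so even if a bundler ships two copies of the SDK, both push into the same plugin lists, and each `Transaction` constructor copies those lists into its own instance. A sketch of registering a global build plugin, assuming `TransactionPlugin` is the `(transactionData, options, next) => Promise<void>` middleware shape from `json-rpc-resolver.js` and is exported from the transactions entry point:

```ts
import { Transaction } from '@mysten/sui/transactions';
import type { TransactionPlugin } from '@mysten/sui/transactions'; // assumed export path

// A pass-through plugin: inspect the transaction data, then defer to the
// rest of the resolution chain via next().
const countCommands: TransactionPlugin = async (transactionData, _options, next) => {
	console.log(`building ${transactionData.commands.length} commands`);
	await next();
};

// Every Transaction constructed after this call picks up the plugin,
// because the constructor copies the global registry into #buildPlugins.
Transaction.registerGlobalBuildPlugin(countCommands);
```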
diff --git a/sdk/typescript/src/transactions/executor/parallel.ts b/sdk/typescript/src/transactions/executor/parallel.ts
index a74eefc714920..da47dbe1325b2 100644
--- a/sdk/typescript/src/transactions/executor/parallel.ts
+++ b/sdk/typescript/src/transactions/executor/parallel.ts
@@ -5,7 +5,7 @@ import { toB64 } from '@mysten/bcs';
 
 import { bcs } from '../../bcs/index.js';
 import type { SuiObjectRef } from '../../bcs/types.js';
-import type { SuiClient } from '../../client/index.js';
+import type { SuiClient, SuiTransactionBlockResponseOptions } from '../../client/index.js';
 import type { Signer } from '../../cryptography/index.js';
 import type { ObjectCacheOptions } from '../ObjectCache.js';
 import { Transaction } from '../Transaction.js';
@@ -104,7 +104,7 @@ export class ParallelTransactionExecutor {
     await this.#updateCache(() => this.#waitForLastDigest());
   }
 
-  async executeTransaction(transaction: Transaction) {
+  async executeTransaction(transaction: Transaction, options?: SuiTransactionBlockResponseOptions) {
     const { promise, resolve, reject } = promiseWithResolvers<{
       digest: string;
       effects: string;
@@ -113,7 +113,7 @@
 
     const execute = () => {
       this.#executeQueue.runTask(() => {
-        const promise = this.#execute(transaction, usedObjects);
+        const promise = this.#execute(transaction, usedObjects, options);
 
         return promise.then(resolve, reject);
       });
@@ -174,7 +174,11 @@
     return usedObjects;
   }
 
-  async #execute(transaction: Transaction, usedObjects: Set<string>) {
+  async #execute(
+    transaction: Transaction,
+    usedObjects: Set<string>,
+    options?: SuiTransactionBlockResponseOptions,
+  ) {
     let gasCoin!: CoinWithBalance;
     try {
       transaction.setSenderIfNotSet(this.#signer.toSuiAddress());
@@ -186,9 +190,7 @@
         transaction.setGasPrice(await this.#getGasPrice());
       }
 
-      if (!data.gasData.budget) {
-        transaction.setGasBudget(this.#defaultGasBudget);
-      }
+      transaction.setGasBudgetIfNotSet(this.#defaultGasBudget);
 
       await this.#updateCache();
       gasCoin = await this.#getGasCoin();
@@ -213,6 +215,7 @@
         transaction: bytes,
         signature,
         options: {
+          ...options,
           showEffects: true,
         },
       });
diff --git a/sdk/typescript/src/transactions/executor/serial.ts b/sdk/typescript/src/transactions/executor/serial.ts
index f301825ff887e..603dfeae7bd6e 100644
--- a/sdk/typescript/src/transactions/executor/serial.ts
+++ b/sdk/typescript/src/transactions/executor/serial.ts
@@ -4,7 +4,7 @@
 import { toB64 } from '@mysten/bcs';
 
 import { bcs } from '../../bcs/index.js';
-import type { SuiClient } from '../../client/index.js';
+import type { SuiClient, SuiTransactionBlockResponseOptions } from '../../client/index.js';
 import type { Signer } from '../../cryptography/keypair.js';
 import type { ObjectCacheOptions } from '../ObjectCache.js';
 import { isTransaction, Transaction } from '../Transaction.js';
@@ -15,15 +15,20 @@ export class SerialTransactionExecutor {
   #queue = new SerialQueue();
   #signer: Signer;
   #cache: CachingTransactionExecutor;
+  #defaultGasBudget: bigint;
 
   constructor({
     signer,
+    defaultGasBudget = 50_000_000n,
     ...options
   }: Omit<ObjectCacheOptions, 'address'> & {
     client: SuiClient;
     signer: Signer;
+    /** The gasBudget to use if the transaction has not defined its own gasBudget, defaults to `50_000_000n` */
+    defaultGasBudget?: bigint;
   }) {
     this.#signer = signer;
+    this.#defaultGasBudget = defaultGasBudget;
     this.#cache = new CachingTransactionExecutor({
       client: options.client,
       cache: options.cache,
@@ -63,6
+68,7 @@ export class SerialTransactionExecutor {
       copy.setGasPayment([gasCoin]);
     }
 
+    copy.setGasBudgetIfNotSet(this.#defaultGasBudget);
     copy.setSenderIfNotSet(this.#signer.toSuiAddress());
 
     return this.#cache.buildTransaction({ transaction: copy });
@@ -76,7 +82,10 @@
     return this.#cache.waitForLastTransaction();
   }
 
-  executeTransaction(transaction: Transaction | Uint8Array) {
+  executeTransaction(
+    transaction: Transaction | Uint8Array,
+    options?: SuiTransactionBlockResponseOptions,
+  ) {
     return this.#queue.runTask(async () => {
       const bytes = isTransaction(transaction)
         ? await this.#buildTransaction(transaction)
@@ -87,6 +96,7 @@
         .executeTransaction({
           signature,
           transaction: bytes,
+          options,
         })
         .catch(async (error) => {
           await this.resetCache();
diff --git a/sdk/typescript/src/transactions/object.ts b/sdk/typescript/src/transactions/object.ts
new file mode 100644
index 0000000000000..ff83fac9dc750
--- /dev/null
+++ b/sdk/typescript/src/transactions/object.ts
@@ -0,0 +1,17 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+import type { TransactionObjectInput } from './Transaction.js';
+
+export function createObjectMethods<T>(makeObject: (value: TransactionObjectInput) => T) {
+  function object(value: TransactionObjectInput) {
+    return makeObject(value);
+  }
+
+  object.system = () => object('0x5');
+  object.clock = () => object('0x6');
+  object.random = () => object('0x8');
+  object.denyList = () => object('0x403');
+
+  return object;
+}
diff --git a/sdk/typescript/src/verify/verify.ts b/sdk/typescript/src/verify/verify.ts
index 49cbcba897b64..7ba6fcf63a6ab 100644
--- a/sdk/typescript/src/verify/verify.ts
+++ b/sdk/typescript/src/verify/verify.ts
@@ -45,8 +45,9 @@ export async function verifyPersonalMessageSignature(
 export async function verifyTransactionSignature(
   transaction: Uint8Array,
   signature: string,
+  options: { client?: SuiGraphQLClient } = {},
 ): Promise<PublicKey> {
-  const parsedSignature = parseSignature(signature);
+  const parsedSignature = parseSignature(signature, options);
 
   if (
     !(await parsedSignature.publicKey.verifyTransaction(
@@ -102,10 +103,13 @@
   }
 }
 
-export function publicKeyFromSuiBytes(publicKey: string | Uint8Array) {
+export function publicKeyFromSuiBytes(
+  publicKey: string | Uint8Array,
+  options: { client?: SuiGraphQLClient } = {},
+) {
   const bytes = typeof publicKey === 'string' ? fromB64(publicKey) : publicKey;
   const signatureScheme = SIGNATURE_FLAG_TO_SCHEME[bytes[0] as SignatureFlag];
 
-  return publicKeyFromRawBytes(signatureScheme, bytes.slice(1));
+  return publicKeyFromRawBytes(signatureScheme, bytes.slice(1), options);
 }
diff --git a/sdk/typescript/src/version.ts b/sdk/typescript/src/version.ts
index b637e1853c9a0..3615ca82466f4 100644
--- a/sdk/typescript/src/version.ts
+++ b/sdk/typescript/src/version.ts
@@ -3,5 +3,5 @@
 // This file is generated by genversion.mjs. Do not edit it directly.
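Taken together, the executor changes mean a `SerialTransactionExecutor` now falls back to a default gas budget and forwards response options through to `executeTransactionBlock`. A usage sketch, assuming the executor classes are exported from the `@mysten/sui/transactions` entry point:

```ts
import { getFullnodeUrl, SuiClient } from '@mysten/sui/client';
import { Ed25519Keypair } from '@mysten/sui/keypairs/ed25519';
import { SerialTransactionExecutor, Transaction } from '@mysten/sui/transactions';

const client = new SuiClient({ url: getFullnodeUrl('testnet') });
const signer = new Ed25519Keypair();

// defaultGasBudget is applied only when a transaction has not set its own
// budget (via setGasBudgetIfNotSet in #buildTransaction).
const executor = new SerialTransactionExecutor({
	client,
	signer,
	defaultGasBudget: 10_000_000n,
});

const tx = new Transaction();
tx.transferObjects([tx.gas], signer.toSuiAddress());

// The new options argument is forwarded to executeTransactionBlock.
const { digest } = await executor.executeTransaction(tx, { showEvents: true });
```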
-export const PACKAGE_VERSION = '1.5.0';
-export const TARGETED_RPC_VERSION = '1.31.0';
+export const PACKAGE_VERSION = '1.7.0';
+export const TARGETED_RPC_VERSION = '1.32.0';
diff --git a/sdk/typescript/src/zklogin/publickey.ts b/sdk/typescript/src/zklogin/publickey.ts
index 2e6bf8b3e6a34..d42afefb0559d 100644
--- a/sdk/typescript/src/zklogin/publickey.ts
+++ b/sdk/typescript/src/zklogin/publickey.ts
@@ -71,6 +71,7 @@ export class ZkLoginPublicIdentifier extends PublicKey {
   verifyPersonalMessage(message: Uint8Array, signature: Uint8Array | string): Promise<boolean> {
     const parsedSignature = parseSerializedZkLoginSignature(signature);
     const address = new ZkLoginPublicIdentifier(parsedSignature.publicKey).toSuiAddress();
+
     return graphqlVerifyZkLoginSignature({
       address: address,
       bytes: toB64(message),
diff --git a/sdk/typescript/test/e2e/id-entry-args.test.ts b/sdk/typescript/test/e2e/id-entry-args.test.ts
index afa9d960cd905..25974ac4a1e24 100644
--- a/sdk/typescript/test/e2e/id-entry-args.test.ts
+++ b/sdk/typescript/test/e2e/id-entry-args.test.ts
@@ -29,6 +29,7 @@ describe('Test ID as args to entry functions', () => {
         showEffects: true,
       },
     });
+    await toolbox.client.waitForTransaction({ digest: result.digest });
     expect(result.effects?.status.status).toEqual('success');
   });
 
@@ -45,6 +46,7 @@
         showEffects: true,
       },
     });
+    await toolbox.client.waitForTransaction({ digest: result.digest });
     expect(result.effects?.status.status).toEqual('success');
   });
 });
diff --git a/sdk/typescript/test/e2e/multisig.test.ts b/sdk/typescript/test/e2e/multisig.test.ts
index 205a9ee8cdf3c..53a50966638f0 100644
--- a/sdk/typescript/test/e2e/multisig.test.ts
+++ b/sdk/typescript/test/e2e/multisig.test.ts
@@ -105,6 +105,7 @@ describe('MultiSig with zklogin signature', () => {
       signature,
       options: { showEffects: true },
     });
+    await client.waitForTransaction({ digest: result.digest });
 
     // check the execution result and digest.
const localDigest = await tx.getDigest({ client }); diff --git a/sdk/typescript/test/e2e/object-cache.test.ts b/sdk/typescript/test/e2e/object-cache.test.ts index 5edb3cea8c630..3577b87389406 100644 --- a/sdk/typescript/test/e2e/object-cache.test.ts +++ b/sdk/typescript/test/e2e/object-cache.test.ts @@ -10,7 +10,7 @@ import { CachingTransactionExecutor } from '../../src/transactions/executor/cach import { normalizeSuiAddress } from '../../src/utils'; import { setup, TestToolbox } from './utils/setup'; -describe('CachingTransactionExecutor', async () => { +describe('CachingTransactionExecutor', { retry: 3 }, async () => { let toolbox: TestToolbox; let packageId: string; let rawPackageId: string; diff --git a/sdk/typescript/test/e2e/objects.test.ts b/sdk/typescript/test/e2e/objects.test.ts index 2a74636be5fcb..7f696b0404c7c 100644 --- a/sdk/typescript/test/e2e/objects.test.ts +++ b/sdk/typescript/test/e2e/objects.test.ts @@ -119,10 +119,11 @@ describe('Object Reading API', () => { // Transfer the entire gas object: tx.transferObjects([tx.gas], normalizeSuiAddress('0x2')); - await toolbox.client.signAndExecuteTransaction({ + const { digest } = await toolbox.client.signAndExecuteTransaction({ signer: toolbox.keypair, transaction: tx, }); + await toolbox.client.waitForTransaction({ digest }); const res = await toolbox.client.tryGetPastObject({ id: data[0].coinObjectId, diff --git a/sdk/typescript/test/e2e/parallel-executor.test.ts b/sdk/typescript/test/e2e/parallel-executor.test.ts index 88d7c58b73ea9..7d0d036fd4043 100644 --- a/sdk/typescript/test/e2e/parallel-executor.test.ts +++ b/sdk/typescript/test/e2e/parallel-executor.test.ts @@ -39,7 +39,7 @@ afterAll(() => { vi.restoreAllMocks(); }); -describe('ParallelTransactionExecutor', () => { +describe('ParallelTransactionExecutor', { retry: 3 }, () => { beforeEach(async () => { await executor.resetCache(); vi.clearAllMocks(); diff --git a/sdk/typescript/test/e2e/serial-executor.test.ts b/sdk/typescript/test/e2e/serial-executor.test.ts index 830d32fa8bd5c..d14ba26780dbb 100644 --- a/sdk/typescript/test/e2e/serial-executor.test.ts +++ b/sdk/typescript/test/e2e/serial-executor.test.ts @@ -29,7 +29,7 @@ afterAll(() => { vi.restoreAllMocks(); }); -describe('SerialExecutor', () => { +describe('SerialExecutor', { retry: 3 }, () => { beforeEach(async () => { vi.clearAllMocks(); await executor.resetCache(); diff --git a/sdk/typescript/test/e2e/txn-builder.test.ts b/sdk/typescript/test/e2e/txn-builder.test.ts index e92beced30a69..75486345488be 100644 --- a/sdk/typescript/test/e2e/txn-builder.test.ts +++ b/sdk/typescript/test/e2e/txn-builder.test.ts @@ -220,6 +220,7 @@ async function validateTransaction(client: SuiClient, signer: Keypair, tx: Trans showEffects: true, }, }); + await client.waitForTransaction({ digest: result.digest }); expect(localDigest).toEqual(result.digest); expect(result.effects?.status.status).toEqual('success'); } diff --git a/sdk/typescript/test/unit/arguments.test.ts b/sdk/typescript/test/unit/arguments.test.ts index d737e5cc51e02..736b0be0eab50 100644 --- a/sdk/typescript/test/unit/arguments.test.ts +++ b/sdk/typescript/test/unit/arguments.test.ts @@ -26,6 +26,10 @@ describe('Arguments helpers', () => { digest: toB58(new Uint8Array(32).fill(0x1)), }), Arguments.pure.address('0x2'), + Arguments.object.system(), + Arguments.object.clock(), + Arguments.object.random(), + Arguments.object.denyList(), ]; const tx = new Transaction(); @@ -67,6 +71,26 @@ describe('Arguments helpers', () => { "Input": 4, "type": "pure", }, + { + "$kind": 
"Input", + "Input": 5, + "type": "object", + }, + { + "$kind": "Input", + "Input": 6, + "type": "object", + }, + { + "$kind": "Input", + "Input": 7, + "type": "object", + }, + { + "$kind": "Input", + "Input": 8, + "type": "object", + }, ], "function": "bar", "module": "foo", @@ -128,6 +152,30 @@ describe('Arguments helpers', () => { "bytes": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI=", }, }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000005", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000006", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000008", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000403", + }, + }, ], "sender": null, "version": 2, diff --git a/sdk/typescript/test/unit/object-inputs.test.ts b/sdk/typescript/test/unit/object-inputs.test.ts new file mode 100644 index 0000000000000..ddde51d40331f --- /dev/null +++ b/sdk/typescript/test/unit/object-inputs.test.ts @@ -0,0 +1,183 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { toB58 } from '@mysten/bcs'; +import { describe, expect, it } from 'vitest'; + +import { Transaction } from '../../src/transactions'; + +describe('Transaction inputs', () => { + it('can use tx methods for creating inputs', async () => { + const tx = new Transaction(); + + tx.moveCall({ + target: '0x2::foo::bar', + arguments: [ + tx.object('0x123'), + tx.receivingRef({ + objectId: '1', + version: '123', + digest: toB58(new Uint8Array(32).fill(0x1)), + }), + tx.sharedObjectRef({ + objectId: '2', + mutable: true, + initialSharedVersion: '123', + }), + tx.objectRef({ + objectId: '3', + version: '123', + digest: toB58(new Uint8Array(32).fill(0x1)), + }), + tx.pure.address('0x2'), + tx.object.system(), + tx.object.clock(), + tx.object.random(), + tx.object.denyList(), + ], + }); + + expect(tx.getData()).toMatchInlineSnapshot(` + { + "commands": [ + { + "$kind": "MoveCall", + "MoveCall": { + "arguments": [ + { + "$kind": "Input", + "Input": 0, + "type": "object", + }, + { + "$kind": "Input", + "Input": 1, + "type": "object", + }, + { + "$kind": "Input", + "Input": 2, + "type": "object", + }, + { + "$kind": "Input", + "Input": 3, + "type": "object", + }, + { + "$kind": "Input", + "Input": 4, + "type": "pure", + }, + { + "$kind": "Input", + "Input": 5, + "type": "object", + }, + { + "$kind": "Input", + "Input": 6, + "type": "object", + }, + { + "$kind": "Input", + "Input": 7, + "type": "object", + }, + { + "$kind": "Input", + "Input": 8, + "type": "object", + }, + ], + "function": "bar", + "module": "foo", + "package": "0x0000000000000000000000000000000000000000000000000000000000000002", + "typeArguments": [], + }, + }, + ], + "expiration": null, + "gasData": { + "budget": null, + "owner": null, + "payment": null, + "price": null, + }, + "inputs": [ + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000123", + }, + }, + { + "$kind": "Object", + "Object": { + "$kind": "Receiving", + "Receiving": { + "digest": "4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi", + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000001", + "version": "123", + }, + 
}, + }, + { + "$kind": "Object", + "Object": { + "$kind": "SharedObject", + "SharedObject": { + "initialSharedVersion": "123", + "mutable": true, + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000002", + }, + }, + }, + { + "$kind": "Object", + "Object": { + "$kind": "ImmOrOwnedObject", + "ImmOrOwnedObject": { + "digest": "4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi", + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000003", + "version": "123", + }, + }, + }, + { + "$kind": "Pure", + "Pure": { + "bytes": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI=", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000005", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000006", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000008", + }, + }, + { + "$kind": "UnresolvedObject", + "UnresolvedObject": { + "objectId": "0x0000000000000000000000000000000000000000000000000000000000000403", + }, + }, + ], + "sender": null, + "version": 2, + } + `); + }); +}); diff --git a/sdk/typescript/vitest.config.ts b/sdk/typescript/vitest.config.ts index e58acd7035af6..231c84d702bbe 100644 --- a/sdk/typescript/vitest.config.ts +++ b/sdk/typescript/vitest.config.ts @@ -5,8 +5,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { - minThreads: 1, - maxThreads: 8, + minWorkers: 1, + maxWorkers: 4, hookTimeout: 1000000, testTimeout: 1000000, env: { diff --git a/sdk/wallet-standard/CHANGELOG.md b/sdk/wallet-standard/CHANGELOG.md index 058d65fbd3db3..3c88ee60b92f2 100644 --- a/sdk/wallet-standard/CHANGELOG.md +++ b/sdk/wallet-standard/CHANGELOG.md @@ -1,5 +1,23 @@ # @mysten/wallet-standard +## 0.13.2 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.13.1 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + ## 0.13.0 ### Minor Changes diff --git a/sdk/wallet-standard/package.json b/sdk/wallet-standard/package.json index 7247acdd51532..378c6b92b6e92 100644 --- a/sdk/wallet-standard/package.json +++ b/sdk/wallet-standard/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/wallet-standard", - "version": "0.13.0", + "version": "0.13.2", "description": "A suite of standard utilities for implementing wallets based on the Wallet Standard.", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/zklogin/CHANGELOG.md b/sdk/zklogin/CHANGELOG.md index bcbf8fbcfa335..011e1a18c2bc2 100644 --- a/sdk/zklogin/CHANGELOG.md +++ b/sdk/zklogin/CHANGELOG.md @@ -1,5 +1,29 @@ # @mysten/zklogin +## 0.7.17 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + +## 0.7.16 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + +## 0.7.15 + +### Patch Changes + +- 6f79ed9: Additional check for bigints + ## 0.7.14 ### Patch Changes diff --git a/sdk/zklogin/package.json b/sdk/zklogin/package.json index d850aff33d903..e42717b9670c0 100644 --- a/sdk/zklogin/package.json +++ b/sdk/zklogin/package.json 
@@ -1,6 +1,6 @@ { "name": "@mysten/zklogin", - "version": "0.7.14", + "version": "0.7.17", "description": "Utilities for interacting with zkLogin in Sui", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/zklogin/src/poseidon.ts b/sdk/zklogin/src/poseidon.ts index f51ce896137cd..c002176e7df3a 100644 --- a/sdk/zklogin/src/poseidon.ts +++ b/sdk/zklogin/src/poseidon.ts @@ -39,7 +39,17 @@ const poseidonNumToHashFN = [ poseidon16, ]; +export const BN254_FIELD_SIZE = + 21888242871839275222246405745257275088548364400416034343698204186575808495617n; + export function poseidonHash(inputs: (number | bigint | string)[]): bigint { + inputs.forEach((x) => { + const b = BigInt(x); + if (b < 0 || b >= BN254_FIELD_SIZE) { + throw new Error(`Element ${b} not in the BN254 field`); + } + }); + const hashFN = poseidonNumToHashFN[inputs.length - 1]; if (hashFN) { diff --git a/sdk/zklogin/test/poseidon.test.ts b/sdk/zklogin/test/poseidon.test.ts new file mode 100644 index 0000000000000..7499b7164e5b4 --- /dev/null +++ b/sdk/zklogin/test/poseidon.test.ts @@ -0,0 +1,25 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +import { expect, test } from 'vitest'; + +import { BN254_FIELD_SIZE, poseidonHash } from '../src/poseidon'; + +test('can hash single input', () => { + const result = poseidonHash([123]); + expect(result).toBeTypeOf('bigint'); +}); + +test('can hash multiple inputs', () => { + const result = poseidonHash([1, 2, 3, 4, 5]); + expect(result).toBeTypeOf('bigint'); +}); + +test('throws error for invalid input', () => { + expect(() => poseidonHash([-1])).toThrowError('Element -1 not in the BN254 field'); +}); + +test('throws error for invalid input greater than BN254_FIELD_SIZE', () => { + expect(() => poseidonHash([BN254_FIELD_SIZE])).toThrowError( + 'Element 21888242871839275222246405745257275088548364400416034343698204186575808495617 not in the BN254 field', + ); +}); diff --git a/sdk/zksend/CHANGELOG.md b/sdk/zksend/CHANGELOG.md index a5579f1de1fa8..7b41cfc38edc4 100644 --- a/sdk/zksend/CHANGELOG.md +++ b/sdk/zksend/CHANGELOG.md @@ -1,5 +1,25 @@ # @mysten/zksend +## 0.10.6 + +### Patch Changes + +- Updated dependencies [143cd9d] +- Updated dependencies [4357ac6] +- Updated dependencies [4019dd7] +- Updated dependencies [4019dd7] +- Updated dependencies [00a974d] + - @mysten/sui@1.7.0 + - @mysten/wallet-standard@0.13.2 + +## 0.10.5 + +### Patch Changes + +- Updated dependencies [a3e32fe] + - @mysten/sui@1.6.0 + - @mysten/wallet-standard@0.13.1 + ## 0.10.4 ### Patch Changes diff --git a/sdk/zksend/package.json b/sdk/zksend/package.json index dbc811d9bf12f..e105d7deb5bbf 100644 --- a/sdk/zksend/package.json +++ b/sdk/zksend/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/zksend", - "version": "0.10.4", + "version": "0.10.6", "description": "TODO: Write Description", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sui-execution/latest/sui-adapter/Cargo.toml b/sui-execution/latest/sui-adapter/Cargo.toml index 04f1d9734cf8d..7a58fe23095ff 100644 --- a/sui-execution/latest/sui-adapter/Cargo.toml +++ b/sui-execution/latest/sui-adapter/Cargo.toml @@ -7,6 +7,9 @@ license = "Apache-2.0" publish = false edition = "2021" +[lints] +workspace = true + [dependencies] anyhow = { workspace = true, features = ["backtrace"] } bcs.workspace = true diff --git a/sui-execution/latest/sui-adapter/src/adapter.rs b/sui-execution/latest/sui-adapter/src/adapter.rs index 25da7df5a7617..149fa4b1a83b7 100644 --- a/sui-execution/latest/sui-adapter/src/adapter.rs +++ 
b/sui-execution/latest/sui-adapter/src/adapter.rs
@@ -70,6 +72,8 @@ mod checked {
                 // Don't augment errors with execution state on-chain
                 error_execution_state: false,
                 binary_config: to_binary_config(protocol_config),
+                rethrow_serialization_type_layout_errors: protocol_config
+                    .rethrow_serialization_type_layout_errors(),
             },
         )
         .map_err(|_| SuiError::ExecutionInvariantViolation)
diff --git a/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs b/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs
index 7a740b6f2b191..93535741f7567 100644
--- a/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs
+++ b/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs
@@ -571,8 +571,8 @@ fn multi_scalar_mul(
     base_cost: Option<InternalGas>,
     base_cost_per_addition: Option<InternalGas>,
     max_len: u32,
-    scalars: &Vec<u8>,
-    points: &Vec<u8>,
+    scalars: &[u8],
+    points: &[u8],
 ) -> PartialVMResult<NativeResult>
 where
     G: GroupElement
diff --git a/sui-execution/latest/sui-verifier/src/entry_points_verifier.rs b/sui-execution/latest/sui-verifier/src/entry_points_verifier.rs
index e3147b9d5a24d..9eadf862fcad0 100644
--- a/sui-execution/latest/sui-verifier/src/entry_points_verifier.rs
+++ b/sui-execution/latest/sui-verifier/src/entry_points_verifier.rs
@@ -28,8 +28,8 @@ use crate::{verification_failure, INIT_FN_NAME};
 /// - The function must have `Visibility::Private`
 /// - The function can have at most two parameters:
 ///   - mandatory &mut TxContext or &TxContext (see `is_tx_context`) in the last position
-///   - optional one-time witness type (see one_time_witness verifier pass) passed by value in the first
-///     position
+///   - optional one-time witness type (see one_time_witness verifier pass) passed by value in the
+///     first position
 ///
 /// For transaction entry points
 /// - The function must have `is_entry` true
diff --git a/sui-execution/v0/sui-adapter/Cargo.toml b/sui-execution/v0/sui-adapter/Cargo.toml
index 0eb11447ba112..745245766b109 100644
--- a/sui-execution/v0/sui-adapter/Cargo.toml
+++ b/sui-execution/v0/sui-adapter/Cargo.toml
@@ -7,6 +7,9 @@ license = "Apache-2.0"
 publish = false
 edition = "2021"
 
+[lints]
+workspace = true
+
 [dependencies]
 anyhow = { workspace = true, features = ["backtrace"] }
 bcs.workspace = true
diff --git a/sui-execution/v0/sui-adapter/src/adapter.rs b/sui-execution/v0/sui-adapter/src/adapter.rs
index 7570eac77a7d1..73fccf3124b9a 100644
--- a/sui-execution/v0/sui-adapter/src/adapter.rs
+++ b/sui-execution/v0/sui-adapter/src/adapter.rs
@@ -72,6 +72,8 @@ mod checked {
                 profiler_config: vm_profiler_config,
 
                 binary_config: to_binary_config(protocol_config),
+                rethrow_serialization_type_layout_errors: protocol_config
+                    .rethrow_serialization_type_layout_errors(),
             },
         )
         .map_err(|_| SuiError::ExecutionInvariantViolation)
diff --git a/sui-execution/v0/sui-verifier/Cargo.toml b/sui-execution/v0/sui-verifier/Cargo.toml
index bb3dde2501689..e5304b9386808 100644
--- a/sui-execution/v0/sui-verifier/Cargo.toml
+++ b/sui-execution/v0/sui-verifier/Cargo.toml
@@ -7,6 +7,9 @@ description = "Move framework for Sui platform"
 license = "Apache-2.0"
 publish = false
 
+[lints]
+workspace = true
+
 [dependencies]
 move-binary-format.workspace = true
 move-bytecode-utils.workspace = true
diff --git a/sui-execution/v0/sui-verifier/src/entry_points_verifier.rs b/sui-execution/v0/sui-verifier/src/entry_points_verifier.rs
index b32ba8fcde498..002d0b3a795c3 100644
--- a/sui-execution/v0/sui-verifier/src/entry_points_verifier.rs
+++ b/sui-execution/v0/sui-verifier/src/entry_points_verifier.rs
@@ -27,7 +27,7 @@ use 
crate::{verification_failure, INIT_FN_NAME};
 /// - The function can have at most two parameters:
 ///   - mandatory &mut TxContext or &TxContext (see `is_tx_context`) in the last position
 ///   - optional one-time witness type (see one_time_witness verifier pass) passed by value in the first
-/// position
+///   position
 ///
 /// For transaction entry points
 /// - The function must have `is_entry` true
diff --git a/sui-execution/v1/sui-adapter/Cargo.toml b/sui-execution/v1/sui-adapter/Cargo.toml
index 8dce979551406..d986af97a9856 100644
--- a/sui-execution/v1/sui-adapter/Cargo.toml
+++ b/sui-execution/v1/sui-adapter/Cargo.toml
@@ -7,6 +7,9 @@ license = "Apache-2.0"
 publish = false
 edition = "2021"
 
+[lints]
+workspace = true
+
 [dependencies]
 anyhow = { workspace = true, features = ["backtrace"] }
 bcs.workspace = true
diff --git a/sui-execution/v1/sui-adapter/src/adapter.rs b/sui-execution/v1/sui-adapter/src/adapter.rs
index bd9f4eeccb2d0..633abec22e6b2 100644
--- a/sui-execution/v1/sui-adapter/src/adapter.rs
+++ b/sui-execution/v1/sui-adapter/src/adapter.rs
@@ -72,6 +72,8 @@ mod checked {
                 // Don't augment errors with execution state on-chain
                 error_execution_state: false,
                 binary_config: to_binary_config(protocol_config),
+                rethrow_serialization_type_layout_errors: protocol_config
+                    .rethrow_serialization_type_layout_errors(),
             },
         )
         .map_err(|_| SuiError::ExecutionInvariantViolation)
diff --git a/sui-execution/v1/sui-verifier/src/entry_points_verifier.rs b/sui-execution/v1/sui-verifier/src/entry_points_verifier.rs
index d107c81b98f4b..2f6a706e0f24d 100644
--- a/sui-execution/v1/sui-verifier/src/entry_points_verifier.rs
+++ b/sui-execution/v1/sui-verifier/src/entry_points_verifier.rs
@@ -27,7 +27,7 @@ use crate::{verification_failure, INIT_FN_NAME};
 /// - The function can have at most two parameters:
 ///   - mandatory &mut TxContext or &TxContext (see `is_tx_context`) in the last position
 ///   - optional one-time witness type (see one_time_witness verifier pass) passed by value in the first
-/// position
+///   position
 ///
 /// For transaction entry points
 /// - The function must have `is_entry` true
diff --git a/sui-execution/v2/sui-adapter/Cargo.toml b/sui-execution/v2/sui-adapter/Cargo.toml
index ae29ab4845fc0..cb8bf360a0b12 100644
--- a/sui-execution/v2/sui-adapter/Cargo.toml
+++ b/sui-execution/v2/sui-adapter/Cargo.toml
@@ -7,6 +7,9 @@ license = "Apache-2.0"
 publish = false
 edition = "2021"
 
+[lints]
+workspace = true
+
 [dependencies]
 anyhow = { workspace = true, features = ["backtrace"] }
 bcs.workspace = true
diff --git a/sui-execution/v2/sui-adapter/src/adapter.rs b/sui-execution/v2/sui-adapter/src/adapter.rs
index d8d0d1bfbc7d7..b7f6ccfb3e04c 100644
--- a/sui-execution/v2/sui-adapter/src/adapter.rs
+++ b/sui-execution/v2/sui-adapter/src/adapter.rs
@@ -70,6 +70,8 @@ mod checked {
                 // Don't augment errors with execution state on-chain
                 error_execution_state: false,
                 binary_config: to_binary_config(protocol_config),
+                rethrow_serialization_type_layout_errors: protocol_config
+                    .rethrow_serialization_type_layout_errors(),
             },
         )
         .map_err(|_| SuiError::ExecutionInvariantViolation)
diff --git a/sui-execution/v2/sui-move-natives/src/crypto/group_ops.rs b/sui-execution/v2/sui-move-natives/src/crypto/group_ops.rs
index 54cc84f55be48..8f1e459486371 100644
--- a/sui-execution/v2/sui-move-natives/src/crypto/group_ops.rs
+++ b/sui-execution/v2/sui-move-natives/src/crypto/group_ops.rs
@@ -563,8 +563,8 @@ fn multi_scalar_mul(
     base_cost: Option<InternalGas>,
     base_cost_per_addition: Option<InternalGas>,
     max_len: u32,
-    scalars: &Vec<u8>,
-    points: &Vec<u8>,
+    scalars: &[u8],
+    points: &[u8],
 ) -> PartialVMResult<NativeResult>
 where
     G: GroupElement
diff --git a/sui-execution/v2/sui-verifier/src/entry_points_verifier.rs b/sui-execution/v2/sui-verifier/src/entry_points_verifier.rs
index 9785762cf5581..6e17dba9ffa3b 100644
--- a/sui-execution/v2/sui-verifier/src/entry_points_verifier.rs
+++ b/sui-execution/v2/sui-verifier/src/entry_points_verifier.rs
@@ -29,7 +29,7 @@ use crate::{verification_failure, INIT_FN_NAME};
 /// - The function can have at most two parameters:
 ///   - mandatory &mut TxContext or &TxContext (see `is_tx_context`) in the last position
 ///   - optional one-time witness type (see one_time_witness verifier pass) passed by value in the first
-/// position
+///   position
 ///
 /// For transaction entry points
 /// - The function must have `is_entry` true
diff --git a/sui_programmability/examples/README.md b/sui_programmability/examples/README.md
deleted file mode 100644
index b2601469571c6..0000000000000
--- a/sui_programmability/examples/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
->[!IMPORTANT]
-> These examples have moved! **Find the latest versions in the [examples](../../examples) directory** which contains up-to-date [Move](../../examples/move) and end-to-end examples!